/* net/ceph/messenger.c */

#include <linux/ceph/ceph_debug.h>

#include <linux/crc32c.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/dns_resolver.h>
#include <net/tcp.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/export.h>

/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system. The messenger provides ordered and reliable
 * delivery. We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error). Acks allow sent messages to be discarded by
 * the sender.
 */

/*
 * We track the state of the socket on a given connection using
 * values defined below. The transition to a new socket state is
 * handled by a function which verifies we aren't coming from an
 * unexpected state.
 *
 *      --------
 *      | NEW* |  transient initial state
 *      --------
 *          | con_sock_state_init()
 *          v
 *      ----------
 *      | CLOSED |  initialized, but no socket (and no
 *      ----------  TCP connection)
 *       ^      \
 *       |       \ con_sock_state_connecting()
 *       |        ----------------------
 *       |                              \
 *       + con_sock_state_closed()       \
 *       |+---------------------------    \
 *       | \                          \    \
 *       |  -----------                \    \
 *       |  | CLOSING |  socket event;  \    \
 *       |  -----------  await close     \    \
 *       |       ^                        \   |
 *       |       |                        \   |
 *       |       + con_sock_state_closing() \ |
 *       |      / \                         | |
 *       |     /   ---------------          | |
 *       |    /                   \         v v
 *       |   /                    --------------
 *       |  /    -----------------| CONNECTING |  socket created, TCP
 *       |  |   /                 --------------  connect initiated
 *       |  |   | con_sock_state_connected()
 *       |  |   v
 *      -------------
 *      | CONNECTED |  TCP connection established
 *      -------------
 *
 * State values for ceph_connection->sock_state; NEW is assumed to be 0.
 */

#define CON_SOCK_STATE_NEW              0       /* -> CLOSED */
#define CON_SOCK_STATE_CLOSED           1       /* -> CONNECTING */
#define CON_SOCK_STATE_CONNECTING       2       /* -> CONNECTED or -> CLOSING */
#define CON_SOCK_STATE_CONNECTED        3       /* -> CLOSING or -> CLOSED */
#define CON_SOCK_STATE_CLOSING          4       /* -> CLOSED */
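
/*
 * For example, a normal connection lifecycle walks NEW -> CLOSED ->
 * CONNECTING -> CONNECTED, and a shutdown then takes either
 * CONNECTED -> CLOSING -> CLOSED (a socket event arrived first) or
 * CONNECTED -> CLOSED (we closed the socket ourselves).
 */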

/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;

#ifdef CONFIG_LOCKDEP
static struct lock_class_key socket_class;
#endif

/*
 * When skipping (ignoring) a block of input we read it into a "skip
 * buffer," which is this many bytes in size.
 */
#define SKIP_BUF_SIZE   1024

static void queue_con(struct ceph_connection *con);
static void con_work(struct work_struct *);
static void ceph_fault(struct ceph_connection *con);

/*
 * Nicely render a sockaddr as a string. An array of formatted
 * strings is used, to approximate reentrancy.
 */
#define ADDR_STR_COUNT_LOG      5       /* log2(# address strings in array) */
#define ADDR_STR_COUNT          (1 << ADDR_STR_COUNT_LOG)
#define ADDR_STR_COUNT_MASK     (ADDR_STR_COUNT - 1)
#define MAX_ADDR_STR_LEN        64      /* 54 is enough */

static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
static atomic_t addr_str_seq = ATOMIC_INIT(0);

static struct page *zero_page;          /* used in certain error cases */

const char *ceph_pr_addr(const struct sockaddr_storage *ss)
{
        int i;
        char *s;
        struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
        struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;

        i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
        s = addr_str[i];

        switch (ss->ss_family) {
        case AF_INET:
                snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%hu", &in4->sin_addr,
                         ntohs(in4->sin_port));
                break;

        case AF_INET6:
                snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%hu", &in6->sin6_addr,
                         ntohs(in6->sin6_port));
                break;

        default:
                snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
                         ss->ss_family);
        }

        return s;
}
EXPORT_SYMBOL(ceph_pr_addr);
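
/*
 * Example: dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
 * The returned pointer references one of ADDR_STR_COUNT static slots,
 * so it is only safe to use briefly (e.g. within a single printk);
 * a later caller may recycle the slot.
 */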

static void encode_my_addr(struct ceph_messenger *msgr)
{
        memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
        ceph_encode_addr(&msgr->my_enc_addr);
}

/*
 * work queue for all reading and writing to/from the socket.
 */
static struct workqueue_struct *ceph_msgr_wq;

void _ceph_msgr_exit(void)
{
        if (ceph_msgr_wq) {
                destroy_workqueue(ceph_msgr_wq);
                ceph_msgr_wq = NULL;
        }

        BUG_ON(zero_page == NULL);
        kunmap(zero_page);
        page_cache_release(zero_page);
        zero_page = NULL;
}

int ceph_msgr_init(void)
{
        BUG_ON(zero_page != NULL);
        zero_page = ZERO_PAGE(0);
        page_cache_get(zero_page);

        ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0);
        if (ceph_msgr_wq)
                return 0;

        pr_err("msgr_init failed to create workqueue\n");
        _ceph_msgr_exit();

        return -ENOMEM;
}
EXPORT_SYMBOL(ceph_msgr_init);

void ceph_msgr_exit(void)
{
        BUG_ON(ceph_msgr_wq == NULL);

        _ceph_msgr_exit();
}
EXPORT_SYMBOL(ceph_msgr_exit);

void ceph_msgr_flush(void)
{
        flush_workqueue(ceph_msgr_wq);
}
EXPORT_SYMBOL(ceph_msgr_flush);

/* Connection socket state transition functions */

static void con_sock_state_init(struct ceph_connection *con)
{
        int old_state;

        old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
        if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
                printk("%s: unexpected old state %d\n", __func__, old_state);
}

static void con_sock_state_connecting(struct ceph_connection *con)
{
        int old_state;

        old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
        if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
                printk("%s: unexpected old state %d\n", __func__, old_state);
}

static void con_sock_state_connected(struct ceph_connection *con)
{
        int old_state;

        old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
        if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
                printk("%s: unexpected old state %d\n", __func__, old_state);
}

static void con_sock_state_closing(struct ceph_connection *con)
{
        int old_state;

        old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
        if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
                    old_state != CON_SOCK_STATE_CONNECTED &&
                    old_state != CON_SOCK_STATE_CLOSING))
                printk("%s: unexpected old state %d\n", __func__, old_state);
}

static void con_sock_state_closed(struct ceph_connection *con)
{
        int old_state;

        old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
        if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
                    old_state != CON_SOCK_STATE_CLOSING &&
                    old_state != CON_SOCK_STATE_CONNECTING))
                printk("%s: unexpected old state %d\n", __func__, old_state);
}
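
/*
 * Note that these helpers only warn on an unexpected transition; the
 * exchange is performed unconditionally, so sock_state always reflects
 * the most recent event even when the old value was surprising.
 */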

/*
 * socket callback functions
 */

/* data available on socket, or listen socket received a connect */
static void ceph_sock_data_ready(struct sock *sk, int count_unused)
{
        struct ceph_connection *con = sk->sk_user_data;

        if (atomic_read(&con->msgr->stopping))
                return;

        if (sk->sk_state != TCP_CLOSE_WAIT) {
                dout("%s on %p state = %lu, queueing work\n", __func__,
                     con, con->state);
                queue_con(con);
        }
}

/* socket has buffer space for writing */
static void ceph_sock_write_space(struct sock *sk)
{
        struct ceph_connection *con = sk->sk_user_data;

        /* only queue to workqueue if there is data we want to write,
         * and there is sufficient space in the socket buffer to accept
         * more data. clear SOCK_NOSPACE so that ceph_sock_write_space()
         * doesn't get called again until try_write() fills the socket
         * buffer. See net/ipv4/tcp_input.c:tcp_check_space()
         * and net/core/stream.c:sk_stream_write_space().
         */
        if (test_bit(WRITE_PENDING, &con->flags)) {
                if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
                        dout("%s %p queueing write work\n", __func__, con);
                        clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                        queue_con(con);
                }
        } else {
                dout("%s %p nothing to write\n", __func__, con);
        }
}

/* socket's state has changed */
static void ceph_sock_state_change(struct sock *sk)
{
        struct ceph_connection *con = sk->sk_user_data;

        dout("%s %p state = %lu sk_state = %u\n", __func__,
             con, con->state, sk->sk_state);

        switch (sk->sk_state) {
        case TCP_CLOSE:
                dout("%s TCP_CLOSE\n", __func__);
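                /* fall through */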
        case TCP_CLOSE_WAIT:
                dout("%s TCP_CLOSE_WAIT\n", __func__);
                con_sock_state_closing(con);
                set_bit(SOCK_CLOSED, &con->flags);
                queue_con(con);
                break;
        case TCP_ESTABLISHED:
                dout("%s TCP_ESTABLISHED\n", __func__);
                con_sock_state_connected(con);
                queue_con(con);
                break;
        default:        /* Everything else is uninteresting */
                break;
        }
}

/*
 * set up socket callbacks
 */
static void set_sock_callbacks(struct socket *sock,
                               struct ceph_connection *con)
{
        struct sock *sk = sock->sk;

        sk->sk_user_data = con;
        sk->sk_data_ready = ceph_sock_data_ready;
        sk->sk_write_space = ceph_sock_write_space;
        sk->sk_state_change = ceph_sock_state_change;
}


/*
 * socket helpers
 */

/*
 * initiate connection to a remote socket.
 */
static int ceph_tcp_connect(struct ceph_connection *con)
{
        struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
        struct socket *sock;
        int ret;

        BUG_ON(con->sock);
        ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
                               IPPROTO_TCP, &sock);
        if (ret)
                return ret;
        sock->sk->sk_allocation = GFP_NOFS;

#ifdef CONFIG_LOCKDEP
        lockdep_set_class(&sock->sk->sk_lock, &socket_class);
#endif

        set_sock_callbacks(sock, con);

        dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));

        con_sock_state_connecting(con);
        ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
                                 O_NONBLOCK);
        if (ret == -EINPROGRESS) {
                dout("connect %s EINPROGRESS sk_state = %u\n",
                     ceph_pr_addr(&con->peer_addr.in_addr),
                     sock->sk->sk_state);
        } else if (ret < 0) {
                pr_err("connect %s error %d\n",
                       ceph_pr_addr(&con->peer_addr.in_addr), ret);
                sock_release(sock);
                con->error_msg = "connect error";

                return ret;
        }
        con->sock = sock;
        return 0;
}

static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
{
        struct kvec iov = {buf, len};
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
        int r;

        r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
        if (r == -EAGAIN)
                r = 0;
        return r;
}

/*
 * write something. @more is true if caller will be sending more data
 * shortly.
 */
static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
                            size_t kvlen, size_t len, int more)
{
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
        int r;

        if (more)
                msg.msg_flags |= MSG_MORE;
        else
                msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */

        r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
        if (r == -EAGAIN)
                r = 0;
        return r;
}

static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
                             int offset, size_t size, int more)
{
        int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
        int ret;

        ret = kernel_sendpage(sock, page, offset, size, flags);
        if (ret == -EAGAIN)
                ret = 0;

        return ret;
}


/*
 * Shutdown/close the socket for the given connection.
 */
static int con_close_socket(struct ceph_connection *con)
{
        int rc;

        dout("con_close_socket on %p sock %p\n", con, con->sock);
        if (!con->sock)
                return 0;
        rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
        sock_release(con->sock);
        con->sock = NULL;

        /*
         * Forcibly clear the SOCK_CLOSED flag. It gets set
         * independent of the connection mutex, and we could have
         * received a socket close event before we had the chance to
         * shut the socket down.
         */
        clear_bit(SOCK_CLOSED, &con->flags);
        con_sock_state_closed(con);
        return rc;
}

/*
 * Reset a connection. Discard all incoming and outgoing messages
 * and clear *_seq state.
 */
static void ceph_msg_remove(struct ceph_msg *msg)
{
        list_del_init(&msg->list_head);
        BUG_ON(msg->con == NULL);
        msg->con->ops->put(msg->con);
        msg->con = NULL;

        ceph_msg_put(msg);
}

static void ceph_msg_remove_list(struct list_head *head)
{
        while (!list_empty(head)) {
                struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
                                                        list_head);
                ceph_msg_remove(msg);
        }
}

static void reset_connection(struct ceph_connection *con)
{
        /* reset connection, out_queue, msg_ and connect_seq */
        /* discard existing out_queue and msg_seq */
        ceph_msg_remove_list(&con->out_queue);
        ceph_msg_remove_list(&con->out_sent);

        if (con->in_msg) {
                BUG_ON(con->in_msg->con != con);
                con->in_msg->con = NULL;
                ceph_msg_put(con->in_msg);
                con->in_msg = NULL;
                con->ops->put(con);
        }

        con->connect_seq = 0;
        con->out_seq = 0;
        if (con->out_msg) {
                ceph_msg_put(con->out_msg);
                con->out_msg = NULL;
        }
        con->in_seq = 0;
        con->in_seq_acked = 0;
}

/*
 * mark a peer down. drop any open connections.
 */
void ceph_con_close(struct ceph_connection *con)
{
        mutex_lock(&con->mutex);
        dout("con_close %p peer %s\n", con,
             ceph_pr_addr(&con->peer_addr.in_addr));
        clear_bit(NEGOTIATING, &con->state);
        clear_bit(CONNECTING, &con->state);
        clear_bit(CONNECTED, &con->state);
        clear_bit(STANDBY, &con->state);  /* avoid connect_seq bump */
        set_bit(CLOSED, &con->state);

        clear_bit(LOSSYTX, &con->flags);  /* so we retry next connect */
        clear_bit(KEEPALIVE_PENDING, &con->flags);
        clear_bit(WRITE_PENDING, &con->flags);

        reset_connection(con);
        con->peer_global_seq = 0;
        cancel_delayed_work(&con->work);
        con_close_socket(con);
        mutex_unlock(&con->mutex);
}
EXPORT_SYMBOL(ceph_con_close);

/*
 * Reopen a closed connection, with a new peer address.
 */
void ceph_con_open(struct ceph_connection *con,
                   __u8 entity_type, __u64 entity_num,
                   struct ceph_entity_addr *addr)
{
        mutex_lock(&con->mutex);
        dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
        set_bit(OPENING, &con->state);
        WARN_ON(!test_and_clear_bit(CLOSED, &con->state));

        con->peer_name.type = (__u8) entity_type;
        con->peer_name.num = cpu_to_le64(entity_num);

        memcpy(&con->peer_addr, addr, sizeof(*addr));
        con->delay = 0;      /* reset backoff memory */
        mutex_unlock(&con->mutex);
        queue_con(con);
}
EXPORT_SYMBOL(ceph_con_open);

/*
 * return true if this connection ever successfully opened
 */
bool ceph_con_opened(struct ceph_connection *con)
{
        return con->connect_seq > 0;
}

/*
 * initialize a new connection.
 */
void ceph_con_init(struct ceph_connection *con, void *private,
                   const struct ceph_connection_operations *ops,
                   struct ceph_messenger *msgr)
{
        dout("con_init %p\n", con);
        memset(con, 0, sizeof(*con));
        con->private = private;
        con->ops = ops;
        con->msgr = msgr;

        con_sock_state_init(con);

        mutex_init(&con->mutex);
        INIT_LIST_HEAD(&con->out_queue);
        INIT_LIST_HEAD(&con->out_sent);
        INIT_DELAYED_WORK(&con->work, con_work);

        set_bit(CLOSED, &con->state);
}
EXPORT_SYMBOL(ceph_con_init);


/*
 * We maintain a global counter to order connection attempts. Get
 * a unique seq greater than @gt.
 */
static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
{
        u32 ret;

        spin_lock(&msgr->global_seq_lock);
        if (msgr->global_seq < gt)
                msgr->global_seq = gt;
        ret = ++msgr->global_seq;
        spin_unlock(&msgr->global_seq_lock);
        return ret;
}
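
/*
 * Example: if global_seq is 5, get_global_seq(msgr, 0) returns 6, while
 * get_global_seq(msgr, 10) bumps the counter and returns 11. The peer
 * uses this to order competing connection attempts.
 */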

static void con_out_kvec_reset(struct ceph_connection *con)
{
        con->out_kvec_left = 0;
        con->out_kvec_bytes = 0;
        con->out_kvec_cur = &con->out_kvec[0];
}

static void con_out_kvec_add(struct ceph_connection *con,
                             size_t size, void *data)
{
        int index;

        index = con->out_kvec_left;
        BUG_ON(index >= ARRAY_SIZE(con->out_kvec));

        con->out_kvec[index].iov_len = size;
        con->out_kvec[index].iov_base = data;
        con->out_kvec_left++;
        con->out_kvec_bytes += size;
}
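
/*
 * Note that con_out_kvec_add() stores a pointer to the caller's data
 * rather than copying it, so the memory behind each entry must stay
 * valid until write_partial_kvec() has flushed it to the socket.
 */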

#ifdef CONFIG_BLOCK
static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg)
{
        if (!bio) {
                *iter = NULL;
                *seg = 0;
                return;
        }
        *iter = bio;
        *seg = bio->bi_idx;
}

static void iter_bio_next(struct bio **bio_iter, int *seg)
{
        if (*bio_iter == NULL)
                return;

        BUG_ON(*seg >= (*bio_iter)->bi_vcnt);

        (*seg)++;
        if (*seg == (*bio_iter)->bi_vcnt)
                init_bio_iter((*bio_iter)->bi_next, bio_iter, seg);
}
#endif

static void prepare_write_message_data(struct ceph_connection *con)
{
        struct ceph_msg *msg = con->out_msg;

        BUG_ON(!msg);
        BUG_ON(!msg->hdr.data_len);

        /* initialize page iterator */
        con->out_msg_pos.page = 0;
        if (msg->pages)
                con->out_msg_pos.page_pos = msg->page_alignment;
        else
                con->out_msg_pos.page_pos = 0;
#ifdef CONFIG_BLOCK
        if (msg->bio)
                init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg);
#endif
        con->out_msg_pos.data_pos = 0;
        con->out_msg_pos.did_page_crc = false;
        con->out_more = 1;  /* data + footer will follow */
}

/*
 * Prepare footer for currently outgoing message, and finish things
 * off. Assumes out_kvec* are already valid; we just add on to the end.
 */
static void prepare_write_message_footer(struct ceph_connection *con)
{
        struct ceph_msg *m = con->out_msg;
        int v = con->out_kvec_left;

        m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;

        dout("prepare_write_message_footer %p\n", con);
        con->out_kvec_is_msg = true;
        con->out_kvec[v].iov_base = &m->footer;
        con->out_kvec[v].iov_len = sizeof(m->footer);
        con->out_kvec_bytes += sizeof(m->footer);
        con->out_kvec_left++;
        con->out_more = m->more_to_follow;
        con->out_msg_done = true;
}

/*
 * Prepare headers for the next outgoing message.
 */
static void prepare_write_message(struct ceph_connection *con)
{
        struct ceph_msg *m;
        u32 crc;

        con_out_kvec_reset(con);
        con->out_kvec_is_msg = true;
        con->out_msg_done = false;

        /* Sneak an ack in there first?  If we can get it into the same
         * TCP packet that's a good thing. */
        if (con->in_seq > con->in_seq_acked) {
                con->in_seq_acked = con->in_seq;
                con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
                con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
                con_out_kvec_add(con, sizeof (con->out_temp_ack),
                                 &con->out_temp_ack);
        }

        BUG_ON(list_empty(&con->out_queue));
        m = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
        con->out_msg = m;
        BUG_ON(m->con != con);

        /* put message on sent list */
        ceph_msg_get(m);
        list_move_tail(&m->list_head, &con->out_sent);

        /*
         * only assign outgoing seq # if we haven't sent this message
         * yet. if it is requeued, resend with its original seq.
         */
        if (m->needs_out_seq) {
                m->hdr.seq = cpu_to_le64(++con->out_seq);
                m->needs_out_seq = false;
        }

        dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
             m, con->out_seq, le16_to_cpu(m->hdr.type),
             le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
             le32_to_cpu(m->hdr.data_len),
             m->nr_pages);
        BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);

        /* tag + hdr + front + middle */
        con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
        con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
        con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);

        if (m->middle)
                con_out_kvec_add(con, m->middle->vec.iov_len,
                                 m->middle->vec.iov_base);

        /* fill in crc (except data pages), footer */
        crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
        con->out_msg->hdr.crc = cpu_to_le32(crc);
        con->out_msg->footer.flags = 0;

        crc = crc32c(0, m->front.iov_base, m->front.iov_len);
        con->out_msg->footer.front_crc = cpu_to_le32(crc);
        if (m->middle) {
                crc = crc32c(0, m->middle->vec.iov_base,
                             m->middle->vec.iov_len);
                con->out_msg->footer.middle_crc = cpu_to_le32(crc);
        } else
                con->out_msg->footer.middle_crc = 0;
        dout("%s front_crc %u middle_crc %u\n", __func__,
             le32_to_cpu(con->out_msg->footer.front_crc),
             le32_to_cpu(con->out_msg->footer.middle_crc));

        /* is there a data payload? */
        con->out_msg->footer.data_crc = 0;
        if (m->hdr.data_len)
                prepare_write_message_data(con);
        else
                /* no, queue up footer too and be done */
                prepare_write_message_footer(con);

        set_bit(WRITE_PENDING, &con->flags);
}

/*
 * Prepare an ack.
 */
static void prepare_write_ack(struct ceph_connection *con)
{
        dout("prepare_write_ack %p %llu -> %llu\n", con,
             con->in_seq_acked, con->in_seq);
        con->in_seq_acked = con->in_seq;

        con_out_kvec_reset(con);

        con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);

        con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
        con_out_kvec_add(con, sizeof (con->out_temp_ack),
                         &con->out_temp_ack);

        con->out_more = 1;  /* more will follow.. eventually.. */
        set_bit(WRITE_PENDING, &con->flags);
}

/*
 * Prepare to write keepalive byte.
 */
static void prepare_write_keepalive(struct ceph_connection *con)
{
        dout("prepare_write_keepalive %p\n", con);
        con_out_kvec_reset(con);
        con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive);
        set_bit(WRITE_PENDING, &con->flags);
}

/*
 * Connection negotiation.
 */

static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection *con,
                                                          int *auth_proto)
{
        struct ceph_auth_handshake *auth;

        if (!con->ops->get_authorizer) {
                con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
                con->out_connect.authorizer_len = 0;

                return NULL;
        }

        /* Can't hold the mutex while getting authorizer */

        mutex_unlock(&con->mutex);

        auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);

        mutex_lock(&con->mutex);

        if (IS_ERR(auth))
                return auth;
        if (test_bit(CLOSED, &con->state) || test_bit(OPENING, &con->state))
                return ERR_PTR(-EAGAIN);

        con->auth_reply_buf = auth->authorizer_reply_buf;
        con->auth_reply_buf_len = auth->authorizer_reply_buf_len;

        return auth;
}

/*
 * We connected to a peer and are saying hello.
 */
static void prepare_write_banner(struct ceph_connection *con)
{
        con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
        con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
                         &con->msgr->my_enc_addr);

        con->out_more = 0;
        set_bit(WRITE_PENDING, &con->flags);
}

static int prepare_write_connect(struct ceph_connection *con)
{
        unsigned int global_seq = get_global_seq(con->msgr, 0);
        int proto;
        int auth_proto;
        struct ceph_auth_handshake *auth;

        switch (con->peer_name.type) {
        case CEPH_ENTITY_TYPE_MON:
                proto = CEPH_MONC_PROTOCOL;
                break;
        case CEPH_ENTITY_TYPE_OSD:
                proto = CEPH_OSDC_PROTOCOL;
                break;
        case CEPH_ENTITY_TYPE_MDS:
                proto = CEPH_MDSC_PROTOCOL;
                break;
        default:
                BUG();
        }

        dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
             con->connect_seq, global_seq, proto);

        con->out_connect.features = cpu_to_le64(con->msgr->supported_features);
        con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
        con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
        con->out_connect.global_seq = cpu_to_le32(global_seq);
        con->out_connect.protocol_version = cpu_to_le32(proto);
        con->out_connect.flags = 0;

        auth_proto = CEPH_AUTH_UNKNOWN;
        auth = get_connect_authorizer(con, &auth_proto);
        if (IS_ERR(auth))
                return PTR_ERR(auth);

        con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
        con->out_connect.authorizer_len = auth ?
                cpu_to_le32(auth->authorizer_buf_len) : 0;

        con_out_kvec_reset(con);
        con_out_kvec_add(con, sizeof (con->out_connect),
                         &con->out_connect);
        if (auth && auth->authorizer_buf_len)
                con_out_kvec_add(con, auth->authorizer_buf_len,
                                 auth->authorizer_buf);

        con->out_more = 0;
        set_bit(WRITE_PENDING, &con->flags);

        return 0;
}

/*
 * write as much of pending kvecs to the socket as we can.
 *  1 -> done
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_kvec(struct ceph_connection *con)
{
        int ret;

        dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
        while (con->out_kvec_bytes > 0) {
                ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
                                       con->out_kvec_left, con->out_kvec_bytes,
                                       con->out_more);
                if (ret <= 0)
                        goto out;
                con->out_kvec_bytes -= ret;
                if (con->out_kvec_bytes == 0)
                        break;    /* done */

                /* account for full iov entries consumed */
                while (ret >= con->out_kvec_cur->iov_len) {
                        BUG_ON(!con->out_kvec_left);
                        ret -= con->out_kvec_cur->iov_len;
                        con->out_kvec_cur++;
                        con->out_kvec_left--;
                }
                /* and for a partially-consumed entry */
                if (ret) {
                        con->out_kvec_cur->iov_len -= ret;
                        con->out_kvec_cur->iov_base += ret;
                }
        }
        con->out_kvec_left = 0;
        con->out_kvec_is_msg = false;
        ret = 1;
out:
        dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
             con->out_kvec_bytes, con->out_kvec_left, ret);
        return ret;  /* done! */
}
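
/*
 * For example, with two queued kvecs of 100 and 50 bytes, a sendmsg()
 * return of 120 consumes the first entry entirely (ret drops to 20)
 * and then trims the second to its remaining 30 bytes by advancing
 * iov_base and shrinking iov_len.
 */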

static void out_msg_pos_next(struct ceph_connection *con, struct page *page,
                             size_t len, size_t sent, bool in_trail)
{
        struct ceph_msg *msg = con->out_msg;

        BUG_ON(!msg);
        BUG_ON(!sent);

        con->out_msg_pos.data_pos += sent;
        con->out_msg_pos.page_pos += sent;
        if (sent < len)
                return;

        BUG_ON(sent != len);
        con->out_msg_pos.page_pos = 0;
        con->out_msg_pos.page++;
        con->out_msg_pos.did_page_crc = false;
        if (in_trail)
                list_move_tail(&page->lru,
                               &msg->trail->head);
        else if (msg->pagelist)
                list_move_tail(&page->lru,
                               &msg->pagelist->head);
#ifdef CONFIG_BLOCK
        else if (msg->bio)
                iter_bio_next(&msg->bio_iter, &msg->bio_seg);
#endif
}

/*
 * Write as much message data payload as we can. If we finish, queue
 * up the footer.
 *  1 -> done, footer is now queued in out_kvec[].
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_msg_pages(struct ceph_connection *con)
{
        struct ceph_msg *msg = con->out_msg;
        unsigned int data_len = le32_to_cpu(msg->hdr.data_len);
        size_t len;
        bool do_datacrc = !con->msgr->nocrc;
        int ret;
        int total_max_write;
        bool in_trail = false;
        const size_t trail_len = (msg->trail ? msg->trail->length : 0);
        const size_t trail_off = data_len - trail_len;

        dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
             con, msg, con->out_msg_pos.page, msg->nr_pages,
             con->out_msg_pos.page_pos);

        /*
         * Iterate through each page that contains data to be
         * written, and send as much as possible for each.
         *
         * If we are calculating the data crc (the default), we will
         * need to map the page. If we have no pages, they have
         * been revoked, so use the zero page.
         */
        while (data_len > con->out_msg_pos.data_pos) {
                struct page *page = NULL;
                int max_write = PAGE_SIZE;
                int bio_offset = 0;

                in_trail = in_trail || con->out_msg_pos.data_pos >= trail_off;
                if (!in_trail)
                        total_max_write = trail_off - con->out_msg_pos.data_pos;

                if (in_trail) {
                        total_max_write = data_len - con->out_msg_pos.data_pos;

                        page = list_first_entry(&msg->trail->head,
                                                struct page, lru);
                } else if (msg->pages) {
                        page = msg->pages[con->out_msg_pos.page];
                } else if (msg->pagelist) {
                        page = list_first_entry(&msg->pagelist->head,
                                                struct page, lru);
#ifdef CONFIG_BLOCK
                } else if (msg->bio) {
                        struct bio_vec *bv;

                        bv = bio_iovec_idx(msg->bio_iter, msg->bio_seg);
                        page = bv->bv_page;
                        bio_offset = bv->bv_offset;
                        max_write = bv->bv_len;
#endif
                } else {
                        page = zero_page;
                }
                len = min_t(int, max_write - con->out_msg_pos.page_pos,
                            total_max_write);

                if (do_datacrc && !con->out_msg_pos.did_page_crc) {
                        void *base;
                        u32 crc = le32_to_cpu(msg->footer.data_crc);
                        char *kaddr;

                        kaddr = kmap(page);
                        BUG_ON(kaddr == NULL);
                        base = kaddr + con->out_msg_pos.page_pos + bio_offset;
                        crc = crc32c(crc, base, len);
                        msg->footer.data_crc = cpu_to_le32(crc);
                        con->out_msg_pos.did_page_crc = true;
                }
                ret = ceph_tcp_sendpage(con->sock, page,
                                        con->out_msg_pos.page_pos + bio_offset,
                                        len, 1);

                if (do_datacrc)
                        kunmap(page);

                if (ret <= 0)
                        goto out;

                out_msg_pos_next(con, page, len, (size_t) ret, in_trail);
        }

        dout("write_partial_msg_pages %p msg %p done\n", con, msg);

        /* prepare and queue up footer, too */
        if (!do_datacrc)
                msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
        con_out_kvec_reset(con);
        prepare_write_message_footer(con);
        ret = 1;
out:
        return ret;
}

/*
 * write some zeros
 */
static int write_partial_skip(struct ceph_connection *con)
{
        int ret;

        while (con->out_skip > 0) {
                size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);

                ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, 1);
                if (ret <= 0)
                        goto out;
                con->out_skip -= ret;
        }
        ret = 1;
out:
        return ret;
}

/*
 * Prepare to read connection handshake, or an ack.
 */
static void prepare_read_banner(struct ceph_connection *con)
{
        dout("prepare_read_banner %p\n", con);
        con->in_base_pos = 0;
}

static void prepare_read_connect(struct ceph_connection *con)
{
        dout("prepare_read_connect %p\n", con);
        con->in_base_pos = 0;
}

static void prepare_read_ack(struct ceph_connection *con)
{
        dout("prepare_read_ack %p\n", con);
        con->in_base_pos = 0;
}

static void prepare_read_tag(struct ceph_connection *con)
{
        dout("prepare_read_tag %p\n", con);
        con->in_base_pos = 0;
        con->in_tag = CEPH_MSGR_TAG_READY;
}

/*
 * Prepare to read a message.
 */
static int prepare_read_message(struct ceph_connection *con)
{
        dout("prepare_read_message %p\n", con);
        BUG_ON(con->in_msg != NULL);
        con->in_base_pos = 0;
        con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
        return 0;
}


static int read_partial(struct ceph_connection *con,
                        int end, int size, void *object)
{
        while (con->in_base_pos < end) {
                int left = end - con->in_base_pos;
                int have = size - left;
                int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
                if (ret <= 0)
                        return ret;
                con->in_base_pos += ret;
        }
        return 1;
}
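
/*
 * read_partial() tracks progress in con->in_base_pos across the whole
 * exchange, so @end is the object's offset from the start of the
 * exchange and @size is the object's own length. E.g. with end = 40
 * and size = 16, the object starts at offset 24; once in_base_pos
 * reaches 30, left = 10 and have = 6, so the next read lands at
 * object + 6.
 */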


/*
 * Read all or part of the connect-side handshake on a new connection
 */
static int read_partial_banner(struct ceph_connection *con)
{
        int size;
        int end;
        int ret;

        dout("read_partial_banner %p at %d\n", con, con->in_base_pos);

        /* peer's banner */
        size = strlen(CEPH_BANNER);
        end = size;
        ret = read_partial(con, end, size, con->in_banner);
        if (ret <= 0)
                goto out;

        size = sizeof (con->actual_peer_addr);
        end += size;
        ret = read_partial(con, end, size, &con->actual_peer_addr);
        if (ret <= 0)
                goto out;

        size = sizeof (con->peer_addr_for_me);
        end += size;
        ret = read_partial(con, end, size, &con->peer_addr_for_me);
        if (ret <= 0)
                goto out;

out:
        return ret;
}

static int read_partial_connect(struct ceph_connection *con)
{
        int size;
        int end;
        int ret;

        dout("read_partial_connect %p at %d\n", con, con->in_base_pos);

        size = sizeof (con->in_reply);
        end = size;
        ret = read_partial(con, end, size, &con->in_reply);
        if (ret <= 0)
                goto out;

        size = le32_to_cpu(con->in_reply.authorizer_len);
        end += size;
        ret = read_partial(con, end, size, con->auth_reply_buf);
        if (ret <= 0)
                goto out;

        dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
             con, (int)con->in_reply.tag,
             le32_to_cpu(con->in_reply.connect_seq),
             le32_to_cpu(con->in_reply.global_seq));
out:
        return ret;
}

/*
 * Verify the hello banner looks okay.
 */
static int verify_hello(struct ceph_connection *con)
{
        if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
                pr_err("connect to %s got bad banner\n",
                       ceph_pr_addr(&con->peer_addr.in_addr));
                con->error_msg = "protocol error, bad banner";
                return -1;
        }
        return 0;
}

static bool addr_is_blank(struct sockaddr_storage *ss)
{
        switch (ss->ss_family) {
        case AF_INET:
                return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
        case AF_INET6:
                return
                 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
                 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
                 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
                 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
        }
        return false;
}

static int addr_port(struct sockaddr_storage *ss)
{
        switch (ss->ss_family) {
        case AF_INET:
                return ntohs(((struct sockaddr_in *)ss)->sin_port);
        case AF_INET6:
                return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
        }
        return 0;
}

static void addr_set_port(struct sockaddr_storage *ss, int p)
{
        switch (ss->ss_family) {
        case AF_INET:
                ((struct sockaddr_in *)ss)->sin_port = htons(p);
                break;
        case AF_INET6:
                ((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
                break;
        }
}

/*
 * Unlike other *_pton function semantics, zero indicates success.
 */
static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss,
                     char delim, const char **ipend)
{
        struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
        struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;

        memset(ss, 0, sizeof(*ss));

        if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) {
                ss->ss_family = AF_INET;
                return 0;
        }

        if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) {
                ss->ss_family = AF_INET6;
                return 0;
        }

        return -EINVAL;
}

/*
 * Extract hostname string and resolve using kernel DNS facility.
 */
#ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
static int ceph_dns_resolve_name(const char *name, size_t namelen,
                struct sockaddr_storage *ss, char delim, const char **ipend)
{
        const char *end, *delim_p;
        char *colon_p, *ip_addr = NULL;
        int ip_len, ret;

        /*
         * The end of the hostname occurs immediately preceding the delimiter or
         * the port marker (':') where the delimiter takes precedence.
         */
        delim_p = memchr(name, delim, namelen);
        colon_p = memchr(name, ':', namelen);

        if (delim_p && colon_p)
                end = delim_p < colon_p ? delim_p : colon_p;
        else if (!delim_p && colon_p)
                end = colon_p;
        else {
                end = delim_p;
                if (!end) /* case: hostname:/ */
                        end = name + namelen;
        }

        if (end <= name)
                return -EINVAL;

        /* do dns_resolve upcall */
        ip_len = dns_query(NULL, name, end - name, NULL, &ip_addr, NULL);
        if (ip_len > 0)
                ret = ceph_pton(ip_addr, ip_len, ss, -1, NULL);
        else
                ret = -ESRCH;

        kfree(ip_addr);

        *ipend = end;

        pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
                ret, ret ? "failed" : ceph_pr_addr(ss));

        return ret;
}
#else
static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
                struct sockaddr_storage *ss, char delim, const char **ipend)
{
        return -EINVAL;
}
#endif

/*
 * Parse a server name (IP or hostname). If a valid IP address is not found
 * then try to extract a hostname to resolve using userspace DNS upcall.
 */
static int ceph_parse_server_name(const char *name, size_t namelen,
                struct sockaddr_storage *ss, char delim, const char **ipend)
{
        int ret;

        ret = ceph_pton(name, namelen, ss, delim, ipend);
        if (ret)
                ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend);

        return ret;
}

/*
 * Parse an ip[:port] list into an addr array. Use the default
 * monitor port if a port isn't specified.
 */
int ceph_parse_ips(const char *c, const char *end,
                   struct ceph_entity_addr *addr,
                   int max_count, int *count)
{
        int i, ret = -EINVAL;
        const char *p = c;

        dout("parse_ips on '%.*s'\n", (int)(end-c), c);
        for (i = 0; i < max_count; i++) {
                const char *ipend;
                struct sockaddr_storage *ss = &addr[i].in_addr;
                int port;
                char delim = ',';

                if (*p == '[') {
                        delim = ']';
                        p++;
                }

                ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend);
                if (ret)
                        goto bad;
                ret = -EINVAL;

                p = ipend;

                if (delim == ']') {
                        if (*p != ']') {
                                dout("missing matching ']'\n");
                                goto bad;
                        }
                        p++;
                }

                /* port? */
                if (p < end && *p == ':') {
                        port = 0;
                        p++;
                        while (p < end && *p >= '0' && *p <= '9') {
                                port = (port * 10) + (*p - '0');
                                p++;
                        }
                        if (port > 65535 || port == 0)
                                goto bad;
                } else {
                        port = CEPH_MON_PORT;
                }

                addr_set_port(ss, port);

                dout("parse_ips got %s\n", ceph_pr_addr(ss));

                if (p == end)
                        break;
                if (*p != ',')
                        goto bad;
                p++;
        }

        if (p != end)
                goto bad;

        if (count)
                *count = i + 1;
        return 0;

bad:
        pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
        return ret;
}
EXPORT_SYMBOL(ceph_parse_ips);
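
/*
 * Accepted forms include "1.2.3.4", "1.2.3.4:6789" and bracketed IPv6
 * such as "[::1]:6789", separated by commas; anything without an
 * explicit port gets CEPH_MON_PORT.
 */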

static int process_banner(struct ceph_connection *con)
{
        dout("process_banner on %p\n", con);

        if (verify_hello(con) < 0)
                return -1;

        ceph_decode_addr(&con->actual_peer_addr);
        ceph_decode_addr(&con->peer_addr_for_me);

        /*
         * Make sure the other end is who we wanted. note that the other
         * end may not yet know their ip address, so if it's 0.0.0.0, give
         * them the benefit of the doubt.
         */
        if (memcmp(&con->peer_addr, &con->actual_peer_addr,
                   sizeof(con->peer_addr)) != 0 &&
            !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
              con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
                pr_warning("wrong peer, want %s/%d, got %s/%d\n",
                           ceph_pr_addr(&con->peer_addr.in_addr),
                           (int)le32_to_cpu(con->peer_addr.nonce),
                           ceph_pr_addr(&con->actual_peer_addr.in_addr),
                           (int)le32_to_cpu(con->actual_peer_addr.nonce));
                con->error_msg = "wrong peer at address";
                return -1;
        }

        /*
         * did we learn our address?
         */
        if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
                int port = addr_port(&con->msgr->inst.addr.in_addr);

                memcpy(&con->msgr->inst.addr.in_addr,
                       &con->peer_addr_for_me.in_addr,
                       sizeof(con->peer_addr_for_me.in_addr));
                addr_set_port(&con->msgr->inst.addr.in_addr, port);
                encode_my_addr(con->msgr);
                dout("process_banner learned my addr is %s\n",
                     ceph_pr_addr(&con->msgr->inst.addr.in_addr));
        }

        return 0;
}

static void fail_protocol(struct ceph_connection *con)
{
        reset_connection(con);
        set_bit(CLOSED, &con->state);  /* in case there's queued work */
}

static int process_connect(struct ceph_connection *con)
{
        u64 sup_feat = con->msgr->supported_features;
        u64 req_feat = con->msgr->required_features;
        u64 server_feat = le64_to_cpu(con->in_reply.features);
        int ret;

        dout("process_connect on %p tag %d\n", con, (int)con->in_tag);

        switch (con->in_reply.tag) {
        case CEPH_MSGR_TAG_FEATURES:
                pr_err("%s%lld %s feature set mismatch,"
                       " my %llx < server's %llx, missing %llx\n",
                       ENTITY_NAME(con->peer_name),
                       ceph_pr_addr(&con->peer_addr.in_addr),
                       sup_feat, server_feat, server_feat & ~sup_feat);
                con->error_msg = "missing required protocol features";
                fail_protocol(con);
                return -1;

        case CEPH_MSGR_TAG_BADPROTOVER:
                pr_err("%s%lld %s protocol version mismatch,"
                       " my %d != server's %d\n",
                       ENTITY_NAME(con->peer_name),
                       ceph_pr_addr(&con->peer_addr.in_addr),
                       le32_to_cpu(con->out_connect.protocol_version),
                       le32_to_cpu(con->in_reply.protocol_version));
                con->error_msg = "protocol version mismatch";
                fail_protocol(con);
                return -1;

        case CEPH_MSGR_TAG_BADAUTHORIZER:
                con->auth_retry++;
                dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
                     con->auth_retry);
                if (con->auth_retry == 2) {
                        con->error_msg = "connect authorization failure";
                        return -1;
                }
                con->auth_retry = 1;
                ret = prepare_write_connect(con);
                if (ret < 0)
                        return ret;
                prepare_read_connect(con);
                break;

        case CEPH_MSGR_TAG_RESETSESSION:
                /*
                 * If we connected with a large connect_seq but the peer
                 * has no record of a session with us (no connection, or
                 * connect_seq == 0), they will send RESETSESSION to indicate
                 * that they must have reset their session, and may have
                 * dropped messages.
                 */
                dout("process_connect got RESET peer seq %u\n",
                     le32_to_cpu(con->in_reply.connect_seq));
                pr_err("%s%lld %s connection reset\n",
                       ENTITY_NAME(con->peer_name),
                       ceph_pr_addr(&con->peer_addr.in_addr));
                reset_connection(con);
                ret = prepare_write_connect(con);
                if (ret < 0)
                        return ret;
                prepare_read_connect(con);

                /* Tell ceph about it. */
                mutex_unlock(&con->mutex);
                pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
                if (con->ops->peer_reset)
                        con->ops->peer_reset(con);
                mutex_lock(&con->mutex);
                if (test_bit(CLOSED, &con->state) ||
                    test_bit(OPENING, &con->state))
                        return -EAGAIN;
                break;

        case CEPH_MSGR_TAG_RETRY_SESSION:
                /*
                 * If we sent a smaller connect_seq than the peer has, try
                 * again with a larger value.
                 */
                dout("process_connect got RETRY_SESSION my seq %u, peer %u\n",
                     le32_to_cpu(con->out_connect.connect_seq),
                     le32_to_cpu(con->in_reply.connect_seq));
                con->connect_seq = le32_to_cpu(con->in_reply.connect_seq);
                ret = prepare_write_connect(con);
                if (ret < 0)
                        return ret;
                prepare_read_connect(con);
                break;

        case CEPH_MSGR_TAG_RETRY_GLOBAL:
                /*
                 * If we sent a smaller global_seq than the peer has, try
                 * again with a larger value.
                 */
                dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
                     con->peer_global_seq,
                     le32_to_cpu(con->in_reply.global_seq));
                get_global_seq(con->msgr,
                               le32_to_cpu(con->in_reply.global_seq));
                ret = prepare_write_connect(con);
                if (ret < 0)
                        return ret;
                prepare_read_connect(con);
                break;

        case CEPH_MSGR_TAG_READY:
                if (req_feat & ~server_feat) {
                        pr_err("%s%lld %s protocol feature mismatch,"
                               " my required %llx > server's %llx, need %llx\n",
                               ENTITY_NAME(con->peer_name),
                               ceph_pr_addr(&con->peer_addr.in_addr),
                               req_feat, server_feat, req_feat & ~server_feat);
                        con->error_msg = "missing required protocol features";
                        fail_protocol(con);
                        return -1;
                }
                clear_bit(NEGOTIATING, &con->state);
                set_bit(CONNECTED, &con->state);
                con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
                con->connect_seq++;
                con->peer_features = server_feat;
                dout("process_connect got READY gseq %d cseq %d (%d)\n",
                     con->peer_global_seq,
                     le32_to_cpu(con->in_reply.connect_seq),
                     con->connect_seq);
                WARN_ON(con->connect_seq !=
                        le32_to_cpu(con->in_reply.connect_seq));

                if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
                        set_bit(LOSSYTX, &con->flags);

                con->delay = 0;      /* reset backoff memory */

                prepare_read_tag(con);
                break;

        case CEPH_MSGR_TAG_WAIT:
                /*
                 * If there is a connection race (we are opening
                 * connections to each other), one of us may just have
                 * to WAIT. This shouldn't happen if we are the
                 * client.
                 */
                pr_err("process_connect got WAIT as client\n");
                con->error_msg = "protocol error, got WAIT as client";
                return -1;

        default:
                pr_err("connect protocol error, will retry\n");
                con->error_msg = "protocol error, garbage tag during connect";
                return -1;
        }
        return 0;
}


/*
 * read (part of) an ack
 */
static int read_partial_ack(struct ceph_connection *con)
{
        int size = sizeof (con->in_temp_ack);
        int end = size;

        return read_partial(con, end, size, &con->in_temp_ack);
}


/*
 * We can finally discard anything that's been acked.
 */
static void process_ack(struct ceph_connection *con)
{
        struct ceph_msg *m;
        u64 ack = le64_to_cpu(con->in_temp_ack);
        u64 seq;

        while (!list_empty(&con->out_sent)) {
                m = list_first_entry(&con->out_sent, struct ceph_msg,
                                     list_head);
                seq = le64_to_cpu(m->hdr.seq);
                if (seq > ack)
                        break;
                dout("got ack for seq %llu type %d at %p\n", seq,
                     le16_to_cpu(m->hdr.type), m);
                m->ack_stamp = jiffies;
                ceph_msg_remove(m);
        }
        prepare_read_tag(con);
}
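
/*
 * For example, if out_sent holds messages with seqs 3, 4 and 5 and the
 * peer acks 4, messages 3 and 4 are dropped here and 5 stays queued
 * for a possible resend.
 */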

static int read_partial_message_section(struct ceph_connection *con,
                                        struct kvec *section,
                                        unsigned int sec_len, u32 *crc)
{
        int ret, left;

        BUG_ON(!section);

        while (section->iov_len < sec_len) {
                BUG_ON(section->iov_base == NULL);
                left = sec_len - section->iov_len;
                ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
                                       section->iov_len, left);
                if (ret <= 0)
                        return ret;
                section->iov_len += ret;
        }
        if (section->iov_len == sec_len)
                *crc = crc32c(0, section->iov_base, section->iov_len);

        return 1;
}

static bool ceph_con_in_msg_alloc(struct ceph_connection *con,
                                  struct ceph_msg_header *hdr);

static int read_partial_message_pages(struct ceph_connection *con,
                                      struct page **pages,
                                      unsigned int data_len, bool do_datacrc)
{
        void *p;
        int ret;
        int left;

        left = min((int)(data_len - con->in_msg_pos.data_pos),
                   (int)(PAGE_SIZE - con->in_msg_pos.page_pos));
        /* (page) data */
        BUG_ON(pages == NULL);
        p = kmap(pages[con->in_msg_pos.page]);
        ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
                               left);
        if (ret > 0 && do_datacrc)
                con->in_data_crc =
                        crc32c(con->in_data_crc,
                               p + con->in_msg_pos.page_pos, ret);
        kunmap(pages[con->in_msg_pos.page]);
        if (ret <= 0)
                return ret;
        con->in_msg_pos.data_pos += ret;
        con->in_msg_pos.page_pos += ret;
        if (con->in_msg_pos.page_pos == PAGE_SIZE) {
                con->in_msg_pos.page_pos = 0;
                con->in_msg_pos.page++;
        }

        return ret;
}

#ifdef CONFIG_BLOCK
static int read_partial_message_bio(struct ceph_connection *con,
                                    struct bio **bio_iter, int *bio_seg,
                                    unsigned int data_len, bool do_datacrc)
{
        struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg);
        void *p;
        int ret, left;

        left = min((int)(data_len - con->in_msg_pos.data_pos),
                   (int)(bv->bv_len - con->in_msg_pos.page_pos));

        p = kmap(bv->bv_page) + bv->bv_offset;

        ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
                               left);
        if (ret > 0 && do_datacrc)
                con->in_data_crc =
                        crc32c(con->in_data_crc,
                               p + con->in_msg_pos.page_pos, ret);
        kunmap(bv->bv_page);
        if (ret <= 0)
                return ret;
        con->in_msg_pos.data_pos += ret;
        con->in_msg_pos.page_pos += ret;
        if (con->in_msg_pos.page_pos == bv->bv_len) {
                con->in_msg_pos.page_pos = 0;
                iter_bio_next(bio_iter, bio_seg);
        }

        return ret;
}
#endif

/*
 * read (part of) a message.
 */
static int read_partial_message(struct ceph_connection *con)
{
        struct ceph_msg *m = con->in_msg;
        int size;
        int end;
        int ret;
        unsigned int front_len, middle_len, data_len;
        bool do_datacrc = !con->msgr->nocrc;
        u64 seq;
        u32 crc;

        dout("read_partial_message con %p msg %p\n", con, m);

        /* header */
        size = sizeof (con->in_hdr);
        end = size;
        ret = read_partial(con, end, size, &con->in_hdr);
        if (ret <= 0)
                return ret;

        crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc));
        if (cpu_to_le32(crc) != con->in_hdr.crc) {
                pr_err("read_partial_message bad hdr crc %u != expected %u\n",
                       crc, con->in_hdr.crc);
                return -EBADMSG;
        }

        front_len = le32_to_cpu(con->in_hdr.front_len);
        if (front_len > CEPH_MSG_MAX_FRONT_LEN)
                return -EIO;
        middle_len = le32_to_cpu(con->in_hdr.middle_len);
        if (middle_len > CEPH_MSG_MAX_DATA_LEN)
                return -EIO;
        data_len = le32_to_cpu(con->in_hdr.data_len);
        if (data_len > CEPH_MSG_MAX_DATA_LEN)
                return -EIO;

        /* verify seq# */
        seq = le64_to_cpu(con->in_hdr.seq);
        if ((s64)seq - (s64)con->in_seq < 1) {
                pr_info("skipping %s%lld %s seq %lld expected %lld\n",
                        ENTITY_NAME(con->peer_name),
                        ceph_pr_addr(&con->peer_addr.in_addr),
                        seq, con->in_seq + 1);
                con->in_base_pos = -front_len - middle_len - data_len -
                        sizeof(m->footer);
                con->in_tag = CEPH_MSGR_TAG_READY;
                return 0;
        } else if ((s64)seq - (s64)con->in_seq > 1) {
                pr_err("read_partial_message bad seq %lld expected %lld\n",
                       seq, con->in_seq + 1);
                con->error_msg = "bad message sequence # for incoming message";
                return -EBADMSG;
        }

        /* allocate message? */
        if (!con->in_msg) {
                dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
                     con->in_hdr.front_len, con->in_hdr.data_len);
                if (ceph_con_in_msg_alloc(con, &con->in_hdr)) {
                        /* skip this message */
                        dout("alloc_msg said skip message\n");
                        BUG_ON(con->in_msg);
                        con->in_base_pos = -front_len - middle_len - data_len -
                                sizeof(m->footer);
                        con->in_tag = CEPH_MSGR_TAG_READY;
                        con->in_seq++;
                        return 0;
                }
                if (!con->in_msg) {
                        con->error_msg =
                                "error allocating memory for incoming message";
                        return -ENOMEM;
                }

                BUG_ON(con->in_msg->con != con);
                m = con->in_msg;
                m->front.iov_len = 0;    /* haven't read it yet */
                if (m->middle)
                        m->middle->vec.iov_len = 0;

                con->in_msg_pos.page = 0;
                if (m->pages)
                        con->in_msg_pos.page_pos = m->page_alignment;
                else
                        con->in_msg_pos.page_pos = 0;
                con->in_msg_pos.data_pos = 0;

#ifdef CONFIG_BLOCK
                if (m->bio)
                        init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg);
#endif
        }

        /* front */
        ret = read_partial_message_section(con, &m->front, front_len,
                                           &con->in_front_crc);
        if (ret <= 0)
                return ret;

        /* middle */
        if (m->middle) {
                ret = read_partial_message_section(con, &m->middle->vec,
                                                   middle_len,
                                                   &con->in_middle_crc);
                if (ret <= 0)
                        return ret;
        }

        /* (page) data */
        while (con->in_msg_pos.data_pos < data_len) {
                if (m->pages) {
                        ret = read_partial_message_pages(con, m->pages,
                                                         data_len, do_datacrc);
                        if (ret <= 0)
                                return ret;
#ifdef CONFIG_BLOCK
                } else if (m->bio) {
                        BUG_ON(!m->bio_iter);
                        ret = read_partial_message_bio(con,
                                                       &m->bio_iter, &m->bio_seg,
                                                       data_len, do_datacrc);
                        if (ret <= 0)
                                return ret;
#endif
                } else {
                        BUG_ON(1);
                }
        }

        /* footer */
        size = sizeof (m->footer);
        end += size;
        ret = read_partial(con, end, size, &m->footer);
        if (ret <= 0)
                return ret;

        dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
             m, front_len, m->footer.front_crc, middle_len,
             m->footer.middle_crc, data_len, m->footer.data_crc);

        /* crc ok? */
        if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
                pr_err("read_partial_message %p front crc %u != exp. %u\n",
                       m, con->in_front_crc, m->footer.front_crc);
                return -EBADMSG;
        }
        if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
                pr_err("read_partial_message %p middle crc %u != exp %u\n",
                       m, con->in_middle_crc, m->footer.middle_crc);
                return -EBADMSG;
        }
        if (do_datacrc &&
            (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
            con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
                pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
                       con->in_data_crc, le32_to_cpu(m->footer.data_crc));
                return -EBADMSG;
        }

        return 1; /* done! */
}

/*
 * Process message. This happens in the worker thread. The callback should
 * be careful not to do anything that waits on other incoming messages or it
 * may deadlock.
 */
static void process_message(struct ceph_connection *con)
{
        struct ceph_msg *msg;

        BUG_ON(con->in_msg->con != con);
        con->in_msg->con = NULL;
        msg = con->in_msg;
        con->in_msg = NULL;
        con->ops->put(con);

        /* if first message, set peer_name */
        if (con->peer_name.type == 0)
                con->peer_name = msg->hdr.src;

        con->in_seq++;
        mutex_unlock(&con->mutex);

        dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
             msg, le64_to_cpu(msg->hdr.seq),
             ENTITY_NAME(msg->hdr.src),
             le16_to_cpu(msg->hdr.type),
             ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
             le32_to_cpu(msg->hdr.front_len),
             le32_to_cpu(msg->hdr.data_len),
             con->in_front_crc, con->in_middle_crc, con->in_data_crc);
        con->ops->dispatch(con, msg);

        mutex_lock(&con->mutex);
        prepare_read_tag(con);
}
1981
1982
1983 /*
1984 * Write something to the socket. Called in a worker thread when the
1985 * socket appears to be writeable and we have something ready to send.
1986 */
1987 static int try_write(struct ceph_connection *con)
1988 {
1989 int ret = 1;
1990
1991 dout("try_write start %p state %lu\n", con, con->state);
1992
1993 more:
1994 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
1995
1996 /* open the socket first? */
1997 if (con->sock == NULL) {
1998 set_bit(CONNECTING, &con->state);
1999
2000 con_out_kvec_reset(con);
2001 prepare_write_banner(con);
2002 prepare_read_banner(con);
2003
2004 BUG_ON(con->in_msg);
2005 con->in_tag = CEPH_MSGR_TAG_READY;
2006 dout("try_write initiating connect on %p new state %lu\n",
2007 con, con->state);
2008 ret = ceph_tcp_connect(con);
2009 if (ret < 0) {
2010 con->error_msg = "connect error";
2011 goto out;
2012 }
2013 }
2014
2015 more_kvec:
2016 /* kvec data queued? */
2017 if (con->out_skip) {
2018 ret = write_partial_skip(con);
2019 if (ret <= 0)
2020 goto out;
2021 }
2022 if (con->out_kvec_left) {
2023 ret = write_partial_kvec(con);
2024 if (ret <= 0)
2025 goto out;
2026 }
2027
2028 /* msg pages? */
2029 if (con->out_msg) {
2030 if (con->out_msg_done) {
2031 ceph_msg_put(con->out_msg);
2032 con->out_msg = NULL; /* we're done with this one */
2033 goto do_next;
2034 }
2035
2036 ret = write_partial_msg_pages(con);
2037 if (ret == 1)
2038 goto more_kvec; /* we need to send the footer, too! */
2039 if (ret == 0)
2040 goto out;
2041 if (ret < 0) {
2042 dout("try_write write_partial_msg_pages err %d\n",
2043 ret);
2044 goto out;
2045 }
2046 }
2047
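	/*
	 * Once the banner/connect handshake has completed, look for
	 * further work: queued outgoing messages, acks we owe the
	 * peer, or a pending keepalive.
	 */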
do_next:
	if (!test_bit(CONNECTING, &con->state) &&
	    !test_bit(NEGOTIATING, &con->state)) {
		/* is anything else pending? */
		if (!list_empty(&con->out_queue)) {
			prepare_write_message(con);
			goto more;
		}
		if (con->in_seq > con->in_seq_acked) {
			prepare_write_ack(con);
			goto more;
		}
		if (test_and_clear_bit(KEEPALIVE_PENDING, &con->flags)) {
			prepare_write_keepalive(con);
			goto more;
		}
	}

	/* Nothing to do! */
	clear_bit(WRITE_PENDING, &con->flags);
	dout("try_write nothing else to write.\n");
	ret = 0;
out:
	dout("try_write done on %p ret %d\n", con, ret);
	return ret;
}


/*
 * Read what we can from the socket.
 */
static int try_read(struct ceph_connection *con)
{
	int ret = -1;

	if (!con->sock)
		return 0;

	if (test_bit(STANDBY, &con->state))
		return 0;

	dout("try_read start on %p\n", con);

more:
	dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
	     con->in_base_pos);

	/*
	 * process_connect and process_message drop and re-take
	 * con->mutex.  make sure we handle a racing close or reopen.
	 */
	if (test_bit(CLOSED, &con->state) ||
	    test_bit(OPENING, &con->state)) {
		ret = -EAGAIN;
		goto out;
	}

	if (test_bit(CONNECTING, &con->state)) {
		dout("try_read connecting\n");
		ret = read_partial_banner(con);
		if (ret <= 0)
			goto out;
		ret = process_banner(con);
		if (ret < 0)
			goto out;

		clear_bit(CONNECTING, &con->state);
		set_bit(NEGOTIATING, &con->state);

		/* Banner is good, exchange connection info */
		ret = prepare_write_connect(con);
		if (ret < 0)
			goto out;
		prepare_read_connect(con);

		/* Send connection info before awaiting response */
		goto out;
	}

	if (test_bit(NEGOTIATING, &con->state)) {
		dout("try_read negotiating\n");
		ret = read_partial_connect(con);
		if (ret <= 0)
			goto out;
		ret = process_connect(con);
		if (ret < 0)
			goto out;
		goto more;
	}

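	/*
	 * A negative in_base_pos means the current message was revoked
	 * mid-receive; read and discard the remaining -in_base_pos
	 * bytes so we stay aligned with the incoming stream.
	 */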
	if (con->in_base_pos < 0) {
		/*
		 * skipping + discarding content.
		 *
		 * FIXME: there must be a better way to do this!
		 */
		static char buf[SKIP_BUF_SIZE];
		int skip = min((int)sizeof(buf), -con->in_base_pos);

		dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
		ret = ceph_tcp_recvmsg(con->sock, buf, skip);
		if (ret <= 0)
			goto out;
		con->in_base_pos += ret;
		if (con->in_base_pos)
			goto more;
	}
	if (con->in_tag == CEPH_MSGR_TAG_READY) {
		/*
		 * what's next?
		 */
		ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
		if (ret <= 0)
			goto out;
		dout("try_read got tag %d\n", (int)con->in_tag);
		switch (con->in_tag) {
		case CEPH_MSGR_TAG_MSG:
			prepare_read_message(con);
			break;
		case CEPH_MSGR_TAG_ACK:
			prepare_read_ack(con);
			break;
		case CEPH_MSGR_TAG_CLOSE:
			clear_bit(CONNECTED, &con->state);
			set_bit(CLOSED, &con->state);   /* fixme */
			goto out;
		default:
			goto bad_tag;
		}
	}
	if (con->in_tag == CEPH_MSGR_TAG_MSG) {
		ret = read_partial_message(con);
		if (ret <= 0) {
			switch (ret) {
			case -EBADMSG:
				con->error_msg = "bad crc";
				ret = -EIO;
				break;
			case -EIO:
				con->error_msg = "io error";
				break;
			}
			goto out;
		}
		if (con->in_tag == CEPH_MSGR_TAG_READY)
			goto more;
		process_message(con);
		goto more;
	}
	if (con->in_tag == CEPH_MSGR_TAG_ACK) {
		ret = read_partial_ack(con);
		if (ret <= 0)
			goto out;
		process_ack(con);
		goto more;
	}

out:
	dout("try_read done on %p ret %d\n", con, ret);
	return ret;

bad_tag:
	pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
	con->error_msg = "protocol error, garbage tag";
	ret = -1;
	goto out;
}


/*
 * Atomically queue work on a connection.  Bump @con reference to
 * avoid races with connection teardown.
 */
static void queue_con(struct ceph_connection *con)
{
	if (!con->ops->get(con)) {
		dout("queue_con %p ref count 0\n", con);
		return;
	}

	if (!queue_delayed_work(ceph_msgr_wq, &con->work, 0)) {
		dout("queue_con %p - already queued\n", con);
		con->ops->put(con);
	} else {
		dout("queue_con %p\n", con);
	}
}

/*
 * Do some work on a connection.  Drop a connection ref when we're done.
 */
static void con_work(struct work_struct *work)
{
	struct ceph_connection *con = container_of(work, struct ceph_connection,
						   work.work);
	int ret;

	mutex_lock(&con->mutex);
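	/*
	 * try_read() and try_write() return -EAGAIN if the connection
	 * was closed or reopened while con->mutex was dropped; in that
	 * case we restart from here and re-evaluate the state bits.
	 */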
restart:
	if (test_and_clear_bit(SOCK_CLOSED, &con->flags)) {
		if (test_and_clear_bit(CONNECTED, &con->state))
			con->error_msg = "socket closed";
		else if (test_and_clear_bit(NEGOTIATING, &con->state))
			con->error_msg = "negotiation failed";
		else if (test_and_clear_bit(CONNECTING, &con->state))
			con->error_msg = "connection failed";
		else
			con->error_msg = "unrecognized con state";
		goto fault;
	}

	if (test_and_clear_bit(BACKOFF, &con->flags)) {
		dout("con_work %p backing off\n", con);
		if (queue_delayed_work(ceph_msgr_wq, &con->work,
				       round_jiffies_relative(con->delay))) {
			dout("con_work %p backoff %lu\n", con, con->delay);
			mutex_unlock(&con->mutex);
			return;
		} else {
			con->ops->put(con);
			dout("con_work %p FAILED to back off %lu\n", con,
			     con->delay);
		}
	}

	if (test_bit(STANDBY, &con->state)) {
		dout("con_work %p STANDBY\n", con);
		goto done;
	}
	if (test_bit(CLOSED, &con->state)) {
		dout("con_work %p CLOSED\n", con);
		BUG_ON(con->sock);
		goto done;
	}
	if (test_and_clear_bit(OPENING, &con->state)) {
		/* reopen w/ new peer */
		dout("con_work OPENING\n");
		BUG_ON(con->sock);
	}

	ret = try_read(con);
	if (ret == -EAGAIN)
		goto restart;
	if (ret < 0) {
		con->error_msg = "socket error on read";
		goto fault;
	}

	ret = try_write(con);
	if (ret == -EAGAIN)
		goto restart;
	if (ret < 0) {
		con->error_msg = "socket error on write";
		goto fault;
	}

done:
	mutex_unlock(&con->mutex);
done_unlocked:
	con->ops->put(con);
	return;

fault:
	mutex_unlock(&con->mutex);
	ceph_fault(con);	/* error/fault path */
	goto done_unlocked;
}


/*
 * Generic error/fault handler.  A retry mechanism is used with
 * exponential backoff.
 */
static void ceph_fault(struct ceph_connection *con)
{
	mutex_lock(&con->mutex);

	pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
	       ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
	dout("fault %p state %lu to peer %s\n",
	     con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));

	if (test_bit(CLOSED, &con->state))
		goto out_unlock;

	con_close_socket(con);

	if (test_bit(LOSSYTX, &con->flags)) {
		dout("fault on LOSSYTX channel\n");
		goto out_unlock;
	}

	if (con->in_msg) {
		BUG_ON(con->in_msg->con != con);
		con->in_msg->con = NULL;
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
		con->ops->put(con);
	}

	/* Requeue anything that hasn't been acked */
	list_splice_init(&con->out_sent, &con->out_queue);

	/* If there are no messages queued or keepalive pending, place
	 * the connection in a STANDBY state */
	if (list_empty(&con->out_queue) &&
	    !test_bit(KEEPALIVE_PENDING, &con->flags)) {
		dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
		clear_bit(WRITE_PENDING, &con->flags);
		set_bit(STANDBY, &con->state);
	} else {
		/* retry after a delay. */
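		/*
		 * The delay starts at BASE_DELAY_INTERVAL and doubles
		 * on each successive fault until it reaches
		 * MAX_DELAY_INTERVAL.
		 */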
		if (con->delay == 0)
			con->delay = BASE_DELAY_INTERVAL;
		else if (con->delay < MAX_DELAY_INTERVAL)
			con->delay *= 2;
		con->ops->get(con);
		if (queue_delayed_work(ceph_msgr_wq, &con->work,
				       round_jiffies_relative(con->delay))) {
			dout("fault queued %p delay %lu\n", con, con->delay);
		} else {
			con->ops->put(con);
			dout("fault failed to queue %p delay %lu, backoff\n",
			     con, con->delay);
			/*
			 * In many cases we see a socket state change
			 * while con_work is running and end up
			 * queuing (non-delayed) work, such that we
			 * can't backoff with a delay.  Set a flag so
			 * that when con_work restarts we schedule the
			 * delay then.
			 */
			set_bit(BACKOFF, &con->flags);
		}
	}

out_unlock:
	mutex_unlock(&con->mutex);
	/*
	 * in case we faulted due to authentication, invalidate our
	 * current tickets so that we can get new ones.
	 */
	if (con->auth_retry && con->ops->invalidate_authorizer) {
		dout("calling invalidate_authorizer()\n");
		con->ops->invalidate_authorizer(con);
	}

	if (con->ops->fault)
		con->ops->fault(con);
}


/*
 * initialize a new messenger instance
 */
void ceph_messenger_init(struct ceph_messenger *msgr,
			 struct ceph_entity_addr *myaddr,
			 u32 supported_features,
			 u32 required_features,
			 bool nocrc)
{
	msgr->supported_features = supported_features;
	msgr->required_features = required_features;

	spin_lock_init(&msgr->global_seq_lock);

	if (myaddr)
		msgr->inst.addr = *myaddr;

	/* select a random nonce */
	msgr->inst.addr.type = 0;
	get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
	encode_my_addr(msgr);
	msgr->nocrc = nocrc;

	atomic_set(&msgr->stopping, 0);

	dout("%s %p\n", __func__, msgr);
}
EXPORT_SYMBOL(ceph_messenger_init);

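/*
 * A connection that faulted with nothing left to send parks in
 * STANDBY; the next outgoing message or keepalive revives it.
 * Bumping connect_seq lets the peer treat the subsequent connect
 * attempt as a fresh session rather than a stale duplicate.
 */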
static void clear_standby(struct ceph_connection *con)
{
	/* come back from STANDBY? */
	if (test_and_clear_bit(STANDBY, &con->state)) {
		dout("clear_standby %p and ++connect_seq\n", con);
		con->connect_seq++;
		WARN_ON(test_bit(WRITE_PENDING, &con->flags));
		WARN_ON(test_bit(KEEPALIVE_PENDING, &con->flags));
	}
}

/*
 * Queue up an outgoing message on the given connection.
 */
void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
{
	/* set src+dst */
	msg->hdr.src = con->msgr->inst.name;
	BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
	msg->needs_out_seq = true;

	mutex_lock(&con->mutex);

	if (test_bit(CLOSED, &con->state)) {
		dout("con_send %p closed, dropping %p\n", con, msg);
		ceph_msg_put(msg);
		mutex_unlock(&con->mutex);
		return;
	}

	BUG_ON(msg->con != NULL);
	msg->con = con->ops->get(con);
	BUG_ON(msg->con == NULL);

	BUG_ON(!list_empty(&msg->list_head));
	list_add_tail(&msg->list_head, &con->out_queue);
	dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
	     ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
	     le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.middle_len),
	     le32_to_cpu(msg->hdr.data_len));

	clear_standby(con);
	mutex_unlock(&con->mutex);

	/* if there wasn't anything waiting to send before, queue
	 * new work */
	if (test_and_set_bit(WRITE_PENDING, &con->flags) == 0)
		queue_con(con);
}
EXPORT_SYMBOL(ceph_con_send);

/*
 * Revoke a message that was previously queued for send
 */
void ceph_msg_revoke(struct ceph_msg *msg)
{
	struct ceph_connection *con = msg->con;

	if (!con)
		return;		/* Message not in our possession */

	mutex_lock(&con->mutex);
	if (!list_empty(&msg->list_head)) {
		dout("%s %p msg %p - was on queue\n", __func__, con, msg);
		list_del_init(&msg->list_head);
		BUG_ON(msg->con == NULL);
		msg->con->ops->put(msg->con);
		msg->con = NULL;
		msg->hdr.seq = 0;

		ceph_msg_put(msg);
	}
	if (con->out_msg == msg) {
		dout("%s %p msg %p - was sending\n", __func__, con, msg);
		con->out_msg = NULL;
		if (con->out_kvec_is_msg) {
			con->out_skip = con->out_kvec_bytes;
			con->out_kvec_is_msg = false;
		}
		msg->hdr.seq = 0;

		ceph_msg_put(msg);
	}
	mutex_unlock(&con->mutex);
}

/*
 * Revoke a message that we may be reading data into
 */
void ceph_msg_revoke_incoming(struct ceph_msg *msg)
{
	struct ceph_connection *con;

	BUG_ON(msg == NULL);
	if (!msg->con) {
		dout("%s msg %p null con\n", __func__, msg);

		return;		/* Message not in our possession */
	}

	con = msg->con;
	mutex_lock(&con->mutex);
	if (con->in_msg == msg) {
		unsigned int front_len = le32_to_cpu(con->in_hdr.front_len);
		unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len);
		unsigned int data_len = le32_to_cpu(con->in_hdr.data_len);

		/* skip rest of message */
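		/*
		 * in_base_pos goes negative here; try_read() will read
		 * and discard that many bytes via its skip buffer
		 * before looking for the next tag.
		 */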
		dout("%s %p msg %p revoked\n", __func__, con, msg);
		con->in_base_pos = con->in_base_pos -
				sizeof(struct ceph_msg_header) -
				front_len -
				middle_len -
				data_len -
				sizeof(struct ceph_msg_footer);
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
		con->in_tag = CEPH_MSGR_TAG_READY;
		con->in_seq++;
	} else {
		dout("%s %p in_msg %p msg %p no-op\n",
		     __func__, con, con->in_msg, msg);
	}
	mutex_unlock(&con->mutex);
}

/*
 * Queue a keepalive byte to ensure the tcp connection is alive.
 */
void ceph_con_keepalive(struct ceph_connection *con)
{
	dout("con_keepalive %p\n", con);
	mutex_lock(&con->mutex);
	clear_standby(con);
	mutex_unlock(&con->mutex);
	if (test_and_set_bit(KEEPALIVE_PENDING, &con->flags) == 0 &&
	    test_and_set_bit(WRITE_PENDING, &con->flags) == 0)
		queue_con(con);
}
EXPORT_SYMBOL(ceph_con_keepalive);


/*
 * Construct a new message with the given type and front payload size.
 * The new message has a ref count of 1.
 */
struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
			      bool can_fail)
{
	struct ceph_msg *m;

	m = kmalloc(sizeof(*m), flags);
	if (m == NULL)
		goto out;
	kref_init(&m->kref);

	m->con = NULL;
	INIT_LIST_HEAD(&m->list_head);

	m->hdr.tid = 0;
	m->hdr.type = cpu_to_le16(type);
	m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
	m->hdr.version = 0;
	m->hdr.front_len = cpu_to_le32(front_len);
	m->hdr.middle_len = 0;
	m->hdr.data_len = 0;
	m->hdr.data_off = 0;
	m->hdr.reserved = 0;
	m->footer.front_crc = 0;
	m->footer.middle_crc = 0;
	m->footer.data_crc = 0;
	m->footer.flags = 0;
	m->front_max = front_len;
	m->front_is_vmalloc = false;
	m->more_to_follow = false;
	m->ack_stamp = 0;
	m->pool = NULL;

	/* middle */
	m->middle = NULL;

	/* data */
	m->nr_pages = 0;
	m->page_alignment = 0;
	m->pages = NULL;
	m->pagelist = NULL;
	m->bio = NULL;
	m->bio_iter = NULL;
	m->bio_seg = 0;
	m->trail = NULL;

	/* front */
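	/*
	 * Front payloads larger than a page are vmalloc'ed to avoid
	 * high-order allocations; smaller ones come from kmalloc.
	 */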
	if (front_len) {
		if (front_len > PAGE_CACHE_SIZE) {
			m->front.iov_base = __vmalloc(front_len, flags,
						      PAGE_KERNEL);
			m->front_is_vmalloc = true;
		} else {
			m->front.iov_base = kmalloc(front_len, flags);
		}
		if (m->front.iov_base == NULL) {
			dout("ceph_msg_new can't allocate %d bytes\n",
			     front_len);
			goto out2;
		}
	} else {
		m->front.iov_base = NULL;
	}
	m->front.iov_len = front_len;

	dout("ceph_msg_new %p front %d\n", m, front_len);
	return m;

out2:
	ceph_msg_put(m);
out:
	if (!can_fail) {
		pr_err("msg_new can't create type %d front %d\n", type,
		       front_len);
		WARN_ON(1);
	} else {
		dout("msg_new can't create type %d front %d\n", type,
		     front_len);
	}
	return NULL;
}
EXPORT_SYMBOL(ceph_msg_new);

/*
 * Allocate "middle" portion of a message, if it is needed and wasn't
 * allocated by alloc_msg.  This allows us to read a small fixed-size
 * per-type header in the front and then gracefully fail (i.e.,
 * propagate the error to the caller based on info in the front) when
 * the middle is too large.
 */
static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
{
	int type = le16_to_cpu(msg->hdr.type);
	int middle_len = le32_to_cpu(msg->hdr.middle_len);

	dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
	     ceph_msg_type_name(type), middle_len);
	BUG_ON(!middle_len);
	BUG_ON(msg->middle);

	msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
	if (!msg->middle)
		return -ENOMEM;
	return 0;
}

/*
 * Allocate a message for receiving an incoming message on a
 * connection, and save the result in con->in_msg.  Uses the
 * connection's private alloc_msg op if available.
 *
 * Returns true if the message should be skipped, false otherwise.
 * If true is returned (skip message), con->in_msg will be NULL.
 * If false is returned, con->in_msg will contain a pointer to the
 * newly-allocated message, or NULL in case of memory exhaustion.
 */
static bool ceph_con_in_msg_alloc(struct ceph_connection *con,
				struct ceph_msg_header *hdr)
{
	int type = le16_to_cpu(hdr->type);
	int front_len = le32_to_cpu(hdr->front_len);
	int middle_len = le32_to_cpu(hdr->middle_len);
	int ret;

	BUG_ON(con->in_msg != NULL);

	if (con->ops->alloc_msg) {
		int skip = 0;

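		/*
		 * Drop con->mutex across the callback; alloc_msg may
		 * block or take its own locks.
		 */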
		mutex_unlock(&con->mutex);
		con->in_msg = con->ops->alloc_msg(con, hdr, &skip);
		mutex_lock(&con->mutex);
		if (con->in_msg) {
			con->in_msg->con = con->ops->get(con);
			BUG_ON(con->in_msg->con == NULL);
		}
		if (skip)
			con->in_msg = NULL;

		if (!con->in_msg)
			return skip != 0;
	}
	if (!con->in_msg) {
		con->in_msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
		if (!con->in_msg) {
			pr_err("unable to allocate msg type %d len %d\n",
			       type, front_len);
			return false;
		}
		con->in_msg->con = con->ops->get(con);
		BUG_ON(con->in_msg->con == NULL);
		con->in_msg->page_alignment = le16_to_cpu(hdr->data_off);
	}
	memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr));

	if (middle_len && !con->in_msg->middle) {
		ret = ceph_alloc_middle(con, con->in_msg);
		if (ret < 0) {
			ceph_msg_put(con->in_msg);
			con->in_msg = NULL;
		}
	}

	return false;
}


/*
 * Free a generically kmalloc'd message.
 */
void ceph_msg_kfree(struct ceph_msg *m)
{
	dout("msg_kfree %p\n", m);
	if (m->front_is_vmalloc)
		vfree(m->front.iov_base);
	else
		kfree(m->front.iov_base);
	kfree(m);
}

/*
 * Drop a msg ref.  Destroy as needed.
 */
void ceph_msg_last_put(struct kref *kref)
{
	struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);

	dout("ceph_msg_put last one on %p\n", m);
	WARN_ON(!list_empty(&m->list_head));

	/* drop middle, data, if any */
	if (m->middle) {
		ceph_buffer_put(m->middle);
		m->middle = NULL;
	}
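	/* the page vector is owned by the caller; just forget the pointer */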
	m->nr_pages = 0;
	m->pages = NULL;

	if (m->pagelist) {
		ceph_pagelist_release(m->pagelist);
		kfree(m->pagelist);
		m->pagelist = NULL;
	}

	m->trail = NULL;

	if (m->pool)
		ceph_msgpool_put(m->pool, m);
	else
		ceph_msg_kfree(m);
}
EXPORT_SYMBOL(ceph_msg_last_put);

void ceph_msg_dump(struct ceph_msg *msg)
{
	pr_debug("msg_dump %p (front_max %d nr_pages %d)\n", msg,
		 msg->front_max, msg->nr_pages);
	print_hex_dump(KERN_DEBUG, "header: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->hdr, sizeof(msg->hdr), true);
	print_hex_dump(KERN_DEBUG, " front: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       msg->front.iov_base, msg->front.iov_len, true);
	if (msg->middle)
		print_hex_dump(KERN_DEBUG, "middle: ",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       msg->middle->vec.iov_base,
			       msg->middle->vec.iov_len, true);
	print_hex_dump(KERN_DEBUG, "footer: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->footer, sizeof(msg->footer), true);
}
EXPORT_SYMBOL(ceph_msg_dump);