libceph: implement pages array cursor
net/ceph/messenger.c
1 #include <linux/ceph/ceph_debug.h>
2
3 #include <linux/crc32c.h>
4 #include <linux/ctype.h>
5 #include <linux/highmem.h>
6 #include <linux/inet.h>
7 #include <linux/kthread.h>
8 #include <linux/net.h>
9 #include <linux/slab.h>
10 #include <linux/socket.h>
11 #include <linux/string.h>
12 #ifdef CONFIG_BLOCK
13 #include <linux/bio.h>
14 #endif /* CONFIG_BLOCK */
15 #include <linux/dns_resolver.h>
16 #include <net/tcp.h>
17
18 #include <linux/ceph/libceph.h>
19 #include <linux/ceph/messenger.h>
20 #include <linux/ceph/decode.h>
21 #include <linux/ceph/pagelist.h>
22 #include <linux/export.h>
23
24 #define list_entry_next(pos, member) \
25 list_entry(pos->member.next, typeof(*pos), member)
26
27 /*
28 * Ceph uses the messenger to exchange ceph_msg messages with other
29 * hosts in the system. The messenger provides ordered and reliable
30 * delivery. We tolerate TCP disconnects by reconnecting (with
31 * exponential backoff) in the case of a fault (disconnection, bad
32 * crc, protocol error). Acks allow sent messages to be discarded by
33 * the sender.
34 */
35
36 /*
37 * We track the state of the socket on a given connection using
38 * values defined below. The transition to a new socket state is
39 * handled by a function which verifies we aren't coming from an
40 * unexpected state.
41 *
42 * --------
43 * | NEW* | transient initial state
44 * --------
45 * | con_sock_state_init()
46 * v
47 * ----------
48 * | CLOSED | initialized, but no socket (and no
49 * ---------- TCP connection)
50 * ^ \
51 * | \ con_sock_state_connecting()
52 * | ----------------------
53 * | \
54 * + con_sock_state_closed() \
55 * |+--------------------------- \
56 * | \ \ \
57 * | ----------- \ \
58 * | | CLOSING | socket event; \ \
59 * | ----------- await close \ \
60 * | ^ \ |
61 * | | \ |
62 * | + con_sock_state_closing() \ |
63 * | / \ | |
64 * | / --------------- | |
65 * | / \ v v
66 * | / --------------
67 * | / -----------------| CONNECTING | socket created, TCP
68 * | | / -------------- connect initiated
69 * | | | con_sock_state_connected()
70 * | | v
71 * -------------
72 * | CONNECTED | TCP connection established
73 * -------------
74 *
75 * State values for ceph_connection->sock_state; NEW is assumed to be 0.
76 */
77
78 #define CON_SOCK_STATE_NEW 0 /* -> CLOSED */
79 #define CON_SOCK_STATE_CLOSED 1 /* -> CONNECTING */
80 #define CON_SOCK_STATE_CONNECTING 2 /* -> CONNECTED or -> CLOSING */
81 #define CON_SOCK_STATE_CONNECTED 3 /* -> CLOSING or -> CLOSED */
82 #define CON_SOCK_STATE_CLOSING 4 /* -> CLOSED */
83
84 /*
85 * connection states
86 */
87 #define CON_STATE_CLOSED 1 /* -> PREOPEN */
88 #define CON_STATE_PREOPEN 2 /* -> CONNECTING, CLOSED */
89 #define CON_STATE_CONNECTING 3 /* -> NEGOTIATING, CLOSED */
90 #define CON_STATE_NEGOTIATING 4 /* -> OPEN, CLOSED */
91 #define CON_STATE_OPEN 5 /* -> STANDBY, CLOSED */
92 #define CON_STATE_STANDBY 6 /* -> PREOPEN, CLOSED */
93
94 /*
95 * ceph_connection flag bits
96 */
97 #define CON_FLAG_LOSSYTX 0 /* we can close channel or drop
98 * messages on errors */
99 #define CON_FLAG_KEEPALIVE_PENDING 1 /* we need to send a keepalive */
100 #define CON_FLAG_WRITE_PENDING 2 /* we have data ready to send */
101 #define CON_FLAG_SOCK_CLOSED 3 /* socket state changed to closed */
102 #define CON_FLAG_BACKOFF 4 /* need to retry queuing delayed work */
103
104 static bool con_flag_valid(unsigned long con_flag)
105 {
106 switch (con_flag) {
107 case CON_FLAG_LOSSYTX:
108 case CON_FLAG_KEEPALIVE_PENDING:
109 case CON_FLAG_WRITE_PENDING:
110 case CON_FLAG_SOCK_CLOSED:
111 case CON_FLAG_BACKOFF:
112 return true;
113 default:
114 return false;
115 }
116 }
117
118 static void con_flag_clear(struct ceph_connection *con, unsigned long con_flag)
119 {
120 BUG_ON(!con_flag_valid(con_flag));
121
122 clear_bit(con_flag, &con->flags);
123 }
124
125 static void con_flag_set(struct ceph_connection *con, unsigned long con_flag)
126 {
127 BUG_ON(!con_flag_valid(con_flag));
128
129 set_bit(con_flag, &con->flags);
130 }
131
132 static bool con_flag_test(struct ceph_connection *con, unsigned long con_flag)
133 {
134 BUG_ON(!con_flag_valid(con_flag));
135
136 return test_bit(con_flag, &con->flags);
137 }
138
139 static bool con_flag_test_and_clear(struct ceph_connection *con,
140 unsigned long con_flag)
141 {
142 BUG_ON(!con_flag_valid(con_flag));
143
144 return test_and_clear_bit(con_flag, &con->flags);
145 }
146
147 static bool con_flag_test_and_set(struct ceph_connection *con,
148 unsigned long con_flag)
149 {
150 BUG_ON(!con_flag_valid(con_flag));
151
152 return test_and_set_bit(con_flag, &con->flags);
153 }
154
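/*
 * Illustrative sketch: the wrappers above are ordinary atomic bitops on
 * con->flags with a con_flag_valid() sanity check.  A typical
 * producer/consumer pairing looks like:
 *
 *     con_flag_set(con, CON_FLAG_WRITE_PENDING);
 *     ...
 *     if (con_flag_test_and_clear(con, CON_FLAG_SOCK_CLOSED))
 *             handle the socket-closed event exactly once
 */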
155 /* static tag bytes (protocol control messages) */
156 static char tag_msg = CEPH_MSGR_TAG_MSG;
157 static char tag_ack = CEPH_MSGR_TAG_ACK;
158 static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
159
160 #ifdef CONFIG_LOCKDEP
161 static struct lock_class_key socket_class;
162 #endif
163
164 /*
165 * When skipping (ignoring) a block of input we read it into a "skip
166 * buffer," which is this many bytes in size.
167 */
168 #define SKIP_BUF_SIZE 1024
169
170 static void queue_con(struct ceph_connection *con);
171 static void con_work(struct work_struct *);
172 static void con_fault(struct ceph_connection *con);
173
174 /*
175 * Nicely render a sockaddr as a string. An array of formatted
176 * strings is used, to approximate reentrancy.
177 */
178 #define ADDR_STR_COUNT_LOG 5 /* log2(# address strings in array) */
179 #define ADDR_STR_COUNT (1 << ADDR_STR_COUNT_LOG)
180 #define ADDR_STR_COUNT_MASK (ADDR_STR_COUNT - 1)
181 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
182
183 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
184 static atomic_t addr_str_seq = ATOMIC_INIT(0);
185
186 static struct page *zero_page; /* used in certain error cases */
187
188 const char *ceph_pr_addr(const struct sockaddr_storage *ss)
189 {
190 int i;
191 char *s;
192 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
193 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
194
195 i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
196 s = addr_str[i];
197
198 switch (ss->ss_family) {
199 case AF_INET:
200 snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%hu", &in4->sin_addr,
201 ntohs(in4->sin_port));
202 break;
203
204 case AF_INET6:
205 snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%hu", &in6->sin6_addr,
206 ntohs(in6->sin6_port));
207 break;
208
209 default:
210 snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
211 ss->ss_family);
212 }
213
214 return s;
215 }
216 EXPORT_SYMBOL(ceph_pr_addr);
217
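/*
 * For illustration (addresses are made up): with the formats above an
 * AF_INET peer renders as "192.168.0.1:6789" and an AF_INET6 peer as
 * "[::1]:6789", so callers can simply do, as ceph_tcp_connect() does
 * below:
 *
 *     dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
 *
 * The result lives in a small rotating array of static buffers, so it
 * is only approximately reentrant; don't hold on to the returned
 * pointer.
 */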
218 static void encode_my_addr(struct ceph_messenger *msgr)
219 {
220 memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
221 ceph_encode_addr(&msgr->my_enc_addr);
222 }
223
224 /*
225 * work queue for all reading and writing to/from the socket.
226 */
227 static struct workqueue_struct *ceph_msgr_wq;
228
229 static void _ceph_msgr_exit(void)
230 {
231 if (ceph_msgr_wq) {
232 destroy_workqueue(ceph_msgr_wq);
233 ceph_msgr_wq = NULL;
234 }
235
236 BUG_ON(zero_page == NULL);
237 kunmap(zero_page);
238 page_cache_release(zero_page);
239 zero_page = NULL;
240 }
241
242 int ceph_msgr_init(void)
243 {
244 BUG_ON(zero_page != NULL);
245 zero_page = ZERO_PAGE(0);
246 page_cache_get(zero_page);
247
248 ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0);
249 if (ceph_msgr_wq)
250 return 0;
251
252 pr_err("msgr_init failed to create workqueue\n");
253 _ceph_msgr_exit();
254
255 return -ENOMEM;
256 }
257 EXPORT_SYMBOL(ceph_msgr_init);
258
259 void ceph_msgr_exit(void)
260 {
261 BUG_ON(ceph_msgr_wq == NULL);
262
263 _ceph_msgr_exit();
264 }
265 EXPORT_SYMBOL(ceph_msgr_exit);
266
267 void ceph_msgr_flush(void)
268 {
269 flush_workqueue(ceph_msgr_wq);
270 }
271 EXPORT_SYMBOL(ceph_msgr_flush);
272
273 /* Connection socket state transition functions */
274
275 static void con_sock_state_init(struct ceph_connection *con)
276 {
277 int old_state;
278
279 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
280 if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
281 printk("%s: unexpected old state %d\n", __func__, old_state);
282 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
283 CON_SOCK_STATE_CLOSED);
284 }
285
286 static void con_sock_state_connecting(struct ceph_connection *con)
287 {
288 int old_state;
289
290 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
291 if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
292 printk("%s: unexpected old state %d\n", __func__, old_state);
293 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
294 CON_SOCK_STATE_CONNECTING);
295 }
296
297 static void con_sock_state_connected(struct ceph_connection *con)
298 {
299 int old_state;
300
301 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
302 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
303 printk("%s: unexpected old state %d\n", __func__, old_state);
304 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
305 CON_SOCK_STATE_CONNECTED);
306 }
307
308 static void con_sock_state_closing(struct ceph_connection *con)
309 {
310 int old_state;
311
312 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
313 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
314 old_state != CON_SOCK_STATE_CONNECTED &&
315 old_state != CON_SOCK_STATE_CLOSING))
316 printk("%s: unexpected old state %d\n", __func__, old_state);
317 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
318 CON_SOCK_STATE_CLOSING);
319 }
320
321 static void con_sock_state_closed(struct ceph_connection *con)
322 {
323 int old_state;
324
325 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
326 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
327 old_state != CON_SOCK_STATE_CLOSING &&
328 old_state != CON_SOCK_STATE_CONNECTING &&
329 old_state != CON_SOCK_STATE_CLOSED))
330 printk("%s: unexpected old state %d\n", __func__, old_state);
331 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
332 CON_SOCK_STATE_CLOSED);
333 }
334
335 /*
336 * socket callback functions
337 */
338
339 /* data available on socket, or listen socket received a connect */
340 static void ceph_sock_data_ready(struct sock *sk, int count_unused)
341 {
342 struct ceph_connection *con = sk->sk_user_data;
343 if (atomic_read(&con->msgr->stopping)) {
344 return;
345 }
346
347 if (sk->sk_state != TCP_CLOSE_WAIT) {
348 dout("%s on %p state = %lu, queueing work\n", __func__,
349 con, con->state);
350 queue_con(con);
351 }
352 }
353
354 /* socket has buffer space for writing */
355 static void ceph_sock_write_space(struct sock *sk)
356 {
357 struct ceph_connection *con = sk->sk_user_data;
358
359 /* only queue to workqueue if there is data we want to write,
360 * and there is sufficient space in the socket buffer to accept
361 * more data. clear SOCK_NOSPACE so that ceph_sock_write_space()
362 * doesn't get called again until try_write() fills the socket
363 * buffer. See net/ipv4/tcp_input.c:tcp_check_space()
364 * and net/core/stream.c:sk_stream_write_space().
365 */
366 if (con_flag_test(con, CON_FLAG_WRITE_PENDING)) {
367 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
368 dout("%s %p queueing write work\n", __func__, con);
369 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
370 queue_con(con);
371 }
372 } else {
373 dout("%s %p nothing to write\n", __func__, con);
374 }
375 }
376
377 /* socket's state has changed */
378 static void ceph_sock_state_change(struct sock *sk)
379 {
380 struct ceph_connection *con = sk->sk_user_data;
381
382 dout("%s %p state = %lu sk_state = %u\n", __func__,
383 con, con->state, sk->sk_state);
384
385 switch (sk->sk_state) {
386 case TCP_CLOSE:
387 dout("%s TCP_CLOSE\n", __func__);
388 case TCP_CLOSE_WAIT:
389 dout("%s TCP_CLOSE_WAIT\n", __func__);
390 con_sock_state_closing(con);
391 con_flag_set(con, CON_FLAG_SOCK_CLOSED);
392 queue_con(con);
393 break;
394 case TCP_ESTABLISHED:
395 dout("%s TCP_ESTABLISHED\n", __func__);
396 con_sock_state_connected(con);
397 queue_con(con);
398 break;
399 default: /* Everything else is uninteresting */
400 break;
401 }
402 }
403
404 /*
405 * set up socket callbacks
406 */
407 static void set_sock_callbacks(struct socket *sock,
408 struct ceph_connection *con)
409 {
410 struct sock *sk = sock->sk;
411 sk->sk_user_data = con;
412 sk->sk_data_ready = ceph_sock_data_ready;
413 sk->sk_write_space = ceph_sock_write_space;
414 sk->sk_state_change = ceph_sock_state_change;
415 }
416
417
418 /*
419 * socket helpers
420 */
421
422 /*
423 * initiate connection to a remote socket.
424 */
425 static int ceph_tcp_connect(struct ceph_connection *con)
426 {
427 struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
428 struct socket *sock;
429 int ret;
430
431 BUG_ON(con->sock);
432 ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
433 IPPROTO_TCP, &sock);
434 if (ret)
435 return ret;
436 sock->sk->sk_allocation = GFP_NOFS;
437
438 #ifdef CONFIG_LOCKDEP
439 lockdep_set_class(&sock->sk->sk_lock, &socket_class);
440 #endif
441
442 set_sock_callbacks(sock, con);
443
444 dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
445
446 con_sock_state_connecting(con);
447 ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
448 O_NONBLOCK);
449 if (ret == -EINPROGRESS) {
450 dout("connect %s EINPROGRESS sk_state = %u\n",
451 ceph_pr_addr(&con->peer_addr.in_addr),
452 sock->sk->sk_state);
453 } else if (ret < 0) {
454 pr_err("connect %s error %d\n",
455 ceph_pr_addr(&con->peer_addr.in_addr), ret);
456 sock_release(sock);
457 con->error_msg = "connect error";
458
459 return ret;
460 }
461 con->sock = sock;
462 return 0;
463 }
464
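/*
 * Note: the connect above is issued with O_NONBLOCK, so -EINPROGRESS is
 * the normal outcome.  Completion is reported asynchronously through
 * ceph_sock_state_change(), which sees TCP_ESTABLISHED, calls
 * con_sock_state_connected() and queues the connection work to continue
 * the handshake.
 */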
465 static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
466 {
467 struct kvec iov = {buf, len};
468 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
469 int r;
470
471 r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
472 if (r == -EAGAIN)
473 r = 0;
474 return r;
475 }
476
477 static int ceph_tcp_recvpage(struct socket *sock, struct page *page,
478 int page_offset, size_t length)
479 {
480 void *kaddr;
481 int ret;
482
483 BUG_ON(page_offset + length > PAGE_SIZE);
484
485 kaddr = kmap(page);
486 BUG_ON(!kaddr);
487 ret = ceph_tcp_recvmsg(sock, kaddr + page_offset, length);
488 kunmap(page);
489
490 return ret;
491 }
492
493 /*
494 * write something. @more is true if caller will be sending more data
495 * shortly.
496 */
497 static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
498 size_t kvlen, size_t len, int more)
499 {
500 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
501 int r;
502
503 if (more)
504 msg.msg_flags |= MSG_MORE;
505 else
506 msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */
507
508 r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
509 if (r == -EAGAIN)
510 r = 0;
511 return r;
512 }
513
514 static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
515 int offset, size_t size, bool more)
516 {
517 int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
518 int ret;
519
520 ret = kernel_sendpage(sock, page, offset, size, flags);
521 if (ret == -EAGAIN)
522 ret = 0;
523
524 return ret;
525 }
526
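/*
 * Note that ceph_tcp_recvmsg(), ceph_tcp_sendmsg() and
 * ceph_tcp_sendpage() all map -EAGAIN to 0, so callers share one
 * convention: a positive return is bytes transferred, 0 means the
 * socket is full (or empty) and the caller should retry later, and a
 * negative value is a real error.  A minimal caller sketch, mirroring
 * the write paths below:
 *
 *     ret = ceph_tcp_sendpage(con->sock, page, page_offset, length, more);
 *     if (ret <= 0)
 *             goto out;        (0 -> try again later, <0 -> fault)
 */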
527
528 /*
529 * Shutdown/close the socket for the given connection.
530 */
531 static int con_close_socket(struct ceph_connection *con)
532 {
533 int rc = 0;
534
535 dout("con_close_socket on %p sock %p\n", con, con->sock);
536 if (con->sock) {
537 rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
538 sock_release(con->sock);
539 con->sock = NULL;
540 }
541
542 /*
543 * Forcibly clear the SOCK_CLOSED flag. It gets set
544 * independent of the connection mutex, and we could have
545 * received a socket close event before we had the chance to
546 * shut the socket down.
547 */
548 con_flag_clear(con, CON_FLAG_SOCK_CLOSED);
549
550 con_sock_state_closed(con);
551 return rc;
552 }
553
554 /*
555 * Reset a connection. Discard all incoming and outgoing messages
556 * and clear *_seq state.
557 */
558 static void ceph_msg_remove(struct ceph_msg *msg)
559 {
560 list_del_init(&msg->list_head);
561 BUG_ON(msg->con == NULL);
562 msg->con->ops->put(msg->con);
563 msg->con = NULL;
564
565 ceph_msg_put(msg);
566 }
567 static void ceph_msg_remove_list(struct list_head *head)
568 {
569 while (!list_empty(head)) {
570 struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
571 list_head);
572 ceph_msg_remove(msg);
573 }
574 }
575
576 static void reset_connection(struct ceph_connection *con)
577 {
578 /* reset connection, out_queue, msg_ and connect_seq */
579 /* discard existing out_queue and msg_seq */
580 dout("reset_connection %p\n", con);
581 ceph_msg_remove_list(&con->out_queue);
582 ceph_msg_remove_list(&con->out_sent);
583
584 if (con->in_msg) {
585 BUG_ON(con->in_msg->con != con);
586 con->in_msg->con = NULL;
587 ceph_msg_put(con->in_msg);
588 con->in_msg = NULL;
589 con->ops->put(con);
590 }
591
592 con->connect_seq = 0;
593 con->out_seq = 0;
594 if (con->out_msg) {
595 ceph_msg_put(con->out_msg);
596 con->out_msg = NULL;
597 }
598 con->in_seq = 0;
599 con->in_seq_acked = 0;
600 }
601
602 /*
603 * mark a peer down. drop any open connections.
604 */
605 void ceph_con_close(struct ceph_connection *con)
606 {
607 mutex_lock(&con->mutex);
608 dout("con_close %p peer %s\n", con,
609 ceph_pr_addr(&con->peer_addr.in_addr));
610 con->state = CON_STATE_CLOSED;
611
612 con_flag_clear(con, CON_FLAG_LOSSYTX); /* so we retry next connect */
613 con_flag_clear(con, CON_FLAG_KEEPALIVE_PENDING);
614 con_flag_clear(con, CON_FLAG_WRITE_PENDING);
615 con_flag_clear(con, CON_FLAG_BACKOFF);
616
617 reset_connection(con);
618 con->peer_global_seq = 0;
619 cancel_delayed_work(&con->work);
620 con_close_socket(con);
621 mutex_unlock(&con->mutex);
622 }
623 EXPORT_SYMBOL(ceph_con_close);
624
625 /*
626 * Reopen a closed connection, with a new peer address.
627 */
628 void ceph_con_open(struct ceph_connection *con,
629 __u8 entity_type, __u64 entity_num,
630 struct ceph_entity_addr *addr)
631 {
632 mutex_lock(&con->mutex);
633 dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
634
635 WARN_ON(con->state != CON_STATE_CLOSED);
636 con->state = CON_STATE_PREOPEN;
637
638 con->peer_name.type = (__u8) entity_type;
639 con->peer_name.num = cpu_to_le64(entity_num);
640
641 memcpy(&con->peer_addr, addr, sizeof(*addr));
642 con->delay = 0; /* reset backoff memory */
643 mutex_unlock(&con->mutex);
644 queue_con(con);
645 }
646 EXPORT_SYMBOL(ceph_con_open);
647
648 /*
649 * return true if this connection ever successfully opened
650 */
651 bool ceph_con_opened(struct ceph_connection *con)
652 {
653 return con->connect_seq > 0;
654 }
655
656 /*
657 * initialize a new connection.
658 */
659 void ceph_con_init(struct ceph_connection *con, void *private,
660 const struct ceph_connection_operations *ops,
661 struct ceph_messenger *msgr)
662 {
663 dout("con_init %p\n", con);
664 memset(con, 0, sizeof(*con));
665 con->private = private;
666 con->ops = ops;
667 con->msgr = msgr;
668
669 con_sock_state_init(con);
670
671 mutex_init(&con->mutex);
672 INIT_LIST_HEAD(&con->out_queue);
673 INIT_LIST_HEAD(&con->out_sent);
674 INIT_DELAYED_WORK(&con->work, con_work);
675
676 con->state = CON_STATE_CLOSED;
677 }
678 EXPORT_SYMBOL(ceph_con_init);
679
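/*
 * Minimal usage sketch: in terms of the exported functions above, a
 * caller drives a connection roughly as follows:
 *
 *     ceph_con_init(con, private, ops, msgr);      CON_STATE_CLOSED
 *     ceph_con_open(con, type, num, addr);         CON_STATE_PREOPEN,
 *                                                  work is queued
 *     ... messages exchanged while CON_STATE_OPEN ...
 *     ceph_con_close(con);                         CON_STATE_CLOSED again
 *
 * The CONNECTING, NEGOTIATING and STANDBY states in between are managed
 * by the connection worker, not by the caller.
 */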
680
681 /*
682 * We maintain a global counter to order connection attempts. Get
683 * a unique seq greater than @gt.
684 */
685 static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
686 {
687 u32 ret;
688
689 spin_lock(&msgr->global_seq_lock);
690 if (msgr->global_seq < gt)
691 msgr->global_seq = gt;
692 ret = ++msgr->global_seq;
693 spin_unlock(&msgr->global_seq_lock);
694 return ret;
695 }
696
697 static void con_out_kvec_reset(struct ceph_connection *con)
698 {
699 con->out_kvec_left = 0;
700 con->out_kvec_bytes = 0;
701 con->out_kvec_cur = &con->out_kvec[0];
702 }
703
704 static void con_out_kvec_add(struct ceph_connection *con,
705 size_t size, void *data)
706 {
707 int index;
708
709 index = con->out_kvec_left;
710 BUG_ON(index >= ARRAY_SIZE(con->out_kvec));
711
712 con->out_kvec[index].iov_len = size;
713 con->out_kvec[index].iov_base = data;
714 con->out_kvec_left++;
715 con->out_kvec_bytes += size;
716 }
717
718 #ifdef CONFIG_BLOCK
719 static void init_bio_iter(struct bio *bio, struct bio **bio_iter,
720 unsigned int *bio_seg)
721 {
722 if (!bio) {
723 *bio_iter = NULL;
724 *bio_seg = 0;
725 return;
726 }
727 *bio_iter = bio;
728 *bio_seg = (unsigned int) bio->bi_idx;
729 }
730
731 static void iter_bio_next(struct bio **bio_iter, unsigned int *seg)
732 {
733 if (*bio_iter == NULL)
734 return;
735
736 BUG_ON(*seg >= (*bio_iter)->bi_vcnt);
737
738 (*seg)++;
739 if (*seg == (*bio_iter)->bi_vcnt)
740 init_bio_iter((*bio_iter)->bi_next, bio_iter, seg);
741 }
742
743 /*
744 * For a bio data item, a piece is whatever remains of the next
745 * entry in the current bio iovec, or the first entry in the next
746 * bio in the list.
747 */
748 static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data *data)
749 {
750 struct ceph_msg_data_cursor *cursor = &data->cursor;
751 struct bio *bio;
752
753 BUG_ON(data->type != CEPH_MSG_DATA_BIO);
754
755 bio = data->bio;
756 BUG_ON(!bio);
757 BUG_ON(!bio->bi_vcnt);
758 /* resid = bio->bi_size */
759
760 cursor->bio = bio;
761 cursor->vector_index = 0;
762 cursor->vector_offset = 0;
763 cursor->last_piece = !bio->bi_next && bio->bi_vcnt == 1;
764 }
765
766 static struct page *ceph_msg_data_bio_next(struct ceph_msg_data *data,
767 size_t *page_offset,
768 size_t *length)
769 {
770 struct ceph_msg_data_cursor *cursor = &data->cursor;
771 struct bio *bio;
772 struct bio_vec *bio_vec;
773 unsigned int index;
774
775 BUG_ON(data->type != CEPH_MSG_DATA_BIO);
776
777 bio = cursor->bio;
778 BUG_ON(!bio);
779
780 index = cursor->vector_index;
781 BUG_ON(index >= (unsigned int) bio->bi_vcnt);
782
783 bio_vec = &bio->bi_io_vec[index];
784 BUG_ON(cursor->vector_offset >= bio_vec->bv_len);
785 *page_offset = (size_t) (bio_vec->bv_offset + cursor->vector_offset);
786 BUG_ON(*page_offset >= PAGE_SIZE);
787 *length = (size_t) (bio_vec->bv_len - cursor->vector_offset);
788 BUG_ON(*length > PAGE_SIZE);
789
790 return bio_vec->bv_page;
791 }
792
793 static bool ceph_msg_data_bio_advance(struct ceph_msg_data *data, size_t bytes)
794 {
795 struct ceph_msg_data_cursor *cursor = &data->cursor;
796 struct bio *bio;
797 struct bio_vec *bio_vec;
798 unsigned int index;
799
800 BUG_ON(data->type != CEPH_MSG_DATA_BIO);
801
802 bio = cursor->bio;
803 BUG_ON(!bio);
804
805 index = cursor->vector_index;
806 BUG_ON(index >= (unsigned int) bio->bi_vcnt);
807 bio_vec = &bio->bi_io_vec[index];
808 BUG_ON(cursor->vector_offset + bytes > bio_vec->bv_len);
809
810 /* Advance the cursor offset */
811
812 cursor->vector_offset += bytes;
813 if (cursor->vector_offset < bio_vec->bv_len)
814 return false; /* more bytes to process in this segment */
815
816 /* Move on to the next segment, and possibly the next bio */
817
818 if (++cursor->vector_index == (unsigned int) bio->bi_vcnt) {
819 bio = bio->bi_next;
820 cursor->bio = bio;
821 cursor->vector_index = 0;
822 }
823 cursor->vector_offset = 0;
824
825 if (!cursor->last_piece && bio && !bio->bi_next)
826 if (cursor->vector_index == (unsigned int) bio->bi_vcnt - 1)
827 cursor->last_piece = true;
828
829 return true;
830 }
831 #endif
832
833 /*
834 * For a page array, a piece comes from the first page in the array
835 * that has not already been fully consumed.
836 */
837 static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data *data)
838 {
839 struct ceph_msg_data_cursor *cursor = &data->cursor;
840 int page_count;
841
842 BUG_ON(data->type != CEPH_MSG_DATA_PAGES);
843
844 BUG_ON(!data->pages);
845 BUG_ON(!data->length);
846
847 page_count = calc_pages_for(data->alignment, (u64)data->length);
848 BUG_ON(page_count > (int) USHRT_MAX);
849 cursor->resid = data->length;
850 cursor->page_offset = data->alignment & ~PAGE_MASK;
851 cursor->page_index = 0;
852 cursor->page_count = (unsigned short) page_count;
853 cursor->last_piece = cursor->page_count == 1;
854 }
855
856 static struct page *ceph_msg_data_pages_next(struct ceph_msg_data *data,
857 size_t *page_offset,
858 size_t *length)
859 {
860 struct ceph_msg_data_cursor *cursor = &data->cursor;
861
862 BUG_ON(data->type != CEPH_MSG_DATA_PAGES);
863
864 BUG_ON(cursor->page_index >= cursor->page_count);
865 BUG_ON(cursor->page_offset >= PAGE_SIZE);
866 BUG_ON(!cursor->resid);
867
868 *page_offset = cursor->page_offset;
869 if (cursor->last_piece) {
870 BUG_ON(*page_offset + cursor->resid > PAGE_SIZE);
871 *length = cursor->resid;
872 } else {
873 *length = PAGE_SIZE - *page_offset;
874 }
875
876 return data->pages[cursor->page_index];
877 }
878
879 static bool ceph_msg_data_pages_advance(struct ceph_msg_data *data,
880 size_t bytes)
881 {
882 struct ceph_msg_data_cursor *cursor = &data->cursor;
883
884 BUG_ON(data->type != CEPH_MSG_DATA_PAGES);
885
886 BUG_ON(cursor->page_offset + bytes > PAGE_SIZE);
887 BUG_ON(bytes > cursor->resid);
888
889 /* Advance the cursor page offset */
890
891 cursor->resid -= bytes;
892 cursor->page_offset += bytes;
893 if (!bytes || cursor->page_offset & ~PAGE_MASK)
894 return false; /* more bytes to process in the current page */
895
896 /* Move on to the next page */
897
898 BUG_ON(cursor->page_index >= cursor->page_count);
899 cursor->page_offset = 0;
900 cursor->page_index++;
901 cursor->last_piece = cursor->page_index == cursor->page_count - 1;
902
903 return true;
904 }
905
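/*
 * Worked example (made-up numbers, assuming PAGE_SIZE is 4096): for a
 * pages array with alignment 1536 and length 5000, calc_pages_for(1536,
 * 5000) yields 2 pages, so the cursor starts with page_index 0,
 * page_offset 1536, resid 5000 and last_piece false.  The first piece
 * returned is (offset 1536, length 2560); advancing by 2560 bytes
 * crosses into page_index 1 and sets last_piece, so the final piece is
 * (offset 0, length 2440) -- exactly the remaining resid.
 */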
906 /*
907 * For a pagelist, a piece is whatever remains to be consumed in the
908 * first page in the list, or the front of the next page.
909 */
910 static void ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data *data)
911 {
912 struct ceph_msg_data_cursor *cursor = &data->cursor;
913 struct ceph_pagelist *pagelist;
914 struct page *page;
915
916 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
917
918 pagelist = data->pagelist;
919 BUG_ON(!pagelist);
920 if (!pagelist->length)
921 return; /* pagelist can be assigned but empty */
922
923 BUG_ON(list_empty(&pagelist->head));
924 page = list_first_entry(&pagelist->head, struct page, lru);
925
926 cursor->page = page;
927 cursor->offset = 0;
928 cursor->last_piece = pagelist->length <= PAGE_SIZE;
929 }
930
931 static struct page *ceph_msg_data_pagelist_next(struct ceph_msg_data *data,
932 size_t *page_offset,
933 size_t *length)
934 {
935 struct ceph_msg_data_cursor *cursor = &data->cursor;
936 struct ceph_pagelist *pagelist;
937 size_t piece_end;
938
939 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
940
941 pagelist = data->pagelist;
942 BUG_ON(!pagelist);
943
944 BUG_ON(!cursor->page);
945 BUG_ON(cursor->offset >= pagelist->length);
946
947 if (cursor->last_piece) {
948 /* pagelist offset is always 0 */
949 piece_end = pagelist->length & ~PAGE_MASK;
950 if (!piece_end)
951 piece_end = PAGE_SIZE;
952 } else {
953 piece_end = PAGE_SIZE;
954 }
955 *page_offset = cursor->offset & ~PAGE_MASK;
956 *length = piece_end - *page_offset;
957
958 return data->cursor.page;
959 }
960
961 static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data *data,
962 size_t bytes)
963 {
964 struct ceph_msg_data_cursor *cursor = &data->cursor;
965 struct ceph_pagelist *pagelist;
966
967 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
968
969 pagelist = data->pagelist;
970 BUG_ON(!pagelist);
971 BUG_ON(!cursor->page);
972 BUG_ON(cursor->offset + bytes > pagelist->length);
973 BUG_ON((cursor->offset & ~PAGE_MASK) + bytes > PAGE_SIZE);
974
975 /* Advance the cursor offset */
976
977 cursor->offset += bytes;
978 /* pagelist offset is always 0 */
979 if (!bytes || cursor->offset & ~PAGE_MASK)
980 return false; /* more bytes to process in the current page */
981
982 /* Move on to the next page */
983
984 BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head));
985 cursor->page = list_entry_next(cursor->page, lru);
986
987 /* cursor offset is at page boundary; pagelist offset is always 0 */
988 if (pagelist->length - cursor->offset <= PAGE_SIZE)
989 cursor->last_piece = true;
990
991 return true;
992 }
993
994 /*
995 * Message data is handled (sent or received) in pieces, where each
996 * piece resides on a single page. The network layer might not
997 * consume an entire piece at once. A data item's cursor keeps
998 * track of which piece is next to process and how much remains to
999 * be processed in that piece. It also tracks whether the current
1000 * piece is the last one in the data item.
1001 */
1002 static void ceph_msg_data_cursor_init(struct ceph_msg_data *data)
1003 {
1004 switch (data->type) {
1005 case CEPH_MSG_DATA_PAGELIST:
1006 ceph_msg_data_pagelist_cursor_init(data);
1007 break;
1008 case CEPH_MSG_DATA_PAGES:
1009 ceph_msg_data_pages_cursor_init(data);
1010 break;
1011 #ifdef CONFIG_BLOCK
1012 case CEPH_MSG_DATA_BIO:
1013 ceph_msg_data_bio_cursor_init(data);
1014 break;
1015 #endif /* CONFIG_BLOCK */
1016 case CEPH_MSG_DATA_NONE:
1017 default:
1018 /* BUG(); */
1019 break;
1020 }
1021 }
1022
1023 /*
1024 * Return the page containing the next piece to process for a given
1025 * data item, and supply the page offset and length of that piece.
1026 * Indicate whether this is the last piece in this data item.
1027 */
1028 static struct page *ceph_msg_data_next(struct ceph_msg_data *data,
1029 size_t *page_offset,
1030 size_t *length,
1031 bool *last_piece)
1032 {
1033 struct page *page;
1034
1035 switch (data->type) {
1036 case CEPH_MSG_DATA_PAGELIST:
1037 page = ceph_msg_data_pagelist_next(data, page_offset, length);
1038 break;
1039 case CEPH_MSG_DATA_PAGES:
1040 page = ceph_msg_data_pages_next(data, page_offset, length);
1041 break;
1042 #ifdef CONFIG_BLOCK
1043 case CEPH_MSG_DATA_BIO:
1044 page = ceph_msg_data_bio_next(data, page_offset, length);
1045 break;
1046 #endif /* CONFIG_BLOCK */
1047 case CEPH_MSG_DATA_NONE:
1048 default:
1049 page = NULL;
1050 break;
1051 }
1052 BUG_ON(!page);
1053 BUG_ON(*page_offset + *length > PAGE_SIZE);
1054 BUG_ON(!*length);
1055 if (last_piece)
1056 *last_piece = data->cursor.last_piece;
1057
1058 return page;
1059 }
1060
1061 /*
1062 * Returns true if the result moves the cursor on to the next piece
1063 * of the data item.
1064 */
1065 static bool ceph_msg_data_advance(struct ceph_msg_data *data, size_t bytes)
1066 {
1067 bool new_piece;
1068
1069 switch (data->type) {
1070 case CEPH_MSG_DATA_PAGELIST:
1071 new_piece = ceph_msg_data_pagelist_advance(data, bytes);
1072 break;
1073 case CEPH_MSG_DATA_PAGES:
1074 new_piece = ceph_msg_data_pages_advance(data, bytes);
1075 break;
1076 #ifdef CONFIG_BLOCK
1077 case CEPH_MSG_DATA_BIO:
1078 new_piece = ceph_msg_data_bio_advance(data, bytes);
1079 break;
1080 #endif /* CONFIG_BLOCK */
1081 case CEPH_MSG_DATA_NONE:
1082 default:
1083 BUG();
1084 break;
1085 }
1086
1087 return new_piece;
1088 }
1089
1090 static void prepare_message_data(struct ceph_msg *msg,
1091 struct ceph_msg_pos *msg_pos)
1092 {
1093 BUG_ON(!msg);
1094 BUG_ON(!msg->hdr.data_len);
1095
1096 /* initialize page iterator */
1097 msg_pos->page = 0;
1098 if (ceph_msg_has_pages(msg))
1099 msg_pos->page_pos = msg->p.alignment;
1100 else
1101 msg_pos->page_pos = 0;
1102 #ifdef CONFIG_BLOCK
1103 if (ceph_msg_has_bio(msg))
1104 init_bio_iter(msg->b.bio, &msg->b.bio_iter, &msg->b.bio_seg);
1105 #endif
1106 msg_pos->data_pos = 0;
1107
1108 /* Initialize data cursors */
1109
1110 #ifdef CONFIG_BLOCK
1111 if (ceph_msg_has_bio(msg))
1112 ceph_msg_data_cursor_init(&msg->b);
1113 #endif /* CONFIG_BLOCK */
1114 if (ceph_msg_has_pages(msg))
1115 ceph_msg_data_cursor_init(&msg->p);
1116 if (ceph_msg_has_pagelist(msg))
1117 ceph_msg_data_cursor_init(&msg->l);
1118 if (ceph_msg_has_trail(msg))
1119 ceph_msg_data_cursor_init(&msg->t);
1120
1121 msg_pos->did_page_crc = false;
1122 }
1123
1124 /*
1125 * Prepare footer for currently outgoing message, and finish things
1126 * off.  Assumes out_kvec* are already valid; we just add on to the end.
1127 */
1128 static void prepare_write_message_footer(struct ceph_connection *con)
1129 {
1130 struct ceph_msg *m = con->out_msg;
1131 int v = con->out_kvec_left;
1132
1133 m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;
1134
1135 dout("prepare_write_message_footer %p\n", con);
1136 con->out_kvec_is_msg = true;
1137 con->out_kvec[v].iov_base = &m->footer;
1138 con->out_kvec[v].iov_len = sizeof(m->footer);
1139 con->out_kvec_bytes += sizeof(m->footer);
1140 con->out_kvec_left++;
1141 con->out_more = m->more_to_follow;
1142 con->out_msg_done = true;
1143 }
1144
1145 /*
1146 * Prepare headers for the next outgoing message.
1147 */
1148 static void prepare_write_message(struct ceph_connection *con)
1149 {
1150 struct ceph_msg *m;
1151 u32 crc;
1152
1153 con_out_kvec_reset(con);
1154 con->out_kvec_is_msg = true;
1155 con->out_msg_done = false;
1156
1157 /* Sneak an ack in there first? If we can get it into the same
1158 * TCP packet that's a good thing. */
1159 if (con->in_seq > con->in_seq_acked) {
1160 con->in_seq_acked = con->in_seq;
1161 con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
1162 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
1163 con_out_kvec_add(con, sizeof (con->out_temp_ack),
1164 &con->out_temp_ack);
1165 }
1166
1167 BUG_ON(list_empty(&con->out_queue));
1168 m = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
1169 con->out_msg = m;
1170 BUG_ON(m->con != con);
1171
1172 /* put message on sent list */
1173 ceph_msg_get(m);
1174 list_move_tail(&m->list_head, &con->out_sent);
1175
1176 /*
1177 * only assign outgoing seq # if we haven't sent this message
1178 * yet.  If it is requeued, resend with its original seq.
1179 */
1180 if (m->needs_out_seq) {
1181 m->hdr.seq = cpu_to_le64(++con->out_seq);
1182 m->needs_out_seq = false;
1183 }
1184
1185 dout("prepare_write_message %p seq %lld type %d len %d+%d+%d (%zd)\n",
1186 m, con->out_seq, le16_to_cpu(m->hdr.type),
1187 le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
1188 le32_to_cpu(m->hdr.data_len), m->p.length);
1189 BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);
1190
1191 /* tag + hdr + front + middle */
1192 con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
1193 con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
1194 con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);
1195
1196 if (m->middle)
1197 con_out_kvec_add(con, m->middle->vec.iov_len,
1198 m->middle->vec.iov_base);
1199
1200 /* fill in crc (except data pages), footer */
1201 crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
1202 con->out_msg->hdr.crc = cpu_to_le32(crc);
1203 con->out_msg->footer.flags = 0;
1204
1205 crc = crc32c(0, m->front.iov_base, m->front.iov_len);
1206 con->out_msg->footer.front_crc = cpu_to_le32(crc);
1207 if (m->middle) {
1208 crc = crc32c(0, m->middle->vec.iov_base,
1209 m->middle->vec.iov_len);
1210 con->out_msg->footer.middle_crc = cpu_to_le32(crc);
1211 } else
1212 con->out_msg->footer.middle_crc = 0;
1213 dout("%s front_crc %u middle_crc %u\n", __func__,
1214 le32_to_cpu(con->out_msg->footer.front_crc),
1215 le32_to_cpu(con->out_msg->footer.middle_crc));
1216
1217 /* is there a data payload? */
1218 con->out_msg->footer.data_crc = 0;
1219 if (m->hdr.data_len) {
1220 prepare_message_data(con->out_msg, &con->out_msg_pos);
1221 con->out_more = 1; /* data + footer will follow */
1222 } else {
1223 /* no, queue up footer too and be done */
1224 prepare_write_message_footer(con);
1225 }
1226
1227 con_flag_set(con, CON_FLAG_WRITE_PENDING);
1228 }
1229
1230 /*
1231 * Prepare an ack.
1232 */
1233 static void prepare_write_ack(struct ceph_connection *con)
1234 {
1235 dout("prepare_write_ack %p %llu -> %llu\n", con,
1236 con->in_seq_acked, con->in_seq);
1237 con->in_seq_acked = con->in_seq;
1238
1239 con_out_kvec_reset(con);
1240
1241 con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
1242
1243 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
1244 con_out_kvec_add(con, sizeof (con->out_temp_ack),
1245 &con->out_temp_ack);
1246
1247 con->out_more = 1; /* more will follow.. eventually.. */
1248 con_flag_set(con, CON_FLAG_WRITE_PENDING);
1249 }
1250
1251 /*
1252 * Prepare to write keepalive byte.
1253 */
1254 static void prepare_write_keepalive(struct ceph_connection *con)
1255 {
1256 dout("prepare_write_keepalive %p\n", con);
1257 con_out_kvec_reset(con);
1258 con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive);
1259 con_flag_set(con, CON_FLAG_WRITE_PENDING);
1260 }
1261
1262 /*
1263 * Connection negotiation.
1264 */
1265
1266 static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection *con,
1267 int *auth_proto)
1268 {
1269 struct ceph_auth_handshake *auth;
1270
1271 if (!con->ops->get_authorizer) {
1272 con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
1273 con->out_connect.authorizer_len = 0;
1274 return NULL;
1275 }
1276
1277 /* Can't hold the mutex while getting authorizer */
1278 mutex_unlock(&con->mutex);
1279 auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);
1280 mutex_lock(&con->mutex);
1281
1282 if (IS_ERR(auth))
1283 return auth;
1284 if (con->state != CON_STATE_NEGOTIATING)
1285 return ERR_PTR(-EAGAIN);
1286
1287 con->auth_reply_buf = auth->authorizer_reply_buf;
1288 con->auth_reply_buf_len = auth->authorizer_reply_buf_len;
1289 return auth;
1290 }
1291
1292 /*
1293 * We connected to a peer and are saying hello.
1294 */
1295 static void prepare_write_banner(struct ceph_connection *con)
1296 {
1297 con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
1298 con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
1299 &con->msgr->my_enc_addr);
1300
1301 con->out_more = 0;
1302 con_flag_set(con, CON_FLAG_WRITE_PENDING);
1303 }
1304
1305 static int prepare_write_connect(struct ceph_connection *con)
1306 {
1307 unsigned int global_seq = get_global_seq(con->msgr, 0);
1308 int proto;
1309 int auth_proto;
1310 struct ceph_auth_handshake *auth;
1311
1312 switch (con->peer_name.type) {
1313 case CEPH_ENTITY_TYPE_MON:
1314 proto = CEPH_MONC_PROTOCOL;
1315 break;
1316 case CEPH_ENTITY_TYPE_OSD:
1317 proto = CEPH_OSDC_PROTOCOL;
1318 break;
1319 case CEPH_ENTITY_TYPE_MDS:
1320 proto = CEPH_MDSC_PROTOCOL;
1321 break;
1322 default:
1323 BUG();
1324 }
1325
1326 dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
1327 con->connect_seq, global_seq, proto);
1328
1329 con->out_connect.features = cpu_to_le64(con->msgr->supported_features);
1330 con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
1331 con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
1332 con->out_connect.global_seq = cpu_to_le32(global_seq);
1333 con->out_connect.protocol_version = cpu_to_le32(proto);
1334 con->out_connect.flags = 0;
1335
1336 auth_proto = CEPH_AUTH_UNKNOWN;
1337 auth = get_connect_authorizer(con, &auth_proto);
1338 if (IS_ERR(auth))
1339 return PTR_ERR(auth);
1340
1341 con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
1342 con->out_connect.authorizer_len = auth ?
1343 cpu_to_le32(auth->authorizer_buf_len) : 0;
1344
1345 con_out_kvec_add(con, sizeof (con->out_connect),
1346 &con->out_connect);
1347 if (auth && auth->authorizer_buf_len)
1348 con_out_kvec_add(con, auth->authorizer_buf_len,
1349 auth->authorizer_buf);
1350
1351 con->out_more = 0;
1352 con_flag_set(con, CON_FLAG_WRITE_PENDING);
1353
1354 return 0;
1355 }
1356
1357 /*
1358 * write as much of pending kvecs to the socket as we can.
1359 * 1 -> done
1360 * 0 -> socket full, but more to do
1361 * <0 -> error
1362 */
1363 static int write_partial_kvec(struct ceph_connection *con)
1364 {
1365 int ret;
1366
1367 dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
1368 while (con->out_kvec_bytes > 0) {
1369 ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
1370 con->out_kvec_left, con->out_kvec_bytes,
1371 con->out_more);
1372 if (ret <= 0)
1373 goto out;
1374 con->out_kvec_bytes -= ret;
1375 if (con->out_kvec_bytes == 0)
1376 break; /* done */
1377
1378 /* account for full iov entries consumed */
1379 while (ret >= con->out_kvec_cur->iov_len) {
1380 BUG_ON(!con->out_kvec_left);
1381 ret -= con->out_kvec_cur->iov_len;
1382 con->out_kvec_cur++;
1383 con->out_kvec_left--;
1384 }
1385 /* and for a partially-consumed entry */
1386 if (ret) {
1387 con->out_kvec_cur->iov_len -= ret;
1388 con->out_kvec_cur->iov_base += ret;
1389 }
1390 }
1391 con->out_kvec_left = 0;
1392 con->out_kvec_is_msg = false;
1393 ret = 1;
1394 out:
1395 dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
1396 con->out_kvec_bytes, con->out_kvec_left, ret);
1397 return ret; /* done! */
1398 }
1399
1400 static void out_msg_pos_next(struct ceph_connection *con, struct page *page,
1401 size_t len, size_t sent, bool in_trail)
1402 {
1403 struct ceph_msg *msg = con->out_msg;
1404 struct ceph_msg_pos *msg_pos = &con->out_msg_pos;
1405 bool need_crc = false;
1406
1407 BUG_ON(!msg);
1408 BUG_ON(!sent);
1409
1410 msg_pos->data_pos += sent;
1411 msg_pos->page_pos += sent;
1412 if (in_trail)
1413 need_crc = ceph_msg_data_advance(&msg->t, sent);
1414 else if (ceph_msg_has_pages(msg))
1415 need_crc = ceph_msg_data_advance(&msg->p, sent);
1416 else if (ceph_msg_has_pagelist(msg))
1417 need_crc = ceph_msg_data_advance(&msg->l, sent);
1418 #ifdef CONFIG_BLOCK
1419 else if (ceph_msg_has_bio(msg))
1420 need_crc = ceph_msg_data_advance(&msg->b, sent);
1421 #endif /* CONFIG_BLOCK */
1422 BUG_ON(need_crc && sent != len);
1423
1424 if (sent < len)
1425 return;
1426
1427 BUG_ON(sent != len);
1428 msg_pos->page_pos = 0;
1429 msg_pos->page++;
1430 msg_pos->did_page_crc = false;
1431 }
1432
1433 static void in_msg_pos_next(struct ceph_connection *con, size_t len,
1434 size_t received)
1435 {
1436 struct ceph_msg *msg = con->in_msg;
1437 struct ceph_msg_pos *msg_pos = &con->in_msg_pos;
1438
1439 BUG_ON(!msg);
1440 BUG_ON(!received);
1441
1442 msg_pos->data_pos += received;
1443 msg_pos->page_pos += received;
1444 if (received < len)
1445 return;
1446
1447 BUG_ON(received != len);
1448 msg_pos->page_pos = 0;
1449 msg_pos->page++;
1450 #ifdef CONFIG_BLOCK
1451 if (msg->b.bio)
1452 iter_bio_next(&msg->b.bio_iter, &msg->b.bio_seg);
1453 #endif /* CONFIG_BLOCK */
1454 }
1455
1456 static u32 ceph_crc32c_page(u32 crc, struct page *page,
1457 unsigned int page_offset,
1458 unsigned int length)
1459 {
1460 char *kaddr;
1461
1462 kaddr = kmap(page);
1463 BUG_ON(kaddr == NULL);
1464 crc = crc32c(crc, kaddr + page_offset, length);
1465 kunmap(page);
1466
1467 return crc;
1468 }
1469 /*
1470 * Write as much message data payload as we can. If we finish, queue
1471 * up the footer.
1472 * 1 -> done, footer is now queued in out_kvec[].
1473 * 0 -> socket full, but more to do
1474 * <0 -> error
1475 */
1476 static int write_partial_message_data(struct ceph_connection *con)
1477 {
1478 struct ceph_msg *msg = con->out_msg;
1479 struct ceph_msg_pos *msg_pos = &con->out_msg_pos;
1480 unsigned int data_len = le32_to_cpu(msg->hdr.data_len);
1481 bool do_datacrc = !con->msgr->nocrc;
1482 int ret;
1483 int total_max_write;
1484 bool in_trail = false;
1485 size_t trail_len = 0;
1486 size_t trail_off = data_len;
1487
1488 if (ceph_msg_has_trail(msg)) {
1489 trail_len = msg->t.pagelist->length;
1490 trail_off -= trail_len;
1491 }
1492
1493 dout("%s %p msg %p page %d offset %d\n", __func__,
1494 con, msg, msg_pos->page, msg_pos->page_pos);
1495
1496 /*
1497 * Iterate through each page that contains data to be
1498 * written, and send as much as possible for each.
1499 *
1500 * If we are calculating the data crc (the default), we will
1501 * need to map the page. If we have no pages, they have
1502 * been revoked, so use the zero page.
1503 */
1504 while (data_len > msg_pos->data_pos) {
1505 struct page *page = NULL;
1506 size_t page_offset;
1507 size_t length;
1508 bool use_cursor = false;
1509 bool last_piece = true; /* preserve existing behavior */
1510
1511 in_trail = in_trail || msg_pos->data_pos >= trail_off;
1512 if (!in_trail)
1513 total_max_write = trail_off - msg_pos->data_pos;
1514
1515 if (in_trail) {
1516 BUG_ON(!ceph_msg_has_trail(msg));
1517 use_cursor = true;
1518 page = ceph_msg_data_next(&msg->t, &page_offset,
1519 &length, &last_piece);
1520 } else if (ceph_msg_has_pages(msg)) {
1521 use_cursor = true;
1522 page = ceph_msg_data_next(&msg->p, &page_offset,
1523 &length, &last_piece);
1524 } else if (ceph_msg_has_pagelist(msg)) {
1525 use_cursor = true;
1526 page = ceph_msg_data_next(&msg->l, &page_offset,
1527 &length, &last_piece);
1528 #ifdef CONFIG_BLOCK
1529 } else if (ceph_msg_has_bio(msg)) {
1530 use_cursor = true;
1531 page = ceph_msg_data_next(&msg->b, &page_offset,
1532 &length, &last_piece);
1533 #endif
1534 } else {
1535 page = zero_page;
1536 }
1537 if (!use_cursor) {
1538 length = min_t(int, PAGE_SIZE - msg_pos->page_pos,
1539 total_max_write);
1540
1541 page_offset = msg_pos->page_pos;
1542 }
1543 if (do_datacrc && !msg_pos->did_page_crc) {
1544 u32 crc = le32_to_cpu(msg->footer.data_crc);
1545
1546 crc = ceph_crc32c_page(crc, page, page_offset, length);
1547 msg->footer.data_crc = cpu_to_le32(crc);
1548 msg_pos->did_page_crc = true;
1549 }
1550 ret = ceph_tcp_sendpage(con->sock, page, page_offset,
1551 length, last_piece);
1552 if (ret <= 0)
1553 goto out;
1554
1555 out_msg_pos_next(con, page, length, (size_t) ret, in_trail);
1556 }
1557
1558 dout("%s %p msg %p done\n", __func__, con, msg);
1559
1560 /* prepare and queue up footer, too */
1561 if (!do_datacrc)
1562 msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
1563 con_out_kvec_reset(con);
1564 prepare_write_message_footer(con);
1565 ret = 1;
1566 out:
1567 return ret;
1568 }
1569
1570 /*
1571 * write some zeros
1572 */
1573 static int write_partial_skip(struct ceph_connection *con)
1574 {
1575 int ret;
1576
1577 while (con->out_skip > 0) {
1578 size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);
1579
1580 ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, true);
1581 if (ret <= 0)
1582 goto out;
1583 con->out_skip -= ret;
1584 }
1585 ret = 1;
1586 out:
1587 return ret;
1588 }
1589
1590 /*
1591 * Prepare to read connection handshake, or an ack.
1592 */
1593 static void prepare_read_banner(struct ceph_connection *con)
1594 {
1595 dout("prepare_read_banner %p\n", con);
1596 con->in_base_pos = 0;
1597 }
1598
1599 static void prepare_read_connect(struct ceph_connection *con)
1600 {
1601 dout("prepare_read_connect %p\n", con);
1602 con->in_base_pos = 0;
1603 }
1604
1605 static void prepare_read_ack(struct ceph_connection *con)
1606 {
1607 dout("prepare_read_ack %p\n", con);
1608 con->in_base_pos = 0;
1609 }
1610
1611 static void prepare_read_tag(struct ceph_connection *con)
1612 {
1613 dout("prepare_read_tag %p\n", con);
1614 con->in_base_pos = 0;
1615 con->in_tag = CEPH_MSGR_TAG_READY;
1616 }
1617
1618 /*
1619 * Prepare to read a message.
1620 */
1621 static int prepare_read_message(struct ceph_connection *con)
1622 {
1623 dout("prepare_read_message %p\n", con);
1624 BUG_ON(con->in_msg != NULL);
1625 con->in_base_pos = 0;
1626 con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
1627 return 0;
1628 }
1629
1630
1631 static int read_partial(struct ceph_connection *con,
1632 int end, int size, void *object)
1633 {
1634 while (con->in_base_pos < end) {
1635 int left = end - con->in_base_pos;
1636 int have = size - left;
1637 int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
1638 if (ret <= 0)
1639 return ret;
1640 con->in_base_pos += ret;
1641 }
1642 return 1;
1643 }
1644
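/*
 * Note: read_partial() keys everything off con->in_base_pos, which
 * counts bytes consumed since the current read phase began.
 * "have = size - left" is the number of bytes of @object already filled
 * in by earlier calls, so a short read simply resumes at object + have
 * on the next pass.  Callers chain consecutive sections by accumulating
 * @end, as read_partial_banner() below does:
 *
 *     size = sizeof (con->actual_peer_addr);
 *     end += size;
 *     ret = read_partial(con, end, size, &con->actual_peer_addr);
 */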
1645
1646 /*
1647 * Read all or part of the connect-side handshake on a new connection
1648 */
1649 static int read_partial_banner(struct ceph_connection *con)
1650 {
1651 int size;
1652 int end;
1653 int ret;
1654
1655 dout("read_partial_banner %p at %d\n", con, con->in_base_pos);
1656
1657 /* peer's banner */
1658 size = strlen(CEPH_BANNER);
1659 end = size;
1660 ret = read_partial(con, end, size, con->in_banner);
1661 if (ret <= 0)
1662 goto out;
1663
1664 size = sizeof (con->actual_peer_addr);
1665 end += size;
1666 ret = read_partial(con, end, size, &con->actual_peer_addr);
1667 if (ret <= 0)
1668 goto out;
1669
1670 size = sizeof (con->peer_addr_for_me);
1671 end += size;
1672 ret = read_partial(con, end, size, &con->peer_addr_for_me);
1673 if (ret <= 0)
1674 goto out;
1675
1676 out:
1677 return ret;
1678 }
1679
1680 static int read_partial_connect(struct ceph_connection *con)
1681 {
1682 int size;
1683 int end;
1684 int ret;
1685
1686 dout("read_partial_connect %p at %d\n", con, con->in_base_pos);
1687
1688 size = sizeof (con->in_reply);
1689 end = size;
1690 ret = read_partial(con, end, size, &con->in_reply);
1691 if (ret <= 0)
1692 goto out;
1693
1694 size = le32_to_cpu(con->in_reply.authorizer_len);
1695 end += size;
1696 ret = read_partial(con, end, size, con->auth_reply_buf);
1697 if (ret <= 0)
1698 goto out;
1699
1700 dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
1701 con, (int)con->in_reply.tag,
1702 le32_to_cpu(con->in_reply.connect_seq),
1703 le32_to_cpu(con->in_reply.global_seq));
1704 out:
1705 return ret;
1706
1707 }
1708
1709 /*
1710 * Verify the hello banner looks okay.
1711 */
1712 static int verify_hello(struct ceph_connection *con)
1713 {
1714 if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
1715 pr_err("connect to %s got bad banner\n",
1716 ceph_pr_addr(&con->peer_addr.in_addr));
1717 con->error_msg = "protocol error, bad banner";
1718 return -1;
1719 }
1720 return 0;
1721 }
1722
1723 static bool addr_is_blank(struct sockaddr_storage *ss)
1724 {
1725 switch (ss->ss_family) {
1726 case AF_INET:
1727 return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
1728 case AF_INET6:
1729 return
1730 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
1731 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
1732 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
1733 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
1734 }
1735 return false;
1736 }
1737
1738 static int addr_port(struct sockaddr_storage *ss)
1739 {
1740 switch (ss->ss_family) {
1741 case AF_INET:
1742 return ntohs(((struct sockaddr_in *)ss)->sin_port);
1743 case AF_INET6:
1744 return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
1745 }
1746 return 0;
1747 }
1748
1749 static void addr_set_port(struct sockaddr_storage *ss, int p)
1750 {
1751 switch (ss->ss_family) {
1752 case AF_INET:
1753 ((struct sockaddr_in *)ss)->sin_port = htons(p);
1754 break;
1755 case AF_INET6:
1756 ((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
1757 break;
1758 }
1759 }
1760
1761 /*
1762 * Unlike other *_pton function semantics, zero indicates success.
1763 */
1764 static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss,
1765 char delim, const char **ipend)
1766 {
1767 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
1768 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
1769
1770 memset(ss, 0, sizeof(*ss));
1771
1772 if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) {
1773 ss->ss_family = AF_INET;
1774 return 0;
1775 }
1776
1777 if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) {
1778 ss->ss_family = AF_INET6;
1779 return 0;
1780 }
1781
1782 return -EINVAL;
1783 }
1784
1785 /*
1786 * Extract hostname string and resolve using kernel DNS facility.
1787 */
1788 #ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
1789 static int ceph_dns_resolve_name(const char *name, size_t namelen,
1790 struct sockaddr_storage *ss, char delim, const char **ipend)
1791 {
1792 const char *end, *delim_p;
1793 char *colon_p, *ip_addr = NULL;
1794 int ip_len, ret;
1795
1796 /*
1797 * The end of the hostname occurs immediately preceding the delimiter or
1798 * the port marker (':') where the delimiter takes precedence.
1799 */
1800 delim_p = memchr(name, delim, namelen);
1801 colon_p = memchr(name, ':', namelen);
1802
1803 if (delim_p && colon_p)
1804 end = delim_p < colon_p ? delim_p : colon_p;
1805 else if (!delim_p && colon_p)
1806 end = colon_p;
1807 else {
1808 end = delim_p;
1809 if (!end) /* case: hostname:/ */
1810 end = name + namelen;
1811 }
1812
1813 if (end <= name)
1814 return -EINVAL;
1815
1816 /* do dns_resolve upcall */
1817 ip_len = dns_query(NULL, name, end - name, NULL, &ip_addr, NULL);
1818 if (ip_len > 0)
1819 ret = ceph_pton(ip_addr, ip_len, ss, -1, NULL);
1820 else
1821 ret = -ESRCH;
1822
1823 kfree(ip_addr);
1824
1825 *ipend = end;
1826
1827 pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
1828 ret, ret ? "failed" : ceph_pr_addr(ss));
1829
1830 return ret;
1831 }
1832 #else
1833 static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
1834 struct sockaddr_storage *ss, char delim, const char **ipend)
1835 {
1836 return -EINVAL;
1837 }
1838 #endif
1839
1840 /*
1841 * Parse a server name (IP or hostname). If a valid IP address is not found
1842 * then try to extract a hostname to resolve using userspace DNS upcall.
1843 */
1844 static int ceph_parse_server_name(const char *name, size_t namelen,
1845 struct sockaddr_storage *ss, char delim, const char **ipend)
1846 {
1847 int ret;
1848
1849 ret = ceph_pton(name, namelen, ss, delim, ipend);
1850 if (ret)
1851 ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend);
1852
1853 return ret;
1854 }
1855
1856 /*
1857 * Parse an ip[:port] list into an addr array. Use the default
1858 * monitor port if a port isn't specified.
1859 */
1860 int ceph_parse_ips(const char *c, const char *end,
1861 struct ceph_entity_addr *addr,
1862 int max_count, int *count)
1863 {
1864 int i, ret = -EINVAL;
1865 const char *p = c;
1866
1867 dout("parse_ips on '%.*s'\n", (int)(end-c), c);
1868 for (i = 0; i < max_count; i++) {
1869 const char *ipend;
1870 struct sockaddr_storage *ss = &addr[i].in_addr;
1871 int port;
1872 char delim = ',';
1873
1874 if (*p == '[') {
1875 delim = ']';
1876 p++;
1877 }
1878
1879 ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend);
1880 if (ret)
1881 goto bad;
1882 ret = -EINVAL;
1883
1884 p = ipend;
1885
1886 if (delim == ']') {
1887 if (*p != ']') {
1888 dout("missing matching ']'\n");
1889 goto bad;
1890 }
1891 p++;
1892 }
1893
1894 /* port? */
1895 if (p < end && *p == ':') {
1896 port = 0;
1897 p++;
1898 while (p < end && *p >= '0' && *p <= '9') {
1899 port = (port * 10) + (*p - '0');
1900 p++;
1901 }
1902 if (port > 65535 || port == 0)
1903 goto bad;
1904 } else {
1905 port = CEPH_MON_PORT;
1906 }
1907
1908 addr_set_port(ss, port);
1909
1910 dout("parse_ips got %s\n", ceph_pr_addr(ss));
1911
1912 if (p == end)
1913 break;
1914 if (*p != ',')
1915 goto bad;
1916 p++;
1917 }
1918
1919 if (p != end)
1920 goto bad;
1921
1922 if (count)
1923 *count = i + 1;
1924 return 0;
1925
1926 bad:
1927 pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
1928 return ret;
1929 }
1930 EXPORT_SYMBOL(ceph_parse_ips);
1931
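/*
 * For illustration (addresses are made up): a monitor list such as
 *
 *     "192.168.0.1:6789,[2001:db8::2],10.0.0.3"
 *
 * parses into three entity addrs.  The first keeps its explicit port,
 * while the bracketed IPv6 entry and the final entry fall back to the
 * default CEPH_MON_PORT because no ":port" follows them.
 */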
1932 static int process_banner(struct ceph_connection *con)
1933 {
1934 dout("process_banner on %p\n", con);
1935
1936 if (verify_hello(con) < 0)
1937 return -1;
1938
1939 ceph_decode_addr(&con->actual_peer_addr);
1940 ceph_decode_addr(&con->peer_addr_for_me);
1941
1942 /*
1943 * Make sure the other end is who we wanted.  Note that the other
1944 * end may not yet know their ip address, so if it's 0.0.0.0, give
1945 * them the benefit of the doubt.
1946 */
1947 if (memcmp(&con->peer_addr, &con->actual_peer_addr,
1948 sizeof(con->peer_addr)) != 0 &&
1949 !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
1950 con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
1951 pr_warning("wrong peer, want %s/%d, got %s/%d\n",
1952 ceph_pr_addr(&con->peer_addr.in_addr),
1953 (int)le32_to_cpu(con->peer_addr.nonce),
1954 ceph_pr_addr(&con->actual_peer_addr.in_addr),
1955 (int)le32_to_cpu(con->actual_peer_addr.nonce));
1956 con->error_msg = "wrong peer at address";
1957 return -1;
1958 }
1959
1960 /*
1961 * did we learn our address?
1962 */
1963 if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
1964 int port = addr_port(&con->msgr->inst.addr.in_addr);
1965
1966 memcpy(&con->msgr->inst.addr.in_addr,
1967 &con->peer_addr_for_me.in_addr,
1968 sizeof(con->peer_addr_for_me.in_addr));
1969 addr_set_port(&con->msgr->inst.addr.in_addr, port);
1970 encode_my_addr(con->msgr);
1971 dout("process_banner learned my addr is %s\n",
1972 ceph_pr_addr(&con->msgr->inst.addr.in_addr));
1973 }
1974
1975 return 0;
1976 }
1977
1978 static int process_connect(struct ceph_connection *con)
1979 {
1980 u64 sup_feat = con->msgr->supported_features;
1981 u64 req_feat = con->msgr->required_features;
1982 u64 server_feat = le64_to_cpu(con->in_reply.features);
1983 int ret;
1984
1985 dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
1986
1987 switch (con->in_reply.tag) {
1988 case CEPH_MSGR_TAG_FEATURES:
1989 pr_err("%s%lld %s feature set mismatch,"
1990 " my %llx < server's %llx, missing %llx\n",
1991 ENTITY_NAME(con->peer_name),
1992 ceph_pr_addr(&con->peer_addr.in_addr),
1993 sup_feat, server_feat, server_feat & ~sup_feat);
1994 con->error_msg = "missing required protocol features";
1995 reset_connection(con);
1996 return -1;
1997
1998 case CEPH_MSGR_TAG_BADPROTOVER:
1999 pr_err("%s%lld %s protocol version mismatch,"
2000 " my %d != server's %d\n",
2001 ENTITY_NAME(con->peer_name),
2002 ceph_pr_addr(&con->peer_addr.in_addr),
2003 le32_to_cpu(con->out_connect.protocol_version),
2004 le32_to_cpu(con->in_reply.protocol_version));
2005 con->error_msg = "protocol version mismatch";
2006 reset_connection(con);
2007 return -1;
2008
2009 case CEPH_MSGR_TAG_BADAUTHORIZER:
2010 con->auth_retry++;
2011 dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
2012 con->auth_retry);
2013 if (con->auth_retry == 2) {
2014 con->error_msg = "connect authorization failure";
2015 return -1;
2016 }
2017 con->auth_retry = 1;
2018 con_out_kvec_reset(con);
2019 ret = prepare_write_connect(con);
2020 if (ret < 0)
2021 return ret;
2022 prepare_read_connect(con);
2023 break;
2024
2025 case CEPH_MSGR_TAG_RESETSESSION:
2026 /*
2027 * If we connected with a large connect_seq but the peer
2028 * has no record of a session with us (no connection, or
2029 * connect_seq == 0), they will send RESETSESSION to indicate
2030 * that they must have reset their session, and may have
2031 * dropped messages.
2032 */
2033 dout("process_connect got RESET peer seq %u\n",
2034 le32_to_cpu(con->in_reply.connect_seq));
2035 pr_err("%s%lld %s connection reset\n",
2036 ENTITY_NAME(con->peer_name),
2037 ceph_pr_addr(&con->peer_addr.in_addr));
2038 reset_connection(con);
2039 con_out_kvec_reset(con);
2040 ret = prepare_write_connect(con);
2041 if (ret < 0)
2042 return ret;
2043 prepare_read_connect(con);
2044
2045 /* Tell ceph about it. */
2046 mutex_unlock(&con->mutex);
2047 pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
2048 if (con->ops->peer_reset)
2049 con->ops->peer_reset(con);
2050 mutex_lock(&con->mutex);
2051 if (con->state != CON_STATE_NEGOTIATING)
2052 return -EAGAIN;
2053 break;
2054
2055 case CEPH_MSGR_TAG_RETRY_SESSION:
2056 /*
2057 * If we sent a smaller connect_seq than the peer has, try
2058 * again with a larger value.
2059 */
2060 dout("process_connect got RETRY_SESSION my seq %u, peer %u\n",
2061 le32_to_cpu(con->out_connect.connect_seq),
2062 le32_to_cpu(con->in_reply.connect_seq));
2063 con->connect_seq = le32_to_cpu(con->in_reply.connect_seq);
2064 con_out_kvec_reset(con);
2065 ret = prepare_write_connect(con);
2066 if (ret < 0)
2067 return ret;
2068 prepare_read_connect(con);
2069 break;
2070
2071 case CEPH_MSGR_TAG_RETRY_GLOBAL:
2072 /*
2073 * If we sent a smaller global_seq than the peer has, try
2074 * again with a larger value.
2075 */
2076 dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
2077 con->peer_global_seq,
2078 le32_to_cpu(con->in_reply.global_seq));
2079 get_global_seq(con->msgr,
2080 le32_to_cpu(con->in_reply.global_seq));
2081 con_out_kvec_reset(con);
2082 ret = prepare_write_connect(con);
2083 if (ret < 0)
2084 return ret;
2085 prepare_read_connect(con);
2086 break;
2087
2088 case CEPH_MSGR_TAG_READY:
2089 if (req_feat & ~server_feat) {
2090 pr_err("%s%lld %s protocol feature mismatch,"
2091 " my required %llx > server's %llx, need %llx\n",
2092 ENTITY_NAME(con->peer_name),
2093 ceph_pr_addr(&con->peer_addr.in_addr),
2094 req_feat, server_feat, req_feat & ~server_feat);
2095 con->error_msg = "missing required protocol features";
2096 reset_connection(con);
2097 return -1;
2098 }
2099
2100 WARN_ON(con->state != CON_STATE_NEGOTIATING);
2101 con->state = CON_STATE_OPEN;
2102
2103 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
2104 con->connect_seq++;
2105 con->peer_features = server_feat;
2106 dout("process_connect got READY gseq %d cseq %d (%d)\n",
2107 con->peer_global_seq,
2108 le32_to_cpu(con->in_reply.connect_seq),
2109 con->connect_seq);
2110 WARN_ON(con->connect_seq !=
2111 le32_to_cpu(con->in_reply.connect_seq));
2112
2113 if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
2114 con_flag_set(con, CON_FLAG_LOSSYTX);
2115
2116 con->delay = 0; /* reset backoff memory */
2117
2118 prepare_read_tag(con);
2119 break;
2120
2121 case CEPH_MSGR_TAG_WAIT:
2122 /*
2123 * If there is a connection race (we are opening
2124 * connections to each other), one of us may just have
2125 * to WAIT. This shouldn't happen if we are the
2126 * client.
2127 */
2128 pr_err("process_connect got WAIT as client\n");
2129 con->error_msg = "protocol error, got WAIT as client";
2130 return -1;
2131
2132 default:
2133 pr_err("connect protocol error, will retry\n");
2134 con->error_msg = "protocol error, garbage tag during connect";
2135 return -1;
2136 }
2137 return 0;
2138 }
2139
2140
2141 /*
2142 * read (part of) an ack
2143 */
2144 static int read_partial_ack(struct ceph_connection *con)
2145 {
2146 int size = sizeof (con->in_temp_ack);
2147 int end = size;
2148
2149 return read_partial(con, end, size, &con->in_temp_ack);
2150 }
2151
2152
2153 /*
2154 * We can finally discard anything that's been acked.
2155 */
2156 static void process_ack(struct ceph_connection *con)
2157 {
2158 struct ceph_msg *m;
2159 u64 ack = le64_to_cpu(con->in_temp_ack);
2160 u64 seq;
2161
2162 while (!list_empty(&con->out_sent)) {
2163 m = list_first_entry(&con->out_sent, struct ceph_msg,
2164 list_head);
2165 seq = le64_to_cpu(m->hdr.seq);
2166 if (seq > ack)
2167 break;
2168 dout("got ack for seq %llu type %d at %p\n", seq,
2169 le16_to_cpu(m->hdr.type), m);
2170 m->ack_stamp = jiffies;
2171 ceph_msg_remove(m);
2172 }
2173 prepare_read_tag(con);
2174 }
2175
2176
2177
2178
2179 static int read_partial_message_section(struct ceph_connection *con,
2180 struct kvec *section,
2181 unsigned int sec_len, u32 *crc)
2182 {
2183 int ret, left;
2184
2185 BUG_ON(!section);
2186
2187 while (section->iov_len < sec_len) {
2188 BUG_ON(section->iov_base == NULL);
2189 left = sec_len - section->iov_len;
2190 ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
2191 section->iov_len, left);
2192 if (ret <= 0)
2193 return ret;
2194 section->iov_len += ret;
2195 }
2196 if (section->iov_len == sec_len)
2197 *crc = crc32c(0, section->iov_base, section->iov_len);
2198
2199 return 1;
2200 }
2201
2202 static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip);
2203
2204 static int read_partial_message_pages(struct ceph_connection *con,
2205 struct page **pages,
2206 unsigned int data_len, bool do_datacrc)
2207 {
2208 struct ceph_msg_pos *msg_pos = &con->in_msg_pos;
2209 struct page *page;
2210 size_t page_offset;
2211 size_t length;
2212 unsigned int left;
2213 int ret;
2214
2215 /* (page) data */
2216 BUG_ON(pages == NULL);
2217 page = pages[msg_pos->page];
2218 page_offset = msg_pos->page_pos;
2219 BUG_ON(msg_pos->data_pos >= data_len);
2220 left = data_len - msg_pos->data_pos;
2221 BUG_ON(page_offset >= PAGE_SIZE);
2222 length = min_t(unsigned int, PAGE_SIZE - page_offset, left);
2223
2224 ret = ceph_tcp_recvpage(con->sock, page, page_offset, length);
2225 if (ret <= 0)
2226 return ret;
2227
2228 if (do_datacrc)
2229 con->in_data_crc = ceph_crc32c_page(con->in_data_crc, page,
2230 page_offset, ret);
2231
2232 in_msg_pos_next(con, length, ret);
2233
2234 return ret;
2235 }
2236
2237 #ifdef CONFIG_BLOCK
2238 static int read_partial_message_bio(struct ceph_connection *con,
2239 unsigned int data_len, bool do_datacrc)
2240 {
2241 struct ceph_msg *msg = con->in_msg;
2242 struct ceph_msg_pos *msg_pos = &con->in_msg_pos;
2243 struct bio_vec *bv;
2244 struct page *page;
2245 size_t page_offset;
2246 size_t length;
2247 unsigned int left;
2248 int ret;
2249
2250 BUG_ON(!msg);
2251 BUG_ON(!msg->b.bio_iter);
2252 bv = bio_iovec_idx(msg->b.bio_iter, msg->b.bio_seg);
2253 page = bv->bv_page;
2254 page_offset = bv->bv_offset + msg_pos->page_pos;
2255 BUG_ON(msg_pos->data_pos >= data_len);
2256 left = data_len - msg_pos->data_pos;
2257 BUG_ON(msg_pos->page_pos >= bv->bv_len);
2258 length = min_t(unsigned int, bv->bv_len - msg_pos->page_pos, left);
2259
2260 ret = ceph_tcp_recvpage(con->sock, page, page_offset, length);
2261 if (ret <= 0)
2262 return ret;
2263
2264 if (do_datacrc)
2265 con->in_data_crc = ceph_crc32c_page(con->in_data_crc, page,
2266 page_offset, ret);
2267
2268 in_msg_pos_next(con, length, ret);
2269
2270 return ret;
2271 }
2272 #endif
2273
2274 static int read_partial_msg_data(struct ceph_connection *con)
2275 {
2276 struct ceph_msg *msg = con->in_msg;
2277 struct ceph_msg_pos *msg_pos = &con->in_msg_pos;
2278 const bool do_datacrc = !con->msgr->nocrc;
2279 unsigned int data_len;
2280 int ret;
2281
2282 BUG_ON(!msg);
2283
2284 data_len = le32_to_cpu(con->in_hdr.data_len);
2285 while (msg_pos->data_pos < data_len) {
2286 if (ceph_msg_has_pages(msg)) {
2287 ret = read_partial_message_pages(con, msg->p.pages,
2288 data_len, do_datacrc);
2289 if (ret <= 0)
2290 return ret;
2291 #ifdef CONFIG_BLOCK
2292 } else if (ceph_msg_has_bio(msg)) {
2293 ret = read_partial_message_bio(con,
2294 data_len, do_datacrc);
2295 if (ret <= 0)
2296 return ret;
2297 #endif
2298 } else {
2299 BUG();
2300 }
2301 }
2302
2303 return 1; /* must return > 0 to indicate success */
2304 }
2305
2306 /*
2307 * read (part of) a message.
2308 */
2309 static int read_partial_message(struct ceph_connection *con)
2310 {
2311 struct ceph_msg *m = con->in_msg;
2312 int size;
2313 int end;
2314 int ret;
2315 unsigned int front_len, middle_len, data_len;
2316 bool do_datacrc = !con->msgr->nocrc;
2317 u64 seq;
2318 u32 crc;
2319
2320 dout("read_partial_message con %p msg %p\n", con, m);
2321
2322 /* header */
2323 size = sizeof (con->in_hdr);
2324 end = size;
2325 ret = read_partial(con, end, size, &con->in_hdr);
2326 if (ret <= 0)
2327 return ret;
2328
2329 crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc));
2330 if (cpu_to_le32(crc) != con->in_hdr.crc) {
2331 pr_err("read_partial_message bad hdr "
2332 " crc %u != expected %u\n",
2333 crc, con->in_hdr.crc);
2334 return -EBADMSG;
2335 }
2336
2337 front_len = le32_to_cpu(con->in_hdr.front_len);
2338 if (front_len > CEPH_MSG_MAX_FRONT_LEN)
2339 return -EIO;
2340 middle_len = le32_to_cpu(con->in_hdr.middle_len);
2341 if (middle_len > CEPH_MSG_MAX_MIDDLE_LEN)
2342 return -EIO;
2343 data_len = le32_to_cpu(con->in_hdr.data_len);
2344 if (data_len > CEPH_MSG_MAX_DATA_LEN)
2345 return -EIO;
2346
2347 /* verify seq# */
2348 seq = le64_to_cpu(con->in_hdr.seq);
2349 if ((s64)seq - (s64)con->in_seq < 1) {
2350 pr_info("skipping %s%lld %s seq %lld expected %lld\n",
2351 ENTITY_NAME(con->peer_name),
2352 ceph_pr_addr(&con->peer_addr.in_addr),
2353 seq, con->in_seq + 1);
2354 con->in_base_pos = -front_len - middle_len - data_len -
2355 sizeof(m->footer);
2356 con->in_tag = CEPH_MSGR_TAG_READY;
2357 return 0;
2358 } else if ((s64)seq - (s64)con->in_seq > 1) {
2359 pr_err("read_partial_message bad seq %lld expected %lld\n",
2360 seq, con->in_seq + 1);
2361 con->error_msg = "bad message sequence # for incoming message";
2362 return -EBADMSG;
2363 }
2364
2365 /* allocate message? */
2366 if (!con->in_msg) {
2367 int skip = 0;
2368
2369 dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
2370 front_len, data_len);
2371 ret = ceph_con_in_msg_alloc(con, &skip);
2372 if (ret < 0)
2373 return ret;
2374 if (skip) {
2375 /* skip this message */
2376 dout("alloc_msg said skip message\n");
2377 BUG_ON(con->in_msg);
2378 con->in_base_pos = -front_len - middle_len - data_len -
2379 sizeof(m->footer);
2380 con->in_tag = CEPH_MSGR_TAG_READY;
2381 con->in_seq++;
2382 return 0;
2383 }
2384
2385 BUG_ON(!con->in_msg);
2386 BUG_ON(con->in_msg->con != con);
2387 m = con->in_msg;
2388 m->front.iov_len = 0; /* haven't read it yet */
2389 if (m->middle)
2390 m->middle->vec.iov_len = 0;
2391
2392 /* prepare for data payload, if any */
2393
2394 if (data_len)
2395 prepare_message_data(con->in_msg, &con->in_msg_pos);
2396 }
2397
2398 /* front */
2399 ret = read_partial_message_section(con, &m->front, front_len,
2400 &con->in_front_crc);
2401 if (ret <= 0)
2402 return ret;
2403
2404 /* middle */
2405 if (m->middle) {
2406 ret = read_partial_message_section(con, &m->middle->vec,
2407 middle_len,
2408 &con->in_middle_crc);
2409 if (ret <= 0)
2410 return ret;
2411 }
2412
2413 /* (page) data */
2414 if (data_len) {
2415 ret = read_partial_msg_data(con);
2416 if (ret <= 0)
2417 return ret;
2418 }
2419
2420 /* footer */
2421 size = sizeof (m->footer);
2422 end += size;
2423 ret = read_partial(con, end, size, &m->footer);
2424 if (ret <= 0)
2425 return ret;
2426
2427 dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
2428 m, front_len, m->footer.front_crc, middle_len,
2429 m->footer.middle_crc, data_len, m->footer.data_crc);
2430
2431 /* crc ok? */
2432 if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
2433 pr_err("read_partial_message %p front crc %u != exp. %u\n",
2434 m, con->in_front_crc, m->footer.front_crc);
2435 return -EBADMSG;
2436 }
2437 if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
2438 pr_err("read_partial_message %p middle crc %u != exp %u\n",
2439 m, con->in_middle_crc, m->footer.middle_crc);
2440 return -EBADMSG;
2441 }
2442 if (do_datacrc &&
2443 (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
2444 con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
2445 pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
2446 con->in_data_crc, le32_to_cpu(m->footer.data_crc));
2447 return -EBADMSG;
2448 }
2449
2450 return 1; /* done! */
2451 }
2452
2453 /*
2454 * Process message. This happens in the worker thread. The callback should
2455 * be careful not to do anything that waits on other incoming messages or it
2456 * may deadlock.
2457 */
2458 static void process_message(struct ceph_connection *con)
2459 {
2460 struct ceph_msg *msg;
2461
2462 BUG_ON(con->in_msg->con != con);
2463 con->in_msg->con = NULL;
2464 msg = con->in_msg;
2465 con->in_msg = NULL;
2466 con->ops->put(con);
2467
2468 /* if first message, set peer_name */
2469 if (con->peer_name.type == 0)
2470 con->peer_name = msg->hdr.src;
2471
2472 con->in_seq++;
2473 mutex_unlock(&con->mutex);
2474
2475 dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
2476 msg, le64_to_cpu(msg->hdr.seq),
2477 ENTITY_NAME(msg->hdr.src),
2478 le16_to_cpu(msg->hdr.type),
2479 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
2480 le32_to_cpu(msg->hdr.front_len),
2481 le32_to_cpu(msg->hdr.data_len),
2482 con->in_front_crc, con->in_middle_crc, con->in_data_crc);
2483 con->ops->dispatch(con, msg);
2484
2485 mutex_lock(&con->mutex);
2486 }
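/*
 * Illustrative sketch only (not part of this file): the shape of a
 * con->ops->dispatch callback as process_message() above expects it.  It
 * is called from the messenger work function with con->mutex dropped, so
 * it must not wait for further messages on this connection.  The name is
 * hypothetical; a real dispatcher (mon/osd/mds client) does much more.
 */
static void __maybe_unused example_dispatch(struct ceph_connection *con,
					    struct ceph_msg *msg)
{
	int type = le16_to_cpu(msg->hdr.type);

	dout("example_dispatch con %p msg %p type %d (%s)\n",
	     con, msg, type, ceph_msg_type_name(type));

	/* consume the message quickly, then drop the ref handed to us */
	ceph_msg_put(msg);
}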
2487
2488
2489 /*
2490 * Write something to the socket. Called in a worker thread when the
2491 * socket appears to be writeable and we have something ready to send.
2492 */
2493 static int try_write(struct ceph_connection *con)
2494 {
2495 int ret = 1;
2496
2497 dout("try_write start %p state %lu\n", con, con->state);
2498
2499 more:
2500 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
2501
2502 /* open the socket first? */
2503 if (con->state == CON_STATE_PREOPEN) {
2504 BUG_ON(con->sock);
2505 con->state = CON_STATE_CONNECTING;
2506
2507 con_out_kvec_reset(con);
2508 prepare_write_banner(con);
2509 prepare_read_banner(con);
2510
2511 BUG_ON(con->in_msg);
2512 con->in_tag = CEPH_MSGR_TAG_READY;
2513 dout("try_write initiating connect on %p new state %lu\n",
2514 con, con->state);
2515 ret = ceph_tcp_connect(con);
2516 if (ret < 0) {
2517 con->error_msg = "connect error";
2518 goto out;
2519 }
2520 }
2521
2522 more_kvec:
2523 /* kvec data queued? */
2524 if (con->out_skip) {
2525 ret = write_partial_skip(con);
2526 if (ret <= 0)
2527 goto out;
2528 }
2529 if (con->out_kvec_left) {
2530 ret = write_partial_kvec(con);
2531 if (ret <= 0)
2532 goto out;
2533 }
2534
2535 /* msg pages? */
2536 if (con->out_msg) {
2537 if (con->out_msg_done) {
2538 ceph_msg_put(con->out_msg);
2539 con->out_msg = NULL; /* we're done with this one */
2540 goto do_next;
2541 }
2542
2543 ret = write_partial_message_data(con);
2544 if (ret == 1)
2545 goto more_kvec; /* we need to send the footer, too! */
2546 if (ret == 0)
2547 goto out;
2548 if (ret < 0) {
2549 dout("try_write write_partial_message_data err %d\n",
2550 ret);
2551 goto out;
2552 }
2553 }
2554
2555 do_next:
2556 if (con->state == CON_STATE_OPEN) {
2557 /* is anything else pending? */
2558 if (!list_empty(&con->out_queue)) {
2559 prepare_write_message(con);
2560 goto more;
2561 }
2562 if (con->in_seq > con->in_seq_acked) {
2563 prepare_write_ack(con);
2564 goto more;
2565 }
2566 if (con_flag_test_and_clear(con, CON_FLAG_KEEPALIVE_PENDING)) {
2567 prepare_write_keepalive(con);
2568 goto more;
2569 }
2570 }
2571
2572 /* Nothing to do! */
2573 con_flag_clear(con, CON_FLAG_WRITE_PENDING);
2574 dout("try_write nothing else to write.\n");
2575 ret = 0;
2576 out:
2577 dout("try_write done on %p ret %d\n", con, ret);
2578 return ret;
2579 }
2580
2581
2582
2583 /*
2584 * Read what we can from the socket.
2585 */
2586 static int try_read(struct ceph_connection *con)
2587 {
2588 int ret = -1;
2589
2590 more:
2591 dout("try_read start on %p state %lu\n", con, con->state);
2592 if (con->state != CON_STATE_CONNECTING &&
2593 con->state != CON_STATE_NEGOTIATING &&
2594 con->state != CON_STATE_OPEN)
2595 return 0;
2596
2597 BUG_ON(!con->sock);
2598
2599 dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
2600 con->in_base_pos);
2601
2602 if (con->state == CON_STATE_CONNECTING) {
2603 dout("try_read connecting\n");
2604 ret = read_partial_banner(con);
2605 if (ret <= 0)
2606 goto out;
2607 ret = process_banner(con);
2608 if (ret < 0)
2609 goto out;
2610
2611 con->state = CON_STATE_NEGOTIATING;
2612
2613 /*
2614 * Received banner is good, exchange connection info.
2615 * Do not reset out_kvec, as sending our banner raced
2616 * with receiving peer banner after connect completed.
2617 */
2618 ret = prepare_write_connect(con);
2619 if (ret < 0)
2620 goto out;
2621 prepare_read_connect(con);
2622
2623 /* Send connection info before awaiting response */
2624 goto out;
2625 }
2626
2627 if (con->state == CON_STATE_NEGOTIATING) {
2628 dout("try_read negotiating\n");
2629 ret = read_partial_connect(con);
2630 if (ret <= 0)
2631 goto out;
2632 ret = process_connect(con);
2633 if (ret < 0)
2634 goto out;
2635 goto more;
2636 }
2637
2638 WARN_ON(con->state != CON_STATE_OPEN);
2639
2640 if (con->in_base_pos < 0) {
2641 /*
2642 * skipping + discarding content.
2643 *
2644 * FIXME: there must be a better way to do this!
2645 */
2646 static char buf[SKIP_BUF_SIZE];
2647 int skip = min((int) sizeof (buf), -con->in_base_pos);
2648
2649 dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
2650 ret = ceph_tcp_recvmsg(con->sock, buf, skip);
2651 if (ret <= 0)
2652 goto out;
2653 con->in_base_pos += ret;
2654 if (con->in_base_pos)
2655 goto more;
2656 }
2657 if (con->in_tag == CEPH_MSGR_TAG_READY) {
2658 /*
2659 * what's next?
2660 */
2661 ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
2662 if (ret <= 0)
2663 goto out;
2664 dout("try_read got tag %d\n", (int)con->in_tag);
2665 switch (con->in_tag) {
2666 case CEPH_MSGR_TAG_MSG:
2667 prepare_read_message(con);
2668 break;
2669 case CEPH_MSGR_TAG_ACK:
2670 prepare_read_ack(con);
2671 break;
2672 case CEPH_MSGR_TAG_CLOSE:
2673 con_close_socket(con);
2674 con->state = CON_STATE_CLOSED;
2675 goto out;
2676 default:
2677 goto bad_tag;
2678 }
2679 }
2680 if (con->in_tag == CEPH_MSGR_TAG_MSG) {
2681 ret = read_partial_message(con);
2682 if (ret <= 0) {
2683 switch (ret) {
2684 case -EBADMSG:
2685 con->error_msg = "bad crc";
2686 ret = -EIO;
2687 break;
2688 case -EIO:
2689 con->error_msg = "io error";
2690 break;
2691 }
2692 goto out;
2693 }
2694 if (con->in_tag == CEPH_MSGR_TAG_READY)
2695 goto more;
2696 process_message(con);
2697 if (con->state == CON_STATE_OPEN)
2698 prepare_read_tag(con);
2699 goto more;
2700 }
2701 if (con->in_tag == CEPH_MSGR_TAG_ACK) {
2702 ret = read_partial_ack(con);
2703 if (ret <= 0)
2704 goto out;
2705 process_ack(con);
2706 goto more;
2707 }
2708
2709 out:
2710 dout("try_read done on %p ret %d\n", con, ret);
2711 return ret;
2712
2713 bad_tag:
2714 pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
2715 con->error_msg = "protocol error, garbage tag";
2716 ret = -1;
2717 goto out;
2718 }
2719
2720
2721 /*
2722 * Atomically queue work on a connection after the specified delay.
2723 * Bump @con reference to avoid races with connection teardown.
2724 * Returns 0 if work was queued, or an error code otherwise.
2725 */
2726 static int queue_con_delay(struct ceph_connection *con, unsigned long delay)
2727 {
2728 if (!con->ops->get(con)) {
2729 dout("%s %p ref count 0\n", __func__, con);
2730
2731 return -ENOENT;
2732 }
2733
2734 if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) {
2735 dout("%s %p - already queued\n", __func__, con);
2736 con->ops->put(con);
2737
2738 return -EBUSY;
2739 }
2740
2741 dout("%s %p %lu\n", __func__, con, delay);
2742
2743 return 0;
2744 }
2745
2746 static void queue_con(struct ceph_connection *con)
2747 {
2748 (void) queue_con_delay(con, 0);
2749 }
2750
2751 static bool con_sock_closed(struct ceph_connection *con)
2752 {
2753 if (!con_flag_test_and_clear(con, CON_FLAG_SOCK_CLOSED))
2754 return false;
2755
2756 #define CASE(x) \
2757 case CON_STATE_ ## x: \
2758 con->error_msg = "socket closed (con state " #x ")"; \
2759 break;
2760
2761 switch (con->state) {
2762 CASE(CLOSED);
2763 CASE(PREOPEN);
2764 CASE(CONNECTING);
2765 CASE(NEGOTIATING);
2766 CASE(OPEN);
2767 CASE(STANDBY);
2768 default:
2769 pr_warning("%s con %p unrecognized state %lu\n",
2770 __func__, con, con->state);
2771 con->error_msg = "unrecognized con state";
2772 BUG();
2773 break;
2774 }
2775 #undef CASE
2776
2777 return true;
2778 }
2779
2780 static bool con_backoff(struct ceph_connection *con)
2781 {
2782 int ret;
2783
2784 if (!con_flag_test_and_clear(con, CON_FLAG_BACKOFF))
2785 return false;
2786
2787 ret = queue_con_delay(con, round_jiffies_relative(con->delay));
2788 if (ret) {
2789 dout("%s: con %p FAILED to back off %lu\n", __func__,
2790 con, con->delay);
2791 BUG_ON(ret == -ENOENT);
2792 con_flag_set(con, CON_FLAG_BACKOFF);
2793 }
2794
2795 return true;
2796 }
2797
2798 /* Finish fault handling; con->mutex must *not* be held here */
2799
2800 static void con_fault_finish(struct ceph_connection *con)
2801 {
2802 /*
2803 * in case we faulted due to authentication, invalidate our
2804 * current tickets so that we can get new ones.
2805 */
2806 if (con->auth_retry && con->ops->invalidate_authorizer) {
2807 dout("calling invalidate_authorizer()\n");
2808 con->ops->invalidate_authorizer(con);
2809 }
2810
2811 if (con->ops->fault)
2812 con->ops->fault(con);
2813 }
2814
2815 /*
2816 * Do some work on a connection. Drop a connection ref when we're done.
2817 */
2818 static void con_work(struct work_struct *work)
2819 {
2820 struct ceph_connection *con = container_of(work, struct ceph_connection,
2821 work.work);
2822 bool fault;
2823
2824 mutex_lock(&con->mutex);
2825 while (true) {
2826 int ret;
2827
2828 if ((fault = con_sock_closed(con))) {
2829 dout("%s: con %p SOCK_CLOSED\n", __func__, con);
2830 break;
2831 }
2832 if (con_backoff(con)) {
2833 dout("%s: con %p BACKOFF\n", __func__, con);
2834 break;
2835 }
2836 if (con->state == CON_STATE_STANDBY) {
2837 dout("%s: con %p STANDBY\n", __func__, con);
2838 break;
2839 }
2840 if (con->state == CON_STATE_CLOSED) {
2841 dout("%s: con %p CLOSED\n", __func__, con);
2842 BUG_ON(con->sock);
2843 break;
2844 }
2845 if (con->state == CON_STATE_PREOPEN) {
2846 dout("%s: con %p PREOPEN\n", __func__, con);
2847 BUG_ON(con->sock);
2848 }
2849
2850 ret = try_read(con);
2851 if (ret < 0) {
2852 if (ret == -EAGAIN)
2853 continue;
2854 con->error_msg = "socket error on read";
2855 fault = true;
2856 break;
2857 }
2858
2859 ret = try_write(con);
2860 if (ret < 0) {
2861 if (ret == -EAGAIN)
2862 continue;
2863 con->error_msg = "socket error on write";
2864 fault = true;
2865 }
2866
2867 break; /* If we make it to here, we're done */
2868 }
2869 if (fault)
2870 con_fault(con);
2871 mutex_unlock(&con->mutex);
2872
2873 if (fault)
2874 con_fault_finish(con);
2875
2876 con->ops->put(con);
2877 }
2878
2879 /*
2880 * Generic error/fault handler. A retry mechanism is used with
2881 * exponential backoff
2882 */
2883 static void con_fault(struct ceph_connection *con)
2884 {
2885 pr_warning("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
2886 ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
2887 dout("fault %p state %lu to peer %s\n",
2888 con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));
2889
2890 WARN_ON(con->state != CON_STATE_CONNECTING &&
2891 con->state != CON_STATE_NEGOTIATING &&
2892 con->state != CON_STATE_OPEN);
2893
2894 con_close_socket(con);
2895
2896 if (con_flag_test(con, CON_FLAG_LOSSYTX)) {
2897 dout("fault on LOSSYTX channel, marking CLOSED\n");
2898 con->state = CON_STATE_CLOSED;
2899 return;
2900 }
2901
2902 if (con->in_msg) {
2903 BUG_ON(con->in_msg->con != con);
2904 con->in_msg->con = NULL;
2905 ceph_msg_put(con->in_msg);
2906 con->in_msg = NULL;
2907 con->ops->put(con);
2908 }
2909
2910 /* Requeue anything that hasn't been acked */
2911 list_splice_init(&con->out_sent, &con->out_queue);
2912
2913 /* If there are no messages queued or keepalive pending, place
2914 * the connection in a STANDBY state */
2915 if (list_empty(&con->out_queue) &&
2916 !con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING)) {
2917 dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
2918 con_flag_clear(con, CON_FLAG_WRITE_PENDING);
2919 con->state = CON_STATE_STANDBY;
2920 } else {
2921 /* retry after a delay. */
2922 con->state = CON_STATE_PREOPEN;
2923 if (con->delay == 0)
2924 con->delay = BASE_DELAY_INTERVAL;
2925 else if (con->delay < MAX_DELAY_INTERVAL)
2926 con->delay *= 2;
2927 con_flag_set(con, CON_FLAG_BACKOFF);
2928 queue_con(con);
2929 }
2930 }
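/*
 * Illustrative sketch only (not part of this file): the retry delay
 * schedule that con_fault() above produces.  Starting from zero the delay
 * becomes BASE_DELAY_INTERVAL and then doubles on each successive fault
 * until it reaches MAX_DELAY_INTERVAL (both macros are defined earlier in
 * this file).
 */
static unsigned long __maybe_unused
example_next_fault_delay(unsigned long delay)
{
	if (delay == 0)
		return BASE_DELAY_INTERVAL;
	if (delay < MAX_DELAY_INTERVAL)
		return delay * 2;
	return delay;			/* capped: no further growth */
}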
2931
2932
2933
2934 /*
2935 * initialize a new messenger instance
2936 */
2937 void ceph_messenger_init(struct ceph_messenger *msgr,
2938 struct ceph_entity_addr *myaddr,
2939 u32 supported_features,
2940 u32 required_features,
2941 bool nocrc)
2942 {
2943 msgr->supported_features = supported_features;
2944 msgr->required_features = required_features;
2945
2946 spin_lock_init(&msgr->global_seq_lock);
2947
2948 if (myaddr)
2949 msgr->inst.addr = *myaddr;
2950
2951 /* select a random nonce */
2952 msgr->inst.addr.type = 0;
2953 get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
2954 encode_my_addr(msgr);
2955 msgr->nocrc = nocrc;
2956
2957 atomic_set(&msgr->stopping, 0);
2958
2959 dout("%s %p\n", __func__, msgr);
2960 }
2961 EXPORT_SYMBOL(ceph_messenger_init);
2962
2963 static void clear_standby(struct ceph_connection *con)
2964 {
2965 /* come back from STANDBY? */
2966 if (con->state == CON_STATE_STANDBY) {
2967 dout("clear_standby %p and ++connect_seq\n", con);
2968 con->state = CON_STATE_PREOPEN;
2969 con->connect_seq++;
2970 WARN_ON(con_flag_test(con, CON_FLAG_WRITE_PENDING));
2971 WARN_ON(con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING));
2972 }
2973 }
2974
2975 /*
2976 * Queue up an outgoing message on the given connection.
2977 */
2978 void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
2979 {
2980 /* set src+dst */
2981 msg->hdr.src = con->msgr->inst.name;
2982 BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
2983 msg->needs_out_seq = true;
2984
2985 mutex_lock(&con->mutex);
2986
2987 if (con->state == CON_STATE_CLOSED) {
2988 dout("con_send %p closed, dropping %p\n", con, msg);
2989 ceph_msg_put(msg);
2990 mutex_unlock(&con->mutex);
2991 return;
2992 }
2993
2994 BUG_ON(msg->con != NULL);
2995 msg->con = con->ops->get(con);
2996 BUG_ON(msg->con == NULL);
2997
2998 BUG_ON(!list_empty(&msg->list_head));
2999 list_add_tail(&msg->list_head, &con->out_queue);
3000 dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
3001 ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
3002 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
3003 le32_to_cpu(msg->hdr.front_len),
3004 le32_to_cpu(msg->hdr.middle_len),
3005 le32_to_cpu(msg->hdr.data_len));
3006
3007 clear_standby(con);
3008 mutex_unlock(&con->mutex);
3009
3010 /* if there wasn't anything waiting to send before, queue
3011 * new work */
3012 if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
3013 queue_con(con);
3014 }
3015 EXPORT_SYMBOL(ceph_con_send);
3016
3017 /*
3018 * Revoke a message that was previously queued for send
3019 */
3020 void ceph_msg_revoke(struct ceph_msg *msg)
3021 {
3022 struct ceph_connection *con = msg->con;
3023
3024 if (!con)
3025 return; /* Message not in our possession */
3026
3027 mutex_lock(&con->mutex);
3028 if (!list_empty(&msg->list_head)) {
3029 dout("%s %p msg %p - was on queue\n", __func__, con, msg);
3030 list_del_init(&msg->list_head);
3031 BUG_ON(msg->con == NULL);
3032 msg->con->ops->put(msg->con);
3033 msg->con = NULL;
3034 msg->hdr.seq = 0;
3035
3036 ceph_msg_put(msg);
3037 }
3038 if (con->out_msg == msg) {
3039 dout("%s %p msg %p - was sending\n", __func__, con, msg);
3040 con->out_msg = NULL;
3041 if (con->out_kvec_is_msg) {
3042 con->out_skip = con->out_kvec_bytes;
3043 con->out_kvec_is_msg = false;
3044 }
3045 msg->hdr.seq = 0;
3046
3047 ceph_msg_put(msg);
3048 }
3049 mutex_unlock(&con->mutex);
3050 }
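/*
 * Illustrative sketch only (not part of this file): a caller such as
 * osd_client cancelling a request it previously queued with
 * ceph_con_send().  ceph_msg_revoke() only detaches the message from the
 * connection; assuming the caller kept its own reference (as osd_client
 * does), it can then re-send the message or drop that reference.
 */
static void __maybe_unused example_cancel_request(struct ceph_msg *req_msg)
{
	ceph_msg_revoke(req_msg);
	ceph_msg_put(req_msg);		/* drop the caller's own reference */
}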
3051
3052 /*
3053 * Revoke a message that we may be reading data into
3054 */
3055 void ceph_msg_revoke_incoming(struct ceph_msg *msg)
3056 {
3057 struct ceph_connection *con;
3058
3059 BUG_ON(msg == NULL);
3060 if (!msg->con) {
3061 dout("%s msg %p null con\n", __func__, msg);
3062
3063 return; /* Message not in our possession */
3064 }
3065
3066 con = msg->con;
3067 mutex_lock(&con->mutex);
3068 if (con->in_msg == msg) {
3069 unsigned int front_len = le32_to_cpu(con->in_hdr.front_len);
3070 unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len);
3071 unsigned int data_len = le32_to_cpu(con->in_hdr.data_len);
3072
3073 /* skip rest of message */
3074 dout("%s %p msg %p revoked\n", __func__, con, msg);
3075 con->in_base_pos = con->in_base_pos -
3076 sizeof(struct ceph_msg_header) -
3077 front_len -
3078 middle_len -
3079 data_len -
3080 sizeof(struct ceph_msg_footer);
3081 ceph_msg_put(con->in_msg);
3082 con->in_msg = NULL;
3083 con->in_tag = CEPH_MSGR_TAG_READY;
3084 con->in_seq++;
3085 } else {
3086 dout("%s %p in_msg %p msg %p no-op\n",
3087 __func__, con, con->in_msg, msg);
3088 }
3089 mutex_unlock(&con->mutex);
3090 }
3091
3092 /*
3093 * Queue a keepalive byte to ensure the tcp connection is alive.
3094 */
3095 void ceph_con_keepalive(struct ceph_connection *con)
3096 {
3097 dout("con_keepalive %p\n", con);
3098 mutex_lock(&con->mutex);
3099 clear_standby(con);
3100 mutex_unlock(&con->mutex);
3101 if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 &&
3102 con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
3103 queue_con(con);
3104 }
3105 EXPORT_SYMBOL(ceph_con_keepalive);
3106
3107 static void ceph_msg_data_init(struct ceph_msg_data *data)
3108 {
3109 data->type = CEPH_MSG_DATA_NONE;
3110 }
3111
3112 void ceph_msg_data_set_pages(struct ceph_msg *msg, struct page **pages,
3113 size_t length, size_t alignment)
3114 {
3115 BUG_ON(!pages);
3116 BUG_ON(!length);
3117 BUG_ON(msg->p.type != CEPH_MSG_DATA_NONE);
3118
3119 msg->p.type = CEPH_MSG_DATA_PAGES;
3120 msg->p.pages = pages;
3121 msg->p.length = length;
3122 msg->p.alignment = alignment & ~PAGE_MASK;
3123 }
3124 EXPORT_SYMBOL(ceph_msg_data_set_pages);
3125
3126 void ceph_msg_data_set_pagelist(struct ceph_msg *msg,
3127 struct ceph_pagelist *pagelist)
3128 {
3129 BUG_ON(!pagelist);
3130 BUG_ON(!pagelist->length);
3131 BUG_ON(msg->l.type != CEPH_MSG_DATA_NONE);
3132
3133 msg->l.type = CEPH_MSG_DATA_PAGELIST;
3134 msg->l.pagelist = pagelist;
3135 }
3136 EXPORT_SYMBOL(ceph_msg_data_set_pagelist);
3137
3138 void ceph_msg_data_set_bio(struct ceph_msg *msg, struct bio *bio)
3139 {
3140 BUG_ON(!bio);
3141 BUG_ON(msg->b.type != CEPH_MSG_DATA_NONE);
3142
3143 msg->b.type = CEPH_MSG_DATA_BIO;
3144 msg->b.bio = bio;
3145 }
3146 EXPORT_SYMBOL(ceph_msg_data_set_bio);
3147
3148 void ceph_msg_data_set_trail(struct ceph_msg *msg, struct ceph_pagelist *trail)
3149 {
3150 BUG_ON(!trail);
3151 BUG_ON(!trail->length);
3152 BUG_ON(msg->t.type != CEPH_MSG_DATA_NONE);
3153
3154 msg->t.type = CEPH_MSG_DATA_PAGELIST;
3155 msg->t.pagelist = trail;
3156 }
3157 EXPORT_SYMBOL(ceph_msg_data_set_trail);
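/*
 * Illustrative sketch only (not part of this file): building an outgoing
 * message, attaching a page array as its data payload with
 * ceph_msg_data_set_pages(), and queueing it with ceph_con_send().  The
 * message type, lengths and page array are hypothetical caller state, and
 * filling in hdr.data_len is assumed here to be the caller's job.
 */
static int __maybe_unused example_send_pages(struct ceph_connection *con,
					     int type, int front_len,
					     struct page **pages,
					     size_t length, size_t offset)
{
	struct ceph_msg *msg;

	msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
	if (!msg)
		return -ENOMEM;

	msg->hdr.data_len = cpu_to_le32(length);
	ceph_msg_data_set_pages(msg, pages, length, offset);

	ceph_con_send(con, msg);	/* the connection takes our msg ref */
	return 0;
}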
3158
3159 /*
3160 * construct a new message with given type, size
3161 * the new msg has a ref count of 1.
3162 */
3163 struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
3164 bool can_fail)
3165 {
3166 struct ceph_msg *m;
3167
3168 m = kzalloc(sizeof(*m), flags);
3169 if (m == NULL)
3170 goto out;
3171
3172 m->hdr.type = cpu_to_le16(type);
3173 m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
3174 m->hdr.front_len = cpu_to_le32(front_len);
3175
3176 INIT_LIST_HEAD(&m->list_head);
3177 kref_init(&m->kref);
3178
3179 ceph_msg_data_init(&m->p);
3180 ceph_msg_data_init(&m->l);
3181 ceph_msg_data_init(&m->b);
3182 ceph_msg_data_init(&m->t);
3183
3184 /* front */
3185 m->front_max = front_len;
3186 if (front_len) {
3187 if (front_len > PAGE_CACHE_SIZE) {
3188 m->front.iov_base = __vmalloc(front_len, flags,
3189 PAGE_KERNEL);
3190 m->front_is_vmalloc = true;
3191 } else {
3192 m->front.iov_base = kmalloc(front_len, flags);
3193 }
3194 if (m->front.iov_base == NULL) {
3195 dout("ceph_msg_new can't allocate %d bytes\n",
3196 front_len);
3197 goto out2;
3198 }
3199 } else {
3200 m->front.iov_base = NULL;
3201 }
3202 m->front.iov_len = front_len;
3203
3204 dout("ceph_msg_new %p front %d\n", m, front_len);
3205 return m;
3206
3207 out2:
3208 ceph_msg_put(m);
3209 out:
3210 if (!can_fail) {
3211 pr_err("msg_new can't create type %d front %d\n", type,
3212 front_len);
3213 WARN_ON(1);
3214 } else {
3215 dout("msg_new can't create type %d front %d\n", type,
3216 front_len);
3217 }
3218 return NULL;
3219 }
3220 EXPORT_SYMBOL(ceph_msg_new);
3221
3222 /*
3223 * Allocate "middle" portion of a message, if it is needed and wasn't
3224 * allocated by alloc_msg. This allows us to read a small fixed-size
3225 * per-type header in the front and then gracefully fail (i.e.,
3226 * propagate the error to the caller based on info in the front) when
3227 * the middle is too large.
3228 */
3229 static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
3230 {
3231 int type = le16_to_cpu(msg->hdr.type);
3232 int middle_len = le32_to_cpu(msg->hdr.middle_len);
3233
3234 dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
3235 ceph_msg_type_name(type), middle_len);
3236 BUG_ON(!middle_len);
3237 BUG_ON(msg->middle);
3238
3239 msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
3240 if (!msg->middle)
3241 return -ENOMEM;
3242 return 0;
3243 }
3244
3245 /*
3246 * Allocate a message for receiving an incoming message on a
3247 * connection, and save the result in con->in_msg. Uses the
3248 * connection's private alloc_msg op if available.
3249 *
3250 * Returns 0 on success, or a negative error code.
3251 *
3252 * On success, if we set *skip = 1:
3253 * - the next message should be skipped and ignored.
3254 * - con->in_msg == NULL
3255 * or if we set *skip = 0:
3256 * - con->in_msg is non-null.
3257 * On error (ENOMEM, EAGAIN, ...),
3258 * - con->in_msg == NULL
3259 */
3260 static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip)
3261 {
3262 struct ceph_msg_header *hdr = &con->in_hdr;
3263 int middle_len = le32_to_cpu(hdr->middle_len);
3264 struct ceph_msg *msg;
3265 int ret = 0;
3266
3267 BUG_ON(con->in_msg != NULL);
3268 BUG_ON(!con->ops->alloc_msg);
3269
3270 mutex_unlock(&con->mutex);
3271 msg = con->ops->alloc_msg(con, hdr, skip);
3272 mutex_lock(&con->mutex);
3273 if (con->state != CON_STATE_OPEN) {
3274 if (msg)
3275 ceph_msg_put(msg);
3276 return -EAGAIN;
3277 }
3278 if (msg) {
3279 BUG_ON(*skip);
3280 con->in_msg = msg;
3281 con->in_msg->con = con->ops->get(con);
3282 BUG_ON(con->in_msg->con == NULL);
3283 } else {
3284 /*
3285 * Null message pointer means either we should skip
3286 * this message or we couldn't allocate memory. The
3287 * former is not an error.
3288 */
3289 if (*skip)
3290 return 0;
3291 con->error_msg = "error allocating memory for incoming message";
3292
3293 return -ENOMEM;
3294 }
3295 memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr));
3296
3297 if (middle_len && !con->in_msg->middle) {
3298 ret = ceph_alloc_middle(con, con->in_msg);
3299 if (ret < 0) {
3300 ceph_msg_put(con->in_msg);
3301 con->in_msg = NULL;
3302 }
3303 }
3304
3305 return ret;
3306 }
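/*
 * Illustrative sketch only (not part of this file): the shape of a
 * con->ops->alloc_msg callback honouring the contract described above.
 * The "expected" test is a placeholder -- a real implementation (e.g. in
 * osd_client) would look the header's tid up in its own request table.
 */
static struct ceph_msg * __maybe_unused
example_alloc_msg(struct ceph_connection *con, struct ceph_msg_header *hdr,
		  int *skip)
{
	int type = le16_to_cpu(hdr->type);
	int front_len = le32_to_cpu(hdr->front_len);
	bool expected = false;		/* placeholder decision */

	if (!expected) {
		*skip = 1;		/* read and discard this message */
		return NULL;
	}

	*skip = 0;
	/* returning NULL with *skip == 0 would mean ENOMEM to the messenger */
	return ceph_msg_new(type, front_len, GFP_NOFS, false);
}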
3307
3308
3309 /*
3310 * Free a generically kmalloc'd message.
3311 */
3312 void ceph_msg_kfree(struct ceph_msg *m)
3313 {
3314 dout("msg_kfree %p\n", m);
3315 if (m->front_is_vmalloc)
3316 vfree(m->front.iov_base);
3317 else
3318 kfree(m->front.iov_base);
3319 kfree(m);
3320 }
3321
3322 /*
3323 * Drop a msg ref. Destroy as needed.
3324 */
3325 void ceph_msg_last_put(struct kref *kref)
3326 {
3327 struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);
3328
3329 dout("ceph_msg_put last one on %p\n", m);
3330 WARN_ON(!list_empty(&m->list_head));
3331
3332 /* drop middle, data, if any */
3333 if (m->middle) {
3334 ceph_buffer_put(m->middle);
3335 m->middle = NULL;
3336 }
3337 if (ceph_msg_has_pages(m)) {
3338 m->p.length = 0;
3339 m->p.pages = NULL;
3340 }
3341
3342 if (ceph_msg_has_pagelist(m)) {
3343 ceph_pagelist_release(m->l.pagelist);
3344 kfree(m->l.pagelist);
3345 m->l.pagelist = NULL;
3346 }
3347
3348 if (ceph_msg_has_trail(m))
3349 m->t.pagelist = NULL;
3350
3351 if (m->pool)
3352 ceph_msgpool_put(m->pool, m);
3353 else
3354 ceph_msg_kfree(m);
3355 }
3356 EXPORT_SYMBOL(ceph_msg_last_put);
3357
3358 void ceph_msg_dump(struct ceph_msg *msg)
3359 {
3360 pr_debug("msg_dump %p (front_max %d length %zd)\n", msg,
3361 msg->front_max, msg->p.length);
3362 print_hex_dump(KERN_DEBUG, "header: ",
3363 DUMP_PREFIX_OFFSET, 16, 1,
3364 &msg->hdr, sizeof(msg->hdr), true);
3365 print_hex_dump(KERN_DEBUG, " front: ",
3366 DUMP_PREFIX_OFFSET, 16, 1,
3367 msg->front.iov_base, msg->front.iov_len, true);
3368 if (msg->middle)
3369 print_hex_dump(KERN_DEBUG, "middle: ",
3370 DUMP_PREFIX_OFFSET, 16, 1,
3371 msg->middle->vec.iov_base,
3372 msg->middle->vec.iov_len, true);
3373 print_hex_dump(KERN_DEBUG, "footer: ",
3374 DUMP_PREFIX_OFFSET, 16, 1,
3375 &msg->footer, sizeof(msg->footer), true);
3376 }
3377 EXPORT_SYMBOL(ceph_msg_dump);