#include <linux/ceph/ceph_debug.h>

#include <linux/crc32c.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/dns_resolver.h>
#include <net/tcp.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/export.h>
/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */
/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
#ifdef CONFIG_LOCKDEP
static struct lock_class_key socket_class;
#endif
/*
 * When skipping (ignoring) a block of input we read it into a "skip
 * buffer," which is this many bytes in size.
 */
#define SKIP_BUF_SIZE	1024
static void queue_con(struct ceph_connection *con);
static void con_work(struct work_struct *);
static void ceph_fault(struct ceph_connection *con);
/*
 * Nicely render a sockaddr as a string.  An array of formatted
 * strings is used, to approximate reentrancy.
 */
#define ADDR_STR_COUNT_LOG	5	/* log2(# address strings in array) */
#define ADDR_STR_COUNT		(1 << ADDR_STR_COUNT_LOG)
#define ADDR_STR_COUNT_MASK	(ADDR_STR_COUNT - 1)
#define MAX_ADDR_STR_LEN	64	/* 54 is enough */

static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
static atomic_t addr_str_seq = ATOMIC_INIT(0);

static struct page *zero_page;		/* used in certain error cases */
const char *ceph_pr_addr(const struct sockaddr_storage *ss)
{
	int i;
	char *s;
	struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;

	i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
	s = addr_str[i];

	switch (ss->ss_family) {
	case AF_INET:
		snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%hu", &in4->sin_addr,
			 ntohs(in4->sin_port));
		break;

	case AF_INET6:
		snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%hu", &in6->sin6_addr,
			 ntohs(in6->sin6_port));
		break;

	default:
		snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
			 ss->ss_family);
	}

	return s;
}
EXPORT_SYMBOL(ceph_pr_addr);
static void encode_my_addr(struct ceph_messenger *msgr)
{
	memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
	ceph_encode_addr(&msgr->my_enc_addr);
}
/*
 * work queue for all reading and writing to/from the socket.
 */
static struct workqueue_struct *ceph_msgr_wq;
void _ceph_msgr_exit(void)
{
	if (ceph_msgr_wq) {
		destroy_workqueue(ceph_msgr_wq);
		ceph_msgr_wq = NULL;
	}

	BUG_ON(zero_page == NULL);
	page_cache_release(zero_page);
	zero_page = NULL;
}
int ceph_msgr_init(void)
{
	BUG_ON(zero_page != NULL);
	zero_page = ZERO_PAGE(0);
	page_cache_get(zero_page);

	ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0);
	if (ceph_msgr_wq)
		return 0;

	pr_err("msgr_init failed to create workqueue\n");
	_ceph_msgr_exit();

	return -ENOMEM;
}
EXPORT_SYMBOL(ceph_msgr_init);
void ceph_msgr_exit(void)
{
	BUG_ON(ceph_msgr_wq == NULL);

	_ceph_msgr_exit();
}
EXPORT_SYMBOL(ceph_msgr_exit);
void ceph_msgr_flush(void)
{
	flush_workqueue(ceph_msgr_wq);
}
EXPORT_SYMBOL(ceph_msgr_flush);
/*
 * socket callback functions
 */

/* data available on socket, or listen socket received a connect */
static void ceph_sock_data_ready(struct sock *sk, int count_unused)
{
	struct ceph_connection *con = sk->sk_user_data;

	if (sk->sk_state != TCP_CLOSE_WAIT) {
		dout("%s on %p state = %lu, queueing work\n", __func__,
		     con, con->state);
		queue_con(con);
	}
}
/* socket has buffer space for writing */
static void ceph_sock_write_space(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	/* only queue to workqueue if there is data we want to write,
	 * and there is sufficient space in the socket buffer to accept
	 * more data.  clear SOCK_NOSPACE so that ceph_sock_write_space()
	 * doesn't get called again until try_write() fills the socket
	 * buffer. See net/ipv4/tcp_input.c:tcp_check_space()
	 * and net/core/stream.c:sk_stream_write_space().
	 */
	if (test_bit(WRITE_PENDING, &con->state)) {
		if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
			dout("%s %p queueing write work\n", __func__, con);
			clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			queue_con(con);
		}
	} else {
		dout("%s %p nothing to write\n", __func__, con);
	}
}
/* socket's state has changed */
static void ceph_sock_state_change(struct sock *sk)
{
	struct ceph_connection *con = sk->sk_user_data;

	dout("%s %p state = %lu sk_state = %u\n", __func__,
	     con, con->state, sk->sk_state);

	if (test_bit(CLOSED, &con->state))
		return;

	switch (sk->sk_state) {
	case TCP_CLOSE:
		dout("%s TCP_CLOSE\n", __func__);
	case TCP_CLOSE_WAIT:
		dout("%s TCP_CLOSE_WAIT\n", __func__);
		if (test_and_set_bit(SOCK_CLOSED, &con->state) == 0) {
			if (test_bit(CONNECTING, &con->state))
				con->error_msg = "connection failed";
			else
				con->error_msg = "socket closed";
			queue_con(con);
		}
		break;
	case TCP_ESTABLISHED:
		dout("%s TCP_ESTABLISHED\n", __func__);
		queue_con(con);
		break;
	default:	/* Everything else is uninteresting */
		break;
	}
}
/*
 * set up socket callbacks
 */
static void set_sock_callbacks(struct socket *sock,
			       struct ceph_connection *con)
{
	struct sock *sk = sock->sk;
	sk->sk_user_data = con;
	sk->sk_data_ready = ceph_sock_data_ready;
	sk->sk_write_space = ceph_sock_write_space;
	sk->sk_state_change = ceph_sock_state_change;
}
/*
 * initiate connection to a remote socket.
 */
static int ceph_tcp_connect(struct ceph_connection *con)
{
	struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
	struct socket *sock;
	int ret;

	BUG_ON(con->sock);
	ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
			       IPPROTO_TCP, &sock);
	if (ret)
		return ret;
	sock->sk->sk_allocation = GFP_NOFS;

#ifdef CONFIG_LOCKDEP
	lockdep_set_class(&sock->sk->sk_lock, &socket_class);
#endif

	set_sock_callbacks(sock, con);

	dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));

	ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
				 O_NONBLOCK);
	if (ret == -EINPROGRESS) {
		dout("connect %s EINPROGRESS sk_state = %u\n",
		     ceph_pr_addr(&con->peer_addr.in_addr),
		     sock->sk->sk_state);
	} else if (ret < 0) {
		pr_err("connect %s error %d\n",
		       ceph_pr_addr(&con->peer_addr.in_addr), ret);
		sock_release(sock);
		con->error_msg = "connect error";

		return ret;
	}
	con->sock = sock;

	return 0;
}
static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
{
	struct kvec iov = {buf, len};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
	if (r == -EAGAIN)
		r = 0;
	return r;
}
/*
 * write something.  @more is true if caller will be sending more data
 * shortly.
 */
static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
		     size_t kvlen, size_t len, int more)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
	int r;

	if (more)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */

	r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
	if (r == -EAGAIN)
		r = 0;
	return r;
}
static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
		     int offset, size_t size, int more)
{
	int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
	int ret;

	ret = kernel_sendpage(sock, page, offset, size, flags);
	if (ret == -EAGAIN)
		ret = 0;

	return ret;
}
/*
 * Shutdown/close the socket for the given connection.
 */
static int con_close_socket(struct ceph_connection *con)
{
	int rc;

	dout("con_close_socket on %p sock %p\n", con, con->sock);
	if (!con->sock)
		return 0;
	set_bit(SOCK_CLOSED, &con->state);
	rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
	sock_release(con->sock);
	con->sock = NULL;
	clear_bit(SOCK_CLOSED, &con->state);
	return rc;
}
/*
 * Reset a connection.  Discard all incoming and outgoing messages
 * and clear *_seq state.
 */
static void ceph_msg_remove(struct ceph_msg *msg)
{
	list_del_init(&msg->list_head);
	ceph_msg_put(msg);
}

static void ceph_msg_remove_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
							list_head);
		ceph_msg_remove(msg);
	}
}
static void reset_connection(struct ceph_connection *con)
{
	/* reset connection, out_queue, msg_ and connect_seq */
	/* discard existing out_queue and msg_seq */
	ceph_msg_remove_list(&con->out_queue);
	ceph_msg_remove_list(&con->out_sent);

	if (con->in_msg) {
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
	}

	con->connect_seq = 0;
	con->out_seq = 0;
	if (con->out_msg) {
		ceph_msg_put(con->out_msg);
		con->out_msg = NULL;
	}
	con->in_seq = 0;
	con->in_seq_acked = 0;
}
/*
 * mark a peer down.  drop any open connections.
 */
void ceph_con_close(struct ceph_connection *con)
{
	dout("con_close %p peer %s\n", con,
	     ceph_pr_addr(&con->peer_addr.in_addr));
	set_bit(CLOSED, &con->state);  /* in case there's queued work */
	clear_bit(STANDBY, &con->state);  /* avoid connect_seq bump */
	clear_bit(LOSSYTX, &con->state);  /* so we retry next connect */
	clear_bit(KEEPALIVE_PENDING, &con->state);
	clear_bit(WRITE_PENDING, &con->state);
	mutex_lock(&con->mutex);
	reset_connection(con);
	con->peer_global_seq = 0;
	cancel_delayed_work(&con->work);
	mutex_unlock(&con->mutex);
	queue_con(con);
}
EXPORT_SYMBOL(ceph_con_close);
/*
 * Reopen a closed connection, with a new peer address.
 */
void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr)
{
	dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
	set_bit(OPENING, &con->state);
	clear_bit(CLOSED, &con->state);
	memcpy(&con->peer_addr, addr, sizeof(*addr));
	con->delay = 0;      /* reset backoff memory */
	queue_con(con);
}
EXPORT_SYMBOL(ceph_con_open);
/*
 * return true if this connection ever successfully opened
 */
bool ceph_con_opened(struct ceph_connection *con)
{
	return con->connect_seq > 0;
}
/*
 * generic get/put
 */
struct ceph_connection *ceph_con_get(struct ceph_connection *con)
{
	int nref = __atomic_add_unless(&con->nref, 1, 0);

	dout("con_get %p nref = %d -> %d\n", con, nref, nref + 1);

	return nref ? con : NULL;
}
void ceph_con_put(struct ceph_connection *con)
{
	int nref = atomic_dec_return(&con->nref);

	BUG_ON(nref < 0);
	if (nref == 0) {
		BUG_ON(con->sock);
		kfree(con);
	}
	dout("con_put %p nref = %d -> %d\n", con, nref + 1, nref);
}
/*
 * initialize a new connection.
 */
void ceph_con_init(struct ceph_messenger *msgr, struct ceph_connection *con)
{
	dout("con_init %p\n", con);
	memset(con, 0, sizeof(*con));
	atomic_set(&con->nref, 1);
	con->msgr = msgr;
	mutex_init(&con->mutex);
	INIT_LIST_HEAD(&con->out_queue);
	INIT_LIST_HEAD(&con->out_sent);
	INIT_DELAYED_WORK(&con->work, con_work);
}
EXPORT_SYMBOL(ceph_con_init);
/*
 * We maintain a global counter to order connection attempts.  Get
 * a unique seq greater than @gt.
 */
static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
{
	u32 ret;

	spin_lock(&msgr->global_seq_lock);
	if (msgr->global_seq < gt)
		msgr->global_seq = gt;
	ret = ++msgr->global_seq;
	spin_unlock(&msgr->global_seq_lock);
	return ret;
}
static void ceph_con_out_kvec_reset(struct ceph_connection *con)
{
	con->out_kvec_left = 0;
	con->out_kvec_bytes = 0;
	con->out_kvec_cur = &con->out_kvec[0];
}
static void ceph_con_out_kvec_add(struct ceph_connection *con,
				size_t size, void *data)
{
	int index;

	index = con->out_kvec_left;
	BUG_ON(index >= ARRAY_SIZE(con->out_kvec));

	con->out_kvec[index].iov_len = size;
	con->out_kvec[index].iov_base = data;
	con->out_kvec_left++;
	con->out_kvec_bytes += size;
}
/*
 * Prepare footer for currently outgoing message, and finish things
 * off.  Assumes out_kvec* are already valid.. we just add on to the end.
 */
static void prepare_write_message_footer(struct ceph_connection *con)
{
	struct ceph_msg *m = con->out_msg;
	int v = con->out_kvec_left;

	dout("prepare_write_message_footer %p\n", con);
	con->out_kvec_is_msg = true;
	con->out_kvec[v].iov_base = &m->footer;
	con->out_kvec[v].iov_len = sizeof(m->footer);
	con->out_kvec_bytes += sizeof(m->footer);
	con->out_kvec_left++;
	con->out_more = m->more_to_follow;
	con->out_msg_done = true;
}
/*
 * Prepare headers for the next outgoing message.
 */
static void prepare_write_message(struct ceph_connection *con)
{
	struct ceph_msg *m;
	u32 crc;

	ceph_con_out_kvec_reset(con);
	con->out_kvec_is_msg = true;
	con->out_msg_done = false;

	/* Sneak an ack in there first?  If we can get it into the same
	 * TCP packet that's a good thing. */
	if (con->in_seq > con->in_seq_acked) {
		con->in_seq_acked = con->in_seq;
		ceph_con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
		con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
		ceph_con_out_kvec_add(con, sizeof (con->out_temp_ack),
			&con->out_temp_ack);
	}

	m = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
	con->out_msg = m;

	/* put message on sent list */
	ceph_msg_get(m);
	list_move_tail(&m->list_head, &con->out_sent);

	/*
	 * only assign outgoing seq # if we haven't sent this message
	 * yet.  if it is requeued, resend with its original seq.
	 */
	if (m->needs_out_seq) {
		m->hdr.seq = cpu_to_le64(++con->out_seq);
		m->needs_out_seq = false;
	}

	dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
	     m, con->out_seq, le16_to_cpu(m->hdr.type),
	     le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
	     le32_to_cpu(m->hdr.data_len),
	     m->nr_pages);
	BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);

	/* tag + hdr + front + middle */
	ceph_con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
	ceph_con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
	ceph_con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);

	if (m->middle)
		ceph_con_out_kvec_add(con, m->middle->vec.iov_len,
			m->middle->vec.iov_base);

	/* fill in crc (except data pages), footer */
	crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
	con->out_msg->hdr.crc = cpu_to_le32(crc);
	con->out_msg->footer.flags = CEPH_MSG_FOOTER_COMPLETE;

	crc = crc32c(0, m->front.iov_base, m->front.iov_len);
	con->out_msg->footer.front_crc = cpu_to_le32(crc);
	if (m->middle) {
		crc = crc32c(0, m->middle->vec.iov_base,
				m->middle->vec.iov_len);
		con->out_msg->footer.middle_crc = cpu_to_le32(crc);
	} else
		con->out_msg->footer.middle_crc = 0;
	con->out_msg->footer.data_crc = 0;
	dout("prepare_write_message front_crc %u data_crc %u\n",
	     le32_to_cpu(con->out_msg->footer.front_crc),
	     le32_to_cpu(con->out_msg->footer.middle_crc));

	/* is there a data payload? */
	if (le32_to_cpu(m->hdr.data_len) > 0) {
		/* initialize page iterator */
		con->out_msg_pos.page = 0;
		if (m->pages)
			con->out_msg_pos.page_pos = m->page_alignment;
		else
			con->out_msg_pos.page_pos = 0;
		con->out_msg_pos.data_pos = 0;
		con->out_msg_pos.did_page_crc = false;
		con->out_more = 1;  /* data + footer will follow */
	} else {
		/* no, queue up footer too and be done */
		prepare_write_message_footer(con);
	}

	set_bit(WRITE_PENDING, &con->state);
}
/*
 * Prepare an ack.
 */
static void prepare_write_ack(struct ceph_connection *con)
{
	dout("prepare_write_ack %p %llu -> %llu\n", con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	ceph_con_out_kvec_reset(con);

	ceph_con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);

	con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
	ceph_con_out_kvec_add(con, sizeof (con->out_temp_ack),
				&con->out_temp_ack);

	con->out_more = 1;  /* more will follow.. eventually.. */
	set_bit(WRITE_PENDING, &con->state);
}
/*
 * Prepare to write keepalive byte.
 */
static void prepare_write_keepalive(struct ceph_connection *con)
{
	dout("prepare_write_keepalive %p\n", con);
	ceph_con_out_kvec_reset(con);
	ceph_con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive);
	set_bit(WRITE_PENDING, &con->state);
}
/*
 * Connection negotiation.
 */

static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection *con,
						int *auth_proto)
{
	struct ceph_auth_handshake *auth;

	if (!con->ops->get_authorizer) {
		con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
		con->out_connect.authorizer_len = 0;

		return NULL;
	}

	/* Can't hold the mutex while getting authorizer */

	mutex_unlock(&con->mutex);

	auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);

	mutex_lock(&con->mutex);

	if (IS_ERR(auth))
		return auth;
	if (test_bit(CLOSED, &con->state) || test_bit(OPENING, &con->state))
		return ERR_PTR(-EAGAIN);

	con->auth_reply_buf = auth->authorizer_reply_buf;
	con->auth_reply_buf_len = auth->authorizer_reply_buf_len;

	return auth;
}
/*
 * We connected to a peer and are saying hello.
 */
static void prepare_write_banner(struct ceph_connection *con)
{
	ceph_con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
	ceph_con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
					&con->msgr->my_enc_addr);

	con->out_more = 0;
	set_bit(WRITE_PENDING, &con->state);
}
static int prepare_write_connect(struct ceph_connection *con)
{
	unsigned global_seq = get_global_seq(con->msgr, 0);
	int proto;
	int auth_proto;
	struct ceph_auth_handshake *auth;

	switch (con->peer_name.type) {
	case CEPH_ENTITY_TYPE_MON:
		proto = CEPH_MONC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_OSD:
		proto = CEPH_OSDC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_MDS:
		proto = CEPH_MDSC_PROTOCOL;
		break;
	default:
		BUG();
	}

	dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
	     con->connect_seq, global_seq, proto);

	con->out_connect.features = cpu_to_le64(con->msgr->supported_features);
	con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
	con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
	con->out_connect.global_seq = cpu_to_le32(global_seq);
	con->out_connect.protocol_version = cpu_to_le32(proto);
	con->out_connect.flags = 0;

	auth_proto = CEPH_AUTH_UNKNOWN;
	auth = get_connect_authorizer(con, &auth_proto);
	if (IS_ERR(auth))
		return PTR_ERR(auth);

	con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
	con->out_connect.authorizer_len = auth ?
		cpu_to_le32(auth->authorizer_buf_len) : 0;

	ceph_con_out_kvec_add(con, sizeof (con->out_connect),
					&con->out_connect);
	if (auth && auth->authorizer_buf_len)
		ceph_con_out_kvec_add(con, auth->authorizer_buf_len,
					auth->authorizer_buf);

	con->out_more = 0;
	set_bit(WRITE_PENDING, &con->state);

	return 0;
}
/*
 * write as much of pending kvecs to the socket as we can.
 *  1 -> done
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_kvec(struct ceph_connection *con)
{
	int ret;

	dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
	while (con->out_kvec_bytes > 0) {
		ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
				       con->out_kvec_left, con->out_kvec_bytes,
				       con->out_more);
		if (ret <= 0)
			goto out;
		con->out_kvec_bytes -= ret;
		if (con->out_kvec_bytes == 0)
			break;            /* done */

		/* account for full iov entries consumed */
		while (ret >= con->out_kvec_cur->iov_len) {
			BUG_ON(!con->out_kvec_left);
			ret -= con->out_kvec_cur->iov_len;
			con->out_kvec_cur++;
			con->out_kvec_left--;
		}
		/* and for a partially-consumed entry */
		if (ret) {
			con->out_kvec_cur->iov_len -= ret;
			con->out_kvec_cur->iov_base += ret;
		}
	}
	con->out_kvec_left = 0;
	con->out_kvec_is_msg = false;
	ret = 1;
out:
	dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
	     con->out_kvec_bytes, con->out_kvec_left, ret);
	return ret;  /* done! */
}
#ifdef CONFIG_BLOCK
static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg)
{
	if (!bio) {
		*iter = NULL;
		*seg = 0;
		return;
	}
	*iter = bio;
	*seg = bio->bi_idx;
}

static void iter_bio_next(struct bio **bio_iter, int *seg)
{
	if (*bio_iter == NULL)
		return;

	BUG_ON(*seg >= (*bio_iter)->bi_vcnt);

	(*seg)++;
	if (*seg == (*bio_iter)->bi_vcnt)
		init_bio_iter((*bio_iter)->bi_next, bio_iter, seg);
}
#endif
/*
 * Write as much message data payload as we can.  If we finish, queue
 * up the footer.
 *  1 -> done, footer is now queued in out_kvec[].
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_msg_pages(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->out_msg;
	unsigned data_len = le32_to_cpu(msg->hdr.data_len);
	size_t len;
	bool do_datacrc = !con->msgr->nocrc;
	int ret;
	int total_max_write;
	int in_trail = 0;
	size_t trail_len = (msg->trail ? msg->trail->length : 0);

	dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
	     con, con->out_msg, con->out_msg_pos.page, con->out_msg->nr_pages,
	     con->out_msg_pos.page_pos);

#ifdef CONFIG_BLOCK
	if (msg->bio && !msg->bio_iter)
		init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg);
#endif

	while (data_len > con->out_msg_pos.data_pos) {
		struct page *page = NULL;
		int max_write = PAGE_SIZE;
		int bio_offset = 0;

		total_max_write = data_len - trail_len -
			con->out_msg_pos.data_pos;

		/*
		 * if we are calculating the data crc (the default), we need
		 * to map the page.  if our pages[] has been revoked, use the
		 * zero page.
		 */

		/* have we reached the trail part of the data? */
		if (con->out_msg_pos.data_pos >= data_len - trail_len) {
			in_trail = 1;

			total_max_write = data_len - con->out_msg_pos.data_pos;

			page = list_first_entry(&msg->trail->head,
						struct page, lru);
			max_write = PAGE_SIZE;
		} else if (msg->pages) {
			page = msg->pages[con->out_msg_pos.page];
		} else if (msg->pagelist) {
			page = list_first_entry(&msg->pagelist->head,
						struct page, lru);
#ifdef CONFIG_BLOCK
		} else if (msg->bio) {
			struct bio_vec *bv;

			bv = bio_iovec_idx(msg->bio_iter, msg->bio_seg);
			page = bv->bv_page;
			bio_offset = bv->bv_offset;
			max_write = bv->bv_len;
#endif
		} else {
			page = zero_page;
		}
		len = min_t(int, max_write - con->out_msg_pos.page_pos,
			    total_max_write);

		if (do_datacrc && !con->out_msg_pos.did_page_crc) {
			void *base;
			u32 crc;
			u32 tmpcrc = le32_to_cpu(con->out_msg->footer.data_crc);
			char *kaddr;

			kaddr = kmap(page);
			BUG_ON(kaddr == NULL);
			base = kaddr + con->out_msg_pos.page_pos + bio_offset;
			crc = crc32c(tmpcrc, base, len);
			con->out_msg->footer.data_crc = cpu_to_le32(crc);
			con->out_msg_pos.did_page_crc = true;
		}
		ret = ceph_tcp_sendpage(con->sock, page,
				      con->out_msg_pos.page_pos + bio_offset,
				      len, 1);

		if (do_datacrc)
			kunmap(page);

		if (ret <= 0)
			goto out;

		con->out_msg_pos.data_pos += ret;
		con->out_msg_pos.page_pos += ret;
		if (ret == len) {
			con->out_msg_pos.page_pos = 0;
			con->out_msg_pos.page++;
			con->out_msg_pos.did_page_crc = false;
			if (in_trail)
				list_move_tail(&page->lru,
					       &msg->trail->head);
			else if (msg->pagelist)
				list_move_tail(&page->lru,
					       &msg->pagelist->head);
#ifdef CONFIG_BLOCK
			else if (msg->bio)
				iter_bio_next(&msg->bio_iter, &msg->bio_seg);
#endif
		}
	}

	dout("write_partial_msg_pages %p msg %p done\n", con, msg);

	/* prepare and queue up footer, too */
	if (!do_datacrc)
		con->out_msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
	ceph_con_out_kvec_reset(con);
	prepare_write_message_footer(con);
	ret = 1;
out:
	return ret;
}
/*
 * write some zeros
 */
static int write_partial_skip(struct ceph_connection *con)
{
	int ret;

	while (con->out_skip > 0) {
		size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);

		ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, 1);
		if (ret <= 0)
			goto out;
		con->out_skip -= ret;
	}
	ret = 1;
out:
	return ret;
}
/*
 * Prepare to read connection handshake, or an ack.
 */
static void prepare_read_banner(struct ceph_connection *con)
{
	dout("prepare_read_banner %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_connect(struct ceph_connection *con)
{
	dout("prepare_read_connect %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_ack(struct ceph_connection *con)
{
	dout("prepare_read_ack %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_tag(struct ceph_connection *con)
{
	dout("prepare_read_tag %p\n", con);
	con->in_base_pos = 0;
	con->in_tag = CEPH_MSGR_TAG_READY;
}
/*
 * Prepare to read a message.
 */
static int prepare_read_message(struct ceph_connection *con)
{
	dout("prepare_read_message %p\n", con);
	BUG_ON(con->in_msg != NULL);
	con->in_base_pos = 0;
	con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
	return 0;
}
static int read_partial(struct ceph_connection *con,
			int end, int size, void *object)
{
	while (con->in_base_pos < end) {
		int left = end - con->in_base_pos;
		int have = size - left;
		int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
	}
	return 1;
}
/*
 * Read all or part of the connect-side handshake on a new connection
 */
static int read_partial_banner(struct ceph_connection *con)
{
	int size;
	int end;
	int ret;

	dout("read_partial_banner %p at %d\n", con, con->in_base_pos);

	/* peer's banner */
	size = strlen(CEPH_BANNER);
	end = size;
	ret = read_partial(con, end, size, con->in_banner);
	if (ret <= 0)
		goto out;

	size = sizeof (con->actual_peer_addr);
	end += size;
	ret = read_partial(con, end, size, &con->actual_peer_addr);
	if (ret <= 0)
		goto out;

	size = sizeof (con->peer_addr_for_me);
	end += size;
	ret = read_partial(con, end, size, &con->peer_addr_for_me);
	if (ret <= 0)
		goto out;

out:
	return ret;
}
static int read_partial_connect(struct ceph_connection *con)
{
	int size;
	int end;
	int ret;

	dout("read_partial_connect %p at %d\n", con, con->in_base_pos);

	size = sizeof (con->in_reply);
	end = size;
	ret = read_partial(con, end, size, &con->in_reply);
	if (ret <= 0)
		goto out;

	size = le32_to_cpu(con->in_reply.authorizer_len);
	end += size;
	ret = read_partial(con, end, size, con->auth_reply_buf);
	if (ret <= 0)
		goto out;

	dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
	     con, (int)con->in_reply.tag,
	     le32_to_cpu(con->in_reply.connect_seq),
	     le32_to_cpu(con->in_reply.global_seq));
out:
	return ret;
}
/*
 * Verify the hello banner looks okay.
 */
static int verify_hello(struct ceph_connection *con)
{
	if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
		pr_err("connect to %s got bad banner\n",
		       ceph_pr_addr(&con->peer_addr.in_addr));
		con->error_msg = "protocol error, bad banner";
		return -1;
	}
	return 0;
}
static bool addr_is_blank(struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
	case AF_INET6:
		return
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
	}
	return false;
}
static int addr_port(struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)ss)->sin_port);
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
	}
	return 0;
}
static void addr_set_port(struct sockaddr_storage *ss, int p)
{
	switch (ss->ss_family) {
	case AF_INET:
		((struct sockaddr_in *)ss)->sin_port = htons(p);
		break;
	case AF_INET6:
		((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
		break;
	}
}
/*
 * Unlike other *_pton function semantics, zero indicates success.
 */
static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss,
		char delim, const char **ipend)
{
	struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
	struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;

	memset(ss, 0, sizeof(*ss));

	if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) {
		ss->ss_family = AF_INET;
		return 0;
	}

	if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) {
		ss->ss_family = AF_INET6;
		return 0;
	}

	return -EINVAL;
}
/*
 * Extract hostname string and resolve using kernel DNS facility.
 */
#ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
static int ceph_dns_resolve_name(const char *name, size_t namelen,
		struct sockaddr_storage *ss, char delim, const char **ipend)
{
	const char *end, *delim_p;
	char *colon_p, *ip_addr = NULL;
	int ip_len, ret;

	/*
	 * The end of the hostname occurs immediately preceding the delimiter or
	 * the port marker (':') where the delimiter takes precedence.
	 */
	delim_p = memchr(name, delim, namelen);
	colon_p = memchr(name, ':', namelen);

	if (delim_p && colon_p)
		end = delim_p < colon_p ? delim_p : colon_p;
	else if (!delim_p && colon_p)
		end = colon_p;
	else {
		end = delim_p;
		if (!end) /* case: hostname:/ */
			end = name + namelen;
	}

	if (end <= name)
		return -EINVAL;

	/* do dns_resolve upcall */
	ip_len = dns_query(NULL, name, end - name, NULL, &ip_addr, NULL);
	if (ip_len > 0)
		ret = ceph_pton(ip_addr, ip_len, ss, -1, NULL);
	else
		ret = -ESRCH;

	kfree(ip_addr);

	*ipend = end;

	pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
			ret, ret ? "failed" : ceph_pr_addr(ss));

	return ret;
}
#else
static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
		struct sockaddr_storage *ss, char delim, const char **ipend)
{
	return -EINVAL;
}
#endif
/*
 * Parse a server name (IP or hostname). If a valid IP address is not found
 * then try to extract a hostname to resolve using userspace DNS upcall.
 */
static int ceph_parse_server_name(const char *name, size_t namelen,
			struct sockaddr_storage *ss, char delim, const char **ipend)
{
	int ret;

	ret = ceph_pton(name, namelen, ss, delim, ipend);
	if (ret)
		ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend);

	return ret;
}
/*
 * Parse an ip[:port] list into an addr array.  Use the default
 * monitor port if a port isn't specified.
 */
int ceph_parse_ips(const char *c, const char *end,
		   struct ceph_entity_addr *addr,
		   int max_count, int *count)
{
	int i, ret = -EINVAL;
	const char *p = c;

	dout("parse_ips on '%.*s'\n", (int)(end-c), c);
	for (i = 0; i < max_count; i++) {
		const char *ipend;
		struct sockaddr_storage *ss = &addr[i].in_addr;
		int port;
		char delim = ',';

		if (*p == '[') {
			delim = ']';
			p++;
		}

		ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend);
		if (ret)
			goto bad;
		ret = -EINVAL;

		p = ipend;

		if (delim == ']') {
			if (*p != ']') {
				dout("missing matching ']'\n");
				goto bad;
			}
			p++;
		}

		/* port? */
		if (p < end && *p == ':') {
			port = 0;
			p++;
			while (p < end && *p >= '0' && *p <= '9') {
				port = (port * 10) + (*p - '0');
				p++;
			}
			if (port > 65535 || port == 0)
				goto bad;
		} else {
			port = CEPH_MON_PORT;
		}

		addr_set_port(ss, port);

		dout("parse_ips got %s\n", ceph_pr_addr(ss));

		if (p == end)
			break;
		if (*p != ',')
			goto bad;
		p++;
	}

	if (p != end)
		goto bad;

	if (count)
		*count = i + 1;
	return 0;

bad:
	pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
	return ret;
}
EXPORT_SYMBOL(ceph_parse_ips);
static int process_banner(struct ceph_connection *con)
{
	dout("process_banner on %p\n", con);

	if (verify_hello(con) < 0)
		return -1;

	ceph_decode_addr(&con->actual_peer_addr);
	ceph_decode_addr(&con->peer_addr_for_me);

	/*
	 * Make sure the other end is who we wanted.  note that the other
	 * end may not yet know their ip address, so if it's 0.0.0.0, give
	 * them the benefit of the doubt.
	 */
	if (memcmp(&con->peer_addr, &con->actual_peer_addr,
		   sizeof(con->peer_addr)) != 0 &&
	    !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
	      con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
		pr_warning("wrong peer, want %s/%d, got %s/%d\n",
			   ceph_pr_addr(&con->peer_addr.in_addr),
			   (int)le32_to_cpu(con->peer_addr.nonce),
			   ceph_pr_addr(&con->actual_peer_addr.in_addr),
			   (int)le32_to_cpu(con->actual_peer_addr.nonce));
		con->error_msg = "wrong peer at address";
		return -1;
	}

	/*
	 * did we learn our address?
	 */
	if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
		int port = addr_port(&con->msgr->inst.addr.in_addr);

		memcpy(&con->msgr->inst.addr.in_addr,
		       &con->peer_addr_for_me.in_addr,
		       sizeof(con->peer_addr_for_me.in_addr));
		addr_set_port(&con->msgr->inst.addr.in_addr, port);
		encode_my_addr(con->msgr);
		dout("process_banner learned my addr is %s\n",
		     ceph_pr_addr(&con->msgr->inst.addr.in_addr));
	}

	set_bit(NEGOTIATING, &con->state);
	prepare_read_connect(con);
	return 0;
}
static void fail_protocol(struct ceph_connection *con)
{
	reset_connection(con);
	set_bit(CLOSED, &con->state);  /* in case there's queued work */
}
static int process_connect(struct ceph_connection *con)
{
	u64 sup_feat = con->msgr->supported_features;
	u64 req_feat = con->msgr->required_features;
	u64 server_feat = le64_to_cpu(con->in_reply.features);
	int ret;

	dout("process_connect on %p tag %d\n", con, (int)con->in_tag);

	switch (con->in_reply.tag) {
	case CEPH_MSGR_TAG_FEATURES:
		pr_err("%s%lld %s feature set mismatch,"
		       " my %llx < server's %llx, missing %llx\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr),
		       sup_feat, server_feat, server_feat & ~sup_feat);
		con->error_msg = "missing required protocol features";
		fail_protocol(con);
		return -1;

	case CEPH_MSGR_TAG_BADPROTOVER:
		pr_err("%s%lld %s protocol version mismatch,"
		       " my %d != server's %d\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr),
		       le32_to_cpu(con->out_connect.protocol_version),
		       le32_to_cpu(con->in_reply.protocol_version));
		con->error_msg = "protocol version mismatch";
		fail_protocol(con);
		return -1;

	case CEPH_MSGR_TAG_BADAUTHORIZER:
		con->auth_retry++;
		dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
		     con->auth_retry);
		if (con->auth_retry == 2) {
			con->error_msg = "connect authorization failure";
			return -1;
		}
		con->auth_retry = 1;
		ceph_con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RESETSESSION:
		/*
		 * If we connected with a large connect_seq but the peer
		 * has no record of a session with us (no connection, or
		 * connect_seq == 0), they will send RESETSESSION to indicate
		 * that they must have reset their session, and may have
		 * dropped messages.
		 */
		dout("process_connect got RESET peer seq %u\n",
		     le32_to_cpu(con->in_connect.connect_seq));
		pr_err("%s%lld %s connection reset\n",
		       ENTITY_NAME(con->peer_name),
		       ceph_pr_addr(&con->peer_addr.in_addr));
		reset_connection(con);
		ceph_con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);

		/* Tell ceph about it. */
		mutex_unlock(&con->mutex);
		pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
		if (con->ops->peer_reset)
			con->ops->peer_reset(con);
		mutex_lock(&con->mutex);
		if (test_bit(CLOSED, &con->state) ||
		    test_bit(OPENING, &con->state))
			return -EAGAIN;
		break;

	case CEPH_MSGR_TAG_RETRY_SESSION:
		/*
		 * If we sent a smaller connect_seq than the peer has, try
		 * again with a larger value.
		 */
		dout("process_connect got RETRY my seq = %u, peer_seq = %u\n",
		     le32_to_cpu(con->out_connect.connect_seq),
		     le32_to_cpu(con->in_connect.connect_seq));
		con->connect_seq = le32_to_cpu(con->in_connect.connect_seq);
		ceph_con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RETRY_GLOBAL:
		/*
		 * If we sent a smaller global_seq than the peer has, try
		 * again with a larger value.
		 */
		dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
		     con->peer_global_seq,
		     le32_to_cpu(con->in_connect.global_seq));
		get_global_seq(con->msgr,
			       le32_to_cpu(con->in_connect.global_seq));
		ceph_con_out_kvec_reset(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			return ret;
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_READY:
		if (req_feat & ~server_feat) {
			pr_err("%s%lld %s protocol feature mismatch,"
			       " my required %llx > server's %llx, need %llx\n",
			       ENTITY_NAME(con->peer_name),
			       ceph_pr_addr(&con->peer_addr.in_addr),
			       req_feat, server_feat, req_feat & ~server_feat);
			con->error_msg = "missing required protocol features";
			fail_protocol(con);
			return -1;
		}
		clear_bit(CONNECTING, &con->state);
		con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
		con->connect_seq++;
		con->peer_features = server_feat;
		dout("process_connect got READY gseq %d cseq %d (%d)\n",
		     con->peer_global_seq,
		     le32_to_cpu(con->in_reply.connect_seq),
		     con->connect_seq);
		WARN_ON(con->connect_seq !=
			le32_to_cpu(con->in_reply.connect_seq));

		if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
			set_bit(LOSSYTX, &con->state);

		prepare_read_tag(con);
		break;

	case CEPH_MSGR_TAG_WAIT:
		/*
		 * If there is a connection race (we are opening
		 * connections to each other), one of us may just have
		 * to WAIT.  This shouldn't happen if we are the
		 * client.
		 */
		pr_err("process_connect got WAIT as client\n");
		con->error_msg = "protocol error, got WAIT as client";
		return -1;

	default:
		pr_err("connect protocol error, will retry\n");
		con->error_msg = "protocol error, garbage tag during connect";
		return -1;
	}
	return 0;
}
/*
 * read (part of) an ack
 */
static int read_partial_ack(struct ceph_connection *con)
{
	int size = sizeof (con->in_temp_ack);
	int end = size;

	return read_partial(con, end, size, &con->in_temp_ack);
}
/*
 * We can finally discard anything that's been acked.
 */
static void process_ack(struct ceph_connection *con)
{
	struct ceph_msg *m;
	u64 ack = le64_to_cpu(con->in_temp_ack);
	u64 seq;

	while (!list_empty(&con->out_sent)) {
		m = list_first_entry(&con->out_sent, struct ceph_msg,
				     list_head);
		seq = le64_to_cpu(m->hdr.seq);
		if (seq > ack)
			break;
		dout("got ack for seq %llu type %d at %p\n", seq,
		     le16_to_cpu(m->hdr.type), m);
		m->ack_stamp = jiffies;
		ceph_msg_remove(m);
	}
	prepare_read_tag(con);
}
static int read_partial_message_section(struct ceph_connection *con,
					struct kvec *section,
					unsigned int sec_len, u32 *crc)
{
	int ret, left;

	BUG_ON(!section);

	while (section->iov_len < sec_len) {
		BUG_ON(section->iov_base == NULL);
		left = sec_len - section->iov_len;
		ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
				       section->iov_len, left);
		if (ret <= 0)
			return ret;
		section->iov_len += ret;
	}
	if (section->iov_len == sec_len)
		*crc = crc32c(0, section->iov_base, section->iov_len);

	return 1;
}
static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
				struct ceph_msg_header *hdr,
				int *skip);
static int read_partial_message_pages(struct ceph_connection *con,
				      struct page **pages,
				      unsigned data_len, bool do_datacrc)
{
	void *p;
	int ret;
	int left;

	left = min((int)(data_len - con->in_msg_pos.data_pos),
		   (int)(PAGE_SIZE - con->in_msg_pos.page_pos));
	/* (page) data */
	BUG_ON(pages == NULL);
	p = kmap(pages[con->in_msg_pos.page]);
	ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
			       left);
	if (ret > 0 && do_datacrc)
		con->in_data_crc =
			crc32c(con->in_data_crc,
				  p + con->in_msg_pos.page_pos, ret);
	kunmap(pages[con->in_msg_pos.page]);
	if (ret <= 0)
		return ret;
	con->in_msg_pos.data_pos += ret;
	con->in_msg_pos.page_pos += ret;
	if (con->in_msg_pos.page_pos == PAGE_SIZE) {
		con->in_msg_pos.page_pos = 0;
		con->in_msg_pos.page++;
	}

	return ret;
}
#ifdef CONFIG_BLOCK
static int read_partial_message_bio(struct ceph_connection *con,
				    struct bio **bio_iter, int *bio_seg,
				    unsigned data_len, bool do_datacrc)
{
	struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg);
	void *p;
	int ret, left;

	left = min((int)(data_len - con->in_msg_pos.data_pos),
		   (int)(bv->bv_len - con->in_msg_pos.page_pos));

	p = kmap(bv->bv_page) + bv->bv_offset;

	ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
			       left);
	if (ret > 0 && do_datacrc)
		con->in_data_crc =
			crc32c(con->in_data_crc,
				  p + con->in_msg_pos.page_pos, ret);
	kunmap(bv->bv_page);
	if (ret <= 0)
		return ret;
	con->in_msg_pos.data_pos += ret;
	con->in_msg_pos.page_pos += ret;
	if (con->in_msg_pos.page_pos == bv->bv_len) {
		con->in_msg_pos.page_pos = 0;
		iter_bio_next(bio_iter, bio_seg);
	}

	return ret;
}
#endif
/*
 * read (part of) a message.
 */
static int read_partial_message(struct ceph_connection *con)
{
	struct ceph_msg *m = con->in_msg;
	int size;
	int end;
	int ret;
	unsigned front_len, middle_len, data_len;
	bool do_datacrc = !con->msgr->nocrc;
	int skip;
	u64 seq;
	u32 crc;

	dout("read_partial_message con %p msg %p\n", con, m);

	/* header */
	size = sizeof (con->in_hdr);
	end = size;
	ret = read_partial(con, end, size, &con->in_hdr);
	if (ret <= 0)
		return ret;

	crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc));
	if (cpu_to_le32(crc) != con->in_hdr.crc) {
		pr_err("read_partial_message bad hdr "
		       " crc %u != expected %u\n",
		       crc, con->in_hdr.crc);
		return -EBADMSG;
	}

	front_len = le32_to_cpu(con->in_hdr.front_len);
	if (front_len > CEPH_MSG_MAX_FRONT_LEN)
		return -EIO;
	middle_len = le32_to_cpu(con->in_hdr.middle_len);
	if (middle_len > CEPH_MSG_MAX_DATA_LEN)
		return -EIO;
	data_len = le32_to_cpu(con->in_hdr.data_len);
	if (data_len > CEPH_MSG_MAX_DATA_LEN)
		return -EIO;

	/* verify seq# */
	seq = le64_to_cpu(con->in_hdr.seq);
	if ((s64)seq - (s64)con->in_seq < 1) {
		pr_info("skipping %s%lld %s seq %lld expected %lld\n",
			ENTITY_NAME(con->peer_name),
			ceph_pr_addr(&con->peer_addr.in_addr),
			seq, con->in_seq + 1);
		con->in_base_pos = -front_len - middle_len - data_len -
			sizeof(m->footer);
		con->in_tag = CEPH_MSGR_TAG_READY;
		con->in_seq++;
		return 0;
	} else if ((s64)seq - (s64)con->in_seq > 1) {
		pr_err("read_partial_message bad seq %lld expected %lld\n",
		       seq, con->in_seq + 1);
		con->error_msg = "bad message sequence # for incoming message";
		return -EBADMSG;
	}

	/* allocate message? */
	if (!con->in_msg) {
		dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
		     con->in_hdr.front_len, con->in_hdr.data_len);
		skip = 0;
		con->in_msg = ceph_alloc_msg(con, &con->in_hdr, &skip);
		if (skip) {
			/* skip this message */
			dout("alloc_msg said skip message\n");
			BUG_ON(con->in_msg);
			con->in_base_pos = -front_len - middle_len - data_len -
				sizeof(m->footer);
			con->in_tag = CEPH_MSGR_TAG_READY;
			con->in_seq++;
			return 0;
		}
		if (!con->in_msg) {
			con->error_msg =
				"error allocating memory for incoming message";
			return -ENOMEM;
		}
		m = con->in_msg;
		m->front.iov_len = 0;    /* haven't read it yet */
		if (m->middle)
			m->middle->vec.iov_len = 0;

		con->in_msg_pos.page = 0;
		if (m->pages)
			con->in_msg_pos.page_pos = m->page_alignment;
		else
			con->in_msg_pos.page_pos = 0;
		con->in_msg_pos.data_pos = 0;
	}

	/* front */
	ret = read_partial_message_section(con, &m->front, front_len,
					   &con->in_front_crc);
	if (ret <= 0)
		return ret;

	/* middle */
	if (m->middle) {
		ret = read_partial_message_section(con, &m->middle->vec,
						   middle_len,
						   &con->in_middle_crc);
		if (ret <= 0)
			return ret;
	}
#ifdef CONFIG_BLOCK
	if (m->bio && !m->bio_iter)
		init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg);
#endif

	/* (page) data */
	while (con->in_msg_pos.data_pos < data_len) {
		if (m->pages) {
			ret = read_partial_message_pages(con, m->pages,
						 data_len, do_datacrc);
			if (ret <= 0)
				return ret;
#ifdef CONFIG_BLOCK
		} else if (m->bio) {

			ret = read_partial_message_bio(con,
						 &m->bio_iter, &m->bio_seg,
						 data_len, do_datacrc);
			if (ret <= 0)
				return ret;
#endif
		} else {
			BUG_ON(1);
		}
	}

	/* footer */
	size = sizeof (m->footer);
	end += size;
	ret = read_partial(con, end, size, &m->footer);
	if (ret <= 0)
		return ret;

	dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
	     m, front_len, m->footer.front_crc, middle_len,
	     m->footer.middle_crc, data_len, m->footer.data_crc);

	/* crc ok? */
	if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
		pr_err("read_partial_message %p front crc %u != exp. %u\n",
		       m, con->in_front_crc, m->footer.front_crc);
		return -EBADMSG;
	}
	if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
		pr_err("read_partial_message %p middle crc %u != exp %u\n",
		       m, con->in_middle_crc, m->footer.middle_crc);
		return -EBADMSG;
	}
	if (do_datacrc &&
	    (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
	    con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
		pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
		       con->in_data_crc, le32_to_cpu(m->footer.data_crc));
		return -EBADMSG;
	}

	return 1; /* done! */
}
/*
 * Process message.  This happens in the worker thread.  The callback should
 * be careful not to do anything that waits on other incoming messages or it
 * may deadlock.
 */
static void process_message(struct ceph_connection *con)
{
	struct ceph_msg *msg;

	msg = con->in_msg;
	con->in_msg = NULL;

	/* if first message, set peer_name */
	if (con->peer_name.type == 0)
		con->peer_name = msg->hdr.src;

	con->in_seq++;
	mutex_unlock(&con->mutex);

	dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
	     msg, le64_to_cpu(msg->hdr.seq),
	     ENTITY_NAME(msg->hdr.src),
	     le16_to_cpu(msg->hdr.type),
	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
	     le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.data_len),
	     con->in_front_crc, con->in_middle_crc, con->in_data_crc);
	con->ops->dispatch(con, msg);

	mutex_lock(&con->mutex);
	prepare_read_tag(con);
}
/*
 * Write something to the socket.  Called in a worker thread when the
 * socket appears to be writeable and we have something ready to send.
 */
static int try_write(struct ceph_connection *con)
{
	int ret = 1;

	dout("try_write start %p state %lu nref %d\n", con, con->state,
	     atomic_read(&con->nref));

more:
	dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);

	/* open the socket first? */
	if (con->sock == NULL) {
		ceph_con_out_kvec_reset(con);
		prepare_write_banner(con);
		ret = prepare_write_connect(con);
		if (ret < 0)
			goto out;
		prepare_read_banner(con);
		set_bit(CONNECTING, &con->state);
		clear_bit(NEGOTIATING, &con->state);

		BUG_ON(con->in_msg);
		con->in_tag = CEPH_MSGR_TAG_READY;
		dout("try_write initiating connect on %p new state %lu\n",
		     con, con->state);
		ret = ceph_tcp_connect(con);
		if (ret < 0) {
			con->error_msg = "connect error";
			goto out;
		}
	}

more_kvec:
	/* kvec data queued? */
	if (con->out_skip) {
		ret = write_partial_skip(con);
		if (ret <= 0)
			goto out;
	}
	if (con->out_kvec_left) {
		ret = write_partial_kvec(con);
		if (ret <= 0)
			goto out;
	}

	/* msg pages? */
	if (con->out_msg) {
		if (con->out_msg_done) {
			ceph_msg_put(con->out_msg);
			con->out_msg = NULL;   /* we're done with this one */
			goto do_next;
		}

		ret = write_partial_msg_pages(con);
		if (ret == 1)
			goto more_kvec;  /* we need to send the footer, too! */
		if (ret == 0)
			goto out;
		if (ret < 0) {
			dout("try_write write_partial_msg_pages err %d\n",
			     ret);
			goto out;
		}
	}

do_next:
	if (!test_bit(CONNECTING, &con->state)) {
		/* is anything else pending? */
		if (!list_empty(&con->out_queue)) {
			prepare_write_message(con);
			goto more;
		}
		if (con->in_seq > con->in_seq_acked) {
			prepare_write_ack(con);
			goto more;
		}
		if (test_and_clear_bit(KEEPALIVE_PENDING, &con->state)) {
			prepare_write_keepalive(con);
			goto more;
		}
	}

	/* Nothing to do! */
	clear_bit(WRITE_PENDING, &con->state);
	dout("try_write nothing else to write.\n");
	ret = 0;
out:
	dout("try_write done on %p ret %d\n", con, ret);
	return ret;
}
/*
 * Read what we can from the socket.
 */
static int try_read(struct ceph_connection *con)
{
	int ret = -1;

	if (!con->sock)
		return 0;

	if (test_bit(STANDBY, &con->state))
		return 0;

	dout("try_read start on %p\n", con);

more:
	dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
	     con->in_base_pos);

	/*
	 * process_connect and process_message drop and re-take
	 * con->mutex.  make sure we handle a racing close or reopen.
	 */
	if (test_bit(CLOSED, &con->state) ||
	    test_bit(OPENING, &con->state)) {
		ret = -EAGAIN;
		goto out;
	}

	if (test_bit(CONNECTING, &con->state)) {
		if (!test_bit(NEGOTIATING, &con->state)) {
			dout("try_read connecting\n");
			ret = read_partial_banner(con);
			if (ret <= 0)
				goto out;
			ret = process_banner(con);
			if (ret < 0)
				goto out;
		}
		ret = read_partial_connect(con);
		if (ret <= 0)
			goto out;
		ret = process_connect(con);
		if (ret < 0)
			goto out;
		goto more;
	}

	if (con->in_base_pos < 0) {
		/*
		 * skipping + discarding content.
		 *
		 * FIXME: there must be a better way to do this!
		 */
		static char buf[SKIP_BUF_SIZE];
		int skip = min((int) sizeof (buf), -con->in_base_pos);

		dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
		ret = ceph_tcp_recvmsg(con->sock, buf, skip);
		if (ret <= 0)
			goto out;
		con->in_base_pos += ret;
		if (con->in_base_pos)
			goto more;
	}
	if (con->in_tag == CEPH_MSGR_TAG_READY) {
		/*
		 * what's next?
		 */
		ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
		if (ret <= 0)
			goto out;
		dout("try_read got tag %d\n", (int)con->in_tag);
		switch (con->in_tag) {
		case CEPH_MSGR_TAG_MSG:
			prepare_read_message(con);
			break;
		case CEPH_MSGR_TAG_ACK:
			prepare_read_ack(con);
			break;
		case CEPH_MSGR_TAG_CLOSE:
			set_bit(CLOSED, &con->state);   /* fixme */
			goto out;
		default:
			goto bad_tag;
		}
	}
	if (con->in_tag == CEPH_MSGR_TAG_MSG) {
		ret = read_partial_message(con);
		if (ret <= 0) {
			switch (ret) {
			case -EBADMSG:
				con->error_msg = "bad crc";
				ret = -EIO;
				break;
			case -EIO:
				con->error_msg = "io error";
				break;
			}
			goto out;
		}
		if (con->in_tag == CEPH_MSGR_TAG_READY)
			goto more;
		process_message(con);
		goto more;
	}
	if (con->in_tag == CEPH_MSGR_TAG_ACK) {
		ret = read_partial_ack(con);
		if (ret <= 0)
			goto out;
		process_ack(con);
		goto more;
	}

out:
	dout("try_read done on %p ret %d\n", con, ret);
	return ret;

bad_tag:
	pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
	con->error_msg = "protocol error, garbage tag";
	ret = -1;
	goto out;
}
/*
 * Atomically queue work on a connection.  Bump @con reference to
 * avoid races with connection teardown.
 */
static void queue_con(struct ceph_connection *con)
{
	if (!con->ops->get(con)) {
		dout("queue_con %p ref count 0\n", con);
		return;
	}

	if (!queue_delayed_work(ceph_msgr_wq, &con->work, 0)) {
		dout("queue_con %p - already queued\n", con);
		con->ops->put(con);
	} else {
		dout("queue_con %p\n", con);
	}
}
/*
 * Do some work on a connection.  Drop a connection ref when we're done.
 */
static void con_work(struct work_struct *work)
{
	struct ceph_connection *con = container_of(work, struct ceph_connection,
						   work.work);
	int ret;

	mutex_lock(&con->mutex);
restart:
	if (test_and_clear_bit(BACKOFF, &con->state)) {
		dout("con_work %p backing off\n", con);
		if (queue_delayed_work(ceph_msgr_wq, &con->work,
				       round_jiffies_relative(con->delay))) {
			dout("con_work %p backoff %lu\n", con, con->delay);
			mutex_unlock(&con->mutex);
			return;
		} else {
			con->ops->put(con);
			dout("con_work %p FAILED to back off %lu\n", con,
			     con->delay);
		}
	}

	if (test_bit(STANDBY, &con->state)) {
		dout("con_work %p STANDBY\n", con);
		goto done;
	}
	if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */
		dout("con_work CLOSED\n");
		con_close_socket(con);
		goto done;
	}
	if (test_and_clear_bit(OPENING, &con->state)) {
		/* reopen w/ new peer */
		dout("con_work OPENING\n");
		con_close_socket(con);
	}

	if (test_and_clear_bit(SOCK_CLOSED, &con->state))
		goto fault;

	ret = try_read(con);
	if (ret == -EAGAIN)
		goto restart;
	if (ret < 0)
		goto fault;

	ret = try_write(con);
	if (ret == -EAGAIN)
		goto restart;
	if (ret < 0)
		goto fault;

done:
	mutex_unlock(&con->mutex);
done_unlocked:
	con->ops->put(con);
	return;

fault:
	mutex_unlock(&con->mutex);
	ceph_fault(con);     /* error/fault path */
	goto done_unlocked;
}
/*
 * Generic error/fault handler.  A retry mechanism is used with
 * exponential backoff
 */
static void ceph_fault(struct ceph_connection *con)
{
	pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
	       ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
	dout("fault %p state %lu to peer %s\n",
	     con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));

	if (test_bit(LOSSYTX, &con->state)) {
		dout("fault on LOSSYTX channel\n");
		goto out;
	}

	mutex_lock(&con->mutex);
	if (test_bit(CLOSED, &con->state))
		goto out_unlock;

	con_close_socket(con);

	if (con->in_msg) {
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
	}

	/* Requeue anything that hasn't been acked */
	list_splice_init(&con->out_sent, &con->out_queue);

	/* If there are no messages queued or keepalive pending, place
	 * the connection in a STANDBY state */
	if (list_empty(&con->out_queue) &&
	    !test_bit(KEEPALIVE_PENDING, &con->state)) {
		dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
		clear_bit(WRITE_PENDING, &con->state);
		set_bit(STANDBY, &con->state);
	} else {
		/* retry after a delay. */
		if (con->delay == 0)
			con->delay = BASE_DELAY_INTERVAL;
		else if (con->delay < MAX_DELAY_INTERVAL)
			con->delay *= 2;
		con->ops->get(con);
		if (queue_delayed_work(ceph_msgr_wq, &con->work,
				       round_jiffies_relative(con->delay))) {
			dout("fault queued %p delay %lu\n", con, con->delay);
		} else {
			con->ops->put(con);
			dout("fault failed to queue %p delay %lu, backoff\n",
			     con, con->delay);
			/*
			 * In many cases we see a socket state change
			 * while con_work is running and end up
			 * queuing (non-delayed) work, such that we
			 * can't backoff with a delay.  Set a flag so
			 * that when con_work restarts we schedule the
			 * delay then.
			 */
			set_bit(BACKOFF, &con->state);
		}
	}

out_unlock:
	mutex_unlock(&con->mutex);
out:
	/*
	 * in case we faulted due to authentication, invalidate our
	 * current tickets so that we can get new ones.
	 */
	if (con->auth_retry && con->ops->invalidate_authorizer) {
		dout("calling invalidate_authorizer()\n");
		con->ops->invalidate_authorizer(con);
	}

	if (con->ops->fault)
		con->ops->fault(con);
}
/*
 * create a new messenger instance
 */
struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr,
					     u32 supported_features,
					     u32 required_features)
{
	struct ceph_messenger *msgr;

	msgr = kzalloc(sizeof(*msgr), GFP_KERNEL);
	if (msgr == NULL)
		return ERR_PTR(-ENOMEM);

	msgr->supported_features = supported_features;
	msgr->required_features = required_features;

	spin_lock_init(&msgr->global_seq_lock);

	if (myaddr)
		msgr->inst.addr = *myaddr;

	/* select a random nonce */
	msgr->inst.addr.type = 0;
	get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
	encode_my_addr(msgr);

	dout("messenger_create %p\n", msgr);
	return msgr;
}
EXPORT_SYMBOL(ceph_messenger_create);
void ceph_messenger_destroy(struct ceph_messenger *msgr)
{
	dout("destroy %p\n", msgr);
	kfree(msgr);
	dout("destroyed messenger %p\n", msgr);
}
EXPORT_SYMBOL(ceph_messenger_destroy);
static void clear_standby(struct ceph_connection *con)
{
	/* come back from STANDBY? */
	if (test_and_clear_bit(STANDBY, &con->state)) {
		mutex_lock(&con->mutex);
		dout("clear_standby %p and ++connect_seq\n", con);
		con->connect_seq++;
		WARN_ON(test_bit(WRITE_PENDING, &con->state));
		WARN_ON(test_bit(KEEPALIVE_PENDING, &con->state));
		mutex_unlock(&con->mutex);
	}
}
/*
 * Queue up an outgoing message on the given connection.
 */
void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
{
	if (test_bit(CLOSED, &con->state)) {
		dout("con_send %p closed, dropping %p\n", con, msg);
		ceph_msg_put(msg);
		return;
	}

	/* set src+dst */
	msg->hdr.src = con->msgr->inst.name;

	BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));

	msg->needs_out_seq = true;

	/* queue */
	mutex_lock(&con->mutex);
	BUG_ON(!list_empty(&msg->list_head));
	list_add_tail(&msg->list_head, &con->out_queue);
	dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
	     ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
	     le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.middle_len),
	     le32_to_cpu(msg->hdr.data_len));
	mutex_unlock(&con->mutex);

	/* if there wasn't anything waiting to send before, queue
	 * new work */
	clear_standby(con);
	if (test_and_set_bit(WRITE_PENDING, &con->state) == 0)
		queue_con(con);
}
EXPORT_SYMBOL(ceph_con_send);
/*
 * Revoke a message that was previously queued for send
 */
void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg)
{
	mutex_lock(&con->mutex);
	if (!list_empty(&msg->list_head)) {
		dout("con_revoke %p msg %p - was on queue\n", con, msg);
		list_del_init(&msg->list_head);
		ceph_msg_put(msg);
		msg->hdr.seq = 0;
	}
	if (con->out_msg == msg) {
		dout("con_revoke %p msg %p - was sending\n", con, msg);
		con->out_msg = NULL;
		if (con->out_kvec_is_msg) {
			con->out_skip = con->out_kvec_bytes;
			con->out_kvec_is_msg = false;
		}
		ceph_msg_put(msg);
		msg->hdr.seq = 0;
	}
	mutex_unlock(&con->mutex);
}
/*
 * Revoke a message that we may be reading data into
 */
void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
{
	mutex_lock(&con->mutex);
	if (con->in_msg && con->in_msg == msg) {
		unsigned front_len = le32_to_cpu(con->in_hdr.front_len);
		unsigned middle_len = le32_to_cpu(con->in_hdr.middle_len);
		unsigned data_len = le32_to_cpu(con->in_hdr.data_len);

		/* skip rest of message */
		dout("con_revoke_pages %p msg %p revoked\n", con, msg);
		con->in_base_pos = con->in_base_pos -
				sizeof(struct ceph_msg_header) -
				front_len -
				middle_len -
				data_len -
				sizeof(struct ceph_msg_footer);
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
		con->in_tag = CEPH_MSGR_TAG_READY;
		con->in_seq++;
	} else {
		dout("con_revoke_pages %p msg %p pages %p no-op\n",
		     con, con->in_msg, msg);
	}
	mutex_unlock(&con->mutex);
}
/*
 * Queue a keepalive byte to ensure the tcp connection is alive.
 */
void ceph_con_keepalive(struct ceph_connection *con)
{
	dout("con_keepalive %p\n", con);
	clear_standby(con);
	if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 &&
	    test_and_set_bit(WRITE_PENDING, &con->state) == 0)
		queue_con(con);
}
EXPORT_SYMBOL(ceph_con_keepalive);
/*
 * construct a new message with given type, size
 * the new msg has a ref count of 1.
 */
struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
			      bool can_fail)
{
	struct ceph_msg *m;

	m = kmalloc(sizeof(*m), flags);
	if (m == NULL)
		goto out;
	kref_init(&m->kref);
	INIT_LIST_HEAD(&m->list_head);

	m->hdr.tid = 0;
	m->hdr.type = cpu_to_le16(type);
	m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
	m->hdr.version = 0;
	m->hdr.front_len = cpu_to_le32(front_len);
	m->hdr.middle_len = 0;
	m->hdr.data_len = 0;
	m->hdr.data_off = 0;
	m->hdr.reserved = 0;
	m->footer.front_crc = 0;
	m->footer.middle_crc = 0;
	m->footer.data_crc = 0;
	m->footer.flags = 0;
	m->front_max = front_len;
	m->front_is_vmalloc = false;
	m->more_to_follow = false;
	m->ack_stamp = 0;
	m->pool = NULL;

	/* middle */
	m->middle = NULL;

	/* data */
	m->nr_pages = 0;
	m->page_alignment = 0;
	m->pages = NULL;
	m->pagelist = NULL;
	m->bio = NULL;
	m->bio_iter = NULL;
	m->bio_seg = 0;
	m->trail = NULL;

	/* front */
	if (front_len) {
		if (front_len > PAGE_CACHE_SIZE) {
			m->front.iov_base = __vmalloc(front_len, flags,
						      PAGE_KERNEL);
			m->front_is_vmalloc = true;
		} else {
			m->front.iov_base = kmalloc(front_len, flags);
		}
		if (m->front.iov_base == NULL) {
			dout("ceph_msg_new can't allocate %d bytes\n",
			     front_len);
			goto out2;
		}
	} else {
		m->front.iov_base = NULL;
	}
	m->front.iov_len = front_len;

	dout("ceph_msg_new %p front %d\n", m, front_len);
	return m;

out2:
	ceph_msg_put(m);
out:
	if (!can_fail) {
		pr_err("msg_new can't create type %d front %d\n", type,
		       front_len);
		WARN_ON(1);
	} else {
		dout("msg_new can't create type %d front %d\n", type,
		     front_len);
	}
	return NULL;
}
EXPORT_SYMBOL(ceph_msg_new);
/*
 * Allocate "middle" portion of a message, if it is needed and wasn't
 * allocated by alloc_msg.  This allows us to read a small fixed-size
 * per-type header in the front and then gracefully fail (i.e.,
 * propagate the error to the caller based on info in the front) when
 * the middle is too large.
 */
static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
{
	int type = le16_to_cpu(msg->hdr.type);
	int middle_len = le32_to_cpu(msg->hdr.middle_len);

	dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
	     ceph_msg_type_name(type), middle_len);
	BUG_ON(!middle_len);
	BUG_ON(msg->middle);

	msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
	if (!msg->middle)
		return -ENOMEM;
	return 0;
}
/*
 * Generic message allocator, for incoming messages.
 */
static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
				struct ceph_msg_header *hdr,
				int *skip)
{
	int type = le16_to_cpu(hdr->type);
	int front_len = le32_to_cpu(hdr->front_len);
	int middle_len = le32_to_cpu(hdr->middle_len);
	struct ceph_msg *msg = NULL;
	int ret;

	if (con->ops->alloc_msg) {
		mutex_unlock(&con->mutex);
		msg = con->ops->alloc_msg(con, hdr, skip);
		mutex_lock(&con->mutex);
		if (!msg || *skip)
			return NULL;
	}
	if (!msg) {
		*skip = 0;
		msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
		if (!msg) {
			pr_err("unable to allocate msg type %d len %d\n",
			       type, front_len);
			return NULL;
		}
		msg->page_alignment = le16_to_cpu(hdr->data_off);
	}
	memcpy(&msg->hdr, &con->in_hdr, sizeof(con->in_hdr));

	if (middle_len && !msg->middle) {
		ret = ceph_alloc_middle(con, msg);
		if (ret < 0) {
			ceph_msg_put(msg);
			return NULL;
		}
	}

	return msg;
}
/*
 * Free a generically kmalloc'd message.
 */
void ceph_msg_kfree(struct ceph_msg *m)
{
	dout("msg_kfree %p\n", m);
	if (m->front_is_vmalloc)
		vfree(m->front.iov_base);
	else
		kfree(m->front.iov_base);
	kfree(m);
}
/*
 * Drop a msg ref.  Destroy as needed.
 */
void ceph_msg_last_put(struct kref *kref)
{
	struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);

	dout("ceph_msg_put last one on %p\n", m);
	WARN_ON(!list_empty(&m->list_head));

	/* drop middle, data, if any */
	if (m->middle) {
		ceph_buffer_put(m->middle);
		m->middle = NULL;
	}
	m->nr_pages = 0;
	m->pages = NULL;

	if (m->pagelist) {
		ceph_pagelist_release(m->pagelist);
		kfree(m->pagelist);
		m->pagelist = NULL;
	}

	m->trail = NULL;

	if (m->pool)
		ceph_msgpool_put(m->pool, m);
	else
		ceph_msg_kfree(m);
}
EXPORT_SYMBOL(ceph_msg_last_put);
void ceph_msg_dump(struct ceph_msg *msg)
{
	pr_debug("msg_dump %p (front_max %d nr_pages %d)\n", msg,
		 msg->front_max, msg->nr_pages);
	print_hex_dump(KERN_DEBUG, "header: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->hdr, sizeof(msg->hdr), true);
	print_hex_dump(KERN_DEBUG, " front: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       msg->front.iov_base, msg->front.iov_len, true);
	if (msg->middle)
		print_hex_dump(KERN_DEBUG, "middle: ",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       msg->middle->vec.iov_base,
			       msg->middle->vec.iov_len, true);
	print_hex_dump(KERN_DEBUG, "footer: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->footer, sizeof(msg->footer), true);
}
EXPORT_SYMBOL(ceph_msg_dump);