#include "ceph_debug.h"

#include <linux/crc32c.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/net.h>		/* sock_create_kern(), kernel_{send,recv}msg() */
#include <linux/socket.h>
#include <linux/string.h>
#include <net/tcp.h>		/* TCP_* socket states */

#include "messenger.h"
#include "decode.h"		/* ceph_encode_addr()/ceph_decode_addr() */
/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */
/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
static void queue_con(struct ceph_connection *con);
static void con_work(struct work_struct *);
static void ceph_fault(struct ceph_connection *con);
const char *ceph_name_type_str(int t)
{
	switch (t) {
	case CEPH_ENTITY_TYPE_MON: return "mon";
	case CEPH_ENTITY_TYPE_MDS: return "mds";
	case CEPH_ENTITY_TYPE_OSD: return "osd";
	case CEPH_ENTITY_TYPE_CLIENT: return "client";
	case CEPH_ENTITY_TYPE_ADMIN: return "admin";
	default: return "???";
	}
}
/*
 * nicely render a sockaddr as a string.
 */
#define MAX_ADDR_STR 20
static char addr_str[MAX_ADDR_STR][40];
static DEFINE_SPINLOCK(addr_str_lock);
static int last_addr_str;
const char *pr_addr(const struct sockaddr_storage *ss)
{
	int i;
	char *s;
	struct sockaddr_in *in4 = (void *)ss;
	unsigned char *quad = (void *)&in4->sin_addr.s_addr;
	struct sockaddr_in6 *in6 = (void *)ss;

	/* grab the next slot in the small round-robin of static buffers */
	spin_lock(&addr_str_lock);
	i = last_addr_str++;
	if (last_addr_str == MAX_ADDR_STR)
		last_addr_str = 0;
	spin_unlock(&addr_str_lock);
	s = addr_str[i];

	switch (ss->ss_family) {
	case AF_INET:
		sprintf(s, "%u.%u.%u.%u:%u",
			(unsigned int)quad[0],
			(unsigned int)quad[1],
			(unsigned int)quad[2],
			(unsigned int)quad[3],
			(unsigned int)ntohs(in4->sin_port));
		break;

	case AF_INET6:
		sprintf(s, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%u",
			in6->sin6_addr.s6_addr16[0],
			in6->sin6_addr.s6_addr16[1],
			in6->sin6_addr.s6_addr16[2],
			in6->sin6_addr.s6_addr16[3],
			in6->sin6_addr.s6_addr16[4],
			in6->sin6_addr.s6_addr16[5],
			in6->sin6_addr.s6_addr16[6],
			in6->sin6_addr.s6_addr16[7],
			(unsigned int)ntohs(in6->sin6_port));
		break;

	default:
		sprintf(s, "(unknown sockaddr family %d)", (int)ss->ss_family);
	}

	return s;
}
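/*
 * Example (illustrative): for an IPv4 peer 10.0.0.1 on port 6789,
 * pr_addr() returns "10.0.0.1:6789".  Returned strings rotate through
 * the MAX_ADDR_STR static slots, so a pointer is only safe to use
 * until roughly 20 subsequent pr_addr() calls.
 */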
static void encode_my_addr(struct ceph_messenger *msgr)
{
	memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
	ceph_encode_addr(&msgr->my_enc_addr);
}
/*
 * work queue for all reading and writing to/from the socket.
 */
struct workqueue_struct *ceph_msgr_wq;

int __init ceph_msgr_init(void)
{
	ceph_msgr_wq = create_workqueue("ceph-msgr");
	if (IS_ERR(ceph_msgr_wq)) {
		int ret = PTR_ERR(ceph_msgr_wq);
		pr_err("msgr_init failed to create workqueue: %d\n", ret);
		ceph_msgr_wq = NULL;
		return ret;
	}
	return 0;
}

void ceph_msgr_exit(void)
{
	destroy_workqueue(ceph_msgr_wq);
}
/*
 * socket callback functions
 */

/* data available on socket, or listen socket received a connect */
static void ceph_data_ready(struct sock *sk, int count_unused)
{
	struct ceph_connection *con =
		(struct ceph_connection *)sk->sk_user_data;

	if (sk->sk_state != TCP_CLOSE_WAIT) {
		dout("ceph_data_ready on %p state = %lu, queueing work\n",
		     con, con->state);
		queue_con(con);
	}
}
/* socket has buffer space for writing */
static void ceph_write_space(struct sock *sk)
{
	struct ceph_connection *con =
		(struct ceph_connection *)sk->sk_user_data;

	/* only queue to workqueue if there is data we want to write. */
	if (test_bit(WRITE_PENDING, &con->state)) {
		dout("ceph_write_space %p queueing write work\n", con);
		queue_con(con);
	} else {
		dout("ceph_write_space %p nothing to write\n", con);
	}

	/* since we have our own write_space, clear the SOCK_NOSPACE flag */
	clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
}
/* socket's state has changed */
static void ceph_state_change(struct sock *sk)
{
	struct ceph_connection *con =
		(struct ceph_connection *)sk->sk_user_data;

	dout("ceph_state_change %p state = %lu sk_state = %u\n",
	     con, con->state, sk->sk_state);

	if (test_bit(CLOSED, &con->state))
		return;

	switch (sk->sk_state) {
	case TCP_CLOSE:
		dout("ceph_state_change TCP_CLOSE\n");
		/* fall through */
	case TCP_CLOSE_WAIT:
		dout("ceph_state_change TCP_CLOSE_WAIT\n");
		if (test_and_set_bit(SOCK_CLOSED, &con->state) == 0) {
			if (test_bit(CONNECTING, &con->state))
				con->error_msg = "connection failed";
			else
				con->error_msg = "socket closed";
			queue_con(con);
		}
		break;
	case TCP_ESTABLISHED:
		dout("ceph_state_change TCP_ESTABLISHED\n");
		queue_con(con);
		break;
	}
}
/*
 * set up socket callbacks
 */
static void set_sock_callbacks(struct socket *sock,
			       struct ceph_connection *con)
{
	struct sock *sk = sock->sk;
	sk->sk_user_data = (void *)con;
	sk->sk_data_ready = ceph_data_ready;
	sk->sk_write_space = ceph_write_space;
	sk->sk_state_change = ceph_state_change;
}
/*
 * initiate connection to a remote socket.
 */
static struct socket *ceph_tcp_connect(struct ceph_connection *con)
{
	struct sockaddr *paddr = (struct sockaddr *)&con->peer_addr.in_addr;
	struct socket *sock;
	int ret;

	BUG_ON(con->sock);
	ret = sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (ret)
		return ERR_PTR(ret);
	con->sock = sock;
	sock->sk->sk_allocation = GFP_NOFS;

	set_sock_callbacks(sock, con);

	dout("connect %s\n", pr_addr(&con->peer_addr.in_addr));

	ret = sock->ops->connect(sock, paddr, sizeof(*paddr), O_NONBLOCK);
	if (ret == -EINPROGRESS) {
		dout("connect %s EINPROGRESS sk_state = %u\n",
		     pr_addr(&con->peer_addr.in_addr),
		     sock->sk->sk_state);
		ret = 0;
	}
	if (ret < 0) {
		pr_err("connect %s error %d\n",
		       pr_addr(&con->peer_addr.in_addr), ret);
		sock_release(sock);
		con->sock = NULL;
		con->error_msg = "connect error";
	}

	if (ret < 0)
		return ERR_PTR(ret);
	return sock;
}
static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
{
	struct kvec iov = {buf, len};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };

	return kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
}
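/*
 * Because we pass MSG_DONTWAIT, ceph_tcp_recvmsg() can return fewer
 * bytes than requested or -EAGAIN once the socket is drained.  A
 * caller-side sketch of the pattern used throughout this file (the
 * read cursor lives on the connection, not on the stack):
 *
 *	ret = ceph_tcp_recvmsg(con->sock, buf + have, want - have);
 *	if (ret <= 0)
 *		return ret;	// try again when data_ready fires
 *	have += ret;		// partial progress is remembered
 */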
/*
 * write something.  @more is true if caller will be sending more data
 * shortly.
 */
static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
			    size_t kvlen, size_t len, int more)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };

	if (more)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */

	return kernel_sendmsg(sock, &msg, iov, kvlen, len);
}
/*
 * Shutdown/close the socket for the given connection.
 */
static int con_close_socket(struct ceph_connection *con)
{
	int rc;

	dout("con_close_socket on %p sock %p\n", con, con->sock);
	if (!con->sock)
		return 0;
	set_bit(SOCK_CLOSED, &con->state);
	rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
	sock_release(con->sock);
	con->sock = NULL;
	clear_bit(SOCK_CLOSED, &con->state);
	return rc;
}
/*
 * Reset a connection.  Discard all incoming and outgoing messages
 * and clear *_seq state.
 */
static void ceph_msg_remove(struct ceph_msg *msg)
{
	list_del_init(&msg->list_head);
	ceph_msg_put(msg);
}

static void ceph_msg_remove_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
							list_head);
		ceph_msg_remove(msg);
	}
}
static void reset_connection(struct ceph_connection *con)
{
	/* reset connection, out_queue, msg_ and connect_seq */
	/* discard existing out_queue and msg_seq */
	ceph_msg_remove_list(&con->out_queue);
	ceph_msg_remove_list(&con->out_sent);

	if (con->in_msg) {
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
	}

	con->connect_seq = 0;
	con->out_seq = 0;
	if (con->out_msg) {
		ceph_msg_put(con->out_msg);
		con->out_msg = NULL;
	}
	con->in_seq = 0;
	con->in_seq_acked = 0;
}
/*
 * mark a peer down.  drop any open connections.
 */
void ceph_con_close(struct ceph_connection *con)
{
	dout("con_close %p peer %s\n", con, pr_addr(&con->peer_addr.in_addr));
	set_bit(CLOSED, &con->state);  /* in case there's queued work */
	clear_bit(STANDBY, &con->state);  /* avoid connect_seq bump */
	mutex_lock(&con->mutex);
	reset_connection(con);
	mutex_unlock(&con->mutex);
	queue_con(con);
}
/*
 * Reopen a closed connection, with a new peer address.
 */
void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr)
{
	dout("con_open %p %s\n", con, pr_addr(&addr->in_addr));
	set_bit(OPENING, &con->state);
	clear_bit(CLOSED, &con->state);
	memcpy(&con->peer_addr, addr, sizeof(*addr));
	con->delay = 0;      /* reset backoff memory */
	queue_con(con);
}
/*
 * generic get/put
 */
struct ceph_connection *ceph_con_get(struct ceph_connection *con)
{
	dout("con_get %p nref = %d -> %d\n", con,
	     atomic_read(&con->nref), atomic_read(&con->nref) + 1);
	if (atomic_inc_not_zero(&con->nref))
		return con;
	return NULL;
}

void ceph_con_put(struct ceph_connection *con)
{
	dout("con_put %p nref = %d -> %d\n", con,
	     atomic_read(&con->nref), atomic_read(&con->nref) - 1);
	BUG_ON(atomic_read(&con->nref) == 0);
	if (atomic_dec_and_test(&con->nref)) {
		BUG_ON(con->sock);
		kfree(con);
	}
}
/*
 * initialize a new connection.
 */
void ceph_con_init(struct ceph_messenger *msgr, struct ceph_connection *con)
{
	dout("con_init %p\n", con);
	memset(con, 0, sizeof(*con));
	atomic_set(&con->nref, 1);
	con->msgr = msgr;
	mutex_init(&con->mutex);
	INIT_LIST_HEAD(&con->out_queue);
	INIT_LIST_HEAD(&con->out_sent);
	INIT_DELAYED_WORK(&con->work, con_work);
}
/*
 * We maintain a global counter to order connection attempts.  Get
 * a unique seq greater than @gt.
 */
static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
{
	u32 ret;

	spin_lock(&msgr->global_seq_lock);
	if (msgr->global_seq < gt)
		msgr->global_seq = gt;
	ret = ++msgr->global_seq;
	spin_unlock(&msgr->global_seq_lock);
	return ret;
}
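/*
 * Example: if our global_seq is 5 and a peer rejects our connect with
 * RETRY_GLOBAL and gseq 12 (see process_connect() below), calling
 * get_global_seq(msgr, 12) advances the counter to 13, so the next
 * attempt presents a strictly larger value.
 */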
/*
 * Prepare footer for currently outgoing message, and finish things
 * off.  Assumes out_kvec* are already valid.. we just add on to the end.
 */
static void prepare_write_message_footer(struct ceph_connection *con, int v)
{
	struct ceph_msg *m = con->out_msg;

	dout("prepare_write_message_footer %p\n", con);
	con->out_kvec_is_msg = true;
	con->out_kvec[v].iov_base = &m->footer;
	con->out_kvec[v].iov_len = sizeof(m->footer);
	con->out_kvec_bytes += sizeof(m->footer);
	con->out_kvec_left++;
	con->out_more = m->more_to_follow;
	con->out_msg_done = true;
}
/*
 * Prepare headers for the next outgoing message.
 */
static void prepare_write_message(struct ceph_connection *con)
{
	struct ceph_msg *m;
	int v = 0;

	con->out_kvec_bytes = 0;
	con->out_kvec_is_msg = true;
	con->out_msg_done = false;

	/* Sneak an ack in there first?  If we can get it into the same
	 * TCP packet that's a good thing. */
	if (con->in_seq > con->in_seq_acked) {
		con->in_seq_acked = con->in_seq;
		con->out_kvec[v].iov_base = &tag_ack;
		con->out_kvec[v++].iov_len = 1;
		con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
		con->out_kvec[v].iov_base = &con->out_temp_ack;
		con->out_kvec[v++].iov_len = sizeof(con->out_temp_ack);
		con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack);
	}

	m = list_first_entry(&con->out_queue,
			     struct ceph_msg, list_head);
	con->out_msg = m;
	if (test_bit(LOSSYTX, &con->state)) {
		/* put message on sent list */
		ceph_msg_get(m);
		list_move_tail(&m->list_head, &con->out_sent);
	} else {
		list_del_init(&m->list_head);
	}

	m->hdr.seq = cpu_to_le64(++con->out_seq);

	dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
	     m, con->out_seq, le16_to_cpu(m->hdr.type),
	     le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
	     le32_to_cpu(m->hdr.data_len),
	     m->nr_pages);
	BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);

	/* tag + hdr + front + middle */
	con->out_kvec[v].iov_base = &tag_msg;
	con->out_kvec[v++].iov_len = 1;
	con->out_kvec[v].iov_base = &m->hdr;
	con->out_kvec[v++].iov_len = sizeof(m->hdr);
	con->out_kvec[v++] = m->front;
	if (m->middle)
		con->out_kvec[v++] = m->middle->vec;
	con->out_kvec_left = v;
	con->out_kvec_bytes += 1 + sizeof(m->hdr) + m->front.iov_len +
		(m->middle ? m->middle->vec.iov_len : 0);
	con->out_kvec_cur = con->out_kvec;

	/* fill in crc (except data pages), footer */
	con->out_msg->hdr.crc =
		cpu_to_le32(crc32c(0, (void *)&m->hdr,
				   sizeof(m->hdr) - sizeof(m->hdr.crc)));
	con->out_msg->footer.flags = CEPH_MSG_FOOTER_COMPLETE;
	con->out_msg->footer.front_crc =
		cpu_to_le32(crc32c(0, m->front.iov_base, m->front.iov_len));
	if (m->middle)
		con->out_msg->footer.middle_crc =
			cpu_to_le32(crc32c(0, m->middle->vec.iov_base,
					   m->middle->vec.iov_len));
	else
		con->out_msg->footer.middle_crc = 0;
	con->out_msg->footer.data_crc = 0;
	dout("prepare_write_message front_crc %u data_crc %u\n",
	     le32_to_cpu(con->out_msg->footer.front_crc),
	     le32_to_cpu(con->out_msg->footer.middle_crc));

	/* is there a data payload? */
	if (le32_to_cpu(m->hdr.data_len) > 0) {
		/* initialize page iterator */
		con->out_msg_pos.page = 0;
		con->out_msg_pos.page_pos =
			le16_to_cpu(m->hdr.data_off) & ~PAGE_MASK;
		con->out_msg_pos.data_pos = 0;
		con->out_msg_pos.did_page_crc = 0;
		con->out_more = 1;  /* data + footer will follow */
	} else {
		/* no, queue up footer too and be done */
		prepare_write_message_footer(con, v);
	}

	set_bit(WRITE_PENDING, &con->state);
}
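/*
 * Sketch of the resulting wire frame.  The kvecs queued above cover
 * everything except the data payload, which write_partial_msg_pages()
 * sends directly from the page vector:
 *
 *   [tag_msg 1][ceph_msg_header][front][middle?] ...data pages... [footer]
 *
 * optionally preceded by a piggybacked [tag_ack 1][le64 seq] when
 * in_seq > in_seq_acked.
 */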
/*
 * Prepare an ack.
 */
static void prepare_write_ack(struct ceph_connection *con)
{
	dout("prepare_write_ack %p %llu -> %llu\n", con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	con->out_kvec[0].iov_base = &tag_ack;
	con->out_kvec[0].iov_len = 1;
	con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
	con->out_kvec[1].iov_base = &con->out_temp_ack;
	con->out_kvec[1].iov_len = sizeof(con->out_temp_ack);
	con->out_kvec_left = 2;
	con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack);
	con->out_kvec_cur = con->out_kvec;
	con->out_more = 1;  /* more will follow.. eventually.. */
	set_bit(WRITE_PENDING, &con->state);
}
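/* A standalone ack frame is just [tag_ack 1 byte][le64 seq]: 9 bytes. */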
/*
 * Prepare to write keepalive byte.
 */
static void prepare_write_keepalive(struct ceph_connection *con)
{
	dout("prepare_write_keepalive %p\n", con);
	con->out_kvec[0].iov_base = &tag_keepalive;
	con->out_kvec[0].iov_len = 1;
	con->out_kvec_left = 1;
	con->out_kvec_bytes = 1;
	con->out_kvec_cur = con->out_kvec;
	set_bit(WRITE_PENDING, &con->state);
}
/*
 * Connection negotiation.
 */

static void prepare_connect_authorizer(struct ceph_connection *con)
{
	void *auth_buf;
	int auth_len = 0;
	int auth_protocol = 0;

	mutex_unlock(&con->mutex);
	if (con->ops->get_authorizer)
		con->ops->get_authorizer(con, &auth_buf, &auth_len,
					 &auth_protocol, &con->auth_reply_buf,
					 &con->auth_reply_buf_len,
					 con->auth_retry);
	mutex_lock(&con->mutex);

	con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol);
	con->out_connect.authorizer_len = cpu_to_le32(auth_len);

	con->out_kvec[con->out_kvec_left].iov_base = auth_buf;
	con->out_kvec[con->out_kvec_left].iov_len = auth_len;
	con->out_kvec_left++;
	con->out_kvec_bytes += auth_len;
}
/*
 * We connected to a peer and are saying hello.
 */
static void prepare_write_banner(struct ceph_messenger *msgr,
				 struct ceph_connection *con)
{
	int len = strlen(CEPH_BANNER);

	con->out_kvec[0].iov_base = CEPH_BANNER;
	con->out_kvec[0].iov_len = len;
	con->out_kvec[1].iov_base = &msgr->my_enc_addr;
	con->out_kvec[1].iov_len = sizeof(msgr->my_enc_addr);
	con->out_kvec_left = 2;
	con->out_kvec_bytes = len + sizeof(msgr->my_enc_addr);
	con->out_kvec_cur = con->out_kvec;
	con->out_more = 0;
	set_bit(WRITE_PENDING, &con->state);
}
static void prepare_write_connect(struct ceph_messenger *msgr,
				  struct ceph_connection *con,
				  int after_banner)
{
	unsigned global_seq = get_global_seq(con->msgr, 0);
	int proto;

	switch (con->peer_name.type) {
	case CEPH_ENTITY_TYPE_MON:
		proto = CEPH_MONC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_OSD:
		proto = CEPH_OSDC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_MDS:
		proto = CEPH_MDSC_PROTOCOL;
		break;
	default:
		BUG();
	}

	dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
	     con->connect_seq, global_seq, proto);

	con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
	con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
	con->out_connect.global_seq = cpu_to_le32(global_seq);
	con->out_connect.protocol_version = cpu_to_le32(proto);
	con->out_connect.flags = 0;

	if (!after_banner) {
		con->out_kvec_left = 0;
		con->out_kvec_bytes = 0;
	}
	con->out_kvec[con->out_kvec_left].iov_base = &con->out_connect;
	con->out_kvec[con->out_kvec_left].iov_len = sizeof(con->out_connect);
	con->out_kvec_left++;
	con->out_kvec_bytes += sizeof(con->out_connect);
	con->out_kvec_cur = con->out_kvec;
	con->out_more = 0;
	set_bit(WRITE_PENDING, &con->state);

	prepare_connect_authorizer(con);
}
/*
 * write as much of pending kvecs to the socket as we can.
 *  1 -> done
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_kvec(struct ceph_connection *con)
{
	int ret;

	dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
	while (con->out_kvec_bytes > 0) {
		ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
				       con->out_kvec_left, con->out_kvec_bytes,
				       con->out_more);
		if (ret <= 0)
			goto out;
		con->out_kvec_bytes -= ret;
		if (con->out_kvec_bytes == 0)
			break;            /* done */
		while (ret > 0) {
			if (ret >= con->out_kvec_cur->iov_len) {
				ret -= con->out_kvec_cur->iov_len;
				con->out_kvec_cur++;
				con->out_kvec_left--;
			} else {
				con->out_kvec_cur->iov_len -= ret;
				con->out_kvec_cur->iov_base += ret;
				ret = 0;
			}
		}
	}
	con->out_kvec_left = 0;
	con->out_kvec_is_msg = false;
	ret = 1;
out:
	dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
	     con->out_kvec_bytes, con->out_kvec_left, ret);
	return ret;  /* done! */
}
/*
 * Write as much message data payload as we can.  If we finish, queue
 * up the footer.
 *  1 -> done, footer is now queued in out_kvec[].
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_msg_pages(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->out_msg;
	unsigned data_len = le32_to_cpu(msg->hdr.data_len);
	size_t len;
	int crc = !con->msgr->nocrc;	/* compute data crc unless disabled */
	int ret;

	dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
	     con, con->out_msg, con->out_msg_pos.page, con->out_msg->nr_pages,
	     con->out_msg_pos.page_pos);

	while (con->out_msg_pos.page < con->out_msg->nr_pages) {
		struct page *page = NULL;
		void *kaddr = NULL;

		/*
		 * if we are calculating the data crc (the default), we need
		 * to map the page.  if our pages[] has been revoked, use the
		 * zero page.
		 */
		if (msg->pages) {
			page = msg->pages[con->out_msg_pos.page];
			if (crc)
				kaddr = kmap(page);
		} else {
			page = con->msgr->zero_page;
			if (crc)
				kaddr = page_address(con->msgr->zero_page);
		}
		len = min((int)(PAGE_SIZE - con->out_msg_pos.page_pos),
			  (int)(data_len - con->out_msg_pos.data_pos));
		if (crc && !con->out_msg_pos.did_page_crc) {
			void *base = kaddr + con->out_msg_pos.page_pos;
			u32 tmpcrc = le32_to_cpu(con->out_msg->footer.data_crc);

			BUG_ON(kaddr == NULL);
			con->out_msg->footer.data_crc =
				cpu_to_le32(crc32c(tmpcrc, base, len));
			con->out_msg_pos.did_page_crc = 1;
		}

		ret = kernel_sendpage(con->sock, page,
				      con->out_msg_pos.page_pos, len,
				      MSG_DONTWAIT | MSG_NOSIGNAL |
				      MSG_MORE);

		if (crc && msg->pages)
			kunmap(page);

		if (ret <= 0)
			goto out;
		con->out_msg_pos.data_pos += ret;
		con->out_msg_pos.page_pos += ret;
		if (ret == len) {
			con->out_msg_pos.page_pos = 0;
			con->out_msg_pos.page++;
			con->out_msg_pos.did_page_crc = 0;
		}
	}

	dout("write_partial_msg_pages %p msg %p done\n", con, msg);

	/* prepare and queue up footer, too */
	if (!crc)
		con->out_msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
	con->out_kvec_bytes = 0;
	con->out_kvec_left = 0;
	con->out_kvec_cur = con->out_kvec;
	prepare_write_message_footer(con, 0);
	ret = 1;
out:
	return ret;
}
/*
 * write some zeros
 */
static int write_partial_skip(struct ceph_connection *con)
{
	int ret;

	while (con->out_skip > 0) {
		struct kvec iov = {
			.iov_base = page_address(con->msgr->zero_page),
			.iov_len = min(con->out_skip, (int)PAGE_CACHE_SIZE)
		};

		ret = ceph_tcp_sendmsg(con->sock, &iov, 1, iov.iov_len, 1);
		if (ret <= 0)
			goto out;
		con->out_skip -= ret;
	}
	ret = 1;
out:
	return ret;
}
/*
 * Prepare to read connection handshake, or an ack.
 */
static void prepare_read_banner(struct ceph_connection *con)
{
	dout("prepare_read_banner %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_connect(struct ceph_connection *con)
{
	dout("prepare_read_connect %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_connect_retry(struct ceph_connection *con)
{
	dout("prepare_read_connect_retry %p\n", con);
	con->in_base_pos = strlen(CEPH_BANNER) + sizeof(con->actual_peer_addr)
		+ sizeof(con->peer_addr_for_me);
}

static void prepare_read_ack(struct ceph_connection *con)
{
	dout("prepare_read_ack %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_tag(struct ceph_connection *con)
{
	dout("prepare_read_tag %p\n", con);
	con->in_base_pos = 0;
	con->in_tag = CEPH_MSGR_TAG_READY;
}
/*
 * Prepare to read a message.
 */
static int prepare_read_message(struct ceph_connection *con)
{
	dout("prepare_read_message %p\n", con);
	BUG_ON(con->in_msg != NULL);
	con->in_base_pos = 0;
	con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
	return 0;
}
static int read_partial(struct ceph_connection *con,
			int *to, int size, void *object)
{
	*to += size;
	while (con->in_base_pos < *to) {
		int left = *to - con->in_base_pos;
		int have = size - left;
		int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
	}
	return 1;
}
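/*
 * Example: read_partial_banner() below strings several read_partial()
 * calls together with one cumulative 'to' cursor:
 *
 *	int to = 0;
 *	read_partial(con, &to, strlen(CEPH_BANNER), con->in_banner);
 *	read_partial(con, &to, sizeof(con->actual_peer_addr), ...);
 *
 * Each call bumps 'to' by its size; overall progress lives in
 * con->in_base_pos, so a short read resumes in the right sub-field
 * on the next worker pass.
 */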
/*
 * Read all or part of the connect-side handshake on a new connection
 */
static int read_partial_banner(struct ceph_connection *con)
{
	int ret, to = 0;

	dout("read_partial_banner %p at %d\n", con, con->in_base_pos);

	/* peer's banner */
	ret = read_partial(con, &to, strlen(CEPH_BANNER), con->in_banner);
	if (ret <= 0)
		goto out;
	ret = read_partial(con, &to, sizeof(con->actual_peer_addr),
			   &con->actual_peer_addr);
	if (ret <= 0)
		goto out;
	ret = read_partial(con, &to, sizeof(con->peer_addr_for_me),
			   &con->peer_addr_for_me);
out:
	return ret;
}
static int read_partial_connect(struct ceph_connection *con)
{
	int ret, to = 0;

	dout("read_partial_connect %p at %d\n", con, con->in_base_pos);

	ret = read_partial(con, &to, sizeof(con->in_reply), &con->in_reply);
	if (ret <= 0)
		goto out;
	ret = read_partial(con, &to, le32_to_cpu(con->in_reply.authorizer_len),
			   con->auth_reply_buf);
	if (ret <= 0)
		goto out;

	dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
	     con, (int)con->in_reply.tag,
	     le32_to_cpu(con->in_reply.connect_seq),
	     le32_to_cpu(con->in_reply.global_seq));
out:
	return ret;
}
/*
 * Verify the hello banner looks okay.
 */
static int verify_hello(struct ceph_connection *con)
{
	if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
		pr_err("connect to %s got bad banner\n",
		       pr_addr(&con->peer_addr.in_addr));
		con->error_msg = "protocol error, bad banner";
		return -1;
	}
	return 0;
}
static bool addr_is_blank(struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
	case AF_INET6:
		return
		    ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
		    ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
		    ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
		    ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
	}
	return false;
}
static int addr_port(struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)ss)->sin_port);
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
	}
	return 0;
}
static void addr_set_port(struct sockaddr_storage *ss, int p)
{
	switch (ss->ss_family) {
	case AF_INET:
		((struct sockaddr_in *)ss)->sin_port = htons(p);
		break;
	case AF_INET6:
		((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
		break;
	}
}
/*
 * Parse an ip[:port] list into an addr array.  Use the default
 * monitor port if a port isn't specified.
 */
int ceph_parse_ips(const char *c, const char *end,
		   struct ceph_entity_addr *addr,
		   int max_count, int *count)
{
	int i;
	const char *p = c;

	dout("parse_ips on '%.*s'\n", (int)(end-c), c);
	for (i = 0; i < max_count; i++) {
		const char *ipend;
		struct sockaddr_storage *ss = &addr[i].in_addr;
		struct sockaddr_in *in4 = (void *)ss;
		struct sockaddr_in6 *in6 = (void *)ss;
		int port;

		memset(ss, 0, sizeof(*ss));
		if (in4_pton(p, end - p, (u8 *)&in4->sin_addr.s_addr,
			     ',', &ipend)) {
			ss->ss_family = AF_INET;
		} else if (in6_pton(p, end - p, (u8 *)&in6->sin6_addr.s6_addr,
				    ',', &ipend)) {
			ss->ss_family = AF_INET6;
		} else {
			goto bad;
		}
		p = ipend;

		/* port? */
		if (p < end && *p == ':') {
			port = 0;
			p++;
			while (p < end && *p >= '0' && *p <= '9') {
				port = (port * 10) + (*p - '0');
				p++;
			}
			if (port > 65535 || port == 0)
				goto bad;
		} else {
			port = CEPH_MON_PORT;
		}

		addr_set_port(ss, port);

		dout("parse_ips got %s\n", pr_addr(ss));

		if (p == end)
			break;
		if (*p != ',')
			goto bad;
		p++;
	}

	if (p != end)
		goto bad;

	if (count)
		*count = i + 1;
	return 0;

bad:
	pr_err("parse_ips bad ip '%s'\n", c);
	return -EINVAL;
}
static int process_banner(struct ceph_connection *con)
{
	dout("process_banner on %p\n", con);

	if (verify_hello(con) < 0)
		return -1;

	ceph_decode_addr(&con->actual_peer_addr);
	ceph_decode_addr(&con->peer_addr_for_me);

	/*
	 * Make sure the other end is who we wanted.  note that the other
	 * end may not yet know their ip address, so if it's 0.0.0.0, give
	 * them the benefit of the doubt.
	 */
	if (!ceph_entity_addr_is_local(&con->peer_addr,
				       &con->actual_peer_addr) &&
	    !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
	      con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
		pr_err("wrong peer, want %s/%d, "
		       "got %s/%d\n",
		       pr_addr(&con->peer_addr.in_addr),
		       con->peer_addr.nonce,
		       pr_addr(&con->actual_peer_addr.in_addr),
		       con->actual_peer_addr.nonce);
		con->error_msg = "protocol error, wrong peer";
		return -1;
	}

	/*
	 * did we learn our address?
	 */
	if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
		int port = addr_port(&con->msgr->inst.addr.in_addr);

		memcpy(&con->msgr->inst.addr.in_addr,
		       &con->peer_addr_for_me.in_addr,
		       sizeof(con->peer_addr_for_me.in_addr));
		addr_set_port(&con->msgr->inst.addr.in_addr, port);
		encode_my_addr(con->msgr);
		dout("process_banner learned my addr is %s\n",
		     pr_addr(&con->msgr->inst.addr.in_addr));
	}

	set_bit(NEGOTIATING, &con->state);
	prepare_read_connect(con);
	return 0;
}
static int process_connect(struct ceph_connection *con)
{
	dout("process_connect on %p tag %d\n", con, (int)con->in_tag);

	switch (con->in_reply.tag) {
	case CEPH_MSGR_TAG_BADPROTOVER:
		dout("process_connect got BADPROTOVER my %d != their %d\n",
		     le32_to_cpu(con->out_connect.protocol_version),
		     le32_to_cpu(con->in_reply.protocol_version));
		pr_err("%s%lld %s protocol version mismatch,"
		       " my %d != server's %d\n",
		       ENTITY_NAME(con->peer_name),
		       pr_addr(&con->peer_addr.in_addr),
		       le32_to_cpu(con->out_connect.protocol_version),
		       le32_to_cpu(con->in_reply.protocol_version));
		con->error_msg = "protocol version mismatch";
		reset_connection(con);
		set_bit(CLOSED, &con->state);  /* in case there's queued work */

		mutex_unlock(&con->mutex);
		if (con->ops->bad_proto)
			con->ops->bad_proto(con);
		mutex_lock(&con->mutex);
		break;

	case CEPH_MSGR_TAG_BADAUTHORIZER:
		con->auth_retry++;
		dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
		     con->auth_retry);
		if (con->auth_retry == 2) {
			con->error_msg = "connect authorization failure";
			reset_connection(con);
			set_bit(CLOSED, &con->state);
			return -1;
		}
		con->auth_retry = 1;
		prepare_write_connect(con->msgr, con, 0);
		prepare_read_connect_retry(con);
		break;

	case CEPH_MSGR_TAG_RESETSESSION:
		/*
		 * If we connected with a large connect_seq but the peer
		 * has no record of a session with us (no connection, or
		 * connect_seq == 0), they will send RESETSESION to indicate
		 * that they must have reset their session, and may have
		 * dropped messages.
		 */
		dout("process_connect got RESET peer seq %u\n",
		     le32_to_cpu(con->in_connect.connect_seq));
		pr_err("%s%lld %s connection reset\n",
		       ENTITY_NAME(con->peer_name),
		       pr_addr(&con->peer_addr.in_addr));
		reset_connection(con);
		prepare_write_connect(con->msgr, con, 0);
		prepare_read_connect(con);

		/* Tell ceph about it. */
		mutex_unlock(&con->mutex);
		pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
		if (con->ops->peer_reset)
			con->ops->peer_reset(con);
		mutex_lock(&con->mutex);
		break;

	case CEPH_MSGR_TAG_RETRY_SESSION:
		/*
		 * If we sent a smaller connect_seq than the peer has, try
		 * again with a larger value.
		 */
		dout("process_connect got RETRY my seq = %u, peer_seq = %u\n",
		     le32_to_cpu(con->out_connect.connect_seq),
		     le32_to_cpu(con->in_connect.connect_seq));
		con->connect_seq = le32_to_cpu(con->in_connect.connect_seq);
		prepare_write_connect(con->msgr, con, 0);
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RETRY_GLOBAL:
		/*
		 * If we sent a smaller global_seq than the peer has, try
		 * again with a larger value.
		 */
		dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
		     con->peer_global_seq,
		     le32_to_cpu(con->in_connect.global_seq));
		get_global_seq(con->msgr,
			       le32_to_cpu(con->in_connect.global_seq));
		prepare_write_connect(con->msgr, con, 0);
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_READY:
		clear_bit(CONNECTING, &con->state);
		con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
		con->connect_seq++;
		dout("process_connect got READY gseq %d cseq %d (%d)\n",
		     con->peer_global_seq,
		     le32_to_cpu(con->in_reply.connect_seq),
		     con->connect_seq);
		WARN_ON(con->connect_seq !=
			le32_to_cpu(con->in_reply.connect_seq));

		if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
			set_bit(LOSSYTX, &con->state);
		con->delay = 0;      /* reset backoff memory */
		prepare_read_tag(con);
		break;

	case CEPH_MSGR_TAG_WAIT:
		/*
		 * If there is a connection race (we are opening
		 * connections to each other), one of us may just have
		 * to WAIT.  This shouldn't happen if we are the
		 * client.
		 */
		pr_err("process_connect peer connecting WAIT\n");
		/* fall through */
	default:
		pr_err("connect protocol error, will retry\n");
		con->error_msg = "protocol error, garbage tag during connect";
		return -1;
	}
	return 0;
}
/*
 * read (part of) an ack
 */
static int read_partial_ack(struct ceph_connection *con)
{
	int to = 0;

	return read_partial(con, &to, sizeof(con->in_temp_ack),
			    &con->in_temp_ack);
}
/*
 * We can finally discard anything that's been acked.
 */
static void process_ack(struct ceph_connection *con)
{
	struct ceph_msg *m;
	u64 ack = le64_to_cpu(con->in_temp_ack);
	u64 seq;

	while (!list_empty(&con->out_sent)) {
		m = list_first_entry(&con->out_sent, struct ceph_msg,
				     list_head);
		seq = le64_to_cpu(m->hdr.seq);
		if (seq > ack)
			break;
		dout("got ack for seq %llu type %d at %p\n", seq,
		     le16_to_cpu(m->hdr.type), m);
		ceph_msg_remove(m);
	}
	prepare_read_tag(con);
}
/*
 * read (part of) a message.
 */
static int read_partial_message(struct ceph_connection *con)
{
	struct ceph_msg *m = con->in_msg;
	void *p;
	int ret;
	int to, want, left;
	unsigned front_len, middle_len, data_len, data_off;
	int datacrc = !con->msgr->nocrc;  /* verify crcs unless disabled */

	dout("read_partial_message con %p msg %p\n", con, m);

	/* header */
	while (con->in_base_pos < sizeof(con->in_hdr)) {
		left = sizeof(con->in_hdr) - con->in_base_pos;
		ret = ceph_tcp_recvmsg(con->sock,
				       (char *)&con->in_hdr + con->in_base_pos,
				       left);
		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
		if (con->in_base_pos == sizeof(con->in_hdr)) {
			u32 crc = crc32c(0, (void *)&con->in_hdr,
				 sizeof(con->in_hdr) - sizeof(con->in_hdr.crc));
			if (crc != le32_to_cpu(con->in_hdr.crc)) {
				pr_err("read_partial_message bad hdr "
				       " crc %u != expected %u\n",
				       crc, con->in_hdr.crc);
				return -EBADMSG;
			}
		}
	}

	front_len = le32_to_cpu(con->in_hdr.front_len);
	if (front_len > CEPH_MSG_MAX_FRONT_LEN)
		return -EIO;
	middle_len = le32_to_cpu(con->in_hdr.middle_len);
	if (middle_len > CEPH_MSG_MAX_DATA_LEN)
		return -EIO;
	data_len = le32_to_cpu(con->in_hdr.data_len);
	if (data_len > CEPH_MSG_MAX_DATA_LEN)
		return -EIO;

	/* allocate message? */
	if (!con->in_msg) {
		dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
		     con->in_hdr.front_len, con->in_hdr.data_len);
		con->in_msg = con->ops->alloc_msg(con, &con->in_hdr);
		if (con->in_msg == NULL) {
			/* skip this message */
			pr_err("alloc_msg returned NULL, skipping message\n");
			con->in_base_pos = -front_len - middle_len - data_len -
				sizeof(m->footer);
			con->in_tag = CEPH_MSGR_TAG_READY;
			return 0;
		}
		if (IS_ERR(con->in_msg)) {
			ret = PTR_ERR(con->in_msg);
			con->in_msg = NULL;
			con->error_msg = "out of memory for incoming message";
			return ret;
		}
		m = con->in_msg;
		m->front.iov_len = 0;    /* haven't read it yet */
		memcpy(&m->hdr, &con->in_hdr, sizeof(con->in_hdr));
	}

	/* front */
	while (m->front.iov_len < front_len) {
		BUG_ON(m->front.iov_base == NULL);
		left = front_len - m->front.iov_len;
		ret = ceph_tcp_recvmsg(con->sock, (char *)m->front.iov_base +
				       m->front.iov_len, left);
		if (ret <= 0)
			return ret;
		m->front.iov_len += ret;
		if (m->front.iov_len == front_len)
			con->in_front_crc = crc32c(0, m->front.iov_base,
						   m->front.iov_len);
	}

	/* middle */
	while (middle_len > 0 && (!m->middle ||
				  m->middle->vec.iov_len < middle_len)) {
		if (m->middle == NULL) {
			ret = -EOPNOTSUPP;
			if (con->ops->alloc_middle)
				ret = con->ops->alloc_middle(con, m);
			if (ret < 0) {
				pr_err("alloc_middle fail skipping payload\n");
				con->in_base_pos = -middle_len - data_len
					- sizeof(m->footer);
				ceph_msg_put(con->in_msg);
				con->in_msg = NULL;
				con->in_tag = CEPH_MSGR_TAG_READY;
				return 0;
			}
			m->middle->vec.iov_len = 0;
		}
		left = middle_len - m->middle->vec.iov_len;
		ret = ceph_tcp_recvmsg(con->sock,
				       (char *)m->middle->vec.iov_base +
				       m->middle->vec.iov_len, left);
		if (ret <= 0)
			return ret;
		m->middle->vec.iov_len += ret;
		if (m->middle->vec.iov_len == middle_len)
			con->in_middle_crc = crc32c(0, m->middle->vec.iov_base,
						    m->middle->vec.iov_len);
	}

	/* (page) data */
	data_off = le16_to_cpu(m->hdr.data_off);
	if (data_len == 0)
		goto no_data;

	if (m->nr_pages == 0) {
		con->in_msg_pos.page = 0;
		con->in_msg_pos.page_pos = data_off & ~PAGE_MASK;
		con->in_msg_pos.data_pos = 0;
		/* find pages for data payload */
		want = calc_pages_for(data_off & ~PAGE_MASK, data_len);
		ret = -1;
		mutex_unlock(&con->mutex);
		if (con->ops->prepare_pages)
			ret = con->ops->prepare_pages(con, m, want);
		mutex_lock(&con->mutex);
		if (ret < 0) {
			dout("%p prepare_pages failed, skipping payload\n", m);
			con->in_base_pos = -data_len - sizeof(m->footer);
			ceph_msg_put(con->in_msg);
			con->in_msg = NULL;
			con->in_tag = CEPH_MSGR_TAG_READY;
			return 0;
		}
		BUG_ON(m->nr_pages < want);
	}
	while (con->in_msg_pos.data_pos < data_len) {
		left = min((int)(data_len - con->in_msg_pos.data_pos),
			   (int)(PAGE_SIZE - con->in_msg_pos.page_pos));
		BUG_ON(m->pages == NULL);
		p = kmap(m->pages[con->in_msg_pos.page]);
		ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
				       left);
		if (ret > 0 && datacrc)
			con->in_data_crc =
				crc32c(con->in_data_crc,
				       p + con->in_msg_pos.page_pos, ret);
		kunmap(m->pages[con->in_msg_pos.page]);
		if (ret <= 0)
			return ret;
		con->in_msg_pos.data_pos += ret;
		con->in_msg_pos.page_pos += ret;
		if (con->in_msg_pos.page_pos == PAGE_SIZE) {
			con->in_msg_pos.page_pos = 0;
			con->in_msg_pos.page++;
		}
	}

no_data:
	/* footer */
	to = sizeof(m->hdr) + sizeof(m->footer);
	while (con->in_base_pos < to) {
		left = to - con->in_base_pos;
		ret = ceph_tcp_recvmsg(con->sock, (char *)&m->footer +
				       (con->in_base_pos - sizeof(m->hdr)),
				       left);
		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
	}
	dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
	     m, front_len, m->footer.front_crc, middle_len,
	     m->footer.middle_crc, data_len, m->footer.data_crc);

	/* crc ok? */
	if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
		pr_err("read_partial_message %p front crc %u != exp. %u\n",
		       m, con->in_front_crc, m->footer.front_crc);
		return -EBADMSG;
	}
	if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
		pr_err("read_partial_message %p middle crc %u != exp %u\n",
		       m, con->in_middle_crc, m->footer.middle_crc);
		return -EBADMSG;
	}
	if (datacrc &&
	    (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
	    con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
		pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
		       con->in_data_crc, le32_to_cpu(m->footer.data_crc));
		return -EBADMSG;
	}

	return 1; /* done! */
}
/*
 * Process message.  This happens in the worker thread.  The callback should
 * be careful not to do anything that waits on other incoming messages or it
 * may deadlock.
 */
static void process_message(struct ceph_connection *con)
{
	struct ceph_msg *msg;

	msg = con->in_msg;
	con->in_msg = NULL;

	/* if first message, set peer_name */
	if (con->peer_name.type == 0)
		con->peer_name = msg->hdr.src.name;

	con->in_seq++;
	mutex_unlock(&con->mutex);

	dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
	     msg, le64_to_cpu(msg->hdr.seq),
	     ENTITY_NAME(msg->hdr.src.name),
	     le16_to_cpu(msg->hdr.type),
	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
	     le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.data_len),
	     con->in_front_crc, con->in_middle_crc, con->in_data_crc);
	con->ops->dispatch(con, msg);

	mutex_lock(&con->mutex);
	prepare_read_tag(con);
}
/*
 * Write something to the socket.  Called in a worker thread when the
 * socket appears to be writeable and we have something ready to send.
 */
static int try_write(struct ceph_connection *con)
{
	struct ceph_messenger *msgr = con->msgr;
	int ret = 1;

	dout("try_write start %p state %lu nref %d\n", con, con->state,
	     atomic_read(&con->nref));

	mutex_lock(&con->mutex);
more:
	dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);

	/* open the socket first? */
	if (con->sock == NULL) {
		/*
		 * if we were STANDBY and are reconnecting _this_
		 * connection, bump connect_seq now.  Always bump
		 * global_seq.
		 */
		if (test_and_clear_bit(STANDBY, &con->state))
			con->connect_seq++;

		prepare_write_banner(msgr, con);
		prepare_write_connect(msgr, con, 1);
		prepare_read_banner(con);
		set_bit(CONNECTING, &con->state);
		clear_bit(NEGOTIATING, &con->state);

		BUG_ON(con->in_msg);
		con->in_tag = CEPH_MSGR_TAG_READY;
		dout("try_write initiating connect on %p new state %lu\n",
		     con, con->state);
		con->sock = ceph_tcp_connect(con);
		if (IS_ERR(con->sock)) {
			con->sock = NULL;
			con->error_msg = "connect error";
			ret = -1;
			goto out;
		}
	}

more_kvec:
	/* kvec data queued? */
	if (con->out_skip) {
		ret = write_partial_skip(con);
		if (ret <= 0)
			goto done;
		if (ret < 0) {
			dout("try_write write_partial_skip err %d\n", ret);
			goto done;
		}
	}
	if (con->out_kvec_left) {
		ret = write_partial_kvec(con);
		if (ret <= 0)
			goto done;
	}

	/* msg pages? */
	if (con->out_msg) {
		if (con->out_msg_done) {
			ceph_msg_put(con->out_msg);
			con->out_msg = NULL;   /* we're done with this one */
			goto do_next;
		}

		ret = write_partial_msg_pages(con);
		if (ret == 1)
			goto more_kvec;  /* we need to send the footer, too! */
		if (ret == 0)
			goto done;
		if (ret < 0) {
			dout("try_write write_partial_msg_pages err %d\n",
			     ret);
			goto done;
		}
	}

do_next:
	if (!test_bit(CONNECTING, &con->state)) {
		/* is anything else pending? */
		if (!list_empty(&con->out_queue)) {
			prepare_write_message(con);
			goto more;
		}
		if (con->in_seq > con->in_seq_acked) {
			prepare_write_ack(con);
			goto more;
		}
		if (test_and_clear_bit(KEEPALIVE_PENDING, &con->state)) {
			prepare_write_keepalive(con);
			goto more;
		}
	}

	/* Nothing to do! */
	clear_bit(WRITE_PENDING, &con->state);
	dout("try_write nothing else to write.\n");
done:
	ret = 0;
out:
	mutex_unlock(&con->mutex);
	dout("try_write done on %p\n", con);
	return ret;
}
/*
 * Read what we can from the socket.
 */
static int try_read(struct ceph_connection *con)
{
	struct ceph_messenger *msgr;
	int ret = -1;

	if (!con->sock)
		return 0;

	if (test_bit(STANDBY, &con->state))
		return 0;

	dout("try_read start on %p\n", con);
	msgr = con->msgr;

	mutex_lock(&con->mutex);

more:
	dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
	     con->in_base_pos);
	if (test_bit(CONNECTING, &con->state)) {
		if (!test_bit(NEGOTIATING, &con->state)) {
			dout("try_read connecting\n");
			ret = read_partial_banner(con);
			if (ret <= 0)
				goto done;
			if (process_banner(con) < 0) {
				ret = -1;
				goto out;
			}
		}
		ret = read_partial_connect(con);
		if (ret <= 0)
			goto done;
		if (process_connect(con) < 0) {
			ret = -1;
			goto out;
		}
		goto more;
	}

	if (con->in_base_pos < 0) {
		/*
		 * skipping + discarding content.
		 *
		 * FIXME: there must be a better way to do this!
		 */
		static char buf[1024];
		int skip = min(1024, -con->in_base_pos);
		dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
		ret = ceph_tcp_recvmsg(con->sock, buf, skip);
		if (ret <= 0)
			goto done;
		con->in_base_pos += ret;
		if (con->in_base_pos)
			goto more;
	}
	if (con->in_tag == CEPH_MSGR_TAG_READY) {
		/*
		 * what's next?
		 */
		ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
		if (ret <= 0)
			goto done;
		dout("try_read got tag %d\n", (int)con->in_tag);
		switch (con->in_tag) {
		case CEPH_MSGR_TAG_MSG:
			prepare_read_message(con);
			break;
		case CEPH_MSGR_TAG_ACK:
			prepare_read_ack(con);
			break;
		case CEPH_MSGR_TAG_CLOSE:
			set_bit(CLOSED, &con->state);   /* fixme */
			goto done;
		default:
			goto bad_tag;
		}
	}
	if (con->in_tag == CEPH_MSGR_TAG_MSG) {
		ret = read_partial_message(con);
		if (ret <= 0) {
			switch (ret) {
			case -EBADMSG:
				con->error_msg = "bad crc";
				ret = -EIO;
				goto out;
			case -EIO:
				con->error_msg = "io error";
				goto out;
			default:
				goto done;
			}
		}
		if (con->in_tag == CEPH_MSGR_TAG_READY)
			goto more;
		process_message(con);
		goto more;
	}
	if (con->in_tag == CEPH_MSGR_TAG_ACK) {
		ret = read_partial_ack(con);
		if (ret <= 0)
			goto done;
		process_ack(con);
		goto more;
	}

done:
	ret = 0;
out:
	mutex_unlock(&con->mutex);
	dout("try_read done on %p\n", con);
	return ret;

bad_tag:
	pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
	con->error_msg = "protocol error, garbage tag";
	ret = -1;
	goto out;
}
/*
 * Atomically queue work on a connection.  Bump @con reference to
 * avoid races with connection teardown.
 *
 * There is some trickery going on with QUEUED and BUSY because we
 * only want a _single_ thread operating on each connection at any
 * point in time, but we want to use all available CPUs.
 *
 * The worker thread only proceeds if it can atomically set BUSY.  It
 * clears QUEUED and does its thing.  When it thinks it's done, it
 * clears BUSY, then rechecks QUEUED.. if it's set again, it loops
 * (tries again to set BUSY).
 *
 * To queue work, we first set QUEUED, _then_ if BUSY isn't set, we
 * try to queue work.  If that fails (work is already queued, or BUSY)
 * we give up (work also already being done or is queued) but leave QUEUED
 * set so that the worker thread will loop if necessary.
 */
static void queue_con(struct ceph_connection *con)
{
	if (test_bit(DEAD, &con->state)) {
		dout("queue_con %p ignoring: DEAD\n",
		     con);
		return;
	}

	if (!con->ops->get(con)) {
		dout("queue_con %p ref count 0\n", con);
		return;
	}

	set_bit(QUEUED, &con->state);
	if (test_bit(BUSY, &con->state)) {
		dout("queue_con %p - already BUSY\n", con);
		con->ops->put(con);
	} else if (!queue_work(ceph_msgr_wq, &con->work.work)) {
		dout("queue_con %p - already queued\n", con);
		con->ops->put(con);
	} else {
		dout("queue_con %p\n", con);
	}
}
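/*
 * Illustration of the QUEUED/BUSY handoff described above (Q = a
 * thread calling queue_con, W = the worker):
 *
 *   Q: set QUEUED ... sees BUSY set ............. returns, drops its ref
 *   W: finishes, clears BUSY, rechecks QUEUED, sees it set, loops
 *
 * so a wakeup that races with a finishing worker is never lost.
 */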
/*
 * Do some work on a connection.  Drop a connection ref when we're done.
 */
static void con_work(struct work_struct *work)
{
	struct ceph_connection *con = container_of(work, struct ceph_connection,
						   work.work);
	int backoff = 0;

more:
	if (test_and_set_bit(BUSY, &con->state) != 0) {
		dout("con_work %p BUSY already set\n", con);
		goto out;
	}
	dout("con_work %p start, clearing QUEUED\n", con);
	clear_bit(QUEUED, &con->state);

	if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */
		dout("con_work CLOSED\n");
		con_close_socket(con);
		goto done;
	}
	if (test_and_clear_bit(OPENING, &con->state)) {
		/* reopen w/ new peer */
		dout("con_work OPENING\n");
		con_close_socket(con);
	}

	if (test_and_clear_bit(SOCK_CLOSED, &con->state) ||
	    try_read(con) < 0 ||
	    try_write(con) < 0) {
		backoff = 1;
		ceph_fault(con);     /* error/fault path */
	}

done:
	clear_bit(BUSY, &con->state);
	dout("con->state=%lu\n", con->state);
	if (test_bit(QUEUED, &con->state)) {
		if (!backoff) {
			dout("con_work %p QUEUED reset, looping\n", con);
			goto more;
		}
		dout("con_work %p QUEUED reset, but just faulted\n", con);
		clear_bit(QUEUED, &con->state);
	}
	dout("con_work %p done\n", con);

out:
	con->ops->put(con);
}
/*
 * Generic error/fault handler.  A retry mechanism is used with
 * exponential backoff.
 */
static void ceph_fault(struct ceph_connection *con)
{
	pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
	       pr_addr(&con->peer_addr.in_addr), con->error_msg);
	dout("fault %p state %lu to peer %s\n",
	     con, con->state, pr_addr(&con->peer_addr.in_addr));

	if (test_bit(LOSSYTX, &con->state)) {
		dout("fault on LOSSYTX channel\n");
		goto out;
	}

	clear_bit(BUSY, &con->state);  /* to avoid an improbable race */

	mutex_lock(&con->mutex);

	con_close_socket(con);

	if (con->in_msg) {
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
	}

	/* If there are no messages in the queue, place the connection
	 * in a STANDBY state (i.e., don't try to reconnect just yet). */
	if (list_empty(&con->out_queue) && !con->out_keepalive_pending) {
		dout("fault setting STANDBY\n");
		set_bit(STANDBY, &con->state);
		mutex_unlock(&con->mutex);
		goto out;
	}

	/* Requeue anything that hasn't been acked, and retry after a
	 * delay. */
	list_splice_init(&con->out_sent, &con->out_queue);

	if (con->delay == 0)
		con->delay = BASE_DELAY_INTERVAL;
	else if (con->delay < MAX_DELAY_INTERVAL)
		con->delay *= 2;

	mutex_unlock(&con->mutex);

	/* explicitly schedule work to try to reconnect again later. */
	dout("fault queueing %p delay %lu\n", con, con->delay);
	con->ops->get(con);
	if (queue_delayed_work(ceph_msgr_wq, &con->work,
			       round_jiffies_relative(con->delay)) == 0)
		con->ops->put(con);

out:
	if (con->ops->fault)
		con->ops->fault(con);
}
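/*
 * Backoff example: con->delay starts at BASE_DELAY_INTERVAL on the
 * first fault and doubles on each consecutive fault (BASE, 2*BASE,
 * 4*BASE, ...), saturating once it reaches MAX_DELAY_INTERVAL.  A
 * successful handshake (TAG_READY in process_connect) resets it to 0.
 */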
/*
 * create a new messenger instance
 */
struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr)
{
	struct ceph_messenger *msgr;

	msgr = kzalloc(sizeof(*msgr), GFP_KERNEL);
	if (msgr == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&msgr->global_seq_lock);

	/* the zero page is needed if a request is "canceled" while the message
	 * is being written over the socket */
	msgr->zero_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!msgr->zero_page) {
		kfree(msgr);
		return ERR_PTR(-ENOMEM);
	}
	kmap(msgr->zero_page);

	if (myaddr)
		msgr->inst.addr = *myaddr;

	/* select a random nonce */
	get_random_bytes(&msgr->inst.addr.nonce,
			 sizeof(msgr->inst.addr.nonce));
	encode_my_addr(msgr);

	dout("messenger_create %p\n", msgr);
	return msgr;
}

void ceph_messenger_destroy(struct ceph_messenger *msgr)
{
	dout("destroy %p\n", msgr);
	kunmap(msgr->zero_page);
	__free_page(msgr->zero_page);
	kfree(msgr);
	dout("destroyed messenger %p\n", msgr);
}
/*
 * Queue up an outgoing message on the given connection.
 */
void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
{
	if (test_bit(CLOSED, &con->state)) {
		dout("con_send %p closed, dropping %p\n", con, msg);
		ceph_msg_put(msg);
		return;
	}

	/* set src+dst */
	msg->hdr.src.name = con->msgr->inst.name;
	msg->hdr.src.addr = con->msgr->my_enc_addr;
	msg->hdr.orig_src = msg->hdr.src;
	msg->hdr.dst_erank = con->peer_addr.erank;

	/* queue */
	mutex_lock(&con->mutex);
	BUG_ON(!list_empty(&msg->list_head));
	list_add_tail(&msg->list_head, &con->out_queue);
	dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
	     ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
	     le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.middle_len),
	     le32_to_cpu(msg->hdr.data_len));
	mutex_unlock(&con->mutex);

	/* if there wasn't anything waiting to send before, queue
	 * new work */
	if (test_and_set_bit(WRITE_PENDING, &con->state) == 0)
		queue_con(con);
}
/*
 * Revoke a message that was previously queued for send
 */
void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg)
{
	mutex_lock(&con->mutex);
	if (!list_empty(&msg->list_head)) {
		dout("con_revoke %p msg %p\n", con, msg);
		list_del_init(&msg->list_head);
		ceph_msg_put(msg);
		msg->hdr.seq = 0;
		if (con->out_msg == msg) {
			ceph_msg_put(con->out_msg);
			con->out_msg = NULL;
		}
		if (con->out_kvec_is_msg) {
			con->out_skip = con->out_kvec_bytes;
			con->out_kvec_is_msg = false;
		}
	} else {
		dout("con_revoke %p msg %p - not queued (sent?)\n", con, msg);
	}
	mutex_unlock(&con->mutex);
}
/*
 * Revoke a page vector that we may be reading data into
 */
void ceph_con_revoke_pages(struct ceph_connection *con, struct page **pages)
{
	mutex_lock(&con->mutex);
	if (con->in_msg && con->in_msg->pages == pages) {
		unsigned data_len = le32_to_cpu(con->in_hdr.data_len);

		/* skip rest of message */
		dout("con_revoke_pages %p msg %p pages %p revoked\n", con,
		     con->in_msg, pages);
		if (con->in_msg_pos.data_pos < data_len)
			con->in_base_pos = con->in_msg_pos.data_pos - data_len;
		else
			con->in_base_pos = con->in_base_pos -
				sizeof(struct ceph_msg_header) -
				sizeof(struct ceph_msg_footer);
		con->in_msg->pages = NULL;
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
		con->in_tag = CEPH_MSGR_TAG_READY;
	} else {
		dout("con_revoke_pages %p msg %p pages %p no-op\n",
		     con, con->in_msg, pages);
	}
	mutex_unlock(&con->mutex);
}
/*
 * Queue a keepalive byte to ensure the tcp connection is alive.
 */
void ceph_con_keepalive(struct ceph_connection *con)
{
	if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 &&
	    test_and_set_bit(WRITE_PENDING, &con->state) == 0)
		queue_con(con);
}
/*
 * construct a new message with given type, size
 * the new msg has a ref count of 1.
 */
struct ceph_msg *ceph_msg_new(int type, int front_len,
			      int page_len, int page_off, struct page **pages)
{
	struct ceph_msg *m;

	m = kmalloc(sizeof(*m), GFP_NOFS);
	if (m == NULL)
		goto out;
	kref_init(&m->kref);
	INIT_LIST_HEAD(&m->list_head);

	m->hdr.type = cpu_to_le16(type);
	m->hdr.front_len = cpu_to_le32(front_len);
	m->hdr.middle_len = 0;
	m->hdr.data_len = cpu_to_le32(page_len);
	m->hdr.data_off = cpu_to_le16(page_off);
	m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
	m->footer.front_crc = 0;
	m->footer.middle_crc = 0;
	m->footer.data_crc = 0;
	m->front_max = front_len;
	m->front_is_vmalloc = false;
	m->more_to_follow = false;
	m->pool = NULL;

	/* front */
	if (front_len) {
		if (front_len > PAGE_CACHE_SIZE) {
			m->front.iov_base = __vmalloc(front_len, GFP_NOFS,
						      PAGE_KERNEL);
			m->front_is_vmalloc = true;
		} else {
			m->front.iov_base = kmalloc(front_len, GFP_NOFS);
		}
		if (m->front.iov_base == NULL) {
			pr_err("msg_new can't allocate %d bytes\n",
			       front_len);
			goto out2;
		}
	} else {
		m->front.iov_base = NULL;
	}
	m->front.iov_len = front_len;

	/* middle */
	m->middle = NULL;

	/* data */
	m->nr_pages = calc_pages_for(page_off, page_len);
	m->pages = pages;

	dout("ceph_msg_new %p page %d~%d -> %d\n", m, page_off, page_len,
	     m->nr_pages);
	return m;

out2:
	ceph_msg_put(m);
out:
	pr_err("msg_new can't create type %d len %d\n", type, front_len);
	return ERR_PTR(-ENOMEM);
}
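/*
 * Typical use, a sketch (CEPH_MSG_PING from ceph_fs.h shown purely for
 * illustration; any message type works the same way):
 *
 *	struct ceph_msg *m = ceph_msg_new(CEPH_MSG_PING, 0, 0, 0, NULL);
 *	if (!IS_ERR(m))
 *		ceph_con_send(con, m);	// con_send consumes our ref
 */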
/*
 * Generic message allocator, for incoming messages.
 */
struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
				struct ceph_msg_header *hdr)
{
	int type = le16_to_cpu(hdr->type);
	int front_len = le32_to_cpu(hdr->front_len);
	struct ceph_msg *msg = ceph_msg_new(type, front_len, 0, 0, NULL);

	if (!msg) {
		pr_err("unable to allocate msg type %d len %d\n",
		       type, front_len);
		return ERR_PTR(-ENOMEM);
	}
	return msg;
}
/*
 * Allocate "middle" portion of a message, if it is needed and wasn't
 * allocated by alloc_msg.  This allows us to read a small fixed-size
 * per-type header in the front and then gracefully fail (i.e.,
 * propagate the error to the caller based on info in the front) when
 * the middle is too large.
 */
int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
{
	int type = le16_to_cpu(msg->hdr.type);
	int middle_len = le32_to_cpu(msg->hdr.middle_len);

	dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
	     ceph_msg_type_name(type), middle_len);
	BUG_ON(!middle_len);
	BUG_ON(msg->middle);

	msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
	if (!msg->middle)
		return -ENOMEM;
	return 0;
}
/*
 * Free a generically kmalloc'd message.
 */
void ceph_msg_kfree(struct ceph_msg *m)
{
	dout("msg_kfree %p\n", m);
	if (m->front_is_vmalloc)
		vfree(m->front.iov_base);
	else
		kfree(m->front.iov_base);
	kfree(m);
}
/*
 * Drop a msg ref.  Destroy as needed.
 */
void ceph_msg_last_put(struct kref *kref)
{
	struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);

	dout("ceph_msg_put last one on %p\n", m);
	WARN_ON(!list_empty(&m->list_head));

	/* drop middle, data, if any */
	if (m->middle) {
		ceph_buffer_put(m->middle);
		m->middle = NULL;
	}
	m->nr_pages = 0;
	m->pages = NULL;

	if (m->pool)
		ceph_msgpool_put(m->pool, m);
	else
		ceph_msg_kfree(m);
}
void ceph_msg_dump(struct ceph_msg *msg)
{
	pr_debug("msg_dump %p (front_max %d nr_pages %d)\n", msg,
		 msg->front_max, msg->nr_pages);
	print_hex_dump(KERN_DEBUG, "header: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->hdr, sizeof(msg->hdr), true);
	print_hex_dump(KERN_DEBUG, " front: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       msg->front.iov_base, msg->front.iov_len, true);
	if (msg->middle)
		print_hex_dump(KERN_DEBUG, "middle: ",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       msg->middle->vec.iov_base,
			       msg->middle->vec.iov_len, true);
	print_hex_dump(KERN_DEBUG, "footer: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->footer, sizeof(msg->footer), true);
}