#include "ceph_debug.h"

#include <linux/crc32c.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/socket.h>
#include <linux/string.h>

#include "messenger.h"
/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */
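/*
 * A rough sketch (illustrative only, not part of the messenger itself)
 * of how a caller such as the mon/osd/mds clients drives a connection.
 * The variable names below (my_addr, con, peer_addr, msg) are assumed
 * to exist in the caller and are hypothetical:
 */
#if 0
        struct ceph_messenger *msgr = ceph_messenger_create(&my_addr);

        ceph_con_init(msgr, &con);        /* zero the state, take first ref */
        ceph_con_open(&con, &peer_addr);  /* note peer addr, queue connect */
        ceph_con_send(&con, msg);         /* queue msg; write worker sends it */
        ceph_con_keepalive(&con);         /* optionally keep the tcp session warm */
        ceph_con_close(&con);             /* mark the peer down, drop messages */
#endif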
/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;


static void queue_con(struct ceph_connection *con);
static void con_work(struct work_struct *);
static void ceph_fault(struct ceph_connection *con);

const char *ceph_name_type_str(int t)
{
        switch (t) {
        case CEPH_ENTITY_TYPE_MON: return "mon";
        case CEPH_ENTITY_TYPE_MDS: return "mds";
        case CEPH_ENTITY_TYPE_OSD: return "osd";
        case CEPH_ENTITY_TYPE_CLIENT: return "client";
        case CEPH_ENTITY_TYPE_ADMIN: return "admin";
        default: return "???";
        }
}
/*
 * nicely render a sockaddr as a string.
 */
#define MAX_ADDR_STR 20
static char addr_str[MAX_ADDR_STR][40];
static DEFINE_SPINLOCK(addr_str_lock);
static int last_addr_str;

const char *pr_addr(const struct sockaddr_storage *ss)
{
        int i;
        char *s;
        struct sockaddr_in *in4 = (void *)ss;
        unsigned char *quad = (void *)&in4->sin_addr.s_addr;
        struct sockaddr_in6 *in6 = (void *)ss;

        spin_lock(&addr_str_lock);
        i = last_addr_str++;
        if (last_addr_str == MAX_ADDR_STR)
                last_addr_str = 0;
        spin_unlock(&addr_str_lock);
        s = addr_str[i];

        switch (ss->ss_family) {
        case AF_INET:
                sprintf(s, "%u.%u.%u.%u:%u",
                        (unsigned int)quad[0],
                        (unsigned int)quad[1],
                        (unsigned int)quad[2],
                        (unsigned int)quad[3],
                        (unsigned int)ntohs(in4->sin_port));
                break;

        case AF_INET6:
                sprintf(s, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%u",
                        in6->sin6_addr.s6_addr16[0],
                        in6->sin6_addr.s6_addr16[1],
                        in6->sin6_addr.s6_addr16[2],
                        in6->sin6_addr.s6_addr16[3],
                        in6->sin6_addr.s6_addr16[4],
                        in6->sin6_addr.s6_addr16[5],
                        in6->sin6_addr.s6_addr16[6],
                        in6->sin6_addr.s6_addr16[7],
                        (unsigned int)ntohs(in6->sin6_port));
                break;

        default:
                sprintf(s, "(unknown sockaddr family %d)", (int)ss->ss_family);
        }

        return s;
}
static void encode_my_addr(struct ceph_messenger *msgr)
{
        memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
        ceph_encode_addr(&msgr->my_enc_addr);
}

/*
 * work queue for all reading and writing to/from the socket.
 */
struct workqueue_struct *ceph_msgr_wq;

int __init ceph_msgr_init(void)
{
        ceph_msgr_wq = create_workqueue("ceph-msgr");
        if (IS_ERR(ceph_msgr_wq)) {
                int ret = PTR_ERR(ceph_msgr_wq);
                pr_err("msgr_init failed to create workqueue: %d\n", ret);
                return ret;
        }
        return 0;
}

void ceph_msgr_exit(void)
{
        destroy_workqueue(ceph_msgr_wq);
}
/*
 * socket callback functions
 */

/* data available on socket, or listen socket received a connect */
static void ceph_data_ready(struct sock *sk, int count_unused)
{
        struct ceph_connection *con =
                (struct ceph_connection *)sk->sk_user_data;

        if (sk->sk_state != TCP_CLOSE_WAIT) {
                dout("ceph_data_ready on %p state = %lu, queueing work\n",
                     con, con->state);
                queue_con(con);
        }
}

/* socket has buffer space for writing */
static void ceph_write_space(struct sock *sk)
{
        struct ceph_connection *con =
                (struct ceph_connection *)sk->sk_user_data;

        /* only queue to workqueue if there is data we want to write. */
        if (test_bit(WRITE_PENDING, &con->state)) {
                dout("ceph_write_space %p queueing write work\n", con);
                queue_con(con);
        } else {
                dout("ceph_write_space %p nothing to write\n", con);
        }

        /* since we have our own write_space, clear the SOCK_NOSPACE flag */
        clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
}

/* socket's state has changed */
static void ceph_state_change(struct sock *sk)
{
        struct ceph_connection *con =
                (struct ceph_connection *)sk->sk_user_data;

        dout("ceph_state_change %p state = %lu sk_state = %u\n",
             con, con->state, sk->sk_state);

        if (test_bit(CLOSED, &con->state))
                return;

        switch (sk->sk_state) {
        case TCP_CLOSE:
                dout("ceph_state_change TCP_CLOSE\n");
        case TCP_CLOSE_WAIT:
                dout("ceph_state_change TCP_CLOSE_WAIT\n");
                if (test_and_set_bit(SOCK_CLOSED, &con->state) == 0) {
                        if (test_bit(CONNECTING, &con->state))
                                con->error_msg = "connection failed";
                        else
                                con->error_msg = "socket closed";
                        queue_con(con);
                }
                break;
        case TCP_ESTABLISHED:
                dout("ceph_state_change TCP_ESTABLISHED\n");
                queue_con(con);
                break;
        }
}
/*
 * set up socket callbacks
 */
static void set_sock_callbacks(struct socket *sock,
                               struct ceph_connection *con)
{
        struct sock *sk = sock->sk;
        sk->sk_user_data = (void *)con;
        sk->sk_data_ready = ceph_data_ready;
        sk->sk_write_space = ceph_write_space;
        sk->sk_state_change = ceph_state_change;
}


/*
 * initiate connection to a remote socket.
 */
static struct socket *ceph_tcp_connect(struct ceph_connection *con)
{
        struct sockaddr *paddr = (struct sockaddr *)&con->peer_addr.in_addr;
        struct socket *sock;
        int ret;

        ret = sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
        if (ret)
                return ERR_PTR(ret);
        sock->sk->sk_allocation = GFP_NOFS;

        set_sock_callbacks(sock, con);

        dout("connect %s\n", pr_addr(&con->peer_addr.in_addr));

        ret = sock->ops->connect(sock, paddr, sizeof(*paddr), O_NONBLOCK);
        if (ret == -EINPROGRESS) {
                dout("connect %s EINPROGRESS sk_state = %u\n",
                     pr_addr(&con->peer_addr.in_addr),
                     sock->sk->sk_state);
                ret = 0;
        }
        if (ret < 0) {
                pr_err("connect %s error %d\n",
                       pr_addr(&con->peer_addr.in_addr), ret);
                sock_release(sock);
                con->error_msg = "connect error";
                return ERR_PTR(ret);
        }
        return sock;
}
static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
{
        struct kvec iov = {buf, len};
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };

        return kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
}

/*
 * write something.  @more is true if caller will be sending more data
 * shortly.
 */
static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
                            size_t kvlen, size_t len, int more)
{
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };

        if (more)
                msg.msg_flags |= MSG_MORE;
        else
                msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */

        return kernel_sendmsg(sock, &msg, iov, kvlen, len);
}


/*
 * Shutdown/close the socket for the given connection.
 */
static int con_close_socket(struct ceph_connection *con)
{
        int rc;

        dout("con_close_socket on %p sock %p\n", con, con->sock);
        if (!con->sock)
                return 0;
        set_bit(SOCK_CLOSED, &con->state);
        rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
        sock_release(con->sock);
        con->sock = NULL;
        clear_bit(SOCK_CLOSED, &con->state);
        return rc;
}
/*
 * Reset a connection.  Discard all incoming and outgoing messages
 * and clear *_seq state.
 */
static void ceph_msg_remove(struct ceph_msg *msg)
{
        list_del_init(&msg->list_head);
        ceph_msg_put(msg);
}

static void ceph_msg_remove_list(struct list_head *head)
{
        while (!list_empty(head)) {
                struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
                                                        list_head);
                ceph_msg_remove(msg);
        }
}

static void reset_connection(struct ceph_connection *con)
{
        /* reset connection, out_queue, msg_ and connect_seq */
        /* discard existing out_queue and msg_seq */
        ceph_msg_remove_list(&con->out_queue);
        ceph_msg_remove_list(&con->out_sent);

        if (con->in_msg) {
                ceph_msg_put(con->in_msg);
                con->in_msg = NULL;
        }

        con->connect_seq = 0;
        con->out_seq = 0;
        if (con->out_msg) {
                ceph_msg_put(con->out_msg);
                con->out_msg = NULL;
        }
        con->in_seq = 0;
}

/*
 * mark a peer down.  drop any open connections.
 */
void ceph_con_close(struct ceph_connection *con)
{
        dout("con_close %p peer %s\n", con, pr_addr(&con->peer_addr.in_addr));
        set_bit(CLOSED, &con->state);       /* in case there's queued work */
        clear_bit(STANDBY, &con->state);    /* avoid connect_seq bump */
        mutex_lock(&con->mutex);
        reset_connection(con);
        mutex_unlock(&con->mutex);
        queue_con(con);
}

/*
 * Reopen a closed connection, with a new peer address.
 */
void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr)
{
        dout("con_open %p %s\n", con, pr_addr(&addr->in_addr));
        set_bit(OPENING, &con->state);
        clear_bit(CLOSED, &con->state);
        memcpy(&con->peer_addr, addr, sizeof(*addr));
        con->delay = 0;      /* reset backoff memory */
        queue_con(con);
}
struct ceph_connection *ceph_con_get(struct ceph_connection *con)
{
        dout("con_get %p nref = %d -> %d\n", con,
             atomic_read(&con->nref), atomic_read(&con->nref) + 1);
        if (atomic_inc_not_zero(&con->nref))
                return con;
        return NULL;
}

void ceph_con_put(struct ceph_connection *con)
{
        dout("con_put %p nref = %d -> %d\n", con,
             atomic_read(&con->nref), atomic_read(&con->nref) - 1);
        BUG_ON(atomic_read(&con->nref) == 0);
        if (atomic_dec_and_test(&con->nref)) {
                BUG_ON(con->sock);
                kfree(con);
        }
}

/*
 * initialize a new connection.
 */
void ceph_con_init(struct ceph_messenger *msgr, struct ceph_connection *con)
{
        dout("con_init %p\n", con);
        memset(con, 0, sizeof(*con));
        atomic_set(&con->nref, 1);
        con->msgr = msgr;
        mutex_init(&con->mutex);
        INIT_LIST_HEAD(&con->out_queue);
        INIT_LIST_HEAD(&con->out_sent);
        INIT_DELAYED_WORK(&con->work, con_work);
}


/*
 * We maintain a global counter to order connection attempts.  Get
 * a unique seq greater than @gt.
 */
static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
{
        u32 ret;

        spin_lock(&msgr->global_seq_lock);
        if (msgr->global_seq < gt)
                msgr->global_seq = gt;
        ret = ++msgr->global_seq;
        spin_unlock(&msgr->global_seq_lock);
        return ret;
}
/*
 * Prepare footer for currently outgoing message, and finish things
 * off.  Assumes out_kvec* are already valid.. we just add on to the end.
 */
static void prepare_write_message_footer(struct ceph_connection *con, int v)
{
        struct ceph_msg *m = con->out_msg;

        dout("prepare_write_message_footer %p\n", con);
        con->out_kvec_is_msg = true;
        con->out_kvec[v].iov_base = &m->footer;
        con->out_kvec[v].iov_len = sizeof(m->footer);
        con->out_kvec_bytes += sizeof(m->footer);
        con->out_kvec_left++;
        con->out_more = m->more_to_follow;
        con->out_msg_done = true;
}
/*
 * Prepare headers for the next outgoing message.
 */
static void prepare_write_message(struct ceph_connection *con)
{
        struct ceph_msg *m;
        int v = 0;

        con->out_kvec_bytes = 0;
        con->out_kvec_is_msg = true;
        con->out_msg_done = false;

        /* Sneak an ack in there first?  If we can get it into the same
         * TCP packet that's a good thing. */
        if (con->in_seq > con->in_seq_acked) {
                con->in_seq_acked = con->in_seq;
                con->out_kvec[v].iov_base = &tag_ack;
                con->out_kvec[v++].iov_len = 1;
                con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
                con->out_kvec[v].iov_base = &con->out_temp_ack;
                con->out_kvec[v++].iov_len = sizeof(con->out_temp_ack);
                con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack);
        }

        /* move message to sending/sent list */
        m = list_first_entry(&con->out_queue,
                             struct ceph_msg, list_head);
        con->out_msg = m;
        if (test_bit(LOSSYTX, &con->state)) {
                /* put message on sent list */
                list_move_tail(&m->list_head, &con->out_sent);
        } else {
                list_del_init(&m->list_head);
        }

        m->hdr.seq = cpu_to_le64(++con->out_seq);

        dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
             m, con->out_seq, le16_to_cpu(m->hdr.type),
             le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
             le32_to_cpu(m->hdr.data_len),
             m->nr_pages);
        BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);

        /* tag + hdr + front + middle */
        con->out_kvec[v].iov_base = &tag_msg;
        con->out_kvec[v++].iov_len = 1;
        con->out_kvec[v].iov_base = &m->hdr;
        con->out_kvec[v++].iov_len = sizeof(m->hdr);
        con->out_kvec[v++] = m->front;
        if (m->middle)
                con->out_kvec[v++] = m->middle->vec;
        con->out_kvec_left = v;
        con->out_kvec_bytes += 1 + sizeof(m->hdr) + m->front.iov_len +
                (m->middle ? m->middle->vec.iov_len : 0);
        con->out_kvec_cur = con->out_kvec;

        /* fill in crc (except data pages), footer */
        con->out_msg->hdr.crc =
                cpu_to_le32(crc32c(0, (void *)&m->hdr,
                                   sizeof(m->hdr) - sizeof(m->hdr.crc)));
        con->out_msg->footer.flags = CEPH_MSG_FOOTER_COMPLETE;
        con->out_msg->footer.front_crc =
                cpu_to_le32(crc32c(0, m->front.iov_base, m->front.iov_len));
        if (m->middle)
                con->out_msg->footer.middle_crc =
                        cpu_to_le32(crc32c(0, m->middle->vec.iov_base,
                                           m->middle->vec.iov_len));
        else
                con->out_msg->footer.middle_crc = 0;
        con->out_msg->footer.data_crc = 0;
        dout("prepare_write_message front_crc %u data_crc %u\n",
             le32_to_cpu(con->out_msg->footer.front_crc),
             le32_to_cpu(con->out_msg->footer.middle_crc));

        /* is there a data payload? */
        if (le32_to_cpu(m->hdr.data_len) > 0) {
                /* initialize page iterator */
                con->out_msg_pos.page = 0;
                con->out_msg_pos.page_pos =
                        le16_to_cpu(m->hdr.data_off) & ~PAGE_MASK;
                con->out_msg_pos.data_pos = 0;
                con->out_msg_pos.did_page_crc = 0;
                con->out_more = 1;  /* data + footer will follow */
        } else {
                /* no, queue up footer too and be done */
                prepare_write_message_footer(con, v);
        }

        set_bit(WRITE_PENDING, &con->state);
}
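/*
 * For orientation, a queued message as assembled above occupies the wire
 * in this order (a sketch of the framing, not a literal struct from the
 * protocol headers):
 *
 *      tag byte (CEPH_MSGR_TAG_MSG)
 *      struct ceph_msg_header hdr      (carries its own crc)
 *      front   (hdr.front_len bytes)
 *      middle  (hdr.middle_len bytes, optional)
 *      data    (hdr.data_len bytes, sent page by page, optional)
 *      struct ceph_msg_footer footer   (front/middle/data crcs + flags)
 */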
static void prepare_write_ack(struct ceph_connection *con)
{
        dout("prepare_write_ack %p %llu -> %llu\n", con,
             con->in_seq_acked, con->in_seq);
        con->in_seq_acked = con->in_seq;

        con->out_kvec[0].iov_base = &tag_ack;
        con->out_kvec[0].iov_len = 1;
        con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
        con->out_kvec[1].iov_base = &con->out_temp_ack;
        con->out_kvec[1].iov_len = sizeof(con->out_temp_ack);
        con->out_kvec_left = 2;
        con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack);
        con->out_kvec_cur = con->out_kvec;
        con->out_more = 1;  /* more will follow.. eventually.. */
        set_bit(WRITE_PENDING, &con->state);
}
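/*
 * On the wire the ack prepared above is just the tag byte followed by the
 * acked seq in little-endian; roughly (a layout sketch only, not a struct
 * from the protocol headers):
 */
#if 0
struct example_ack_frame {
        char   tag;     /* CEPH_MSGR_TAG_ACK */
        __le64 seq;     /* everything up to and including this seq arrived */
} __attribute__((packed));
#endif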
/*
 * Prepare to write keepalive byte.
 */
static void prepare_write_keepalive(struct ceph_connection *con)
{
        dout("prepare_write_keepalive %p\n", con);
        con->out_kvec[0].iov_base = &tag_keepalive;
        con->out_kvec[0].iov_len = 1;
        con->out_kvec_left = 1;
        con->out_kvec_bytes = 1;
        con->out_kvec_cur = con->out_kvec;
        set_bit(WRITE_PENDING, &con->state);
}
/*
 * Connection negotiation.
 */
static void prepare_connect_authorizer(struct ceph_connection *con)
{
        void *auth_buf = NULL;
        int auth_len = 0;
        int auth_protocol = 0;

        mutex_unlock(&con->mutex);
        if (con->ops->get_authorizer)
                con->ops->get_authorizer(con, &auth_buf, &auth_len,
                                         &auth_protocol, &con->auth_reply_buf,
                                         &con->auth_reply_buf_len,
                                         con->auth_retry);
        mutex_lock(&con->mutex);

        con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol);
        con->out_connect.authorizer_len = cpu_to_le32(auth_len);

        con->out_kvec[con->out_kvec_left].iov_base = auth_buf;
        con->out_kvec[con->out_kvec_left].iov_len = auth_len;
        con->out_kvec_left++;
        con->out_kvec_bytes += auth_len;
}
/*
 * We connected to a peer and are saying hello.
 */
static void prepare_write_banner(struct ceph_messenger *msgr,
                                 struct ceph_connection *con)
{
        int len = strlen(CEPH_BANNER);

        con->out_kvec[0].iov_base = CEPH_BANNER;
        con->out_kvec[0].iov_len = len;
        con->out_kvec[1].iov_base = &msgr->my_enc_addr;
        con->out_kvec[1].iov_len = sizeof(msgr->my_enc_addr);
        con->out_kvec_left = 2;
        con->out_kvec_bytes = len + sizeof(msgr->my_enc_addr);
        con->out_kvec_cur = con->out_kvec;

        set_bit(WRITE_PENDING, &con->state);
}
static void prepare_write_connect(struct ceph_messenger *msgr,
                                  struct ceph_connection *con,
                                  int after_banner)
{
        unsigned global_seq = get_global_seq(con->msgr, 0);
        int proto;

        switch (con->peer_name.type) {
        case CEPH_ENTITY_TYPE_MON:
                proto = CEPH_MONC_PROTOCOL;
                break;
        case CEPH_ENTITY_TYPE_OSD:
                proto = CEPH_OSDC_PROTOCOL;
                break;
        case CEPH_ENTITY_TYPE_MDS:
                proto = CEPH_MDSC_PROTOCOL;
                break;
        default:
                BUG();
        }

        dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
             con->connect_seq, global_seq, proto);

        con->out_connect.features = CEPH_FEATURE_SUPPORTED;
        con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
        con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
        con->out_connect.global_seq = cpu_to_le32(global_seq);
        con->out_connect.protocol_version = cpu_to_le32(proto);
        con->out_connect.flags = 0;

        if (!after_banner) {
                con->out_kvec_left = 0;
                con->out_kvec_bytes = 0;
        }
        con->out_kvec[con->out_kvec_left].iov_base = &con->out_connect;
        con->out_kvec[con->out_kvec_left].iov_len = sizeof(con->out_connect);
        con->out_kvec_left++;
        con->out_kvec_bytes += sizeof(con->out_connect);
        con->out_kvec_cur = con->out_kvec;

        set_bit(WRITE_PENDING, &con->state);

        prepare_connect_authorizer(con);
}
/*
 * write as much of pending kvecs to the socket as we can.
 *  1 -> done
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_kvec(struct ceph_connection *con)
{
        int ret;

        dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
        while (con->out_kvec_bytes > 0) {
                ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
                                       con->out_kvec_left, con->out_kvec_bytes,
                                       con->out_more);
                if (ret <= 0)
                        goto out;
                con->out_kvec_bytes -= ret;
                if (con->out_kvec_bytes == 0)
                        break;            /* done */
                while (ret > 0) {
                        if (ret >= con->out_kvec_cur->iov_len) {
                                ret -= con->out_kvec_cur->iov_len;
                                con->out_kvec_cur++;
                                con->out_kvec_left--;
                        } else {
                                con->out_kvec_cur->iov_len -= ret;
                                con->out_kvec_cur->iov_base += ret;
                                ret = 0;
                        }
                }
        }
        con->out_kvec_left = 0;
        con->out_kvec_is_msg = false;
        ret = 1;
out:
        dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
             con->out_kvec_bytes, con->out_kvec_left, ret);
        return ret;  /* done! */
}
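/*
 * Worked example (hypothetical numbers): with out_kvec_cur pointing at
 * kvecs of 10, 20 and 30 bytes and ceph_tcp_sendmsg() returning 25, we
 * drop out_kvec_bytes from 60 to 35, fully consume the 10-byte kvec
 * (ret -= 10, out_kvec_cur++, out_kvec_left--), then trim the 20-byte
 * kvec to its final 5 bytes by advancing iov_base and shrinking iov_len,
 * and loop to send the remaining 35 bytes.
 */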
/*
 * Write as much message data payload as we can.  If we finish, queue
 * up the footer.
 *  1 -> done, footer is now queued in out_kvec[].
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_msg_pages(struct ceph_connection *con)
{
        struct ceph_msg *msg = con->out_msg;
        unsigned data_len = le32_to_cpu(msg->hdr.data_len);
        size_t len;
        int crc = !con->msgr->nocrc;
        int ret;

        dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
             con, con->out_msg, con->out_msg_pos.page, con->out_msg->nr_pages,
             con->out_msg_pos.page_pos);

        while (con->out_msg_pos.page < con->out_msg->nr_pages) {
                struct page *page = NULL;
                void *kaddr = NULL;

                /*
                 * if we are calculating the data crc (the default), we need
                 * to map the page.  if our pages[] has been revoked, use the
                 * zero page.
                 */
                if (msg->pages) {
                        page = msg->pages[con->out_msg_pos.page];
                        if (crc)
                                kaddr = kmap(page);
                } else if (msg->pagelist) {
                        page = list_first_entry(&msg->pagelist->head,
                                                struct page, lru);
                        if (crc)
                                kaddr = kmap(page);
                } else {
                        page = con->msgr->zero_page;
                        if (crc)
                                kaddr = page_address(con->msgr->zero_page);
                }
                len = min((int)(PAGE_SIZE - con->out_msg_pos.page_pos),
                          (int)(data_len - con->out_msg_pos.data_pos));
                if (crc && !con->out_msg_pos.did_page_crc) {
                        void *base = kaddr + con->out_msg_pos.page_pos;
                        u32 tmpcrc = le32_to_cpu(con->out_msg->footer.data_crc);

                        BUG_ON(kaddr == NULL);
                        con->out_msg->footer.data_crc =
                                cpu_to_le32(crc32c(tmpcrc, base, len));
                        con->out_msg_pos.did_page_crc = 1;
                }

                ret = kernel_sendpage(con->sock, page,
                                      con->out_msg_pos.page_pos, len,
                                      MSG_DONTWAIT | MSG_NOSIGNAL |
                                      MSG_MORE);

                if (crc && (msg->pages || msg->pagelist))
                        kunmap(page);

                if (ret <= 0)
                        goto out;

                con->out_msg_pos.data_pos += ret;
                con->out_msg_pos.page_pos += ret;
                if (ret == len) {
                        con->out_msg_pos.page_pos = 0;
                        con->out_msg_pos.page++;
                        con->out_msg_pos.did_page_crc = 0;
                        if (msg->pagelist)
                                list_move_tail(&page->lru,
                                               &msg->pagelist->head);
                }
        }

        dout("write_partial_msg_pages %p msg %p done\n", con, msg);

        /* prepare and queue up footer, too */
        if (!crc)
                con->out_msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
        con->out_kvec_bytes = 0;
        con->out_kvec_left = 0;
        con->out_kvec_cur = con->out_kvec;
        prepare_write_message_footer(con, 0);
        ret = 1;
out:
        return ret;
}
static int write_partial_skip(struct ceph_connection *con)
{
        int ret;

        while (con->out_skip > 0) {
                struct kvec iov = {
                        .iov_base = page_address(con->msgr->zero_page),
                        .iov_len = min(con->out_skip, (int)PAGE_CACHE_SIZE)
                };

                ret = ceph_tcp_sendmsg(con->sock, &iov, 1, iov.iov_len, 1);
                if (ret <= 0)
                        goto out;
                con->out_skip -= ret;
        }
        ret = 1;
out:
        return ret;
}
/*
 * Prepare to read connection handshake, or an ack.
 */
static void prepare_read_banner(struct ceph_connection *con)
{
        dout("prepare_read_banner %p\n", con);
        con->in_base_pos = 0;
}

static void prepare_read_connect(struct ceph_connection *con)
{
        dout("prepare_read_connect %p\n", con);
        con->in_base_pos = 0;
}

static void prepare_read_connect_retry(struct ceph_connection *con)
{
        dout("prepare_read_connect_retry %p\n", con);
        con->in_base_pos = strlen(CEPH_BANNER) + sizeof(con->actual_peer_addr)
                + sizeof(con->peer_addr_for_me);
}

static void prepare_read_ack(struct ceph_connection *con)
{
        dout("prepare_read_ack %p\n", con);
        con->in_base_pos = 0;
}

static void prepare_read_tag(struct ceph_connection *con)
{
        dout("prepare_read_tag %p\n", con);
        con->in_base_pos = 0;
        con->in_tag = CEPH_MSGR_TAG_READY;
}

/*
 * Prepare to read a message.
 */
static int prepare_read_message(struct ceph_connection *con)
{
        dout("prepare_read_message %p\n", con);
        BUG_ON(con->in_msg != NULL);
        con->in_base_pos = 0;
        con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
        return 0;
}
static int read_partial(struct ceph_connection *con,
                        int *to, int size, void *object)
{
        *to += size;
        while (con->in_base_pos < *to) {
                int left = *to - con->in_base_pos;
                int have = size - left;
                int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
                if (ret <= 0)
                        return ret;
                con->in_base_pos += ret;
        }
        return 1;
}
/*
 * Read all or part of the connect-side handshake on a new connection
 */
static int read_partial_banner(struct ceph_connection *con)
{
        int ret, to = 0;

        dout("read_partial_banner %p at %d\n", con, con->in_base_pos);

        ret = read_partial(con, &to, strlen(CEPH_BANNER), con->in_banner);
        if (ret <= 0)
                goto out;
        ret = read_partial(con, &to, sizeof(con->actual_peer_addr),
                           &con->actual_peer_addr);
        if (ret <= 0)
                goto out;
        ret = read_partial(con, &to, sizeof(con->peer_addr_for_me),
                           &con->peer_addr_for_me);
out:
        return ret;
}

static int read_partial_connect(struct ceph_connection *con)
{
        int ret, to = 0;

        dout("read_partial_connect %p at %d\n", con, con->in_base_pos);

        ret = read_partial(con, &to, sizeof(con->in_reply), &con->in_reply);
        if (ret <= 0)
                goto out;
        ret = read_partial(con, &to, le32_to_cpu(con->in_reply.authorizer_len),
                           con->auth_reply_buf);
        if (ret <= 0)
                goto out;

        dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
             con, (int)con->in_reply.tag,
             le32_to_cpu(con->in_reply.connect_seq),
             le32_to_cpu(con->in_reply.global_seq));
out:
        return ret;
}
/*
 * Verify the hello banner looks okay.
 */
static int verify_hello(struct ceph_connection *con)
{
        if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
                pr_err("connect to %s got bad banner\n",
                       pr_addr(&con->peer_addr.in_addr));
                con->error_msg = "protocol error, bad banner";
                return -1;
        }
        return 0;
}

static bool addr_is_blank(struct sockaddr_storage *ss)
{
        switch (ss->ss_family) {
        case AF_INET:
                return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
        case AF_INET6:
                return
                     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
                     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
                     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
                     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
        }
        return false;
}

static int addr_port(struct sockaddr_storage *ss)
{
        switch (ss->ss_family) {
        case AF_INET:
                return ntohs(((struct sockaddr_in *)ss)->sin_port);
        case AF_INET6:
                return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
        }
        return 0;
}

static void addr_set_port(struct sockaddr_storage *ss, int p)
{
        switch (ss->ss_family) {
        case AF_INET:
                ((struct sockaddr_in *)ss)->sin_port = htons(p);
                break;
        case AF_INET6:
                ((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
                break;
        }
}
/*
 * Parse an ip[:port] list into an addr array.  Use the default
 * monitor port if a port isn't specified.
 */
int ceph_parse_ips(const char *c, const char *end,
                   struct ceph_entity_addr *addr,
                   int max_count, int *count)
{
        int i;
        const char *p = c;

        dout("parse_ips on '%.*s'\n", (int)(end-c), c);
        for (i = 0; i < max_count; i++) {
                const char *ipend;
                struct sockaddr_storage *ss = &addr[i].in_addr;
                struct sockaddr_in *in4 = (void *)ss;
                struct sockaddr_in6 *in6 = (void *)ss;
                int port;

                memset(ss, 0, sizeof(*ss));
                if (in4_pton(p, end - p, (u8 *)&in4->sin_addr.s_addr,
                             ',', &ipend)) {
                        ss->ss_family = AF_INET;
                } else if (in6_pton(p, end - p, (u8 *)&in6->sin6_addr.s6_addr,
                                    ',', &ipend)) {
                        ss->ss_family = AF_INET6;
                } else {
                        goto bad;
                }
                p = ipend;

                /* port? */
                if (p < end && *p == ':') {
                        port = 0;
                        p++;
                        while (p < end && *p >= '0' && *p <= '9') {
                                port = (port * 10) + (*p - '0');
                                p++;
                        }
                        if (port > 65535 || port == 0)
                                goto bad;
                } else {
                        port = CEPH_MON_PORT;
                }

                addr_set_port(ss, port);

                dout("parse_ips got %s\n", pr_addr(ss));

                if (p == end)
                        break;
                if (*p != ',')
                        goto bad;
                p++;
        }

        if (p != end)
                goto bad;

        if (count)
                *count = i + 1;
        return 0;

bad:
        pr_err("parse_ips bad ip '%s'\n", c);
        return -EINVAL;
}
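/*
 * Example use (a sketch; the address list, array size and variable names
 * are made up, and the usual comma-separated mount syntax is assumed):
 */
#if 0
        struct ceph_entity_addr mon_addr[3];
        int num_mon, err;
        const char *s = "192.168.0.1:6789,192.168.0.2";

        err = ceph_parse_ips(s, s + strlen(s), mon_addr, 3, &num_mon);
        /* on success num_mon == 2; the second entry falls back to CEPH_MON_PORT */
#endif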
static int process_banner(struct ceph_connection *con)
{
        dout("process_banner on %p\n", con);

        if (verify_hello(con) < 0)
                return -1;

        ceph_decode_addr(&con->actual_peer_addr);
        ceph_decode_addr(&con->peer_addr_for_me);

        /*
         * Make sure the other end is who we wanted.  note that the other
         * end may not yet know their ip address, so if it's 0.0.0.0, give
         * them the benefit of the doubt.
         */
        if (memcmp(&con->peer_addr, &con->actual_peer_addr,
                   sizeof(con->peer_addr)) != 0 &&
            !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
              con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
                pr_warning("wrong peer, want %s/%lld, got %s/%lld\n",
                           pr_addr(&con->peer_addr.in_addr),
                           le64_to_cpu(con->peer_addr.nonce),
                           pr_addr(&con->actual_peer_addr.in_addr),
                           le64_to_cpu(con->actual_peer_addr.nonce));
                con->error_msg = "wrong peer at address";
                return -1;
        }

        /*
         * did we learn our address?
         */
        if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
                int port = addr_port(&con->msgr->inst.addr.in_addr);

                memcpy(&con->msgr->inst.addr.in_addr,
                       &con->peer_addr_for_me.in_addr,
                       sizeof(con->peer_addr_for_me.in_addr));
                addr_set_port(&con->msgr->inst.addr.in_addr, port);
                encode_my_addr(con->msgr);
                dout("process_banner learned my addr is %s\n",
                     pr_addr(&con->msgr->inst.addr.in_addr));
        }

        set_bit(NEGOTIATING, &con->state);
        prepare_read_connect(con);
        return 0;
}
static void fail_protocol(struct ceph_connection *con)
{
        reset_connection(con);
        set_bit(CLOSED, &con->state);  /* in case there's queued work */

        mutex_unlock(&con->mutex);
        if (con->ops->bad_proto)
                con->ops->bad_proto(con);
        mutex_lock(&con->mutex);
}
static int process_connect(struct ceph_connection *con)
{
        u64 sup_feat = CEPH_FEATURE_SUPPORTED;
        u64 req_feat = CEPH_FEATURE_REQUIRED;
        u64 server_feat = le64_to_cpu(con->in_reply.features);

        dout("process_connect on %p tag %d\n", con, (int)con->in_tag);

        switch (con->in_reply.tag) {
        case CEPH_MSGR_TAG_FEATURES:
                pr_err("%s%lld %s feature set mismatch,"
                       " my %llx < server's %llx, missing %llx\n",
                       ENTITY_NAME(con->peer_name),
                       pr_addr(&con->peer_addr.in_addr),
                       sup_feat, server_feat, server_feat & ~sup_feat);
                con->error_msg = "missing required protocol features";
                fail_protocol(con);
                return -1;

        case CEPH_MSGR_TAG_BADPROTOVER:
                pr_err("%s%lld %s protocol version mismatch,"
                       " my %d != server's %d\n",
                       ENTITY_NAME(con->peer_name),
                       pr_addr(&con->peer_addr.in_addr),
                       le32_to_cpu(con->out_connect.protocol_version),
                       le32_to_cpu(con->in_reply.protocol_version));
                con->error_msg = "protocol version mismatch";
                fail_protocol(con);
                return -1;

        case CEPH_MSGR_TAG_BADAUTHORIZER:
                con->auth_retry++;
                dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
                     con->auth_retry);
                if (con->auth_retry == 2) {
                        con->error_msg = "connect authorization failure";
                        reset_connection(con);
                        set_bit(CLOSED, &con->state);
                        return -1;
                }
                con->auth_retry = 1;
                prepare_write_connect(con->msgr, con, 0);
                prepare_read_connect_retry(con);
                break;

        case CEPH_MSGR_TAG_RESETSESSION:
                /*
                 * If we connected with a large connect_seq but the peer
                 * has no record of a session with us (no connection, or
                 * connect_seq == 0), they will send RESETSESION to indicate
                 * that they must have reset their session, and may have
                 * dropped messages.
                 */
                dout("process_connect got RESET peer seq %u\n",
                     le32_to_cpu(con->in_connect.connect_seq));
                pr_err("%s%lld %s connection reset\n",
                       ENTITY_NAME(con->peer_name),
                       pr_addr(&con->peer_addr.in_addr));
                reset_connection(con);
                prepare_write_connect(con->msgr, con, 0);
                prepare_read_connect(con);

                /* Tell ceph about it. */
                mutex_unlock(&con->mutex);
                pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
                if (con->ops->peer_reset)
                        con->ops->peer_reset(con);
                mutex_lock(&con->mutex);
                break;

        case CEPH_MSGR_TAG_RETRY_SESSION:
                /*
                 * If we sent a smaller connect_seq than the peer has, try
                 * again with a larger value.
                 */
                dout("process_connect got RETRY my seq = %u, peer_seq = %u\n",
                     le32_to_cpu(con->out_connect.connect_seq),
                     le32_to_cpu(con->in_connect.connect_seq));
                con->connect_seq = le32_to_cpu(con->in_connect.connect_seq);
                prepare_write_connect(con->msgr, con, 0);
                prepare_read_connect(con);
                break;

        case CEPH_MSGR_TAG_RETRY_GLOBAL:
                /*
                 * If we sent a smaller global_seq than the peer has, try
                 * again with a larger value.
                 */
                dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
                     con->peer_global_seq,
                     le32_to_cpu(con->in_connect.global_seq));
                get_global_seq(con->msgr,
                               le32_to_cpu(con->in_connect.global_seq));
                prepare_write_connect(con->msgr, con, 0);
                prepare_read_connect(con);
                break;

        case CEPH_MSGR_TAG_READY:
                if (req_feat & ~server_feat) {
                        pr_err("%s%lld %s protocol feature mismatch,"
                               " my required %llx > server's %llx, need %llx\n",
                               ENTITY_NAME(con->peer_name),
                               pr_addr(&con->peer_addr.in_addr),
                               req_feat, server_feat, req_feat & ~server_feat);
                        con->error_msg = "missing required protocol features";
                        fail_protocol(con);
                        return -1;
                }
                clear_bit(CONNECTING, &con->state);
                con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
                con->connect_seq++;
                dout("process_connect got READY gseq %d cseq %d (%d)\n",
                     con->peer_global_seq,
                     le32_to_cpu(con->in_reply.connect_seq),
                     con->connect_seq);
                WARN_ON(con->connect_seq !=
                        le32_to_cpu(con->in_reply.connect_seq));

                if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
                        set_bit(LOSSYTX, &con->state);

                prepare_read_tag(con);
                break;

        case CEPH_MSGR_TAG_WAIT:
                /*
                 * If there is a connection race (we are opening
                 * connections to each other), one of us may just have
                 * to WAIT.  This shouldn't happen if we are the
                 * client.
                 */
                pr_err("process_connect peer connecting WAIT\n");
                /* fall through */

        default:
                pr_err("connect protocol error, will retry\n");
                con->error_msg = "protocol error, garbage tag during connect";
                return -1;
        }
        return 0;
}
/*
 * read (part of) an ack
 */
static int read_partial_ack(struct ceph_connection *con)
{
        int to = 0;

        return read_partial(con, &to, sizeof(con->in_temp_ack),
                            &con->in_temp_ack);
}


/*
 * We can finally discard anything that's been acked.
 */
static void process_ack(struct ceph_connection *con)
{
        struct ceph_msg *m;
        u64 ack = le64_to_cpu(con->in_temp_ack);
        u64 seq;

        while (!list_empty(&con->out_sent)) {
                m = list_first_entry(&con->out_sent, struct ceph_msg,
                                     list_head);
                seq = le64_to_cpu(m->hdr.seq);
                if (seq > ack)
                        break;
                dout("got ack for seq %llu type %d at %p\n", seq,
                     le16_to_cpu(m->hdr.type), m);
                ceph_msg_remove(m);
        }
        prepare_read_tag(con);
}
static int read_partial_message_section(struct ceph_connection *con,
                                        struct kvec *section,
                                        unsigned int sec_len, u32 *crc)
{
        int left;
        int ret;

        while (section->iov_len < sec_len) {
                BUG_ON(section->iov_base == NULL);
                left = sec_len - section->iov_len;
                ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
                                       section->iov_len, left);
                if (ret <= 0)
                        return ret;
                section->iov_len += ret;
                if (section->iov_len == sec_len)
                        *crc = crc32c(0, section->iov_base,
                                      section->iov_len);
        }

        return 1;
}

static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
                                       struct ceph_msg_header *hdr,
                                       int *skip);
/*
 * read (part of) a message.
 */
static int read_partial_message(struct ceph_connection *con)
{
        struct ceph_msg *m = con->in_msg;
        void *p;
        int ret;
        int to, left;
        unsigned front_len, middle_len, data_len, data_off;
        int datacrc = !con->msgr->nocrc;
        int skip;

        dout("read_partial_message con %p msg %p\n", con, m);

        /* header */
        while (con->in_base_pos < sizeof(con->in_hdr)) {
                left = sizeof(con->in_hdr) - con->in_base_pos;
                ret = ceph_tcp_recvmsg(con->sock,
                                       (char *)&con->in_hdr + con->in_base_pos,
                                       left);
                if (ret <= 0)
                        return ret;
                con->in_base_pos += ret;
                if (con->in_base_pos == sizeof(con->in_hdr)) {
                        u32 crc = crc32c(0, (void *)&con->in_hdr,
                                 sizeof(con->in_hdr) - sizeof(con->in_hdr.crc));
                        if (crc != le32_to_cpu(con->in_hdr.crc)) {
                                pr_err("read_partial_message bad hdr "
                                       " crc %u != expected %u\n",
                                       crc, con->in_hdr.crc);
                                return -EBADMSG;
                        }
                }
        }

        front_len = le32_to_cpu(con->in_hdr.front_len);
        if (front_len > CEPH_MSG_MAX_FRONT_LEN)
                return -EIO;
        middle_len = le32_to_cpu(con->in_hdr.middle_len);
        if (middle_len > CEPH_MSG_MAX_DATA_LEN)
                return -EIO;
        data_len = le32_to_cpu(con->in_hdr.data_len);
        if (data_len > CEPH_MSG_MAX_DATA_LEN)
                return -EIO;
        data_off = le16_to_cpu(con->in_hdr.data_off);

        /* allocate message? */
        if (!con->in_msg) {
                dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
                     con->in_hdr.front_len, con->in_hdr.data_len);
                con->in_msg = ceph_alloc_msg(con, &con->in_hdr, &skip);
                if (!con->in_msg) {
                        /* skip this message */
                        pr_err("alloc_msg returned NULL, skipping message\n");
                        con->in_base_pos = -front_len - middle_len - data_len -
                                sizeof(m->footer);
                        con->in_tag = CEPH_MSGR_TAG_READY;
                        return 0;
                }
                if (IS_ERR(con->in_msg)) {
                        ret = PTR_ERR(con->in_msg);
                        con->in_msg = NULL;
                        con->error_msg =
                                "error allocating memory for incoming message";
                        return ret;
                }
                m = con->in_msg;
                m->front.iov_len = 0;    /* haven't read it yet */
                if (m->middle)
                        m->middle->vec.iov_len = 0;

                con->in_msg_pos.page = 0;
                con->in_msg_pos.page_pos = data_off & ~PAGE_MASK;
                con->in_msg_pos.data_pos = 0;
        }

        /* front */
        ret = read_partial_message_section(con, &m->front, front_len,
                                           &con->in_front_crc);
        if (ret <= 0)
                return ret;

        /* middle */
        if (m->middle) {
                ret = read_partial_message_section(con, &m->middle->vec, middle_len,
                                                   &con->in_middle_crc);
                if (ret <= 0)
                        return ret;
        }

        /* (page) data */
        while (con->in_msg_pos.data_pos < data_len) {
                left = min((int)(data_len - con->in_msg_pos.data_pos),
                           (int)(PAGE_SIZE - con->in_msg_pos.page_pos));
                BUG_ON(m->pages == NULL);
                p = kmap(m->pages[con->in_msg_pos.page]);
                ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
                                       left);
                if (ret > 0 && datacrc)
                        con->in_data_crc =
                                crc32c(con->in_data_crc,
                                       p + con->in_msg_pos.page_pos, ret);
                kunmap(m->pages[con->in_msg_pos.page]);
                if (ret <= 0)
                        return ret;
                con->in_msg_pos.data_pos += ret;
                con->in_msg_pos.page_pos += ret;
                if (con->in_msg_pos.page_pos == PAGE_SIZE) {
                        con->in_msg_pos.page_pos = 0;
                        con->in_msg_pos.page++;
                }
        }

        /* footer */
        to = sizeof(m->hdr) + sizeof(m->footer);
        while (con->in_base_pos < to) {
                left = to - con->in_base_pos;
                ret = ceph_tcp_recvmsg(con->sock, (char *)&m->footer +
                                       (con->in_base_pos - sizeof(m->hdr)),
                                       left);
                if (ret <= 0)
                        return ret;
                con->in_base_pos += ret;
        }
        dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
             m, front_len, m->footer.front_crc, middle_len,
             m->footer.middle_crc, data_len, m->footer.data_crc);

        /* crc ok? */
        if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
                pr_err("read_partial_message %p front crc %u != exp. %u\n",
                       m, con->in_front_crc, m->footer.front_crc);
                return -EBADMSG;
        }
        if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
                pr_err("read_partial_message %p middle crc %u != exp %u\n",
                       m, con->in_middle_crc, m->footer.middle_crc);
                return -EBADMSG;
        }
        if (datacrc &&
            (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
            con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
                pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
                       con->in_data_crc, le32_to_cpu(m->footer.data_crc));
                return -EBADMSG;
        }

        return 1; /* done! */
}
/*
 * Process message.  This happens in the worker thread.  The callback should
 * be careful not to do anything that waits on other incoming messages or it
 * may deadlock.
 */
static void process_message(struct ceph_connection *con)
{
        struct ceph_msg *msg;

        msg = con->in_msg;
        con->in_msg = NULL;

        /* if first message, set peer_name */
        if (con->peer_name.type == 0)
                con->peer_name = msg->hdr.src.name;

        con->in_seq++;
        mutex_unlock(&con->mutex);

        dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
             msg, le64_to_cpu(msg->hdr.seq),
             ENTITY_NAME(msg->hdr.src.name),
             le16_to_cpu(msg->hdr.type),
             ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
             le32_to_cpu(msg->hdr.front_len),
             le32_to_cpu(msg->hdr.data_len),
             con->in_front_crc, con->in_middle_crc, con->in_data_crc);
        con->ops->dispatch(con, msg);

        mutex_lock(&con->mutex);
        prepare_read_tag(con);
}
/*
 * Write something to the socket.  Called in a worker thread when the
 * socket appears to be writeable and we have something ready to send.
 */
static int try_write(struct ceph_connection *con)
{
        struct ceph_messenger *msgr = con->msgr;
        int ret = 1;

        dout("try_write start %p state %lu nref %d\n", con, con->state,
             atomic_read(&con->nref));

        mutex_lock(&con->mutex);
more:
        dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);

        /* open the socket first? */
        if (con->sock == NULL) {
                /*
                 * if we were STANDBY and are reconnecting _this_
                 * connection, bump connect_seq now.  Always bump
                 * global_seq.
                 */
                if (test_and_clear_bit(STANDBY, &con->state))
                        con->connect_seq++;

                prepare_write_banner(msgr, con);
                prepare_write_connect(msgr, con, 1);
                prepare_read_banner(con);
                set_bit(CONNECTING, &con->state);
                clear_bit(NEGOTIATING, &con->state);

                BUG_ON(con->in_msg);
                con->in_tag = CEPH_MSGR_TAG_READY;
                dout("try_write initiating connect on %p new state %lu\n",
                     con, con->state);
                con->sock = ceph_tcp_connect(con);
                if (IS_ERR(con->sock)) {
                        con->sock = NULL;
                        con->error_msg = "connect error";
                        ret = -1;
                        goto out;
                }
        }

more_kvec:
        /* kvec data queued? */
        if (con->out_skip) {
                ret = write_partial_skip(con);
                if (ret == 0)
                        goto done;
                if (ret < 0) {
                        dout("try_write write_partial_skip err %d\n", ret);
                        goto done;
                }
        }
        if (con->out_kvec_left) {
                ret = write_partial_kvec(con);
                if (ret <= 0)
                        goto done;
        }

        /* msg pages? */
        if (con->out_msg) {
                if (con->out_msg_done) {
                        ceph_msg_put(con->out_msg);
                        con->out_msg = NULL;   /* we're done with this one */
                        goto do_next;
                }

                ret = write_partial_msg_pages(con);
                if (ret == 1)
                        goto more_kvec;  /* we need to send the footer, too! */
                if (ret == 0)
                        goto done;
                if (ret < 0) {
                        dout("try_write write_partial_msg_pages err %d\n",
                             ret);
                        goto done;
                }
        }

do_next:
        if (!test_bit(CONNECTING, &con->state)) {
                /* is anything else pending? */
                if (!list_empty(&con->out_queue)) {
                        prepare_write_message(con);
                        goto more;
                }
                if (con->in_seq > con->in_seq_acked) {
                        prepare_write_ack(con);
                        goto more;
                }
                if (test_and_clear_bit(KEEPALIVE_PENDING, &con->state)) {
                        prepare_write_keepalive(con);
                        goto more;
                }
        }

        /* Nothing to do! */
        clear_bit(WRITE_PENDING, &con->state);
        dout("try_write nothing else to write.\n");
done:
        ret = 0;
out:
        mutex_unlock(&con->mutex);
        dout("try_write done on %p\n", con);
        return ret;
}
/*
 * Read what we can from the socket.
 */
static int try_read(struct ceph_connection *con)
{
        struct ceph_messenger *msgr;
        int ret = -1;

        if (!con->sock)
                return 0;

        if (test_bit(STANDBY, &con->state))
                return 0;

        dout("try_read start on %p\n", con);
        msgr = con->msgr;

        mutex_lock(&con->mutex);

more:
        dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
             con->in_base_pos);
        if (test_bit(CONNECTING, &con->state)) {
                if (!test_bit(NEGOTIATING, &con->state)) {
                        dout("try_read connecting\n");
                        ret = read_partial_banner(con);
                        if (ret <= 0)
                                goto done;
                        if (process_banner(con) < 0) {
                                ret = -1;
                                goto out;
                        }
                }
                ret = read_partial_connect(con);
                if (ret <= 0)
                        goto done;
                if (process_connect(con) < 0) {
                        ret = -1;
                        goto out;
                }
                goto more;
        }

        if (con->in_base_pos < 0) {
                /*
                 * skipping + discarding content.
                 *
                 * FIXME: there must be a better way to do this!
                 */
                static char buf[1024];
                int skip = min(1024, -con->in_base_pos);
                dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
                ret = ceph_tcp_recvmsg(con->sock, buf, skip);
                if (ret <= 0)
                        goto done;
                con->in_base_pos += ret;
                if (con->in_base_pos)
                        goto more;
        }
        if (con->in_tag == CEPH_MSGR_TAG_READY) {
                /*
                 * what's next?
                 */
                ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
                if (ret <= 0)
                        goto done;
                dout("try_read got tag %d\n", (int)con->in_tag);
                switch (con->in_tag) {
                case CEPH_MSGR_TAG_MSG:
                        prepare_read_message(con);
                        break;
                case CEPH_MSGR_TAG_ACK:
                        prepare_read_ack(con);
                        break;
                case CEPH_MSGR_TAG_CLOSE:
                        set_bit(CLOSED, &con->state);   /* fixme */
                        goto done;
                default:
                        goto bad_tag;
                }
        }
        if (con->in_tag == CEPH_MSGR_TAG_MSG) {
                ret = read_partial_message(con);
                if (ret <= 0) {
                        switch (ret) {
                        case -EBADMSG:
                                con->error_msg = "bad crc";
                                ret = -EIO;
                                goto out;
                        case -EIO:
                                con->error_msg = "io error";
                                goto out;
                        default:
                                goto done;
                        }
                }
                if (con->in_tag == CEPH_MSGR_TAG_READY)
                        goto more;
                process_message(con);
                goto more;
        }
        if (con->in_tag == CEPH_MSGR_TAG_ACK) {
                ret = read_partial_ack(con);
                if (ret <= 0)
                        goto done;
                process_ack(con);
                goto more;
        }

done:
        ret = 0;
out:
        mutex_unlock(&con->mutex);
        dout("try_read done on %p\n", con);
        return ret;

bad_tag:
        pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
        con->error_msg = "protocol error, garbage tag";
        ret = -1;
        goto out;
}
/*
 * Atomically queue work on a connection.  Bump @con reference to
 * avoid races with connection teardown.
 *
 * There is some trickery going on with QUEUED and BUSY because we
 * only want a _single_ thread operating on each connection at any
 * point in time, but we want to use all available CPUs.
 *
 * The worker thread only proceeds if it can atomically set BUSY.  It
 * clears QUEUED and does its thing.  When it thinks it's done, it
 * clears BUSY, then rechecks QUEUED.. if it's set again, it loops
 * (tries again to set BUSY).
 *
 * To queue work, we first set QUEUED, _then_ if BUSY isn't set, we
 * try to queue work.  If that fails (work is already queued, or BUSY)
 * we give up (work also already being done or is queued) but leave QUEUED
 * set so that the worker thread will loop if necessary.
 */
static void queue_con(struct ceph_connection *con)
{
        if (test_bit(DEAD, &con->state)) {
                dout("queue_con %p ignoring: DEAD\n", con);
                return;
        }

        if (!con->ops->get(con)) {
                dout("queue_con %p ref count 0\n", con);
                return;
        }

        set_bit(QUEUED, &con->state);
        if (test_bit(BUSY, &con->state)) {
                dout("queue_con %p - already BUSY\n", con);
                con->ops->put(con);
        } else if (!queue_work(ceph_msgr_wq, &con->work.work)) {
                dout("queue_con %p - already queued\n", con);
                con->ops->put(con);
        } else {
                dout("queue_con %p\n", con);
        }
}
/*
 * Do some work on a connection.  Drop a connection ref when we're done.
 */
static void con_work(struct work_struct *work)
{
        struct ceph_connection *con = container_of(work, struct ceph_connection,
                                                   work.work);
        int backoff = 0;

more:
        if (test_and_set_bit(BUSY, &con->state) != 0) {
                dout("con_work %p BUSY already set\n", con);
                goto out;
        }
        dout("con_work %p start, clearing QUEUED\n", con);
        clear_bit(QUEUED, &con->state);

        if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */
                dout("con_work CLOSED\n");
                con_close_socket(con);
                goto done;
        }
        if (test_and_clear_bit(OPENING, &con->state)) {
                /* reopen w/ new peer */
                dout("con_work OPENING\n");
                con_close_socket(con);
        }

        if (test_and_clear_bit(SOCK_CLOSED, &con->state) ||
            try_read(con) < 0 ||
            try_write(con) < 0) {
                backoff = 1;
                ceph_fault(con);     /* error/fault path */
        }

done:
        clear_bit(BUSY, &con->state);
        dout("con->state=%lu\n", con->state);
        if (test_bit(QUEUED, &con->state)) {
                if (!backoff) {
                        dout("con_work %p QUEUED reset, looping\n", con);
                        goto more;
                }
                dout("con_work %p QUEUED reset, but just faulted\n", con);
                clear_bit(QUEUED, &con->state);
        }
        dout("con_work %p done\n", con);

out:
        con->ops->put(con);
}
/*
 * Generic error/fault handler.  A retry mechanism is used with
 * exponential backoff.
 */
static void ceph_fault(struct ceph_connection *con)
{
        pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
               pr_addr(&con->peer_addr.in_addr), con->error_msg);
        dout("fault %p state %lu to peer %s\n",
             con, con->state, pr_addr(&con->peer_addr.in_addr));

        if (test_bit(LOSSYTX, &con->state)) {
                dout("fault on LOSSYTX channel\n");
                goto out;
        }

        clear_bit(BUSY, &con->state);  /* to avoid an improbable race */

        mutex_lock(&con->mutex);

        con_close_socket(con);

        if (con->in_msg) {
                ceph_msg_put(con->in_msg);
                con->in_msg = NULL;
        }

        /* If there are no messages in the queue, place the connection
         * in a STANDBY state (i.e., don't try to reconnect just yet). */
        if (list_empty(&con->out_queue) && !con->out_keepalive_pending) {
                dout("fault setting STANDBY\n");
                set_bit(STANDBY, &con->state);
                mutex_unlock(&con->mutex);
                goto out;
        }

        /* Requeue anything that hasn't been acked, and retry after a
         * delay. */
        list_splice_init(&con->out_sent, &con->out_queue);

        if (con->delay == 0)
                con->delay = BASE_DELAY_INTERVAL;
        else if (con->delay < MAX_DELAY_INTERVAL)
                con->delay *= 2;

        mutex_unlock(&con->mutex);

        /* explicitly schedule work to try to reconnect again later. */
        dout("fault queueing %p delay %lu\n", con, con->delay);
        con->ops->get(con);
        if (queue_delayed_work(ceph_msgr_wq, &con->work,
                               round_jiffies_relative(con->delay)) == 0)
                con->ops->put(con);

out:
        if (con->ops->fault)
                con->ops->fault(con);
}
/*
 * create a new messenger instance
 */
struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr)
{
        struct ceph_messenger *msgr;

        msgr = kzalloc(sizeof(*msgr), GFP_KERNEL);
        if (msgr == NULL)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&msgr->global_seq_lock);

        /* the zero page is needed if a request is "canceled" while the message
         * is being written over the socket */
        msgr->zero_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!msgr->zero_page) {
                kfree(msgr);
                return ERR_PTR(-ENOMEM);
        }
        kmap(msgr->zero_page);

        if (myaddr)
                msgr->inst.addr = *myaddr;

        /* select a random nonce */
        msgr->inst.addr.type = 0;
        get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
        encode_my_addr(msgr);

        dout("messenger_create %p\n", msgr);
        return msgr;
}

void ceph_messenger_destroy(struct ceph_messenger *msgr)
{
        dout("destroy %p\n", msgr);
        kunmap(msgr->zero_page);
        __free_page(msgr->zero_page);
        kfree(msgr);
        dout("destroyed messenger %p\n", msgr);
}
/*
 * Queue up an outgoing message on the given connection.
 */
void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
{
        if (test_bit(CLOSED, &con->state)) {
                dout("con_send %p closed, dropping %p\n", con, msg);
                ceph_msg_put(msg);
                return;
        }

        /* set src */
        msg->hdr.src.name = con->msgr->inst.name;
        msg->hdr.src.addr = con->msgr->my_enc_addr;
        msg->hdr.orig_src = msg->hdr.src;

        /* queue */
        mutex_lock(&con->mutex);
        BUG_ON(!list_empty(&msg->list_head));
        list_add_tail(&msg->list_head, &con->out_queue);
        dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
             ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
             ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
             le32_to_cpu(msg->hdr.front_len),
             le32_to_cpu(msg->hdr.middle_len),
             le32_to_cpu(msg->hdr.data_len));
        mutex_unlock(&con->mutex);

        /* if there wasn't anything waiting to send before, queue
         * new work */
        if (test_and_set_bit(WRITE_PENDING, &con->state) == 0)
                queue_con(con);
}
/*
 * Revoke a message that was previously queued for send
 */
void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg)
{
        mutex_lock(&con->mutex);
        if (!list_empty(&msg->list_head)) {
                dout("con_revoke %p msg %p\n", con, msg);
                list_del_init(&msg->list_head);
                ceph_msg_put(msg);
                msg->hdr.seq = 0;
                if (con->out_msg == msg) {
                        ceph_msg_put(con->out_msg);
                        con->out_msg = NULL;
                }
                if (con->out_kvec_is_msg) {
                        con->out_skip = con->out_kvec_bytes;
                        con->out_kvec_is_msg = false;
                }
        } else {
                dout("con_revoke %p msg %p - not queued (sent?)\n", con, msg);
        }
        mutex_unlock(&con->mutex);
}
/*
 * Revoke a message that we may be reading data into
 */
void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
{
        mutex_lock(&con->mutex);
        if (con->in_msg && con->in_msg == msg) {
                unsigned front_len = le32_to_cpu(con->in_hdr.front_len);
                unsigned middle_len = le32_to_cpu(con->in_hdr.middle_len);
                unsigned data_len = le32_to_cpu(con->in_hdr.data_len);

                /* skip rest of message */
                dout("con_revoke_pages %p msg %p revoked\n", con, msg);
                con->in_base_pos = con->in_base_pos -
                                sizeof(struct ceph_msg_header) -
                                front_len -
                                middle_len -
                                data_len -
                                sizeof(struct ceph_msg_footer);
                ceph_msg_put(con->in_msg);
                con->in_msg = NULL;
                con->in_tag = CEPH_MSGR_TAG_READY;
        } else {
                dout("con_revoke_pages %p msg %p pages %p no-op\n",
                     con, con->in_msg, msg);
        }
        mutex_unlock(&con->mutex);
}
/*
 * Queue a keepalive byte to ensure the tcp connection is alive.
 */
void ceph_con_keepalive(struct ceph_connection *con)
{
        if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 &&
            test_and_set_bit(WRITE_PENDING, &con->state) == 0)
                queue_con(con);
}
/*
 * construct a new message with given type, size
 * the new msg has a ref count of 1.
 */
struct ceph_msg *ceph_msg_new(int type, int front_len,
                              int page_len, int page_off, struct page **pages)
{
        struct ceph_msg *m;

        m = kmalloc(sizeof(*m), GFP_NOFS);
        if (m == NULL)
                goto out;
        kref_init(&m->kref);
        INIT_LIST_HEAD(&m->list_head);

        m->hdr.type = cpu_to_le16(type);
        m->hdr.front_len = cpu_to_le32(front_len);
        m->hdr.middle_len = 0;
        m->hdr.data_len = cpu_to_le32(page_len);
        m->hdr.data_off = cpu_to_le16(page_off);
        m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
        m->footer.front_crc = 0;
        m->footer.middle_crc = 0;
        m->footer.data_crc = 0;
        m->front_max = front_len;
        m->front_is_vmalloc = false;
        m->more_to_follow = false;
        m->pool = NULL;

        /* front */
        if (front_len) {
                if (front_len > PAGE_CACHE_SIZE) {
                        m->front.iov_base = __vmalloc(front_len, GFP_NOFS,
                                                      PAGE_KERNEL);
                        m->front_is_vmalloc = true;
                } else {
                        m->front.iov_base = kmalloc(front_len, GFP_NOFS);
                }
                if (m->front.iov_base == NULL) {
                        pr_err("msg_new can't allocate %d bytes\n",
                               front_len);
                        goto out2;
                }
        } else {
                m->front.iov_base = NULL;
        }
        m->front.iov_len = front_len;

        /* middle */
        m->middle = NULL;

        /* data */
        m->nr_pages = calc_pages_for(page_off, page_len);
        m->pages = pages;
        m->pagelist = NULL;

        dout("ceph_msg_new %p page %d~%d -> %d\n", m, page_off, page_len,
             m->nr_pages);
        return m;

out2:
        ceph_msg_put(m);
out:
        pr_err("msg_new can't create type %d len %d\n", type, front_len);
        return ERR_PTR(-ENOMEM);
}
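/*
 * Example use (a sketch; the front size and variable names are made up,
 * and the message type constant is assumed to come from the ceph wire
 * protocol headers): allocate a small front-only message and queue it on
 * an open connection.
 */
#if 0
        struct ceph_msg *req;

        req = ceph_msg_new(CEPH_MSG_STATFS, 64, 0, 0, NULL);
        if (IS_ERR(req))
                return PTR_ERR(req);
        ceph_con_send(con, req);        /* messenger sends it when writable */
#endif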
2096 * Allocate "middle" portion of a message, if it is needed and wasn't
2097 * allocated by alloc_msg. This allows us to read a small fixed-size
2098 * per-type header in the front and then gracefully fail (i.e.,
2099 * propagate the error to the caller based on info in the front) when
2100 * the middle is too large.
2102 static int ceph_alloc_middle(struct ceph_connection
*con
, struct ceph_msg
*msg
)
2104 int type
= le16_to_cpu(msg
->hdr
.type
);
2105 int middle_len
= le32_to_cpu(msg
->hdr
.middle_len
);
2107 dout("alloc_middle %p type %d %s middle_len %d\n", msg
, type
,
2108 ceph_msg_type_name(type
), middle_len
);
2109 BUG_ON(!middle_len
);
2110 BUG_ON(msg
->middle
);
2112 msg
->middle
= ceph_buffer_new(middle_len
, GFP_NOFS
);
2119 * Generic message allocator, for incoming messages.
2121 static struct ceph_msg
*ceph_alloc_msg(struct ceph_connection
*con
,
2122 struct ceph_msg_header
*hdr
,
2125 int type
= le16_to_cpu(hdr
->type
);
2126 int front_len
= le32_to_cpu(hdr
->front_len
);
2127 int middle_len
= le32_to_cpu(hdr
->middle_len
);
2128 struct ceph_msg
*msg
= NULL
;
2131 if (con
->ops
->alloc_msg
) {
2132 mutex_unlock(&con
->mutex
);
2133 msg
= con
->ops
->alloc_msg(con
, hdr
, skip
);
2134 mutex_lock(&con
->mutex
);
2143 msg
= ceph_msg_new(type
, front_len
, 0, 0, NULL
);
2145 pr_err("unable to allocate msg type %d len %d\n",
2147 return ERR_PTR(-ENOMEM
);
2150 memcpy(&msg
->hdr
, &con
->in_hdr
, sizeof(con
->in_hdr
));
2153 ret
= ceph_alloc_middle(con
, msg
);
/*
 * Free a generically kmalloc'd message.
 */
void ceph_msg_kfree(struct ceph_msg *m)
{
        dout("msg_kfree %p\n", m);
        if (m->front_is_vmalloc)
                vfree(m->front.iov_base);
        else
                kfree(m->front.iov_base);
        kfree(m);
}

/*
 * Drop a msg ref.  Destroy as needed.
 */
void ceph_msg_last_put(struct kref *kref)
{
        struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);

        dout("ceph_msg_put last one on %p\n", m);
        WARN_ON(!list_empty(&m->list_head));

        /* drop middle, data, if any */
        if (m->middle) {
                ceph_buffer_put(m->middle);
                m->middle = NULL;
        }

        if (m->pagelist) {
                ceph_pagelist_release(m->pagelist);
                kfree(m->pagelist);
                m->pagelist = NULL;
        }

        if (m->pool)
                ceph_msgpool_put(m->pool, m);
        else
                ceph_msg_kfree(m);
}
void ceph_msg_dump(struct ceph_msg *msg)
{
        pr_debug("msg_dump %p (front_max %d nr_pages %d)\n", msg,
                 msg->front_max, msg->nr_pages);
        print_hex_dump(KERN_DEBUG, "header: ",
                       DUMP_PREFIX_OFFSET, 16, 1,
                       &msg->hdr, sizeof(msg->hdr), true);
        print_hex_dump(KERN_DEBUG, " front: ",
                       DUMP_PREFIX_OFFSET, 16, 1,
                       msg->front.iov_base, msg->front.iov_len, true);
        if (msg->middle)
                print_hex_dump(KERN_DEBUG, "middle: ",
                               DUMP_PREFIX_OFFSET, 16, 1,
                               msg->middle->vec.iov_base,
                               msg->middle->vec.iov_len, true);
        print_hex_dump(KERN_DEBUG, "footer: ",
                       DUMP_PREFIX_OFFSET, 16, 1,
                       &msg->footer, sizeof(msg->footer), true);
}