// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2014 UnitedStack <haomai@unitedstack.com>
 *
 * Author: Haomai Wang <haomaiwang@gmail.com>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */
#include "include/Context.h"
#include "common/errno.h"
#include "AsyncMessenger.h"
#include "AsyncConnection.h"

#include "messages/MOSDOp.h"
#include "messages/MOSDOpReply.h"
#include "common/EventTrace.h"
// Constant to limit starting sequence number to 2^31.  Nothing special about it, just a big number.  PLR
#define SEQ_MASK  0x7fffffff

#define dout_subsys ceph_subsys_ms
#define dout_prefix _conn_prefix(_dout)
ostream& AsyncConnection::_conn_prefix(std::ostream *_dout) {
  return *_dout << "-- " << async_msgr->get_myinst().addr << " >> " << peer_addr
                << " conn(" << this
                << " s=" << get_state_name(state)
                << " pgs=" << peer_global_seq
                << " cs=" << connect_seq
                << " l=" << policy.lossy
                << ").";
}
// Notes:
// 1. Don't dispatch any event when closed! It may keep the AsyncConnection
//    alive even after the AsyncMessenger is destroyed.
const int AsyncConnection::TCP_PREFETCH_MIN_SIZE = 512;
const int ASYNC_COALESCE_THRESHOLD = 256;
class C_time_wakeup : public EventCallback {
  AsyncConnectionRef conn;

 public:
  explicit C_time_wakeup(AsyncConnectionRef c): conn(c) {}
  void do_request(int fd_or_id) override {
    conn->wakeup_from(fd_or_id);
  }
};

class C_handle_read : public EventCallback {
  AsyncConnectionRef conn;

 public:
  explicit C_handle_read(AsyncConnectionRef c): conn(c) {}
  void do_request(int fd_or_id) override {
    conn->process();
  }
};

class C_handle_write : public EventCallback {
  AsyncConnectionRef conn;

 public:
  explicit C_handle_write(AsyncConnectionRef c): conn(c) {}
  void do_request(int fd) override {
    conn->handle_write();
  }
};

class C_clean_handler : public EventCallback {
  AsyncConnectionRef conn;

 public:
  explicit C_clean_handler(AsyncConnectionRef c): conn(c) {}
  void do_request(int id) override {
    conn->cleanup();
    delete this;
  }
};

class C_tick_wakeup : public EventCallback {
  AsyncConnectionRef conn;

 public:
  explicit C_tick_wakeup(AsyncConnectionRef c): conn(c) {}
  void do_request(int fd_or_id) override {
    conn->tick(fd_or_id);
  }
};
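// These small EventCallback shims are the glue between the EventCenter and
// the connection: each holds an AsyncConnectionRef, so the connection stays
// pinned for as long as a callback referencing it can still fire.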
static void alloc_aligned_buffer(bufferlist& data, unsigned len, unsigned off)
{
  // create a buffer to read into that matches the data alignment
  unsigned left = len;
  if (off & ~CEPH_PAGE_MASK) {
    // head
    unsigned head = MIN(CEPH_PAGE_SIZE - (off & ~CEPH_PAGE_MASK), left);
    data.push_back(buffer::create(head));
    left -= head;
  }
  unsigned middle = left & CEPH_PAGE_MASK;
  if (middle > 0) {
    data.push_back(buffer::create_page_aligned(middle));
    left -= middle;
  }
  if (left) {
    data.push_back(buffer::create(left));
  }
}
AsyncConnection::AsyncConnection(CephContext *cct, AsyncMessenger *m, DispatchQueue *q,
                                 Worker *w)
  : Connection(cct, m), delay_state(NULL), async_msgr(m), conn_id(q->get_id()),
    logger(w->get_perf_counter()), global_seq(0), connect_seq(0), peer_global_seq(0),
    out_seq(0), ack_left(0), in_seq(0), state(STATE_NONE), state_after_send(STATE_NONE), port(-1),
    dispatch_queue(q), can_write(WriteStatus::NOWRITE),
    open_write(false), keepalive(false), recv_buf(NULL),
    recv_max_prefetch(MAX(msgr->cct->_conf->ms_tcp_prefetch_max_size, TCP_PREFETCH_MIN_SIZE)),
    recv_start(0), recv_end(0),
    last_active(ceph::coarse_mono_clock::now()),
    inactive_timeout_us(cct->_conf->ms_tcp_read_timeout*1000*1000),
    got_bad_auth(false), authorizer(NULL), replacing(false),
    is_reset_from_peer(false), once_ready(false), state_buffer(NULL), state_offset(0),
    worker(w), center(&w->center)
{
  read_handler = new C_handle_read(this);
  write_handler = new C_handle_write(this);
  wakeup_handler = new C_time_wakeup(this);
  tick_handler = new C_tick_wakeup(this);
  memset(msgvec, 0, sizeof(msgvec));
  // double recv_max_prefetch; see "read_until"
  recv_buf = new char[2*recv_max_prefetch];
  state_buffer = new char[4096];
  logger->inc(l_msgr_created_connections);
}
AsyncConnection::~AsyncConnection()
{
  assert(out_q.empty());
  assert(sent.empty());
  delete[] state_buffer;
  assert(!delay_state);
}
void AsyncConnection::maybe_start_delay_thread()
{
  if (!delay_state) {
    auto pos = async_msgr->cct->_conf->get_val<std::string>("ms_inject_delay_type").find(ceph_entity_type_name(peer_type));
    if (pos != string::npos) {
      ldout(msgr->cct, 1) << __func__ << " setting up a delay queue" << dendl;
      delay_state = new DelayedDelivery(async_msgr, center, dispatch_queue,
                                        conn_id);
    }
  }
}
/* return -1 means the fd hit an error or was closed and should be shut down,
 * return 0 means EAGAIN or EINTR */
ssize_t AsyncConnection::read_bulk(char *buf, unsigned len)
{
  ssize_t nread;
 again:
  nread = cs.read(buf, len);
  if (nread < 0) {
    if (nread == -EAGAIN) {
      nread = 0;
    } else if (nread == -EINTR) {
      goto again;
    } else {
      ldout(async_msgr->cct, 1) << __func__ << " reading from fd=" << cs.fd()
                                << " : "<< strerror(nread) << dendl;
      return -1;
    }
  } else if (nread == 0) {
    ldout(async_msgr->cct, 1) << __func__ << " peer close file descriptor "
                              << cs.fd() << dendl;
    return -1;
  }
  return nread;
}
// return the remaining bytes; this may be larger than the length of ptr.
// return < 0 on error.
ssize_t AsyncConnection::_try_send(bool more)
{
  if (async_msgr->cct->_conf->ms_inject_socket_failures && cs) {
    if (rand() % async_msgr->cct->_conf->ms_inject_socket_failures == 0) {
      ldout(async_msgr->cct, 0) << __func__ << " injecting socket failure" << dendl;
      cs.shutdown();
    }
  }

  ssize_t r = cs.send(outcoming_bl, more);
  if (r < 0) {
    ldout(async_msgr->cct, 1) << __func__ << " send error: " << cpp_strerror(r) << dendl;
    return r;
  }

  ldout(async_msgr->cct, 10) << __func__ << " sent bytes " << r
                             << " remaining bytes " << outcoming_bl.length() << dendl;

  if (!open_write && is_queued()) {
    if (center->in_thread()) {
      center->create_file_event(cs.fd(), EVENT_WRITABLE, write_handler);
      open_write = true;
    } else {
      center->dispatch_event_external(write_handler);
    }
  }

  if (open_write && !is_queued()) {
    if (center->in_thread()) {
      center->delete_file_event(cs.fd(), EVENT_WRITABLE);
      open_write = false;
    } else {
      center->dispatch_event_external(write_handler);
    }
    if (state_after_send != STATE_NONE)
      center->dispatch_event_external(read_handler);
  }

  return outcoming_bl.length();
}
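// Note on the open_write bookkeeping above: the EVENT_WRITABLE handler is
// only registered while data is actually queued and removed again once the
// queue drains, so an idle connection never spins on writable events;
// callers off the owning thread fall back to dispatch_event_external instead
// of touching the event center directly.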
// Because this function will be called multiple times to populate
// the needed buffer, the passed-in bufferptr must stay the same.
// Normally, only "read_message" passes an existing bufferptr in.
//
// It uses readahead to reduce the overhead of small reads;
// "recv_buf" is used to store the prefetched data.
//
// return the remaining bytes, 0 means this buffer is finished
// else return < 0 means error
ssize_t AsyncConnection::read_until(unsigned len, char *p)
{
  ldout(async_msgr->cct, 25) << __func__ << " len is " << len << " state_offset is "
                             << state_offset << dendl;

  if (async_msgr->cct->_conf->ms_inject_socket_failures && cs) {
    if (rand() % async_msgr->cct->_conf->ms_inject_socket_failures == 0) {
      ldout(async_msgr->cct, 0) << __func__ << " injecting socket failure" << dendl;
      cs.shutdown();
    }
  }

  ssize_t r = 0;
  uint64_t left = len - state_offset;
  if (recv_end > recv_start) {
    uint64_t to_read = MIN(recv_end - recv_start, left);
    memcpy(p, recv_buf+recv_start, to_read);
    recv_start += to_read;
    left -= to_read;
    ldout(async_msgr->cct, 25) << __func__ << " got " << to_read << " in buffer "
                               << " left is " << left << " buffer still has "
                               << recv_end - recv_start << dendl;
    if (left == 0) {
      return 0;
    }
    state_offset += to_read;
  }

  recv_end = recv_start = 0;
  /* nothing left in the prefetch buffer */
  if (len > recv_max_prefetch) {
    /* this was a large read, we don't prefetch for these */
    do {
      r = read_bulk(p+state_offset, left);
      ldout(async_msgr->cct, 25) << __func__ << " read_bulk left is " << left << " got " << r << dendl;
      if (r < 0) {
        ldout(async_msgr->cct, 1) << __func__ << " read failed" << dendl;
        return -1;
      } else if (r == static_cast<int>(left)) {
        state_offset = 0;
        return 0;
      }
      state_offset += r;
      left -= r;
    } while (r > 0);
  } else {
    do {
      r = read_bulk(recv_buf+recv_end, recv_max_prefetch);
      ldout(async_msgr->cct, 25) << __func__ << " read_bulk recv_end is " << recv_end
                                 << " left is " << left << " got " << r << dendl;
      if (r < 0) {
        ldout(async_msgr->cct, 1) << __func__ << " read failed" << dendl;
        return -1;
      }
      recv_end += r;
      if (r >= static_cast<int>(left)) {
        recv_start = len - state_offset;
        memcpy(p+state_offset, recv_buf, recv_start);
        state_offset = 0;
        return 0;
      }
      left -= r;
    } while (r > 0);
    memcpy(p+state_offset, recv_buf, recv_end-recv_start);
    state_offset += (recv_end - recv_start);
    recv_end = recv_start = 0;
  }
  ldout(async_msgr->cct, 25) << __func__ << " need len " << len << " remaining "
                             << len - state_offset << " bytes" << dendl;
  return len - state_offset;
}
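// This prefetch loop is also why the constructor sizes recv_buf at
// 2*recv_max_prefetch: recv_end can already be close to recv_max_prefetch
// when the final read_bulk(recv_buf+recv_end, recv_max_prefetch) is issued,
// so the buffer must hold nearly two full prefetch windows in the worst case.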
void AsyncConnection::inject_delay() {
  if (async_msgr->cct->_conf->ms_inject_internal_delays) {
    ldout(async_msgr->cct, 10) << __func__ << " sleep for " <<
      async_msgr->cct->_conf->ms_inject_internal_delays << dendl;
    utime_t t;
    t.set_from_double(async_msgr->cct->_conf->ms_inject_internal_delays);
    t.sleep();
  }
}
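// process() drives the read-side state machine.  Each pass consumes exactly
// one protocol unit (tag byte, header, payload section, footer) via
// read_until(): a return of 0 means the unit is complete and the state can
// advance, a positive return parks the state until the next readable event,
// and a negative return takes the fault path.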
void AsyncConnection::process()
{
  ssize_t r = 0;
  int prev_state = state;
#if defined(WITH_LTTNG) && defined(WITH_EVENTTRACE)
  utime_t ltt_recv_stamp = ceph_clock_now();
#endif
  bool need_dispatch_writer = false;
  std::lock_guard<std::mutex> l(lock);
  last_active = ceph::coarse_mono_clock::now();
  do {
    ldout(async_msgr->cct, 20) << __func__ << " prev state is " << get_state_name(prev_state) << dendl;
    prev_state = state;
    switch (state) {
      case STATE_OPEN:
        {
          char tag = -1;
          r = read_until(sizeof(tag), &tag);
          if (r < 0) {
            ldout(async_msgr->cct, 1) << __func__ << " read tag failed" << dendl;
            goto fail;
          } else if (r > 0) {
            break;
          }

          if (tag == CEPH_MSGR_TAG_KEEPALIVE) {
            ldout(async_msgr->cct, 20) << __func__ << " got KEEPALIVE" << dendl;
            set_last_keepalive(ceph_clock_now());
          } else if (tag == CEPH_MSGR_TAG_KEEPALIVE2) {
            state = STATE_OPEN_KEEPALIVE2;
          } else if (tag == CEPH_MSGR_TAG_KEEPALIVE2_ACK) {
            state = STATE_OPEN_KEEPALIVE2_ACK;
          } else if (tag == CEPH_MSGR_TAG_ACK) {
            state = STATE_OPEN_TAG_ACK;
          } else if (tag == CEPH_MSGR_TAG_MSG) {
            state = STATE_OPEN_MESSAGE_HEADER;
          } else if (tag == CEPH_MSGR_TAG_CLOSE) {
            state = STATE_OPEN_TAG_CLOSE;
          } else {
            ldout(async_msgr->cct, 0) << __func__ << " bad tag " << (int)tag << dendl;
            goto fail;
          }

          break;
        }
      case STATE_OPEN_KEEPALIVE2:
        {
          ceph_timespec *t;
          r = read_until(sizeof(*t), state_buffer);
          if (r < 0) {
            ldout(async_msgr->cct, 1) << __func__ << " read keepalive timespec failed" << dendl;
            goto fail;
          } else if (r > 0) {
            break;
          }

          ldout(async_msgr->cct, 30) << __func__ << " got KEEPALIVE2 tag ..." << dendl;
          t = (ceph_timespec*)state_buffer;
          utime_t kp_t = utime_t(*t);
          write_lock.lock();
          _append_keepalive_or_ack(true, &kp_t);
          write_lock.unlock();
          ldout(async_msgr->cct, 20) << __func__ << " got KEEPALIVE2 " << kp_t << dendl;
          set_last_keepalive(ceph_clock_now());
          need_dispatch_writer = true;
          state = STATE_OPEN;
          break;
        }

      case STATE_OPEN_KEEPALIVE2_ACK:
        {
          ceph_timespec *t;
          r = read_until(sizeof(*t), state_buffer);
          if (r < 0) {
            ldout(async_msgr->cct, 1) << __func__ << " read keepalive timespec failed" << dendl;
            goto fail;
          } else if (r > 0) {
            break;
          }

          t = (ceph_timespec*)state_buffer;
          set_last_keepalive_ack(utime_t(*t));
          ldout(async_msgr->cct, 20) << __func__ << " got KEEPALIVE_ACK" << dendl;
          state = STATE_OPEN;
          break;
        }

      case STATE_OPEN_TAG_ACK:
        {
          ceph_le64 *seq;
          r = read_until(sizeof(*seq), state_buffer);
          if (r < 0) {
            ldout(async_msgr->cct, 1) << __func__ << " read ack seq failed" << dendl;
            goto fail;
          } else if (r > 0) {
            break;
          }

          seq = (ceph_le64*)state_buffer;
          ldout(async_msgr->cct, 20) << __func__ << " got ACK" << dendl;
          handle_ack(*seq);
          state = STATE_OPEN;
          break;
        }
      case STATE_OPEN_MESSAGE_HEADER:
        {
#if defined(WITH_LTTNG) && defined(WITH_EVENTTRACE)
          ltt_recv_stamp = ceph_clock_now();
#endif
          ldout(async_msgr->cct, 20) << __func__ << " begin MSG" << dendl;
          ceph_msg_header header;
          ceph_msg_header_old oldheader;
          __u32 header_crc = 0;
          unsigned len;
          if (has_feature(CEPH_FEATURE_NOSRCADDR))
            len = sizeof(header);
          else
            len = sizeof(oldheader);

          r = read_until(len, state_buffer);
          if (r < 0) {
            ldout(async_msgr->cct, 1) << __func__ << " read message header failed" << dendl;
            goto fail;
          } else if (r > 0) {
            break;
          }

          ldout(async_msgr->cct, 20) << __func__ << " got MSG header" << dendl;

          if (has_feature(CEPH_FEATURE_NOSRCADDR)) {
            header = *((ceph_msg_header*)state_buffer);
            if (msgr->crcflags & MSG_CRC_HEADER)
              header_crc = ceph_crc32c(0, (unsigned char *)&header,
                                       sizeof(header) - sizeof(header.crc));
          } else {
            oldheader = *((ceph_msg_header_old*)state_buffer);
            memcpy(&header, &oldheader, sizeof(header));
            header.src = oldheader.src.name;
            header.reserved = oldheader.reserved;
            if (msgr->crcflags & MSG_CRC_HEADER) {
              header.crc = oldheader.crc;
              header_crc = ceph_crc32c(0, (unsigned char *)&oldheader, sizeof(oldheader) - sizeof(oldheader.crc));
            }
          }

          ldout(async_msgr->cct, 20) << __func__ << " got envelope type=" << header.type
                                     << " src " << entity_name_t(header.src)
                                     << " front=" << header.front_len
                                     << " data=" << header.data_len
                                     << " off " << header.data_off << dendl;

          // verify header crc
          if (msgr->crcflags & MSG_CRC_HEADER && header_crc != header.crc) {
            ldout(async_msgr->cct,0) << __func__ << " got bad header crc "
                                     << header_crc << " != " << header.crc << dendl;
            goto fail;
          }

          // Reset state
          data_buf.clear();
          front.clear();
          middle.clear();
          data.clear();
          recv_stamp = ceph_clock_now();
          current_header = header;
          state = STATE_OPEN_MESSAGE_THROTTLE_MESSAGE;
          break;
        }
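      // Compatibility note: peers without CEPH_FEATURE_NOSRCADDR still speak
      // the old header layout (ceph_msg_header_old), which carries a full
      // source address.  The branch above rewrites it into the current layout
      // and recomputes the crc over the old struct so the check matches what
      // the peer actually computed.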
      case STATE_OPEN_MESSAGE_THROTTLE_MESSAGE:
        {
          if (policy.throttler_messages) {
            ldout(async_msgr->cct, 10) << __func__ << " wants " << 1 << " message from policy throttler "
                                       << policy.throttler_messages->get_current() << "/"
                                       << policy.throttler_messages->get_max() << dendl;
            if (!policy.throttler_messages->get_or_fail()) {
              ldout(async_msgr->cct, 10) << __func__ << " wants 1 message from policy throttle "
                                         << policy.throttler_messages->get_current() << "/"
                                         << policy.throttler_messages->get_max() << " failed, just wait." << dendl;
              // it may take the dispatch thread pool a while to drain the full
              // message queue, so wait a millisecond and retry
              if (register_time_events.empty())
                register_time_events.insert(center->create_time_event(1000, wakeup_handler));
              break;
            }
          }

          state = STATE_OPEN_MESSAGE_THROTTLE_BYTES;
          break;
        }

      case STATE_OPEN_MESSAGE_THROTTLE_BYTES:
        {
          cur_msg_size = current_header.front_len + current_header.middle_len + current_header.data_len;
          if (cur_msg_size) {
            if (policy.throttler_bytes) {
              ldout(async_msgr->cct, 10) << __func__ << " wants " << cur_msg_size << " bytes from policy throttler "
                                         << policy.throttler_bytes->get_current() << "/"
                                         << policy.throttler_bytes->get_max() << dendl;
              if (!policy.throttler_bytes->get_or_fail(cur_msg_size)) {
                ldout(async_msgr->cct, 10) << __func__ << " wants " << cur_msg_size << " bytes from policy throttler "
                                           << policy.throttler_bytes->get_current() << "/"
                                           << policy.throttler_bytes->get_max() << " failed, just wait." << dendl;
                // it may take the dispatch thread pool a while to drain the full
                // message queue, so wait a millisecond and retry
                if (register_time_events.empty())
                  register_time_events.insert(center->create_time_event(1000, wakeup_handler));
                break;
              }
            }
          }

          state = STATE_OPEN_MESSAGE_THROTTLE_DISPATCH_QUEUE;
          break;
        }

      case STATE_OPEN_MESSAGE_THROTTLE_DISPATCH_QUEUE:
        {
          if (cur_msg_size) {
            if (!dispatch_queue->dispatch_throttler.get_or_fail(cur_msg_size)) {
              ldout(async_msgr->cct, 10) << __func__ << " wants " << cur_msg_size << " bytes from dispatch throttle "
                                         << dispatch_queue->dispatch_throttler.get_current() << "/"
                                         << dispatch_queue->dispatch_throttler.get_max() << " failed, just wait." << dendl;
              // it may take the dispatch thread pool a while to drain the full
              // message queue, so wait a millisecond and retry
              if (register_time_events.empty())
                register_time_events.insert(center->create_time_event(1000, wakeup_handler));
              break;
            }
          }

          throttle_stamp = ceph_clock_now();
          state = STATE_OPEN_MESSAGE_READ_FRONT;
          break;
        }
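      // Throttling happens in three independent stages before any payload is
      // read: a message-count throttle and a byte throttle from the peer
      // policy, then the dispatch queue's own byte throttle.  Each stage uses
      // get_or_fail() so a full throttle never blocks the event thread; the
      // connection just re-arms a 1ms time event and retries.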
      case STATE_OPEN_MESSAGE_READ_FRONT:
        {
          // read front
          unsigned front_len = current_header.front_len;
          if (front_len) {
            if (!front.length())
              front.push_back(buffer::create(front_len));

            r = read_until(front_len, front.c_str());
            if (r < 0) {
              ldout(async_msgr->cct, 1) << __func__ << " read message front failed" << dendl;
              goto fail;
            } else if (r > 0) {
              break;
            }

            ldout(async_msgr->cct, 20) << __func__ << " got front " << front.length() << dendl;
          }
          state = STATE_OPEN_MESSAGE_READ_MIDDLE;
        }

      case STATE_OPEN_MESSAGE_READ_MIDDLE:
        {
          // read middle
          unsigned middle_len = current_header.middle_len;
          if (middle_len) {
            if (!middle.length())
              middle.push_back(buffer::create(middle_len));

            r = read_until(middle_len, middle.c_str());
            if (r < 0) {
              ldout(async_msgr->cct, 1) << __func__ << " read message middle failed" << dendl;
              goto fail;
            } else if (r > 0) {
              break;
            }
            ldout(async_msgr->cct, 20) << __func__ << " got middle " << middle.length() << dendl;
          }

          state = STATE_OPEN_MESSAGE_READ_DATA_PREPARE;
        }

      case STATE_OPEN_MESSAGE_READ_DATA_PREPARE:
        {
          // read data
          unsigned data_len = le32_to_cpu(current_header.data_len);
          unsigned data_off = le32_to_cpu(current_header.data_off);
          if (data_len) {
            // get a buffer
            map<ceph_tid_t,pair<bufferlist,int> >::iterator p = rx_buffers.find(current_header.tid);
            if (p != rx_buffers.end()) {
              ldout(async_msgr->cct,10) << __func__ << " selecting rx buffer v " << p->second.second
                                        << " at offset " << data_off
                                        << " len " << p->second.first.length() << dendl;
              data_buf = p->second.first;
              // make sure it's big enough
              if (data_buf.length() < data_len)
                data_buf.push_back(buffer::create(data_len - data_buf.length()));
              data_blp = data_buf.begin();
            } else {
              ldout(async_msgr->cct,20) << __func__ << " allocating new rx buffer at offset " << data_off << dendl;
              alloc_aligned_buffer(data_buf, data_len, data_off);
              data_blp = data_buf.begin();
            }
          }

          msg_left = data_len;
          state = STATE_OPEN_MESSAGE_READ_DATA;
        }
      case STATE_OPEN_MESSAGE_READ_DATA:
        {
          while (msg_left > 0) {
            bufferptr bp = data_blp.get_current_ptr();
            unsigned read = MIN(bp.length(), msg_left);
            r = read_until(read, bp.c_str());
            if (r < 0) {
              ldout(async_msgr->cct, 1) << __func__ << " read data error " << dendl;
              goto fail;
            } else if (r > 0) {
              break;
            }

            data_blp.advance(read);
            data.append(bp, 0, read);
            msg_left -= read;
          }

          if (msg_left > 0)
            break;

          state = STATE_OPEN_MESSAGE_READ_FOOTER_AND_DISPATCH;
        }
      case STATE_OPEN_MESSAGE_READ_FOOTER_AND_DISPATCH:
        {
          ceph_msg_footer footer;
          ceph_msg_footer_old old_footer;
          unsigned len;
          // footer
          if (has_feature(CEPH_FEATURE_MSG_AUTH))
            len = sizeof(footer);
          else
            len = sizeof(old_footer);

          r = read_until(len, state_buffer);
          if (r < 0) {
            ldout(async_msgr->cct, 1) << __func__ << " read footer data error " << dendl;
            goto fail;
          } else if (r > 0) {
            break;
          }

          if (has_feature(CEPH_FEATURE_MSG_AUTH)) {
            footer = *((ceph_msg_footer*)state_buffer);
          } else {
            old_footer = *((ceph_msg_footer_old*)state_buffer);
            footer.front_crc = old_footer.front_crc;
            footer.middle_crc = old_footer.middle_crc;
            footer.data_crc = old_footer.data_crc;
            footer.sig = 0;
            footer.flags = old_footer.flags;
          }
          int aborted = (footer.flags & CEPH_MSG_FOOTER_COMPLETE) == 0;
          ldout(async_msgr->cct, 10) << __func__ << " aborted = " << aborted << dendl;
          if (aborted) {
            ldout(async_msgr->cct, 0) << __func__ << " got " << front.length() << " + " << middle.length() << " + " << data.length()
                                      << " byte message.. ABORTED" << dendl;
            goto fail;
          }

          ldout(async_msgr->cct, 20) << __func__ << " got " << front.length() << " + " << middle.length()
                                     << " + " << data.length() << " byte message" << dendl;
          Message *message = decode_message(async_msgr->cct, async_msgr->crcflags, current_header, footer,
                                            front, middle, data, this);
          if (!message) {
            ldout(async_msgr->cct, 1) << __func__ << " decode message failed " << dendl;
            goto fail;
          }

          //
          //  Check the signature if one should be present.  A zero return indicates success. PLR
          //

          if (session_security.get() == NULL) {
            ldout(async_msgr->cct, 10) << __func__ << " no session security set" << dendl;
          } else {
            if (session_security->check_message_signature(message)) {
              ldout(async_msgr->cct, 0) << __func__ << " Signature check failed" << dendl;
              message->put();
              goto fail;
            }
          }
          message->set_byte_throttler(policy.throttler_bytes);
          message->set_message_throttler(policy.throttler_messages);

          // store reservation size in message, so we don't get confused
          // by messages entering the dispatch queue through other paths.
          message->set_dispatch_throttle_size(cur_msg_size);

          message->set_recv_stamp(recv_stamp);
          message->set_throttle_stamp(throttle_stamp);
          message->set_recv_complete_stamp(ceph_clock_now());

          // check received seq#.  if it is old, drop the message.
          // note that incoming messages may skip ahead.  this is convenient for the client
          // side queueing because messages can't be renumbered, but the (kernel) client will
          // occasionally pull a message out of the sent queue to send elsewhere.  in that case
          // it doesn't matter if we "got" it or not.
          uint64_t cur_seq = in_seq.read();
          if (message->get_seq() <= cur_seq) {
            ldout(async_msgr->cct,0) << __func__ << " got old message "
                                     << message->get_seq() << " <= " << cur_seq << " " << message << " " << *message
                                     << ", discarding" << dendl;
            message->put();
            if (has_feature(CEPH_FEATURE_RECONNECT_SEQ) && async_msgr->cct->_conf->ms_die_on_old_message)
              assert(0 == "old msgs despite reconnect_seq feature");
            break;
          }
          if (message->get_seq() > cur_seq + 1) {
            ldout(async_msgr->cct, 0) << __func__ << " missed message? skipped from seq "
                                      << cur_seq << " to " << message->get_seq() << dendl;
            if (async_msgr->cct->_conf->ms_die_on_skipped_message)
              assert(0 == "skipped incoming seq");
          }

          message->set_connection(this);

#if defined(WITH_LTTNG) && defined(WITH_EVENTTRACE)
          if (message->get_type() == CEPH_MSG_OSD_OP || message->get_type() == CEPH_MSG_OSD_OPREPLY) {
            utime_t ltt_processed_stamp = ceph_clock_now();
            double usecs_elapsed = (ltt_processed_stamp.to_nsec()-ltt_recv_stamp.to_nsec())/1000;
            if (message->get_type() == CEPH_MSG_OSD_OP)
              OID_ELAPSED_WITH_MSG(message, usecs_elapsed, "TIME_TO_DECODE_OSD_OP", false);
            else
              OID_ELAPSED_WITH_MSG(message, usecs_elapsed, "TIME_TO_DECODE_OSD_OPREPLY", false);
          }
#endif

          // note last received message.
          in_seq.set(message->get_seq());
          ldout(async_msgr->cct, 5) << " rx " << message->get_source() << " seq "
                                    << message->get_seq() << " " << message
                                    << " " << *message << dendl;

          if (!policy.lossy) {
            ack_left.inc();
            need_dispatch_writer = true;
          }
          state = STATE_OPEN;

          logger->inc(l_msgr_recv_messages);
          logger->inc(l_msgr_recv_bytes, cur_msg_size + sizeof(ceph_msg_header) + sizeof(ceph_msg_footer));

          async_msgr->ms_fast_preprocess(message);
          if (delay_state) {
            utime_t release = message->get_recv_stamp();
            double delay_period = 0;
            if (rand() % 10000 < async_msgr->cct->_conf->ms_inject_delay_probability * 10000.0) {
              delay_period = async_msgr->cct->_conf->ms_inject_delay_max * (double)(rand() % 10000) / 10000.0;
              release += delay_period;
              ldout(async_msgr->cct, 1) << "queue_received will delay until " << release << " on "
                                        << message << " " << *message << dendl;
            }
            delay_state->queue(delay_period, release, message);
          } else if (async_msgr->ms_can_fast_dispatch(message)) {
            lock.unlock();
            dispatch_queue->fast_dispatch(message);
            lock.lock();
          } else {
            dispatch_queue->enqueue(message, message->get_priority(), conn_id);
          }

          break;
        }
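      // Bookkeeping after dispatch: in_seq records the last message accepted,
      // and on non-lossy sessions the pending-ack counter plus
      // need_dispatch_writer make sure the write handler runs soon so a
      // CEPH_MSGR_TAG_ACK flows back to the peer.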
      case STATE_OPEN_TAG_CLOSE:
        {
          ldout(async_msgr->cct, 20) << __func__ << " got CLOSE" << dendl;
          _stop();
          return;
        }

      case STATE_STANDBY:
        {
          ldout(async_msgr->cct, 20) << __func__ << " enter STANDBY" << dendl;
          break;
        }

      case STATE_NONE:
        {
          ldout(async_msgr->cct, 20) << __func__ << " enter none state" << dendl;
          break;
        }

      case STATE_CLOSED:
        {
          ldout(async_msgr->cct, 20) << __func__ << " socket closed" << dendl;
          break;
        }

      case STATE_WAIT:
        {
          ldout(async_msgr->cct, 1) << __func__ << " enter wait state, failing" << dendl;
          goto fail;
        }

      default:
        {
          if (_process_connection() < 0)
            goto fail;
          break;
        }
    }
  } while (prev_state != state);

  if (need_dispatch_writer && is_connected())
    center->dispatch_event_external(write_handler);

  return;

 fail:
  fault();
}
ssize_t AsyncConnection::_process_connection()
{
  ssize_t r = 0;

  switch(state) {
    case STATE_WAIT_SEND:
      {
        std::lock_guard<std::mutex> l(write_lock);
        if (!outcoming_bl.length()) {
          assert(state_after_send);
          state = state_after_send;
          state_after_send = STATE_NONE;
        }
        break;
      }

    case STATE_CONNECTING:
      {
        assert(!policy.server);

        // reset connect state variables
        got_bad_auth = false;
        delete authorizer;
        authorizer = NULL;
        authorizer_buf.clear();
        memset(&connect_msg, 0, sizeof(connect_msg));
        memset(&connect_reply, 0, sizeof(connect_reply));

        global_seq = async_msgr->get_global_seq();
        // close old socket.  this is safe because we stopped the reader thread above.
        if (cs) {
          center->delete_file_event(cs.fd(), EVENT_READABLE|EVENT_WRITABLE);
          cs.close();
        }

        SocketOptions opts;
        opts.priority = async_msgr->get_socket_priority();
        opts.connect_bind_addr = msgr->get_myaddr();
        r = worker->connect(get_peer_addr(), opts, &cs);
        if (r < 0)
          goto fail;

        center->create_file_event(cs.fd(), EVENT_READABLE, read_handler);
        state = STATE_CONNECTING_RE;
        break;
      }
    case STATE_CONNECTING_RE:
      {
        r = cs.is_connected();
        if (r < 0) {
          ldout(async_msgr->cct, 1) << __func__ << " reconnect failed " << dendl;
          if (r == -ECONNREFUSED) {
            ldout(async_msgr->cct, 2) << __func__ << " connection refused!" << dendl;
            dispatch_queue->queue_refused(this);
          }
          goto fail;
        } else if (r == 0) {
          ldout(async_msgr->cct, 10) << __func__ << " nonblock connect inprogress" << dendl;
          if (async_msgr->get_stack()->nonblock_connect_need_writable_event())
            center->create_file_event(cs.fd(), EVENT_WRITABLE, read_handler);
          break;
        }

        center->delete_file_event(cs.fd(), EVENT_WRITABLE);
        ldout(async_msgr->cct, 10) << __func__ << " connect successfully, ready to send banner" << dendl;

        bufferlist bl;
        bl.append(CEPH_BANNER, strlen(CEPH_BANNER));
        r = try_send(bl);
        if (r == 0) {
          state = STATE_CONNECTING_WAIT_BANNER_AND_IDENTIFY;
          ldout(async_msgr->cct, 10) << __func__ << " connect write banner done: "
                                     << get_peer_addr() << dendl;
        } else if (r > 0) {
          state = STATE_WAIT_SEND;
          state_after_send = STATE_CONNECTING_WAIT_BANNER_AND_IDENTIFY;
          ldout(async_msgr->cct, 10) << __func__ << " connect wait for write banner: "
                                     << get_peer_addr() << dendl;
        } else {
          goto fail;
        }

        break;
      }
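    // try_send()'s return value drives the STATE_WAIT_SEND detour used all
    // through this function: 0 means the buffer was written out synchronously
    // and we advance directly, while a positive value means bytes are still
    // queued, so we park in STATE_WAIT_SEND and let the write path promote us
    // to state_after_send once outcoming_bl drains.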
    case STATE_CONNECTING_WAIT_BANNER_AND_IDENTIFY:
      {
        entity_addr_t paddr, peer_addr_for_me;
        bufferlist myaddrbl;
        unsigned banner_len = strlen(CEPH_BANNER);
        unsigned need_len = banner_len + sizeof(ceph_entity_addr)*2;
        r = read_until(need_len, state_buffer);
        if (r < 0) {
          ldout(async_msgr->cct, 1) << __func__ << " read banner and identify addresses failed" << dendl;
          goto fail;
        } else if (r > 0) {
          break;
        }

        if (memcmp(state_buffer, CEPH_BANNER, banner_len)) {
          ldout(async_msgr->cct, 0) << __func__ << " connect protocol error (bad banner) on peer "
                                    << get_peer_addr() << dendl;
          goto fail;
        }

        bufferlist bl;
        bl.append(state_buffer+banner_len, sizeof(ceph_entity_addr)*2);
        bufferlist::iterator p = bl.begin();
        try {
          ::decode(paddr, p);
          ::decode(peer_addr_for_me, p);
        } catch (const buffer::error& e) {
          lderr(async_msgr->cct) << __func__ << " decode peer addr failed " << dendl;
          goto fail;
        }
        ldout(async_msgr->cct, 20) << __func__ << " connect read peer addr "
                                   << paddr << " on socket " << cs.fd() << dendl;
        if (peer_addr != paddr) {
          if (paddr.is_blank_ip() && peer_addr.get_port() == paddr.get_port() &&
              peer_addr.get_nonce() == paddr.get_nonce()) {
            ldout(async_msgr->cct, 0) << __func__ << " connect claims to be " << paddr
                                      << " not " << peer_addr
                                      << " - presumably this is the same node!" << dendl;
          } else {
            ldout(async_msgr->cct, 0) << __func__ << " connect claims to be "
                                      << paddr << " not " << peer_addr << " - wrong node!" << dendl;
            goto fail;
          }
        }

        ldout(async_msgr->cct, 20) << __func__ << " connect peer addr for me is " << peer_addr_for_me << dendl;
        lock.unlock();
        async_msgr->learned_addr(peer_addr_for_me);
        if (async_msgr->cct->_conf->ms_inject_internal_delays) {
          if (rand() % async_msgr->cct->_conf->ms_inject_socket_failures == 0) {
            ldout(msgr->cct, 10) << __func__ << " sleep for "
                                 << async_msgr->cct->_conf->ms_inject_internal_delays << dendl;
            utime_t t;
            t.set_from_double(async_msgr->cct->_conf->ms_inject_internal_delays);
            t.sleep();
          }
        }

        lock.lock();
        if (state != STATE_CONNECTING_WAIT_BANNER_AND_IDENTIFY) {
          ldout(async_msgr->cct, 1) << __func__ << " state changed while learned_addr; mark_down or "
                                    << " replacing must have happened just now" << dendl;
          return 0;
        }

        ::encode(async_msgr->get_myaddr(), myaddrbl, 0); // legacy
        r = try_send(myaddrbl);
        if (r == 0) {
          state = STATE_CONNECTING_SEND_CONNECT_MSG;
          ldout(async_msgr->cct, 10) << __func__ << " connect sent my addr "
                                     << async_msgr->get_myaddr() << dendl;
        } else if (r > 0) {
          state = STATE_WAIT_SEND;
          state_after_send = STATE_CONNECTING_SEND_CONNECT_MSG;
          ldout(async_msgr->cct, 10) << __func__ << " connect send my addr done: "
                                     << async_msgr->get_myaddr() << dendl;
        } else {
          ldout(async_msgr->cct, 2) << __func__ << " connect couldn't write my addr, "
                                    << cpp_strerror(r) << dendl;
          goto fail;
        }

        break;
      }
    case STATE_CONNECTING_SEND_CONNECT_MSG:
      {
        if (!got_bad_auth) {
          delete authorizer;
          authorizer = async_msgr->get_authorizer(peer_type, false);
        }
        bufferlist bl;

        connect_msg.features = policy.features_supported;
        connect_msg.host_type = async_msgr->get_myinst().name.type();
        connect_msg.global_seq = global_seq;
        connect_msg.connect_seq = connect_seq;
        connect_msg.protocol_version = async_msgr->get_proto_version(peer_type, true);
        connect_msg.authorizer_protocol = authorizer ? authorizer->protocol : 0;
        connect_msg.authorizer_len = authorizer ? authorizer->bl.length() : 0;

        ldout(async_msgr->cct, 10) << __func__ << " connect_msg.authorizer_len="
                                   << connect_msg.authorizer_len << " protocol="
                                   << connect_msg.authorizer_protocol << dendl;
        connect_msg.flags = 0;
        if (policy.lossy)
          connect_msg.flags |= CEPH_MSG_CONNECT_LOSSY;  // this is fyi, actually, server decides!
        bl.append((char*)&connect_msg, sizeof(connect_msg));
        if (authorizer) {
          bl.append(authorizer->bl.c_str(), authorizer->bl.length());
        }
        ldout(async_msgr->cct, 10) << __func__ << " connect sending gseq=" << global_seq << " cseq="
                                   << connect_seq << " proto=" << connect_msg.protocol_version << dendl;

        r = try_send(bl);
        if (r == 0) {
          state = STATE_CONNECTING_WAIT_CONNECT_REPLY;
          ldout(async_msgr->cct,20) << __func__ << " connect wrote (self +) cseq, waiting for reply" << dendl;
        } else if (r > 0) {
          state = STATE_WAIT_SEND;
          state_after_send = STATE_CONNECTING_WAIT_CONNECT_REPLY;
          ldout(async_msgr->cct, 10) << __func__ << " continue send reply " << dendl;
        } else {
          ldout(async_msgr->cct, 2) << __func__ << " connect couldn't send reply "
                                    << cpp_strerror(r) << dendl;
          goto fail;
        }

        break;
      }
    case STATE_CONNECTING_WAIT_CONNECT_REPLY:
      {
        r = read_until(sizeof(connect_reply), state_buffer);
        if (r < 0) {
          ldout(async_msgr->cct, 1) << __func__ << " read connect reply failed" << dendl;
          goto fail;
        } else if (r > 0) {
          break;
        }

        connect_reply = *((ceph_msg_connect_reply*)state_buffer);

        ldout(async_msgr->cct, 20) << __func__ << " connect got reply tag " << (int)connect_reply.tag
                                   << " connect_seq " << connect_reply.connect_seq << " global_seq "
                                   << connect_reply.global_seq << " proto " << connect_reply.protocol_version
                                   << " flags " << (int)connect_reply.flags << " features "
                                   << connect_reply.features << dendl;
        state = STATE_CONNECTING_WAIT_CONNECT_REPLY_AUTH;

        break;
      }

    case STATE_CONNECTING_WAIT_CONNECT_REPLY_AUTH:
      {
        bufferlist authorizer_reply;
        if (connect_reply.authorizer_len) {
          ldout(async_msgr->cct, 10) << __func__ << " reply.authorizer_len=" << connect_reply.authorizer_len << dendl;
          assert(connect_reply.authorizer_len < 4096);
          r = read_until(connect_reply.authorizer_len, state_buffer);
          if (r < 0) {
            ldout(async_msgr->cct, 1) << __func__ << " read connect reply authorizer failed" << dendl;
            goto fail;
          } else if (r > 0) {
            break;
          }

          authorizer_reply.append(state_buffer, connect_reply.authorizer_len);
          bufferlist::iterator iter = authorizer_reply.begin();
          if (authorizer && !authorizer->verify_reply(iter)) {
            ldout(async_msgr->cct, 0) << __func__ << " failed verifying authorize reply" << dendl;
            goto fail;
          }
        }
        r = handle_connect_reply(connect_msg, connect_reply);
        if (r < 0)
          goto fail;

        // state must be changed!
        assert(state != STATE_CONNECTING_WAIT_CONNECT_REPLY_AUTH);
        break;
      }
    case STATE_CONNECTING_WAIT_ACK_SEQ:
      {
        uint64_t newly_acked_seq = 0;

        r = read_until(sizeof(newly_acked_seq), state_buffer);
        if (r < 0) {
          ldout(async_msgr->cct, 1) << __func__ << " read connect ack seq failed" << dendl;
          goto fail;
        } else if (r > 0) {
          break;
        }

        newly_acked_seq = *((uint64_t*)state_buffer);
        ldout(async_msgr->cct, 2) << __func__ << " got newly_acked_seq " << newly_acked_seq
                                  << " vs out_seq " << out_seq.read() << dendl;
        discard_requeued_up_to(newly_acked_seq);
        //while (newly_acked_seq > out_seq.read()) {
        //  Message *m = _get_next_outgoing(NULL);
        //  assert(m);
        //  ldout(async_msgr->cct, 2) << __func__ << " discarding previously sent " << m->get_seq()
        //                            << " " << *m << dendl;
        //  assert(m->get_seq() <= newly_acked_seq);
        //}

        bufferlist bl;
        uint64_t s = in_seq.read();
        bl.append((char*)&s, sizeof(s));
        r = try_send(bl);
        if (r == 0) {
          state = STATE_CONNECTING_READY;
          ldout(async_msgr->cct, 10) << __func__ << " send in_seq done " << dendl;
        } else if (r > 0) {
          state_after_send = STATE_CONNECTING_READY;
          state = STATE_WAIT_SEND;
          ldout(async_msgr->cct, 10) << __func__ << " continue send in_seq " << dendl;
        } else {
          goto fail;
        }

        break;
      }
    case STATE_CONNECTING_READY:
      {
        // hooray!
        peer_global_seq = connect_reply.global_seq;
        policy.lossy = connect_reply.flags & CEPH_MSG_CONNECT_LOSSY;
        state = STATE_OPEN;
        once_ready = true;
        connect_seq += 1;
        assert(connect_seq == connect_reply.connect_seq);
        backoff = utime_t();
        set_features((uint64_t)connect_reply.features & (uint64_t)connect_msg.features);
        ldout(async_msgr->cct, 10) << __func__ << " connect success " << connect_seq
                                   << ", lossy = " << policy.lossy << ", features "
                                   << get_features() << dendl;

        // If we have an authorizer, get a new AuthSessionHandler to deal with ongoing security of the
        // connection.  PLR
        if (authorizer != NULL) {
          session_security.reset(
              get_auth_session_handler(async_msgr->cct,
                                       authorizer->protocol,
                                       authorizer->session_key,
                                       get_features()));
        } else {
          // We have no authorizer, so we shouldn't be applying security to messages in this AsyncConnection.  PLR
          session_security.reset();
        }

        if (delay_state)
          assert(delay_state->ready());
        dispatch_queue->queue_connect(this);
        async_msgr->ms_deliver_handle_fast_connect(this);

        // make sure there is no pending tick timer
        if (last_tick_id)
          center->delete_time_event(last_tick_id);
        last_tick_id = center->create_time_event(inactive_timeout_us, tick_handler);

        // messages may be queued between the last _try_send and connection ready;
        // the write event may have already fired, so force the scheduler again
        write_lock.lock();
        can_write = WriteStatus::CANWRITE;
        if (is_queued())
          center->dispatch_event_external(write_handler);
        write_lock.unlock();
        maybe_start_delay_thread();
        break;
      }
    case STATE_ACCEPTING:
      {
        bufferlist bl;
        center->create_file_event(cs.fd(), EVENT_READABLE, read_handler);

        bl.append(CEPH_BANNER, strlen(CEPH_BANNER));

        ::encode(async_msgr->get_myaddr(), bl, 0); // legacy
        port = async_msgr->get_myaddr().get_port();
        ::encode(socket_addr, bl, 0); // legacy
        ldout(async_msgr->cct, 1) << __func__ << " sd=" << cs.fd() << " " << socket_addr << dendl;

        r = try_send(bl);
        if (r == 0) {
          state = STATE_ACCEPTING_WAIT_BANNER_ADDR;
          ldout(async_msgr->cct, 10) << __func__ << " write banner and addr done: "
                                     << get_peer_addr() << dendl;
        } else if (r > 0) {
          state = STATE_WAIT_SEND;
          state_after_send = STATE_ACCEPTING_WAIT_BANNER_ADDR;
          ldout(async_msgr->cct, 10) << __func__ << " wait for write banner and addr: "
                                     << get_peer_addr() << dendl;
        } else {
          goto fail;
        }

        break;
      }
    case STATE_ACCEPTING_WAIT_BANNER_ADDR:
      {
        bufferlist addr_bl;
        entity_addr_t peer_addr;

        r = read_until(strlen(CEPH_BANNER) + sizeof(ceph_entity_addr), state_buffer);
        if (r < 0) {
          ldout(async_msgr->cct, 1) << __func__ << " read peer banner and addr failed" << dendl;
          goto fail;
        } else if (r > 0) {
          break;
        }

        if (memcmp(state_buffer, CEPH_BANNER, strlen(CEPH_BANNER))) {
          ldout(async_msgr->cct, 1) << __func__ << " accept peer sent bad banner '" << state_buffer
                                    << "' (should be '" << CEPH_BANNER << "')" << dendl;
          goto fail;
        }

        addr_bl.append(state_buffer+strlen(CEPH_BANNER), sizeof(ceph_entity_addr));
        {
          bufferlist::iterator ti = addr_bl.begin();
          ::decode(peer_addr, ti);
        }

        ldout(async_msgr->cct, 10) << __func__ << " accept peer addr is " << peer_addr << dendl;
        if (peer_addr.is_blank_ip()) {
          // peer apparently doesn't know what ip they have; figure it out for them.
          int port = peer_addr.get_port();
          peer_addr.u = socket_addr.u;
          peer_addr.set_port(port);
          ldout(async_msgr->cct, 0) << __func__ << " accept peer addr is really " << peer_addr
                                    << " (socket is " << socket_addr << ")" << dendl;
        }
        set_peer_addr(peer_addr);  // so that connection_state gets set up
        state = STATE_ACCEPTING_WAIT_CONNECT_MSG;
        break;
      }
    case STATE_ACCEPTING_WAIT_CONNECT_MSG:
      {
        r = read_until(sizeof(connect_msg), state_buffer);
        if (r < 0) {
          ldout(async_msgr->cct, 1) << __func__ << " read connect msg failed" << dendl;
          goto fail;
        } else if (r > 0) {
          break;
        }

        connect_msg = *((ceph_msg_connect*)state_buffer);
        state = STATE_ACCEPTING_WAIT_CONNECT_MSG_AUTH;
        break;
      }
    case STATE_ACCEPTING_WAIT_CONNECT_MSG_AUTH:
      {
        bufferlist authorizer_reply;

        if (connect_msg.authorizer_len) {
          if (!authorizer_buf.length())
            authorizer_buf.push_back(buffer::create(connect_msg.authorizer_len));

          r = read_until(connect_msg.authorizer_len, authorizer_buf.c_str());
          if (r < 0) {
            ldout(async_msgr->cct, 1) << __func__ << " read connect authorizer failed" << dendl;
            goto fail;
          } else if (r > 0) {
            break;
          }
        }

        ldout(async_msgr->cct, 20) << __func__ << " accept got peer connect_seq "
                                   << connect_msg.connect_seq << " global_seq "
                                   << connect_msg.global_seq << dendl;
        set_peer_type(connect_msg.host_type);
        policy = async_msgr->get_policy(connect_msg.host_type);
        ldout(async_msgr->cct, 10) << __func__ << " accept of host_type " << connect_msg.host_type
                                   << ", policy.lossy=" << policy.lossy << " policy.server="
                                   << policy.server << " policy.standby=" << policy.standby
                                   << " policy.resetcheck=" << policy.resetcheck << dendl;

        r = handle_connect_msg(connect_msg, authorizer_buf, authorizer_reply);
        if (r < 0)
          goto fail;

        // state is changed by "handle_connect_msg"
        assert(state != STATE_ACCEPTING_WAIT_CONNECT_MSG_AUTH);
        break;
      }
    case STATE_ACCEPTING_WAIT_SEQ:
      {
        uint64_t newly_acked_seq;
        r = read_until(sizeof(newly_acked_seq), state_buffer);
        if (r < 0) {
          ldout(async_msgr->cct, 1) << __func__ << " read ack seq failed" << dendl;
          goto fail_registered;
        } else if (r > 0) {
          break;
        }

        newly_acked_seq = *((uint64_t*)state_buffer);
        ldout(async_msgr->cct, 2) << __func__ << " accept get newly_acked_seq " << newly_acked_seq << dendl;
        discard_requeued_up_to(newly_acked_seq);
        state = STATE_ACCEPTING_READY;
        break;
      }
    case STATE_ACCEPTING_READY:
      {
        ldout(async_msgr->cct, 20) << __func__ << " accept done" << dendl;
        state = STATE_OPEN;
        memset(&connect_msg, 0, sizeof(connect_msg));

        if (delay_state)
          assert(delay_state->ready());
        // make sure there is no pending tick timer
        if (last_tick_id)
          center->delete_time_event(last_tick_id);
        last_tick_id = center->create_time_event(inactive_timeout_us, tick_handler);

        write_lock.lock();
        can_write = WriteStatus::CANWRITE;
        if (is_queued())
          center->dispatch_event_external(write_handler);
        write_lock.unlock();
        maybe_start_delay_thread();
        break;
      }

    default:
      {
        lderr(async_msgr->cct) << __func__ << " bad state: " << state << dendl;
        ceph_abort();
      }
  }

  return 0;

 fail_registered:
  ldout(async_msgr->cct, 10) << "accept fault after register" << dendl;
  inject_delay();

 fail:
  return -1;
}
int AsyncConnection::handle_connect_reply(ceph_msg_connect &connect, ceph_msg_connect_reply &reply)
{
  uint64_t feat_missing;
  if (reply.tag == CEPH_MSGR_TAG_FEATURES) {
    ldout(async_msgr->cct, 0) << __func__ << " connect protocol feature mismatch, my "
                              << std::hex << connect.features << " < peer "
                              << reply.features << " missing "
                              << (reply.features & ~policy.features_supported)
                              << std::dec << dendl;
    goto fail;
  }

  if (reply.tag == CEPH_MSGR_TAG_BADPROTOVER) {
    ldout(async_msgr->cct, 0) << __func__ << " connect protocol version mismatch, my "
                              << connect.protocol_version << " != " << reply.protocol_version
                              << dendl;
    goto fail;
  }

  if (reply.tag == CEPH_MSGR_TAG_BADAUTHORIZER) {
    ldout(async_msgr->cct,0) << __func__ << " connect got BADAUTHORIZER" << dendl;
    if (got_bad_auth)
      goto fail;
    got_bad_auth = true;
    delete authorizer;
    authorizer = async_msgr->get_authorizer(peer_type, true);  // try harder
    state = STATE_CONNECTING_SEND_CONNECT_MSG;
  }
  if (reply.tag == CEPH_MSGR_TAG_RESETSESSION) {
    ldout(async_msgr->cct, 0) << __func__ << " connect got RESETSESSION" << dendl;
    was_session_reset();
    state = STATE_CONNECTING_SEND_CONNECT_MSG;
  }
  if (reply.tag == CEPH_MSGR_TAG_RETRY_GLOBAL) {
    global_seq = async_msgr->get_global_seq(reply.global_seq);
    ldout(async_msgr->cct, 5) << __func__ << " connect got RETRY_GLOBAL "
                              << reply.global_seq << " chose new "
                              << global_seq << dendl;
    state = STATE_CONNECTING_SEND_CONNECT_MSG;
  }
  if (reply.tag == CEPH_MSGR_TAG_RETRY_SESSION) {
    assert(reply.connect_seq > connect_seq);
    ldout(async_msgr->cct, 5) << __func__ << " connect got RETRY_SESSION "
                              << connect_seq << " -> "
                              << reply.connect_seq << dendl;
    connect_seq = reply.connect_seq;
    state = STATE_CONNECTING_SEND_CONNECT_MSG;
  }
  if (reply.tag == CEPH_MSGR_TAG_WAIT) {
    ldout(async_msgr->cct, 1) << __func__ << " connect got WAIT (connection race)" << dendl;
    state = STATE_WAIT;
  }

  feat_missing = policy.features_required & ~(uint64_t)connect_reply.features;
  if (feat_missing) {
    ldout(async_msgr->cct, 1) << __func__ << " missing required features " << std::hex
                              << feat_missing << std::dec << dendl;
    goto fail;
  }

  if (reply.tag == CEPH_MSGR_TAG_SEQ) {
    ldout(async_msgr->cct, 10) << __func__ << " got CEPH_MSGR_TAG_SEQ, reading acked_seq and writing in_seq" << dendl;
    state = STATE_CONNECTING_WAIT_ACK_SEQ;
  }
  if (reply.tag == CEPH_MSGR_TAG_READY) {
    ldout(async_msgr->cct, 10) << __func__ << " got CEPH_MSGR_TAG_READY " << dendl;
    state = STATE_CONNECTING_READY;
  }

  return 0;

 fail:
  return -1;
}
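// The reply tags above fall into three buckets: hard failures (FEATURES,
// BADPROTOVER, missing required features) abort the attempt; retry tags
// (BADAUTHORIZER once, RESETSESSION, RETRY_GLOBAL, RETRY_SESSION) loop back
// to STATE_CONNECTING_SEND_CONNECT_MSG with adjusted sequence numbers; WAIT
// parks the loser of a connection race; and SEQ/READY move the connection
// forward to the ready path.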
ssize_t AsyncConnection::handle_connect_msg(ceph_msg_connect &connect, bufferlist &authorizer_bl,
                                            bufferlist &authorizer_reply)
{
  ssize_t r = 0;
  ceph_msg_connect_reply reply;
  bufferlist reply_bl;

  memset(&reply, 0, sizeof(reply));
  reply.protocol_version = async_msgr->get_proto_version(peer_type, false);

  // mismatch?
  ldout(async_msgr->cct, 10) << __func__ << " accept my proto " << reply.protocol_version
                             << ", their proto " << connect.protocol_version << dendl;
  if (connect.protocol_version != reply.protocol_version) {
    return _reply_accept(CEPH_MSGR_TAG_BADPROTOVER, connect, reply, authorizer_reply);
  }

  // require signatures for cephx?
  if (connect.authorizer_protocol == CEPH_AUTH_CEPHX) {
    if (peer_type == CEPH_ENTITY_TYPE_OSD ||
        peer_type == CEPH_ENTITY_TYPE_MDS) {
      if (async_msgr->cct->_conf->cephx_require_signatures ||
          async_msgr->cct->_conf->cephx_cluster_require_signatures) {
        ldout(async_msgr->cct, 10) << __func__ << " using cephx, requiring MSG_AUTH feature bit for cluster" << dendl;
        policy.features_required |= CEPH_FEATURE_MSG_AUTH;
      }
    } else {
      if (async_msgr->cct->_conf->cephx_require_signatures ||
          async_msgr->cct->_conf->cephx_service_require_signatures) {
        ldout(async_msgr->cct, 10) << __func__ << " using cephx, requiring MSG_AUTH feature bit for service" << dendl;
        policy.features_required |= CEPH_FEATURE_MSG_AUTH;
      }
    }
  }

  uint64_t feat_missing = policy.features_required & ~(uint64_t)connect.features;
  if (feat_missing) {
    ldout(async_msgr->cct, 1) << __func__ << " peer missing required features "
                              << std::hex << feat_missing << std::dec << dendl;
    return _reply_accept(CEPH_MSGR_TAG_FEATURES, connect, reply, authorizer_reply);
  }

  lock.unlock();

  bool authorizer_valid;
  if (!async_msgr->verify_authorizer(this, peer_type, connect.authorizer_protocol, authorizer_bl,
                                     authorizer_reply, authorizer_valid, session_key) || !authorizer_valid) {
    lock.lock();
    ldout(async_msgr->cct,0) << __func__ << ": got bad authorizer" << dendl;
    session_security.reset();
    return _reply_accept(CEPH_MSGR_TAG_BADAUTHORIZER, connect, reply, authorizer_reply);
  }

  // We've verified the authorizer for this AsyncConnection, so set up the session security structure.  PLR
  ldout(async_msgr->cct, 10) << __func__ << " accept setting up session_security." << dendl;
  AsyncConnectionRef existing = async_msgr->lookup_conn(peer_addr);

  inject_delay();

  lock.lock();
  if (state != STATE_ACCEPTING_WAIT_CONNECT_MSG_AUTH) {
    ldout(async_msgr->cct, 1) << __func__ << " state changed while accept, it must be mark_down" << dendl;
    assert(state == STATE_CLOSED);
    goto fail;
  }

  if (existing == this)
    existing = NULL;
  if (existing) {
    // It is not possible for the existing connection to acquire this
    // connection's lock
    existing->lock.lock();  // skip lockdep check (we are locking a second AsyncConnection here)

    if (existing->replacing || existing->state == STATE_CLOSED) {
      ldout(async_msgr->cct, 1) << __func__ << " existing racing replace or mark_down happened while replacing."
                                << " existing_state=" << get_state_name(existing->state) << dendl;
      reply.global_seq = existing->peer_global_seq;
      r = _reply_accept(CEPH_MSGR_TAG_RETRY_GLOBAL, connect, reply, authorizer_reply);
      existing->lock.unlock();
      if (r < 0)
        goto fail;
      return 0;
    }

    if (connect.global_seq < existing->peer_global_seq) {
      ldout(async_msgr->cct, 10) << __func__ << " accept existing " << existing
                                 << ".gseq " << existing->peer_global_seq << " > "
                                 << connect.global_seq << ", RETRY_GLOBAL" << dendl;
      reply.global_seq = existing->peer_global_seq;  // so we can send it below..
      existing->lock.unlock();
      return _reply_accept(CEPH_MSGR_TAG_RETRY_GLOBAL, connect, reply, authorizer_reply);
    } else {
      ldout(async_msgr->cct, 10) << __func__ << " accept existing " << existing
                                 << ".gseq " << existing->peer_global_seq
                                 << " <= " << connect.global_seq << ", looks ok" << dendl;
    }
    if (existing->policy.lossy) {
      ldout(async_msgr->cct, 0) << __func__ << " accept replacing existing (lossy) channel (new one lossy="
                                << policy.lossy << ")" << dendl;
      existing->was_session_reset();
      goto replace;
    }

    ldout(async_msgr->cct, 0) << __func__ << " accept connect_seq " << connect.connect_seq
                              << " vs existing csq=" << existing->connect_seq << " existing_state="
                              << get_state_name(existing->state) << dendl;

    if (connect.connect_seq == 0 && existing->connect_seq > 0) {
      ldout(async_msgr->cct,0) << __func__ << " accept peer reset, then tried to connect to us, replacing" << dendl;
      // this is a hard reset from peer
      is_reset_from_peer = true;
      if (policy.resetcheck)
        existing->was_session_reset(); // this resets out_queue, msg_ and connect_seq #'s
      goto replace;
    }

    if (connect.connect_seq < existing->connect_seq) {
      // old attempt, or we sent READY but they didn't get it.
      ldout(async_msgr->cct, 10) << __func__ << " accept existing " << existing << ".cseq "
                                 << existing->connect_seq << " > " << connect.connect_seq
                                 << ", RETRY_SESSION" << dendl;
      reply.connect_seq = existing->connect_seq + 1;
      existing->lock.unlock();
      return _reply_accept(CEPH_MSGR_TAG_RETRY_SESSION, connect, reply, authorizer_reply);
    }

    if (connect.connect_seq == existing->connect_seq) {
      // if the existing connection successfully opened, and/or
      // subsequently went to standby, then the peer should bump
      // their connect_seq and retry: this is not a connection race
      // we need to resolve here.
      if (existing->state == STATE_OPEN ||
          existing->state == STATE_STANDBY) {
        ldout(async_msgr->cct, 10) << __func__ << " accept connection race, existing " << existing
                                   << ".cseq " << existing->connect_seq << " == "
                                   << connect.connect_seq << ", OPEN|STANDBY, RETRY_SESSION" << dendl;
        reply.connect_seq = existing->connect_seq + 1;
        existing->lock.unlock();
        return _reply_accept(CEPH_MSGR_TAG_RETRY_SESSION, connect, reply, authorizer_reply);
      }

      // connection race?
      if (peer_addr < async_msgr->get_myaddr() || existing->policy.server) {
        // incoming wins
        ldout(async_msgr->cct, 10) << __func__ << " accept connection race, existing " << existing
                                   << ".cseq " << existing->connect_seq << " == " << connect.connect_seq
                                   << ", or we are server, replacing my attempt" << dendl;
        goto replace;
      } else {
        // our existing outgoing wins
        ldout(async_msgr->cct,10) << __func__ << " accept connection race, existing "
                                  << existing << ".cseq " << existing->connect_seq
                                  << " == " << connect.connect_seq << ", sending WAIT" << dendl;
        assert(peer_addr > async_msgr->get_myaddr());
        existing->lock.unlock();
        return _reply_accept(CEPH_MSGR_TAG_WAIT, connect, reply, authorizer_reply);
      }
    }
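    // Tie-break rule for a genuine connection race (equal connect_seq, both
    // sides dialing at once): the attempt initiated by the lower-addressed
    // side wins, so if the peer's address is below ours (or our policy says
    // we are the server and never dial) the incoming connection replaces our
    // outgoing attempt; otherwise we answer WAIT and keep our own attempt.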
    assert(connect.connect_seq > existing->connect_seq);
    assert(connect.global_seq >= existing->peer_global_seq);
    if (policy.resetcheck &&   // RESETSESSION only used by servers; peers do not reset each other
        existing->connect_seq == 0) {
      ldout(async_msgr->cct, 0) << __func__ << " accept we reset (peer sent cseq "
                                << connect.connect_seq << ", " << existing << ".cseq = "
                                << existing->connect_seq << "), sending RESETSESSION" << dendl;
      existing->lock.unlock();
      return _reply_accept(CEPH_MSGR_TAG_RESETSESSION, connect, reply, authorizer_reply);
    }

    // reconnect
    ldout(async_msgr->cct, 10) << __func__ << " accept peer sent cseq " << connect.connect_seq
                               << " > " << existing->connect_seq << dendl;
    goto replace;
  } // existing
  else if (!replacing && connect.connect_seq > 0) {
    // we reset, and they are opening a new session
    ldout(async_msgr->cct, 0) << __func__ << " accept we reset (peer sent cseq "
                              << connect.connect_seq << "), sending RESETSESSION" << dendl;
    return _reply_accept(CEPH_MSGR_TAG_RESETSESSION, connect, reply, authorizer_reply);
  } else {
    // new session
    ldout(async_msgr->cct, 10) << __func__ << " accept new session" << dendl;
    existing = NULL;
    goto open;
  }
 replace:
  ldout(async_msgr->cct, 10) << __func__ << " accept replacing " << existing << dendl;

  inject_delay();
  if (existing->policy.lossy) {
    // disconnect from the Connection
    ldout(async_msgr->cct, 1) << __func__ << " replacing on lossy channel, failing existing" << dendl;
    existing->_stop();
    existing->dispatch_queue->queue_reset(existing.get());
  } else {
    assert(can_write == WriteStatus::NOWRITE);
    existing->write_lock.lock();

    // reset the in_seq if this is a hard reset from peer,
    // otherwise we respect our original connection's value
    if (is_reset_from_peer) {
      existing->is_reset_from_peer = true;
    }

    center->delete_file_event(cs.fd(), EVENT_READABLE|EVENT_WRITABLE);

    // Clean up output buffer
    existing->outcoming_bl.clear();
    if (existing->delay_state) {
      existing->delay_state->flush();
      assert(!delay_state);
    }
    existing->requeue_sent();
    existing->reset_recv_state();

    auto temp_cs = std::move(cs);
    EventCenter *new_center = center;
    Worker *new_worker = worker;
    // avoid _stop shutting down the replacing socket;
    // queue a reset on the new connection, which we're dumping for the old
    _stop();

    dispatch_queue->queue_reset(this);
    ldout(async_msgr->cct, 1) << __func__ << " stop myself to swap existing" << dendl;
    existing->can_write = WriteStatus::REPLACING;
    existing->open_write = false;
    existing->replacing = true;
    existing->state_offset = 0;
    // avoid the previous thread modifying events
    existing->state = STATE_NONE;
    // Discard existing prefetch buffer in `recv_buf`
    existing->recv_start = existing->recv_end = 0;
    // there shouldn't be any buffered data left
    assert(recv_start == recv_end);

    auto deactivate_existing = std::bind(
        [existing, new_worker, new_center, connect, reply, authorizer_reply](ConnectedSocket &cs) mutable {
      // we need to delete the time event in the original thread
      {
        std::lock_guard<std::mutex> l(existing->lock);
        if (existing->state == STATE_NONE) {
          existing->shutdown_socket();
          existing->cs = std::move(cs);
          existing->worker->references--;
          new_worker->references++;
          existing->logger = new_worker->get_perf_counter();
          existing->worker = new_worker;
          existing->center = new_center;
          if (existing->delay_state)
            existing->delay_state->set_center(new_center);
        } else if (existing->state == STATE_CLOSED) {
          auto back_to_close = std::bind(
            [](ConnectedSocket &cs) mutable { cs.close(); }, std::move(cs));
          new_center->submit_to(
              new_center->get_id(), std::move(back_to_close), true);
          return;
        } else {
          ceph_abort();
        }
      }

      // Before existing->center is changed, there may already be events for
      // `existing` queued on the old center.  If we then mark `existing` down,
      // cleanup runs in another thread and a previously queued event could
      // dereference freed state (a segfault), so the hand-off is serialized
      // through submit_to().
      auto transfer_existing = [existing, connect, reply, authorizer_reply]() mutable {
        std::lock_guard<std::mutex> l(existing->lock);
        if (existing->state == STATE_CLOSED)
          return;
        assert(existing->state == STATE_NONE);

        existing->state = STATE_ACCEPTING_WAIT_CONNECT_MSG;
        existing->center->create_file_event(existing->cs.fd(), EVENT_READABLE, existing->read_handler);
        reply.global_seq = existing->peer_global_seq;
        if (existing->_reply_accept(CEPH_MSGR_TAG_RETRY_GLOBAL, connect, reply, authorizer_reply) < 0) {
          // handle error
          existing->fault();
        }
      };
      if (existing->center->in_thread())
        transfer_existing();
      else
        existing->center->submit_to(
            existing->center->get_id(), std::move(transfer_existing), true);
    }, std::move(temp_cs));

    existing->center->submit_to(
        existing->center->get_id(), std::move(deactivate_existing), true);
    existing->write_lock.unlock();
    existing->lock.unlock();
    return 0;
  }
  existing->lock.unlock();
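  // The hand-off above is a two-step dance across event loops: first
  // deactivate_existing runs in the old center's thread to detach the socket
  // and repoint worker/center at the new ones, then transfer_existing runs in
  // the (now) new center's thread to re-arm the read event and send
  // RETRY_GLOBAL.  Each step re-checks existing->state under the lock because
  // a mark_down can land between the two.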
 open:
  connect_seq = connect.connect_seq + 1;
  peer_global_seq = connect.global_seq;
  ldout(async_msgr->cct, 10) << __func__ << " accept success, connect_seq = "
                             << connect_seq << " in_seq=" << in_seq.read() << ", sending READY" << dendl;
  int next_state;

  // if it is a hard reset from the peer, we don't need a round-trip to negotiate the in/out sequence
  if ((connect.features & CEPH_FEATURE_RECONNECT_SEQ) && !is_reset_from_peer) {
    reply.tag = CEPH_MSGR_TAG_SEQ;
    next_state = STATE_ACCEPTING_WAIT_SEQ;
  } else {
    reply.tag = CEPH_MSGR_TAG_READY;
    next_state = STATE_ACCEPTING_READY;
    discard_requeued_up_to(0);
    is_reset_from_peer = false;
    in_seq.set(0);
  }

  // send READY reply
  reply.features = policy.features_supported;
  reply.global_seq = async_msgr->get_global_seq();
  reply.connect_seq = connect_seq;
  reply.flags = 0;
  reply.authorizer_len = authorizer_reply.length();
  if (policy.lossy)
    reply.flags = reply.flags | CEPH_MSG_CONNECT_LOSSY;

  set_features((uint64_t)reply.features & (uint64_t)connect.features);
  ldout(async_msgr->cct, 10) << __func__ << " accept features " << get_features() << dendl;

  session_security.reset(
      get_auth_session_handler(async_msgr->cct, connect.authorizer_protocol,
                               session_key, get_features()));

  reply_bl.append((char*)&reply, sizeof(reply));

  if (reply.authorizer_len)
    reply_bl.append(authorizer_reply.c_str(), authorizer_reply.length());

  if (reply.tag == CEPH_MSGR_TAG_SEQ) {
    uint64_t s = in_seq.read();
    reply_bl.append((char*)&s, sizeof(s));
  }

  lock.unlock();
  // Because `replacing` prevents other connections from preempting this addr,
  // it's safe not to acquire the Connection's lock here
  r = async_msgr->accept_conn(this);

  inject_delay();

  lock.lock();
  replacing = false;
  if (r < 0) {
    ldout(async_msgr->cct, 1) << __func__ << " existing race replacing process for addr=" << peer_addr
                              << " just fail later one(this)" << dendl;
    goto fail_registered;
  }
  if (state != STATE_ACCEPTING_WAIT_CONNECT_MSG_AUTH) {
    ldout(async_msgr->cct, 1) << __func__ << " state changed while accept_conn, it must be mark_down" << dendl;
    assert(state == STATE_CLOSED);
    goto fail_registered;
  }

  r = try_send(reply_bl);
  if (r < 0)
    goto fail_registered;

  // notify
  dispatch_queue->queue_accept(this);
  async_msgr->ms_deliver_handle_fast_accept(this);
  once_ready = true;

  if (r == 0) {
    state = next_state;
    ldout(async_msgr->cct, 2) << __func__ << " accept write reply msg done" << dendl;
  } else {
    state = STATE_WAIT_SEND;
    state_after_send = next_state;
  }

  return 0;

 fail_registered:
  ldout(async_msgr->cct, 10) << __func__ << " accept fault after register" << dendl;
  inject_delay();

 fail:
  ldout(async_msgr->cct, 10) << __func__ << " failed to accept." << dendl;
  return -1;
}
void AsyncConnection::_connect()
{
  ldout(async_msgr->cct, 10) << __func__ << " csq=" << connect_seq << dendl;

  state = STATE_CONNECTING;
  // reschedule the connection on its event center to avoid lock dependencies;
  // this may be called from an external thread (e.g. send_message)
  center->dispatch_event_external(read_handler);
}
void AsyncConnection::accept(ConnectedSocket socket, entity_addr_t &addr)
{
  ldout(async_msgr->cct, 10) << __func__ << " sd=" << socket.fd() << dendl;
  assert(socket.fd() >= 0);

  std::lock_guard<std::mutex> l(lock);
  cs = std::move(socket);
  socket_addr = addr;
  state = STATE_ACCEPTING;
  // reschedule the connection on its event center to avoid lock dependencies
  center->dispatch_event_external(read_handler);
}

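// send_message() is the external entry point for queueing a message on this
// connection. In rough order: messages addressed to ourselves short-circuit
// to the local DispatchQueue; fast-dispatchable messages may be pre-encoded
// outside write_lock; if nothing is queued, writing is allowed and
// ms_async_send_inline is set, we try to write in the caller's context;
// otherwise the message is enqueued by priority and the write_handler event
// takes over.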
int AsyncConnection::send_message(Message *m)
{
  lgeneric_subdout(async_msgr->cct, ms,
                   1) << "-- " << async_msgr->get_myaddr() << " --> "
                      << get_peer_addr() << " -- "
                      << *m << " -- " << m << " con "
                      << m->get_connection().get()
                      << dendl;

  // optimistically assume it's ok to encode (it may actually be broken now)
  if (!m->get_priority())
    m->set_priority(async_msgr->get_default_send_priority());

  m->get_header().src = async_msgr->get_myname();
  m->set_connection(this);

  if (m->get_type() == CEPH_MSG_OSD_OP)
    OID_EVENT_TRACE_WITH_MSG(m, "SEND_MSG_OSD_OP_BEGIN", true);
  else if (m->get_type() == CEPH_MSG_OSD_OPREPLY)
    OID_EVENT_TRACE_WITH_MSG(m, "SEND_MSG_OSD_OPREPLY_BEGIN", true);

  if (async_msgr->get_myaddr() == get_peer_addr()) { //loopback connection
    ldout(async_msgr->cct, 20) << __func__ << " " << *m << " local" << dendl;
    std::lock_guard<std::mutex> l(write_lock);
    if (can_write != WriteStatus::CLOSED) {
      dispatch_queue->local_delivery(m, m->get_priority());
    } else {
      ldout(async_msgr->cct, 10) << __func__ << " loopback connection closed."
                                 << " Drop message " << m << dendl;
      m->put();
    }
    return 0;
  }

  last_active = ceph::coarse_mono_clock::now();
  // local messages are deliberately not counted here; they are too
  // lightweight and would skew the stats users see
  logger->inc(l_msgr_send_messages);

  bufferlist bl;
  uint64_t f = get_features();

  // TODO: currently not all messages support reencode (e.g. MOSDMap), so only
  // prepare the message here if it supports fast dispatch
  bool can_fast_prepare = async_msgr->ms_can_fast_dispatch(m);
  if (can_fast_prepare)
    prepare_send_message(f, m, bl);

  std::lock_guard<std::mutex> l(write_lock);
  // "features" changes will change the payload encoding
  if (can_fast_prepare && (can_write == WriteStatus::NOWRITE || get_features() != f)) {
    // ensure the correctness of message encoding
    bl.clear();
    m->get_payload().clear();
    ldout(async_msgr->cct, 5) << __func__ << " clear encoded buffer previous "
                              << f << " != " << get_features() << dendl;
  }
  if (!is_queued() && can_write == WriteStatus::CANWRITE &&
      async_msgr->cct->_conf->ms_async_send_inline) {
    if (!can_fast_prepare)
      prepare_send_message(get_features(), m, bl);
    logger->inc(l_msgr_send_messages_inline);
    if (write_message(m, bl, false) < 0) {
      ldout(async_msgr->cct, 1) << __func__ << " send msg failed" << dendl;
      // we want to handle the fault within the internal thread
      center->dispatch_event_external(write_handler);
    }
  } else if (can_write == WriteStatus::CLOSED) {
    ldout(async_msgr->cct, 10) << __func__ << " connection closed."
                               << " Drop message " << m << dendl;
    m->put();
  } else {
    m->trace.event("async enqueueing message");
    out_q[m->get_priority()].emplace_back(std::move(bl), m);
    ldout(async_msgr->cct, 15) << __func__ << " inline write is denied, reschedule m=" << m << dendl;
    if (can_write != WriteStatus::REPLACING)
      center->dispatch_event_external(write_handler);
  }
  return 0;
}

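// On a fault of a lossless connection, messages that were written but not
// yet acked sit on the "sent" list. requeue_sent() moves them back to the
// front of the highest-priority queue (newest first, so original order is
// preserved); they will be re-sequenced and re-sent once the session is
// re-established.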
void AsyncConnection::requeue_sent()
{
  if (sent.empty())
    return;

  list<pair<bufferlist, Message*> >& rq = out_q[CEPH_MSG_PRIO_HIGHEST];
  while (!sent.empty()) {
    Message* m = sent.back();
    sent.pop_back();
    ldout(async_msgr->cct, 10) << __func__ << " " << *m << " for resend "
                               << " (" << m->get_seq() << ")" << dendl;
    rq.push_front(make_pair(bufferlist(), m));
    out_seq.dec();
  }
}

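// When the peer tells us (via the SEQ handshake) which sequence number it
// has already received, everything requeued at or below that value can be
// dropped instead of re-sent. A seq of 0 marks a message that was never
// written, so it is always kept.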
void AsyncConnection::discard_requeued_up_to(uint64_t seq)
{
  ldout(async_msgr->cct, 10) << __func__ << " " << seq << dendl;
  std::lock_guard<std::mutex> l(write_lock);
  if (out_q.count(CEPH_MSG_PRIO_HIGHEST) == 0)
    return;
  list<pair<bufferlist, Message*> >& rq = out_q[CEPH_MSG_PRIO_HIGHEST];
  while (!rq.empty()) {
    pair<bufferlist, Message*> p = rq.front();
    if (p.second->get_seq() == 0 || p.second->get_seq() > seq)
      break;
    ldout(async_msgr->cct, 10) << __func__ << " " << *(p.second) << " for resend seq " << p.second->get_seq()
                               << " <= " << seq << ", discarding" << dendl;
    p.second->put();
    rq.pop_front();
    out_seq.inc();
  }
  if (rq.empty())
    out_q.erase(CEPH_MSG_PRIO_HIGHEST);
}

/*
 * Tears down the AsyncConnection's message queues, and removes them from the DispatchQueue
 * Must hold write_lock prior to calling.
 */
void AsyncConnection::discard_out_queue()
{
  ldout(async_msgr->cct, 10) << __func__ << " started" << dendl;

  for (list<Message*>::iterator p = sent.begin(); p != sent.end(); ++p) {
    ldout(async_msgr->cct, 20) << __func__ << " discard " << *p << dendl;
    (*p)->put();
  }
  sent.clear();
  for (map<int, list<pair<bufferlist, Message*> > >::iterator p = out_q.begin(); p != out_q.end(); ++p)
    for (list<pair<bufferlist, Message*> >::iterator r = p->second.begin(); r != p->second.end(); ++r) {
      ldout(async_msgr->cct, 20) << __func__ << " discard " << r->second << dendl;
      r->second->put();
    }
  out_q.clear();
  outcoming_bl.clear();
}

int AsyncConnection::randomize_out_seq()
{
  if (get_features() & CEPH_FEATURE_MSG_AUTH) {
    // Set out_seq to a random value, so CRC won't be predictable.  Don't bother checking seq_error
    // here.  We'll check it on the call.  PLR
    uint64_t rand_seq;
    int seq_error = get_random_bytes((char *)&rand_seq, sizeof(rand_seq));
    rand_seq &= SEQ_MASK;
    lsubdout(async_msgr->cct, ms, 10) << __func__ << " randomize_out_seq " << rand_seq << dendl;
    out_seq.set(rand_seq);
    return seq_error;
  } else {
    // previously, seq #'s always started at 0.
    out_seq.set(0);
    return 0;
  }
}

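// fault() is the central error path of the connection state machine. Lossy
// connections are simply torn down; lossless ones requeue unacked messages
// and either go to STANDBY (nothing queued, server side) or retry with an
// exponentially growing backoff. For example, with ms_initial_backoff=0.2
// and ms_max_backoff=15 (their usual defaults, in seconds), successive
// retries wait 0.2s, 0.4s, 0.8s, ... capped at 15s.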
void AsyncConnection::fault()
{
  if (state == STATE_CLOSED || state == STATE_NONE) {
    ldout(async_msgr->cct, 10) << __func__ << " connection is already closed" << dendl;
    return;
  }

  if (policy.lossy && !(state >= STATE_CONNECTING && state < STATE_CONNECTING_READY)) {
    ldout(async_msgr->cct, 1) << __func__ << " on lossy channel, failing" << dendl;
    _stop();
    dispatch_queue->queue_reset(this);
    return;
  }

  write_lock.lock();
  can_write = WriteStatus::NOWRITE;

  // queue delayed items immediately
  if (delay_state)
    delay_state->flush();
  // requeue sent items
  requeue_sent();
  recv_start = recv_end = 0;
  is_reset_from_peer = false;
  outcoming_bl.clear();
  if (!once_ready && !is_queued() &&
      state >= STATE_ACCEPTING && state <= STATE_ACCEPTING_WAIT_CONNECT_MSG_AUTH) {
    ldout(async_msgr->cct, 0) << __func__ << " with nothing to send and in the half "
                              << " accept state just closed" << dendl;
    write_lock.unlock();
    _stop();
    dispatch_queue->queue_reset(this);
    return;
  }

  if (policy.standby && !is_queued() && state != STATE_WAIT) {
    ldout(async_msgr->cct, 0) << __func__ << " with nothing to send, going to standby" << dendl;
    state = STATE_STANDBY;
    write_lock.unlock();
    return;
  }

  write_lock.unlock();
  if (!(state >= STATE_CONNECTING && state < STATE_CONNECTING_READY) &&
      state != STATE_WAIT) { // STATE_WAIT is coming from STATE_CONNECTING_*
    // policy may be empty while we are still in the accept stage
    if (policy.server) {
      ldout(async_msgr->cct, 0) << __func__ << " server, going to standby" << dendl;
      state = STATE_STANDBY;
    } else {
      ldout(async_msgr->cct, 0) << __func__ << " initiating reconnect" << dendl;
      connect_seq++;
      state = STATE_CONNECTING;
    }
    backoff = utime_t();
    center->dispatch_event_external(read_handler);
  } else {
    if (state == STATE_WAIT) {
      backoff.set_from_double(async_msgr->cct->_conf->ms_max_backoff);
    } else if (backoff == utime_t()) {
      backoff.set_from_double(async_msgr->cct->_conf->ms_initial_backoff);
    } else {
      backoff += backoff;
      if (backoff > async_msgr->cct->_conf->ms_max_backoff)
        backoff.set_from_double(async_msgr->cct->_conf->ms_max_backoff);
    }

    state = STATE_CONNECTING;
    ldout(async_msgr->cct, 10) << __func__ << " waiting " << backoff << dendl;
    register_time_events.insert(center->create_time_event(
        backoff.to_nsec()/1000, wakeup_handler));
  }
}

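// was_session_reset() is invoked when the peer indicates it has no memory of
// this session: all queued and delayed messages are dropped, sequence state
// is reset, and a remote_reset event is queued for dispatchers.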
void AsyncConnection::was_session_reset()
{
  ldout(async_msgr->cct,10) << __func__ << " started" << dendl;
  std::lock_guard<std::mutex> l(write_lock);
  if (delay_state)
    delay_state->discard();
  dispatch_queue->discard_queue(conn_id);
  discard_out_queue();
  dispatch_queue->queue_remote_reset(this);

  if (randomize_out_seq()) {
    ldout(async_msgr->cct, 15) << __func__ << " could not get random bytes to set seq number for session reset; set seq number to " << out_seq.read() << dendl;
  }

  in_seq.set(0);
  connect_seq = 0;
  // it's safe to directly set 0, double locked
  ack_left.set(0);
  once_ready = false;
  can_write = WriteStatus::NOWRITE;
}

void AsyncConnection::_stop()
{
  if (state == STATE_CLOSED)
    return;

  if (delay_state)
    delay_state->flush();

  ldout(async_msgr->cct, 2) << __func__ << dendl;
  std::lock_guard<std::mutex> l(write_lock);

  dispatch_queue->discard_queue(conn_id);
  discard_out_queue();
  async_msgr->unregister_conn(this);
  worker->release_worker();

  state = STATE_CLOSED;
  can_write = WriteStatus::CLOSED;

  // Make sure in-queue events will be processed
  center->dispatch_event_external(EventCallbackRef(new C_clean_handler(this)));
}

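// prepare_send_message() encodes a Message into a flat bufferlist outside
// the write path. The encoding depends on the negotiated feature bits, which
// is why send_message() throws the prepared buffer away and re-encodes if
// the features changed before the write could happen.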
void AsyncConnection::prepare_send_message(uint64_t features, Message *m, bufferlist &bl)
{
  ldout(async_msgr->cct, 20) << __func__ << " m" << " " << *m << dendl;

  // associate message with Connection (for benefit of encode_payload)
  if (m->empty_payload())
    ldout(async_msgr->cct, 20) << __func__ << " encoding features "
                               << features << " " << m << " " << *m << dendl;
  else
    ldout(async_msgr->cct, 20) << __func__ << " half-reencoding features "
                               << features << " " << m << " " << *m << dendl;

  // encode and copy out of *m
  m->encode(features, msgr->crcflags);

  bl.append(m->get_payload());
  bl.append(m->get_middle());
  bl.append(m->get_data());
}

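// write_message() serializes one message onto outcoming_bl in wire order:
// TAG_MSG, the header (the pre-NOSRCADDR layout for old peers), the
// payload/middle/data sections, then the footer (the old unsigned footer
// format when the peer lacks CEPH_FEATURE_MSG_AUTH). Small multi-buffer
// payloads (<= ASYNC_COALESCE_THRESHOLD bytes) are copied into one
// contiguous buffer, presumably to keep the buffer chain handed to the
// socket short; larger ones are claimed without copying.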
ssize_t AsyncConnection::write_message(Message *m, bufferlist& bl, bool more)
{
  assert(can_write == WriteStatus::CANWRITE);
  m->set_seq(out_seq.inc());

  if (!policy.lossy) {
    // put on the sent list so the message can be requeued on fault
    sent.push_back(m);
    m->get();
  }

  if (msgr->crcflags & MSG_CRC_HEADER)
    m->calc_header_crc();

  ceph_msg_header& header = m->get_header();
  ceph_msg_footer& footer = m->get_footer();

  // TODO: make sign_message reentrant?
  // Now that we have all the crcs calculated, handle the
  // digital signature for the message, if the AsyncConnection has session
  // security set up.  Some session security options do not
  // actually calculate and check the signature, but they should
  // handle the calls to sign_message and check_signature.  PLR
  if (session_security.get() == NULL) {
    ldout(async_msgr->cct, 20) << __func__ << " no session security" << dendl;
  } else {
    if (session_security->sign_message(m)) {
      ldout(async_msgr->cct, 20) << __func__ << " failed to sign m="
                                 << m << ": sig = " << footer.sig << dendl;
    } else {
      ldout(async_msgr->cct, 20) << __func__ << " signed m=" << m
                                 << ": sig = " << footer.sig << dendl;
    }
  }

  unsigned original_bl_len = outcoming_bl.length();

  outcoming_bl.append(CEPH_MSGR_TAG_MSG);

  if (has_feature(CEPH_FEATURE_NOSRCADDR)) {
    outcoming_bl.append((char*)&header, sizeof(header));
  } else {
    ceph_msg_header_old oldheader;
    memcpy(&oldheader, &header, sizeof(header));
    oldheader.src.name = header.src;
    oldheader.src.addr = get_peer_addr();
    oldheader.orig_src = oldheader.src;
    oldheader.reserved = header.reserved;
    oldheader.crc = ceph_crc32c(0, (unsigned char*)&oldheader,
                                sizeof(oldheader) - sizeof(oldheader.crc));
    outcoming_bl.append((char*)&oldheader, sizeof(oldheader));
  }

  ldout(async_msgr->cct, 20) << __func__ << " sending message type=" << header.type
                             << " src " << entity_name_t(header.src)
                             << " front=" << header.front_len
                             << " data=" << header.data_len
                             << " off " << header.data_off << dendl;

  if ((bl.length() <= ASYNC_COALESCE_THRESHOLD) && (bl.buffers().size() > 1)) {
    std::list<buffer::ptr>::const_iterator pb;
    for (pb = bl.buffers().begin(); pb != bl.buffers().end(); ++pb) {
      outcoming_bl.append((char*)pb->c_str(), pb->length());
    }
  } else {
    outcoming_bl.claim_append(bl);
  }

  // send footer; if the receiver doesn't support signatures, use the old footer format
  ceph_msg_footer_old old_footer;
  if (has_feature(CEPH_FEATURE_MSG_AUTH)) {
    outcoming_bl.append((char*)&footer, sizeof(footer));
  } else {
    if (msgr->crcflags & MSG_CRC_HEADER) {
      old_footer.front_crc = footer.front_crc;
      old_footer.middle_crc = footer.middle_crc;
      old_footer.data_crc = footer.data_crc;
    } else {
      old_footer.front_crc = old_footer.middle_crc = 0;
    }
    old_footer.data_crc = msgr->crcflags & MSG_CRC_DATA ? footer.data_crc : 0;
    old_footer.flags = footer.flags;
    outcoming_bl.append((char*)&old_footer, sizeof(old_footer));
  }

  m->trace.event("async writing message");
  logger->inc(l_msgr_send_bytes, outcoming_bl.length() - original_bl_len);
  ldout(async_msgr->cct, 20) << __func__ << " sending " << m->get_seq()
                             << " " << m << dendl;
  ssize_t rc = _try_send(more);
  if (rc < 0) {
    ldout(async_msgr->cct, 1) << __func__ << " error sending " << m << ", "
                              << cpp_strerror(rc) << dendl;
  } else if (rc == 0) {
    ldout(async_msgr->cct, 10) << __func__ << " sending " << m << " done." << dendl;
  } else {
    ldout(async_msgr->cct, 10) << __func__ << " sending " << m << " continuously." << dendl;
  }
  if (m->get_type() == CEPH_MSG_OSD_OP)
    OID_EVENT_TRACE_WITH_MSG(m, "SEND_MSG_OSD_OP_END", false);
  else if (m->get_type() == CEPH_MSG_OSD_OPREPLY)
    OID_EVENT_TRACE_WITH_MSG(m, "SEND_MSG_OSD_OPREPLY_END", false);
  return rc;
}

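// reset_recv_state() unwinds whatever the read state machine had claimed
// when the connection is torn down mid-message: depending on how far the
// OPEN_MESSAGE states progressed, it returns the message count and byte
// budget to the policy throttlers and the dispatch queue throttler, so a
// dying connection cannot leak throttle budget.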
void AsyncConnection::reset_recv_state()
{
  // clean up internal variables and state
  if (state >= STATE_CONNECTING_SEND_CONNECT_MSG &&
      state <= STATE_CONNECTING_READY) {
    delete authorizer;
    authorizer = NULL;
    got_bad_auth = false;
  }

  if (state > STATE_OPEN_MESSAGE_THROTTLE_MESSAGE &&
      state <= STATE_OPEN_MESSAGE_READ_FOOTER_AND_DISPATCH
      && policy.throttler_messages) {
    ldout(async_msgr->cct, 10) << __func__ << " releasing " << 1
                               << " message to policy throttler "
                               << policy.throttler_messages->get_current() << "/"
                               << policy.throttler_messages->get_max() << dendl;
    policy.throttler_messages->put();
  }
  if (state > STATE_OPEN_MESSAGE_THROTTLE_BYTES &&
      state <= STATE_OPEN_MESSAGE_READ_FOOTER_AND_DISPATCH) {
    if (policy.throttler_bytes) {
      ldout(async_msgr->cct, 10) << __func__ << " releasing " << cur_msg_size
                                 << " bytes to policy throttler "
                                 << policy.throttler_bytes->get_current() << "/"
                                 << policy.throttler_bytes->get_max() << dendl;
      policy.throttler_bytes->put(cur_msg_size);
    }
  }
  if (state > STATE_OPEN_MESSAGE_THROTTLE_DISPATCH_QUEUE &&
      state <= STATE_OPEN_MESSAGE_READ_FOOTER_AND_DISPATCH) {
    ldout(async_msgr->cct, 10) << __func__ << " releasing " << cur_msg_size
                               << " bytes to dispatch_queue throttler "
                               << dispatch_queue->dispatch_throttler.get_current() << "/"
                               << dispatch_queue->dispatch_throttler.get_max() << dendl;
    dispatch_queue->dispatch_throttle_release(cur_msg_size);
  }
}

void AsyncConnection::handle_ack(uint64_t seq)
{
  ldout(async_msgr->cct, 15) << __func__ << " got ack seq " << seq << dendl;
  // trim the sent list
  std::lock_guard<std::mutex> l(write_lock);
  while (!sent.empty() && sent.front()->get_seq() <= seq) {
    Message* m = sent.front();
    sent.pop_front();
    ldout(async_msgr->cct, 10) << __func__ << " got ack seq "
                               << seq << " >= " << m->get_seq() << " on "
                               << m << " " << *m << dendl;
    m->put();
  }
}

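// DelayedDelivery artificially delays received messages; it is a debugging
// and test hook driven by the ms_inject_delay_* options. do_request() fires
// when a delay timer expires: it sleeps out any remaining delay for a
// matching message type, then dispatches the message on the normal
// fast/slow path.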
void AsyncConnection::DelayedDelivery::do_request(int id)
{
  Message *m = nullptr;
  {
    std::lock_guard<std::mutex> l(delay_lock);
    register_time_events.erase(id);
    if (stop_dispatch)
      return;
    if (delay_queue.empty())
      return;
    utime_t release = delay_queue.front().first;
    m = delay_queue.front().second;
    string delay_msg_type = msgr->cct->_conf->ms_inject_delay_msg_type;
    utime_t now = ceph_clock_now();
    if ((release > now &&
         (delay_msg_type.empty() || m->get_type_name() == delay_msg_type))) {
      utime_t t = release - now;
      t.sleep();
    }
    delay_queue.pop_front();
  }
  if (msgr->ms_can_fast_dispatch(m)) {
    dispatch_queue->fast_dispatch(m);
  } else {
    dispatch_queue->enqueue(m, m->get_priority(), conn_id);
  }
}

void AsyncConnection::DelayedDelivery::flush() {
  stop_dispatch = true;
  center->submit_to(
      center->get_id(), [this] () mutable {
    std::lock_guard<std::mutex> l(delay_lock);
    while (!delay_queue.empty()) {
      Message *m = delay_queue.front().second;
      if (msgr->ms_can_fast_dispatch(m)) {
        dispatch_queue->fast_dispatch(m);
      } else {
        dispatch_queue->enqueue(m, m->get_priority(), conn_id);
      }
      delay_queue.pop_front();
    }
    for (auto i : register_time_events)
      center->delete_time_event(i);
    register_time_events.clear();
    stop_dispatch = false;
  }, true);
}

void AsyncConnection::send_keepalive()
{
  ldout(async_msgr->cct, 10) << __func__ << dendl;
  std::lock_guard<std::mutex> l(write_lock);
  if (can_write != WriteStatus::CLOSED) {
    keepalive = true;
    center->dispatch_event_external(write_handler);
  }
}

void AsyncConnection::mark_down()
{
  ldout(async_msgr->cct, 1) << __func__ << dendl;
  std::lock_guard<std::mutex> l(lock);
  _stop();
}

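// Keepalives come in three wire flavors: TAG_KEEPALIVE2_ACK echoes the
// timestamp supplied by the peer, TAG_KEEPALIVE2 carries our current time
// (when the peer speaks CEPH_FEATURE_MSGR_KEEPALIVE2), and the bare legacy
// TAG_KEEPALIVE has no payload. The caller must hold write_lock, since this
// appends to outcoming_bl.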
void AsyncConnection::_append_keepalive_or_ack(bool ack, utime_t *tp)
{
  ldout(async_msgr->cct, 10) << __func__ << dendl;
  if (ack) {
    assert(tp);
    struct ceph_timespec ts;
    tp->encode_timeval(&ts);
    outcoming_bl.append(CEPH_MSGR_TAG_KEEPALIVE2_ACK);
    outcoming_bl.append((char*)&ts, sizeof(ts));
  } else if (has_feature(CEPH_FEATURE_MSGR_KEEPALIVE2)) {
    struct ceph_timespec ts;
    utime_t t = ceph_clock_now();
    t.encode_timeval(&ts);
    outcoming_bl.append(CEPH_MSGR_TAG_KEEPALIVE2);
    outcoming_bl.append((char*)&ts, sizeof(ts));
  } else {
    outcoming_bl.append(CEPH_MSGR_TAG_KEEPALIVE);
  }
}

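// handle_write() runs in the owning event center. When writing is allowed it
// appends any pending keepalive, drains the priority queues through
// write_message(), and piggybacks a single TAG_ACK carrying the latest
// in_seq for however many acks accumulated in ack_left. Otherwise it either
// kicks a client out of STANDBY or flushes whatever already sits in
// outcoming_bl.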
void AsyncConnection::handle_write()
{
  ldout(async_msgr->cct, 10) << __func__ << dendl;
  ssize_t r = 0;

  write_lock.lock();
  if (can_write == WriteStatus::CANWRITE) {
    if (keepalive) {
      _append_keepalive_or_ack();
      keepalive = false;
    }

    while (1) {
      bufferlist data;
      Message *m = _get_next_outgoing(&data);
      if (!m)
        break;

      // send_message or requeue messages may not encode message
      if (!data.length())
        prepare_send_message(get_features(), m, data);

      r = write_message(m, data, _has_next_outgoing());
      if (r < 0) {
        ldout(async_msgr->cct, 1) << __func__ << " send msg failed" << dendl;
        write_lock.unlock();
        goto fail;
      }
    }

    uint64_t left = ack_left.read();
    if (left) {
      ceph_le64 s;
      s = in_seq.read();
      outcoming_bl.append(CEPH_MSGR_TAG_ACK);
      outcoming_bl.append((char*)&s, sizeof(s));
      ldout(async_msgr->cct, 10) << __func__ << " try send msg ack, acked " << left << " messages" << dendl;
      ack_left.sub(left);
      left = ack_left.read();
      r = _try_send(left);
    } else if (is_queued()) {
      r = _try_send();
    }

    write_lock.unlock();
    if (r < 0) {
      ldout(async_msgr->cct, 1) << __func__ << " send msg failed" << dendl;
      goto fail;
    }
  } else {
    write_lock.unlock();
    lock.lock();
    write_lock.lock();
    if (state == STATE_STANDBY && !policy.server && is_queued()) {
      ldout(async_msgr->cct, 10) << __func__ << " policy.server is false" << dendl;
      _connect();
    } else if (cs && state != STATE_NONE && state != STATE_CONNECTING &&
               state != STATE_CONNECTING_RE && state != STATE_CLOSED) {
      r = _try_send();
      if (r < 0) {
        ldout(async_msgr->cct, 1) << __func__ << " send outcoming bl failed" << dendl;
        write_lock.unlock();
        fault();
        lock.unlock();
        return;
      }
    }
    write_lock.unlock();
    lock.unlock();
  }

  return;

 fail:
  lock.lock();
  fault();
  lock.unlock();
}

void AsyncConnection::wakeup_from(uint64_t id)
{
  lock.lock();
  register_time_events.erase(id);
  lock.unlock();
  process();
}

void AsyncConnection::tick(uint64_t id)
{
  auto now = ceph::coarse_mono_clock::now();
  ldout(async_msgr->cct, 20) << __func__ << " last_id=" << last_tick_id
                             << " last_active=" << last_active << dendl;
  assert(last_tick_id == id);
  std::lock_guard<std::mutex> l(lock);
  last_tick_id = 0;
  auto idle_period = std::chrono::duration_cast<std::chrono::microseconds>(now - last_active).count();
  if (inactive_timeout_us < (uint64_t)idle_period) {
    ldout(async_msgr->cct, 1) << __func__ << " idle(" << idle_period << ") more than "
                              << inactive_timeout_us
                              << " us, mark self fault." << dendl;
    fault();
  } else if (is_connected()) {
    last_tick_id = center->create_time_event(inactive_timeout_us, tick_handler);
  }
}