]> git.proxmox.com Git - ceph.git/blob - ceph/src/msg/async/AsyncMessenger.h
0b1f2c8e9308d793cc5831f46a1cfd43813d7288
[ceph.git] / ceph / src / msg / async / AsyncMessenger.h
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3 /*
4 * Ceph - scalable distributed file system
5 *
6 * Copyright (C) 2014 UnitedStack <haomai@unitedstack.com>
7 *
8 * Author: Haomai Wang <haomaiwang@gmail.com>
9 *
10 * This is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License version 2.1, as published by the Free Software
13 * Foundation. See file COPYING.
14 *
15 */
16
17 #ifndef CEPH_ASYNCMESSENGER_H
18 #define CEPH_ASYNCMESSENGER_H
19
20 #include "include/types.h"
21 #include "include/xlist.h"
22
23 #include <map>
24 using namespace std;
25 #include "include/unordered_map.h"
26 #include "include/unordered_set.h"
27
28 #include "common/Mutex.h"
29 #include "common/Cond.h"
30 #include "common/Thread.h"
31
32 #include "include/Spinlock.h"
33
34 #include "msg/SimplePolicyMessenger.h"
35 #include "msg/DispatchQueue.h"
36 #include "include/assert.h"
37 #include "AsyncConnection.h"
38 #include "Event.h"
39
40
41 class AsyncMessenger;
42
43 /**
44 * If the Messenger binds to a specific address, the Processor runs
45 * and listens for incoming connections.
46 */
47 class Processor {
48 AsyncMessenger *msgr;
49 NetHandler net;
50 Worker *worker;
51 ServerSocket listen_socket;
52 EventCallbackRef listen_handler;
53
54 class C_processor_accept;
55
56 public:
57 Processor(AsyncMessenger *r, Worker *w, CephContext *c);
58 ~Processor() { delete listen_handler; };
59
60 void stop();
61 int bind(const entity_addr_t &bind_addr,
62 const set<int>& avoid_ports,
63 entity_addr_t* bound_addr);
64 void start();
65 void accept();
66 };
67
/*
 * AsyncMessenger maintains a set of asynchronous connections. It may own
 * a bind address, and the connections accepted on it are managed by the
 * AsyncMessenger.
 */
74
75 class AsyncMessenger : public SimplePolicyMessenger {
76 // First we have the public Messenger interface implementation...
77 public:
78 /**
79 * Initialize the AsyncMessenger!
80 *
81 * @param cct The CephContext to use
82 * @param name The name to assign ourselves
83 * _nonce A unique ID to use for this AsyncMessenger. It should not
84 * be a value that will be repeated if the daemon restarts.
85 */
86 AsyncMessenger(CephContext *cct, entity_name_t name, const std::string &type,
87 string mname, uint64_t _nonce);
88
89 /**
90 * Destroy the AsyncMessenger. Pretty simple since all the work is done
91 * elsewhere.
92 */
93 ~AsyncMessenger() override;
94
95 /** @defgroup Accessors
96 * @{
97 */
98 void set_addr_unknowns(const entity_addr_t &addr) override;
99
100 int get_dispatch_queue_len() override {
101 return dispatch_queue.get_queue_len();
102 }
103
104 double get_dispatch_queue_max_age(utime_t now) override {
105 return dispatch_queue.get_max_age(now);
106 }
107 /** @} Accessors */
108
109 /**
110 * @defgroup Configuration functions
111 * @{
112 */
113 void set_cluster_protocol(int p) override {
114 assert(!started && !did_bind);
115 cluster_protocol = p;
116 }
117
118 int bind(const entity_addr_t& bind_addr) override;
119 int rebind(const set<int>& avoid_ports) override;
120 int client_bind(const entity_addr_t& bind_addr) override;
121
122 /** @} Configuration functions */
123
124 /**
125 * @defgroup Startup/Shutdown
126 * @{
127 */
128 int start() override;
129 void wait() override;
130 int shutdown() override;
131
132 /** @} // Startup/Shutdown */
133
134 /**
135 * @defgroup Messaging
136 * @{
137 */
138 int send_message(Message *m, const entity_inst_t& dest) override {
139 Mutex::Locker l(lock);
140
141 return _send_message(m, dest);
142 }
143
144 /** @} // Messaging */
145
146 /**
147 * @defgroup Connection Management
148 * @{
149 */
150 ConnectionRef get_connection(const entity_inst_t& dest) override;
151 ConnectionRef get_loopback_connection() override;
152 void mark_down(const entity_addr_t& addr) override;
153 void mark_down_all() override {
154 shutdown_connections(true);
155 }
156 /** @} // Connection Management */
157
158 /**
159 * @defgroup Inner classes
160 * @{
161 */
162
163 /**
164 * @} // Inner classes
165 */
166
167 protected:
168 /**
169 * @defgroup Messenger Interfaces
170 * @{
171 */
172 /**
173 * Start up the DispatchQueue thread once we have somebody to dispatch to.
174 */
175 void ready() override;
176 /** @} // Messenger Interfaces */
177
178 private:
179
180 /**
181 * @defgroup Utility functions
182 * @{
183 */
184
185 /**
186 * Create a connection associated with the given entity (of the given type).
187 * Initiate the connection. (This function returning does not guarantee
188 * connection success.)
189 *
190 * @param addr The address of the entity to connect to.
191 * @param type The peer type of the entity at the address.
192 *
193 * @return a pointer to the newly-created connection. Caller does not own a
194 * reference; take one if you need it.
195 */
196 AsyncConnectionRef create_connect(const entity_addr_t& addr, int type);
197
198 /**
199 * Queue up a Message for delivery to the entity specified
200 * by addr and dest_type.
201 * submit_message() is responsible for creating
202 * new AsyncConnection (and closing old ones) as necessary.
203 *
204 * @param m The Message to queue up. This function eats a reference.
205 * @param con The existing Connection to use, or NULL if you don't know of one.
206 * @param dest_addr The address to send the Message to.
207 * @param dest_type The peer type of the address we're sending to
208 * just drop silently under failure.
209 */
210 void submit_message(Message *m, AsyncConnectionRef con,
211 const entity_addr_t& dest_addr, int dest_type);
212
213 int _send_message(Message *m, const entity_inst_t& dest);
214 void _finish_bind(const entity_addr_t& bind_addr,
215 const entity_addr_t& listen_addr);
216
217 private:
218 static const uint64_t ReapDeadConnectionThreshold = 5;
219
220 NetworkStack *stack;
221 std::vector<Processor*> processors;
222 friend class Processor;
223 DispatchQueue dispatch_queue;
224
225 // the worker run messenger's cron jobs
226 Worker *local_worker;
227
228 std::string ms_type;
229
230 /// overall lock used for AsyncMessenger data structures
231 Mutex lock;
232 // AsyncMessenger stuff
233 /// approximately unique ID set by the Constructor for use in entity_addr_t
234 uint64_t nonce;
235
236 /// true, specifying we haven't learned our addr; set false when we find it.
237 // maybe this should be protected by the lock?
238 bool need_addr;
239
240 /**
241 * set to bind address if bind was called before NetworkStack was ready to
242 * bind
243 */
244 entity_addr_t pending_bind_addr;
245
246 /**
247 * false; set to true if a pending bind exists
248 */
249 bool pending_bind = false;
250
251 /**
252 * The following aren't lock-protected since you shouldn't be able to race
253 * the only writers.
254 */
255
256 /**
257 * false; set to true if the AsyncMessenger bound to a specific address;
258 * and set false again by Accepter::stop().
259 */
260 bool did_bind;
261 /// counter for the global seq our connection protocol uses
262 __u32 global_seq;
263 /// lock to protect the global_seq
264 ceph_spinlock_t global_seq_lock;
265
266 /**
267 * hash map of addresses to Asyncconnection
268 *
269 * NOTE: a Asyncconnection* with state CLOSED may still be in the map but is considered
270 * invalid and can be replaced by anyone holding the msgr lock
271 */
272 ceph::unordered_map<entity_addr_t, AsyncConnectionRef> conns;
273
274 /**
275 * list of connection are in teh process of accepting
276 *
277 * These are not yet in the conns map.
278 */
279 set<AsyncConnectionRef> accepting_conns;
280
281 /**
282 * list of connection are closed which need to be clean up
283 *
284 * Because AsyncMessenger and AsyncConnection follow a lock rule that
285 * we can lock AsyncMesenger::lock firstly then lock AsyncConnection::lock
286 * but can't reversed. This rule is aimed to avoid dead lock.
287 * So if AsyncConnection want to unregister itself from AsyncMessenger,
288 * we pick up this idea that just queue itself to this set and do lazy
289 * deleted for AsyncConnection. "_lookup_conn" must ensure not return a
290 * AsyncConnection in this set.
291 */
292 Mutex deleted_lock;
293 set<AsyncConnectionRef> deleted_conns;
294
295 EventCallbackRef reap_handler;
296
297 /// internal cluster protocol version, if any, for talking to entities of the same type.
298 int cluster_protocol;
299
300 Cond stop_cond;
301 bool stopped;
302
303 AsyncConnectionRef _lookup_conn(const entity_addr_t& k) {
304 assert(lock.is_locked());
305 ceph::unordered_map<entity_addr_t, AsyncConnectionRef>::iterator p = conns.find(k);
306 if (p == conns.end())
307 return NULL;
308
309 // lazy delete, see "deleted_conns"
310 Mutex::Locker l(deleted_lock);
311 if (deleted_conns.erase(p->second)) {
312 p->second->get_perf_counter()->dec(l_msgr_active_connections);
313 conns.erase(p);
314 return NULL;
315 }
316
317 return p->second;
318 }
319
320 void _init_local_connection() {
321 assert(lock.is_locked());
322 local_connection->peer_addr = my_inst.addr;
323 local_connection->peer_type = my_inst.name.type();
324 local_connection->set_features(CEPH_FEATURES_ALL);
325 ms_deliver_handle_fast_connect(local_connection.get());
326 }
327
328 void shutdown_connections(bool queue_reset);
329
330 public:
331
332 /// con used for sending messages to ourselves
333 ConnectionRef local_connection;
334
335 /**
336 * @defgroup AsyncMessenger internals
337 * @{
338 */
339 /**
340 * This wraps _lookup_conn.
341 */
342 AsyncConnectionRef lookup_conn(const entity_addr_t& k) {
343 Mutex::Locker l(lock);
344 return _lookup_conn(k);
345 }
346
347 int accept_conn(AsyncConnectionRef conn) {
348 Mutex::Locker l(lock);
349 auto it = conns.find(conn->peer_addr);
350 if (it != conns.end()) {
351 AsyncConnectionRef existing = it->second;
352
353 // lazy delete, see "deleted_conns"
354 // If conn already in, we will return 0
355 Mutex::Locker l(deleted_lock);
356 if (deleted_conns.erase(existing)) {
357 existing->get_perf_counter()->dec(l_msgr_active_connections);
358 conns.erase(it);
359 } else if (conn != existing) {
360 return -1;
361 }
362 }
363 conns[conn->peer_addr] = conn;
364 conn->get_perf_counter()->inc(l_msgr_active_connections);
365 accepting_conns.erase(conn);
366 return 0;
367 }
368
369 void learned_addr(const entity_addr_t &peer_addr_for_me);
370 void add_accept(Worker *w, ConnectedSocket cli_socket, entity_addr_t &addr);
371 NetworkStack *get_stack() {
372 return stack;
373 }
374
375 /**
376 * This wraps ms_deliver_get_authorizer. We use it for AsyncConnection.
377 */
378 AuthAuthorizer *get_authorizer(int peer_type, bool force_new) {
379 return ms_deliver_get_authorizer(peer_type, force_new);
380 }
381
382 /**
383 * This wraps ms_deliver_verify_authorizer; we use it for AsyncConnection.
384 */
385 bool verify_authorizer(Connection *con, int peer_type, int protocol, bufferlist& auth, bufferlist& auth_reply,
386 bool& isvalid, CryptoKey& session_key) {
387 return ms_deliver_verify_authorizer(con, peer_type, protocol, auth,
388 auth_reply, isvalid, session_key);
389 }
390 /**
391 * Increment the global sequence for this AsyncMessenger and return it.
392 * This is for the connect protocol, although it doesn't hurt if somebody
393 * else calls it.
394 *
395 * @return a global sequence ID that nobody else has seen.
396 */
397 __u32 get_global_seq(__u32 old=0) {
398 ceph_spin_lock(&global_seq_lock);
399 if (old > global_seq)
400 global_seq = old;
401 __u32 ret = ++global_seq;
402 ceph_spin_unlock(&global_seq_lock);
403 return ret;
404 }
405 /**
406 * Get the protocol version we support for the given peer type: either
407 * a peer protocol (if it matches our own), the protocol version for the
408 * peer (if we're connecting), or our protocol version (if we're accepting).
409 */
410 int get_proto_version(int peer_type, bool connect) const;
411
412 /**
413 * Fill in the address and peer type for the local connection, which
414 * is used for delivering messages back to ourself.
415 */
416 void init_local_connection() {
417 Mutex::Locker l(lock);
418 _init_local_connection();
419 }
420
421 /**
422 * Unregister connection from `conns`
423 *
424 * See "deleted_conns"
425 */
426 void unregister_conn(AsyncConnectionRef conn) {
427 Mutex::Locker l(deleted_lock);
428 deleted_conns.insert(conn);
429
430 if (deleted_conns.size() >= ReapDeadConnectionThreshold) {
431 local_worker->center.dispatch_event_external(reap_handler);
432 }
433 }
434
435 /**
436 * Reap dead connection from `deleted_conns`
437 *
438 * @return the number of dead connections
439 *
440 * See "deleted_conns"
441 */
442 int reap_dead();
443
444 /**
445 * @} // AsyncMessenger Internals
446 */
447 } ;
448
449 #endif /* CEPH_ASYNCMESSENGER_H */