// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
 * Copyright (C) 2013,2014 Cloudwatt <libre.licensing@cloudwatt.com>
 *
 * Author: Loic Dachary <loic@dachary.org>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 *
 */

/* Object Store Device (OSD) Monitor
 */

#ifndef CEPH_OSDMONITOR_H
#define CEPH_OSDMONITOR_H

#include <map>
#include <set>
using namespace std;

#include "include/types.h"
#include "common/simple_cache.hpp"
#include "msg/Messenger.h"

#include "osd/OSDMap.h"
#include "osd/OSDMapMapping.h"

#include "CreatingPGs.h"
#include "PaxosService.h"

class Monitor;
class PGMap;
class MonSession;
class MOSDMap;

#include "erasure-code/ErasureCodeInterface.h"
#include "mon/MonOpRequest.h"

#define OSD_METADATA_PREFIX "osd_metadata"

/// information about a particular peer's failure reports for one osd
struct failure_reporter_t {
  utime_t failed_since;     ///< when they think it failed
  MonOpRequestRef op;       ///< failure op request

  failure_reporter_t() {}
  explicit failure_reporter_t(utime_t s) : failed_since(s) {}
  ~failure_reporter_t() { }
};

/// information about all failure reports for one osd
struct failure_info_t {
  map<int, failure_reporter_t> reporters;  ///< reporter -> failed_since etc
  utime_t max_failed_since;                ///< most recent failed_since

  failure_info_t() {}

  utime_t get_failed_since() {
    if (max_failed_since == utime_t() && !reporters.empty()) {
      // the old max must have been canceled; recalculate.
      for (map<int, failure_reporter_t>::iterator p = reporters.begin();
           p != reporters.end();
           ++p)
        if (p->second.failed_since > max_failed_since)
          max_failed_since = p->second.failed_since;
    }
    return max_failed_since;
  }

  // Set the op request for the latest report. Return the op request we
  // previously held, if any, so the caller can discard it.
  MonOpRequestRef add_report(int who, utime_t failed_since,
                             MonOpRequestRef op) {
    map<int, failure_reporter_t>::iterator p = reporters.find(who);
    if (p == reporters.end()) {
      if (max_failed_since < failed_since)
        max_failed_since = failed_since;
      p = reporters.insert(
        map<int, failure_reporter_t>::value_type(
          who, failure_reporter_t(failed_since))).first;
    }

    MonOpRequestRef ret = p->second.op;
    p->second.op = op;
    return ret;
  }

  void take_report_messages(list<MonOpRequestRef>& ls) {
    for (map<int, failure_reporter_t>::iterator p = reporters.begin();
         p != reporters.end();
         ++p) {
      if (p->second.op) {
        ls.push_back(p->second.op);
        p->second.op.reset();
      }
    }
  }

  MonOpRequestRef cancel_report(int who) {
    map<int, failure_reporter_t>::iterator p = reporters.find(who);
    if (p == reporters.end())
      return MonOpRequestRef();
    MonOpRequestRef ret = p->second.op;
    reporters.erase(p);
    return ret;
  }
};
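
// Usage sketch (illustrative, not part of the interface contract): the
// monitor records each failure report (MOSDFailure) against the target osd
// and keeps only the newest op per reporter, e.g.
//
//   failure_info_t& fi = failure_info[target_osd];
//   MonOpRequestRef old_op = fi.add_report(reporter, failed_since, op);
//   // old_op, if set, is a superseded report that can be discarded.
//
// cancel_report() drops a single reporter (e.g. when a failure report is
// retracted), and take_report_messages() drains all pending ops so they can
// be re-dispatched.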
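/// Tracks, per pool, the last-epoch-clean values reported for each PG so the
/// monitor can compute a floor across the cluster; get_lower_bound() yields
/// the oldest epoch that must be kept, which bounds how far old osdmaps may
/// be trimmed.  (A summary of intent; see the .cc for the authoritative
/// logic.)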
class LastEpochClean {
  struct Lec {
    vector<epoch_t> epoch_by_pg;
    ps_t next_missing = 0;
    epoch_t floor = std::numeric_limits<epoch_t>::max();
    void report(ps_t pg, epoch_t last_epoch_clean);
  };
  std::map<uint64_t, Lec> report_by_pool;
public:
  void report(const pg_t& pg, epoch_t last_epoch_clean);
  void remove_pool(uint64_t pool);
  epoch_t get_lower_bound(const OSDMap& latest) const;
};


class OSDMonitor : public PaxosService {
  CephContext *cct;

public:
  OSDMap osdmap;

  // [leader]
  OSDMap::Incremental pending_inc;
  map<int, bufferlist> pending_metadata;
  set<int> pending_metadata_rm;
  map<int, failure_info_t> failure_info;
  map<int,utime_t> down_pending_out;  // osd down -> out

  map<int,double> osd_weight;

  SimpleLRU<version_t, bufferlist> inc_osd_cache;
  SimpleLRU<version_t, bufferlist> full_osd_cache;

  bool check_failures(utime_t now);
  bool check_failure(utime_t now, int target_osd, failure_info_t& fi);
  void force_failure(int target_osd, int by);

  // the time of the last message (MSG_ALIVE or MSG_PGTEMP) proposed without delay
  utime_t last_attempted_minwait_time;

  bool _have_pending_crush();
  CrushWrapper &_get_stable_crush();
  void _get_pending_crush(CrushWrapper& newcrush);

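  /// Per-pool fast-read setting: explicitly on or off, or DEFAULT to fall
  /// back to the configured default (for EC pools, presumably
  /// osd_pool_default_ec_fast_read).  A descriptive note, not normative.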
  enum FastReadType {
    FAST_READ_OFF,
    FAST_READ_ON,
    FAST_READ_DEFAULT
  };

  // svc
public:
  void create_initial() override;
  void get_store_prefixes(std::set<string>& s) override;

private:
  void update_from_paxos(bool *need_bootstrap) override;
  void create_pending() override;  // prepare a new pending
  void encode_pending(MonitorDBStore::TransactionRef t) override;
  void on_active() override;
  void on_restart() override;
  void on_shutdown() override;
  /**
   * we haven't delegated full version stashing to paxosservice for some
   * time now, which makes this function useless in the current context.
   */
  void encode_full(MonitorDBStore::TransactionRef t) override { }
  /**
   * do not let paxosservice periodically stash full osdmaps, or we will
   * break our locally-managed full maps.  (update_from_paxos loads the
   * latest and writes them out going forward from there, but if we just
   * synced that may mean we skip some.)
   */
  bool should_stash_full() override {
    return false;
  }

  /**
   * hook into trim to include the oldest full map in the trim transaction
   *
   * This ensures that anyone post-sync will have enough to rebuild their
   * full osdmaps.
   */
  void encode_trim_extra(MonitorDBStore::TransactionRef tx, version_t first) override;

  void update_msgr_features();
  int check_cluster_features(uint64_t features, stringstream &ss);
  /**
   * check if the cluster supports the features required by the given
   * crush map.  Outputs the daemons which don't support it to the
   * stringstream.
   *
   * @returns true if the map is passable, false otherwise
   */
  bool validate_crush_against_features(const CrushWrapper *newcrush,
                                       stringstream &ss);
  void check_osdmap_subs();
  void share_map_with_random_osd();

  Mutex prime_pg_temp_lock = {"OSDMonitor::prime_pg_temp_lock"};
  struct PrimeTempJob : public ParallelPGMapper::Job {
    OSDMonitor *osdmon;
    PrimeTempJob(const OSDMap& om, OSDMonitor *m)
      : ParallelPGMapper::Job(&om), osdmon(m) {}
    void process(int64_t pool, unsigned ps_begin, unsigned ps_end) override {
      for (unsigned ps = ps_begin; ps < ps_end; ++ps) {
        pg_t pgid(ps, pool);
        osdmon->prime_pg_temp(*osdmap, pgid);
      }
    }
    void complete() override {}
  };
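  // Priming pg_temp: before a pending map that changes mappings is
  // committed, precompute (in parallel, via PrimeTempJob) pg_temp entries
  // that pin each affected PG to its previous acting set, so clients keep a
  // usable mapping while the new mapping peers.  This is a descriptive note
  // on intent; the exact conditions live in the .cc implementation.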
  void maybe_prime_pg_temp();
  void prime_pg_temp(const OSDMap& next, pg_t pgid);

  ParallelPGMapper mapper;                        ///< for background pg work
  OSDMapMapping mapping;                          ///< pg <-> osd mappings
  unique_ptr<ParallelPGMapper::Job> mapping_job;  ///< background mapping job
  void start_mapping();

  void update_logger();

  void handle_query(PaxosServiceMessage *m);
  bool preprocess_query(MonOpRequestRef op) override;  // true if processed.
  bool prepare_update(MonOpRequestRef op) override;
  bool should_propose(double &delay) override;

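  /// oldest osdmap version we can safely trim to (0 if none); in practice
  /// this is bounded by the minimum last-epoch-clean across the cluster
  /// (see get_min_last_epoch_clean()).  A descriptive note, not normative.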
  version_t get_trim_to() override;

  bool can_mark_down(int o);
  bool can_mark_up(int o);
  bool can_mark_out(int o);
  bool can_mark_in(int o);

  // ...
  MOSDMap *build_latest_full();
  MOSDMap *build_incremental(epoch_t first, epoch_t last);
  void send_full(MonOpRequestRef op);
  void send_incremental(MonOpRequestRef op, epoch_t first);
public:
  // @param req an optional op request; if the osdmaps are replies to it,
  // @c Monitor::send_reply() can mark_event() with it.
  void send_incremental(epoch_t first, MonSession *session, bool onetime,
                        MonOpRequestRef req = MonOpRequestRef());

private:
  void print_utilization(ostream &out, Formatter *f, bool tree) const;

  bool check_source(PaxosServiceMessage *m, uuid_d fsid);

  bool preprocess_get_osdmap(MonOpRequestRef op);

  bool preprocess_mark_me_down(MonOpRequestRef op);

  friend class C_AckMarkedDown;
  bool preprocess_failure(MonOpRequestRef op);
  bool prepare_failure(MonOpRequestRef op);
  bool prepare_mark_me_down(MonOpRequestRef op);
  void process_failures();
  void take_all_failures(list<MonOpRequestRef>& ls);

  bool preprocess_full(MonOpRequestRef op);
  bool prepare_full(MonOpRequestRef op);

  bool preprocess_boot(MonOpRequestRef op);
  bool prepare_boot(MonOpRequestRef op);
  void _booted(MonOpRequestRef op, bool logit);

  void update_up_thru(int from, epoch_t up_thru);
  bool preprocess_alive(MonOpRequestRef op);
  bool prepare_alive(MonOpRequestRef op);
  void _reply_map(MonOpRequestRef op, epoch_t e);

  bool preprocess_pgtemp(MonOpRequestRef op);
  bool prepare_pgtemp(MonOpRequestRef op);

  bool preprocess_pg_created(MonOpRequestRef op);
  bool prepare_pg_created(MonOpRequestRef op);

  int _check_remove_pool(int64_t pool_id, const pg_pool_t &pool, ostream *ss);
  bool _check_become_tier(
      int64_t tier_pool_id, const pg_pool_t *tier_pool,
      int64_t base_pool_id, const pg_pool_t *base_pool,
      int *err, ostream *ss) const;
  bool _check_remove_tier(
      int64_t base_pool_id, const pg_pool_t *base_pool,
      const pg_pool_t *tier_pool,
      int *err, ostream *ss) const;

  int _prepare_remove_pool(int64_t pool, ostream *ss, bool no_fake);
  int _prepare_rename_pool(int64_t pool, string newname);

  bool preprocess_pool_op(MonOpRequestRef op);
  bool preprocess_pool_op_create(MonOpRequestRef op);
  bool prepare_pool_op(MonOpRequestRef op);
  bool prepare_pool_op_create(MonOpRequestRef op);
  bool prepare_pool_op_delete(MonOpRequestRef op);
  int crush_rename_bucket(const string& srcname,
                          const string& dstname,
                          ostream *ss);
  void check_legacy_ec_plugin(const string& plugin,
                              const string& profile) const;
  int normalize_profile(const string& profilename,
                        ErasureCodeProfile &profile,
                        bool force,
                        ostream *ss);
  int crush_rule_create_erasure(const string &name,
                                const string &profile,
                                int *rule,
                                ostream *ss);
  int get_crush_rule(const string &rule_name,
                     int *crush_rule,
                     ostream *ss);
  int get_erasure_code(const string &erasure_code_profile,
                       ErasureCodeInterfaceRef *erasure_code,
                       ostream *ss) const;
  int prepare_pool_crush_rule(const unsigned pool_type,
                              const string &erasure_code_profile,
                              const string &rule_name,
                              int *crush_rule,
                              ostream *ss);
  bool erasure_code_profile_in_use(
      const mempool::osdmap::map<int64_t, pg_pool_t> &pools,
      const string &profile,
      ostream *ss);
  int parse_erasure_code_profile(const vector<string> &erasure_code_profile,
                                 map<string,string> *erasure_code_profile_map,
                                 ostream *ss);
  int prepare_pool_size(const unsigned pool_type,
                        const string &erasure_code_profile,
                        unsigned *size, unsigned *min_size,
                        ostream *ss);
  int prepare_pool_stripe_width(const unsigned pool_type,
                                const string &erasure_code_profile,
                                unsigned *stripe_width,
                                ostream *ss);
  int prepare_new_pool(string& name, uint64_t auid,
                       int crush_rule,
                       const string &crush_rule_name,
                       unsigned pg_num, unsigned pgp_num,
                       const string &erasure_code_profile,
                       const unsigned pool_type,
                       const uint64_t expected_num_objects,
                       FastReadType fast_read,
                       ostream *ss);
  int prepare_new_pool(MonOpRequestRef op);

  void update_pool_flags(int64_t pool_id, uint64_t flags);
  bool update_pools_status();

  bool prepare_set_flag(MonOpRequestRef op, int flag);
  bool prepare_unset_flag(MonOpRequestRef op, int flag);

  void _pool_op_reply(MonOpRequestRef op,
                      int ret, epoch_t epoch, bufferlist *blp=NULL);

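  // The C_* completions below share one retry pattern: on success (r >= 0)
  // they send the deferred reply; -ECANCELED means the op was dropped (e.g.
  // we are no longer leader), so they do nothing; -EAGAIN means service
  // state changed, so the op is re-dispatched to be processed afresh.  Any
  // other value is a bug (assert).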
  struct C_Booted : public C_MonOp {
    OSDMonitor *cmon;
    bool logit;
    C_Booted(OSDMonitor *cm, MonOpRequestRef op_, bool l=true) :
      C_MonOp(op_), cmon(cm), logit(l) {}
    void _finish(int r) override {
      if (r >= 0)
        cmon->_booted(op, logit);
      else if (r == -ECANCELED)
        return;
      else if (r == -EAGAIN)
        cmon->dispatch(op);
      else
        assert(0 == "bad C_Booted return value");
    }
  };

  struct C_ReplyMap : public C_MonOp {
    OSDMonitor *osdmon;
    epoch_t e;
    C_ReplyMap(OSDMonitor *o, MonOpRequestRef op_, epoch_t ee)
      : C_MonOp(op_), osdmon(o), e(ee) {}
    void _finish(int r) override {
      if (r >= 0)
        osdmon->_reply_map(op, e);
      else if (r == -ECANCELED)
        return;
      else if (r == -EAGAIN)
        osdmon->dispatch(op);
      else
        assert(0 == "bad C_ReplyMap return value");
    }
  };
  struct C_PoolOp : public C_MonOp {
    OSDMonitor *osdmon;
    int replyCode;
    int epoch;
    bufferlist reply_data;
    C_PoolOp(OSDMonitor *osd, MonOpRequestRef op_, int rc, int e, bufferlist *rd=NULL) :
      C_MonOp(op_), osdmon(osd), replyCode(rc), epoch(e) {
      if (rd)
        reply_data = *rd;
    }
    void _finish(int r) override {
      if (r >= 0)
        osdmon->_pool_op_reply(op, replyCode, epoch, &reply_data);
      else if (r == -ECANCELED)
        return;
      else if (r == -EAGAIN)
        osdmon->dispatch(op);
      else
        assert(0 == "bad C_PoolOp return value");
    }
  };

  bool preprocess_remove_snaps(MonOpRequestRef op);
  bool prepare_remove_snaps(MonOpRequestRef op);

  OpTracker op_tracker;

  int load_metadata(int osd, map<string, string>& m, ostream *err);
  void count_metadata(const string& field, Formatter *f);
  int get_osd_objectstore_type(int osd, std::string *type);
  bool is_pool_currently_all_bluestore(int64_t pool_id, const pg_pool_t &pool,
                                       ostream *err);

  // when we last received PG stats from each osd
  map<int,utime_t> last_osd_report;
  // TODO: use last_osd_report to store the osd report epochs, once we don't
  // need to upgrade from pre-luminous releases.
  map<int,epoch_t> osd_epochs;
  LastEpochClean last_epoch_clean;
  bool preprocess_beacon(MonOpRequestRef op);
  bool prepare_beacon(MonOpRequestRef op);
  epoch_t get_min_last_epoch_clean() const;

  friend class C_UpdateCreatingPGs;
  std::map<int, std::map<epoch_t, std::set<pg_t>>> creating_pgs_by_osd_epoch;
  std::vector<pg_t> pending_created_pgs;
  // the epoch when the pg mapping was calculated
  epoch_t creating_pgs_epoch = 0;
  creating_pgs_t creating_pgs;
  std::mutex creating_pgs_lock;

  creating_pgs_t update_pending_pgs(const OSDMap::Incremental& inc);
  void trim_creating_pgs(creating_pgs_t *creating_pgs,
                         const ceph::unordered_map<pg_t,pg_stat_t>& pgm);
  unsigned scan_for_creating_pgs(
    const mempool::osdmap::map<int64_t,pg_pool_t>& pools,
    const mempool::osdmap::set<int64_t>& removed_pools,
    utime_t modified,
    creating_pgs_t* creating_pgs) const;
  pair<int32_t, pg_t> get_parent_pg(pg_t pgid) const;
  void update_creating_pgs();
  void check_pg_creates_subs();
  epoch_t send_pg_creates(int osd, Connection *con, epoch_t next);

  int32_t _allocate_osd_id(int32_t* existing_id);

public:
  OSDMonitor(CephContext *cct, Monitor *mn, Paxos *p, const string& service_name);

  void tick() override;  // check state, take actions

  void get_health(list<pair<health_status_t,string> >& summary,
                  list<pair<health_status_t,string> > *detail,
                  CephContext *cct) const override;
  bool preprocess_command(MonOpRequestRef op);
  bool prepare_command(MonOpRequestRef op);
  bool prepare_command_impl(MonOpRequestRef op, map<string,cmd_vartype>& cmdmap);

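  // Helpers behind the "osd create/new/destroy/purge" commands.  Roughly
  // (see the .cc for the authoritative behavior): create/new allocate or
  // validate an osd id for a given uuid; destroy marks an osd destroyed so
  // its id can be reused, while purge additionally removes it from the
  // osdmap and crush map.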
  int validate_osd_create(
      const int32_t id,
      const uuid_d& uuid,
      const bool check_osd_exists,
      int32_t* existing_id,
      stringstream& ss);
  int prepare_command_osd_create(
      const int32_t id,
      const uuid_d& uuid,
      int32_t* existing_id,
      stringstream& ss);
  void do_osd_create(const int32_t id, const uuid_d& uuid, int32_t* new_id);
  int prepare_command_osd_purge(int32_t id, stringstream& ss);
  int prepare_command_osd_destroy(int32_t id, stringstream& ss);
  int _prepare_command_osd_crush_remove(
      CrushWrapper &newcrush,
      int32_t id,
      int32_t ancestor,
      bool has_ancestor,
      bool unlink_only);
  void do_osd_crush_remove(CrushWrapper& newcrush);
  int prepare_command_osd_crush_remove(
      CrushWrapper &newcrush,
      int32_t id,
      int32_t ancestor,
      bool has_ancestor,
      bool unlink_only);
  int prepare_command_osd_remove(int32_t id);
  int prepare_command_osd_new(
      MonOpRequestRef op,
      const map<string,cmd_vartype>& cmdmap,
      const map<string,string>& secrets,
      stringstream &ss,
      Formatter *f);

  int prepare_command_pool_set(map<string,cmd_vartype> &cmdmap,
                               stringstream& ss);

  bool handle_osd_timeouts(const utime_t &now,
                           std::map<int,utime_t> &last_osd_report);

  void send_latest(MonOpRequestRef op, epoch_t start=0);
  void send_latest_now_nodelete(MonOpRequestRef op, epoch_t start=0) {
    op->mark_osdmon_event(__func__);
    send_incremental(op, start);
  }

  int get_version(version_t ver, bufferlist& bl) override;
  int get_version_full(version_t ver, bufferlist& bl) override;

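  /// blacklist the given address until the given time; returns the epoch of
  /// the pending map in which the entry takes effect (a descriptive note;
  /// see the .cc for the authoritative behavior).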
  epoch_t blacklist(const entity_addr_t& a, utime_t until);

  void dump_info(Formatter *f);
  int dump_osd_metadata(int osd, Formatter *f, ostream *err);
  void print_nodes(Formatter *f);

  void check_osdmap_sub(Subscription *sub);
  void check_pg_creates_sub(Subscription *sub);

  void add_flag(int flag) {
    if (!(osdmap.flags & flag)) {
      if (pending_inc.new_flags < 0)
        pending_inc.new_flags = osdmap.flags;
      pending_inc.new_flags |= flag;
    }
  }

  void remove_flag(int flag) {
    if (osdmap.flags & flag) {
      if (pending_inc.new_flags < 0)
        pending_inc.new_flags = osdmap.flags;
      pending_inc.new_flags &= ~flag;
    }
  }
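
  // Usage sketch (illustrative): both helpers stage the change in
  // pending_inc, seeding new_flags from the current map on first use, e.g.
  //
  //   add_flag(CEPH_OSDMAP_NOOUT);     // takes effect once the pending
  //   remove_flag(CEPH_OSDMAP_NOOUT);  // incremental is proposed/committed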
};

#endif