// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
 * Copyright (C) 2017 OVH
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 *
 */
#include "acconfig.h"

#include <fstream>
#include <iostream>
#include <errno.h>
#include <sys/stat.h>
#include <signal.h>
#include <ctype.h>
#include <boost/scoped_ptr.hpp>

#ifdef HAVE_SYS_PARAM_H
#include <sys/param.h>
#endif

#ifdef HAVE_SYS_MOUNT_H
#include <sys/mount.h>
#endif

#include "osd/PG.h"

#include "include/types.h"
#include "include/compat.h"

#include "OSD.h"
#include "OSDMap.h"
#include "Watch.h"
#include "osdc/Objecter.h"

#include "common/errno.h"
#include "common/ceph_argparse.h"
#include "common/ceph_time.h"
#include "common/version.h"
#include "common/io_priority.h"

#include "os/ObjectStore.h"
#ifdef HAVE_LIBFUSE
#include "os/FuseStore.h"
#endif

#include "PrimaryLogPG.h"


#include "msg/Messenger.h"
#include "msg/Message.h"

#include "mon/MonClient.h"

#include "messages/MLog.h"

#include "messages/MGenericMessage.h"
#include "messages/MOSDPing.h"
#include "messages/MOSDFailure.h"
#include "messages/MOSDMarkMeDown.h"
#include "messages/MOSDFull.h"
#include "messages/MOSDOp.h"
#include "messages/MOSDOpReply.h"
#include "messages/MOSDBackoff.h"
#include "messages/MOSDBeacon.h"
#include "messages/MOSDRepOp.h"
#include "messages/MOSDRepOpReply.h"
#include "messages/MOSDBoot.h"
#include "messages/MOSDPGTemp.h"

#include "messages/MOSDMap.h"
#include "messages/MMonGetOSDMap.h"
#include "messages/MOSDPGNotify.h"
#include "messages/MOSDPGQuery.h"
#include "messages/MOSDPGLog.h"
#include "messages/MOSDPGRemove.h"
#include "messages/MOSDPGInfo.h"
#include "messages/MOSDPGCreate.h"
#include "messages/MOSDPGTrim.h"
#include "messages/MOSDPGScan.h"
#include "messages/MOSDPGBackfill.h"
#include "messages/MBackfillReserve.h"
#include "messages/MRecoveryReserve.h"
#include "messages/MOSDForceRecovery.h"
#include "messages/MOSDECSubOpWrite.h"
#include "messages/MOSDECSubOpWriteReply.h"
#include "messages/MOSDECSubOpRead.h"
#include "messages/MOSDECSubOpReadReply.h"
#include "messages/MOSDPGCreated.h"
#include "messages/MOSDPGUpdateLogMissing.h"
#include "messages/MOSDPGUpdateLogMissingReply.h"

#include "messages/MOSDAlive.h"

#include "messages/MOSDScrub.h"
#include "messages/MOSDScrubReserve.h"
#include "messages/MOSDRepScrub.h"

#include "messages/MMonCommand.h"
#include "messages/MCommand.h"
#include "messages/MCommandReply.h"

#include "messages/MPGStats.h"
#include "messages/MPGStatsAck.h"

#include "messages/MWatchNotify.h"
#include "messages/MOSDPGPush.h"
#include "messages/MOSDPGPushReply.h"
#include "messages/MOSDPGPull.h"

#include "common/perf_counters.h"
#include "common/Timer.h"
#include "common/LogClient.h"
#include "common/AsyncReserver.h"
#include "common/HeartbeatMap.h"
#include "common/admin_socket.h"
#include "common/ceph_context.h"

#include "global/signal_handler.h"
#include "global/pidfile.h"

#include "include/color.h"
#include "perfglue/cpu_profiler.h"
#include "perfglue/heap_profiler.h"

#include "osd/OpRequest.h"

#include "auth/AuthAuthorizeHandler.h"
#include "auth/RotatingKeyRing.h"
#include "common/errno.h"

#include "objclass/objclass.h"

#include "common/cmdparse.h"
#include "include/str_list.h"
#include "include/util.h"

#include "include/assert.h"
#include "common/config.h"
#include "common/EventTrace.h"

#ifdef WITH_LTTNG
#define TRACEPOINT_DEFINE
#define TRACEPOINT_PROBE_DYNAMIC_LINKAGE
#include "tracing/osd.h"
#undef TRACEPOINT_PROBE_DYNAMIC_LINKAGE
#undef TRACEPOINT_DEFINE
#else
#define tracepoint(...)
#endif

#define dout_context cct
#define dout_subsys ceph_subsys_osd
#undef dout_prefix
#define dout_prefix _prefix(_dout, whoami, get_osdmap_epoch())


const double OSD::OSD_TICK_INTERVAL = 1.0;

static ostream& _prefix(std::ostream* _dout, int whoami, epoch_t epoch) {
  return *_dout << "osd." << whoami << " " << epoch << " ";
}

//Initial features in new superblock.
//Features here are also automatically upgraded
CompatSet OSD::get_osd_initial_compat_set() {
  CompatSet::FeatureSet ceph_osd_feature_compat;
  CompatSet::FeatureSet ceph_osd_feature_ro_compat;
  CompatSet::FeatureSet ceph_osd_feature_incompat;
  ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_BASE);
  ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_PGINFO);
  ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_OLOC);
  ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_LEC);
  ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_CATEGORIES);
  ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_HOBJECTPOOL);
  ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_BIGINFO);
  ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_LEVELDBINFO);
  ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_LEVELDBLOG);
  ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_SNAPMAPPER);
  ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_HINTS);
  ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_PGMETA);
  ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_MISSING);
  ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_FASTINFO);
  ceph_osd_feature_incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_RECOVERY_DELETES);
  return CompatSet(ceph_osd_feature_compat, ceph_osd_feature_ro_compat,
                   ceph_osd_feature_incompat);
}

//Features are added here that this OSD supports.
CompatSet OSD::get_osd_compat_set() {
  CompatSet compat = get_osd_initial_compat_set();
  //Any features here can be set in code, but not in initial superblock
  compat.incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_SHARDS);
  return compat;
}

OSDService::OSDService(OSD *osd) :
  osd(osd),
  cct(osd->cct),
  meta_osr(new ObjectStore::Sequencer("meta")),
  whoami(osd->whoami), store(osd->store),
  log_client(osd->log_client), clog(osd->clog),
  pg_recovery_stats(osd->pg_recovery_stats),
  cluster_messenger(osd->cluster_messenger),
  client_messenger(osd->client_messenger),
  logger(osd->logger),
  recoverystate_perf(osd->recoverystate_perf),
  monc(osd->monc),
  peering_wq(osd->peering_wq),
  recovery_gen_wq("recovery_gen_wq", cct->_conf->osd_recovery_thread_timeout,
                  &osd->disk_tp),
  class_handler(osd->class_handler),
  pg_epoch_lock("OSDService::pg_epoch_lock"),
  publish_lock("OSDService::publish_lock"),
  pre_publish_lock("OSDService::pre_publish_lock"),
  max_oldest_map(0),
  peer_map_epoch_lock("OSDService::peer_map_epoch_lock"),
  sched_scrub_lock("OSDService::sched_scrub_lock"), scrubs_pending(0),
  scrubs_active(0),
  agent_lock("OSDService::agent_lock"),
  agent_valid_iterator(false),
  agent_ops(0),
  flush_mode_high_count(0),
  agent_active(true),
  agent_thread(this),
  agent_stop_flag(false),
  agent_timer_lock("OSDService::agent_timer_lock"),
  agent_timer(osd->client_messenger->cct, agent_timer_lock),
  last_recalibrate(ceph_clock_now()),
  promote_max_objects(0),
  promote_max_bytes(0),
  objecter(new Objecter(osd->client_messenger->cct, osd->objecter_messenger, osd->monc, NULL, 0, 0)),
  objecter_finisher(osd->client_messenger->cct),
  watch_lock("OSDService::watch_lock"),
  watch_timer(osd->client_messenger->cct, watch_lock),
  next_notif_id(0),
  recovery_request_lock("OSDService::recovery_request_lock"),
  recovery_request_timer(cct, recovery_request_lock, false),
  recovery_sleep_lock("OSDService::recovery_sleep_lock"),
  recovery_sleep_timer(cct, recovery_sleep_lock, false),
  reserver_finisher(cct),
  local_reserver(&reserver_finisher, cct->_conf->osd_max_backfills,
                 cct->_conf->osd_min_recovery_priority),
  remote_reserver(&reserver_finisher, cct->_conf->osd_max_backfills,
                  cct->_conf->osd_min_recovery_priority),
  pg_temp_lock("OSDService::pg_temp_lock"),
  snap_sleep_lock("OSDService::snap_sleep_lock"),
  snap_sleep_timer(
    osd->client_messenger->cct, snap_sleep_lock, false /* relax locking */),
  scrub_sleep_lock("OSDService::scrub_sleep_lock"),
  scrub_sleep_timer(
    osd->client_messenger->cct, scrub_sleep_lock, false /* relax locking */),
  snap_reserver(&reserver_finisher,
                cct->_conf->osd_max_trimming_pgs),
  recovery_lock("OSDService::recovery_lock"),
  recovery_ops_active(0),
  recovery_ops_reserved(0),
  recovery_paused(false),
  map_cache_lock("OSDService::map_cache_lock"),
  map_cache(cct, cct->_conf->osd_map_cache_size),
  map_bl_cache(cct->_conf->osd_map_cache_size),
  map_bl_inc_cache(cct->_conf->osd_map_cache_size),
  in_progress_split_lock("OSDService::in_progress_split_lock"),
  stat_lock("OSDService::stat_lock"),
  full_status_lock("OSDService::full_status_lock"),
  cur_state(NONE),
  cur_ratio(0),
  epoch_lock("OSDService::epoch_lock"),
  boot_epoch(0), up_epoch(0), bind_epoch(0),
  is_stopping_lock("OSDService::is_stopping_lock")
#ifdef PG_DEBUG_REFS
  , pgid_lock("OSDService::pgid_lock")
#endif
{
  objecter->init();
}

OSDService::~OSDService()
{
  delete objecter;
}



#ifdef PG_DEBUG_REFS
void OSDService::add_pgid(spg_t pgid, PG *pg){
  Mutex::Locker l(pgid_lock);
  if (!pgid_tracker.count(pgid)) {
    live_pgs[pgid] = pg;
  }
  pgid_tracker[pgid]++;
}
void OSDService::remove_pgid(spg_t pgid, PG *pg)
{
  Mutex::Locker l(pgid_lock);
  assert(pgid_tracker.count(pgid));
  assert(pgid_tracker[pgid] > 0);
  pgid_tracker[pgid]--;
  if (pgid_tracker[pgid] == 0) {
    pgid_tracker.erase(pgid);
    live_pgs.erase(pgid);
  }
}
void OSDService::dump_live_pgids()
{
  Mutex::Locker l(pgid_lock);
  derr << "live pgids:" << dendl;
  for (map<spg_t, int>::const_iterator i = pgid_tracker.cbegin();
       i != pgid_tracker.cend();
       ++i) {
    derr << "\t" << *i << dendl;
    live_pgs[i->first]->dump_live_ids();
  }
}
#endif

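// Register 'children' as pending splits of 'parent'. Locked-context helper
// used by _maybe_split_pgid() and the split-start path; in_progress_split_lock
// must be held by the caller.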
void OSDService::_start_split(spg_t parent, const set<spg_t> &children)
{
  for (set<spg_t>::const_iterator i = children.begin();
       i != children.end();
       ++i) {
    dout(10) << __func__ << ": Starting split on pg " << *i
             << ", parent=" << parent << dendl;
    assert(!pending_splits.count(*i));
    assert(!in_progress_splits.count(*i));
    pending_splits.insert(make_pair(*i, parent));

    assert(!rev_pending_splits[parent].count(*i));
    rev_pending_splits[parent].insert(*i);
  }
}

void OSDService::mark_split_in_progress(spg_t parent, const set<spg_t> &children)
{
  Mutex::Locker l(in_progress_split_lock);
  map<spg_t, set<spg_t> >::iterator piter = rev_pending_splits.find(parent);
  assert(piter != rev_pending_splits.end());
  for (set<spg_t>::const_iterator i = children.begin();
       i != children.end();
       ++i) {
    assert(piter->second.count(*i));
    assert(pending_splits.count(*i));
    assert(!in_progress_splits.count(*i));
    assert(pending_splits[*i] == parent);

    pending_splits.erase(*i);
    piter->second.erase(*i);
    in_progress_splits.insert(*i);
  }
  if (piter->second.empty())
    rev_pending_splits.erase(piter);
}

void OSDService::cancel_pending_splits_for_parent(spg_t parent)
{
  Mutex::Locker l(in_progress_split_lock);
  _cancel_pending_splits_for_parent(parent);
}

void OSDService::_cancel_pending_splits_for_parent(spg_t parent)
{
  map<spg_t, set<spg_t> >::iterator piter = rev_pending_splits.find(parent);
  if (piter == rev_pending_splits.end())
    return;

  for (set<spg_t>::iterator i = piter->second.begin();
       i != piter->second.end();
       ++i) {
    assert(pending_splits.count(*i));
    assert(!in_progress_splits.count(*i));
    pending_splits.erase(*i);
    dout(10) << __func__ << ": Completing split on pg " << *i
             << " for parent: " << parent << dendl;
    _cancel_pending_splits_for_parent(*i);
  }
  rev_pending_splits.erase(piter);
}

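// If pgid's pool grew its pg_num between old_map and new_map, register the
// resulting children as pending splits. Called with in_progress_split_lock held.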
void OSDService::_maybe_split_pgid(OSDMapRef old_map,
                                   OSDMapRef new_map,
                                   spg_t pgid)
{
  assert(old_map->have_pg_pool(pgid.pool()));
  int old_pgnum = old_map->get_pg_num(pgid.pool());
  if (pgid.ps() < static_cast<unsigned>(old_pgnum)) {
    set<spg_t> children;
    if (pgid.is_split(old_pgnum,
                      new_map->get_pg_num(pgid.pool()), &children)) {
      _start_split(pgid, children);
    }
  } else {
    assert(pgid.ps() < static_cast<unsigned>(new_map->get_pg_num(pgid.pool())));
  }
}

void OSDService::init_splits_between(spg_t pgid,
                                     OSDMapRef frommap,
                                     OSDMapRef tomap)
{
  // First, check whether we can avoid this potentially expensive check
  if (tomap->have_pg_pool(pgid.pool()) &&
      pgid.is_split(
        frommap->get_pg_num(pgid.pool()),
        tomap->get_pg_num(pgid.pool()),
        NULL)) {
    // Ok, a split happened, so we need to walk the osdmaps
    set<spg_t> new_pgs; // pgs to scan on each map
    new_pgs.insert(pgid);
    OSDMapRef curmap(get_map(frommap->get_epoch()));
    for (epoch_t e = frommap->get_epoch() + 1;
         e <= tomap->get_epoch();
         ++e) {
      OSDMapRef nextmap(try_get_map(e));
      if (!nextmap)
        continue;
      set<spg_t> even_newer_pgs; // pgs added in this loop
      for (set<spg_t>::iterator i = new_pgs.begin(); i != new_pgs.end(); ++i) {
        set<spg_t> split_pgs;
        if (i->is_split(curmap->get_pg_num(i->pool()),
                        nextmap->get_pg_num(i->pool()),
                        &split_pgs)) {
          start_split(*i, split_pgs);
          even_newer_pgs.insert(split_pgs.begin(), split_pgs.end());
        }
      }
      new_pgs.insert(even_newer_pgs.begin(), even_newer_pgs.end());
      curmap = nextmap;
    }
    assert(curmap == tomap); // we must have had both frommap and tomap
  }
}

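// After a map change, revisit the in-progress and pending split sets: drop
// entries whose pool no longer exists, and re-check the survivors for further
// splits against new_map.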
void OSDService::expand_pg_num(OSDMapRef old_map,
                               OSDMapRef new_map)
{
  Mutex::Locker l(in_progress_split_lock);
  for (set<spg_t>::iterator i = in_progress_splits.begin();
       i != in_progress_splits.end();
       ) {
    if (!new_map->have_pg_pool(i->pool())) {
      in_progress_splits.erase(i++);
    } else {
      _maybe_split_pgid(old_map, new_map, *i);
      ++i;
    }
  }
  for (map<spg_t, spg_t>::iterator i = pending_splits.begin();
       i != pending_splits.end();
       ) {
    if (!new_map->have_pg_pool(i->first.pool())) {
      rev_pending_splits.erase(i->second);
      pending_splits.erase(i++);
    } else {
      _maybe_split_pgid(old_map, new_map, i->first);
      ++i;
    }
  }
}

bool OSDService::splitting(spg_t pgid)
{
  Mutex::Locker l(in_progress_split_lock);
  return in_progress_splits.count(pgid) ||
         pending_splits.count(pgid);
}

void OSDService::complete_split(const set<spg_t> &pgs)
{
  Mutex::Locker l(in_progress_split_lock);
  for (set<spg_t>::const_iterator i = pgs.begin();
       i != pgs.end();
       ++i) {
    dout(10) << __func__ << ": Completing split on pg " << *i << dendl;
    assert(!pending_splits.count(*i));
    assert(in_progress_splits.count(*i));
    in_progress_splits.erase(*i);
  }
}

void OSDService::need_heartbeat_peer_update()
{
  osd->need_heartbeat_peer_update();
}

void OSDService::pg_stat_queue_enqueue(PG *pg)
{
  osd->pg_stat_queue_enqueue(pg);
}

void OSDService::pg_stat_queue_dequeue(PG *pg)
{
  osd->pg_stat_queue_dequeue(pg);
}

void OSDService::start_shutdown()
{
  {
    Mutex::Locker l(agent_timer_lock);
    agent_timer.shutdown();
  }

  {
    Mutex::Locker l(recovery_sleep_lock);
    recovery_sleep_timer.shutdown();
  }
}

void OSDService::shutdown_reserver()
{
  reserver_finisher.wait_for_empty();
  reserver_finisher.stop();
}

void OSDService::shutdown()
{
  {
    Mutex::Locker l(watch_lock);
    watch_timer.shutdown();
  }

  objecter->shutdown();
  objecter_finisher.wait_for_empty();
  objecter_finisher.stop();

  {
    Mutex::Locker l(recovery_request_lock);
    recovery_request_timer.shutdown();
  }

  {
    Mutex::Locker l(snap_sleep_lock);
    snap_sleep_timer.shutdown();
  }

  {
    Mutex::Locker l(scrub_sleep_lock);
    scrub_sleep_timer.shutdown();
  }

  osdmap = OSDMapRef();
  next_osdmap = OSDMapRef();
}

void OSDService::init()
{
  reserver_finisher.start();
  objecter_finisher.start();
  objecter->set_client_incarnation(0);

  // deprioritize objecter in daemonperf output
  objecter->get_logger()->set_prio_adjust(-3);

  watch_timer.init();
  agent_timer.init();
  snap_sleep_timer.init();
  scrub_sleep_timer.init();

  agent_thread.create("osd_srv_agent");

  if (cct->_conf->osd_recovery_delay_start)
    defer_recovery(cct->_conf->osd_recovery_delay_start);
}

void OSDService::final_init()
{
  objecter->start(osdmap.get());
}

void OSDService::activate_map()
{
  // wake/unwake the tiering agent
  agent_lock.Lock();
  agent_active =
    !osdmap->test_flag(CEPH_OSDMAP_NOTIERAGENT) &&
    osd->is_active();
  agent_cond.Signal();
  agent_lock.Unlock();
}

class AgentTimeoutCB : public Context {
  PGRef pg;
public:
  explicit AgentTimeoutCB(PGRef _pg) : pg(_pg) {}
  void finish(int) override {
    pg->agent_choose_mode_restart();
  }
};

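// Body of the tiering agent thread: repeatedly take the highest-priority tier
// in agent_queue and let one of its PGs do flush/evict work within the
// configured op quotas, sleeping whenever there is no work or the agent is
// inactive.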
void OSDService::agent_entry()
{
  dout(10) << __func__ << " start" << dendl;
  agent_lock.Lock();

  while (!agent_stop_flag) {
    if (agent_queue.empty()) {
      dout(20) << __func__ << " empty queue" << dendl;
      agent_cond.Wait(agent_lock);
      continue;
    }
    uint64_t level = agent_queue.rbegin()->first;
    set<PGRef>& top = agent_queue.rbegin()->second;
    dout(10) << __func__
             << " tiers " << agent_queue.size()
             << ", top is " << level
             << " with pgs " << top.size()
             << ", ops " << agent_ops << "/"
             << cct->_conf->osd_agent_max_ops
             << (agent_active ? " active" : " NOT ACTIVE")
             << dendl;
    dout(20) << __func__ << " oids " << agent_oids << dendl;
    int max = cct->_conf->osd_agent_max_ops - agent_ops;
    int agent_flush_quota = max;
    if (!flush_mode_high_count)
      agent_flush_quota = cct->_conf->osd_agent_max_low_ops - agent_ops;
    if (agent_flush_quota <= 0 || top.empty() || !agent_active) {
      agent_cond.Wait(agent_lock);
      continue;
    }

    if (!agent_valid_iterator || agent_queue_pos == top.end()) {
      agent_queue_pos = top.begin();
      agent_valid_iterator = true;
    }
    PGRef pg = *agent_queue_pos;
    dout(10) << "high_count " << flush_mode_high_count
             << " agent_ops " << agent_ops
             << " flush_quota " << agent_flush_quota << dendl;
    agent_lock.Unlock();
    if (!pg->agent_work(max, agent_flush_quota)) {
      dout(10) << __func__ << " " << pg->get_pgid()
               << " no agent_work, delay for " << cct->_conf->osd_agent_delay_time
               << " seconds" << dendl;

      osd->logger->inc(l_osd_tier_delay);
      // Queue a timer to call agent_choose_mode for this pg after
      // osd_agent_delay_time seconds
      agent_timer_lock.Lock();
      Context *cb = new AgentTimeoutCB(pg);
      agent_timer.add_event_after(cct->_conf->osd_agent_delay_time, cb);
      agent_timer_lock.Unlock();
    }
    agent_lock.Lock();
  }
  agent_lock.Unlock();
  dout(10) << __func__ << " finish" << dendl;
}

void OSDService::agent_stop()
{
  {
    Mutex::Locker l(agent_lock);

    // By this time all ops should be cancelled
    assert(agent_ops == 0);
    // By this time all PGs are shutdown and dequeued
    if (!agent_queue.empty()) {
      set<PGRef>& top = agent_queue.rbegin()->second;
      derr << "agent queue not empty, for example " << (*top.begin())->info.pgid << dendl;
      assert(0 == "agent queue not empty");
    }

    agent_stop_flag = true;
    agent_cond.Signal();
  }
  agent_thread.join();
}

// -------------------------------------

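// Periodically re-derive promote_probability_millis so that the actual
// promotion rate tracks the configured objects/sec and bytes/sec targets,
// and set per-interval hard caps on promotions.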
void OSDService::promote_throttle_recalibrate()
{
  utime_t now = ceph_clock_now();
  double dur = now - last_recalibrate;
  last_recalibrate = now;
  unsigned prob = promote_probability_millis;

  uint64_t target_obj_sec = cct->_conf->osd_tier_promote_max_objects_sec;
  uint64_t target_bytes_sec = cct->_conf->osd_tier_promote_max_bytes_sec;

  unsigned min_prob = 1;

  uint64_t attempts, obj, bytes;
  promote_counter.sample_and_attenuate(&attempts, &obj, &bytes);
  dout(10) << __func__ << " " << attempts << " attempts, promoted "
           << obj << " objects and " << pretty_si_t(bytes) << " bytes; target "
           << target_obj_sec << " obj/sec or "
           << pretty_si_t(target_bytes_sec) << " bytes/sec"
           << dendl;

  // calculate what the probability *should* be, given the targets
  unsigned new_prob;
  if (attempts && dur > 0) {
    uint64_t avg_size = 1;
    if (obj)
      avg_size = MAX(bytes / obj, 1);
    unsigned po = (double)target_obj_sec * dur * 1000.0 / (double)attempts;
    unsigned pb = (double)target_bytes_sec / (double)avg_size * dur * 1000.0
      / (double)attempts;
    dout(20) << __func__ << " po " << po << " pb " << pb << " avg_size "
             << avg_size << dendl;
    if (target_obj_sec && target_bytes_sec)
      new_prob = MIN(po, pb);
    else if (target_obj_sec)
      new_prob = po;
    else if (target_bytes_sec)
      new_prob = pb;
    else
      new_prob = 1000;
  } else {
    new_prob = 1000;
  }
  dout(20) << __func__ << " new_prob " << new_prob << dendl;

  // correct for persistent skew between target rate and actual rate, adjust
  double ratio = 1.0;
  unsigned actual = 0;
  if (attempts && obj) {
    actual = obj * 1000 / attempts;
    ratio = (double)actual / (double)prob;
    new_prob = (double)new_prob / ratio;
  }
  new_prob = MAX(new_prob, min_prob);
  new_prob = MIN(new_prob, 1000);

  // adjust
  prob = (prob + new_prob) / 2;
  prob = MAX(prob, min_prob);
  prob = MIN(prob, 1000);
  dout(10) << __func__ << " actual " << actual
           << ", actual/prob ratio " << ratio
           << ", adjusted new_prob " << new_prob
           << ", prob " << promote_probability_millis << " -> " << prob
           << dendl;
  promote_probability_millis = prob;

  // set hard limits for this interval to mitigate stampedes
  promote_max_objects = target_obj_sec * OSD::OSD_TICK_INTERVAL * 2;
  promote_max_bytes = target_bytes_sec * OSD::OSD_TICK_INTERVAL * 2;
}

// -------------------------------------

float OSDService::get_failsafe_full_ratio()
{
  float full_ratio = cct->_conf->osd_failsafe_full_ratio;
  if (full_ratio > 1.0) full_ratio /= 100.0;
  return full_ratio;
}

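// Translate the current usage ratio into a fullness state
// (NONE/NEARFULL/BACKFILLFULL/FULL/FAILSAFE) using the OSDMap ratios, and log
// any state transition.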
void OSDService::check_full_status(float ratio)
{
  Mutex::Locker l(full_status_lock);

  cur_ratio = ratio;

  // The OSDMap ratios take precedence. So if the failsafe is .95 and
  // the admin sets the cluster full to .96, the failsafe moves up to .96
  // too. (Not that having failsafe == full is ideal, but it's better than
  // dropping writes before the cluster appears full.)
  OSDMapRef osdmap = get_osdmap();
  if (!osdmap || osdmap->get_epoch() == 0) {
    cur_state = NONE;
    return;
  }
  float nearfull_ratio = osdmap->get_nearfull_ratio();
  float backfillfull_ratio = std::max(osdmap->get_backfillfull_ratio(), nearfull_ratio);
  float full_ratio = std::max(osdmap->get_full_ratio(), backfillfull_ratio);
  float failsafe_ratio = std::max(get_failsafe_full_ratio(), full_ratio);

  if (osdmap->require_osd_release < CEPH_RELEASE_LUMINOUS) {
    // use the failsafe for nearfull and full; the mon isn't using the
    // flags anyway because we're mid-upgrade.
    full_ratio = failsafe_ratio;
    backfillfull_ratio = failsafe_ratio;
    nearfull_ratio = failsafe_ratio;
  } else if (full_ratio <= 0 ||
             backfillfull_ratio <= 0 ||
             nearfull_ratio <= 0) {
    derr << __func__ << " full_ratio, backfillfull_ratio or nearfull_ratio is <= 0" << dendl;
    // use failsafe flag. ick. the monitor did something wrong or the user
    // did something stupid.
    full_ratio = failsafe_ratio;
    backfillfull_ratio = failsafe_ratio;
    nearfull_ratio = failsafe_ratio;
  }

  string inject;
  s_names new_state;
  if (injectfull_state > NONE && injectfull) {
    new_state = injectfull_state;
    inject = "(Injected)";
  } else if (ratio > failsafe_ratio) {
    new_state = FAILSAFE;
  } else if (ratio > full_ratio) {
    new_state = FULL;
  } else if (ratio > backfillfull_ratio) {
    new_state = BACKFILLFULL;
  } else if (ratio > nearfull_ratio) {
    new_state = NEARFULL;
  } else {
    new_state = NONE;
  }
  dout(20) << __func__ << " cur ratio " << ratio
           << ". nearfull_ratio " << nearfull_ratio
           << ". backfillfull_ratio " << backfillfull_ratio
           << ", full_ratio " << full_ratio
           << ", failsafe_ratio " << failsafe_ratio
           << ", new state " << get_full_state_name(new_state)
           << " " << inject
           << dendl;

  // warn
  if (cur_state != new_state) {
    dout(10) << __func__ << " " << get_full_state_name(cur_state)
             << " -> " << get_full_state_name(new_state) << dendl;
    if (new_state == FAILSAFE) {
      clog->error() << "full status failsafe engaged, dropping updates, now "
                    << (int)roundf(ratio * 100) << "% full";
    } else if (cur_state == FAILSAFE) {
      clog->error() << "full status failsafe disengaged, no longer dropping "
                    << "updates, now " << (int)roundf(ratio * 100) << "% full";
    }
    cur_state = new_state;
  }
}

bool OSDService::need_fullness_update()
{
  OSDMapRef osdmap = get_osdmap();
  s_names cur = NONE;
  if (osdmap->exists(whoami)) {
    if (osdmap->get_state(whoami) & CEPH_OSD_FULL) {
      cur = FULL;
    } else if (osdmap->get_state(whoami) & CEPH_OSD_BACKFILLFULL) {
      cur = BACKFILLFULL;
    } else if (osdmap->get_state(whoami) & CEPH_OSD_NEARFULL) {
      cur = NEARFULL;
    }
  }
  s_names want = NONE;
  if (is_full())
    want = FULL;
  else if (is_backfillfull())
    want = BACKFILLFULL;
  else if (is_nearfull())
    want = NEARFULL;
  return want != cur;
}

bool OSDService::_check_full(s_names type, ostream &ss) const
{
  Mutex::Locker l(full_status_lock);

  if (injectfull && injectfull_state >= type) {
    // injectfull is either a count of the number of times to return
    // failsafe full, or -1 to always return full
    if (injectfull > 0)
      --injectfull;
    ss << "Injected " << get_full_state_name(type) << " OSD ("
       << (injectfull < 0 ? "set" : std::to_string(injectfull)) << ")";
    return true;
  }

  ss << "current usage is " << cur_ratio;
  return cur_state >= type;
}

bool OSDService::check_failsafe_full(ostream &ss) const
{
  return _check_full(FAILSAFE, ss);
}

bool OSDService::check_full(ostream &ss) const
{
  return _check_full(FULL, ss);
}

bool OSDService::check_backfill_full(ostream &ss) const
{
  return _check_full(BACKFILLFULL, ss);
}

bool OSDService::check_nearfull(ostream &ss) const
{
  return _check_full(NEARFULL, ss);
}

bool OSDService::is_failsafe_full() const
{
  Mutex::Locker l(full_status_lock);
  return cur_state == FAILSAFE;
}

bool OSDService::is_full() const
{
  Mutex::Locker l(full_status_lock);
  return cur_state >= FULL;
}

bool OSDService::is_backfillfull() const
{
  Mutex::Locker l(full_status_lock);
  return cur_state >= BACKFILLFULL;
}

bool OSDService::is_nearfull() const
{
  Mutex::Locker l(full_status_lock);
  return cur_state >= NEARFULL;
}

void OSDService::set_injectfull(s_names type, int64_t count)
{
  Mutex::Locker l(full_status_lock);
  injectfull_state = type;
  injectfull = count;
}

osd_stat_t OSDService::set_osd_stat(const struct store_statfs_t &stbuf,
                                    vector<int>& hb_peers)
{
  uint64_t bytes = stbuf.total;
  uint64_t used = bytes - stbuf.available;
  uint64_t avail = stbuf.available;

  osd->logger->set(l_osd_stat_bytes, bytes);
  osd->logger->set(l_osd_stat_bytes_used, used);
  osd->logger->set(l_osd_stat_bytes_avail, avail);

  {
    Mutex::Locker l(stat_lock);
    osd_stat.hb_peers.swap(hb_peers);
    osd->op_tracker.get_age_ms_histogram(&osd_stat.op_queue_age_hist);
    osd_stat.kb = bytes >> 10;
    osd_stat.kb_used = used >> 10;
    osd_stat.kb_avail = avail >> 10;
    return osd_stat;
  }
}

void OSDService::update_osd_stat(vector<int>& hb_peers)
{
  // load osd stats first
  struct store_statfs_t stbuf;
  int r = osd->store->statfs(&stbuf);
  if (r < 0) {
    derr << "statfs() failed: " << cpp_strerror(r) << dendl;
    return;
  }

  auto new_stat = set_osd_stat(stbuf, hb_peers);
  dout(20) << "update_osd_stat " << new_stat << dendl;
  assert(new_stat.kb);
  float ratio = ((float)new_stat.kb_used) / ((float)new_stat.kb);
  check_full_status(ratio);
}

bool OSDService::check_osdmap_full(const set<pg_shard_t> &missing_on)
{
  OSDMapRef osdmap = get_osdmap();
  for (auto shard : missing_on) {
    if (osdmap->get_state(shard.osd) & CEPH_OSD_FULL)
      return true;
  }
  return false;
}

void OSDService::send_message_osd_cluster(int peer, Message *m, epoch_t from_epoch)
{
  OSDMapRef next_map = get_nextmap_reserved();
  // service map is always newer/newest
  assert(from_epoch <= next_map->get_epoch());

  if (next_map->is_down(peer) ||
      next_map->get_info(peer).up_from > from_epoch) {
    m->put();
    release_map(next_map);
    return;
  }
  const entity_inst_t& peer_inst = next_map->get_cluster_inst(peer);
  ConnectionRef peer_con = osd->cluster_messenger->get_connection(peer_inst);
  share_map_peer(peer, peer_con.get(), next_map);
  peer_con->send_message(m);
  release_map(next_map);
}

ConnectionRef OSDService::get_con_osd_cluster(int peer, epoch_t from_epoch)
{
  OSDMapRef next_map = get_nextmap_reserved();
  // service map is always newer/newest
  assert(from_epoch <= next_map->get_epoch());

  if (next_map->is_down(peer) ||
      next_map->get_info(peer).up_from > from_epoch) {
    release_map(next_map);
    return NULL;
  }
  ConnectionRef con = osd->cluster_messenger->get_connection(next_map->get_cluster_inst(peer));
  release_map(next_map);
  return con;
}

pair<ConnectionRef,ConnectionRef> OSDService::get_con_osd_hb(int peer, epoch_t from_epoch)
{
  OSDMapRef next_map = get_nextmap_reserved();
  // service map is always newer/newest
  assert(from_epoch <= next_map->get_epoch());

  pair<ConnectionRef,ConnectionRef> ret;
  if (next_map->is_down(peer) ||
      next_map->get_info(peer).up_from > from_epoch) {
    release_map(next_map);
    return ret;
  }
  ret.first = osd->hb_back_client_messenger->get_connection(next_map->get_hb_back_inst(peer));
  if (next_map->get_hb_front_addr(peer) != entity_addr_t())
    ret.second = osd->hb_front_client_messenger->get_connection(next_map->get_hb_front_inst(peer));
  release_map(next_map);
  return ret;
}


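// Record a desired pg_temp mapping for pgid; it will be sent to the mon by
// send_pg_temp() unless an identical request is already pending.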
void OSDService::queue_want_pg_temp(pg_t pgid, vector<int>& want)
{
  Mutex::Locker l(pg_temp_lock);
  map<pg_t,vector<int> >::iterator p = pg_temp_pending.find(pgid);
  if (p == pg_temp_pending.end() ||
      p->second != want) {
    pg_temp_wanted[pgid] = want;
  }
}

void OSDService::remove_want_pg_temp(pg_t pgid)
{
  Mutex::Locker l(pg_temp_lock);
  pg_temp_wanted.erase(pgid);
  pg_temp_pending.erase(pgid);
}

void OSDService::_sent_pg_temp()
{
  for (map<pg_t,vector<int> >::iterator p = pg_temp_wanted.begin();
       p != pg_temp_wanted.end();
       ++p)
    pg_temp_pending[p->first] = p->second;
  pg_temp_wanted.clear();
}

void OSDService::requeue_pg_temp()
{
  Mutex::Locker l(pg_temp_lock);
  // wanted overrides pending. note that remove_want_pg_temp
  // clears the item out of both.
  unsigned old_wanted = pg_temp_wanted.size();
  unsigned old_pending = pg_temp_pending.size();
  _sent_pg_temp();
  pg_temp_wanted.swap(pg_temp_pending);
  dout(10) << __func__ << " " << old_wanted << " + " << old_pending << " -> "
           << pg_temp_wanted.size() << dendl;
}

void OSDService::send_pg_temp()
{
  Mutex::Locker l(pg_temp_lock);
  if (pg_temp_wanted.empty())
    return;
  dout(10) << "send_pg_temp " << pg_temp_wanted << dendl;
  MOSDPGTemp *m = new MOSDPGTemp(osdmap->get_epoch());
  m->pg_temp = pg_temp_wanted;
  monc->send_mon_message(m);
  _sent_pg_temp();
}

void OSDService::send_pg_created(pg_t pgid)
{
  dout(20) << __func__ << dendl;
  if (osdmap->require_osd_release >= CEPH_RELEASE_LUMINOUS) {
    monc->send_mon_message(new MOSDPGCreated(pgid));
  }
}

// --------------------------------------
// dispatch

epoch_t OSDService::get_peer_epoch(int peer)
{
  Mutex::Locker l(peer_map_epoch_lock);
  map<int,epoch_t>::iterator p = peer_map_epoch.find(peer);
  if (p == peer_map_epoch.end())
    return 0;
  return p->second;
}

epoch_t OSDService::note_peer_epoch(int peer, epoch_t e)
{
  Mutex::Locker l(peer_map_epoch_lock);
  map<int,epoch_t>::iterator p = peer_map_epoch.find(peer);
  if (p != peer_map_epoch.end()) {
    if (p->second < e) {
      dout(10) << "note_peer_epoch osd." << peer << " has " << e << dendl;
      p->second = e;
    } else {
      dout(30) << "note_peer_epoch osd." << peer << " has " << p->second << " >= " << e << dendl;
    }
    return p->second;
  } else {
    dout(10) << "note_peer_epoch osd." << peer << " now has " << e << dendl;
    peer_map_epoch[peer] = e;
    return e;
  }
}

void OSDService::forget_peer_epoch(int peer, epoch_t as_of)
{
  Mutex::Locker l(peer_map_epoch_lock);
  map<int,epoch_t>::iterator p = peer_map_epoch.find(peer);
  if (p != peer_map_epoch.end()) {
    if (p->second <= as_of) {
      dout(10) << "forget_peer_epoch osd." << peer << " as_of " << as_of
               << " had " << p->second << dendl;
      peer_map_epoch.erase(p);
    } else {
      dout(10) << "forget_peer_epoch osd." << peer << " as_of " << as_of
               << " has " << p->second << " - not forgetting" << dendl;
    }
  }
}

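// Decide whether 'name' (a client, or a peer OSD reached via the cluster
// messenger) appears to hold an older map than 'osdmap' and therefore needs
// an update.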
bool OSDService::should_share_map(entity_name_t name, Connection *con,
                                  epoch_t epoch, const OSDMapRef& osdmap,
                                  const epoch_t *sent_epoch_p)
{
  dout(20) << "should_share_map "
           << name << " " << con->get_peer_addr()
           << " " << epoch << dendl;

  // does client have old map?
  if (name.is_client()) {
    bool message_sendmap = epoch < osdmap->get_epoch();
    if (message_sendmap && sent_epoch_p) {
      dout(20) << "client session last_sent_epoch: "
               << *sent_epoch_p
               << " versus osdmap epoch " << osdmap->get_epoch() << dendl;
      if (*sent_epoch_p < osdmap->get_epoch()) {
        return true;
      } // else we don't need to send it out again
    }
  }

  if (con->get_messenger() == osd->cluster_messenger &&
      con != osd->cluster_messenger->get_loopback_connection() &&
      osdmap->is_up(name.num()) &&
      (osdmap->get_cluster_addr(name.num()) == con->get_peer_addr() ||
       osdmap->get_hb_back_addr(name.num()) == con->get_peer_addr())) {
    // remember
    epoch_t has = MAX(get_peer_epoch(name.num()), epoch);

    // share?
    if (has < osdmap->get_epoch()) {
      dout(10) << name << " " << con->get_peer_addr()
               << " has old map " << epoch << " < "
               << osdmap->get_epoch() << dendl;
      return true;
    }
  }

  return false;
}

void OSDService::share_map(
  entity_name_t name,
  Connection *con,
  epoch_t epoch,
  OSDMapRef& osdmap,
  epoch_t *sent_epoch_p)
{
  dout(20) << "share_map "
           << name << " " << con->get_peer_addr()
           << " " << epoch << dendl;

  if (!osd->is_active()) {
    /* It is safe not to proceed, as the OSD is not in a healthy state. */
    return;
  }

  bool want_shared = should_share_map(name, con, epoch,
                                      osdmap, sent_epoch_p);

  if (want_shared) {
    if (name.is_client()) {
      dout(10) << name << " has old map " << epoch
               << " < " << osdmap->get_epoch() << dendl;
      // we know the Session is valid or we wouldn't be sending
      if (sent_epoch_p) {
        *sent_epoch_p = osdmap->get_epoch();
      }
      send_incremental_map(epoch, con, osdmap);
    } else if (con->get_messenger() == osd->cluster_messenger &&
               osdmap->is_up(name.num()) &&
               (osdmap->get_cluster_addr(name.num()) == con->get_peer_addr() ||
                osdmap->get_hb_back_addr(name.num()) == con->get_peer_addr())) {
      dout(10) << name << " " << con->get_peer_addr()
               << " has old map " << epoch << " < "
               << osdmap->get_epoch() << dendl;
      note_peer_epoch(name.num(), osdmap->get_epoch());
      send_incremental_map(epoch, con, osdmap);
    }
  }
}

void OSDService::share_map_peer(int peer, Connection *con, OSDMapRef map)
{
  if (!map)
    map = get_osdmap();

  // send map?
  epoch_t pe = get_peer_epoch(peer);
  if (pe) {
    if (pe < map->get_epoch()) {
      send_incremental_map(pe, con, map);
      note_peer_epoch(peer, map->get_epoch());
    } else
      dout(20) << "share_map_peer " << con << " already has epoch " << pe << dendl;
  } else {
    dout(20) << "share_map_peer " << con << " don't know epoch, doing nothing" << dendl;
    // no idea about peer's epoch.
    // ??? send recent ???
    // do nothing.
  }
}

bool OSDService::can_inc_scrubs_pending()
{
  bool can_inc = false;
  Mutex::Locker l(sched_scrub_lock);

  if (scrubs_pending + scrubs_active < cct->_conf->osd_max_scrubs) {
    dout(20) << __func__ << " " << scrubs_pending << " -> " << (scrubs_pending+1)
             << " (max " << cct->_conf->osd_max_scrubs << ", active " << scrubs_active << ")" << dendl;
    can_inc = true;
  } else {
    dout(20) << __func__ << " " << scrubs_pending << " + " << scrubs_active << " active >= max " << cct->_conf->osd_max_scrubs << dendl;
  }

  return can_inc;
}

bool OSDService::inc_scrubs_pending()
{
  bool result = false;

  sched_scrub_lock.Lock();
  if (scrubs_pending + scrubs_active < cct->_conf->osd_max_scrubs) {
    dout(20) << "inc_scrubs_pending " << scrubs_pending << " -> " << (scrubs_pending+1)
             << " (max " << cct->_conf->osd_max_scrubs << ", active " << scrubs_active << ")" << dendl;
    result = true;
    ++scrubs_pending;
  } else {
    dout(20) << "inc_scrubs_pending " << scrubs_pending << " + " << scrubs_active << " active >= max " << cct->_conf->osd_max_scrubs << dendl;
  }
  sched_scrub_lock.Unlock();

  return result;
}

void OSDService::dec_scrubs_pending()
{
  sched_scrub_lock.Lock();
  dout(20) << "dec_scrubs_pending " << scrubs_pending << " -> " << (scrubs_pending-1)
           << " (max " << cct->_conf->osd_max_scrubs << ", active " << scrubs_active << ")" << dendl;
  --scrubs_pending;
  assert(scrubs_pending >= 0);
  sched_scrub_lock.Unlock();
}

void OSDService::inc_scrubs_active(bool reserved)
{
  sched_scrub_lock.Lock();
  ++(scrubs_active);
  if (reserved) {
    --(scrubs_pending);
    dout(20) << "inc_scrubs_active " << (scrubs_active-1) << " -> " << scrubs_active
             << " (max " << cct->_conf->osd_max_scrubs
             << ", pending " << (scrubs_pending+1) << " -> " << scrubs_pending << ")" << dendl;
    assert(scrubs_pending >= 0);
  } else {
    dout(20) << "inc_scrubs_active " << (scrubs_active-1) << " -> " << scrubs_active
             << " (max " << cct->_conf->osd_max_scrubs
             << ", pending " << scrubs_pending << ")" << dendl;
  }
  sched_scrub_lock.Unlock();
}

void OSDService::dec_scrubs_active()
{
  sched_scrub_lock.Lock();
  dout(20) << "dec_scrubs_active " << scrubs_active << " -> " << (scrubs_active-1)
           << " (max " << cct->_conf->osd_max_scrubs << ", pending " << scrubs_pending << ")" << dendl;
  --scrubs_active;
  assert(scrubs_active >= 0);
  sched_scrub_lock.Unlock();
}

void OSDService::retrieve_epochs(epoch_t *_boot_epoch, epoch_t *_up_epoch,
                                 epoch_t *_bind_epoch) const
{
  Mutex::Locker l(epoch_lock);
  if (_boot_epoch)
    *_boot_epoch = boot_epoch;
  if (_up_epoch)
    *_up_epoch = up_epoch;
  if (_bind_epoch)
    *_bind_epoch = bind_epoch;
}

void OSDService::set_epochs(const epoch_t *_boot_epoch, const epoch_t *_up_epoch,
                            const epoch_t *_bind_epoch)
{
  Mutex::Locker l(epoch_lock);
  if (_boot_epoch) {
    assert(*_boot_epoch == 0 || *_boot_epoch >= boot_epoch);
    boot_epoch = *_boot_epoch;
  }
  if (_up_epoch) {
    assert(*_up_epoch == 0 || *_up_epoch >= up_epoch);
    up_epoch = *_up_epoch;
  }
  if (_bind_epoch) {
    assert(*_bind_epoch == 0 || *_bind_epoch >= bind_epoch);
    bind_epoch = *_bind_epoch;
  }
}

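// Ask the mon to mark us down before shutdown, waiting up to
// osd_mon_shutdown_timeout seconds for the ack before moving to STOPPING.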
bool OSDService::prepare_to_stop()
{
  Mutex::Locker l(is_stopping_lock);
  if (get_state() != NOT_STOPPING)
    return false;

  OSDMapRef osdmap = get_osdmap();
  if (osdmap && osdmap->is_up(whoami)) {
    dout(0) << __func__ << " telling mon we are shutting down" << dendl;
    set_state(PREPARING_TO_STOP);
    monc->send_mon_message(new MOSDMarkMeDown(monc->get_fsid(),
                                              osdmap->get_inst(whoami),
                                              osdmap->get_epoch(),
                                              true  // request ack
                                              ));
    utime_t now = ceph_clock_now();
    utime_t timeout;
    timeout.set_from_double(now + cct->_conf->osd_mon_shutdown_timeout);
    while ((ceph_clock_now() < timeout) &&
           (get_state() != STOPPING)) {
      is_stopping_cond.WaitUntil(is_stopping_lock, timeout);
    }
  }
  dout(0) << __func__ << " starting shutdown" << dendl;
  set_state(STOPPING);
  return true;
}

void OSDService::got_stop_ack()
{
  Mutex::Locker l(is_stopping_lock);
  if (get_state() == PREPARING_TO_STOP) {
    dout(0) << __func__ << " starting shutdown" << dendl;
    set_state(STOPPING);
    is_stopping_cond.Signal();
  } else {
    dout(10) << __func__ << " ignoring msg" << dendl;
  }
}

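// Build an MOSDMap carrying the incremental maps in (since, to]; where an
// incremental is missing, fall back to a full map, and return NULL if neither
// can be loaded.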
MOSDMap *OSDService::build_incremental_map_msg(epoch_t since, epoch_t to,
                                               OSDSuperblock& sblock)
{
  MOSDMap *m = new MOSDMap(monc->get_fsid());
  m->oldest_map = max_oldest_map;
  m->newest_map = sblock.newest_map;

  for (epoch_t e = to; e > since; e--) {
    bufferlist bl;
    if (e > m->oldest_map && get_inc_map_bl(e, bl)) {
      m->incremental_maps[e].claim(bl);
    } else if (get_map_bl(e, bl)) {
      m->maps[e].claim(bl);
      break;
    } else {
      derr << "since " << since << " to " << to
           << " oldest " << m->oldest_map << " newest " << m->newest_map
           << dendl;
      m->put();
      m = NULL;
      break;
    }
  }
  return m;
}

void OSDService::send_map(MOSDMap *m, Connection *con)
{
  con->send_message(m);
}

void OSDService::send_incremental_map(epoch_t since, Connection *con,
                                      OSDMapRef& osdmap)
{
  epoch_t to = osdmap->get_epoch();
  dout(10) << "send_incremental_map " << since << " -> " << to
           << " to " << con << " " << con->get_peer_addr() << dendl;

  MOSDMap *m = NULL;
  while (!m) {
    OSDSuperblock sblock(get_superblock());
    if (since < sblock.oldest_map) {
      // just send latest full map
      MOSDMap *m = new MOSDMap(monc->get_fsid());
      m->oldest_map = max_oldest_map;
      m->newest_map = sblock.newest_map;
      get_map_bl(to, m->maps[to]);
      send_map(m, con);
      return;
    }

    if (to > since && (int64_t)(to - since) > cct->_conf->osd_map_share_max_epochs) {
      dout(10) << " " << (to - since) << " > max " << cct->_conf->osd_map_share_max_epochs
               << ", only sending most recent" << dendl;
      since = to - cct->_conf->osd_map_share_max_epochs;
    }

    if (to - since > (epoch_t)cct->_conf->osd_map_message_max)
      to = since + cct->_conf->osd_map_message_max;
    m = build_incremental_map_msg(since, to, sblock);
  }
  send_map(m, con);
}

bool OSDService::_get_map_bl(epoch_t e, bufferlist& bl)
{
  bool found = map_bl_cache.lookup(e, &bl);
  if (found) {
    if (logger)
      logger->inc(l_osd_map_bl_cache_hit);
    return true;
  }
  if (logger)
    logger->inc(l_osd_map_bl_cache_miss);
  found = store->read(coll_t::meta(),
                      OSD::get_osdmap_pobject_name(e), 0, 0, bl,
                      CEPH_OSD_OP_FLAG_FADVISE_WILLNEED) >= 0;
  if (found) {
    _add_map_bl(e, bl);
  }
  return found;
}

bool OSDService::get_inc_map_bl(epoch_t e, bufferlist& bl)
{
  Mutex::Locker l(map_cache_lock);
  bool found = map_bl_inc_cache.lookup(e, &bl);
  if (found) {
    if (logger)
      logger->inc(l_osd_map_bl_cache_hit);
    return true;
  }
  if (logger)
    logger->inc(l_osd_map_bl_cache_miss);
  found = store->read(coll_t::meta(),
                      OSD::get_inc_osdmap_pobject_name(e), 0, 0, bl,
                      CEPH_OSD_OP_FLAG_FADVISE_WILLNEED) >= 0;
  if (found) {
    _add_map_inc_bl(e, bl);
  }
  return found;
}

void OSDService::_add_map_bl(epoch_t e, bufferlist& bl)
{
  dout(10) << "add_map_bl " << e << " " << bl.length() << " bytes" << dendl;
  // cache a contiguous buffer
  if (bl.get_num_buffers() > 1) {
    bl.rebuild();
  }
  bl.try_assign_to_mempool(mempool::mempool_osd_mapbl);
  map_bl_cache.add(e, bl);
}

void OSDService::_add_map_inc_bl(epoch_t e, bufferlist& bl)
{
  dout(10) << "add_map_inc_bl " << e << " " << bl.length() << " bytes" << dendl;
  // cache a contiguous buffer
  if (bl.get_num_buffers() > 1) {
    bl.rebuild();
  }
  bl.try_assign_to_mempool(mempool::mempool_osd_mapbl);
  map_bl_inc_cache.add(e, bl);
}

void OSDService::pin_map_inc_bl(epoch_t e, bufferlist &bl)
{
  Mutex::Locker l(map_cache_lock);
  // cache a contiguous buffer
  if (bl.get_num_buffers() > 1) {
    bl.rebuild();
  }
  map_bl_inc_cache.pin(e, bl);
}

void OSDService::pin_map_bl(epoch_t e, bufferlist &bl)
{
  Mutex::Locker l(map_cache_lock);
  // cache a contiguous buffer
  if (bl.get_num_buffers() > 1) {
    bl.rebuild();
  }
  map_bl_cache.pin(e, bl);
}

void OSDService::clear_map_bl_cache_pins(epoch_t e)
{
  Mutex::Locker l(map_cache_lock);
  map_bl_inc_cache.clear_pinned(e);
  map_bl_cache.clear_pinned(e);
}

OSDMapRef OSDService::_add_map(OSDMap *o)
{
  epoch_t e = o->get_epoch();

  if (cct->_conf->osd_map_dedup) {
    // Dedup against an existing map at a nearby epoch
    OSDMapRef for_dedup = map_cache.lower_bound(e);
    if (for_dedup) {
      OSDMap::dedup(for_dedup.get(), o);
    }
  }
  bool existed;
  OSDMapRef l = map_cache.add(e, o, &existed);
  if (existed) {
    delete o;
  }
  return l;
}

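// Return the OSDMap for 'epoch' from the map cache, loading and decoding it
// from the store on a miss; returns a null ref if the map cannot be loaded.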
OSDMapRef OSDService::try_get_map(epoch_t epoch)
{
  Mutex::Locker l(map_cache_lock);
  OSDMapRef retval = map_cache.lookup(epoch);
  if (retval) {
    dout(30) << "get_map " << epoch << " -cached" << dendl;
    if (logger) {
      logger->inc(l_osd_map_cache_hit);
    }
    return retval;
  }
  if (logger) {
    logger->inc(l_osd_map_cache_miss);
    epoch_t lb = map_cache.cached_key_lower_bound();
    if (epoch < lb) {
      dout(30) << "get_map " << epoch << " - miss, below lower bound" << dendl;
      logger->inc(l_osd_map_cache_miss_low);
      logger->inc(l_osd_map_cache_miss_low_avg, lb - epoch);
    }
  }

  OSDMap *map = new OSDMap;
  if (epoch > 0) {
    dout(20) << "get_map " << epoch << " - loading and decoding " << map << dendl;
    bufferlist bl;
    if (!_get_map_bl(epoch, bl) || bl.length() == 0) {
      derr << "failed to load OSD map for epoch " << epoch << ", got " << bl.length() << " bytes" << dendl;
      delete map;
      return OSDMapRef();
    }
    map->decode(bl);
  } else {
    dout(20) << "get_map " << epoch << " - return initial " << map << dendl;
  }
  return _add_map(map);
}

// ops


void OSDService::reply_op_error(OpRequestRef op, int err)
{
  reply_op_error(op, err, eversion_t(), 0);
}

void OSDService::reply_op_error(OpRequestRef op, int err, eversion_t v,
                                version_t uv)
{
  const MOSDOp *m = static_cast<const MOSDOp*>(op->get_req());
  assert(m->get_type() == CEPH_MSG_OSD_OP);
  int flags;
  flags = m->get_flags() & (CEPH_OSD_FLAG_ACK|CEPH_OSD_FLAG_ONDISK);

  MOSDOpReply *reply = new MOSDOpReply(m, err, osdmap->get_epoch(), flags,
                                       true);
  reply->set_reply_versions(v, uv);
  m->get_connection()->send_message(reply);
}

void OSDService::handle_misdirected_op(PG *pg, OpRequestRef op)
{
  if (!cct->_conf->osd_debug_misdirected_ops) {
    return;
  }

  const MOSDOp *m = static_cast<const MOSDOp*>(op->get_req());
  assert(m->get_type() == CEPH_MSG_OSD_OP);

  assert(m->get_map_epoch() >= pg->info.history.same_primary_since);

  if (pg->is_ec_pg()) {
    /**
     * OSD recomputes op target based on current OSDMap. With an EC pg, we
     * can get this result:
     * 1) client at map 512 sends an op to osd 3, pg_t 3.9 based on mapping
     *    [CRUSH_ITEM_NONE, 2, 3]/3
     * 2) OSD 3 at map 513 remaps op to osd 3, spg_t 3.9s0 based on mapping
     *    [3, 2, 3]/3
     * 3) PG 3.9s0 dequeues the op at epoch 512 and notices that it isn't primary
     *    -- misdirected op
     * 4) client resends and this time PG 3.9s0 having caught up to 513 gets
     *    it and fulfils it
     *
     * We can't compute the op target based on the sending map epoch due to
     * splitting. The simplest thing is to detect such cases here and drop
     * them without an error (the client will resend anyway).
     */
    assert(m->get_map_epoch() <= superblock.newest_map);
    OSDMapRef opmap = try_get_map(m->get_map_epoch());
    if (!opmap) {
      dout(7) << __func__ << ": " << *pg << " no longer have map for "
              << m->get_map_epoch() << ", dropping" << dendl;
      return;
    }
    pg_t _pgid = m->get_raw_pg();
    spg_t pgid;
    if ((m->get_flags() & CEPH_OSD_FLAG_PGOP) == 0)
      _pgid = opmap->raw_pg_to_pg(_pgid);
    if (opmap->get_primary_shard(_pgid, &pgid) &&
        pgid.shard != pg->info.pgid.shard) {
      dout(7) << __func__ << ": " << *pg << " primary changed since "
              << m->get_map_epoch() << ", dropping" << dendl;
      return;
    }
  }

  dout(7) << *pg << " misdirected op in " << m->get_map_epoch() << dendl;
  clog->warn() << m->get_source_inst() << " misdirected " << m->get_reqid()
               << " pg " << m->get_raw_pg()
               << " to osd." << whoami
               << " not " << pg->acting
               << " in e" << m->get_map_epoch() << "/" << osdmap->get_epoch();
}

void OSDService::enqueue_back(spg_t pgid, PGQueueable qi)
{
  osd->op_shardedwq.queue(make_pair(pgid, qi));
}

void OSDService::enqueue_front(spg_t pgid, PGQueueable qi)
{
  osd->op_shardedwq.queue_front(make_pair(pgid, qi));
}

void OSDService::queue_for_peering(PG *pg)
{
  peering_wq.queue(pg);
}

void OSDService::queue_for_snap_trim(PG *pg)
{
  dout(10) << "queueing " << *pg << " for snaptrim" << dendl;
  osd->op_shardedwq.queue(
    make_pair(
      pg->info.pgid,
      PGQueueable(
        PGSnapTrim(pg->get_osdmap()->get_epoch()),
        cct->_conf->osd_snap_trim_cost,
        cct->_conf->osd_snap_trim_priority,
        ceph_clock_now(),
        entity_inst_t(),
        pg->get_osdmap()->get_epoch())));
}


// ====================================================================
// OSD

#undef dout_prefix
#define dout_prefix *_dout

// Commands shared between OSD's console and admin console:
namespace ceph {
namespace osd_cmds {

int heap(CephContext& cct, cmdmap_t& cmdmap, Formatter& f, std::ostream& os);

}} // namespace ceph::osd_cmds

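// Create a fresh object store for this OSD: mkfs + mount the backend, write
// (or validate an existing) OSD superblock, then record the basic metadata
// files (magic, whoami, ceph_fsid, ready) via write_meta().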
int OSD::mkfs(CephContext *cct, ObjectStore *store, const string &dev,
              uuid_d fsid, int whoami)
{
  int ret;

  ceph::shared_ptr<ObjectStore::Sequencer> osr(
    new ObjectStore::Sequencer("mkfs"));
  OSDSuperblock sb;
  bufferlist sbbl;
  C_SaferCond waiter;

  // if we are fed a uuid for this osd, use it.
  store->set_fsid(cct->_conf->osd_uuid);

  ret = store->mkfs();
  if (ret) {
    derr << "OSD::mkfs: ObjectStore::mkfs failed with error "
         << cpp_strerror(ret) << dendl;
    goto free_store;
  }

  store->set_cache_shards(1);  // doesn't matter for mkfs!

  ret = store->mount();
  if (ret) {
    derr << "OSD::mkfs: couldn't mount ObjectStore: error "
         << cpp_strerror(ret) << dendl;
    goto free_store;
  }

  ret = store->read(coll_t::meta(), OSD_SUPERBLOCK_GOBJECT, 0, 0, sbbl);
  if (ret >= 0) {
    /* if we already have superblock, check content of superblock */
    dout(0) << " have superblock" << dendl;
    bufferlist::iterator p;
    p = sbbl.begin();
    ::decode(sb, p);
    if (whoami != sb.whoami) {
      derr << "provided osd id " << whoami << " != superblock's " << sb.whoami
           << dendl;
      ret = -EINVAL;
      goto umount_store;
    }
    if (fsid != sb.cluster_fsid) {
      derr << "provided cluster fsid " << fsid
           << " != superblock's " << sb.cluster_fsid << dendl;
      ret = -EINVAL;
      goto umount_store;
    }
  } else {
    // create superblock
    sb.cluster_fsid = fsid;
    sb.osd_fsid = store->get_fsid();
    sb.whoami = whoami;
    sb.compat_features = get_osd_initial_compat_set();

    bufferlist bl;
    ::encode(sb, bl);

    ObjectStore::Transaction t;
    t.create_collection(coll_t::meta(), 0);
    t.write(coll_t::meta(), OSD_SUPERBLOCK_GOBJECT, 0, bl.length(), bl);
    ret = store->apply_transaction(osr.get(), std::move(t));
    if (ret) {
      derr << "OSD::mkfs: error while writing OSD_SUPERBLOCK_GOBJECT: "
           << "apply_transaction returned " << cpp_strerror(ret) << dendl;
      goto umount_store;
    }
  }

  if (!osr->flush_commit(&waiter)) {
    waiter.wait();
  }

  ret = write_meta(store, sb.cluster_fsid, sb.osd_fsid, whoami);
  if (ret) {
    derr << "OSD::mkfs: failed to write fsid file: error "
         << cpp_strerror(ret) << dendl;
    goto umount_store;
  }

umount_store:
  store->umount();
free_store:
  delete store;
  return ret;
}

int OSD::write_meta(ObjectStore *store, uuid_d& cluster_fsid, uuid_d& osd_fsid, int whoami)
{
  char val[80];
  int r;

  snprintf(val, sizeof(val), "%s", CEPH_OSD_ONDISK_MAGIC);
  r = store->write_meta("magic", val);
  if (r < 0)
    return r;

  snprintf(val, sizeof(val), "%d", whoami);
  r = store->write_meta("whoami", val);
  if (r < 0)
    return r;

  cluster_fsid.print(val);
  r = store->write_meta("ceph_fsid", val);
  if (r < 0)
    return r;

  r = store->write_meta("ready", "ready");
  if (r < 0)
    return r;

  return 0;
}

int OSD::peek_meta(ObjectStore *store, std::string& magic,
                   uuid_d& cluster_fsid, uuid_d& osd_fsid, int& whoami)
{
  string val;

  int r = store->read_meta("magic", &val);
  if (r < 0)
    return r;
  magic = val;

  r = store->read_meta("whoami", &val);
  if (r < 0)
    return r;
  whoami = atoi(val.c_str());

  r = store->read_meta("ceph_fsid", &val);
  if (r < 0)
    return r;
  r = cluster_fsid.parse(val.c_str());
  if (!r)
    return -EINVAL;

  r = store->read_meta("fsid", &val);
  if (r < 0) {
    osd_fsid = uuid_d();
  } else {
    r = osd_fsid.parse(val.c_str());
    if (!r)
      return -EINVAL;
  }

  return 0;
}


#undef dout_prefix
#define dout_prefix _prefix(_dout, whoami, get_osdmap_epoch())

// cons/des

OSD::OSD(CephContext *cct_, ObjectStore *store_,
         int id,
         Messenger *internal_messenger,
         Messenger *external_messenger,
         Messenger *hb_client_front,
         Messenger *hb_client_back,
         Messenger *hb_front_serverm,
         Messenger *hb_back_serverm,
         Messenger *osdc_messenger,
         MonClient *mc,
         const std::string &dev, const std::string &jdev) :
  Dispatcher(cct_),
  osd_lock("OSD::osd_lock"),
  tick_timer(cct, osd_lock),
  tick_timer_lock("OSD::tick_timer_lock"),
  tick_timer_without_osd_lock(cct, tick_timer_lock),
  authorize_handler_cluster_registry(new AuthAuthorizeHandlerRegistry(cct,
    cct->_conf->auth_supported.empty() ?
    cct->_conf->auth_cluster_required :
    cct->_conf->auth_supported)),
  authorize_handler_service_registry(new AuthAuthorizeHandlerRegistry(cct,
    cct->_conf->auth_supported.empty() ?
    cct->_conf->auth_service_required :
    cct->_conf->auth_supported)),
  cluster_messenger(internal_messenger),
  client_messenger(external_messenger),
  objecter_messenger(osdc_messenger),
  monc(mc),
  mgrc(cct_, client_messenger),
  logger(NULL),
  recoverystate_perf(NULL),
  store(store_),
  log_client(cct, client_messenger, &mc->monmap, LogClient::NO_FLAGS),
  clog(log_client.create_channel()),
  whoami(id),
  dev_path(dev), journal_path(jdev),
  store_is_rotational(store->is_rotational()),
  trace_endpoint("0.0.0.0", 0, "osd"),
  asok_hook(NULL),
  osd_compat(get_osd_compat_set()),
  peering_tp(cct, "OSD::peering_tp", "tp_peering",
             cct->_conf->osd_peering_wq_threads,
             "osd_peering_tp_threads"),
  osd_op_tp(cct, "OSD::osd_op_tp", "tp_osd_tp",
            get_num_op_threads()),
  disk_tp(cct, "OSD::disk_tp", "tp_osd_disk", cct->_conf->osd_disk_threads, "osd_disk_threads"),
  command_tp(cct, "OSD::command_tp", "tp_osd_cmd", 1),
  session_waiting_lock("OSD::session_waiting_lock"),
  heartbeat_lock("OSD::heartbeat_lock"),
  heartbeat_stop(false),
  heartbeat_need_update(true),
  hb_front_client_messenger(hb_client_front),
  hb_back_client_messenger(hb_client_back),
  hb_front_server_messenger(hb_front_serverm),
  hb_back_server_messenger(hb_back_serverm),
  daily_loadavg(0.0),
  heartbeat_thread(this),
  heartbeat_dispatcher(this),
  op_tracker(cct, cct->_conf->osd_enable_op_tracker,
             cct->_conf->osd_num_op_tracker_shard),
  test_ops_hook(NULL),
  op_queue(get_io_queue()),
  op_prio_cutoff(get_io_prio_cut()),
  op_shardedwq(
    get_num_op_shards(),
    this,
    cct->_conf->osd_op_thread_timeout,
    cct->_conf->osd_op_thread_suicide_timeout,
    &osd_op_tp),
  peering_wq(
    this,
    cct->_conf->osd_op_thread_timeout,
    cct->_conf->osd_op_thread_suicide_timeout,
    &peering_tp),
  map_lock("OSD::map_lock"),
  pg_map_lock("OSD::pg_map_lock"),
  last_pg_create_epoch(0),
  mon_report_lock("OSD::mon_report_lock"),
  stats_ack_timeout(cct->_conf->osd_mon_ack_timeout),
1944 up_thru_wanted(0),
1945 requested_full_first(0),
1946 requested_full_last(0),
1947 pg_stat_queue_lock("OSD::pg_stat_queue_lock"),
1948 osd_stat_updated(false),
1949 pg_stat_tid(0), pg_stat_tid_flushed(0),
1950 command_wq(
1951 this,
1952 cct->_conf->osd_command_thread_timeout,
1953 cct->_conf->osd_command_thread_suicide_timeout,
1954 &command_tp),
1955 remove_wq(
1956 cct,
1957 store,
1958 cct->_conf->osd_remove_thread_timeout,
1959 cct->_conf->osd_remove_thread_suicide_timeout,
1960 &disk_tp),
1961 service(this)
1962 {
1963 monc->set_messenger(client_messenger);
1964 op_tracker.set_complaint_and_threshold(cct->_conf->osd_op_complaint_time,
1965 cct->_conf->osd_op_log_threshold);
1966 op_tracker.set_history_size_and_duration(cct->_conf->osd_op_history_size,
1967 cct->_conf->osd_op_history_duration);
1968 op_tracker.set_history_slow_op_size_and_threshold(cct->_conf->osd_op_history_slow_op_size,
1969 cct->_conf->osd_op_history_slow_op_threshold);
1970 #ifdef WITH_BLKIN
1971 std::stringstream ss;
1972 ss << "osd." << whoami;
1973 trace_endpoint.copy_name(ss.str());
1974 #endif
1975 }
1976
1977 OSD::~OSD()
1978 {
1979 delete authorize_handler_cluster_registry;
1980 delete authorize_handler_service_registry;
1981 delete class_handler;
1982 cct->get_perfcounters_collection()->remove(recoverystate_perf);
1983 cct->get_perfcounters_collection()->remove(logger);
1984 delete recoverystate_perf;
1985 delete logger;
1986 delete store;
1987 }
1988
1989 void cls_initialize(ClassHandler *ch);
1990
1991 void OSD::handle_signal(int signum)
1992 {
1993 assert(signum == SIGINT || signum == SIGTERM);
1994 derr << "*** Got signal " << sig_str(signum) << " ***" << dendl;
1995 shutdown();
1996 }
1997
1998 int OSD::pre_init()
1999 {
2000 Mutex::Locker lock(osd_lock);
2001 if (is_stopping())
2002 return 0;
2003
2004 if (store->test_mount_in_use()) {
2005 derr << "OSD::pre_init: object store '" << dev_path << "' is "
2006 << "currently in use. (Is ceph-osd already running?)" << dendl;
2007 return -EBUSY;
2008 }
2009
2010 cct->_conf->add_observer(this);
2011 return 0;
2012 }
2013
2014 // asok
2015
2016 class OSDSocketHook : public AdminSocketHook {
2017 OSD *osd;
2018 public:
2019 explicit OSDSocketHook(OSD *o) : osd(o) {}
2020 bool call(std::string admin_command, cmdmap_t& cmdmap, std::string format,
2021 bufferlist& out) override {
2022 stringstream ss;
2023 bool r = osd->asok_command(admin_command, cmdmap, format, ss);
2024 out.append(ss);
2025 return r;
2026 }
2027 };
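// This hook backs the daemon admin socket, so every command registered in
// OSD::final_init() below can be driven from the command line, e.g.
// (osd id and socket path illustrative):
//
//   ceph daemon osd.0 status
//   ceph daemon osd.0 dump_ops_in_flight
//   ceph --admin-daemon /var/run/ceph/ceph-osd.0.asok dump_blocked_ops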
2028
2029 bool OSD::asok_command(string admin_command, cmdmap_t& cmdmap, string format,
2030 ostream& ss)
2031 {
2032 Formatter *f = Formatter::create(format, "json-pretty", "json-pretty");
2033 if (admin_command == "status") {
2034 f->open_object_section("status");
2035 f->dump_stream("cluster_fsid") << superblock.cluster_fsid;
2036 f->dump_stream("osd_fsid") << superblock.osd_fsid;
2037 f->dump_unsigned("whoami", superblock.whoami);
2038 f->dump_string("state", get_state_name(get_state()));
2039 f->dump_unsigned("oldest_map", superblock.oldest_map);
2040 f->dump_unsigned("newest_map", superblock.newest_map);
2041 {
2042 RWLock::RLocker l(pg_map_lock);
2043 f->dump_unsigned("num_pgs", pg_map.size());
2044 }
2045 f->close_section();
2046 } else if (admin_command == "flush_journal") {
2047 store->flush_journal();
2048 } else if (admin_command == "dump_ops_in_flight" ||
2049 admin_command == "ops" ||
2050 admin_command == "dump_blocked_ops" ||
2051 admin_command == "dump_historic_ops" ||
2052 admin_command == "dump_historic_ops_by_duration" ||
2053 admin_command == "dump_historic_slow_ops") {
2054
2055 const string error_str = "op_tracker tracking is not enabled now, so no ops are tracked currently, \
2056 not even those that are stuck. Please enable \"osd_enable_op_tracker\", and the tracker \
2057 will start to track new ops received afterwards.";
2058
2059 set<string> filters;
2060 vector<string> filter_str;
2061 if (cmd_getval(cct, cmdmap, "filterstr", filter_str)) {
2062 copy(filter_str.begin(), filter_str.end(),
2063 inserter(filters, filters.end()));
2064 }
2065
2066 if (admin_command == "dump_ops_in_flight" ||
2067 admin_command == "ops") {
2068 if (!op_tracker.dump_ops_in_flight(f, false, filters)) {
2069 ss << error_str;
2070 }
2071 }
2072 if (admin_command == "dump_blocked_ops") {
2073 if (!op_tracker.dump_ops_in_flight(f, true, filters)) {
2074 ss << error_str;
2075 }
2076 }
2077 if (admin_command == "dump_historic_ops") {
2078 if (!op_tracker.dump_historic_ops(f, false, filters)) {
2079 ss << error_str;
2080 }
2081 }
2082 if (admin_command == "dump_historic_ops_by_duration") {
2083 if (!op_tracker.dump_historic_ops(f, true, filters)) {
2084 ss << error_str;
2085 }
2086 }
2087 if (admin_command == "dump_historic_slow_ops") {
2088 if (!op_tracker.dump_historic_slow_ops(f, filters)) {
2089 ss << error_str;
2090 }
2091 }
2092 } else if (admin_command == "dump_op_pq_state") {
2093 f->open_object_section("pq");
2094 op_shardedwq.dump(f);
2095 f->close_section();
2096 } else if (admin_command == "dump_blacklist") {
2097 list<pair<entity_addr_t,utime_t> > bl;
2098 OSDMapRef curmap = service.get_osdmap();
2099
2100 f->open_array_section("blacklist");
2101 curmap->get_blacklist(&bl);
2102 for (list<pair<entity_addr_t,utime_t> >::iterator it = bl.begin();
2103 it != bl.end(); ++it) {
2104 f->open_object_section("entry");
2105 f->open_object_section("entity_addr_t");
2106 it->first.dump(f);
2107 f->close_section(); //entity_addr_t
2108 it->second.localtime(f->dump_stream("expire_time"));
2109 f->close_section(); //entry
2110 }
2111 f->close_section(); //blacklist
2112 } else if (admin_command == "dump_watchers") {
2113 list<obj_watch_item_t> watchers;
2114 // scan pg's
2115 {
2116 Mutex::Locker l(osd_lock);
2117 RWLock::RLocker l2(pg_map_lock);
2118 for (ceph::unordered_map<spg_t,PG*>::iterator it = pg_map.begin();
2119 it != pg_map.end();
2120 ++it) {
2121
2122 list<obj_watch_item_t> pg_watchers;
2123 PG *pg = it->second;
2124 pg->lock();
2125 pg->get_watchers(pg_watchers);
2126 pg->unlock();
2127 watchers.splice(watchers.end(), pg_watchers);
2128 }
2129 }
2130
2131 f->open_array_section("watchers");
2132 for (list<obj_watch_item_t>::iterator it = watchers.begin();
2133 it != watchers.end(); ++it) {
2134
2135 f->open_object_section("watch");
2136
2137 f->dump_string("namespace", it->obj.nspace);
2138 f->dump_string("object", it->obj.oid.name);
2139
2140 f->open_object_section("entity_name");
2141 it->wi.name.dump(f);
2142 f->close_section(); //entity_name_t
2143
2144 f->dump_unsigned("cookie", it->wi.cookie);
2145 f->dump_unsigned("timeout", it->wi.timeout_seconds);
2146
2147 f->open_object_section("entity_addr_t");
2148 it->wi.addr.dump(f);
2149 f->close_section(); //entity_addr_t
2150
2151 f->close_section(); //watch
2152 }
2153
2154 f->close_section(); //watchers
2155 } else if (admin_command == "dump_reservations") {
2156 f->open_object_section("reservations");
2157 f->open_object_section("local_reservations");
2158 service.local_reserver.dump(f);
2159 f->close_section();
2160 f->open_object_section("remote_reservations");
2161 service.remote_reserver.dump(f);
2162 f->close_section();
2163 f->close_section();
2164 } else if (admin_command == "get_latest_osdmap") {
2165 get_latest_osdmap();
2166 } else if (admin_command == "heap") {
2167 auto result = ceph::osd_cmds::heap(*cct, cmdmap, *f, ss);
2168
2169 // Note: Failed heap profile commands won't necessarily trigger an error:
2170 f->open_object_section("result");
2171 f->dump_string("error", cpp_strerror(result));
2172 f->dump_bool("success", result >= 0);
2173 f->close_section();
2174 } else if (admin_command == "set_heap_property") {
2175 string property;
2176 int64_t value = 0;
2177 string error;
2178 bool success = false;
2179 if (!cmd_getval(cct, cmdmap, "property", property)) {
2180 error = "unable to get property";
2181 success = false;
2182 } else if (!cmd_getval(cct, cmdmap, "value", value)) {
2183 error = "unable to get value";
2184 success = false;
2185 } else if (value < 0) {
2186 error = "negative value not allowed";
2187 success = false;
2188 } else if (!ceph_heap_set_numeric_property(property.c_str(), (size_t)value)) {
2189 error = "invalid property";
2190 success = false;
2191 } else {
2192 success = true;
2193 }
2194 f->open_object_section("result");
2195 f->dump_string("error", error);
2196 f->dump_bool("success", success);
2197 f->close_section();
2198 } else if (admin_command == "get_heap_property") {
2199 string property;
2200 size_t value = 0;
2201 string error;
2202 bool success = false;
2203 if (!cmd_getval(cct, cmdmap, "property", property)) {
2204 error = "unable to get property";
2205 success = false;
2206 } else if (!ceph_heap_get_numeric_property(property.c_str(), &value)) {
2207 error = "invalid property";
2208 success = false;
2209 } else {
2210 success = true;
2211 }
2212 f->open_object_section("result");
2213 f->dump_string("error", error);
2214 f->dump_bool("success", success);
2215 f->dump_int("value", value);
2216 f->close_section();
2217 } else if (admin_command == "dump_objectstore_kv_stats") {
2218 store->get_db_statistics(f);
2219 } else if (admin_command == "dump_scrubs") {
2220 service.dumps_scrub(f);
2221 } else if (admin_command == "calc_objectstore_db_histogram") {
2222 store->generate_db_histogram(f);
2223 } else if (admin_command == "flush_store_cache") {
2224 store->flush_cache();
2225 } else if (admin_command == "dump_pgstate_history") {
2226 f->open_object_section("pgstate_history");
2227 RWLock::RLocker l2(pg_map_lock);
2228 for (ceph::unordered_map<spg_t,PG*>::iterator it = pg_map.begin();
2229 it != pg_map.end();
2230 ++it) {
2231
2232 PG *pg = it->second;
2233 f->dump_stream("pg") << pg->get_pgid();
2234 pg->lock();
2235 pg->pgstate_history.dump(f);
2236 pg->unlock();
2237 }
2238 f->close_section();
2239 } else if (admin_command == "compact") {
2240 dout(1) << "triggering manual compaction" << dendl;
2241 auto start = ceph::coarse_mono_clock::now();
2242 store->compact();
2243 auto end = ceph::coarse_mono_clock::now();
2244 auto time_span = chrono::duration_cast<chrono::duration<double>>(end - start);
2245 dout(1) << "finished manual compaction in "
2246 << time_span.count()
2247 << " seconds" << dendl;
2248 f->open_object_section("compact_result");
2249 f->dump_float("elapsed_time", time_span.count());
2250 f->close_section();
2251 } else {
2252 assert(0 == "broken asok registration");
2253 }
2254 f->flush(ss);
2255 delete f;
2256 return true;
2257 }
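// Example round-trip against the heap property branches above, assuming a
// tcmalloc build (the property name is a standard tcmalloc one; the osd id
// and value are illustrative):
//
//   ceph daemon osd.0 get_heap_property tcmalloc.max_total_thread_cache_bytes
//   ceph daemon osd.0 set_heap_property tcmalloc.max_total_thread_cache_bytes 67108864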
2258
2259 class TestOpsSocketHook : public AdminSocketHook {
2260 OSDService *service;
2261 ObjectStore *store;
2262 public:
2263 TestOpsSocketHook(OSDService *s, ObjectStore *st) : service(s), store(st) {}
2264 bool call(std::string command, cmdmap_t& cmdmap, std::string format,
2265 bufferlist& out) override {
2266 stringstream ss;
2267 test_ops(service, store, command, cmdmap, ss);
2268 out.append(ss);
2269 return true;
2270 }
2271 void test_ops(OSDService *service, ObjectStore *store,
2272 const std::string &command, cmdmap_t& cmdmap, ostream &ss);
2273
2274 };
2275
2276 class OSD::C_Tick : public Context {
2277 OSD *osd;
2278 public:
2279 explicit C_Tick(OSD *o) : osd(o) {}
2280 void finish(int r) override {
2281 osd->tick();
2282 }
2283 };
2284
2285 class OSD::C_Tick_WithoutOSDLock : public Context {
2286 OSD *osd;
2287 public:
2288 explicit C_Tick_WithoutOSDLock(OSD *o) : osd(o) {}
2289 void finish(int r) override {
2290 osd->tick_without_osd_lock();
2291 }
2292 };
2293
2294 int OSD::enable_disable_fuse(bool stop)
2295 {
2296 #ifdef HAVE_LIBFUSE
2297 int r;
2298 string mntpath = cct->_conf->osd_data + "/fuse";
2299 if (fuse_store && (stop || !cct->_conf->osd_objectstore_fuse)) {
2300 dout(1) << __func__ << " disabling" << dendl;
2301 fuse_store->stop();
2302 delete fuse_store;
2303 fuse_store = NULL;
2304 r = ::rmdir(mntpath.c_str());
2305 if (r < 0) {
2306 r = -errno;
2307 derr << __func__ << " failed to rmdir " << mntpath << ": "
2308 << cpp_strerror(r) << dendl;
2309 return r;
2310 }
2311 return 0;
2312 }
2313 if (!fuse_store && cct->_conf->osd_objectstore_fuse) {
2314 dout(1) << __func__ << " enabling" << dendl;
2315 r = ::mkdir(mntpath.c_str(), 0700);
2316 if (r < 0)
2317 r = -errno;
2318 if (r < 0 && r != -EEXIST) {
2319 derr << __func__ << " unable to create " << mntpath << ": "
2320 << cpp_strerror(r) << dendl;
2321 return r;
2322 }
2323 fuse_store = new FuseStore(store, mntpath);
2324 r = fuse_store->start();
2325 if (r < 0) {
2326 derr << __func__ << " unable to start fuse: " << cpp_strerror(r) << dendl;
2327 delete fuse_store;
2328 fuse_store = NULL;
2329 return r;
2330 }
2331 }
2332 #endif // HAVE_LIBFUSE
2333 return 0;
2334 }
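// Sketch of how the fuse mirror is toggled; osd_objectstore_fuse is the
// config option consulted above, and the mountpoint is always
// $osd_data/fuse (daemon id illustrative):
//
//   [osd]
//   osd objectstore fuse = true
//
//   ceph daemon osd.0 config set osd_objectstore_fuse false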
2335
2336 int OSD::get_num_op_shards()
2337 {
2338 if (cct->_conf->osd_op_num_shards)
2339 return cct->_conf->osd_op_num_shards;
2340 if (store_is_rotational)
2341 return cct->_conf->osd_op_num_shards_hdd;
2342 else
2343 return cct->_conf->osd_op_num_shards_ssd;
2344 }
2345
2346 int OSD::get_num_op_threads()
2347 {
2348 if (cct->_conf->osd_op_num_threads_per_shard)
2349 return get_num_op_shards() * cct->_conf->osd_op_num_threads_per_shard;
2350 if (store_is_rotational)
2351 return get_num_op_shards() * cct->_conf->osd_op_num_threads_per_shard_hdd;
2352 else
2353 return get_num_op_shards() * cct->_conf->osd_op_num_threads_per_shard_ssd;
2354 }
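// Worked example, assuming the defaults shipped at the time of writing
// (osd_op_num_shards = 0, osd_op_num_shards_hdd = 5,
// osd_op_num_threads_per_shard = 0, osd_op_num_threads_per_shard_hdd = 1):
// on a rotational store get_num_op_shards() returns 5 and
// get_num_op_threads() returns 5 * 1 = 5 workers for osd_op_tp.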
2355
2356 float OSD::get_osd_recovery_sleep()
2357 {
2358 if (cct->_conf->osd_recovery_sleep)
2359 return cct->_conf->osd_recovery_sleep;
2360 if (store_is_rotational)
2361 return cct->_conf->osd_recovery_sleep_hdd;
2362 else
2363 return cct->_conf->osd_recovery_sleep_ssd;
2364 }
2365
2366 int OSD::init()
2367 {
2368 CompatSet initial, diff;
2369 Mutex::Locker lock(osd_lock);
2370 if (is_stopping())
2371 return 0;
2372
2373 tick_timer.init();
2374 tick_timer_without_osd_lock.init();
2375 service.recovery_request_timer.init();
2376 service.recovery_sleep_timer.init();
2377
2378 // mount.
2379 dout(2) << "init " << dev_path
2380 << " (looks like " << (store_is_rotational ? "hdd" : "ssd") << ")"
2381 << dendl;
2382 assert(store); // call pre_init() first!
2383
2384 store->set_cache_shards(get_num_op_shards());
2385
2386 int r = store->mount();
2387 if (r < 0) {
2388 derr << "OSD:init: unable to mount object store" << dendl;
2389 return r;
2390 }
2391
2392 enable_disable_fuse(false);
2393
2394 dout(2) << "boot" << dendl;
2395
2396 // initialize the daily loadavg with current 15min loadavg
2397 double loadavgs[3];
2398 if (getloadavg(loadavgs, 3) == 3) {
2399 daily_loadavg = loadavgs[2];
2400 } else {
2401 derr << "OSD::init() : couldn't read loadavgs\n" << dendl;
2402 daily_loadavg = 1.0;
2403 }
2404
2405 int rotating_auth_attempts = 0;
2406
2407 // sanity check long object name handling
2408 {
2409 hobject_t l;
2410 l.oid.name = string(cct->_conf->osd_max_object_name_len, 'n');
2411 l.set_key(string(cct->_conf->osd_max_object_name_len, 'k'));
2412 l.nspace = string(cct->_conf->osd_max_object_namespace_len, 's');
2413 r = store->validate_hobject_key(l);
2414 if (r < 0) {
2415 derr << "backend (" << store->get_type() << ") is unable to support max "
2416 << "object name[space] len" << dendl;
2417 derr << " osd max object name len = "
2418 << cct->_conf->osd_max_object_name_len << dendl;
2419 derr << " osd max object namespace len = "
2420 << cct->_conf->osd_max_object_namespace_len << dendl;
2421 derr << cpp_strerror(r) << dendl;
2422 if (cct->_conf->osd_check_max_object_name_len_on_startup) {
2423 goto out;
2424 }
2425 derr << "osd_check_max_object_name_len_on_startup = false, starting anyway"
2426 << dendl;
2427 } else {
2428 dout(20) << "configured osd_max_object_name[space]_len looks ok" << dendl;
2429 }
2430 }
2431
2432 // read superblock
2433 r = read_superblock();
2434 if (r < 0) {
2435 derr << "OSD::init(): unable to read osd superblock" << dendl;
2436 r = -EINVAL;
2437 goto out;
2438 }
2439
2440 if (osd_compat.compare(superblock.compat_features) < 0) {
2441 derr << "The disk uses features unsupported by the executable." << dendl;
2442 derr << " ondisk features " << superblock.compat_features << dendl;
2443 derr << " daemon features " << osd_compat << dendl;
2444
2445 if (osd_compat.writeable(superblock.compat_features)) {
2446 CompatSet diff = osd_compat.unsupported(superblock.compat_features);
2447 derr << "it is still writeable, though. Missing features: " << diff << dendl;
2448 r = -EOPNOTSUPP;
2449 goto out;
2450 }
2451 else {
2452 CompatSet diff = osd_compat.unsupported(superblock.compat_features);
2453 derr << "Cannot write to disk! Missing features: " << diff << dendl;
2454 r = -EOPNOTSUPP;
2455 goto out;
2456 }
2457 }
2458
2459 assert_warn(whoami == superblock.whoami);
2460 if (whoami != superblock.whoami) {
2461 derr << "OSD::init: superblock says osd"
2462 << superblock.whoami << " but I am osd." << whoami << dendl;
2463 r = -EINVAL;
2464 goto out;
2465 }
2466
2467 initial = get_osd_initial_compat_set();
2468 diff = superblock.compat_features.unsupported(initial);
2469 if (superblock.compat_features.merge(initial)) {
2470 // We need to persist the new compat_set before we
2471 // do anything else
2472 dout(5) << "Upgrading superblock adding: " << diff << dendl;
2473 ObjectStore::Transaction t;
2474 write_superblock(t);
2475 r = store->apply_transaction(service.meta_osr.get(), std::move(t));
2476 if (r < 0)
2477 goto out;
2478 }
2479
2480 // make sure snap mapper object exists
2481 if (!store->exists(coll_t::meta(), OSD::make_snapmapper_oid())) {
2482 dout(10) << "init creating/touching snapmapper object" << dendl;
2483 ObjectStore::Transaction t;
2484 t.touch(coll_t::meta(), OSD::make_snapmapper_oid());
2485 r = store->apply_transaction(service.meta_osr.get(), std::move(t));
2486 if (r < 0)
2487 goto out;
2488 }
2489
2490 class_handler = new ClassHandler(cct);
2491 cls_initialize(class_handler);
2492
2493 if (cct->_conf->osd_open_classes_on_start) {
2494 int r = class_handler->open_all_classes();
2495 if (r)
2496 dout(1) << "warning: got an error loading one or more classes: " << cpp_strerror(r) << dendl;
2497 }
2498
2499 // load up "current" osdmap
2500 assert_warn(!osdmap);
2501 if (osdmap) {
2502 derr << "OSD::init: osdmap unexpectedly already set" << dendl;
2503 r = -EINVAL;
2504 goto out;
2505 }
2506 osdmap = get_map(superblock.current_epoch);
2507 check_osdmap_features(store);
2508
2509 create_recoverystate_perf();
2510
2511 {
2512 epoch_t bind_epoch = osdmap->get_epoch();
2513 service.set_epochs(NULL, NULL, &bind_epoch);
2514 }
2515
2516 clear_temp_objects();
2517
2518 // load up pgs (as they previously existed)
2519 load_pgs();
2520
2521 dout(2) << "superblock: I am osd." << superblock.whoami << dendl;
2522 dout(0) << "using " << op_queue << " op queue with priority op cut off at " <<
2523 op_prio_cutoff << "." << dendl;
2524
2525 create_logger();
2526
2527 // i'm ready!
2528 client_messenger->add_dispatcher_head(this);
2529 cluster_messenger->add_dispatcher_head(this);
2530
2531 hb_front_client_messenger->add_dispatcher_head(&heartbeat_dispatcher);
2532 hb_back_client_messenger->add_dispatcher_head(&heartbeat_dispatcher);
2533 hb_front_server_messenger->add_dispatcher_head(&heartbeat_dispatcher);
2534 hb_back_server_messenger->add_dispatcher_head(&heartbeat_dispatcher);
2535
2536 objecter_messenger->add_dispatcher_head(service.objecter);
2537
2538 monc->set_want_keys(CEPH_ENTITY_TYPE_MON | CEPH_ENTITY_TYPE_OSD
2539 | CEPH_ENTITY_TYPE_MGR);
2540 r = monc->init();
2541 if (r < 0)
2542 goto out;
2543
2544 /**
2545 * FIXME: this is a placeholder implementation that unconditionally
2546 * sends every is_primary PG's stats every time we're called, unlike
2547 * the existing mon PGStats mechanism that uses pg_stat_queue and acks.
2548 * This has equivalent cost to the existing worst case where all
2549 * PGs are busy and their stats are always enqueued for sending.
2550 */
2551 mgrc.set_pgstats_cb([this](){
2552 RWLock::RLocker l(map_lock);
2553
2554 utime_t had_for = ceph_clock_now() - had_map_since;
2555 osd_stat_t cur_stat = service.get_osd_stat();
2556 cur_stat.os_perf_stat = store->get_cur_stats();
2557
2558 MPGStats *m = new MPGStats(monc->get_fsid(), osdmap->get_epoch(), had_for);
2559 m->osd_stat = cur_stat;
2560
2561 Mutex::Locker lec{min_last_epoch_clean_lock};
2562 min_last_epoch_clean = osdmap->get_epoch();
2563 min_last_epoch_clean_pgs.clear();
2564 RWLock::RLocker lpg(pg_map_lock);
2565 for (const auto &i : pg_map) {
2566 PG *pg = i.second;
2567 if (!pg->is_primary()) {
2568 continue;
2569 }
2570
2571 pg->pg_stats_publish_lock.Lock();
2572 if (pg->pg_stats_publish_valid) {
2573 m->pg_stat[pg->info.pgid.pgid] = pg->pg_stats_publish;
2574 const auto lec = pg->pg_stats_publish.get_effective_last_epoch_clean();
2575 min_last_epoch_clean = min(min_last_epoch_clean, lec);
2576 min_last_epoch_clean_pgs.push_back(pg->info.pgid.pgid);
2577 }
2578 pg->pg_stats_publish_lock.Unlock();
2579 }
2580
2581 return m;
2582 });
2583
2584 mgrc.init();
2585 client_messenger->add_dispatcher_head(&mgrc);
2586
2587 // tell monc about log_client so it will know about mon session resets
2588 monc->set_log_client(&log_client);
2589 update_log_config();
2590
2591 peering_tp.start();
2592 osd_op_tp.start();
2593 disk_tp.start();
2594 command_tp.start();
2595
2596 set_disk_tp_priority();
2597
2598 // start the heartbeat
2599 heartbeat_thread.create("osd_srv_heartbt");
2600
2601 // tick
2602 tick_timer.add_event_after(cct->_conf->osd_heartbeat_interval, new C_Tick(this));
2603 {
2604 Mutex::Locker l(tick_timer_lock);
2605 tick_timer_without_osd_lock.add_event_after(cct->_conf->osd_heartbeat_interval, new C_Tick_WithoutOSDLock(this));
2606 }
2607
2608 service.init();
2609 service.publish_map(osdmap);
2610 service.publish_superblock(superblock);
2611 service.max_oldest_map = superblock.oldest_map;
2612
2613 osd_lock.Unlock();
2614
2615 r = monc->authenticate();
2616 if (r < 0) {
2617 derr << __func__ << " authentication failed: " << cpp_strerror(r)
2618 << dendl;
2619 osd_lock.Lock(); // locker is going to unlock this on function exit
2620 if (is_stopping())
2621 r = 0;
2622 goto monout;
2623 }
2624
2625 while (monc->wait_auth_rotating(30.0) < 0) {
2626 derr << "unable to obtain rotating service keys; retrying" << dendl;
2627 ++rotating_auth_attempts;
2628 if (rotating_auth_attempts > g_conf->max_rotating_auth_attempts) {
2629 derr << __func__ << " wait_auth_rotating timed out" << dendl;
2630 osd_lock.Lock(); // make locker happy
2631 if (!is_stopping()) {
2632 r = -ETIMEDOUT;
2633 }
2634 goto monout;
2635 }
2636 }
2637
2638 r = update_crush_device_class();
2639 if (r < 0) {
2640 derr << __func__ <<" unable to update_crush_device_class: "
2641 << cpp_strerror(r) << dendl;
2642 osd_lock.Lock();
2643 goto monout;
2644 }
2645
2646 r = update_crush_location();
2647 if (r < 0) {
2648 derr << __func__ << " unable to update_crush_location: "
2649 << cpp_strerror(r) << dendl;
2650 osd_lock.Lock();
2651 goto monout;
2652 }
2653
2654 osd_lock.Lock();
2655 if (is_stopping())
2656 return 0;
2657
2658 // start objecter *after* we have authenticated, so that we don't ignore
2659 // the OSDMaps it requests.
2660 service.final_init();
2661
2662 check_config();
2663
2664 dout(10) << "ensuring pgs have consumed prior maps" << dendl;
2665 consume_map();
2666 peering_wq.drain();
2667
2668 dout(0) << "done with init, starting boot process" << dendl;
2669
2670 // subscribe to any pg creations
2671 monc->sub_want("osd_pg_creates", last_pg_create_epoch, 0);
2672
2673 // MgrClient needs this (it doesn't have MonClient reference itself)
2674 monc->sub_want("mgrmap", 0, 0);
2675
2676 // we don't need to ask for an osdmap here; objecter will
2677 //monc->sub_want("osdmap", osdmap->get_epoch(), CEPH_SUBSCRIBE_ONETIME);
2678
2679 monc->renew_subs();
2680
2681 start_boot();
2682
2683 return 0;
2684 monout:
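// Failures after we have begun talking to the monitors do not unwind;
// the daemon exits immediately rather than taking the "out" teardown path.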
2685 exit(1);
2686
2687 out:
2688 enable_disable_fuse(true);
2689 store->umount();
2690 delete store;
2691 store = NULL;
2692 return r;
2693 }
2694
2695 void OSD::final_init()
2696 {
2697 AdminSocket *admin_socket = cct->get_admin_socket();
2698 asok_hook = new OSDSocketHook(this);
2699 int r = admin_socket->register_command("status", "status", asok_hook,
2700 "high-level status of OSD");
2701 assert(r == 0);
2702 r = admin_socket->register_command("flush_journal", "flush_journal",
2703 asok_hook,
2704 "flush the journal to permanent store");
2705 assert(r == 0);
2706 r = admin_socket->register_command("dump_ops_in_flight",
2707 "dump_ops_in_flight " \
2708 "name=filterstr,type=CephString,n=N,req=false",
2709 asok_hook,
2710 "show the ops currently in flight");
2711 assert(r == 0);
2712 r = admin_socket->register_command("ops",
2713 "ops " \
2714 "name=filterstr,type=CephString,n=N,req=false",
2715 asok_hook,
2716 "show the ops currently in flight");
2717 assert(r == 0);
2718 r = admin_socket->register_command("dump_blocked_ops",
2719 "dump_blocked_ops " \
2720 "name=filterstr,type=CephString,n=N,req=false",
2721 asok_hook,
2722 "show the blocked ops currently in flight");
2723 assert(r == 0);
2724 r = admin_socket->register_command("dump_historic_ops",
2725 "dump_historic_ops " \
2726 "name=filterstr,type=CephString,n=N,req=false",
2727 asok_hook,
2728 "show recent ops");
2729 assert(r == 0);
2730 r = admin_socket->register_command("dump_historic_slow_ops",
2731 "dump_historic_slow_ops " \
2732 "name=filterstr,type=CephString,n=N,req=false",
2733 asok_hook,
2734 "show slowest recent ops");
2735 assert(r == 0);
2736 r = admin_socket->register_command("dump_historic_ops_by_duration",
2737 "dump_historic_ops_by_duration " \
2738 "name=filterstr,type=CephString,n=N,req=false",
2739 asok_hook,
2740 "show slowest recent ops, sorted by duration");
2741 assert(r == 0);
2742 r = admin_socket->register_command("dump_op_pq_state", "dump_op_pq_state",
2743 asok_hook,
2744 "dump op priority queue state");
2745 assert(r == 0);
2746 r = admin_socket->register_command("dump_blacklist", "dump_blacklist",
2747 asok_hook,
2748 "dump blacklisted clients and times");
2749 assert(r == 0);
2750 r = admin_socket->register_command("dump_watchers", "dump_watchers",
2751 asok_hook,
2752 "show clients which have active watches,"
2753 " and on which objects");
2754 assert(r == 0);
2755 r = admin_socket->register_command("dump_reservations", "dump_reservations",
2756 asok_hook,
2757 "show recovery reservations");
2758 assert(r == 0);
2759 r = admin_socket->register_command("get_latest_osdmap", "get_latest_osdmap",
2760 asok_hook,
2761 "force osd to update the latest map from "
2762 "the mon");
2763 assert(r == 0);
2764
2765 r = admin_socket->register_command( "heap",
2766 "heap " \
2767 "name=heapcmd,type=CephString",
2768 asok_hook,
2769 "show heap usage info (available only if "
2770 "compiled with tcmalloc)");
2771 assert(r == 0);
2772
2773 r = admin_socket->register_command("set_heap_property",
2774 "set_heap_property " \
2775 "name=property,type=CephString " \
2776 "name=value,type=CephInt",
2777 asok_hook,
2778 "update malloc extension heap property");
2779 assert(r == 0);
2780
2781 r = admin_socket->register_command("get_heap_property",
2782 "get_heap_property " \
2783 "name=property,type=CephString",
2784 asok_hook,
2785 "get malloc extension heap property");
2786 assert(r == 0);
2787
2788 r = admin_socket->register_command("dump_objectstore_kv_stats",
2789 "dump_objectstore_kv_stats",
2790 asok_hook,
2791 "print statistics of kvdb which used by bluestore");
2792 assert(r == 0);
2793
2794 r = admin_socket->register_command("dump_scrubs",
2795 "dump_scrubs",
2796 asok_hook,
2797 "print scheduled scrubs");
2798 assert(r == 0);
2799
2800 r = admin_socket->register_command("calc_objectstore_db_histogram",
2801 "calc_objectstore_db_histogram",
2802 asok_hook,
2803 "Generate key value histogram of kvdb(rocksdb) which used by bluestore");
2804 assert(r == 0);
2805
2806 r = admin_socket->register_command("flush_store_cache",
2807 "flush_store_cache",
2808 asok_hook,
2809 "Flush bluestore internal cache");
2810 assert(r == 0);
2811 r = admin_socket->register_command("dump_pgstate_history", "dump_pgstate_history",
2812 asok_hook,
2813 "show recent state history");
2814 assert(r == 0);
2815
2816 r = admin_socket->register_command("compact", "compact",
2817 asok_hook,
2818 "Commpact object store's omap."
2819 " WARNING: Compaction probably slows your requests");
2820 assert(r == 0);
2821
2822 test_ops_hook = new TestOpsSocketHook(&(this->service), this->store);
2823 // Note: pools are CephString instead of CephPoolname because
2824 // these commands traditionally support both pool names and numbers
2825 r = admin_socket->register_command(
2826 "setomapval",
2827 "setomapval " \
2828 "name=pool,type=CephString " \
2829 "name=objname,type=CephObjectname " \
2830 "name=key,type=CephString "\
2831 "name=val,type=CephString",
2832 test_ops_hook,
2833 "set omap key");
2834 assert(r == 0);
2835 r = admin_socket->register_command(
2836 "rmomapkey",
2837 "rmomapkey " \
2838 "name=pool,type=CephString " \
2839 "name=objname,type=CephObjectname " \
2840 "name=key,type=CephString",
2841 test_ops_hook,
2842 "remove omap key");
2843 assert(r == 0);
2844 r = admin_socket->register_command(
2845 "setomapheader",
2846 "setomapheader " \
2847 "name=pool,type=CephString " \
2848 "name=objname,type=CephObjectname " \
2849 "name=header,type=CephString",
2850 test_ops_hook,
2851 "set omap header");
2852 assert(r == 0);
2853
2854 r = admin_socket->register_command(
2855 "getomap",
2856 "getomap " \
2857 "name=pool,type=CephString " \
2858 "name=objname,type=CephObjectname",
2859 test_ops_hook,
2860 "output entire object map");
2861 assert(r == 0);
2862
2863 r = admin_socket->register_command(
2864 "truncobj",
2865 "truncobj " \
2866 "name=pool,type=CephString " \
2867 "name=objname,type=CephObjectname " \
2868 "name=len,type=CephInt",
2869 test_ops_hook,
2870 "truncate object to length");
2871 assert(r == 0);
2872
2873 r = admin_socket->register_command(
2874 "injectdataerr",
2875 "injectdataerr " \
2876 "name=pool,type=CephString " \
2877 "name=objname,type=CephObjectname " \
2878 "name=shardid,type=CephInt,req=false,range=0|255",
2879 test_ops_hook,
2880 "inject data error to an object");
2881 assert(r == 0);
2882
2883 r = admin_socket->register_command(
2884 "injectmdataerr",
2885 "injectmdataerr " \
2886 "name=pool,type=CephString " \
2887 "name=objname,type=CephObjectname " \
2888 "name=shardid,type=CephInt,req=false,range=0|255",
2889 test_ops_hook,
2890 "inject metadata error to an object");
2891 assert(r == 0);
2892 r = admin_socket->register_command(
2893 "set_recovery_delay",
2894 "set_recovery_delay " \
2895 "name=utime,type=CephInt,req=false",
2896 test_ops_hook,
2897 "Delay osd recovery by specified seconds");
2898 assert(r == 0);
2899 r = admin_socket->register_command(
2900 "trigger_scrub",
2901 "trigger_scrub " \
2902 "name=pgid,type=CephString ",
2903 test_ops_hook,
2904 "Trigger a scheduled scrub ");
2905 assert(r == 0);
2906 r = admin_socket->register_command(
2907 "injectfull",
2908 "injectfull " \
2909 "name=type,type=CephString,req=false " \
2910 "name=count,type=CephInt,req=false ",
2911 test_ops_hook,
2912 "Inject a full disk (optional count times)");
2913 assert(r == 0);
2914 }
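// Once registered, the full command table (including the TestOpsSocketHook
// entries) is discoverable from the admin socket itself (id illustrative):
//
//   ceph daemon osd.0 help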
2915
2916 void OSD::create_logger()
2917 {
2918 dout(10) << "create_logger" << dendl;
2919
2920 PerfCountersBuilder osd_plb(cct, "osd", l_osd_first, l_osd_last);
2921
2922 // Latency axis configuration for op histograms, values are in nanoseconds
2923 PerfHistogramCommon::axis_config_d op_hist_x_axis_config{
2924 "Latency (usec)",
2925 PerfHistogramCommon::SCALE_LOG2, ///< Latency in logarithmic scale
2926 0, ///< Start at 0
2927 100000, ///< Quantization unit is 100usec
2928 32, ///< Enough to cover much longer than slow requests
2929 };
2930
2931 // Op size axis configuration for op histograms, values are in bytes
2932 PerfHistogramCommon::axis_config_d op_hist_y_axis_config{
2933 "Request size (bytes)",
2934 PerfHistogramCommon::SCALE_LOG2, ///< Request size in logarithmic scale
2935 0, ///< Start at 0
2936 512, ///< Quantization unit is 512 bytes
2937 32, ///< Enough to cover requests larger than GB
2938 };
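// With SCALE_LOG2 the bucket width doubles from one bucket to the next, so
// the latency axis above buckets values roughly as [0, 100us), [100us,
// 200us), [200us, 400us), ... (quantization unit 100000 ns = 100us);
// 32 buckets of that shape reach well past the slow-request threshold.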
2939
2940
2941 osd_plb.add_u64(
2942 l_osd_op_wip, "op_wip",
2943 "Replication operations currently being processed (primary)");
2944 osd_plb.add_u64_counter(
2945 l_osd_op, "op",
2946 "Client operations",
2947 "ops", PerfCountersBuilder::PRIO_CRITICAL);
2948 osd_plb.add_u64_counter(
2949 l_osd_op_inb, "op_in_bytes",
2950 "Client operations total write size",
2951 "wr", PerfCountersBuilder::PRIO_INTERESTING);
2952 osd_plb.add_u64_counter(
2953 l_osd_op_outb, "op_out_bytes",
2954 "Client operations total read size",
2955 "rd", PerfCountersBuilder::PRIO_INTERESTING);
2956 osd_plb.add_time_avg(
2957 l_osd_op_lat, "op_latency",
2958 "Latency of client operations (including queue time)",
2959 "l", 9);
2960 osd_plb.add_time_avg(
2961 l_osd_op_process_lat, "op_process_latency",
2962 "Latency of client operations (excluding queue time)");
2963 osd_plb.add_time_avg(
2964 l_osd_op_prepare_lat, "op_prepare_latency",
2965 "Latency of client operations (excluding queue time and wait for finished)");
2966
2967 osd_plb.add_u64_counter(
2968 l_osd_op_r, "op_r", "Client read operations");
2969 osd_plb.add_u64_counter(
2970 l_osd_op_r_outb, "op_r_out_bytes", "Client data read");
2971 osd_plb.add_time_avg(
2972 l_osd_op_r_lat, "op_r_latency",
2973 "Latency of read operation (including queue time)");
2974 osd_plb.add_u64_counter_histogram(
2975 l_osd_op_r_lat_outb_hist, "op_r_latency_out_bytes_histogram",
2976 op_hist_x_axis_config, op_hist_y_axis_config,
2977 "Histogram of operation latency (including queue time) + data read");
2978 osd_plb.add_time_avg(
2979 l_osd_op_r_process_lat, "op_r_process_latency",
2980 "Latency of read operation (excluding queue time)");
2981 osd_plb.add_time_avg(
2982 l_osd_op_r_prepare_lat, "op_r_prepare_latency",
2983 "Latency of read operations (excluding queue time and wait for finished)");
2984 osd_plb.add_u64_counter(
2985 l_osd_op_w, "op_w", "Client write operations");
2986 osd_plb.add_u64_counter(
2987 l_osd_op_w_inb, "op_w_in_bytes", "Client data written");
2988 osd_plb.add_time_avg(
2989 l_osd_op_w_lat, "op_w_latency",
2990 "Latency of write operation (including queue time)");
2991 osd_plb.add_u64_counter_histogram(
2992 l_osd_op_w_lat_inb_hist, "op_w_latency_in_bytes_histogram",
2993 op_hist_x_axis_config, op_hist_y_axis_config,
2994 "Histogram of operation latency (including queue time) + data written");
2995 osd_plb.add_time_avg(
2996 l_osd_op_w_process_lat, "op_w_process_latency",
2997 "Latency of write operation (excluding queue time)");
2998 osd_plb.add_time_avg(
2999 l_osd_op_w_prepare_lat, "op_w_prepare_latency",
3000 "Latency of write operations (excluding queue time and wait for finished)");
3001 osd_plb.add_u64_counter(
3002 l_osd_op_rw, "op_rw",
3003 "Client read-modify-write operations");
3004 osd_plb.add_u64_counter(
3005 l_osd_op_rw_inb, "op_rw_in_bytes",
3006 "Client read-modify-write operations write in");
3007 osd_plb.add_u64_counter(
3008 l_osd_op_rw_outb, "op_rw_out_bytes",
3009 "Client read-modify-write operations read out");
3010 osd_plb.add_time_avg(
3011 l_osd_op_rw_lat, "op_rw_latency",
3012 "Latency of read-modify-write operation (including queue time)");
3013 osd_plb.add_u64_counter_histogram(
3014 l_osd_op_rw_lat_inb_hist, "op_rw_latency_in_bytes_histogram",
3015 op_hist_x_axis_config, op_hist_y_axis_config,
3016 "Histogram of rw operation latency (including queue time) + data written");
3017 osd_plb.add_u64_counter_histogram(
3018 l_osd_op_rw_lat_outb_hist, "op_rw_latency_out_bytes_histogram",
3019 op_hist_x_axis_config, op_hist_y_axis_config,
3020 "Histogram of rw operation latency (including queue time) + data read");
3021 osd_plb.add_time_avg(
3022 l_osd_op_rw_process_lat, "op_rw_process_latency",
3023 "Latency of read-modify-write operation (excluding queue time)");
3024 osd_plb.add_time_avg(
3025 l_osd_op_rw_prepare_lat, "op_rw_prepare_latency",
3026 "Latency of read-modify-write operations (excluding queue time and wait for finished)");
3027
3028 osd_plb.add_time_avg(l_osd_op_before_queue_op_lat, "op_before_queue_op_lat",
3029 "Latency of IO before calling queue (before actually queuing into ShardedOpWq)"); // client io before queue op_wq latency
3030 osd_plb.add_time_avg(l_osd_op_before_dequeue_op_lat, "op_before_dequeue_op_lat",
3031 "Latency of IO before calling dequeue_op (already dequeued and PG lock held)"); // client io before dequeue_op latency
3032
3033 osd_plb.add_u64_counter(
3034 l_osd_sop, "subop", "Suboperations");
3035 osd_plb.add_u64_counter(
3036 l_osd_sop_inb, "subop_in_bytes", "Suboperations total size");
3037 osd_plb.add_time_avg(l_osd_sop_lat, "subop_latency", "Suboperations latency");
3038
3039 osd_plb.add_u64_counter(l_osd_sop_w, "subop_w", "Replicated writes");
3040 osd_plb.add_u64_counter(
3041 l_osd_sop_w_inb, "subop_w_in_bytes", "Replicated written data size");
3042 osd_plb.add_time_avg(
3043 l_osd_sop_w_lat, "subop_w_latency", "Replicated writes latency");
3044 osd_plb.add_u64_counter(
3045 l_osd_sop_pull, "subop_pull", "Suboperations pull requests");
3046 osd_plb.add_time_avg(
3047 l_osd_sop_pull_lat, "subop_pull_latency", "Suboperations pull latency");
3048 osd_plb.add_u64_counter(
3049 l_osd_sop_push, "subop_push", "Suboperations push messages");
3050 osd_plb.add_u64_counter(
3051 l_osd_sop_push_inb, "subop_push_in_bytes", "Suboperations pushed size");
3052 osd_plb.add_time_avg(
3053 l_osd_sop_push_lat, "subop_push_latency", "Suboperations push latency");
3054
3055 osd_plb.add_u64_counter(l_osd_pull, "pull", "Pull requests sent");
3056 osd_plb.add_u64_counter(l_osd_push, "push", "Push messages sent");
3057 osd_plb.add_u64_counter(l_osd_push_outb, "push_out_bytes", "Pushed size");
3058
3059 osd_plb.add_u64_counter(
3060 l_osd_rop, "recovery_ops",
3061 "Started recovery operations",
3062 "rop", PerfCountersBuilder::PRIO_INTERESTING);
3063
3064 osd_plb.add_u64(l_osd_loadavg, "loadavg", "CPU load");
3065 osd_plb.add_u64(l_osd_buf, "buffer_bytes", "Total allocated buffer size");
3066 osd_plb.add_u64(l_osd_history_alloc_bytes, "history_alloc_Mbytes");
3067 osd_plb.add_u64(l_osd_history_alloc_num, "history_alloc_num");
3068 osd_plb.add_u64(
3069 l_osd_cached_crc, "cached_crc", "Total number getting crc from crc_cache");
3070 osd_plb.add_u64(
3071 l_osd_cached_crc_adjusted, "cached_crc_adjusted",
3072 "Total number getting crc from crc_cache with adjusting");
3073 osd_plb.add_u64(l_osd_missed_crc, "missed_crc",
3074 "Total number of crc cache misses");
3075
3076 osd_plb.add_u64(l_osd_pg, "numpg", "Placement groups",
3077 "pgs", PerfCountersBuilder::PRIO_USEFUL);
3078 osd_plb.add_u64(
3079 l_osd_pg_primary, "numpg_primary",
3080 "Placement groups for which this osd is primary");
3081 osd_plb.add_u64(
3082 l_osd_pg_replica, "numpg_replica",
3083 "Placement groups for which this osd is replica");
3084 osd_plb.add_u64(
3085 l_osd_pg_stray, "numpg_stray",
3086 "Placement groups ready to be deleted from this osd");
3087 osd_plb.add_u64(
3088 l_osd_hb_to, "heartbeat_to_peers", "Heartbeat (ping) peers we send to");
3089 osd_plb.add_u64_counter(l_osd_map, "map_messages", "OSD map messages");
3090 osd_plb.add_u64_counter(l_osd_mape, "map_message_epochs", "OSD map epochs");
3091 osd_plb.add_u64_counter(
3092 l_osd_mape_dup, "map_message_epoch_dups", "OSD map duplicates");
3093 osd_plb.add_u64_counter(
3094 l_osd_waiting_for_map, "messages_delayed_for_map",
3095 "Operations waiting for OSD map");
3096
3097 osd_plb.add_u64_counter(
3098 l_osd_map_cache_hit, "osd_map_cache_hit", "osdmap cache hit");
3099 osd_plb.add_u64_counter(
3100 l_osd_map_cache_miss, "osd_map_cache_miss", "osdmap cache miss");
3101 osd_plb.add_u64_counter(
3102 l_osd_map_cache_miss_low, "osd_map_cache_miss_low",
3103 "osdmap cache miss below cache lower bound");
3104 osd_plb.add_u64_avg(
3105 l_osd_map_cache_miss_low_avg, "osd_map_cache_miss_low_avg",
3106 "osdmap cache miss, avg distance below cache lower bound");
3107 osd_plb.add_u64_counter(
3108 l_osd_map_bl_cache_hit, "osd_map_bl_cache_hit",
3109 "OSDMap buffer cache hits");
3110 osd_plb.add_u64_counter(
3111 l_osd_map_bl_cache_miss, "osd_map_bl_cache_miss",
3112 "OSDMap buffer cache misses");
3113
3114 osd_plb.add_u64(l_osd_stat_bytes, "stat_bytes", "OSD size");
3115 osd_plb.add_u64(l_osd_stat_bytes_used, "stat_bytes_used", "Used space");
3116 osd_plb.add_u64(l_osd_stat_bytes_avail, "stat_bytes_avail", "Available space");
3117
3118 osd_plb.add_u64_counter(
3119 l_osd_copyfrom, "copyfrom", "Rados \"copy-from\" operations");
3120
3121 osd_plb.add_u64_counter(l_osd_tier_promote, "tier_promote", "Tier promotions");
3122 osd_plb.add_u64_counter(l_osd_tier_flush, "tier_flush", "Tier flushes");
3123 osd_plb.add_u64_counter(
3124 l_osd_tier_flush_fail, "tier_flush_fail", "Failed tier flushes");
3125 osd_plb.add_u64_counter(
3126 l_osd_tier_try_flush, "tier_try_flush", "Tier flush attempts");
3127 osd_plb.add_u64_counter(
3128 l_osd_tier_try_flush_fail, "tier_try_flush_fail",
3129 "Failed tier flush attempts");
3130 osd_plb.add_u64_counter(
3131 l_osd_tier_evict, "tier_evict", "Tier evictions");
3132 osd_plb.add_u64_counter(
3133 l_osd_tier_whiteout, "tier_whiteout", "Tier whiteouts");
3134 osd_plb.add_u64_counter(
3135 l_osd_tier_dirty, "tier_dirty", "Dirty tier flag set");
3136 osd_plb.add_u64_counter(
3137 l_osd_tier_clean, "tier_clean", "Dirty tier flag cleaned");
3138 osd_plb.add_u64_counter(
3139 l_osd_tier_delay, "tier_delay", "Tier delays (agent waiting)");
3140 osd_plb.add_u64_counter(
3141 l_osd_tier_proxy_read, "tier_proxy_read", "Tier proxy reads");
3142 osd_plb.add_u64_counter(
3143 l_osd_tier_proxy_write, "tier_proxy_write", "Tier proxy writes");
3144
3145 osd_plb.add_u64_counter(
3146 l_osd_agent_wake, "agent_wake", "Tiering agent wake up");
3147 osd_plb.add_u64_counter(
3148 l_osd_agent_skip, "agent_skip", "Objects skipped by agent");
3149 osd_plb.add_u64_counter(
3150 l_osd_agent_flush, "agent_flush", "Tiering agent flushes");
3151 osd_plb.add_u64_counter(
3152 l_osd_agent_evict, "agent_evict", "Tiering agent evictions");
3153
3154 osd_plb.add_u64_counter(
3155 l_osd_object_ctx_cache_hit, "object_ctx_cache_hit", "Object context cache hits");
3156 osd_plb.add_u64_counter(
3157 l_osd_object_ctx_cache_total, "object_ctx_cache_total", "Object context cache lookups");
3158
3159 osd_plb.add_u64_counter(l_osd_op_cache_hit, "op_cache_hit");
3160 osd_plb.add_time_avg(
3161 l_osd_tier_flush_lat, "osd_tier_flush_lat", "Object flush latency");
3162 osd_plb.add_time_avg(
3163 l_osd_tier_promote_lat, "osd_tier_promote_lat", "Object promote latency");
3164 osd_plb.add_time_avg(
3165 l_osd_tier_r_lat, "osd_tier_r_lat", "Object proxy read latency");
3166
3167 osd_plb.add_u64_counter(
3168 l_osd_pg_info, "osd_pg_info", "PG updated its info (using any method)");
3169 osd_plb.add_u64_counter(
3170 l_osd_pg_fastinfo, "osd_pg_fastinfo",
3171 "PG updated its info using fastinfo attr");
3172 osd_plb.add_u64_counter(
3173 l_osd_pg_biginfo, "osd_pg_biginfo", "PG updated its biginfo attr");
3174
3175 logger = osd_plb.create_perf_counters();
3176 cct->get_perfcounters_collection()->add(logger);
3177 }
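// All of the above land in the "osd" PerfCounters collection and can be
// sampled from a live daemon (id illustrative):
//
//   ceph daemon osd.0 perf dump
//   ceph daemon osd.0 perf histogram dump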
3178
3179 void OSD::create_recoverystate_perf()
3180 {
3181 dout(10) << "create_recoverystate_perf" << dendl;
3182
3183 PerfCountersBuilder rs_perf(cct, "recoverystate_perf", rs_first, rs_last);
3184
3185 rs_perf.add_time_avg(rs_initial_latency, "initial_latency", "Initial recovery state latency");
3186 rs_perf.add_time_avg(rs_started_latency, "started_latency", "Started recovery state latency");
3187 rs_perf.add_time_avg(rs_reset_latency, "reset_latency", "Reset recovery state latency");
3188 rs_perf.add_time_avg(rs_start_latency, "start_latency", "Start recovery state latency");
3189 rs_perf.add_time_avg(rs_primary_latency, "primary_latency", "Primary recovery state latency");
3190 rs_perf.add_time_avg(rs_peering_latency, "peering_latency", "Peering recovery state latency");
3191 rs_perf.add_time_avg(rs_backfilling_latency, "backfilling_latency", "Backfilling recovery state latency");
3192 rs_perf.add_time_avg(rs_waitremotebackfillreserved_latency, "waitremotebackfillreserved_latency", "Wait remote backfill reserved recovery state latency");
3193 rs_perf.add_time_avg(rs_waitlocalbackfillreserved_latency, "waitlocalbackfillreserved_latency", "Wait local backfill reserved recovery state latency");
3194 rs_perf.add_time_avg(rs_notbackfilling_latency, "notbackfilling_latency", "Notbackfilling recovery state latency");
3195 rs_perf.add_time_avg(rs_repnotrecovering_latency, "repnotrecovering_latency", "Repnotrecovering recovery state latency");
3196 rs_perf.add_time_avg(rs_repwaitrecoveryreserved_latency, "repwaitrecoveryreserved_latency", "Rep wait recovery reserved recovery state latency");
3197 rs_perf.add_time_avg(rs_repwaitbackfillreserved_latency, "repwaitbackfillreserved_latency", "Rep wait backfill reserved recovery state latency");
3198 rs_perf.add_time_avg(rs_reprecovering_latency, "reprecovering_latency", "RepRecovering recovery state latency");
3199 rs_perf.add_time_avg(rs_activating_latency, "activating_latency", "Activating recovery state latency");
3200 rs_perf.add_time_avg(rs_waitlocalrecoveryreserved_latency, "waitlocalrecoveryreserved_latency", "Wait local recovery reserved recovery state latency");
3201 rs_perf.add_time_avg(rs_waitremoterecoveryreserved_latency, "waitremoterecoveryreserved_latency", "Wait remote recovery reserved recovery state latency");
3202 rs_perf.add_time_avg(rs_recovering_latency, "recovering_latency", "Recovering recovery state latency");
3203 rs_perf.add_time_avg(rs_recovered_latency, "recovered_latency", "Recovered recovery state latency");
3204 rs_perf.add_time_avg(rs_clean_latency, "clean_latency", "Clean recovery state latency");
3205 rs_perf.add_time_avg(rs_active_latency, "active_latency", "Active recovery state latency");
3206 rs_perf.add_time_avg(rs_replicaactive_latency, "replicaactive_latency", "Replicaactive recovery state latency");
3207 rs_perf.add_time_avg(rs_stray_latency, "stray_latency", "Stray recovery state latency");
3208 rs_perf.add_time_avg(rs_getinfo_latency, "getinfo_latency", "Getinfo recovery state latency");
3209 rs_perf.add_time_avg(rs_getlog_latency, "getlog_latency", "Getlog recovery state latency");
3210 rs_perf.add_time_avg(rs_waitactingchange_latency, "waitactingchange_latency", "Waitactingchange recovery state latency");
3211 rs_perf.add_time_avg(rs_incomplete_latency, "incomplete_latency", "Incomplete recovery state latency");
3212 rs_perf.add_time_avg(rs_down_latency, "down_latency", "Down recovery state latency");
3213 rs_perf.add_time_avg(rs_getmissing_latency, "getmissing_latency", "Getmissing recovery state latency");
3214 rs_perf.add_time_avg(rs_waitupthru_latency, "waitupthru_latency", "Waitupthru recovery state latency");
3215 rs_perf.add_time_avg(rs_notrecovering_latency, "notrecovering_latency", "Notrecovering recovery state latency");
3216
3217 recoverystate_perf = rs_perf.create_perf_counters();
3218 cct->get_perfcounters_collection()->add(recoverystate_perf);
3219 }
3220
3221 int OSD::shutdown()
3222 {
3223 if (!service.prepare_to_stop())
3224 return 0; // already shutting down
3225 osd_lock.Lock();
3226 if (is_stopping()) {
3227 osd_lock.Unlock();
3228 return 0;
3229 }
3230 derr << "shutdown" << dendl;
3231
3232 set_state(STATE_STOPPING);
3233
3234 // Debugging
3235 cct->_conf->set_val("debug_osd", "100");
3236 cct->_conf->set_val("debug_journal", "100");
3237 cct->_conf->set_val("debug_filestore", "100");
3238 cct->_conf->set_val("debug_ms", "100");
3239 cct->_conf->apply_changes(NULL);
3240
3241 // stop MgrClient earlier as it's more like an internal consumer of OSD
3242 mgrc.shutdown();
3243
3244 service.start_shutdown();
3245
3246 // stop sending work to pgs. this just prevents any new work in _process
3247 // from racing with on_shutdown and potentially entering the pg after.
3248 op_shardedwq.drain();
3249
3250 // Shutdown PGs
3251 {
3252 RWLock::RLocker l(pg_map_lock);
3253 for (ceph::unordered_map<spg_t, PG*>::iterator p = pg_map.begin();
3254 p != pg_map.end();
3255 ++p) {
3256 dout(20) << " kicking pg " << p->first << dendl;
3257 p->second->lock();
3258 p->second->on_shutdown();
3259 p->second->unlock();
3260 p->second->osr->flush();
3261 }
3262 }
3263 clear_pg_stat_queue();
3264
3265 // drain op queue again (in case PGs requeued something)
3266 op_shardedwq.drain();
3267 {
3268 finished.clear(); // zap waiters (bleh, this is messy)
3269 }
3270
3271 op_shardedwq.clear_pg_slots();
3272
3273 // unregister commands
3274 cct->get_admin_socket()->unregister_command("status");
3275 cct->get_admin_socket()->unregister_command("flush_journal");
3276 cct->get_admin_socket()->unregister_command("dump_ops_in_flight");
3277 cct->get_admin_socket()->unregister_command("ops");
3278 cct->get_admin_socket()->unregister_command("dump_blocked_ops");
3279 cct->get_admin_socket()->unregister_command("dump_historic_ops");
3280 cct->get_admin_socket()->unregister_command("dump_historic_ops_by_duration");
3281 cct->get_admin_socket()->unregister_command("dump_historic_slow_ops");
3282 cct->get_admin_socket()->unregister_command("dump_op_pq_state");
3283 cct->get_admin_socket()->unregister_command("dump_blacklist");
3284 cct->get_admin_socket()->unregister_command("dump_watchers");
3285 cct->get_admin_socket()->unregister_command("dump_reservations");
3286 cct->get_admin_socket()->unregister_command("get_latest_osdmap");
3287 cct->get_admin_socket()->unregister_command("heap");
3288 cct->get_admin_socket()->unregister_command("set_heap_property");
3289 cct->get_admin_socket()->unregister_command("get_heap_property");
3290 cct->get_admin_socket()->unregister_command("dump_objectstore_kv_stats");
3291 cct->get_admin_socket()->unregister_command("dump_scrubs");
3292 cct->get_admin_socket()->unregister_command("calc_objectstore_db_histogram");
3293 cct->get_admin_socket()->unregister_command("flush_store_cache");
3294 cct->get_admin_socket()->unregister_command("dump_pgstate_history");
3295 cct->get_admin_socket()->unregister_command("compact");
3296 delete asok_hook;
3297 asok_hook = NULL;
3298
3299 cct->get_admin_socket()->unregister_command("setomapval");
3300 cct->get_admin_socket()->unregister_command("rmomapkey");
3301 cct->get_admin_socket()->unregister_command("setomapheader");
3302 cct->get_admin_socket()->unregister_command("getomap");
3303 cct->get_admin_socket()->unregister_command("truncobj");
3304 cct->get_admin_socket()->unregister_command("injectdataerr");
3305 cct->get_admin_socket()->unregister_command("injectmdataerr");
3306 cct->get_admin_socket()->unregister_command("set_recovery_delay");
3307 cct->get_admin_socket()->unregister_command("trigger_scrub");
3308 cct->get_admin_socket()->unregister_command("injectfull");
3309 delete test_ops_hook;
3310 test_ops_hook = NULL;
3311
3312 osd_lock.Unlock();
3313
3314 heartbeat_lock.Lock();
3315 heartbeat_stop = true;
3316 heartbeat_cond.Signal();
3317 heartbeat_lock.Unlock();
3318 heartbeat_thread.join();
3319
3320 peering_tp.drain();
3321 peering_wq.clear();
3322 peering_tp.stop();
3323 dout(10) << "peering tp stopped" << dendl;
3324
3325 osd_op_tp.drain();
3326 osd_op_tp.stop();
3327 dout(10) << "op sharded tp stopped" << dendl;
3328
3329 command_tp.drain();
3330 command_tp.stop();
3331 dout(10) << "command tp stopped" << dendl;
3332
3333 disk_tp.drain();
3334 disk_tp.stop();
3335 dout(10) << "disk tp stopped" << dendl;
3336
3337 dout(10) << "stopping agent" << dendl;
3338 service.agent_stop();
3339
3340 osd_lock.Lock();
3341
3342 reset_heartbeat_peers();
3343
3344 tick_timer.shutdown();
3345
3346 {
3347 Mutex::Locker l(tick_timer_lock);
3348 tick_timer_without_osd_lock.shutdown();
3349 }
3350
3351 // note unmount epoch
3352 dout(10) << "noting clean unmount in epoch " << osdmap->get_epoch() << dendl;
3353 superblock.mounted = service.get_boot_epoch();
3354 superblock.clean_thru = osdmap->get_epoch();
3355 ObjectStore::Transaction t;
3356 write_superblock(t);
3357 int r = store->apply_transaction(service.meta_osr.get(), std::move(t));
3358 if (r) {
3359 derr << "OSD::shutdown: error writing superblock: "
3360 << cpp_strerror(r) << dendl;
3361 }
3362
3363
3364 {
3365 Mutex::Locker l(pg_stat_queue_lock);
3366 assert(pg_stat_queue.empty());
3367 }
3368
3369 service.shutdown_reserver();
3370
3371 // Remove PGs
3372 #ifdef PG_DEBUG_REFS
3373 service.dump_live_pgids();
3374 #endif
3375 {
3376 RWLock::RLocker l(pg_map_lock);
3377 for (ceph::unordered_map<spg_t, PG*>::iterator p = pg_map.begin();
3378 p != pg_map.end();
3379 ++p) {
3380 dout(20) << " kicking pg " << p->first << dendl;
3381 p->second->lock();
3382 if (p->second->ref != 1) {
3383 derr << "pgid " << p->first << " has ref count of "
3384 << p->second->ref << dendl;
3385 #ifdef PG_DEBUG_REFS
3386 p->second->dump_live_ids();
3387 #endif
3388 if (cct->_conf->osd_shutdown_pgref_assert) {
3389 ceph_abort();
3390 }
3391 }
3392 p->second->unlock();
3393 p->second->put("PGMap");
3394 }
3395 pg_map.clear();
3396 }
3397 #ifdef PG_DEBUG_REFS
3398 service.dump_live_pgids();
3399 #endif
3400 cct->_conf->remove_observer(this);
3401
3402 dout(10) << "syncing store" << dendl;
3403 enable_disable_fuse(true);
3404
3405 if (cct->_conf->osd_journal_flush_on_shutdown) {
3406 dout(10) << "flushing journal" << dendl;
3407 store->flush_journal();
3408 }
3409
3410 store->umount();
3411 delete store;
3412 store = 0;
3413 dout(10) << "Store synced" << dendl;
3414
3415 monc->shutdown();
3416 osd_lock.Unlock();
3417
3418 osdmap = OSDMapRef();
3419 service.shutdown();
3420 op_tracker.on_shutdown();
3421
3422 class_handler->shutdown();
3423 client_messenger->shutdown();
3424 cluster_messenger->shutdown();
3425 hb_front_client_messenger->shutdown();
3426 hb_back_client_messenger->shutdown();
3427 objecter_messenger->shutdown();
3428 hb_front_server_messenger->shutdown();
3429 hb_back_server_messenger->shutdown();
3430
3431 peering_wq.clear();
3432
3433 return r;
3434 }
3435
3436 int OSD::mon_cmd_maybe_osd_create(string &cmd)
3437 {
3438 bool created = false;
3439 while (true) {
3440 dout(10) << __func__ << " cmd: " << cmd << dendl;
3441 vector<string> vcmd{cmd};
3442 bufferlist inbl;
3443 C_SaferCond w;
3444 string outs;
3445 monc->start_mon_command(vcmd, inbl, NULL, &outs, &w);
3446 int r = w.wait();
3447 if (r < 0) {
3448 if (r == -ENOENT && !created) {
3449 string newcmd = "{\"prefix\": \"osd create\", \"id\": " + stringify(whoami)
3450 + ", \"uuid\": \"" + stringify(superblock.osd_fsid) + "\"}";
3451 vector<string> vnewcmd{newcmd};
3452 bufferlist inbl;
3453 C_SaferCond w;
3454 string outs;
3455 monc->start_mon_command(vnewcmd, inbl, NULL, &outs, &w);
3456 int r = w.wait();
3457 if (r < 0) {
3458 derr << __func__ << " fail: osd does not exist and created failed: "
3459 << cpp_strerror(r) << dendl;
3460 return r;
3461 }
3462 created = true;
3463 continue;
3464 }
3465 derr << __func__ << " fail: '" << outs << "': " << cpp_strerror(r) << dendl;
3466 return r;
3467 }
3468 break;
3469 }
3470
3471 return 0;
3472 }
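
// A sketch of the flow above with illustrative values (the id and uuid
// below are examples, not taken from a real cluster): if the original
// command fails with -ENOENT, we synthesize
//
//   {"prefix": "osd create", "id": 3,
//    "uuid": "c3f1d9e0-1a2b-4c3d-8e4f-000000000000"}
//
// send it, and then retry the original command once; the 'created' flag
// keeps us from looping if the retry hits -ENOENT again.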
3473
3474 int OSD::update_crush_location()
3475 {
3476 if (!cct->_conf->osd_crush_update_on_start) {
3477 dout(10) << __func__ << " osd_crush_update_on_start = false" << dendl;
3478 return 0;
3479 }
3480
3481 char weight[32];
3482 if (cct->_conf->osd_crush_initial_weight >= 0) {
3483 snprintf(weight, sizeof(weight), "%.4lf", cct->_conf->osd_crush_initial_weight);
3484 } else {
3485 struct store_statfs_t st;
3486 int r = store->statfs(&st);
3487 if (r < 0) {
3488 derr << "statfs: " << cpp_strerror(r) << dendl;
3489 return r;
3490 }
3491 snprintf(weight, sizeof(weight), "%.4lf",
3492 MAX((double).00001,
3493 (double)(st.total) /
3494 (double)(1ull << 40 /* TB */)));
3495 }
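
// Worked example (illustrative): a device whose statfs reports
// st.total = 4 * 10^12 bytes gets weight 4e12 / 2^40 ~= 3.638; that is,
// the weight is the raw capacity in TiB, floored at 0.00001.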
3496
3497 std::multimap<string,string> loc = cct->crush_location.get_location();
3498 dout(10) << __func__ << " crush location is " << loc << dendl;
3499
3500 string cmd =
3501 string("{\"prefix\": \"osd crush create-or-move\", ") +
3502 string("\"id\": ") + stringify(whoami) + string(", ") +
3503 string("\"weight\":") + weight + string(", ") +
3504 string("\"args\": [");
3505 for (multimap<string,string>::iterator p = loc.begin(); p != loc.end(); ++p) {
3506 if (p != loc.begin())
3507 cmd += ", ";
3508 cmd += "\"" + p->first + "=" + p->second + "\"";
3509 }
3510 cmd += "]}";
3511
3512 return mon_cmd_maybe_osd_create(cmd);
3513 }
3514
3515 int OSD::update_crush_device_class()
3516 {
3517 if (!cct->_conf->osd_class_update_on_start) {
3518 dout(10) << __func__ << " osd_class_update_on_start = false" << dendl;
3519 return 0;
3520 }
3521
3522 string device_class;
3523 int r = store->read_meta("crush_device_class", &device_class);
3524 if (r < 0 || device_class.empty()) {
3525 device_class = store->get_default_device_class();
3526 }
3527
3528 if (device_class.empty()) {
3529 return 0;
3530 }
3531
3532 string cmd =
3533 string("{\"prefix\": \"osd crush set-device-class\", ") +
3534 string("\"class\": \"") + device_class + string("\", ") +
3535 string("\"ids\": [\"") + stringify(whoami) + string("\"]}");
3536
3537 r = mon_cmd_maybe_osd_create(cmd);
3538 if (r == -EPERM) {
3539 r = 0;
3540 }
3541
3542 return r;
3543 }
3544
3545 void OSD::write_superblock(ObjectStore::Transaction& t)
3546 {
3547 dout(10) << "write_superblock " << superblock << dendl;
3548
3549 //hack: at minimum it's using the baseline feature set
3550 if (!superblock.compat_features.incompat.contains(CEPH_OSD_FEATURE_INCOMPAT_BASE))
3551 superblock.compat_features.incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_BASE);
3552
3553 bufferlist bl;
3554 ::encode(superblock, bl);
3555 t.write(coll_t::meta(), OSD_SUPERBLOCK_GOBJECT, 0, bl.length(), bl);
3556 }
3557
3558 int OSD::read_superblock()
3559 {
3560 bufferlist bl;
3561 int r = store->read(coll_t::meta(), OSD_SUPERBLOCK_GOBJECT, 0, 0, bl);
3562 if (r < 0)
3563 return r;
3564
3565 bufferlist::iterator p = bl.begin();
3566 ::decode(superblock, p);
3567
3568 dout(10) << "read_superblock " << superblock << dendl;
3569
3570 return 0;
3571 }
3572
3573 void OSD::clear_temp_objects()
3574 {
3575 dout(10) << __func__ << dendl;
3576 vector<coll_t> ls;
3577 store->list_collections(ls);
3578 for (vector<coll_t>::iterator p = ls.begin(); p != ls.end(); ++p) {
3579 spg_t pgid;
3580 if (!p->is_pg(&pgid))
3581 continue;
3582
3583 // list temp objects
3584 dout(20) << " clearing temps in " << *p << " pgid " << pgid << dendl;
3585
3586 vector<ghobject_t> temps;
3587 ghobject_t next;
3588 while (1) {
3589 vector<ghobject_t> objects;
3590 store->collection_list(*p, next, ghobject_t::get_max(),
3591 store->get_ideal_list_max(),
3592 &objects, &next);
3593 if (objects.empty())
3594 break;
3595 vector<ghobject_t>::iterator q;
3596 for (q = objects.begin(); q != objects.end(); ++q) {
3597 // Hammer set pool for temps to -1, so check for clean-up
3598 if (q->hobj.is_temp() || (q->hobj.pool == -1)) {
3599 temps.push_back(*q);
3600 } else {
3601 break;
3602 }
3603 }
3604 // If we saw a non-temp object and hit the break above we can
3605 // break out of the while loop too.
3606 if (q != objects.end())
3607 break;
3608 }
3609 if (!temps.empty()) {
3610 ObjectStore::Transaction t;
3611 int removed = 0;
3612 for (vector<ghobject_t>::iterator q = temps.begin(); q != temps.end(); ++q) {
3613 dout(20) << " removing " << *p << " object " << *q << dendl;
3614 t.remove(*p, *q);
3615 if (++removed > cct->_conf->osd_target_transaction_size) {
3616 store->apply_transaction(service.meta_osr.get(), std::move(t));
3617 t = ObjectStore::Transaction();
3618 removed = 0;
3619 }
3620 }
3621 if (removed) {
3622 store->apply_transaction(service.meta_osr.get(), std::move(t));
3623 }
3624 }
3625 }
3626 }
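
// Note the batching pattern above: removals are flushed every
// osd_target_transaction_size deletions, so a pg full of leftover temp
// objects never becomes one enormous transaction.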
3627
3628 void OSD::recursive_remove_collection(CephContext* cct,
3629 ObjectStore *store, spg_t pgid,
3630 coll_t tmp)
3631 {
3632 OSDriver driver(
3633 store,
3634 coll_t(),
3635 make_snapmapper_oid());
3636
3637 ceph::shared_ptr<ObjectStore::Sequencer> osr (std::make_shared<
3638 ObjectStore::Sequencer>("rm"));
3639 ObjectStore::Transaction t;
3640 SnapMapper mapper(cct, &driver, 0, 0, 0, pgid.shard);
3641
3642 vector<ghobject_t> objects;
3643 store->collection_list(tmp, ghobject_t(), ghobject_t::get_max(),
3644 INT_MAX, &objects, 0);
3645 generic_dout(10) << __func__ << " " << objects << dendl;
3646 // delete them.
3647 int removed = 0;
3648 for (vector<ghobject_t>::iterator p = objects.begin();
3649 p != objects.end();
3650 ++p, removed++) {
3651 OSDriver::OSTransaction _t(driver.get_transaction(&t));
3652 int r = mapper.remove_oid(p->hobj, &_t);
3653 if (r != 0 && r != -ENOENT)
3654 ceph_abort();
3655 t.remove(tmp, *p);
3656 if (removed > cct->_conf->osd_target_transaction_size) {
3657 int r = store->apply_transaction(osr.get(), std::move(t));
3658 assert(r == 0);
3659 t = ObjectStore::Transaction();
3660 removed = 0;
3661 }
3662 }
3663 t.remove_collection(tmp);
3664 int r = store->apply_transaction(osr.get(), std::move(t));
3665 assert(r == 0);
3666
3667 C_SaferCond waiter;
3668 if (!osr->flush_commit(&waiter)) {
3669 waiter.wait();
3670 }
3671 }
3672
3673
3674 // ======================================================
3675 // PG's
3676
3677 PGPool OSD::_get_pool(int id, OSDMapRef createmap)
3678 {
3679 if (!createmap->have_pg_pool(id)) {
3680 dout(5) << __func__ << ": the OSDmap does not contain a PG pool with id = "
3681 << id << dendl;
3682 ceph_abort();
3683 }
3684
3685 PGPool p = PGPool(cct, createmap, id);
3686
3687 dout(10) << "_get_pool " << p.id << dendl;
3688 return p;
3689 }
3690
3691 PG *OSD::_open_lock_pg(
3692 OSDMapRef createmap,
3693 spg_t pgid, bool no_lockdep_check)
3694 {
3695 assert(osd_lock.is_locked());
3696
3697 PG* pg = _make_pg(createmap, pgid);
3698 {
3699 RWLock::WLocker l(pg_map_lock);
3700 pg->lock(no_lockdep_check);
3701 pg_map[pgid] = pg;
3702 pg->get("PGMap"); // because it's in pg_map
3703 service.pg_add_epoch(pg->info.pgid, createmap->get_epoch());
3704 }
3705 return pg;
3706 }
3707
3708 PG* OSD::_make_pg(
3709 OSDMapRef createmap,
3710 spg_t pgid)
3711 {
3712 dout(10) << "_open_lock_pg " << pgid << dendl;
3713 PGPool pool = _get_pool(pgid.pool(), createmap);
3714
3715 // create
3716 PG *pg;
3717 if (createmap->get_pg_type(pgid.pgid) == pg_pool_t::TYPE_REPLICATED ||
3718 createmap->get_pg_type(pgid.pgid) == pg_pool_t::TYPE_ERASURE)
3719 pg = new PrimaryLogPG(&service, createmap, pool, pgid);
3720 else
3721 ceph_abort();
3722
3723 return pg;
3724 }
3725
3726
3727 void OSD::add_newly_split_pg(PG *pg, PG::RecoveryCtx *rctx)
3728 {
3729 epoch_t e(service.get_osdmap()->get_epoch());
3730 pg->get("PGMap"); // For pg_map
3731 pg_map[pg->info.pgid] = pg;
3732 service.pg_add_epoch(pg->info.pgid, pg->get_osdmap()->get_epoch());
3733
3734 dout(10) << "Adding newly split pg " << *pg << dendl;
3735 pg->handle_loaded(rctx);
3736 pg->write_if_dirty(*(rctx->transaction));
3737 pg->queue_null(e, e);
3738 map<spg_t, list<PG::CephPeeringEvtRef> >::iterator to_wake =
3739 peering_wait_for_split.find(pg->info.pgid);
3740 if (to_wake != peering_wait_for_split.end()) {
3741 for (list<PG::CephPeeringEvtRef>::iterator i =
3742 to_wake->second.begin();
3743 i != to_wake->second.end();
3744 ++i) {
3745 pg->queue_peering_event(*i);
3746 }
3747 peering_wait_for_split.erase(to_wake);
3748 }
3749 if (!service.get_osdmap()->have_pg_pool(pg->info.pgid.pool()))
3750 _remove_pg(pg);
3751 }
3752
3753 OSD::res_result OSD::_try_resurrect_pg(
3754 OSDMapRef curmap, spg_t pgid, spg_t *resurrected, PGRef *old_pg_state)
3755 {
3756 assert(resurrected);
3757 assert(old_pg_state);
3758 // find nearest ancestor
3759 DeletingStateRef df;
3760 spg_t cur(pgid);
3761 while (true) {
3762 df = service.deleting_pgs.lookup(cur);
3763 if (df)
3764 break;
3765 if (!cur.ps())
3766 break;
3767 cur = cur.get_parent();
3768 }
3769 if (!df)
3770 return RES_NONE; // good to go
3771
3772 df->old_pg_state->lock();
3773 OSDMapRef create_map = df->old_pg_state->get_osdmap();
3774 df->old_pg_state->unlock();
3775
3776 set<spg_t> children;
3777 if (cur == pgid) {
3778 if (df->try_stop_deletion()) {
3779 dout(10) << __func__ << ": halted deletion on pg " << pgid << dendl;
3780 *resurrected = cur;
3781 *old_pg_state = df->old_pg_state;
3782 service.deleting_pgs.remove(pgid); // PG is no longer being removed!
3783 return RES_SELF;
3784 } else {
3785 // raced, ensure we don't see DeletingStateRef when we try to
3786 // delete this pg
3787 service.deleting_pgs.remove(pgid);
3788 return RES_NONE;
3789 }
3790 } else if (cur.is_split(create_map->get_pg_num(cur.pool()),
3791 curmap->get_pg_num(cur.pool()),
3792 &children) &&
3793 children.count(pgid)) {
3794 if (df->try_stop_deletion()) {
3795 dout(10) << __func__ << ": halted deletion on ancestor pg " << pgid
3796 << dendl;
3797 *resurrected = cur;
3798 *old_pg_state = df->old_pg_state;
3799 service.deleting_pgs.remove(cur); // PG is no longer being removed!
3800 return RES_PARENT;
3801 } else {
3802 /* this is not a problem, failing to cancel proves that all objects
3803 * have been removed, so no hobject_t overlap is possible
3804 */
3805 return RES_NONE;
3806 }
3807 }
3808 return RES_NONE;
3809 }
3810
3811 PG *OSD::_create_lock_pg(
3812 OSDMapRef createmap,
3813 spg_t pgid,
3814 bool hold_map_lock,
3815 bool backfill,
3816 int role,
3817 vector<int>& up, int up_primary,
3818 vector<int>& acting, int acting_primary,
3819 pg_history_t history,
3820 const PastIntervals& pi,
3821 ObjectStore::Transaction& t)
3822 {
3823 assert(osd_lock.is_locked());
3824 dout(20) << "_create_lock_pg pgid " << pgid << dendl;
3825
3826 PG *pg = _open_lock_pg(createmap, pgid, true);
3827
3828 service.init_splits_between(pgid, pg->get_osdmap(), service.get_osdmap());
3829
3830 pg->init(
3831 role,
3832 up,
3833 up_primary,
3834 acting,
3835 acting_primary,
3836 history,
3837 pi,
3838 backfill,
3839 &t);
3840
3841 dout(7) << "_create_lock_pg " << *pg << dendl;
3842 return pg;
3843 }
3844
3845 PG *OSD::_lookup_lock_pg(spg_t pgid)
3846 {
3847 RWLock::RLocker l(pg_map_lock);
3848
3849 auto pg_map_entry = pg_map.find(pgid);
3850 if (pg_map_entry == pg_map.end())
3851 return nullptr;
3852 PG *pg = pg_map_entry->second;
3853 pg->lock();
3854 return pg;
3855 }
3856
3857 PG *OSD::lookup_lock_pg(spg_t pgid)
3858 {
3859 return _lookup_lock_pg(pgid);
3860 }
3861
3862 PG *OSD::_lookup_lock_pg_with_map_lock_held(spg_t pgid)
3863 {
3864 assert(pg_map.count(pgid));
3865 PG *pg = pg_map[pgid];
3866 pg->lock();
3867 return pg;
3868 }
3869
3870 void OSD::load_pgs()
3871 {
3872 assert(osd_lock.is_locked());
3873 dout(0) << "load_pgs" << dendl;
3874 {
3875 RWLock::RLocker l(pg_map_lock);
3876 assert(pg_map.empty());
3877 }
3878
3879 vector<coll_t> ls;
3880 int r = store->list_collections(ls);
3881 if (r < 0) {
3882 derr << "failed to list pgs: " << cpp_strerror(-r) << dendl;
3883 }
3884
3885 bool has_upgraded = false;
3886
3887 for (vector<coll_t>::iterator it = ls.begin();
3888 it != ls.end();
3889 ++it) {
3890 spg_t pgid;
3891 if (it->is_temp(&pgid) ||
3892 (it->is_pg(&pgid) && PG::_has_removal_flag(store, pgid))) {
3893 dout(10) << "load_pgs " << *it << " clearing temp" << dendl;
3894 recursive_remove_collection(cct, store, pgid, *it);
3895 continue;
3896 }
3897
3898 if (!it->is_pg(&pgid)) {
3899 dout(10) << "load_pgs ignoring unrecognized " << *it << dendl;
3900 continue;
3901 }
3902
3903 if (pgid.preferred() >= 0) {
3904 dout(10) << __func__ << ": skipping localized PG " << pgid << dendl;
3905 // FIXME: delete it too, eventually
3906 continue;
3907 }
3908
3909 dout(10) << "pgid " << pgid << " coll " << coll_t(pgid) << dendl;
3910 bufferlist bl;
3911 epoch_t map_epoch = 0;
3912 int r = PG::peek_map_epoch(store, pgid, &map_epoch, &bl);
3913 if (r < 0) {
3914 derr << __func__ << " unable to peek at " << pgid << " metadata, skipping"
3915 << dendl;
3916 continue;
3917 }
3918
3919 PG *pg = NULL;
3920 if (map_epoch > 0) {
3921 OSDMapRef pgosdmap = service.try_get_map(map_epoch);
3922 if (!pgosdmap) {
3923 if (!osdmap->have_pg_pool(pgid.pool())) {
3924 derr << __func__ << ": could not find map for epoch " << map_epoch
3925 << " on pg " << pgid << ", but the pool is not present in the "
3926 << "current map, so this is probably a result of bug 10617. "
3927 << "Skipping the pg for now, you can use ceph-objectstore-tool "
3928 << "to clean it up later." << dendl;
3929 continue;
3930 } else {
3931 derr << __func__ << ": have pgid " << pgid << " at epoch "
3932 << map_epoch << ", but missing map. Crashing."
3933 << dendl;
3934 assert(0 == "Missing map in load_pgs");
3935 }
3936 }
3937 pg = _open_lock_pg(pgosdmap, pgid);
3938 } else {
3939 pg = _open_lock_pg(osdmap, pgid);
3940 }
3941 // there can be no waiters here, so we don't call wake_pg_waiters
3942
3943 pg->ch = store->open_collection(pg->coll);
3944
3945 // read pg state, log
3946 pg->read_state(store, bl);
3947
3948 if (pg->must_upgrade()) {
3949 if (!pg->can_upgrade()) {
3950 derr << "PG needs upgrade, but on-disk data is too old; upgrade to"
3951 << " an older version first." << dendl;
3952 assert(0 == "PG too old to upgrade");
3953 }
3954 if (!has_upgraded) {
3955 derr << "PGs are upgrading" << dendl;
3956 has_upgraded = true;
3957 }
3958 dout(10) << "PG " << pg->info.pgid
3959 << " must upgrade..." << dendl;
3960 pg->upgrade(store);
3961 }
3962
3963 service.init_splits_between(pg->info.pgid, pg->get_osdmap(), osdmap);
3964
3965 // generate state for PG's current mapping
3966 int primary, up_primary;
3967 vector<int> acting, up;
3968 pg->get_osdmap()->pg_to_up_acting_osds(
3969 pgid.pgid, &up, &up_primary, &acting, &primary);
3970 pg->init_primary_up_acting(
3971 up,
3972 acting,
3973 up_primary,
3974 primary);
3975 int role = OSDMap::calc_pg_role(whoami, pg->acting);
3976 if (pg->pool.info.is_replicated() || role == pg->pg_whoami.shard)
3977 pg->set_role(role);
3978 else
3979 pg->set_role(-1);
3980
3981 pg->reg_next_scrub();
3982
3983 PG::RecoveryCtx rctx(0, 0, 0, 0, 0, 0);
3984 pg->handle_loaded(&rctx);
3985
3986 dout(10) << "load_pgs loaded " << *pg << " " << pg->pg_log.get_log() << dendl;
3987 if (pg->pg_log.is_dirty()) {
3988 ObjectStore::Transaction t;
3989 pg->write_if_dirty(t);
3990 store->apply_transaction(pg->osr.get(), std::move(t));
3991 }
3992 pg->unlock();
3993 }
3994 {
3995 RWLock::RLocker l(pg_map_lock);
3996 dout(0) << "load_pgs opened " << pg_map.size() << " pgs" << dendl;
3997 }
3998
3999 // clean up old infos object?
4000 if (has_upgraded && store->exists(coll_t::meta(), OSD::make_infos_oid())) {
4001 dout(1) << __func__ << " removing legacy infos object" << dendl;
4002 ObjectStore::Transaction t;
4003 t.remove(coll_t::meta(), OSD::make_infos_oid());
4004 int r = store->apply_transaction(service.meta_osr.get(), std::move(t));
4005 if (r != 0) {
4006 derr << __func__ << ": apply_transaction returned "
4007 << cpp_strerror(r) << dendl;
4008 ceph_abort();
4009 }
4010 }
4011
4012 build_past_intervals_parallel();
4013 }
4014
4015
4016 /*
4017 * build past_intervals efficiently on old, degraded, and buried
4018 * clusters. this is important for efficiently catching up osds that
4019 * are way behind on maps to the current cluster state.
4020 *
4021 * this is a parallel version of PG::generate_past_intervals().
4022 * follow the same logic, but do all pgs at the same time so that we
4023 * can make a single pass across the osdmap history.
4024 */
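//
// A small illustrative example: if pg A needs intervals over epochs
// [10,20] and pg B needs [15,30], we walk epochs 10..30 exactly once;
// at each epoch the 'continue' below skips any pg whose [start,end]
// range does not cover it, so every osdmap is fetched and decoded once
// rather than once per pg.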
4025 void OSD::build_past_intervals_parallel()
4026 {
4027 struct pistate {
4028 epoch_t start, end;
4029 vector<int> old_acting, old_up;
4030 epoch_t same_interval_since;
4031 int primary;
4032 int up_primary;
4033 };
4034 map<PG*,pistate> pis;
4035
4036 // calculate junction of map range
4037 epoch_t end_epoch = superblock.oldest_map;
4038 epoch_t cur_epoch = superblock.newest_map;
4039 {
4040 RWLock::RLocker l(pg_map_lock);
4041 for (ceph::unordered_map<spg_t, PG*>::iterator i = pg_map.begin();
4042 i != pg_map.end();
4043 ++i) {
4044 PG *pg = i->second;
4045
4046 auto rpib = pg->get_required_past_interval_bounds(
4047 pg->info,
4048 superblock.oldest_map);
4049 if (rpib.first >= rpib.second && pg->past_intervals.empty()) {
4050 if (pg->info.history.same_interval_since == 0) {
4051 pg->info.history.same_interval_since = rpib.second;
4052 }
4053 continue;
4054 } else {
4055 auto apib = pg->past_intervals.get_bounds();
4056 if (apib.second >= rpib.second &&
4057 apib.first <= rpib.first) {
4058 if (pg->info.history.same_interval_since == 0) {
4059 pg->info.history.same_interval_since = rpib.second;
4060 }
4061 continue;
4062 }
4063 }
4064
4065 dout(10) << pg->info.pgid << " needs " << rpib.first << "-"
4066 << rpib.second << dendl;
4067 pistate& p = pis[pg];
4068 p.start = rpib.first;
4069 p.end = rpib.second;
4070 p.same_interval_since = 0;
4071
4072 if (rpib.first < cur_epoch)
4073 cur_epoch = rpib.first;
4074 if (rpib.second > end_epoch)
4075 end_epoch = rpib.second;
4076 }
4077 }
4078 if (pis.empty()) {
4079 dout(10) << __func__ << " nothing to build" << dendl;
4080 return;
4081 }
4082
4083 dout(1) << __func__ << " over " << cur_epoch << "-" << end_epoch << dendl;
4084 assert(cur_epoch <= end_epoch);
4085
4086 OSDMapRef cur_map, last_map;
4087 for ( ; cur_epoch <= end_epoch; cur_epoch++) {
4088 dout(10) << __func__ << " epoch " << cur_epoch << dendl;
4089 last_map = cur_map;
4090 cur_map = get_map(cur_epoch);
4091
4092 for (map<PG*,pistate>::iterator i = pis.begin(); i != pis.end(); ++i) {
4093 PG *pg = i->first;
4094 pistate& p = i->second;
4095
4096 if (cur_epoch < p.start || cur_epoch > p.end)
4097 continue;
4098
4099 vector<int> acting, up;
4100 int up_primary;
4101 int primary;
4102 pg_t pgid = pg->info.pgid.pgid;
4103 if (p.same_interval_since && last_map->get_pools().count(pgid.pool()))
4104 pgid = pgid.get_ancestor(last_map->get_pg_num(pgid.pool()));
4105 cur_map->pg_to_up_acting_osds(
4106 pgid, &up, &up_primary, &acting, &primary);
4107
4108 if (p.same_interval_since == 0) {
4109 dout(10) << __func__ << " epoch " << cur_epoch << " pg " << pg->info.pgid
4110 << " first map, acting " << acting
4111 << " up " << up << ", same_interval_since = " << cur_epoch << dendl;
4112 p.same_interval_since = cur_epoch;
4113 p.old_up = up;
4114 p.old_acting = acting;
4115 p.primary = primary;
4116 p.up_primary = up_primary;
4117 continue;
4118 }
4119 assert(last_map);
4120
4121 boost::scoped_ptr<IsPGRecoverablePredicate> recoverable(
4122 pg->get_is_recoverable_predicate());
4123 std::stringstream debug;
4124 bool new_interval = PastIntervals::check_new_interval(
4125 p.primary,
4126 primary,
4127 p.old_acting, acting,
4128 p.up_primary,
4129 up_primary,
4130 p.old_up, up,
4131 p.same_interval_since,
4132 pg->info.history.last_epoch_clean,
4133 cur_map, last_map,
4134 pgid,
4135 recoverable.get(),
4136 &pg->past_intervals,
4137 &debug);
4138 if (new_interval) {
4139 dout(10) << __func__ << " epoch " << cur_epoch << " pg " << pg->info.pgid
4140 << " " << debug.str() << dendl;
4141 p.old_up = up;
4142 p.old_acting = acting;
4143 p.primary = primary;
4144 p.up_primary = up_primary;
4145 p.same_interval_since = cur_epoch;
4146 }
4147 }
4148 }
4149
4150 // Now that past_intervals have been recomputed let's fix the same_interval_since
4151 // if it was cleared by import.
4152 for (map<PG*,pistate>::iterator i = pis.begin(); i != pis.end(); ++i) {
4153 PG *pg = i->first;
4154 pistate& p = i->second;
4155
4156 if (pg->info.history.same_interval_since == 0) {
4157 assert(p.same_interval_since);
4158 dout(10) << __func__ << " fix same_interval_since " << p.same_interval_since << " pg " << *pg << dendl;
4159 dout(10) << __func__ << " past_intervals " << pg->past_intervals << dendl;
4160 // Fix it
4161 pg->info.history.same_interval_since = p.same_interval_since;
4162 }
4163 }
4164
4165 // write info only at the end. this is necessary because we check
4166 // whether the past_intervals go far enough back or forward in time,
4167 // but we don't check for holes. we could avoid it by discarding
4168 // the previous past_intervals and rebuilding from scratch, or we
4169 // can just do this and commit all our work at the end.
4170 ObjectStore::Transaction t;
4171 int num = 0;
4172 for (map<PG*,pistate>::iterator i = pis.begin(); i != pis.end(); ++i) {
4173 PG *pg = i->first;
4174 pg->lock();
4175 pg->dirty_big_info = true;
4176 pg->dirty_info = true;
4177 pg->write_if_dirty(t);
4178 pg->unlock();
4179
4180 // don't let the transaction get too big
4181 if (++num >= cct->_conf->osd_target_transaction_size) {
4182 store->apply_transaction(service.meta_osr.get(), std::move(t));
4183 t = ObjectStore::Transaction();
4184 num = 0;
4185 }
4186 }
4187 if (!t.empty())
4188 store->apply_transaction(service.meta_osr.get(), std::move(t));
4189 }
4190
4191 /*
4192 * look up a pg. if we have it, great. if not, consider creating it IF the pg mapping
4193 * hasn't changed since the given epoch and we are the primary.
4194 */
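//
// As implemented below, the return value is 0 if a pg was created and
// the event queued, -EEXIST if the pg already exists (or is splitting)
// and the event was queued there, and -EINVAL if the pool is gone or
// the mapping changed since 'epoch'.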
4195 int OSD::handle_pg_peering_evt(
4196 spg_t pgid,
4197 const pg_history_t& orig_history,
4198 const PastIntervals& pi,
4199 epoch_t epoch,
4200 PG::CephPeeringEvtRef evt)
4201 {
4202 if (service.splitting(pgid)) {
4203 peering_wait_for_split[pgid].push_back(evt);
4204 return -EEXIST;
4205 }
4206
4207 PG *pg = _lookup_lock_pg(pgid);
4208 if (!pg) {
4209 // same primary?
4210 if (!osdmap->have_pg_pool(pgid.pool()))
4211 return -EINVAL;
4212 int up_primary, acting_primary;
4213 vector<int> up, acting;
4214 osdmap->pg_to_up_acting_osds(
4215 pgid.pgid, &up, &up_primary, &acting, &acting_primary);
4216
4217 pg_history_t history = orig_history;
4218 bool valid_history = project_pg_history(
4219 pgid, history, epoch, up, up_primary, acting, acting_primary);
4220
4221 if (!valid_history || epoch < history.same_interval_since) {
4222 dout(10) << __func__ << " " << pgid << " acting changed in "
4223 << history.same_interval_since << " (msg from " << epoch << ")"
4224 << dendl;
4225 return -EINVAL;
4226 }
4227
4228 if (service.splitting(pgid)) {
4229 ceph_abort();
4230 }
4231
4232 // do we need to resurrect a deleting pg?
4233 spg_t resurrected;
4234 PGRef old_pg_state;
4235 res_result result = _try_resurrect_pg(
4236 service.get_osdmap(),
4237 pgid,
4238 &resurrected,
4239 &old_pg_state);
4240
4241 PG::RecoveryCtx rctx = create_context();
4242 switch (result) {
4243 case RES_NONE: {
4244 const pg_pool_t* pp = osdmap->get_pg_pool(pgid.pool());
4245 if (pp->has_flag(pg_pool_t::FLAG_EC_OVERWRITES) &&
4246 store->get_type() != "bluestore") {
4247 clog->warn() << "pg " << pgid
4248 << " is at risk of silent data corruption: "
4249 << "the pool allows ec overwrites but is not stored in "
4250 << "bluestore, so deep scrubbing will not detect bitrot";
4251 }
4252 PG::_create(*rctx.transaction, pgid, pgid.get_split_bits(pp->get_pg_num()));
4253 PG::_init(*rctx.transaction, pgid, pp);
4254
4255 int role = osdmap->calc_pg_role(whoami, acting, acting.size());
4256 if (!pp->is_replicated() && role != pgid.shard)
4257 role = -1;
4258
4259 pg = _create_lock_pg(
4260 get_map(epoch),
4261 pgid, false, false,
4262 role,
4263 up, up_primary,
4264 acting, acting_primary,
4265 history, pi,
4266 *rctx.transaction);
4267 pg->handle_create(&rctx);
4268 pg->write_if_dirty(*rctx.transaction);
4269 dispatch_context(rctx, pg, osdmap);
4270
4271 dout(10) << *pg << " is new" << dendl;
4272
4273 pg->queue_peering_event(evt);
4274 wake_pg_waiters(pg);
4275 pg->unlock();
4276 return 0;
4277 }
4278 case RES_SELF: {
4279 old_pg_state->lock();
4280 OSDMapRef old_osd_map = old_pg_state->get_osdmap();
4281 int old_role = old_pg_state->role;
4282 vector<int> old_up = old_pg_state->up;
4283 int old_up_primary = old_pg_state->up_primary.osd;
4284 vector<int> old_acting = old_pg_state->acting;
4285 int old_primary = old_pg_state->primary.osd;
4286 pg_history_t old_history = old_pg_state->info.history;
4287 PastIntervals old_past_intervals = old_pg_state->past_intervals;
4288 old_pg_state->unlock();
4289 pg = _create_lock_pg(
4290 old_osd_map,
4291 resurrected,
4292 false,
4293 true,
4294 old_role,
4295 old_up,
4296 old_up_primary,
4297 old_acting,
4298 old_primary,
4299 old_history,
4300 old_past_intervals,
4301 *rctx.transaction);
4302 pg->handle_create(&rctx);
4303 pg->write_if_dirty(*rctx.transaction);
4304 dispatch_context(rctx, pg, osdmap);
4305
4306 dout(10) << *pg << " is new (resurrected)" << dendl;
4307
4308 pg->queue_peering_event(evt);
4309 wake_pg_waiters(pg);
4310 pg->unlock();
4311 return 0;
4312 }
4313 case RES_PARENT: {
4314 assert(old_pg_state);
4315 old_pg_state->lock();
4316 OSDMapRef old_osd_map = old_pg_state->get_osdmap();
4317 int old_role = old_pg_state->role;
4318 vector<int> old_up = old_pg_state->up;
4319 int old_up_primary = old_pg_state->up_primary.osd;
4320 vector<int> old_acting = old_pg_state->acting;
4321 int old_primary = old_pg_state->primary.osd;
4322 pg_history_t old_history = old_pg_state->info.history;
4323 PastIntervals old_past_intervals = old_pg_state->past_intervals;
4324 old_pg_state->unlock();
4325 PG *parent = _create_lock_pg(
4326 old_osd_map,
4327 resurrected,
4328 false,
4329 true,
4330 old_role,
4331 old_up,
4332 old_up_primary,
4333 old_acting,
4334 old_primary,
4335 old_history,
4336 old_past_intervals,
4337 *rctx.transaction
4338 );
4339 parent->handle_create(&rctx);
4340 parent->write_if_dirty(*rctx.transaction);
4341 dispatch_context(rctx, parent, osdmap);
4342
4343 dout(10) << *parent << " is new" << dendl;
4344
4345 assert(service.splitting(pgid));
4346 peering_wait_for_split[pgid].push_back(evt);
4347
4348 //parent->queue_peering_event(evt);
4349 parent->queue_null(osdmap->get_epoch(), osdmap->get_epoch());
4350 wake_pg_waiters(parent);
4351 parent->unlock();
4352 return 0;
4353 }
4354 default:
4355 assert(0);
4356 return 0;
4357 }
4358 } else {
4359 // already had it. did the mapping change?
4360 if (epoch < pg->info.history.same_interval_since) {
4361 dout(10) << *pg << " " << __func__ << " acting changed in "
4362 << pg->info.history.same_interval_since
4363 << " (msg from " << epoch << ")" << dendl;
4364 } else {
4365 pg->queue_peering_event(evt);
4366 }
4367 pg->unlock();
4368 return -EEXIST;
4369 }
4370 }
4371
4372
4373 void OSD::build_initial_pg_history(
4374 spg_t pgid,
4375 epoch_t created,
4376 utime_t created_stamp,
4377 pg_history_t *h,
4378 PastIntervals *pi)
4379 {
4380 dout(10) << __func__ << " " << pgid << " created " << created << dendl;
4381 h->epoch_created = created;
4382 h->epoch_pool_created = created;
4383 h->same_interval_since = created;
4384 h->same_up_since = created;
4385 h->same_primary_since = created;
4386 h->last_scrub_stamp = created_stamp;
4387 h->last_deep_scrub_stamp = created_stamp;
4388 h->last_clean_scrub_stamp = created_stamp;
4389
4390 OSDMapRef lastmap = service.get_map(created);
4391 int up_primary, acting_primary;
4392 vector<int> up, acting;
4393 lastmap->pg_to_up_acting_osds(
4394 pgid.pgid, &up, &up_primary, &acting, &acting_primary);
4395
4396 ostringstream debug;
4397 for (epoch_t e = created + 1; e <= osdmap->get_epoch(); ++e) {
4398 OSDMapRef osdmap = service.get_map(e);
4399 int new_up_primary, new_acting_primary;
4400 vector<int> new_up, new_acting;
4401 osdmap->pg_to_up_acting_osds(
4402 pgid.pgid, &new_up, &new_up_primary, &new_acting, &new_acting_primary);
4403
4404 // this is a bit imprecise, but sufficient?
4405 struct min_size_predicate_t : public IsPGRecoverablePredicate {
4406 const pg_pool_t *pi;
4407 bool operator()(const set<pg_shard_t> &have) const {
4408 return have.size() >= pi->min_size;
4409 }
4410 min_size_predicate_t(const pg_pool_t *i) : pi(i) {}
4411 } min_size_predicate(osdmap->get_pg_pool(pgid.pgid.pool()));
4412
4413 bool new_interval = PastIntervals::check_new_interval(
4414 acting_primary,
4415 new_acting_primary,
4416 acting, new_acting,
4417 up_primary,
4418 new_up_primary,
4419 up, new_up,
4420 h->same_interval_since,
4421 h->last_epoch_clean,
4422 osdmap,
4423 lastmap,
4424 pgid.pgid,
4425 &min_size_predicate,
4426 pi,
4427 &debug);
4428 if (new_interval) {
4429 h->same_interval_since = e;
4430 }
4431 if (up != new_up) {
4432 h->same_up_since = e;
4433 }
4434 if (acting_primary != new_acting_primary) {
4435 h->same_primary_since = e;
4436 }
4437 if (pgid.pgid.is_split(lastmap->get_pg_num(pgid.pgid.pool()),
4438 osdmap->get_pg_num(pgid.pgid.pool()),
4439 nullptr)) {
4440 h->last_epoch_split = e;
4441 }
4442 lastmap = osdmap;
4443 }
4444 dout(20) << __func__ << " " << debug.str() << dendl;
4445 dout(10) << __func__ << " " << *h << " " << *pi
4446 << " [" << (pi->empty() ? pair<epoch_t,epoch_t>(0,0) :
4447 pi->get_bounds()) << ")"
4448 << dendl;
4449 }
4450
4451 /**
4452 * Fill in the passed history so you know same_interval_since, same_up_since,
4453 * and same_primary_since.
4454 */
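//
// The loop below walks backward from the current osdmap epoch toward
// 'from', raising the same_interval/up/primary floors as changes are
// found, and bails out early once all three floors are already >= e.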
4455 bool OSD::project_pg_history(spg_t pgid, pg_history_t& h, epoch_t from,
4456 const vector<int>& currentup,
4457 int currentupprimary,
4458 const vector<int>& currentacting,
4459 int currentactingprimary)
4460 {
4461 dout(15) << "project_pg_history " << pgid
4462 << " from " << from << " to " << osdmap->get_epoch()
4463 << ", start " << h
4464 << dendl;
4465
4466 epoch_t e;
4467 for (e = osdmap->get_epoch();
4468 e > from;
4469 e--) {
4470 // verify during intermediate epoch (e-1)
4471 OSDMapRef oldmap = service.try_get_map(e-1);
4472 if (!oldmap) {
4473 dout(15) << __func__ << ": found map gap, returning false" << dendl;
4474 return false;
4475 }
4476 assert(oldmap->have_pg_pool(pgid.pool()));
4477
4478 int upprimary, actingprimary;
4479 vector<int> up, acting;
4480 oldmap->pg_to_up_acting_osds(
4481 pgid.pgid,
4482 &up,
4483 &upprimary,
4484 &acting,
4485 &actingprimary);
4486
4487 // acting set change?
4488 if ((actingprimary != currentactingprimary ||
4489 upprimary != currentupprimary ||
4490 acting != currentacting ||
4491 up != currentup) && e > h.same_interval_since) {
4492 dout(15) << "project_pg_history " << pgid << " acting|up changed in " << e
4493 << " from " << acting << "/" << up
4494 << " " << actingprimary << "/" << upprimary
4495 << " -> " << currentacting << "/" << currentup
4496 << " " << currentactingprimary << "/" << currentupprimary
4497 << dendl;
4498 h.same_interval_since = e;
4499 }
4500 // split?
4501 if (pgid.is_split(oldmap->get_pg_num(pgid.pool()),
4502 osdmap->get_pg_num(pgid.pool()),
4503 0) && e > h.same_interval_since) {
4504 h.same_interval_since = e;
4505 }
4506 // up set change?
4507 if ((up != currentup || upprimary != currentupprimary)
4508 && e > h.same_up_since) {
4509 dout(15) << "project_pg_history " << pgid << " up changed in " << e
4510 << " from " << up << " " << upprimary
4511 << " -> " << currentup << " " << currentupprimary << dendl;
4512 h.same_up_since = e;
4513 }
4514
4515 // primary change?
4516 if (OSDMap::primary_changed(
4517 actingprimary,
4518 acting,
4519 currentactingprimary,
4520 currentacting) &&
4521 e > h.same_primary_since) {
4522 dout(15) << "project_pg_history " << pgid << " primary changed in " << e << dendl;
4523 h.same_primary_since = e;
4524 }
4525
4526 if (h.same_interval_since >= e && h.same_up_since >= e && h.same_primary_since >= e)
4527 break;
4528 }
4529
4530 // base case: these floors should be the pg creation epoch if we didn't
4531 // find any changes.
4532 if (e == h.epoch_created) {
4533 if (!h.same_interval_since)
4534 h.same_interval_since = e;
4535 if (!h.same_up_since)
4536 h.same_up_since = e;
4537 if (!h.same_primary_since)
4538 h.same_primary_since = e;
4539 }
4540
4541 dout(15) << "project_pg_history end " << h << dendl;
4542 return true;
4543 }
4544
4545
4546
4547 void OSD::_add_heartbeat_peer(int p)
4548 {
4549 if (p == whoami)
4550 return;
4551 HeartbeatInfo *hi;
4552
4553 map<int,HeartbeatInfo>::iterator i = heartbeat_peers.find(p);
4554 if (i == heartbeat_peers.end()) {
4555 pair<ConnectionRef,ConnectionRef> cons = service.get_con_osd_hb(p, osdmap->get_epoch());
4556 if (!cons.first)
4557 return;
4558 hi = &heartbeat_peers[p];
4559 hi->peer = p;
4560 HeartbeatSession *s = new HeartbeatSession(p);
4561 hi->con_back = cons.first.get();
4562 hi->con_back->set_priv(s->get());
4563 if (cons.second) {
4564 hi->con_front = cons.second.get();
4565 hi->con_front->set_priv(s->get());
4566 dout(10) << "_add_heartbeat_peer: new peer osd." << p
4567 << " " << hi->con_back->get_peer_addr()
4568 << " " << hi->con_front->get_peer_addr()
4569 << dendl;
4570 } else {
4571 hi->con_front.reset(NULL);
4572 dout(10) << "_add_heartbeat_peer: new peer osd." << p
4573 << " " << hi->con_back->get_peer_addr()
4574 << dendl;
4575 }
4576 s->put();
4577 } else {
4578 hi = &i->second;
4579 }
4580 hi->epoch = osdmap->get_epoch();
4581 }
4582
4583 void OSD::_remove_heartbeat_peer(int n)
4584 {
4585 map<int,HeartbeatInfo>::iterator q = heartbeat_peers.find(n);
4586 assert(q != heartbeat_peers.end());
4587 dout(20) << " removing heartbeat peer osd." << n
4588 << " " << q->second.con_back->get_peer_addr()
4589 << " " << (q->second.con_front ? q->second.con_front->get_peer_addr() : entity_addr_t())
4590 << dendl;
4591 q->second.con_back->mark_down();
4592 if (q->second.con_front) {
4593 q->second.con_front->mark_down();
4594 }
4595 heartbeat_peers.erase(q);
4596 }
4597
4598 void OSD::need_heartbeat_peer_update()
4599 {
4600 if (is_stopping())
4601 return;
4602 dout(20) << "need_heartbeat_peer_update" << dendl;
4603 heartbeat_set_peers_need_update();
4604 }
4605
4606 void OSD::maybe_update_heartbeat_peers()
4607 {
4608 assert(osd_lock.is_locked());
4609
4610 if (is_waiting_for_healthy()) {
4611 utime_t now = ceph_clock_now();
4612 if (last_heartbeat_resample == utime_t()) {
4613 last_heartbeat_resample = now;
4614 heartbeat_set_peers_need_update();
4615 } else if (!heartbeat_peers_need_update()) {
4616 utime_t dur = now - last_heartbeat_resample;
4617 if (dur > cct->_conf->osd_heartbeat_grace) {
4618 dout(10) << "maybe_update_heartbeat_peers forcing update after " << dur << " seconds" << dendl;
4619 heartbeat_set_peers_need_update();
4620 last_heartbeat_resample = now;
4621 reset_heartbeat_peers(); // we want *new* peers!
4622 }
4623 }
4624 }
4625
4626 if (!heartbeat_peers_need_update())
4627 return;
4628 heartbeat_clear_peers_need_update();
4629
4630 Mutex::Locker l(heartbeat_lock);
4631
4632 dout(10) << "maybe_update_heartbeat_peers updating" << dendl;
4633
4634
4635 // build heartbeat from set
4636 if (is_active()) {
4637 RWLock::RLocker l(pg_map_lock);
4638 for (ceph::unordered_map<spg_t, PG*>::iterator i = pg_map.begin();
4639 i != pg_map.end();
4640 ++i) {
4641 PG *pg = i->second;
4642 pg->heartbeat_peer_lock.Lock();
4643 dout(20) << i->first << " heartbeat_peers " << pg->heartbeat_peers << dendl;
4644 for (set<int>::iterator p = pg->heartbeat_peers.begin();
4645 p != pg->heartbeat_peers.end();
4646 ++p)
4647 if (osdmap->is_up(*p))
4648 _add_heartbeat_peer(*p);
4649 for (set<int>::iterator p = pg->probe_targets.begin();
4650 p != pg->probe_targets.end();
4651 ++p)
4652 if (osdmap->is_up(*p))
4653 _add_heartbeat_peer(*p);
4654 pg->heartbeat_peer_lock.Unlock();
4655 }
4656 }
4657
4658 // include next and previous up osds to ensure we have a fully-connected set
4659 set<int> want, extras;
4660 int next = osdmap->get_next_up_osd_after(whoami);
4661 if (next >= 0)
4662 want.insert(next);
4663 int prev = osdmap->get_previous_up_osd_before(whoami);
4664 if (prev >= 0 && prev != next)
4665 want.insert(prev);
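// e.g. on a small cluster where osds 0..4 are all up, osd.2 always
// keeps osd.1 and osd.3 in its heartbeat set (illustrative), even if
// it shares no pgs with them, so failure reports can still propagate.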
4666
4667 for (set<int>::iterator p = want.begin(); p != want.end(); ++p) {
4668 dout(10) << " adding neighbor peer osd." << *p << dendl;
4669 extras.insert(*p);
4670 _add_heartbeat_peer(*p);
4671 }
4672
4673 // remove down peers; enumerate extras
4674 map<int,HeartbeatInfo>::iterator p = heartbeat_peers.begin();
4675 while (p != heartbeat_peers.end()) {
4676 if (!osdmap->is_up(p->first)) {
4677 int o = p->first;
4678 ++p;
4679 _remove_heartbeat_peer(o);
4680 continue;
4681 }
4682 if (p->second.epoch < osdmap->get_epoch()) {
4683 extras.insert(p->first);
4684 }
4685 ++p;
4686 }
4687
4688 // too few?
4689 int start = osdmap->get_next_up_osd_after(whoami);
4690 for (int n = start; n >= 0; ) {
4691 if ((int)heartbeat_peers.size() >= cct->_conf->osd_heartbeat_min_peers)
4692 break;
4693 if (!extras.count(n) && !want.count(n) && n != whoami) {
4694 dout(10) << " adding random peer osd." << n << dendl;
4695 extras.insert(n);
4696 _add_heartbeat_peer(n);
4697 }
4698 n = osdmap->get_next_up_osd_after(n);
4699 if (n == start)
4700 break; // came full circle; stop
4701 }
4702
4703 // too many?
4704 for (set<int>::iterator p = extras.begin();
4705 (int)heartbeat_peers.size() > cct->_conf->osd_heartbeat_min_peers && p != extras.end();
4706 ++p) {
4707 if (want.count(*p))
4708 continue;
4709 _remove_heartbeat_peer(*p);
4710 }
4711
4712 dout(10) << "maybe_update_heartbeat_peers " << heartbeat_peers.size() << " peers, extras " << extras << dendl;
4713 }
4714
4715 void OSD::reset_heartbeat_peers()
4716 {
4717 assert(osd_lock.is_locked());
4718 dout(10) << "reset_heartbeat_peers" << dendl;
4719 Mutex::Locker l(heartbeat_lock);
4720 while (!heartbeat_peers.empty()) {
4721 HeartbeatInfo& hi = heartbeat_peers.begin()->second;
4722 hi.con_back->mark_down();
4723 if (hi.con_front) {
4724 hi.con_front->mark_down();
4725 }
4726 heartbeat_peers.erase(heartbeat_peers.begin());
4727 }
4728 failure_queue.clear();
4729 }
4730
4731 void OSD::handle_osd_ping(MOSDPing *m)
4732 {
4733 if (superblock.cluster_fsid != m->fsid) {
4734 dout(20) << "handle_osd_ping from " << m->get_source_inst()
4735 << " bad fsid " << m->fsid << " != " << superblock.cluster_fsid << dendl;
4736 m->put();
4737 return;
4738 }
4739
4740 int from = m->get_source().num();
4741
4742 heartbeat_lock.Lock();
4743 if (is_stopping()) {
4744 heartbeat_lock.Unlock();
4745 m->put();
4746 return;
4747 }
4748
4749 OSDMapRef curmap = service.get_osdmap();
4750 if (!curmap) {
4751 heartbeat_lock.Unlock();
4752 m->put();
4753 return;
4754 }
4755
4756 switch (m->op) {
4757
4758 case MOSDPing::PING:
4759 {
4760 if (cct->_conf->osd_debug_drop_ping_probability > 0) {
4761 auto heartbeat_drop = debug_heartbeat_drops_remaining.find(from);
4762 if (heartbeat_drop != debug_heartbeat_drops_remaining.end()) {
4763 if (heartbeat_drop->second == 0) {
4764 debug_heartbeat_drops_remaining.erase(heartbeat_drop);
4765 } else {
4766 --heartbeat_drop->second;
4767 dout(5) << "Dropping heartbeat from " << from
4768 << ", " << heartbeat_drop->second
4769 << " remaining to drop" << dendl;
4770 break;
4771 }
4772 } else if (cct->_conf->osd_debug_drop_ping_probability >
4773 ((((double)(rand()%100))/100.0))) {
4774 heartbeat_drop =
4775 debug_heartbeat_drops_remaining.insert(std::make_pair(from,
4776 cct->_conf->osd_debug_drop_ping_duration)).first;
4777 dout(5) << "Dropping heartbeat from " << from
4778 << ", " << heartbeat_drop->second
4779 << " remaining to drop" << dendl;
4780 break;
4781 }
4782 }
4783
4784 if (!cct->get_heartbeat_map()->is_healthy()) {
4785 dout(10) << "internal heartbeat not healthy, dropping ping request" << dendl;
4786 break;
4787 }
4788
4789 Message *r = new MOSDPing(monc->get_fsid(),
4790 curmap->get_epoch(),
4791 MOSDPing::PING_REPLY, m->stamp,
4792 cct->_conf->osd_heartbeat_min_size);
4793 m->get_connection()->send_message(r);
4794
4795 if (curmap->is_up(from)) {
4796 service.note_peer_epoch(from, m->map_epoch);
4797 if (is_active()) {
4798 ConnectionRef con = service.get_con_osd_cluster(from, curmap->get_epoch());
4799 if (con) {
4800 service.share_map_peer(from, con.get());
4801 }
4802 }
4803 } else if (!curmap->exists(from) ||
4804 curmap->get_down_at(from) > m->map_epoch) {
4805 // tell them they have died
4806 Message *r = new MOSDPing(monc->get_fsid(),
4807 curmap->get_epoch(),
4808 MOSDPing::YOU_DIED,
4809 m->stamp,
4810 cct->_conf->osd_heartbeat_min_size);
4811 m->get_connection()->send_message(r);
4812 }
4813 }
4814 break;
4815
4816 case MOSDPing::PING_REPLY:
4817 {
4818 map<int,HeartbeatInfo>::iterator i = heartbeat_peers.find(from);
4819 if (i != heartbeat_peers.end()) {
4820 if (m->get_connection() == i->second.con_back) {
4821 dout(25) << "handle_osd_ping got reply from osd." << from
4822 << " first_tx " << i->second.first_tx
4823 << " last_tx " << i->second.last_tx
4824 << " last_rx_back " << i->second.last_rx_back << " -> " << m->stamp
4825 << " last_rx_front " << i->second.last_rx_front
4826 << dendl;
4827 i->second.last_rx_back = m->stamp;
4828 // if there is no front con, set both stamps.
4829 if (i->second.con_front == NULL)
4830 i->second.last_rx_front = m->stamp;
4831 } else if (m->get_connection() == i->second.con_front) {
4832 dout(25) << "handle_osd_ping got reply from osd." << from
4833 << " first_tx " << i->second.first_tx
4834 << " last_tx " << i->second.last_tx
4835 << " last_rx_back " << i->second.last_rx_back
4836 << " last_rx_front " << i->second.last_rx_front << " -> " << m->stamp
4837 << dendl;
4838 i->second.last_rx_front = m->stamp;
4839 }
4840
4841 utime_t cutoff = ceph_clock_now();
4842 cutoff -= cct->_conf->osd_heartbeat_grace;
4843 if (i->second.is_healthy(cutoff)) {
4844 // Cancel false reports
4845 auto failure_queue_entry = failure_queue.find(from);
4846 if (failure_queue_entry != failure_queue.end()) {
4847 dout(10) << "handle_osd_ping canceling queued "
4848 << "failure report for osd." << from << dendl;
4849 failure_queue.erase(failure_queue_entry);
4850 }
4851
4852 auto failure_pending_entry = failure_pending.find(from);
4853 if (failure_pending_entry != failure_pending.end()) {
4854 dout(10) << "handle_osd_ping canceling in-flight "
4855 << "failure report for osd." << from << dendl;
4856 send_still_alive(curmap->get_epoch(),
4857 failure_pending_entry->second.second);
4858 failure_pending.erase(failure_pending_entry);
4859 }
4860 }
4861 }
4862
4863 if (m->map_epoch &&
4864 curmap->is_up(from)) {
4865 service.note_peer_epoch(from, m->map_epoch);
4866 if (is_active()) {
4867 ConnectionRef con = service.get_con_osd_cluster(from, curmap->get_epoch());
4868 if (con) {
4869 service.share_map_peer(from, con.get());
4870 }
4871 }
4872 }
4873 }
4874 break;
4875
4876 case MOSDPing::YOU_DIED:
4877 dout(10) << "handle_osd_ping " << m->get_source_inst()
4878 << " says i am down in " << m->map_epoch << dendl;
4879 osdmap_subscribe(curmap->get_epoch()+1, false);
4880 break;
4881 }
4882
4883 heartbeat_lock.Unlock();
4884 m->put();
4885 }
4886
4887 void OSD::heartbeat_entry()
4888 {
4889 Mutex::Locker l(heartbeat_lock);
4890 if (is_stopping())
4891 return;
4892 while (!heartbeat_stop) {
4893 heartbeat();
4894
4895 double wait = .5 + ((float)(rand() % 10)/10.0) * (float)cct->_conf->osd_heartbeat_interval;
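// e.g. assuming the default osd_heartbeat_interval of 6 seconds,
// rand() % 10 / 10.0 is in [0, 0.9], so this sleeps somewhere in
// roughly [0.5, 5.9] seconds.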
4896 utime_t w;
4897 w.set_from_double(wait);
4898 dout(30) << "heartbeat_entry sleeping for " << wait << dendl;
4899 heartbeat_cond.WaitInterval(heartbeat_lock, w);
4900 if (is_stopping())
4901 return;
4902 dout(30) << "heartbeat_entry woke up" << dendl;
4903 }
4904 }
4905
4906 void OSD::heartbeat_check()
4907 {
4908 assert(heartbeat_lock.is_locked());
4909 utime_t now = ceph_clock_now();
4910
4911 // check for heartbeat replies (move me elsewhere?)
4912 utime_t cutoff = now;
4913 cutoff -= cct->_conf->osd_heartbeat_grace;
4914 for (map<int,HeartbeatInfo>::iterator p = heartbeat_peers.begin();
4915 p != heartbeat_peers.end();
4916 ++p) {
4917
4918 if (p->second.first_tx == utime_t()) {
4919 dout(25) << "heartbeat_check we haven't sent ping to osd." << p->first
4920 << "yet, skipping" << dendl;
4921 continue;
4922 }
4923
4924 dout(25) << "heartbeat_check osd." << p->first
4925 << " first_tx " << p->second.first_tx
4926 << " last_tx " << p->second.last_tx
4927 << " last_rx_back " << p->second.last_rx_back
4928 << " last_rx_front " << p->second.last_rx_front
4929 << dendl;
4930 if (p->second.is_unhealthy(cutoff)) {
4931 if (p->second.last_rx_back == utime_t() ||
4932 p->second.last_rx_front == utime_t()) {
4933 derr << "heartbeat_check: no reply from " << p->second.con_front->get_peer_addr().get_sockaddr()
4934 << " osd." << p->first << " ever on either front or back, first ping sent "
4935 << p->second.first_tx << " (cutoff " << cutoff << ")" << dendl;
4936 // fail
4937 failure_queue[p->first] = p->second.last_tx;
4938 } else {
4939 derr << "heartbeat_check: no reply from " << p->second.con_front->get_peer_addr().get_sockaddr()
4940 << " osd." << p->first << " since back " << p->second.last_rx_back
4941 << " front " << p->second.last_rx_front
4942 << " (cutoff " << cutoff << ")" << dendl;
4943 // fail
4944 failure_queue[p->first] = MIN(p->second.last_rx_back, p->second.last_rx_front);
4945 }
4946 }
4947 }
4948 }
4949
4950 void OSD::heartbeat()
4951 {
4952 dout(30) << "heartbeat" << dendl;
4953
4954 // get CPU load avg
4955 double loadavgs[1];
4956 int n_samples = 86400 / cct->_conf->osd_heartbeat_interval;
4957 if (getloadavg(loadavgs, 1) == 1) {
4958 logger->set(l_osd_loadavg, 100 * loadavgs[0]);
4959 daily_loadavg = (daily_loadavg * (n_samples - 1) + loadavgs[0]) / n_samples;
4960 dout(30) << "heartbeat: daily_loadavg " << daily_loadavg << dendl;
4961 }
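// daily_loadavg above is a running mean over roughly one day of
// samples: assuming the default osd_heartbeat_interval of 6 seconds,
// n_samples = 86400 / 6 = 14400, so each new reading contributes
// 1/14400 of the average.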
4962
4963 dout(30) << "heartbeat checking stats" << dendl;
4964
4965 // refresh stats?
4966 vector<int> hb_peers;
4967 for (map<int,HeartbeatInfo>::iterator p = heartbeat_peers.begin();
4968 p != heartbeat_peers.end();
4969 ++p)
4970 hb_peers.push_back(p->first);
4971 service.update_osd_stat(hb_peers);
4972
4973 dout(5) << "heartbeat: " << service.get_osd_stat() << dendl;
4974
4975 utime_t now = ceph_clock_now();
4976
4977 // send heartbeats
4978 for (map<int,HeartbeatInfo>::iterator i = heartbeat_peers.begin();
4979 i != heartbeat_peers.end();
4980 ++i) {
4981 int peer = i->first;
4982 i->second.last_tx = now;
4983 if (i->second.first_tx == utime_t())
4984 i->second.first_tx = now;
4985 dout(30) << "heartbeat sending ping to osd." << peer << dendl;
4986 i->second.con_back->send_message(new MOSDPing(monc->get_fsid(),
4987 service.get_osdmap()->get_epoch(),
4988 MOSDPing::PING, now,
4989 cct->_conf->osd_heartbeat_min_size));
4990
4991 if (i->second.con_front)
4992 i->second.con_front->send_message(new MOSDPing(monc->get_fsid(),
4993 service.get_osdmap()->get_epoch(),
4994 MOSDPing::PING, now,
4995 cct->_conf->osd_heartbeat_min_size));
4996 }
4997
4998 logger->set(l_osd_hb_to, heartbeat_peers.size());
4999
5000 // hmm.. am i all alone?
5001 dout(30) << "heartbeat lonely?" << dendl;
5002 if (heartbeat_peers.empty()) {
5003 if (now - last_mon_heartbeat > cct->_conf->osd_mon_heartbeat_interval && is_active()) {
5004 last_mon_heartbeat = now;
5005 dout(10) << "i have no heartbeat peers; checking mon for new map" << dendl;
5006 osdmap_subscribe(osdmap->get_epoch() + 1, false);
5007 }
5008 }
5009
5010 dout(30) << "heartbeat done" << dendl;
5011 }
5012
5013 bool OSD::heartbeat_reset(Connection *con)
5014 {
5015 HeartbeatSession *s = static_cast<HeartbeatSession*>(con->get_priv());
5016 if (s) {
5017 heartbeat_lock.Lock();
5018 if (is_stopping()) {
5019 heartbeat_lock.Unlock();
5020 s->put();
5021 return true;
5022 }
5023 map<int,HeartbeatInfo>::iterator p = heartbeat_peers.find(s->peer);
5024 if (p != heartbeat_peers.end() &&
5025 (p->second.con_back == con ||
5026 p->second.con_front == con)) {
5027 dout(10) << "heartbeat_reset failed hb con " << con << " for osd." << p->second.peer
5028 << ", reopening" << dendl;
5029 if (con != p->second.con_back) {
5030 p->second.con_back->mark_down();
5031 }
5032 p->second.con_back.reset(NULL);
5033 if (p->second.con_front && con != p->second.con_front) {
5034 p->second.con_front->mark_down();
5035 }
5036 p->second.con_front.reset(NULL);
5037 pair<ConnectionRef,ConnectionRef> newcon = service.get_con_osd_hb(p->second.peer, p->second.epoch);
5038 if (newcon.first) {
5039 p->second.con_back = newcon.first.get();
5040 p->second.con_back->set_priv(s->get());
5041 if (newcon.second) {
5042 p->second.con_front = newcon.second.get();
5043 p->second.con_front->set_priv(s->get());
5044 }
5045 } else {
5046 dout(10) << "heartbeat_reset failed hb con " << con << " for osd." << p->second.peer
5047 << ", raced with osdmap update, closing out peer" << dendl;
5048 heartbeat_peers.erase(p);
5049 }
5050 } else {
5051 dout(10) << "heartbeat_reset closing (old) failed hb con " << con << dendl;
5052 }
5053 heartbeat_lock.Unlock();
5054 s->put();
5055 }
5056 return true;
5057 }
5058
5059
5060
5061 // =========================================
5062
5063 void OSD::tick()
5064 {
5065 assert(osd_lock.is_locked());
5066 dout(10) << "tick" << dendl;
5067
5068 if (is_active() || is_waiting_for_healthy()) {
5069 maybe_update_heartbeat_peers();
5070 }
5071
5072 if (is_waiting_for_healthy()) {
5073 start_boot();
5074 } else if (is_preboot() &&
5075 waiting_for_luminous_mons &&
5076 monc->monmap.get_required_features().contains_all(
5077 ceph::features::mon::FEATURE_LUMINOUS)) {
5078 // mon upgrade finished!
5079 start_boot();
5080 }
5081
5082 do_waiters();
5083
5084 tick_timer.add_event_after(OSD_TICK_INTERVAL, new C_Tick(this));
5085 }
5086
5087 void OSD::tick_without_osd_lock()
5088 {
5089 assert(tick_timer_lock.is_locked());
5090 dout(10) << "tick_without_osd_lock" << dendl;
5091
5092 logger->set(l_osd_buf, buffer::get_total_alloc());
5093 logger->set(l_osd_history_alloc_bytes, SHIFT_ROUND_UP(buffer::get_history_alloc_bytes(), 20));
5094 logger->set(l_osd_history_alloc_num, buffer::get_history_alloc_num());
5095 logger->set(l_osd_cached_crc, buffer::get_cached_crc());
5096 logger->set(l_osd_cached_crc_adjusted, buffer::get_cached_crc_adjusted());
5097 logger->set(l_osd_missed_crc, buffer::get_missed_crc());
5098
5099 // osd_lock is not being held, which means the OSD state
5100 // might change when doing the monitor report
5101 if (is_active() || is_waiting_for_healthy()) {
5102 heartbeat_lock.Lock();
5103 heartbeat_check();
5104 heartbeat_lock.Unlock();
5105
5106 map_lock.get_read();
5107 Mutex::Locker l(mon_report_lock);
5108
5109 // mon report?
5110 bool reset = false;
5111 bool report = false;
5112 utime_t now = ceph_clock_now();
5113 pg_stat_queue_lock.Lock();
5114 double backoff = stats_ack_timeout / cct->_conf->osd_mon_ack_timeout;
5115 double adjusted_min = cct->_conf->osd_mon_report_interval_min * backoff;
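// Worked example, assuming the shipped defaults of osd_mon_ack_timeout
// = 30 and osd_mon_report_interval_min = 5: if repeated unacked stats
// have grown stats_ack_timeout to 60, backoff is 60/30 = 2 and
// adjusted_min becomes 10 seconds between reports.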
5116 // note: we shouldn't adjust max because it must remain < the
5117 // mon's mon_osd_report_timeout (which defaults to 1.5x our
5118 // value).
5119 double max = cct->_conf->osd_mon_report_interval_max;
5120 if (!outstanding_pg_stats.empty() &&
5121 (now - stats_ack_timeout) > last_pg_stats_ack) {
5122 dout(1) << __func__ << " mon hasn't acked PGStats in "
5123 << now - last_pg_stats_ack
5124 << " seconds, reconnecting elsewhere" << dendl;
5125 reset = true;
5126 last_pg_stats_ack = now; // reset clock
5127 last_pg_stats_sent = utime_t();
5128 stats_ack_timeout =
5129 MAX(cct->_conf->osd_mon_ack_timeout,
5130 stats_ack_timeout * cct->_conf->osd_stats_ack_timeout_factor);
5131 outstanding_pg_stats.clear();
5132 }
5133 if (now - last_pg_stats_sent > max) {
5134 osd_stat_updated = true;
5135 report = true;
5136 } else if (service.need_fullness_update()) {
5137 report = true;
5138 } else if ((int)outstanding_pg_stats.size() >=
5139 cct->_conf->osd_mon_report_max_in_flight) {
5140 dout(20) << __func__ << " have max " << outstanding_pg_stats
5141 << " stats updates in flight" << dendl;
5142 } else {
5143 if (now - last_mon_report > adjusted_min) {
5144 dout(20) << __func__ << " stats backoff " << backoff
5145 << " adjusted_min " << adjusted_min << " - sending report"
5146 << dendl;
5147 osd_stat_updated = true;
5148 report = true;
5149 }
5150 }
5151 pg_stat_queue_lock.Unlock();
5152
5153 if (reset) {
5154 monc->reopen_session();
5155 } else if (report) {
5156 last_mon_report = now;
5157
5158 // do any pending reports
5159 send_full_update();
5160 send_failures();
5161 if (osdmap->require_osd_release < CEPH_RELEASE_LUMINOUS) {
5162 send_pg_stats(now);
5163 }
5164 }
5165 map_lock.put_read();
5166 }
5167
5168 if (is_active()) {
5169 if (!scrub_random_backoff()) {
5170 sched_scrub();
5171 }
5172 service.promote_throttle_recalibrate();
5173 bool need_send_beacon = false;
5174 const auto now = ceph::coarse_mono_clock::now();
5175 {
5176 // borrow lec lock to protect last_sent_beacon from changing
5177 Mutex::Locker l{min_last_epoch_clean_lock};
5178 const auto elapsed = now - last_sent_beacon;
5179 if (chrono::duration_cast<chrono::seconds>(elapsed).count() >
5180 cct->_conf->osd_beacon_report_interval) {
5181 need_send_beacon = true;
5182 }
5183 }
5184 if (need_send_beacon) {
5185 send_beacon(now);
5186 }
5187 }
5188
5189 check_ops_in_flight();
5190 service.kick_recovery_queue();
5191 tick_timer_without_osd_lock.add_event_after(OSD_TICK_INTERVAL, new C_Tick_WithoutOSDLock(this));
5192 }
5193
5194 void OSD::check_ops_in_flight()
5195 {
5196 vector<string> warnings;
5197 if (op_tracker.check_ops_in_flight(warnings)) {
5198 for (vector<string>::iterator i = warnings.begin();
5199 i != warnings.end();
5200 ++i) {
5201 clog->warn() << *i;
5202 }
5203 }
5204 }
5205
5206 // Usage:
5207 // setomapval <pool-id> [namespace/]<obj-name> <key> <val>
5208 // rmomapkey <pool-id> [namespace/]<obj-name> <key>
5209 // setomapheader <pool-id> [namespace/]<obj-name> <header>
5210 // getomap <pool-id> [namespace/]<obj-name>
5211 // truncobj <pool-id> [namespace/]<obj-name> <newlen>
5212 // injectmdataerr [namespace/]<obj-name> [shardid]
5213 // injectdataerr [namespace/]<obj-name> [shardid]
5214 //
5215 // set_recovery_delay [utime]
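//
// These are admin socket commands, so a typical invocation looks
// roughly like the following (pool, object, and value names are
// illustrative):
//
//   ceph daemon osd.0 setomapval rbd obj1 somekey someval
//   ceph daemon osd.0 truncobj rbd obj1 4096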
5216 void TestOpsSocketHook::test_ops(OSDService *service, ObjectStore *store,
5217 const std::string &command, cmdmap_t& cmdmap, ostream &ss)
5218 {
5219 // Test support
5220 // Support changing the omap on a single osd by using the admin socket to
5221 // directly request that the osd make a change.
5222 if (command == "setomapval" || command == "rmomapkey" ||
5223 command == "setomapheader" || command == "getomap" ||
5224 command == "truncobj" || command == "injectmdataerr" ||
5225 command == "injectdataerr"
5226 ) {
5227 pg_t rawpg;
5228 int64_t pool;
5229 OSDMapRef curmap = service->get_osdmap();
5230 int r = -1;
5231
5232 string poolstr;
5233
5234 cmd_getval(service->cct, cmdmap, "pool", poolstr);
5235 pool = curmap->lookup_pg_pool_name(poolstr);
5236 // If we can't find it by name, then maybe an id was specified
5237 if (pool < 0 && isdigit(poolstr[0]))
5238 pool = atoll(poolstr.c_str());
5239 if (pool < 0) {
5240 ss << "Invalid pool" << poolstr;
5241 return;
5242 }
5243
5244 string objname, nspace;
5245 cmd_getval(service->cct, cmdmap, "objname", objname);
5246 std::size_t found = objname.find_first_of('/');
5247 if (found != string::npos) {
5248 nspace = objname.substr(0, found);
5249 objname = objname.substr(found+1);
5250 }
5251 object_locator_t oloc(pool, nspace);
5252 r = curmap->object_locator_to_pg(object_t(objname), oloc, rawpg);
5253
5254 if (r < 0) {
5255 ss << "Invalid namespace/objname";
5256 return;
5257 }
5258
5259 int64_t shardid;
5260 cmd_getval(service->cct, cmdmap, "shardid", shardid, int64_t(shard_id_t::NO_SHARD));
5261 hobject_t obj(object_t(objname), string(""), CEPH_NOSNAP, rawpg.ps(), pool, nspace);
5262 ghobject_t gobj(obj, ghobject_t::NO_GEN, shard_id_t(uint8_t(shardid)));
5263 spg_t pgid(curmap->raw_pg_to_pg(rawpg), shard_id_t(shardid));
5264 if (curmap->pg_is_ec(rawpg)) {
5265 if ((command != "injectdataerr") && (command != "injectmdataerr")) {
5266 ss << "Must not call on ec pool, except injectdataerr or injectmdataerr";
5267 return;
5268 }
5269 }
5270
5271 ObjectStore::Transaction t;
5272
5273 if (command == "setomapval") {
5274 map<string, bufferlist> newattrs;
5275 bufferlist val;
5276 string key, valstr;
5277 cmd_getval(service->cct, cmdmap, "key", key);
5278 cmd_getval(service->cct, cmdmap, "val", valstr);
5279
5280 val.append(valstr);
5281 newattrs[key] = val;
5282 t.omap_setkeys(coll_t(pgid), ghobject_t(obj), newattrs);
5283 r = store->apply_transaction(service->meta_osr.get(), std::move(t));
5284 if (r < 0)
5285 ss << "error=" << r;
5286 else
5287 ss << "ok";
5288 } else if (command == "rmomapkey") {
5289 string key;
5290 set<string> keys;
5291 cmd_getval(service->cct, cmdmap, "key", key);
5292
5293 keys.insert(key);
5294 t.omap_rmkeys(coll_t(pgid), ghobject_t(obj), keys);
5295 r = store->apply_transaction(service->meta_osr.get(), std::move(t));
5296 if (r < 0)
5297 ss << "error=" << r;
5298 else
5299 ss << "ok";
5300 } else if (command == "setomapheader") {
5301 bufferlist newheader;
5302 string headerstr;
5303
5304 cmd_getval(service->cct, cmdmap, "header", headerstr);
5305 newheader.append(headerstr);
5306 t.omap_setheader(coll_t(pgid), ghobject_t(obj), newheader);
5307 r = store->apply_transaction(service->meta_osr.get(), std::move(t));
5308 if (r < 0)
5309 ss << "error=" << r;
5310 else
5311 ss << "ok";
5312 } else if (command == "getomap") {
5313 // Debug: output the entire omap
5314 bufferlist hdrbl;
5315 map<string, bufferlist> keyvals;
5316 r = store->omap_get(coll_t(pgid), ghobject_t(obj), &hdrbl, &keyvals);
5317 if (r >= 0) {
5318 ss << "header=" << string(hdrbl.c_str(), hdrbl.length());
5319 for (map<string, bufferlist>::iterator it = keyvals.begin();
5320 it != keyvals.end(); ++it)
5321 ss << " key=" << (*it).first << " val="
5322 << string((*it).second.c_str(), (*it).second.length());
5323 } else {
5324 ss << "error=" << r;
5325 }
5326 } else if (command == "truncobj") {
5327 int64_t trunclen;
5328 cmd_getval(service->cct, cmdmap, "len", trunclen);
5329 t.truncate(coll_t(pgid), ghobject_t(obj), trunclen);
5330 r = store->apply_transaction(service->meta_osr.get(), std::move(t));
5331 if (r < 0)
5332 ss << "error=" << r;
5333 else
5334 ss << "ok";
5335 } else if (command == "injectdataerr") {
5336 store->inject_data_error(gobj);
5337 ss << "ok";
5338 } else if (command == "injectmdataerr") {
5339 store->inject_mdata_error(gobj);
5340 ss << "ok";
5341 }
5342 return;
5343 }
5344 if (command == "set_recovery_delay") {
5345 int64_t delay;
5346 cmd_getval(service->cct, cmdmap, "utime", delay, (int64_t)0);
5347 ostringstream oss;
5348 oss << delay;
5349 int r = service->cct->_conf->set_val("osd_recovery_delay_start",
5350 oss.str().c_str());
5351 if (r != 0) {
5352 ss << "set_recovery_delay: error setting "
5353 << "osd_recovery_delay_start to '" << delay << "': error "
5354 << r;
5355 return;
5356 }
5357 service->cct->_conf->apply_changes(NULL);
5358 ss << "set_recovery_delay: set osd_recovery_delay_start "
5359 << "to " << service->cct->_conf->osd_recovery_delay_start;
5360 return;
5361 }
5362 if (command == "trigger_scrub") {
5363 spg_t pgid;
5364 OSDMapRef curmap = service->get_osdmap();
5365
5366 string pgidstr;
5367
5368 cmd_getval(service->cct, cmdmap, "pgid", pgidstr);
5369 if (!pgid.parse(pgidstr.c_str())) {
5370 ss << "Invalid pgid specified";
5371 return;
5372 }
5373
5374 PG *pg = service->osd->_lookup_lock_pg(pgid);
5375 if (pg == nullptr) {
5376 ss << "Can't find pg " << pgid;
5377 return;
5378 }
5379
5380 if (pg->is_primary()) {
5381 pg->unreg_next_scrub();
5382 const pg_pool_t *p = curmap->get_pg_pool(pgid.pool());
5383 double pool_scrub_max_interval = 0;
5384 p->opts.get(pool_opts_t::SCRUB_MAX_INTERVAL, &pool_scrub_max_interval);
5385 double scrub_max_interval = pool_scrub_max_interval > 0 ?
5386 pool_scrub_max_interval : g_conf->osd_scrub_max_interval;
5387 // Instead of marking must_scrub, force a scheduled scrub
5388 utime_t stamp = ceph_clock_now();
5389 stamp -= scrub_max_interval;
5390 stamp -= 100.0; // push back last scrub more for good measure
5391 pg->info.history.last_scrub_stamp = stamp;
5392 pg->reg_next_scrub();
5393 ss << "ok";
5394 } else {
5395 ss << "Not primary";
5396 }
5397 pg->unlock();
5398 return;
5399 }
5400 if (command == "injectfull") {
5401 int64_t count;
5402 string type;
5403 OSDService::s_names state;
5404 cmd_getval(service->cct, cmdmap, "type", type, string("full"));
5405 cmd_getval(service->cct, cmdmap, "count", count, (int64_t)-1);
5406 if (type == "none" || count == 0) {
5407 type = "none";
5408 count = 0;
5409 }
5410 state = service->get_full_state(type);
5411 if (state == OSDService::s_names::INVALID) {
5412 ss << "Invalid type use (none, nearfull, backfillfull, full, failsafe)";
5413 return;
5414 }
5415 service->set_injectfull(state, count);
5416 return;
5417 }
5418 ss << "Internal error - command=" << command;
5419 }
5420
5421 // =========================================
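// remove_dir() below deletes a collection's objects in batches of
// osd_target_transaction_size, pausing via the DeletingState between batches
// so the removal can be cancelled or preempted. A minimal caller sketch
// (illustrative only; the real driver is OSD::RemoveWQ::_process() below):
//
//   bool finished = false;
//   while (remove_dir(cct, store, &mapper, &driver, osr, coll, dstate,
//                     &finished, handle) && !finished)
//     ; // keep listing and removing until the collection is empty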
5422 bool remove_dir(
5423 CephContext *cct,
5424 ObjectStore *store, SnapMapper *mapper,
5425 OSDriver *osdriver,
5426 ObjectStore::Sequencer *osr,
5427 coll_t coll, DeletingStateRef dstate,
5428 bool *finished,
5429 ThreadPool::TPHandle &handle)
5430 {
5431 vector<ghobject_t> olist;
5432 int64_t num = 0;
5433 ObjectStore::Transaction t;
5434 ghobject_t next;
5435 handle.reset_tp_timeout();
5436 store->collection_list(
5437 coll,
5438 next,
5439 ghobject_t::get_max(),
5440 store->get_ideal_list_max(),
5441 &olist,
5442 &next);
5443 generic_dout(10) << __func__ << " " << olist << dendl;
5444 // default cont to true; this is safe because the caller (OSD::RemoveWQ::_process())
5445 // will recheck the answer before it really goes on.
5446 bool cont = true;
5447 for (vector<ghobject_t>::iterator i = olist.begin();
5448 i != olist.end();
5449 ++i) {
5450 if (i->is_pgmeta())
5451 continue;
5452 OSDriver::OSTransaction _t(osdriver->get_transaction(&t));
5453 int r = mapper->remove_oid(i->hobj, &_t);
5454 if (r != 0 && r != -ENOENT) {
5455 ceph_abort();
5456 }
5457 t.remove(coll, *i);
5458 if (++num >= cct->_conf->osd_target_transaction_size) {
5459 C_SaferCond waiter;
5460 store->queue_transaction(osr, std::move(t), &waiter);
5461 cont = dstate->pause_clearing();
5462 handle.suspend_tp_timeout();
5463 waiter.wait();
5464 handle.reset_tp_timeout();
5465 if (cont)
5466 cont = dstate->resume_clearing();
5467 if (!cont)
5468 return false;
5469 t = ObjectStore::Transaction();
5470 num = 0;
5471 }
5472 }
5473 if (num) {
5474 C_SaferCond waiter;
5475 store->queue_transaction(osr, std::move(t), &waiter);
5476 cont = dstate->pause_clearing();
5477 handle.suspend_tp_timeout();
5478 waiter.wait();
5479 handle.reset_tp_timeout();
5480 if (cont)
5481 cont = dstate->resume_clearing();
5482 }
5483 // we are finished only if the listing reached the end of the collection
5484 *finished = next.is_max();
5485 return cont;
5486 }
5487
5488 void OSD::RemoveWQ::_process(
5489 pair<PGRef, DeletingStateRef> item,
5490 ThreadPool::TPHandle &handle)
5491 {
5492 FUNCTRACE();
5493 PGRef pg(item.first);
5494 SnapMapper &mapper = pg->snap_mapper;
5495 OSDriver &driver = pg->osdriver;
5496 coll_t coll = coll_t(pg->info.pgid);
5497 pg->osr->flush();
5498 bool finished = false;
5499
5500 if (!item.second->start_or_resume_clearing())
5501 return;
5502
5503 bool cont = remove_dir(
5504 pg->cct, store, &mapper, &driver, pg->osr.get(), coll, item.second,
5505 &finished, handle);
5506 if (!cont)
5507 return;
5508 if (!finished) {
5509 if (item.second->pause_clearing())
5510 queue_front(item);
5511 return;
5512 }
5513
5514 if (!item.second->start_deleting())
5515 return;
5516
5517 ObjectStore::Transaction t;
5518 PGLog::clear_info_log(pg->info.pgid, &t);
5519
5520 if (cct->_conf->osd_inject_failure_on_pg_removal) {
5521 generic_derr << "osd_inject_failure_on_pg_removal" << dendl;
5522 _exit(1);
5523 }
5524 t.remove_collection(coll);
5525
5526 // We need the sequencer to stick around until the op is complete
5527 store->queue_transaction(
5528 pg->osr.get(),
5529 std::move(t),
5530 0, // onapplied
5531 0, // oncommit
5532 0, // onreadable sync
5533 new ContainerContext<PGRef>(pg),
5534 TrackedOpRef());
5535
5536 item.second->finish_deleting();
5537 }
5538 // =========================================
5539
5540 void OSD::ms_handle_connect(Connection *con)
5541 {
5542 dout(10) << __func__ << " con " << con << dendl;
5543 if (con->get_peer_type() == CEPH_ENTITY_TYPE_MON) {
5544 Mutex::Locker l(osd_lock);
5545 if (is_stopping())
5546 return;
5547 dout(10) << __func__ << " on mon" << dendl;
5548
5549 if (is_preboot()) {
5550 start_boot();
5551 } else if (is_booting()) {
5552 _send_boot(); // resend boot message
5553 } else {
5554 map_lock.get_read();
5555 Mutex::Locker l2(mon_report_lock);
5556
5557 utime_t now = ceph_clock_now();
5558 last_mon_report = now;
5559
5560 // resend everything, it's a new session
5561 send_full_update();
5562 send_alive();
5563 service.requeue_pg_temp();
5564 service.send_pg_temp();
5565 requeue_failures();
5566 send_failures();
5567 if (osdmap->require_osd_release < CEPH_RELEASE_LUMINOUS) {
5568 send_pg_stats(now);
5569 }
5570
5571 map_lock.put_read();
5572 if (is_active()) {
5573 send_beacon(ceph::coarse_mono_clock::now());
5574 }
5575 }
5576
5577 // full map requests may happen while active or pre-boot
5578 if (requested_full_first) {
5579 rerequest_full_maps();
5580 }
5581 }
5582 }
5583
5584 void OSD::ms_handle_fast_connect(Connection *con)
5585 {
5586 if (con->get_peer_type() != CEPH_ENTITY_TYPE_MON &&
5587 con->get_peer_type() != CEPH_ENTITY_TYPE_MGR) {
5588 Session *s = static_cast<Session*>(con->get_priv());
5589 if (!s) {
5590 s = new Session(cct);
5591 con->set_priv(s->get());
5592 s->con = con;
5593 dout(10) << " new session (outgoing) " << s << " con=" << s->con
5594 << " addr=" << s->con->get_peer_addr() << dendl;
5595 // we don't connect to clients
5596 assert(con->get_peer_type() == CEPH_ENTITY_TYPE_OSD);
5597 s->entity_name.set_type(CEPH_ENTITY_TYPE_OSD);
5598 }
5599 s->put();
5600 }
5601 }
5602
5603 void OSD::ms_handle_fast_accept(Connection *con)
5604 {
5605 if (con->get_peer_type() != CEPH_ENTITY_TYPE_MON &&
5606 con->get_peer_type() != CEPH_ENTITY_TYPE_MGR) {
5607 Session *s = static_cast<Session*>(con->get_priv());
5608 if (!s) {
5609 s = new Session(cct);
5610 con->set_priv(s->get());
5611 s->con = con;
5612 dout(10) << "new session (incoming)" << s << " con=" << con
5613 << " addr=" << con->get_peer_addr()
5614 << " must have raced with connect" << dendl;
5615 assert(con->get_peer_type() == CEPH_ENTITY_TYPE_OSD);
5616 s->entity_name.set_type(CEPH_ENTITY_TYPE_OSD);
5617 }
5618 s->put();
5619 }
5620 }
5621
5622 bool OSD::ms_handle_reset(Connection *con)
5623 {
5624 Session *session = static_cast<Session*>(con->get_priv());
5625 dout(2) << "ms_handle_reset con " << con << " session " << session << dendl;
5626 if (!session)
5627 return false;
5628 session->wstate.reset(con);
5629 session->con.reset(NULL); // break con <-> session ref cycle
5630 // note that we break session->con *before* the session_handle_reset
5631 // cleanup below. this avoids a race between us and
5632 // PG::add_backoff, Session::check_backoff, etc.
5633 session_handle_reset(session);
5634 session->put();
5635 return true;
5636 }
5637
5638 bool OSD::ms_handle_refused(Connection *con)
5639 {
5640 if (!cct->_conf->osd_fast_fail_on_connection_refused)
5641 return false;
5642
5643 Session *session = static_cast<Session*>(con->get_priv());
5644 dout(2) << "ms_handle_refused con " << con << " session " << session << dendl;
5645 if (!session)
5646 return false;
5647 int type = con->get_peer_type();
5648 // handle only OSD failures here
5649 if (monc && (type == CEPH_ENTITY_TYPE_OSD)) {
5650 OSDMapRef osdmap = get_osdmap();
5651 if (osdmap) {
5652 int id = osdmap->identify_osd_on_all_channels(con->get_peer_addr());
5653 if (id >= 0 && osdmap->is_up(id)) {
5654 // We are cheating the mon heartbeat grace logic here, because we know the
5655 // peer is not going to respawn on its own. +1 so we won't hit any boundary case.
5656 monc->send_mon_message(new MOSDFailure(monc->get_fsid(),
5657 osdmap->get_inst(id),
5658 cct->_conf->osd_heartbeat_grace + 1,
5659 osdmap->get_epoch(),
5660 MOSDFailure::FLAG_IMMEDIATE | MOSDFailure::FLAG_FAILED
5661 ));
5662 }
5663 }
5664 }
5665 session->put();
5666 return true;
5667 }
5668
5669 struct C_OSD_GetVersion : public Context {
5670 OSD *osd;
5671 uint64_t oldest, newest;
5672 explicit C_OSD_GetVersion(OSD *o) : osd(o), oldest(0), newest(0) {}
5673 void finish(int r) override {
5674 if (r >= 0)
5675 osd->_got_mon_epochs(oldest, newest);
5676 }
5677 };
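// Boot sequence sketch (as implemented below):
//   start_boot()
//     -> monc->get_version("osdmap", ...) with a C_OSD_GetVersion callback
//     -> _got_mon_epochs(oldest, newest)
//     -> _preboot(): sanity-check osdmap flags and required features
//     -> _send_boot() and STATE_BOOTING, or osdmap_subscribe() to catch up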
5678
5679 void OSD::start_boot()
5680 {
5681 if (!_is_healthy()) {
5682 // if we are not healthy, do not mark ourselves up (yet)
5683 dout(1) << "not healthy; waiting to boot" << dendl;
5684 if (!is_waiting_for_healthy())
5685 start_waiting_for_healthy();
5686 // send pings sooner rather than later
5687 heartbeat_kick();
5688 return;
5689 }
5690 dout(1) << __func__ << dendl;
5691 set_state(STATE_PREBOOT);
5692 waiting_for_luminous_mons = false;
5693 dout(10) << "start_boot - have maps " << superblock.oldest_map
5694 << ".." << superblock.newest_map << dendl;
5695 C_OSD_GetVersion *c = new C_OSD_GetVersion(this);
5696 monc->get_version("osdmap", &c->newest, &c->oldest, c);
5697 }
5698
5699 void OSD::_got_mon_epochs(epoch_t oldest, epoch_t newest)
5700 {
5701 Mutex::Locker l(osd_lock);
5702 if (is_preboot()) {
5703 _preboot(oldest, newest);
5704 }
5705 }
5706
5707 void OSD::_preboot(epoch_t oldest, epoch_t newest)
5708 {
5709 assert(is_preboot());
5710 dout(10) << __func__ << " _preboot mon has osdmaps "
5711 << oldest << ".." << newest << dendl;
5712
5713 // ensure our local fullness awareness is accurate
5714 heartbeat();
5715
5716 // if our map is within recent history, try to add ourselves to the osdmap.
5717 if (osdmap->get_epoch() == 0) {
5718 derr << "waiting for initial osdmap" << dendl;
5719 } else if (osdmap->is_destroyed(whoami)) {
5720 derr << "osdmap says I am destroyed, exiting" << dendl;
5721 exit(0);
5722 } else if (osdmap->test_flag(CEPH_OSDMAP_NOUP) || osdmap->is_noup(whoami)) {
5723 derr << "osdmap NOUP flag is set, waiting for it to clear" << dendl;
5724 } else if (!osdmap->test_flag(CEPH_OSDMAP_SORTBITWISE)) {
5725 derr << "osdmap SORTBITWISE OSDMap flag is NOT set; please set it"
5726 << dendl;
5727 } else if (osdmap->require_osd_release < CEPH_RELEASE_JEWEL) {
5728 derr << "osdmap REQUIRE_JEWEL OSDMap flag is NOT set; please set it"
5729 << dendl;
5730 } else if (!monc->monmap.get_required_features().contains_all(
5731 ceph::features::mon::FEATURE_LUMINOUS)) {
5732 derr << "monmap REQUIRE_LUMINOUS is NOT set; must upgrade all monitors to "
5733 << "Luminous or later before Luminous OSDs will boot" << dendl;
5734 waiting_for_luminous_mons = true;
5735 } else if (service.need_fullness_update()) {
5736 derr << "osdmap fullness state needs update" << dendl;
5737 send_full_update();
5738 } else if (osdmap->get_epoch() >= oldest - 1 &&
5739 osdmap->get_epoch() + cct->_conf->osd_map_message_max > newest) {
5740 _send_boot();
5741 return;
5742 }
5743
5744 // get all the latest maps
5745 if (osdmap->get_epoch() + 1 >= oldest)
5746 osdmap_subscribe(osdmap->get_epoch() + 1, false);
5747 else
5748 osdmap_subscribe(oldest - 1, true);
5749 }
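// Example of the catch-up logic above: with a local epoch of 100 and a mon
// range of 90..200, we subscribe to maps starting at epoch 101; if the mon's
// oldest map were 150 instead, we would subscribe from 149 (oldest - 1),
// since the intervening incrementals are no longer available.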
5750
5751 void OSD::send_full_update()
5752 {
5753 if (!service.need_fullness_update())
5754 return;
5755 unsigned state = 0;
5756 if (service.is_full()) {
5757 state = CEPH_OSD_FULL;
5758 } else if (service.is_backfillfull()) {
5759 state = CEPH_OSD_BACKFILLFULL;
5760 } else if (service.is_nearfull()) {
5761 state = CEPH_OSD_NEARFULL;
5762 }
5763 set<string> s;
5764 OSDMap::calc_state_set(state, s);
5765 dout(10) << __func__ << " want state " << s << dendl;
5766 monc->send_mon_message(new MOSDFull(osdmap->get_epoch(), state));
5767 }
5768
5769 void OSD::start_waiting_for_healthy()
5770 {
5771 dout(1) << "start_waiting_for_healthy" << dendl;
5772 set_state(STATE_WAITING_FOR_HEALTHY);
5773 last_heartbeat_resample = utime_t();
5774 }
5775
5776 bool OSD::_is_healthy()
5777 {
5778 if (!cct->get_heartbeat_map()->is_healthy()) {
5779 dout(1) << "is_healthy false -- internal heartbeat failed" << dendl;
5780 return false;
5781 }
5782
5783 if (is_waiting_for_healthy()) {
5784 Mutex::Locker l(heartbeat_lock);
5785 utime_t cutoff = ceph_clock_now();
5786 cutoff -= cct->_conf->osd_heartbeat_grace;
5787 int num = 0, up = 0;
5788 for (map<int,HeartbeatInfo>::iterator p = heartbeat_peers.begin();
5789 p != heartbeat_peers.end();
5790 ++p) {
5791 if (p->second.is_healthy(cutoff))
5792 ++up;
5793 ++num;
5794 }
5795 if ((float)up < (float)num * cct->_conf->osd_heartbeat_min_healthy_ratio) {
5796 dout(1) << "is_healthy false -- only " << up << "/" << num << " up peers (less than "
5797 << int(cct->_conf->osd_heartbeat_min_healthy_ratio * 100.0) << "%)" << dendl;
5798 return false;
5799 }
5800 }
5801
5802 return true;
5803 }
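// Worked example (assuming the default osd_heartbeat_min_healthy_ratio of
// 0.33): with 9 heartbeat peers, at least 3 must have been heard from within
// the grace period, or the OSD keeps waiting before trying to boot.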
5804
5805 void OSD::_send_boot()
5806 {
5807 dout(10) << "_send_boot" << dendl;
5808 entity_addr_t cluster_addr = cluster_messenger->get_myaddr();
5809 Connection *local_connection = cluster_messenger->get_loopback_connection().get();
5810 if (cluster_addr.is_blank_ip()) {
5811 int port = cluster_addr.get_port();
5812 cluster_addr = client_messenger->get_myaddr();
5813 cluster_addr.set_port(port);
5814 cluster_messenger->set_addr_unknowns(cluster_addr);
5815 dout(10) << " assuming cluster_addr ip matches client_addr" << dendl;
5816 } else {
5817 Session *s = static_cast<Session*>(local_connection->get_priv());
5818 if (s)
5819 s->put();
5820 else
5821 cluster_messenger->ms_deliver_handle_fast_connect(local_connection);
5822 }
5823
5824 entity_addr_t hb_back_addr = hb_back_server_messenger->get_myaddr();
5825 local_connection = hb_back_server_messenger->get_loopback_connection().get();
5826 if (hb_back_addr.is_blank_ip()) {
5827 int port = hb_back_addr.get_port();
5828 hb_back_addr = cluster_addr;
5829 hb_back_addr.set_port(port);
5830 hb_back_server_messenger->set_addr_unknowns(hb_back_addr);
5831 dout(10) << " assuming hb_back_addr ip matches cluster_addr" << dendl;
5832 } else {
5833 Session *s = static_cast<Session*>(local_connection->get_priv());
5834 if (s)
5835 s->put();
5836 else
5837 hb_back_server_messenger->ms_deliver_handle_fast_connect(local_connection);
5838 }
5839
5840 entity_addr_t hb_front_addr = hb_front_server_messenger->get_myaddr();
5841 local_connection = hb_front_server_messenger->get_loopback_connection().get();
5842 if (hb_front_addr.is_blank_ip()) {
5843 int port = hb_front_addr.get_port();
5844 hb_front_addr = client_messenger->get_myaddr();
5845 hb_front_addr.set_port(port);
5846 hb_front_server_messenger->set_addr_unknowns(hb_front_addr);
5847 dout(10) << " assuming hb_front_addr ip matches client_addr" << dendl;
5848 } else {
5849 Session *s = static_cast<Session*>(local_connection->get_priv());
5850 if (s)
5851 s->put();
5852 else
5853 hb_front_server_messenger->ms_deliver_handle_fast_connect(local_connection);
5854 }
5855
5856 MOSDBoot *mboot = new MOSDBoot(superblock, get_osdmap_epoch(), service.get_boot_epoch(),
5857 hb_back_addr, hb_front_addr, cluster_addr,
5858 CEPH_FEATURES_ALL);
5859 dout(10) << " client_addr " << client_messenger->get_myaddr()
5860 << ", cluster_addr " << cluster_addr
5861 << ", hb_back_addr " << hb_back_addr
5862 << ", hb_front_addr " << hb_front_addr
5863 << dendl;
5864 _collect_metadata(&mboot->metadata);
5865 monc->send_mon_message(mboot);
5866 set_state(STATE_BOOTING);
5867 }
5868
5869 void OSD::_collect_metadata(map<string,string> *pm)
5870 {
5871 // config info
5872 (*pm)["osd_data"] = dev_path;
5873 if (store->get_type() == "filestore") {
5874 // not applicable for bluestore
5875 (*pm)["osd_journal"] = journal_path;
5876 }
5877 (*pm)["front_addr"] = stringify(client_messenger->get_myaddr());
5878 (*pm)["back_addr"] = stringify(cluster_messenger->get_myaddr());
5879 (*pm)["hb_front_addr"] = stringify(hb_front_server_messenger->get_myaddr());
5880 (*pm)["hb_back_addr"] = stringify(hb_back_server_messenger->get_myaddr());
5881
5882 // backend
5883 (*pm)["osd_objectstore"] = store->get_type();
5884 (*pm)["rotational"] = store_is_rotational ? "1" : "0";
5885 (*pm)["default_device_class"] = store->get_default_device_class();
5886 store->collect_metadata(pm);
5887
5888 collect_sys_info(pm, cct);
5889
5890 dout(10) << __func__ << " " << *pm << dendl;
5891 }
5892
5893 void OSD::queue_want_up_thru(epoch_t want)
5894 {
5895 map_lock.get_read();
5896 epoch_t cur = osdmap->get_up_thru(whoami);
5897 Mutex::Locker l(mon_report_lock);
5898 if (want > up_thru_wanted) {
5899 dout(10) << "queue_want_up_thru now " << want << " (was " << up_thru_wanted << ")"
5900 << ", currently " << cur
5901 << dendl;
5902 up_thru_wanted = want;
5903 send_alive();
5904 } else {
5905 dout(10) << "queue_want_up_thru want " << want << " <= queued " << up_thru_wanted
5906 << ", currently " << cur
5907 << dendl;
5908 }
5909 map_lock.put_read();
5910 }
5911
5912 void OSD::send_alive()
5913 {
5914 assert(mon_report_lock.is_locked());
5915 if (!osdmap->exists(whoami))
5916 return;
5917 epoch_t up_thru = osdmap->get_up_thru(whoami);
5918 dout(10) << "send_alive up_thru currently " << up_thru << " want " << up_thru_wanted << dendl;
5919 if (up_thru_wanted > up_thru) {
5920 dout(10) << "send_alive want " << up_thru_wanted << dendl;
5921 monc->send_mon_message(new MOSDAlive(osdmap->get_epoch(), up_thru_wanted));
5922 }
5923 }
5924
5925 void OSD::request_full_map(epoch_t first, epoch_t last)
5926 {
5927 dout(10) << __func__ << " " << first << ".." << last
5928 << ", previously requested "
5929 << requested_full_first << ".." << requested_full_last << dendl;
5930 assert(osd_lock.is_locked());
5931 assert(first > 0 && last > 0);
5932 assert(first <= last);
5933 assert(first >= requested_full_first); // we shouldn't ever ask for older maps
5934 if (requested_full_first == 0) {
5935 // first request
5936 requested_full_first = first;
5937 requested_full_last = last;
5938 } else if (last <= requested_full_last) {
5939 // dup
5940 return;
5941 } else {
5942 // additional request
5943 first = requested_full_last + 1;
5944 requested_full_last = last;
5945 }
5946 MMonGetOSDMap *req = new MMonGetOSDMap;
5947 req->request_full(first, last);
5948 monc->send_mon_message(req);
5949 }
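// Example of the bookkeeping above: after request_full_map(10, 20), a later
// request_full_map(15, 30) only asks the mon for maps 21..30, and the
// requested range becomes 10..30.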
5950
5951 void OSD::got_full_map(epoch_t e)
5952 {
5953 assert(requested_full_first <= requested_full_last);
5954 assert(osd_lock.is_locked());
5955 if (requested_full_first == 0) {
5956 dout(20) << __func__ << " " << e << ", nothing requested" << dendl;
5957 return;
5958 }
5959 if (e < requested_full_first) {
5960 dout(10) << __func__ << " " << e << ", requested " << requested_full_first
5961 << ".." << requested_full_last
5962 << ", ignoring" << dendl;
5963 return;
5964 }
5965 if (e >= requested_full_last) {
5966 dout(10) << __func__ << " " << e << ", requested " << requested_full_first
5967 << ".." << requested_full_last << ", resetting" << dendl;
5968 requested_full_first = requested_full_last = 0;
5969 return;
5970 }
5971
5972 requested_full_first = e + 1;
5973
5974 dout(10) << __func__ << " " << e << ", requested " << requested_full_first
5975 << ".." << requested_full_last
5976 << ", still need more" << dendl;
5977 }
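// Example: with a requested range of 10..30, got_full_map(15) advances
// requested_full_first to 16, while got_full_map(30) clears the range.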
5978
5979 void OSD::requeue_failures()
5980 {
5981 Mutex::Locker l(heartbeat_lock);
5982 unsigned old_queue = failure_queue.size();
5983 unsigned old_pending = failure_pending.size();
5984 for (map<int,pair<utime_t,entity_inst_t> >::iterator p =
5985 failure_pending.begin();
5986 p != failure_pending.end(); ) {
5987 failure_queue[p->first] = p->second.first;
5988 failure_pending.erase(p++);
5989 }
5990 dout(10) << __func__ << " " << old_queue << " + " << old_pending << " -> "
5991 << failure_queue.size() << dendl;
5992 }
5993
5994 void OSD::send_failures()
5995 {
5996 assert(map_lock.is_locked());
5997 assert(mon_report_lock.is_locked());
5998 Mutex::Locker l(heartbeat_lock);
5999 utime_t now = ceph_clock_now();
6000 while (!failure_queue.empty()) {
6001 int osd = failure_queue.begin()->first;
6002 if (!failure_pending.count(osd)) {
6003 entity_inst_t i = osdmap->get_inst(osd);
6004 int failed_for = (int)(double)(now - failure_queue.begin()->second);
6005 monc->send_mon_message(new MOSDFailure(monc->get_fsid(), i, failed_for,
6006 osdmap->get_epoch()));
6007 failure_pending[osd] = make_pair(failure_queue.begin()->second, i);
6008 }
6009 failure_queue.erase(osd);
6010 }
6011 }
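// Example: a peer queued as failed at t=100 with now=130 is reported to the
// mon with failed_for=30 seconds.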
6012
6013 void OSD::send_still_alive(epoch_t epoch, const entity_inst_t &i)
6014 {
6015 MOSDFailure *m = new MOSDFailure(monc->get_fsid(), i, 0, epoch, MOSDFailure::FLAG_ALIVE);
6016 monc->send_mon_message(m);
6017 }
6018
6019 void OSD::send_pg_stats(const utime_t &now)
6020 {
6021 assert(map_lock.is_locked());
6022 assert(osdmap->require_osd_release < CEPH_RELEASE_LUMINOUS);
6023 dout(20) << "send_pg_stats" << dendl;
6024
6025 osd_stat_t cur_stat = service.get_osd_stat();
6026
6027 cur_stat.os_perf_stat = store->get_cur_stats();
6028
6029 pg_stat_queue_lock.Lock();
6030
6031 if (osd_stat_updated || !pg_stat_queue.empty()) {
6032 last_pg_stats_sent = now;
6033 osd_stat_updated = false;
6034
6035 dout(10) << "send_pg_stats - " << pg_stat_queue.size() << " pgs updated" << dendl;
6036
6037 utime_t had_for(now);
6038 had_for -= had_map_since;
6039
6040 MPGStats *m = new MPGStats(monc->get_fsid(), osdmap->get_epoch(), had_for);
6041
6042 uint64_t tid = ++pg_stat_tid;
6043 m->set_tid(tid);
6044 m->osd_stat = cur_stat;
6045
6046 xlist<PG*>::iterator p = pg_stat_queue.begin();
6047 while (!p.end()) {
6048 PG *pg = *p;
6049 ++p;
6050 if (!pg->is_primary()) { // we hold map_lock; role is stable.
6051 pg->stat_queue_item.remove_myself();
6052 pg->put("pg_stat_queue");
6053 continue;
6054 }
6055 pg->pg_stats_publish_lock.Lock();
6056 if (pg->pg_stats_publish_valid) {
6057 m->pg_stat[pg->info.pgid.pgid] = pg->pg_stats_publish;
6058 dout(25) << " sending " << pg->info.pgid << " " << pg->pg_stats_publish.reported_epoch << ":"
6059 << pg->pg_stats_publish.reported_seq << dendl;
6060 } else {
6061 dout(25) << " NOT sending " << pg->info.pgid << " " << pg->pg_stats_publish.reported_epoch << ":"
6062 << pg->pg_stats_publish.reported_seq << ", not valid" << dendl;
6063 }
6064 pg->pg_stats_publish_lock.Unlock();
6065 }
6066
6067 if (last_pg_stats_ack == utime_t() || !outstanding_pg_stats.empty()) {
6068 last_pg_stats_ack = ceph_clock_now();
6069 }
6070 outstanding_pg_stats.insert(tid);
6071 dout(20) << __func__ << " updates pending: " << outstanding_pg_stats << dendl;
6072
6073 monc->send_mon_message(m);
6074 }
6075
6076 pg_stat_queue_lock.Unlock();
6077 }
6078
6079 void OSD::handle_pg_stats_ack(MPGStatsAck *ack)
6080 {
6081 dout(10) << "handle_pg_stats_ack " << dendl;
6082
6083 if (!require_mon_peer(ack)) {
6084 ack->put();
6085 return;
6086 }
6087
6088 // NOTE: we may get replies from a previous mon even while
6089 // outstanding_pg_stats is empty if reconnecting races with replies
6090 // in flight.
6091
6092 pg_stat_queue_lock.Lock();
6093
6094 last_pg_stats_ack = ceph_clock_now();
6095
6096 // decay timeout slowly (analogous to TCP)
6097 stats_ack_timeout =
6098 MAX(cct->_conf->osd_mon_ack_timeout,
6099 stats_ack_timeout * cct->_conf->osd_stats_ack_timeout_decay);
6100 dout(20) << __func__ << " timeout now " << stats_ack_timeout << dendl;
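// Worked example of the decay above (assuming osd_mon_ack_timeout=30 and
// osd_stats_ack_timeout_decay=0.9): a timeout that grew to 60s shrinks to
// 54s, then 48.6s, ... on successive acks, never below the 30s floor.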
6101
6102 if (ack->get_tid() > pg_stat_tid_flushed) {
6103 pg_stat_tid_flushed = ack->get_tid();
6104 pg_stat_queue_cond.Signal();
6105 }
6106
6107 xlist<PG*>::iterator p = pg_stat_queue.begin();
6108 while (!p.end()) {
6109 PG *pg = *p;
6110 PGRef _pg(pg);
6111 ++p;
6112
6113 auto acked = ack->pg_stat.find(pg->info.pgid.pgid);
6114 if (acked != ack->pg_stat.end()) {
6115 pg->pg_stats_publish_lock.Lock();
6116 if (acked->second.first == pg->pg_stats_publish.reported_seq &&
6117 acked->second.second == pg->pg_stats_publish.reported_epoch) {
6118 dout(25) << " ack on " << pg->info.pgid << " " << pg->pg_stats_publish.reported_epoch
6119 << ":" << pg->pg_stats_publish.reported_seq << dendl;
6120 pg->stat_queue_item.remove_myself();
6121 pg->put("pg_stat_queue");
6122 } else {
6123 dout(25) << " still pending " << pg->info.pgid << " " << pg->pg_stats_publish.reported_epoch
6124 << ":" << pg->pg_stats_publish.reported_seq << " > acked "
6125 << acked->second << dendl;
6126 }
6127 pg->pg_stats_publish_lock.Unlock();
6128 } else {
6129 dout(30) << " still pending " << pg->info.pgid << " " << pg->pg_stats_publish.reported_epoch
6130 << ":" << pg->pg_stats_publish.reported_seq << dendl;
6131 }
6132 }
6133
6134 outstanding_pg_stats.erase(ack->get_tid());
6135 dout(20) << __func__ << " still pending: " << outstanding_pg_stats << dendl;
6136
6137 pg_stat_queue_lock.Unlock();
6138
6139 ack->put();
6140 }
6141
6142 void OSD::flush_pg_stats()
6143 {
6144 dout(10) << "flush_pg_stats" << dendl;
6145 osd_lock.Unlock();
6146 utime_t now = ceph_clock_now();
6147 map_lock.get_read();
6148 mon_report_lock.Lock();
6149 send_pg_stats(now);
6150 mon_report_lock.Unlock();
6151 map_lock.put_read();
6152
6153
6154 pg_stat_queue_lock.Lock();
6155 uint64_t tid = pg_stat_tid;
6156 dout(10) << "flush_pg_stats waiting for stats tid " << tid << " to flush" << dendl;
6157 while (tid > pg_stat_tid_flushed)
6158 pg_stat_queue_cond.Wait(pg_stat_queue_lock);
6159 dout(10) << "flush_pg_stats finished waiting for stats tid " << tid << " to flush" << dendl;
6160 pg_stat_queue_lock.Unlock();
6161
6162 osd_lock.Lock();
6163 }
6164
6165 void OSD::send_beacon(const ceph::coarse_mono_clock::time_point& now)
6166 {
6167 const auto& monmap = monc->monmap;
6168 // we may be called just after connecting, before the monmap has been
6169 // initialized; only send the beacon once we have a usable monmap.
6170 if (monmap.epoch > 0 &&
6171 monmap.get_required_features().contains_all(
6172 ceph::features::mon::FEATURE_LUMINOUS)) {
6173 dout(20) << __func__ << " sending" << dendl;
6174 MOSDBeacon* beacon = nullptr;
6175 {
6176 Mutex::Locker l{min_last_epoch_clean_lock};
6177 beacon = new MOSDBeacon(osdmap->get_epoch(), min_last_epoch_clean);
6178 std::swap(beacon->pgs, min_last_epoch_clean_pgs);
6179 last_sent_beacon = now;
6180 }
6181 monc->send_mon_message(beacon);
6182 } else {
6183 dout(20) << __func__ << " not sending" << dendl;
6184 }
6185 }
6186
6187 void OSD::handle_command(MMonCommand *m)
6188 {
6189 if (!require_mon_peer(m)) {
6190 m->put();
6191 return;
6192 }
6193
6194 Command *c = new Command(m->cmd, m->get_tid(), m->get_data(), NULL);
6195 command_wq.queue(c);
6196 m->put();
6197 }
6198
6199 void OSD::handle_command(MCommand *m)
6200 {
6201 ConnectionRef con = m->get_connection();
6202 Session *session = static_cast<Session *>(con->get_priv());
6203 if (!session) {
6204 con->send_message(new MCommandReply(m, -EPERM));
6205 m->put();
6206 return;
6207 }
6208
6209 OSDCap& caps = session->caps;
6210 session->put();
6211
6212 if (!caps.allow_all() || m->get_source().is_mon()) {
6213 con->send_message(new MCommandReply(m, -EPERM));
6214 m->put();
6215 return;
6216 }
6217
6218 Command *c = new Command(m->cmd, m->get_tid(), m->get_data(), con.get());
6219 command_wq.queue(c);
6220
6221 m->put();
6222 }
6223
6224 struct OSDCommand {
6225 string cmdstring;
6226 string helpstring;
6227 string module;
6228 string perm;
6229 string availability;
6230 } osd_commands[] = {
6231
6232 #define COMMAND(parsesig, helptext, module, perm, availability) \
6233 {parsesig, helptext, module, perm, availability},
6234
6235 // yes, these are really pg commands, but there's a limit to how
6236 // much work it's worth. The OSD returns all of them. Make this
6237 // form (pg <pgid> <cmd>) valid only for the CLI.
6238 // The REST interface uses "tell <pgid> <cmd>".
6239
6240 COMMAND("pg " \
6241 "name=pgid,type=CephPgid " \
6242 "name=cmd,type=CephChoices,strings=query", \
6243 "show details of a specific pg", "osd", "r", "cli")
6244 COMMAND("pg " \
6245 "name=pgid,type=CephPgid " \
6246 "name=cmd,type=CephChoices,strings=mark_unfound_lost " \
6247 "name=mulcmd,type=CephChoices,strings=revert|delete", \
6248 "mark all unfound objects in this pg as lost, either removing or reverting to a prior version if one is available",
6249 "osd", "rw", "cli")
6250 COMMAND("pg " \
6251 "name=pgid,type=CephPgid " \
6252 "name=cmd,type=CephChoices,strings=list_missing " \
6253 "name=offset,type=CephString,req=false",
6254 "list missing objects on this pg, perhaps starting at an offset given in JSON",
6255 "osd", "r", "cli")
6256
6257 // new form: tell <pgid> <cmd> for both cli and rest
6258
6259 COMMAND("query",
6260 "show details of a specific pg", "osd", "r", "cli,rest")
6261 COMMAND("mark_unfound_lost " \
6262 "name=mulcmd,type=CephChoices,strings=revert|delete", \
6263 "mark all unfound objects in this pg as lost, either removing or reverting to a prior version if one is available",
6264 "osd", "rw", "cli,rest")
6265 COMMAND("list_missing " \
6266 "name=offset,type=CephString,req=false",
6267 "list missing objects on this pg, perhaps starting at an offset given in JSON",
6268 "osd", "r", "cli,rest")
6269 COMMAND("perf histogram dump "
6270 "name=logger,type=CephString,req=false "
6271 "name=counter,type=CephString,req=false",
6272 "Get histogram data",
6273 "osd", "r", "cli,rest")
6274
6275 // tell <osd.n> commands. Validation of osd.n must be special-cased in client
6276 COMMAND("version", "report version of OSD", "osd", "r", "cli,rest")
6277 COMMAND("get_command_descriptions", "list commands descriptions", "osd", "r", "cli,rest")
6278 COMMAND("injectargs " \
6279 "name=injected_args,type=CephString,n=N",
6280 "inject configuration arguments into running OSD",
6281 "osd", "rw", "cli,rest")
6282 COMMAND("config set " \
6283 "name=key,type=CephString name=value,type=CephString",
6284 "Set a configuration option at runtime (not persistent)",
6285 "osd", "rw", "cli,rest")
6286 COMMAND("cluster_log " \
6287 "name=level,type=CephChoices,strings=error,warning,info,debug " \
6288 "name=message,type=CephString,n=N",
6289 "log a message to the cluster log",
6290 "osd", "rw", "cli,rest")
6291 COMMAND("bench " \
6292 "name=count,type=CephInt,req=false " \
6293 "name=size,type=CephInt,req=false " \
6294 "name=object_size,type=CephInt,req=false " \
6295 "name=object_num,type=CephInt,req=false ", \
6296 "OSD benchmark: write <count> <size>-byte objects, " \
6297 "(default 1G size 4MB). Results in log.",
6298 "osd", "rw", "cli,rest")
6299 COMMAND("flush_pg_stats", "flush pg stats", "osd", "rw", "cli,rest")
6300 COMMAND("heap " \
6301 "name=heapcmd,type=CephChoices,strings=dump|start_profiler|stop_profiler|release|stats", \
6302 "show heap usage info (available only if compiled with tcmalloc)", \
6303 "osd", "rw", "cli,rest")
6304 COMMAND("debug dump_missing " \
6305 "name=filename,type=CephFilepath",
6306 "dump missing objects to a named file", "osd", "r", "cli,rest")
6307 COMMAND("debug kick_recovery_wq " \
6308 "name=delay,type=CephInt,range=0",
6309 "set osd_recovery_delay_start to <val>", "osd", "rw", "cli,rest")
6310 COMMAND("cpu_profiler " \
6311 "name=arg,type=CephChoices,strings=status|flush",
6312 "run cpu profiling on daemon", "osd", "rw", "cli,rest")
6313 COMMAND("dump_pg_recovery_stats", "dump pg recovery statistics",
6314 "osd", "r", "cli,rest")
6315 COMMAND("reset_pg_recovery_stats", "reset pg recovery statistics",
6316 "osd", "rw", "cli,rest")
6317 COMMAND("compact",
6318 "compact object store's omap. "
6319 "WARNING: Compaction probably slows your requests",
6320 "osd", "rw", "cli,rest")
6321 };
6322
6323 void OSD::do_command(Connection *con, ceph_tid_t tid, vector<string>& cmd, bufferlist& data)
6324 {
6325 int r = 0;
6326 stringstream ss, ds;
6327 string rs;
6328 bufferlist odata;
6329
6330 dout(20) << "do_command tid " << tid << " " << cmd << dendl;
6331
6332 map<string, cmd_vartype> cmdmap;
6333 string prefix;
6334 string format;
6335 string pgidstr;
6336 boost::scoped_ptr<Formatter> f;
6337
6338 if (cmd.empty()) {
6339 ss << "no command given";
6340 goto out;
6341 }
6342
6343 if (!cmdmap_from_json(cmd, &cmdmap, ss)) {
6344 r = -EINVAL;
6345 goto out;
6346 }
6347
6348 cmd_getval(cct, cmdmap, "prefix", prefix);
6349
6350 if (prefix == "get_command_descriptions") {
6351 int cmdnum = 0;
6352 JSONFormatter *f = new JSONFormatter();
6353 f->open_object_section("command_descriptions");
6354 for (OSDCommand *cp = osd_commands;
6355 cp < &osd_commands[ARRAY_SIZE(osd_commands)]; cp++) {
6356
6357 ostringstream secname;
6358 secname << "cmd" << setfill('0') << std::setw(3) << cmdnum;
6359 dump_cmddesc_to_json(f, secname.str(), cp->cmdstring, cp->helpstring,
6360 cp->module, cp->perm, cp->availability, 0);
6361 cmdnum++;
6362 }
6363 f->close_section(); // command_descriptions
6364
6365 f->flush(ds);
6366 delete f;
6367 goto out;
6368 }
6369
6370 cmd_getval(cct, cmdmap, "format", format);
6371 f.reset(Formatter::create(format));
6372
6373 if (prefix == "version") {
6374 if (f) {
6375 f->open_object_section("version");
6376 f->dump_string("version", pretty_version_to_str());
6377 f->close_section();
6378 f->flush(ds);
6379 } else {
6380 ds << pretty_version_to_str();
6381 }
6382 goto out;
6383 }
6384 else if (prefix == "injectargs") {
6385 vector<string> argsvec;
6386 cmd_getval(cct, cmdmap, "injected_args", argsvec);
6387
6388 if (argsvec.empty()) {
6389 r = -EINVAL;
6390 ss << "ignoring empty injectargs";
6391 goto out;
6392 }
6393 string args = argsvec.front();
6394 for (vector<string>::iterator a = ++argsvec.begin(); a != argsvec.end(); ++a)
6395 args += " " + *a;
6396 osd_lock.Unlock();
6397 r = cct->_conf->injectargs(args, &ss);
6398 osd_lock.Lock();
6399 }
6400 else if (prefix == "config set") {
6401 std::string key;
6402 std::string val;
6403 cmd_getval(cct, cmdmap, "key", key);
6404 cmd_getval(cct, cmdmap, "value", val);
6405 osd_lock.Unlock();
6406 r = cct->_conf->set_val(key, val, true, &ss);
6407 osd_lock.Lock();
6408 }
6409 else if (prefix == "cluster_log") {
6410 vector<string> msg;
6411 cmd_getval(cct, cmdmap, "message", msg);
6412 if (msg.empty()) {
6413 r = -EINVAL;
6414 ss << "ignoring empty log message";
6415 goto out;
6416 }
6417 string message = msg.front();
6418 for (vector<string>::iterator a = ++msg.begin(); a != msg.end(); ++a)
6419 message += " " + *a;
6420 string lvl;
6421 cmd_getval(cct, cmdmap, "level", lvl);
6422 clog_type level = string_to_clog_type(lvl);
6423 if (level < 0) {
6424 r = -EINVAL;
6425 ss << "unknown level '" << lvl << "'";
6426 goto out;
6427 }
6428 clog->do_log(level, message);
6429 }
6430
6431 // either 'pg <pgid> <command>' or
6432 // 'tell <pgid>' (which comes in without any of that prefix)?
6433
6434 else if (prefix == "pg" ||
6435 prefix == "query" ||
6436 prefix == "mark_unfound_lost" ||
6437 prefix == "list_missing"
6438 ) {
6439 pg_t pgid;
6440
6441 if (!cmd_getval(cct, cmdmap, "pgid", pgidstr)) {
6442 ss << "no pgid specified";
6443 r = -EINVAL;
6444 } else if (!pgid.parse(pgidstr.c_str())) {
6445 ss << "couldn't parse pgid '" << pgidstr << "'";
6446 r = -EINVAL;
6447 } else {
6448 spg_t pcand;
6449 PG *pg = nullptr;
6450 if (osdmap->get_primary_shard(pgid, &pcand) &&
6451 (pg = _lookup_lock_pg(pcand))) {
6452 if (pg->is_primary()) {
6453 // simulate pg <pgid> cmd= for pg->do_command()
6454 if (prefix != "pg")
6455 cmd_putval(cct, cmdmap, "cmd", prefix);
6456 r = pg->do_command(cmdmap, ss, data, odata, con, tid);
6457 if (r == -EAGAIN) {
6458 pg->unlock();
6459 // don't reply, pg will do so async
6460 return;
6461 }
6462 } else {
6463 ss << "not primary for pgid " << pgid;
6464
6465 // send them the latest diff to ensure they realize the mapping
6466 // has changed.
6467 service.send_incremental_map(osdmap->get_epoch() - 1, con, osdmap);
6468
6469 // do not reply; they will get newer maps and realize they
6470 // need to resend.
6471 pg->unlock();
6472 return;
6473 }
6474 pg->unlock();
6475 } else {
6476 ss << "i don't have pgid " << pgid;
6477 r = -ENOENT;
6478 }
6479 }
6480 }
6481
6482 else if (prefix == "bench") {
6483 int64_t count;
6484 int64_t bsize;
6485 int64_t osize, onum;
6486 // default count 1G, size 4MB
6487 cmd_getval(cct, cmdmap, "count", count, (int64_t)1 << 30);
6488 cmd_getval(cct, cmdmap, "size", bsize, (int64_t)4 << 20);
6489 cmd_getval(cct, cmdmap, "object_size", osize, (int64_t)0);
6490 cmd_getval(cct, cmdmap, "object_num", onum, (int64_t)0);
6491
6492 ceph::shared_ptr<ObjectStore::Sequencer> osr (std::make_shared<
6493 ObjectStore::Sequencer>("bench"));
6494
6495 uint32_t duration = cct->_conf->osd_bench_duration;
6496
6497 if (bsize > (int64_t) cct->_conf->osd_bench_max_block_size) {
6498 // let us limit the block size because the next checks rely on it
6499 // having a sane value. If we allowed any block size to be set, things
6500 // could still go sideways.
6501 ss << "block 'size' values are capped at "
6502 << prettybyte_t(cct->_conf->osd_bench_max_block_size) << ". If you wish to use"
6503 << " a higher value, please adjust 'osd_bench_max_block_size'";
6504 r = -EINVAL;
6505 goto out;
6506 } else if (bsize < (int64_t) (1 << 20)) {
6507 // entering the realm of small block sizes.
6508 // limit the count to a sane value, assuming a configurable amount of
6509 // IOPS and duration, so that the OSD doesn't get hung up on this,
6510 // preventing timeouts from going off
6511 int64_t max_count =
6512 bsize * duration * cct->_conf->osd_bench_small_size_max_iops;
6513 if (count > max_count) {
6514 ss << "'count' values greater than " << max_count
6515 << " for a block size of " << prettybyte_t(bsize) << ", assuming "
6516 << cct->_conf->osd_bench_small_size_max_iops << " IOPS,"
6517 << " for " << duration << " seconds,"
6518 << " can cause ill effects on osd. "
6519 << " Please adjust 'osd_bench_small_size_max_iops' with a higher"
6520 << " value if you wish to use a higher 'count'.";
6521 r = -EINVAL;
6522 goto out;
6523 }
6524 } else {
6525 // 1MB block sizes are big enough so that we get more stuff done.
6526 // However, to keep the OSD from getting hung up on this and triggering
6527 // timeouts, we are going to limit the count assuming
6528 // a configurable throughput and duration.
6529 // NOTE: max_count is the total amount of bytes that we believe we
6530 // will be able to write during 'duration' for the given
6531 // throughput. The block size hardly impacts this unless it's
6532 // way too big. Given we already check how big the block size
6533 // is, it's safe to assume everything will check out.
6534 int64_t max_count =
6535 cct->_conf->osd_bench_large_size_max_throughput * duration;
6536 if (count > max_count) {
6537 ss << "'count' values greater than " << max_count
6538 << " for a block size of " << prettybyte_t(bsize) << ", assuming "
6539 << prettybyte_t(cct->_conf->osd_bench_large_size_max_throughput) << "/s,"
6540 << " for " << duration << " seconds,"
6541 << " can cause ill effects on osd. "
6542 << " Please adjust 'osd_bench_large_size_max_throughput'"
6543 << " with a higher value if you wish to use a higher 'count'.";
6544 r = -EINVAL;
6545 goto out;
6546 }
6547 }
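// Worked example of the small-block cap above (assuming osd_bench_duration=30
// and osd_bench_small_size_max_iops=100): with bsize=4096,
// max_count = 4096 * 30 * 100 = 12288000 bytes (~11.7 MB), so any larger
// 'count' is rejected.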
6548
6549 if (osize && bsize > osize)
6550 bsize = osize;
6551
6552 dout(1) << " bench count " << count
6553 << " bsize " << prettybyte_t(bsize) << dendl;
6554
6555 ObjectStore::Transaction cleanupt;
6556
6557 if (osize && onum) {
6558 bufferlist bl;
6559 bufferptr bp(osize);
6560 bp.zero();
6561 bl.push_back(std::move(bp));
6562 bl.rebuild_page_aligned();
6563 for (int i=0; i<onum; ++i) {
6564 char nm[30];
6565 snprintf(nm, sizeof(nm), "disk_bw_test_%d", i);
6566 object_t oid(nm);
6567 hobject_t soid(sobject_t(oid, 0));
6568 ObjectStore::Transaction t;
6569 t.write(coll_t(), ghobject_t(soid), 0, osize, bl);
6570 store->queue_transaction(osr.get(), std::move(t), NULL);
6571 cleanupt.remove(coll_t(), ghobject_t(soid));
6572 }
6573 }
6574
6575 bufferlist bl;
6576 bufferptr bp(bsize);
6577 bp.zero();
6578 bl.push_back(std::move(bp));
6579 bl.rebuild_page_aligned();
6580
6581 {
6582 C_SaferCond waiter;
6583 if (!osr->flush_commit(&waiter)) {
6584 waiter.wait();
6585 }
6586 }
6587
6588 utime_t start = ceph_clock_now();
6589 for (int64_t pos = 0; pos < count; pos += bsize) {
6590 char nm[30];
6591 unsigned offset = 0;
6592 if (onum && osize) {
6593 snprintf(nm, sizeof(nm), "disk_bw_test_%d", (int)(rand() % onum));
6594 offset = rand() % (osize / bsize) * bsize;
6595 } else {
6596 snprintf(nm, sizeof(nm), "disk_bw_test_%lld", (long long)pos);
6597 }
6598 object_t oid(nm);
6599 hobject_t soid(sobject_t(oid, 0));
6600 ObjectStore::Transaction t;
6601 t.write(coll_t::meta(), ghobject_t(soid), offset, bsize, bl);
6602 store->queue_transaction(osr.get(), std::move(t), NULL);
6603 if (!onum || !osize)
6604 cleanupt.remove(coll_t::meta(), ghobject_t(soid));
6605 }
6606
6607 {
6608 C_SaferCond waiter;
6609 if (!osr->flush_commit(&waiter)) {
6610 waiter.wait();
6611 }
6612 }
6613 utime_t end = ceph_clock_now();
6614
6615 // clean up
6616 store->queue_transaction(osr.get(), std::move(cleanupt), NULL);
6617 {
6618 C_SaferCond waiter;
6619 if (!osr->flush_commit(&waiter)) {
6620 waiter.wait();
6621 }
6622 }
6623
6624 uint64_t rate = (double)count / (end - start);
6625 if (f) {
6626 f->open_object_section("osd_bench_results");
6627 f->dump_int("bytes_written", count);
6628 f->dump_int("blocksize", bsize);
6629 f->dump_unsigned("bytes_per_sec", rate);
6630 f->close_section();
6631 f->flush(ss);
6632 } else {
6633 ss << "bench: wrote " << prettybyte_t(count)
6634 << " in blocks of " << prettybyte_t(bsize) << " in "
6635 << (end-start) << " sec at " << prettybyte_t(rate) << "/sec";
6636 }
6637 }
6638
6639 else if (prefix == "flush_pg_stats") {
6640 if (osdmap->require_osd_release >= CEPH_RELEASE_LUMINOUS) {
6641 mgrc.send_pgstats();
6642 ds << service.get_osd_stat_seq() << "\n";
6643 } else {
6644 flush_pg_stats();
6645 }
6646 }
6647
6648 else if (prefix == "heap") {
6649 r = ceph::osd_cmds::heap(*cct, cmdmap, *f, ds);
6650 }
6651
6652 else if (prefix == "debug dump_missing") {
6653 string file_name;
6654 cmd_getval(cct, cmdmap, "filename", file_name);
6655 std::ofstream fout(file_name.c_str());
6656 if (!fout.is_open()) {
6657 ss << "failed to open file '" << file_name << "'";
6658 r = -EINVAL;
6659 goto out;
6660 }
6661
6662 fout << "*** osd " << whoami << ": dump_missing ***" << std::endl;
6663 RWLock::RLocker l(pg_map_lock);
6664 for (ceph::unordered_map<spg_t, PG*>::const_iterator pg_map_e = pg_map.begin();
6665 pg_map_e != pg_map.end(); ++pg_map_e) {
6666 PG *pg = pg_map_e->second;
6667 pg->lock();
6668
6669 fout << *pg << std::endl;
6670 std::map<hobject_t, pg_missing_item>::const_iterator mend =
6671 pg->pg_log.get_missing().get_items().end();
6672 std::map<hobject_t, pg_missing_item>::const_iterator mi =
6673 pg->pg_log.get_missing().get_items().begin();
6674 for (; mi != mend; ++mi) {
6675 fout << mi->first << " -> " << mi->second << std::endl;
6676 if (!pg->missing_loc.needs_recovery(mi->first))
6677 continue;
6678 if (pg->missing_loc.is_unfound(mi->first))
6679 fout << " unfound ";
6680 const set<pg_shard_t> &mls(pg->missing_loc.get_locations(mi->first));
6681 if (mls.empty())
6682 continue;
6683 fout << "missing_loc: " << mls << std::endl;
6684 }
6685 pg->unlock();
6686 fout << std::endl;
6687 }
6688
6689 fout.close();
6690 }
6691 else if (prefix == "debug kick_recovery_wq") {
6692 int64_t delay;
6693 cmd_getval(cct, cmdmap, "delay", delay);
6694 ostringstream oss;
6695 oss << delay;
6696 r = cct->_conf->set_val("osd_recovery_delay_start", oss.str().c_str());
6697 if (r != 0) {
6698 ss << "kick_recovery_wq: error setting "
6699 << "osd_recovery_delay_start to '" << delay << "': error "
6700 << r;
6701 goto out;
6702 }
6703 cct->_conf->apply_changes(NULL);
6704 ss << "kicking recovery queue. set osd_recovery_delay_start "
6705 << "to " << cct->_conf->osd_recovery_delay_start;
6706 }
6707
6708 else if (prefix == "cpu_profiler") {
6709 string arg;
6710 cmd_getval(cct, cmdmap, "arg", arg);
6711 vector<string> argvec;
6712 get_str_vec(arg, argvec);
6713 cpu_profiler_handle_command(argvec, ds);
6714 }
6715
6716 else if (prefix == "dump_pg_recovery_stats") {
6717 stringstream s;
6718 if (f) {
6719 pg_recovery_stats.dump_formatted(f.get());
6720 f->flush(ds);
6721 } else {
6722 pg_recovery_stats.dump(s);
6723 ds << "dump pg recovery stats: " << s.str();
6724 }
6725 }
6726
6727 else if (prefix == "reset_pg_recovery_stats") {
6728 ss << "reset pg recovery stats";
6729 pg_recovery_stats.reset();
6730 }
6731
6732 else if (prefix == "perf histogram dump") {
6733 std::string logger;
6734 std::string counter;
6735 cmd_getval(cct, cmdmap, "logger", logger);
6736 cmd_getval(cct, cmdmap, "counter", counter);
6737 if (f) {
6738 cct->get_perfcounters_collection()->dump_formatted_histograms(
6739 f.get(), false, logger, counter);
6740 f->flush(ds);
6741 }
6742 }
6743
6744 else if (prefix == "compact") {
6745 dout(1) << "triggering manual compaction" << dendl;
6746 auto start = ceph::coarse_mono_clock::now();
6747 store->compact();
6748 auto end = ceph::coarse_mono_clock::now();
6749 auto time_span = chrono::duration_cast<chrono::duration<double>>(end - start);
6750 dout(1) << "finished manual compaction in "
6751 << time_span.count()
6752 << " seconds" << dendl;
6753 ss << "compacted omap in " << time_span.count() << " seconds";
6754 }
6755
6756 else {
6757 ss << "unrecognized command! " << cmd;
6758 r = -EINVAL;
6759 }
6760
6761 out:
6762 rs = ss.str();
6763 odata.append(ds);
6764 dout(0) << "do_command r=" << r << " " << rs << dendl;
6765 clog->info() << rs;
6766 if (con) {
6767 MCommandReply *reply = new MCommandReply(r, rs);
6768 reply->set_tid(tid);
6769 reply->set_data(odata);
6770 con->send_message(reply);
6771 }
6772 }
6773
6774 bool OSD::heartbeat_dispatch(Message *m)
6775 {
6776 dout(30) << "heartbeat_dispatch " << m << dendl;
6777 switch (m->get_type()) {
6778
6779 case CEPH_MSG_PING:
6780 dout(10) << "ping from " << m->get_source_inst() << dendl;
6781 m->put();
6782 break;
6783
6784 case MSG_OSD_PING:
6785 handle_osd_ping(static_cast<MOSDPing*>(m));
6786 break;
6787
6788 default:
6789 dout(0) << "dropping unexpected message " << *m << " from " << m->get_source_inst() << dendl;
6790 m->put();
6791 }
6792
6793 return true;
6794 }
6795
6796 bool OSD::ms_dispatch(Message *m)
6797 {
6798 dout(20) << "OSD::ms_dispatch: " << *m << dendl;
6799 if (m->get_type() == MSG_OSD_MARK_ME_DOWN) {
6800 service.got_stop_ack();
6801 m->put();
6802 return true;
6803 }
6804
6805 // lock!
6806
6807 osd_lock.Lock();
6808 if (is_stopping()) {
6809 osd_lock.Unlock();
6810 m->put();
6811 return true;
6812 }
6813
6814 do_waiters();
6815 _dispatch(m);
6816
6817 osd_lock.Unlock();
6818
6819 return true;
6820 }
6821
6822 void OSD::maybe_share_map(
6823 Session *session,
6824 OpRequestRef op,
6825 OSDMapRef osdmap)
6826 {
6827 if (!op->check_send_map) {
6828 return;
6829 }
6830 epoch_t last_sent_epoch = 0;
6831
6832 session->sent_epoch_lock.lock();
6833 last_sent_epoch = session->last_sent_epoch;
6834 session->sent_epoch_lock.unlock();
6835
6836 const Message *m = op->get_req();
6837 service.share_map(
6838 m->get_source(),
6839 m->get_connection().get(),
6840 op->sent_epoch,
6841 osdmap,
6842 session ? &last_sent_epoch : NULL);
6843
6844 session->sent_epoch_lock.lock();
6845 if (session->last_sent_epoch < last_sent_epoch) {
6846 session->last_sent_epoch = last_sent_epoch;
6847 }
6848 session->sent_epoch_lock.unlock();
6849
6850 op->check_send_map = false;
6851 }
6852
6853 void OSD::dispatch_session_waiting(Session *session, OSDMapRef osdmap)
6854 {
6855 assert(session->session_dispatch_lock.is_locked());
6856
6857 auto i = session->waiting_on_map.begin();
6858 while (i != session->waiting_on_map.end()) {
6859 OpRequestRef op = &(*i);
6860 assert(ms_can_fast_dispatch(op->get_req()));
6861 const MOSDFastDispatchOp *m = static_cast<const MOSDFastDispatchOp*>(
6862 op->get_req());
6863 if (m->get_min_epoch() > osdmap->get_epoch()) {
6864 break;
6865 }
6866 session->waiting_on_map.erase(i++);
6867 op->put();
6868
6869 spg_t pgid;
6870 if (m->get_type() == CEPH_MSG_OSD_OP) {
6871 pg_t actual_pgid = osdmap->raw_pg_to_pg(
6872 static_cast<const MOSDOp*>(m)->get_pg());
6873 if (!osdmap->get_primary_shard(actual_pgid, &pgid)) {
6874 continue;
6875 }
6876 } else {
6877 pgid = m->get_spg();
6878 }
6879 enqueue_op(pgid, op, m->get_map_epoch());
6880 }
6881
6882 if (session->waiting_on_map.empty()) {
6883 clear_session_waiting_on_map(session);
6884 } else {
6885 register_session_waiting_on_map(session);
6886 }
6887 }
6888
6889 void OSD::ms_fast_dispatch(Message *m)
6890 {
6891 FUNCTRACE();
6892 if (service.is_stopping()) {
6893 m->put();
6894 return;
6895 }
6896 OpRequestRef op = op_tracker.create_request<OpRequest, Message*>(m);
6897 {
6898 #ifdef WITH_LTTNG
6899 osd_reqid_t reqid = op->get_reqid();
6900 #endif
6901 tracepoint(osd, ms_fast_dispatch, reqid.name._type,
6902 reqid.name._num, reqid.tid, reqid.inc);
6903 }
6904
6905 if (m->trace)
6906 op->osd_trace.init("osd op", &trace_endpoint, &m->trace);
6907
6908 // note sender epoch, min req'd epoch
6909 op->sent_epoch = static_cast<MOSDFastDispatchOp*>(m)->get_map_epoch();
6910 op->min_epoch = static_cast<MOSDFastDispatchOp*>(m)->get_min_epoch();
6911 assert(op->min_epoch <= op->sent_epoch); // sanity check!
6912
6913 service.maybe_inject_dispatch_delay();
6914
6915 if (m->get_connection()->has_features(CEPH_FEATUREMASK_RESEND_ON_SPLIT) ||
6916 m->get_type() != CEPH_MSG_OSD_OP) {
6917 // queue it directly
6918 enqueue_op(
6919 static_cast<MOSDFastDispatchOp*>(m)->get_spg(),
6920 op,
6921 static_cast<MOSDFastDispatchOp*>(m)->get_map_epoch());
6922 } else {
6923 // legacy client, and this is an MOSDOp (the *only* fast dispatch
6924 // message that didn't have an explicit spg_t); we need to map
6925 // them to an spg_t while preserving delivery order.
6926 Session *session = static_cast<Session*>(m->get_connection()->get_priv());
6927 if (session) {
6928 {
6929 Mutex::Locker l(session->session_dispatch_lock);
6930 op->get();
6931 session->waiting_on_map.push_back(*op);
6932 OSDMapRef nextmap = service.get_nextmap_reserved();
6933 dispatch_session_waiting(session, nextmap);
6934 service.release_map(nextmap);
6935 }
6936 session->put();
6937 }
6938 }
6939 OID_EVENT_TRACE_WITH_MSG(m, "MS_FAST_DISPATCH_END", false);
6940 }
6941
6942 void OSD::ms_fast_preprocess(Message *m)
6943 {
6944 if (m->get_connection()->get_peer_type() == CEPH_ENTITY_TYPE_OSD) {
6945 if (m->get_type() == CEPH_MSG_OSD_MAP) {
6946 MOSDMap *mm = static_cast<MOSDMap*>(m);
6947 Session *s = static_cast<Session*>(m->get_connection()->get_priv());
6948 if (s) {
6949 s->received_map_lock.lock();
6950 s->received_map_epoch = mm->get_last();
6951 s->received_map_lock.unlock();
6952 s->put();
6953 }
6954 }
6955 }
6956 }
6957
6958 bool OSD::ms_get_authorizer(int dest_type, AuthAuthorizer **authorizer, bool force_new)
6959 {
6960 dout(10) << "OSD::ms_get_authorizer type=" << ceph_entity_type_name(dest_type) << dendl;
6961
6962 if (is_stopping()) {
6963 dout(10) << __func__ << " bailing, we are shutting down" << dendl;
6964 return false;
6965 }
6966
6967 if (dest_type == CEPH_ENTITY_TYPE_MON)
6968 return true;
6969
6970 if (force_new) {
6971 /* the MonClient checks keys every tick(), so we should just wait for that cycle
6972 to get through */
6973 if (monc->wait_auth_rotating(10) < 0) {
6974 derr << "OSD::ms_get_authorizer wait_auth_rotating failed" << dendl;
6975 return false;
6976 }
6977 }
6978
6979 *authorizer = monc->build_authorizer(dest_type);
6980 return *authorizer != NULL;
6981 }
6982
6983
6984 bool OSD::ms_verify_authorizer(Connection *con, int peer_type,
6985 int protocol, bufferlist& authorizer_data, bufferlist& authorizer_reply,
6986 bool& isvalid, CryptoKey& session_key)
6987 {
6988 AuthAuthorizeHandler *authorize_handler = 0;
6989 switch (peer_type) {
6990 case CEPH_ENTITY_TYPE_MDS:
6991 /*
6992 * note: mds is technically a client from our perspective, but
6993 * this makes the 'cluster' consistent w/ monitor's usage.
6994 */
6995 case CEPH_ENTITY_TYPE_OSD:
6996 case CEPH_ENTITY_TYPE_MGR:
6997 authorize_handler = authorize_handler_cluster_registry->get_handler(protocol);
6998 break;
6999 default:
7000 authorize_handler = authorize_handler_service_registry->get_handler(protocol);
7001 }
7002 if (!authorize_handler) {
7003 dout(0) << "No AuthAuthorizeHandler found for protocol " << protocol << dendl;
7004 isvalid = false;
7005 return true;
7006 }
7007
7008 AuthCapsInfo caps_info;
7009 EntityName name;
7010 uint64_t global_id;
7011 uint64_t auid = CEPH_AUTH_UID_DEFAULT;
7012
7013 RotatingKeyRing *keys = monc->rotating_secrets.get();
7014 if (keys) {
7015 isvalid = authorize_handler->verify_authorizer(
7016 cct, keys,
7017 authorizer_data, authorizer_reply, name, global_id, caps_info, session_key,
7018 &auid);
7019 } else {
7020 dout(10) << __func__ << " no rotating_keys (yet), denied" << dendl;
7021 isvalid = false;
7022 }
7023
7024 if (isvalid) {
7025 Session *s = static_cast<Session *>(con->get_priv());
7026 if (!s) {
7027 s = new Session(cct);
7028 con->set_priv(s->get());
7029 s->con = con;
7030 dout(10) << " new session " << s << " con=" << s->con << " addr=" << s->con->get_peer_addr() << dendl;
7031 }
7032
7033 s->entity_name = name;
7034 if (caps_info.allow_all)
7035 s->caps.set_allow_all();
7036 s->auid = auid;
7037
7038 if (caps_info.caps.length() > 0) {
7039 bufferlist::iterator p = caps_info.caps.begin();
7040 string str;
7041 try {
7042 ::decode(str, p);
7043 }
7044 catch (buffer::error& e) {
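// tolerate a corrupt caps blob here; whatever was decoded into str
// is handed to parse() below, where a failure is logged, not fatal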
7045 }
7046 bool success = s->caps.parse(str);
7047 if (success)
7048 dout(10) << " session " << s << " " << s->entity_name << " has caps " << s->caps << " '" << str << "'" << dendl;
7049 else
7050 dout(10) << " session " << s << " " << s->entity_name << " failed to parse caps '" << str << "'" << dendl;
7051 }
7052
7053 s->put();
7054 }
7055 return true;
7056 }
7057
7058 void OSD::do_waiters()
7059 {
7060 assert(osd_lock.is_locked());
7061
7062 dout(10) << "do_waiters -- start" << dendl;
7063 while (!finished.empty()) {
7064 OpRequestRef next = finished.front();
7065 finished.pop_front();
7066 dispatch_op(next);
7067 }
7068 dout(10) << "do_waiters -- finish" << dendl;
7069 }
7070
7071 void OSD::dispatch_op(OpRequestRef op)
7072 {
7073 switch (op->get_req()->get_type()) {
7074
7075 case MSG_OSD_PG_CREATE:
7076 handle_pg_create(op);
7077 break;
7078 case MSG_OSD_PG_NOTIFY:
7079 handle_pg_notify(op);
7080 break;
7081 case MSG_OSD_PG_QUERY:
7082 handle_pg_query(op);
7083 break;
7084 case MSG_OSD_PG_LOG:
7085 handle_pg_log(op);
7086 break;
7087 case MSG_OSD_PG_REMOVE:
7088 handle_pg_remove(op);
7089 break;
7090 case MSG_OSD_PG_INFO:
7091 handle_pg_info(op);
7092 break;
7093 case MSG_OSD_PG_TRIM:
7094 handle_pg_trim(op);
7095 break;
7096 case MSG_OSD_BACKFILL_RESERVE:
7097 handle_pg_backfill_reserve(op);
7098 break;
7099 case MSG_OSD_RECOVERY_RESERVE:
7100 handle_pg_recovery_reserve(op);
7101 break;
7102 }
7103 }
7104
7105 void OSD::_dispatch(Message *m)
7106 {
7107 assert(osd_lock.is_locked());
7108 dout(20) << "_dispatch " << m << " " << *m << dendl;
7109
7110 switch (m->get_type()) {
7111
7112 // -- don't need lock --
7113 case CEPH_MSG_PING:
7114 dout(10) << "ping from " << m->get_source() << dendl;
7115 m->put();
7116 break;
7117
7118 // -- don't need OSDMap --
7119
7120 // map and replication
7121 case CEPH_MSG_OSD_MAP:
7122 handle_osd_map(static_cast<MOSDMap*>(m));
7123 break;
7124
7125 // osd
7126 case MSG_PGSTATSACK:
7127 handle_pg_stats_ack(static_cast<MPGStatsAck*>(m));
7128 break;
7129
7130 case MSG_MON_COMMAND:
7131 handle_command(static_cast<MMonCommand*>(m));
7132 break;
7133 case MSG_COMMAND:
7134 handle_command(static_cast<MCommand*>(m));
7135 break;
7136
7137 case MSG_OSD_SCRUB:
7138 handle_scrub(static_cast<MOSDScrub*>(m));
7139 break;
7140
7141 case MSG_OSD_FORCE_RECOVERY:
7142 handle_force_recovery(m);
7143 break;
7144
7145 // -- need OSDMap --
7146
7147 case MSG_OSD_PG_CREATE:
7148 case MSG_OSD_PG_NOTIFY:
7149 case MSG_OSD_PG_QUERY:
7150 case MSG_OSD_PG_LOG:
7151 case MSG_OSD_PG_REMOVE:
7152 case MSG_OSD_PG_INFO:
7153 case MSG_OSD_PG_TRIM:
7154 case MSG_OSD_BACKFILL_RESERVE:
7155 case MSG_OSD_RECOVERY_RESERVE:
7156 {
7157 OpRequestRef op = op_tracker.create_request<OpRequest, Message*>(m);
7158 if (m->trace)
7159 op->osd_trace.init("osd op", &trace_endpoint, &m->trace);
7160 // no map? starting up?
7161 if (!osdmap) {
7162 dout(7) << "no OSDMap, not booted" << dendl;
7163 logger->inc(l_osd_waiting_for_map);
7164 waiting_for_osdmap.push_back(op);
7165 op->mark_delayed("no osdmap");
7166 break;
7167 }
7168
7169 // need OSDMap
7170 dispatch_op(op);
7171 }
7172 }
7173 }
7174
7175 void OSD::handle_pg_scrub(MOSDScrub *m, PG *pg)
7176 {
7177 pg->lock();
7178 if (pg->is_primary()) {
7179 pg->unreg_next_scrub();
7180 pg->scrubber.must_scrub = true;
7181 pg->scrubber.must_deep_scrub = m->deep || m->repair;
7182 pg->scrubber.must_repair = m->repair;
7183 pg->reg_next_scrub();
7184 dout(10) << "marking " << *pg << " for scrub" << dendl;
7185 }
7186 pg->unlock();
7187 }
7188
7189 void OSD::handle_scrub(MOSDScrub *m)
7190 {
7191 dout(10) << "handle_scrub " << *m << dendl;
7192 if (!require_mon_or_mgr_peer(m)) {
7193 m->put();
7194 return;
7195 }
7196 if (m->fsid != monc->get_fsid()) {
7197 dout(0) << "handle_scrub fsid " << m->fsid << " != " << monc->get_fsid() << dendl;
7198 m->put();
7199 return;
7200 }
7201
7202 RWLock::RLocker l(pg_map_lock);
7203 if (m->scrub_pgs.empty()) {
7204 for (ceph::unordered_map<spg_t, PG*>::iterator p = pg_map.begin();
7205 p != pg_map.end();
7206 ++p)
7207 handle_pg_scrub(m, p->second);
7208 } else {
7209 for (vector<pg_t>::iterator p = m->scrub_pgs.begin();
7210 p != m->scrub_pgs.end();
7211 ++p) {
7212 spg_t pcand;
7213 if (osdmap->get_primary_shard(*p, &pcand)) {
7214 auto pg_map_entry = pg_map.find(pcand);
7215 if (pg_map_entry != pg_map.end()) {
7216 handle_pg_scrub(m, pg_map_entry->second);
7217 }
7218 }
7219 }
7220 }
7221
7222 m->put();
7223 }
7224
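// Probabilistic throttle on scrub scheduling.  E.g. assuming
// osd_scrub_backoff_ratio = 0.66, rand()/RAND_MAX lands below the ratio
// about two ticks in three, so only ~1/3 of ticks fall through to
// sched_scrub().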
7225 bool OSD::scrub_random_backoff()
7226 {
7227 bool coin_flip = (rand() / (double)RAND_MAX >=
7228 cct->_conf->osd_scrub_backoff_ratio);
7229 if (!coin_flip) {
7230 dout(20) << "scrub_random_backoff lost coin flip, randomly backing off" << dendl;
7231 return true;
7232 }
7233 return false;
7234 }
7235
7236 OSDService::ScrubJob::ScrubJob(CephContext* cct,
7237 const spg_t& pg, const utime_t& timestamp,
7238 double pool_scrub_min_interval,
7239 double pool_scrub_max_interval, bool must)
7240 : cct(cct),
7241 pgid(pg),
7242 sched_time(timestamp),
7243 deadline(timestamp)
7244 {
7245 // if not explicitly requested, postpone the scrub with a random delay
7246 if (!must) {
7247 double scrub_min_interval = pool_scrub_min_interval > 0 ?
7248 pool_scrub_min_interval : cct->_conf->osd_scrub_min_interval;
7249 double scrub_max_interval = pool_scrub_max_interval > 0 ?
7250 pool_scrub_max_interval : cct->_conf->osd_scrub_max_interval;
7251
7252 sched_time += scrub_min_interval;
7253 double r = rand() / (double)RAND_MAX;
7254 sched_time +=
7255 scrub_min_interval * cct->_conf->osd_scrub_interval_randomize_ratio * r;
7256 deadline += scrub_max_interval;
7257 }
7258 }
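// Worked example of the jitter above (assuming osd_scrub_min_interval =
// 1 day, osd_scrub_max_interval = 7 days, and
// osd_scrub_interval_randomize_ratio = 0.5): sched_time lands at a
// uniformly random point in [stamp + 1.0 day, stamp + 1.5 days) and
// deadline at stamp + 7 days, spreading periodic scrubs across the cluster.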
7259
7260 bool OSDService::ScrubJob::operator<(const OSDService::ScrubJob& rhs) const {
7261 if (sched_time < rhs.sched_time)
7262 return true;
7263 if (sched_time > rhs.sched_time)
7264 return false;
7265 return pgid < rhs.pgid;
7266 }
7267
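// The configured window may wrap past midnight: e.g. begin_hour=4,
// end_hour=20 permits 04:00-19:59, while begin_hour=20, end_hour=4
// permits 20:00-03:59.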
7268 bool OSD::scrub_time_permit(utime_t now)
7269 {
7270 struct tm bdt;
7271 time_t tt = now.sec();
7272 localtime_r(&tt, &bdt);
7273 bool time_permit = false;
7274 if (cct->_conf->osd_scrub_begin_hour < cct->_conf->osd_scrub_end_hour) {
7275 if (bdt.tm_hour >= cct->_conf->osd_scrub_begin_hour && bdt.tm_hour < cct->_conf->osd_scrub_end_hour) {
7276 time_permit = true;
7277 }
7278 } else {
7279 if (bdt.tm_hour >= cct->_conf->osd_scrub_begin_hour || bdt.tm_hour < cct->_conf->osd_scrub_end_hour) {
7280 time_permit = true;
7281 }
7282 }
7283 if (!time_permit) {
7284 dout(20) << __func__ << " should run between " << cct->_conf->osd_scrub_begin_hour
7285 << " - " << cct->_conf->osd_scrub_end_hour
7286 << " now " << bdt.tm_hour << " = no" << dendl;
7287 } else {
7288 dout(20) << __func__ << " should run between " << cct->_conf->osd_scrub_begin_hour
7289 << " - " << cct->_conf->osd_scrub_end_hour
7290 << " now " << bdt.tm_hour << " = yes" << dendl;
7291 }
7292 return time_permit;
7293 }
7294
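// Two ways to qualify: the 1-minute loadavg is below the configured
// threshold outright, or it is below both the daily average and the
// 15-minute average (i.e. load is high but trending down).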
7295 bool OSD::scrub_load_below_threshold()
7296 {
7297 double loadavgs[3];
7298 if (getloadavg(loadavgs, 3) != 3) {
7299 dout(10) << __func__ << " couldn't read loadavgs" << dendl;
7300 return false;
7301 }
7302
7303 // allow scrub if below configured threshold
7304 if (loadavgs[0] < cct->_conf->osd_scrub_load_threshold) {
7305 dout(20) << __func__ << " loadavg " << loadavgs[0]
7306 << " < max " << cct->_conf->osd_scrub_load_threshold
7307 << " = yes" << dendl;
7308 return true;
7309 }
7310
7311 // allow scrub if below daily avg and currently decreasing
7312 if (loadavgs[0] < daily_loadavg && loadavgs[0] < loadavgs[2]) {
7313 dout(20) << __func__ << " loadavg " << loadavgs[0]
7314 << " < daily_loadavg " << daily_loadavg
7315 << " and < 15m avg " << loadavgs[2]
7316 << " = yes" << dendl;
7317 return true;
7318 }
7319
7320 dout(20) << __func__ << " loadavg " << loadavgs[0]
7321 << " >= max " << cct->_conf->osd_scrub_load_threshold
7322 << " and ( >= daily_loadavg " << daily_loadavg
7323 << " or >= 15m avg " << loadavgs[2]
7324 << ") = no" << dendl;
7325 return false;
7326 }
7327
7328 void OSD::sched_scrub()
7329 {
7330 // if not permitted, fail fast
7331 if (!service.can_inc_scrubs_pending()) {
7332 return;
7333 }
7334
7335 utime_t now = ceph_clock_now();
7336 bool time_permit = scrub_time_permit(now);
7337 bool load_is_low = scrub_load_below_threshold();
7338 dout(20) << "sched_scrub load_is_low=" << (int)load_is_low << dendl;
7339
7340 OSDService::ScrubJob scrub;
7341 if (service.first_scrub_stamp(&scrub)) {
7342 do {
7343 dout(30) << "sched_scrub examine " << scrub.pgid << " at " << scrub.sched_time << dendl;
7344
7345 if (scrub.sched_time > now) {
7346 // save ourselves some effort
7347 dout(10) << "sched_scrub " << scrub.pgid << " scheduled at " << scrub.sched_time
7348 << " > " << now << dendl;
7349 break;
7350 }
7351
7352 if (!cct->_conf->osd_scrub_during_recovery && service.is_recovery_active()) {
7353 dout(10) << __func__ << " not scheduling scrub of " << scrub.pgid << " due to active recovery ops" << dendl;
7354 break;
7355 }
7356
7357 if ((scrub.deadline >= now) && !(time_permit && load_is_low)) {
7358 dout(10) << __func__ << " not scheduling scrub for " << scrub.pgid << " due to "
7359 << (!time_permit ? "time not permitting" : "high load") << dendl;
7360 continue;
7361 }
7362
7363 PG *pg = _lookup_lock_pg(scrub.pgid);
7364 if (!pg)
7365 continue;
7366 if (pg->get_pgbackend()->scrub_supported() && pg->is_active()) {
7367 dout(10) << "sched_scrub scrubbing " << scrub.pgid << " at " << scrub.sched_time
7368 << (pg->scrubber.must_scrub ? ", explicitly requested" :
7369 (load_is_low ? ", load_is_low" : " deadline < now"))
7370 << dendl;
7371 if (pg->sched_scrub()) {
7372 pg->unlock();
7373 break;
7374 }
7375 }
7376 pg->unlock();
7377 } while (service.next_scrub_stamp(scrub, &scrub));
7378 }
7379 dout(20) << "sched_scrub done" << dendl;
7380 }
7381
7382
7383
7384 // =====================================================
7385 // MAP
7386
7387 void OSD::wait_for_new_map(OpRequestRef op)
7388 {
7389 // ask?
7390 if (waiting_for_osdmap.empty()) {
7391 osdmap_subscribe(osdmap->get_epoch() + 1, false);
7392 }
7393
7394 logger->inc(l_osd_waiting_for_map);
7395 waiting_for_osdmap.push_back(op);
7396 op->mark_delayed("wait for new map");
7397 }
7398
7399
7400 /** update_map
7401 * assimilate new OSDMap(s). scan pgs, etc.
7402 */
7403
7404 void OSD::note_down_osd(int peer)
7405 {
7406 assert(osd_lock.is_locked());
7407 cluster_messenger->mark_down(osdmap->get_cluster_addr(peer));
7408
7409 heartbeat_lock.Lock();
7410 failure_queue.erase(peer);
7411 failure_pending.erase(peer);
7412 map<int,HeartbeatInfo>::iterator p = heartbeat_peers.find(peer);
7413 if (p != heartbeat_peers.end()) {
7414 p->second.con_back->mark_down();
7415 if (p->second.con_front) {
7416 p->second.con_front->mark_down();
7417 }
7418 heartbeat_peers.erase(p);
7419 }
7420 heartbeat_lock.Unlock();
7421 }
7422
7423 void OSD::note_up_osd(int peer)
7424 {
7425 service.forget_peer_epoch(peer, osdmap->get_epoch() - 1);
7426 heartbeat_set_peers_need_update();
7427 }
7428
7429 struct C_OnMapCommit : public Context {
7430 OSD *osd;
7431 epoch_t first, last;
7432 MOSDMap *msg;
7433 C_OnMapCommit(OSD *o, epoch_t f, epoch_t l, MOSDMap *m)
7434 : osd(o), first(f), last(l), msg(m) {}
7435 void finish(int r) override {
7436 osd->_committed_osd_maps(first, last, msg);
7437 msg->put();
7438 }
7439 };
7440
7441 struct C_OnMapApply : public Context {
7442 OSDService *service;
7443 list<OSDMapRef> pinned_maps;
7444 epoch_t e;
7445 C_OnMapApply(OSDService *service,
7446 const list<OSDMapRef> &pinned_maps,
7447 epoch_t e)
7448 : service(service), pinned_maps(pinned_maps), e(e) {}
7449 void finish(int r) override {
7450 service->clear_map_bl_cache_pins(e);
7451 }
7452 };
7453
7454 void OSD::osdmap_subscribe(version_t epoch, bool force_request)
7455 {
7456 OSDMapRef osdmap = service.get_osdmap();
7457 if (osdmap->get_epoch() >= epoch)
7458 return;
7459
7460 if (monc->sub_want_increment("osdmap", epoch, CEPH_SUBSCRIBE_ONETIME) ||
7461 force_request) {
7462 monc->renew_subs();
7463 }
7464 }
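// sub_want_increment() only moves the subscription start forward; if we
// already asked for this epoch or newer it returns false, and we skip the
// renew_subs() round trip unless force_request insists.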
7465
7466 void OSD::trim_maps(epoch_t oldest, int nreceived, bool skip_maps)
7467 {
7468 epoch_t min = std::min(oldest, service.map_cache.cached_key_lower_bound());
7469 if (min <= superblock.oldest_map)
7470 return;
7471
7472 int num = 0;
7473 ObjectStore::Transaction t;
7474 for (epoch_t e = superblock.oldest_map; e < min; ++e) {
7475 dout(20) << " removing old osdmap epoch " << e << dendl;
7476 t.remove(coll_t::meta(), get_osdmap_pobject_name(e));
7477 t.remove(coll_t::meta(), get_inc_osdmap_pobject_name(e));
7478 superblock.oldest_map = e + 1;
7479 num++;
7480 if (num >= cct->_conf->osd_target_transaction_size && num >= nreceived) {
7481 service.publish_superblock(superblock);
7482 write_superblock(t);
7483 int tr = store->queue_transaction(service.meta_osr.get(), std::move(t), nullptr);
7484 assert(tr == 0);
7485 num = 0;
7486 if (!skip_maps) {
7487 // skip_maps leaves us with a range of old maps if we fail to remove all
7488 // of them before moving superblock.oldest_map forward to the first map
7489 // in the incoming MOSDMap msg, so in that case we keep removing them,
7490 // even though it may mean issuing a huge series of delete transactions
7491 // all at once.
7492 break;
7493 }
7494 }
7495 }
7496 if (num > 0) {
7497 service.publish_superblock(superblock);
7498 write_superblock(t);
7499 int tr = store->queue_transaction(service.meta_osr.get(), std::move(t), nullptr);
7500 assert(tr == 0);
7501 }
7502 // we should not remove the cached maps
7503 assert(min <= service.map_cache.cached_key_lower_bound());
7504 }
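// Batching sketch (illustrative config value): with
// osd_target_transaction_size = 30 and nreceived = 40, the first flush
// happens after 40 removals, since num must reach both bounds.  In the
// normal !skip_maps case we then stop and let future map messages continue
// the trim; with skip_maps we keep looping so the stranded range of old
// maps is cleaned up promptly.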
7505
7506 void OSD::handle_osd_map(MOSDMap *m)
7507 {
7508 assert(osd_lock.is_locked());
7509 // Keep a ref in the list until we get the newly received map written
7510 // onto disk. This is important because as long as the refs are alive,
7511 // the OSDMaps will be pinned in the cache and we won't try to read it
7512 // off of disk. Otherwise these maps will probably not stay in the cache,
7513 // and reading those OSDMaps before they are actually written can result
7514 // in a crash.
7515 list<OSDMapRef> pinned_maps;
7516 if (m->fsid != monc->get_fsid()) {
7517 dout(0) << "handle_osd_map fsid " << m->fsid << " != "
7518 << monc->get_fsid() << dendl;
7519 m->put();
7520 return;
7521 }
7522 if (is_initializing()) {
7523 dout(0) << "ignoring osdmap until we have initialized" << dendl;
7524 m->put();
7525 return;
7526 }
7527
7528 Session *session = static_cast<Session *>(m->get_connection()->get_priv());
7529 if (session && !(session->entity_name.is_mon() ||
7530 session->entity_name.is_osd())) {
7531 //not enough perms!
7532 dout(10) << "got osd map from Session " << session
7533 << " which we can't take maps from (not a mon or osd)" << dendl;
7534 m->put();
7535 session->put();
7536 return;
7537 }
7538 if (session)
7539 session->put();
7540
7541 // share with the objecter
7542 if (!is_preboot())
7543 service.objecter->handle_osd_map(m);
7544
7545 epoch_t first = m->get_first();
7546 epoch_t last = m->get_last();
7547 dout(3) << "handle_osd_map epochs [" << first << "," << last << "], i have "
7548 << superblock.newest_map
7549 << ", src has [" << m->oldest_map << "," << m->newest_map << "]"
7550 << dendl;
7551
7552 logger->inc(l_osd_map);
7553 logger->inc(l_osd_mape, last - first + 1);
7554 if (first <= superblock.newest_map)
7555 logger->inc(l_osd_mape_dup, superblock.newest_map - first + 1);
7556 if (service.max_oldest_map < m->oldest_map) {
7557 service.max_oldest_map = m->oldest_map;
7558 assert(service.max_oldest_map >= superblock.oldest_map);
7559 }
7560
7561 // make sure there is something new, here, before we bother flushing
7562 // the queues and such
7563 if (last <= superblock.newest_map) {
7564 dout(10) << " no new maps here, dropping" << dendl;
7565 m->put();
7566 return;
7567 }
7568
7569 // missing some?
7570 bool skip_maps = false;
7571 if (first > superblock.newest_map + 1) {
7572 dout(10) << "handle_osd_map message skips epochs "
7573 << superblock.newest_map + 1 << ".." << (first-1) << dendl;
7574 if (m->oldest_map <= superblock.newest_map + 1) {
7575 osdmap_subscribe(superblock.newest_map + 1, false);
7576 m->put();
7577 return;
7578 }
7579 // always try to get the full range of maps--as many as we can. this
7580 // 1- is good to have
7581 // 2- is at present the only way to ensure that we get a *full* map as
7582 // the first map!
7583 if (m->oldest_map < first) {
7584 osdmap_subscribe(m->oldest_map - 1, true);
7585 m->put();
7586 return;
7587 }
7588 skip_maps = true;
7589 }
7590
7591 ObjectStore::Transaction t;
7592 uint64_t txn_size = 0;
7593
7594 // store new maps: queue for disk and put in the osdmap cache
7595 epoch_t start = MAX(superblock.newest_map + 1, first);
7596 for (epoch_t e = start; e <= last; e++) {
7597 if (txn_size >= t.get_num_bytes()) {
7598 derr << __func__ << " transaction size overflowed" << dendl;
7599 assert(txn_size < t.get_num_bytes());
7600 }
7601 txn_size = t.get_num_bytes();
7602 map<epoch_t,bufferlist>::iterator p;
7603 p = m->maps.find(e);
7604 if (p != m->maps.end()) {
7605 dout(10) << "handle_osd_map got full map for epoch " << e << dendl;
7606 OSDMap *o = new OSDMap;
7607 bufferlist& bl = p->second;
7608
7609 o->decode(bl);
7610
7611 ghobject_t fulloid = get_osdmap_pobject_name(e);
7612 t.write(coll_t::meta(), fulloid, 0, bl.length(), bl);
7613 pin_map_bl(e, bl);
7614 pinned_maps.push_back(add_map(o));
7615
7616 got_full_map(e);
7617 continue;
7618 }
7619
7620 p = m->incremental_maps.find(e);
7621 if (p != m->incremental_maps.end()) {
7622 dout(10) << "handle_osd_map got inc map for epoch " << e << dendl;
7623 bufferlist& bl = p->second;
7624 ghobject_t oid = get_inc_osdmap_pobject_name(e);
7625 t.write(coll_t::meta(), oid, 0, bl.length(), bl);
7626 pin_map_inc_bl(e, bl);
7627
7628 OSDMap *o = new OSDMap;
7629 if (e > 1) {
7630 bufferlist obl;
7631 bool got = get_map_bl(e - 1, obl);
7632 assert(got);
7633 o->decode(obl);
7634 }
7635
7636 OSDMap::Incremental inc;
7637 bufferlist::iterator p = bl.begin();
7638 inc.decode(p);
7639 if (o->apply_incremental(inc) < 0) {
7640 derr << "ERROR: bad fsid? i have " << osdmap->get_fsid() << " and inc has " << inc.fsid << dendl;
7641 assert(0 == "bad fsid");
7642 }
7643
7644 bufferlist fbl;
7645 o->encode(fbl, inc.encode_features | CEPH_FEATURE_RESERVED);
7646
7647 bool injected_failure = false;
7648 if (cct->_conf->osd_inject_bad_map_crc_probability > 0 &&
7649 (rand() % 10000) < cct->_conf->osd_inject_bad_map_crc_probability*10000.0) {
7650 derr << __func__ << " injecting map crc failure" << dendl;
7651 injected_failure = true;
7652 }
7653
7654 if ((inc.have_crc && o->get_crc() != inc.full_crc) || injected_failure) {
7655 dout(2) << "got incremental " << e
7656 << " but failed to encode full with correct crc; requesting"
7657 << dendl;
7658 clog->warn() << "failed to encode map e" << e << " with expected crc";
7659 dout(20) << "my encoded map was:\n";
7660 fbl.hexdump(*_dout);
7661 *_dout << dendl;
7662 delete o;
7663 request_full_map(e, last);
7664 last = e - 1;
7665 break;
7666 }
7667 got_full_map(e);
7668
7669 ghobject_t fulloid = get_osdmap_pobject_name(e);
7670 t.write(coll_t::meta(), fulloid, 0, fbl.length(), fbl);
7671 pin_map_bl(e, fbl);
7672 pinned_maps.push_back(add_map(o));
7673 continue;
7674 }
7675
7676 assert(0 == "MOSDMap lied about what maps it had?");
7677 }
7678
7679 // even if this map isn't from a mon, we may have satisfied our subscription
7680 monc->sub_got("osdmap", last);
7681
7682 if (!m->maps.empty() && requested_full_first) {
7683 dout(10) << __func__ << " still missing full maps " << requested_full_first
7684 << ".." << requested_full_last << dendl;
7685 rerequest_full_maps();
7686 }
7687
7688 if (superblock.oldest_map) {
7689 // make sure we at least keep pace with incoming maps
7690 trim_maps(m->oldest_map, last - first + 1, skip_maps);
7691 }
7692
7693 if (!superblock.oldest_map || skip_maps)
7694 superblock.oldest_map = first;
7695 superblock.newest_map = last;
7696 superblock.current_epoch = last;
7697
7698 // note in the superblock that we were clean thru the prior epoch
7699 epoch_t boot_epoch = service.get_boot_epoch();
7700 if (boot_epoch && boot_epoch >= superblock.mounted) {
7701 superblock.mounted = boot_epoch;
7702 superblock.clean_thru = last;
7703 }
7704
7705 // superblock and commit
7706 write_superblock(t);
7707 store->queue_transaction(
7708 service.meta_osr.get(),
7709 std::move(t),
7710 new C_OnMapApply(&service, pinned_maps, last),
7711 new C_OnMapCommit(this, start, last, m), 0);
7712 service.publish_superblock(superblock);
7713 }
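// The queued transaction above carries two completions: C_OnMapApply
// releases the bufferlist pins once the maps are readable from the store,
// and C_OnMapCommit invokes _committed_osd_maps() (below) to actually
// advance the OSD through the newly persisted epochs.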
7714
7715 void OSD::_committed_osd_maps(epoch_t first, epoch_t last, MOSDMap *m)
7716 {
7717 dout(10) << __func__ << " " << first << ".." << last << dendl;
7718 if (is_stopping()) {
7719 dout(10) << __func__ << " bailing, we are shutting down" << dendl;
7720 return;
7721 }
7722 Mutex::Locker l(osd_lock);
7723 if (is_stopping()) {
7724 dout(10) << __func__ << " bailing, we are shutting down" << dendl;
7725 return;
7726 }
7727 map_lock.get_write();
7728
7729 bool do_shutdown = false;
7730 bool do_restart = false;
7731 bool network_error = false;
7732
7733 // advance through the new maps
7734 for (epoch_t cur = first; cur <= last; cur++) {
7735 dout(10) << " advance to epoch " << cur
7736 << " (<= last " << last
7737 << " <= newest_map " << superblock.newest_map
7738 << ")" << dendl;
7739
7740 OSDMapRef newmap = get_map(cur);
7741 assert(newmap); // we just cached it above!
7742
7743 // start blacklisting messages sent to peers that go down.
7744 service.pre_publish_map(newmap);
7745
7746 // kill connections to newly down osds
7747 bool waited_for_reservations = false;
7748 set<int> old;
7749 osdmap->get_all_osds(old);
7750 for (set<int>::iterator p = old.begin(); p != old.end(); ++p) {
7751 if (*p != whoami &&
7752 osdmap->is_up(*p) && // in old map
7753 newmap->is_down(*p)) { // but not the new one
7754 if (!waited_for_reservations) {
7755 service.await_reserved_maps();
7756 waited_for_reservations = true;
7757 }
7758 note_down_osd(*p);
7759 } else if (*p != whoami &&
7760 osdmap->is_down(*p) &&
7761 newmap->is_up(*p)) {
7762 note_up_osd(*p);
7763 }
7764 }
7765
7766 if ((osdmap->test_flag(CEPH_OSDMAP_NOUP) !=
7767 newmap->test_flag(CEPH_OSDMAP_NOUP)) ||
7768 (osdmap->is_noup(whoami) != newmap->is_noup(whoami))) {
7769 dout(10) << __func__ << " NOUP flag changed in " << newmap->get_epoch()
7770 << dendl;
7771 if (is_booting()) {
7772 // this captures the case where we sent the boot message while
7773 // NOUP was being set on the mon and our boot request was
7774 // dropped, and then later it is cleared. it imperfectly
7775 // handles the case where our original boot message was not
7776 // dropped and we restart even though we might have booted, but
7777 // that is harmless (boot will just take slightly longer).
7778 do_restart = true;
7779 }
7780 }
7781 if (osdmap->require_osd_release < CEPH_RELEASE_LUMINOUS &&
7782 newmap->require_osd_release >= CEPH_RELEASE_LUMINOUS) {
7783 dout(10) << __func__ << " require_osd_release reached luminous in "
7784 << newmap->get_epoch() << dendl;
7785 clear_pg_stat_queue();
7786 clear_outstanding_pg_stats();
7787 }
7788
7789 osdmap = newmap;
7790 epoch_t up_epoch;
7791 epoch_t boot_epoch;
7792 service.retrieve_epochs(&boot_epoch, &up_epoch, NULL);
7793 if (!up_epoch &&
7794 osdmap->is_up(whoami) &&
7795 osdmap->get_inst(whoami) == client_messenger->get_myinst()) {
7796 up_epoch = osdmap->get_epoch();
7797 dout(10) << "up_epoch is " << up_epoch << dendl;
7798 if (!boot_epoch) {
7799 boot_epoch = osdmap->get_epoch();
7800 dout(10) << "boot_epoch is " << boot_epoch << dendl;
7801 }
7802 service.set_epochs(&boot_epoch, &up_epoch, NULL);
7803 }
7804 }
7805
7806 had_map_since = ceph_clock_now();
7807
7808 epoch_t _bind_epoch = service.get_bind_epoch();
7809 if (osdmap->is_up(whoami) &&
7810 osdmap->get_addr(whoami) == client_messenger->get_myaddr() &&
7811 _bind_epoch < osdmap->get_up_from(whoami)) {
7812
7813 if (is_booting()) {
7814 dout(1) << "state: booting -> active" << dendl;
7815 set_state(STATE_ACTIVE);
7816
7817 // set incarnation so that osd_reqid_t's we generate for our
7818 // objecter requests are unique across restarts.
7819 service.objecter->set_client_incarnation(osdmap->get_epoch());
7820 }
7821 }
7822
7823 if (osdmap->get_epoch() > 0 &&
7824 is_active()) {
7825 if (!osdmap->exists(whoami)) {
7826 dout(0) << "map says i do not exist. shutting down." << dendl;
7827 do_shutdown = true; // don't call shutdown() while we have
7828 // everything paused
7829 } else if (!osdmap->is_up(whoami) ||
7830 !osdmap->get_addr(whoami).probably_equals(
7831 client_messenger->get_myaddr()) ||
7832 !osdmap->get_cluster_addr(whoami).probably_equals(
7833 cluster_messenger->get_myaddr()) ||
7834 !osdmap->get_hb_back_addr(whoami).probably_equals(
7835 hb_back_server_messenger->get_myaddr()) ||
7836 (osdmap->get_hb_front_addr(whoami) != entity_addr_t() &&
7837 !osdmap->get_hb_front_addr(whoami).probably_equals(
7838 hb_front_server_messenger->get_myaddr()))) {
7839 if (!osdmap->is_up(whoami)) {
7840 if (service.is_preparing_to_stop() || service.is_stopping()) {
7841 service.got_stop_ack();
7842 } else {
7843 clog->warn() << "Monitor daemon marked osd." << whoami << " down, "
7844 "but it is still running";
7845 clog->debug() << "map e" << osdmap->get_epoch()
7846 << " wrongly marked me down at e"
7847 << osdmap->get_down_at(whoami);
7848 }
7849 } else if (!osdmap->get_addr(whoami).probably_equals(
7850 client_messenger->get_myaddr())) {
7851 clog->error() << "map e" << osdmap->get_epoch()
7852 << " had wrong client addr (" << osdmap->get_addr(whoami)
7853 << " != my " << client_messenger->get_myaddr() << ")";
7854 } else if (!osdmap->get_cluster_addr(whoami).probably_equals(
7855 cluster_messenger->get_myaddr())) {
7856 clog->error() << "map e" << osdmap->get_epoch()
7857 << " had wrong cluster addr ("
7858 << osdmap->get_cluster_addr(whoami)
7859 << " != my " << cluster_messenger->get_myaddr() << ")";
7860 } else if (!osdmap->get_hb_back_addr(whoami).probably_equals(
7861 hb_back_server_messenger->get_myaddr())) {
7862 clog->error() << "map e" << osdmap->get_epoch()
7863 << " had wrong heartbeat back addr ("
7864 << osdmap->get_hb_back_addr(whoami)
7865 << " != my " << hb_back_server_messenger->get_myaddr()
7866 << ")";
7867 } else if (osdmap->get_hb_front_addr(whoami) != entity_addr_t() &&
7868 !osdmap->get_hb_front_addr(whoami).probably_equals(
7869 hb_front_server_messenger->get_myaddr())) {
7870 clog->error() << "map e" << osdmap->get_epoch()
7871 << " had wrong heartbeat front addr ("
7872 << osdmap->get_hb_front_addr(whoami)
7873 << " != my " << hb_front_server_messenger->get_myaddr()
7874 << ")";
7875 }
7876
7877 if (!service.is_stopping()) {
7878 epoch_t up_epoch = 0;
7879 epoch_t bind_epoch = osdmap->get_epoch();
7880 service.set_epochs(NULL,&up_epoch, &bind_epoch);
7881 do_restart = true;
7882
7883 // add to the markdown log
7884 utime_t now = ceph_clock_now();
7885 utime_t grace = utime_t(cct->_conf->osd_max_markdown_period, 0);
7886 osd_markdown_log.push_back(now);
7887 // drop all out-of-date log entries
7888 while (!osd_markdown_log.empty() &&
7889 osd_markdown_log.front() + grace < now)
7890 osd_markdown_log.pop_front();
7891 if ((int)osd_markdown_log.size() > cct->_conf->osd_max_markdown_count) {
7892 dout(0) << __func__ << " marked down "
7893 << osd_markdown_log.size()
7894 << " > osd_max_markdown_count "
7895 << cct->_conf->osd_max_markdown_count
7896 << " in last " << grace << " seconds, shutting down"
7897 << dendl;
7898 do_restart = false;
7899 do_shutdown = true;
7900 }
7901
7902 start_waiting_for_healthy();
7903
7904 set<int> avoid_ports;
7905 #if defined(__FreeBSD__)
7906 // prevent FreeBSD from grabbing the client_messenger port during
7907 // rebinding; otherwise the cluster_messenger may end up connecting
7908 // to the same port
7909 avoid_ports.insert(client_messenger->get_myaddr().get_port());
7910 #endif
7911 avoid_ports.insert(cluster_messenger->get_myaddr().get_port());
7912 avoid_ports.insert(hb_back_server_messenger->get_myaddr().get_port());
7913 avoid_ports.insert(hb_front_server_messenger->get_myaddr().get_port());
7914
7915 int r = cluster_messenger->rebind(avoid_ports);
7916 if (r != 0) {
7917 do_shutdown = true; // FIXME: do_restart?
7918 network_error = true;
7919 dout(0) << __func__ << " marked down:"
7920 << " rebind cluster_messenger failed" << dendl;
7921 }
7922
7923 r = hb_back_server_messenger->rebind(avoid_ports);
7924 if (r != 0) {
7925 do_shutdown = true; // FIXME: do_restart?
7926 network_error = true;
7927 dout(0) << __func__ << " marked down:"
7928 << " rebind hb_back_server_messenger failed" << dendl;
7929 }
7930
7931 r = hb_front_server_messenger->rebind(avoid_ports);
7932 if (r != 0) {
7933 do_shutdown = true; // FIXME: do_restart?
7934 network_error = true;
7935 dout(0) << __func__ << " marked down:"
7936 << " rebind hb_front_server_messenger failed" << dendl;
7937 }
7938
7939 hb_front_client_messenger->mark_down_all();
7940 hb_back_client_messenger->mark_down_all();
7941
7942 reset_heartbeat_peers();
7943 }
7944 }
7945 }
7946
7947 map_lock.put_write();
7948
7949 check_osdmap_features(store);
7950
7951 // yay!
7952 consume_map();
7953
7954 if (is_active() || is_waiting_for_healthy())
7955 maybe_update_heartbeat_peers();
7956
7957 if (!is_active()) {
7958 dout(10) << " not yet active; waiting for peering wq to drain" << dendl;
7959 peering_wq.drain();
7960 } else {
7961 activate_map();
7962 }
7963
7964 if (do_shutdown) {
7965 if (network_error) {
7966 Mutex::Locker l(heartbeat_lock);
7967 map<int,pair<utime_t,entity_inst_t>>::iterator it =
7968 failure_pending.begin();
7969 while (it != failure_pending.end()) {
7970 dout(10) << __func__ << " canceling in-flight failure report for osd."
7971 << it->first << dendl;
7972 send_still_alive(osdmap->get_epoch(), it->second.second);
7973 failure_pending.erase(it++);
7974 }
7975 }
7976 // trigger shutdown in a different thread
7977 dout(0) << __func__ << " shutdown OSD via async signal" << dendl;
7978 queue_async_signal(SIGINT);
7979 }
7980 else if (m->newest_map && m->newest_map > last) {
7981 dout(10) << " msg say newest map is " << m->newest_map
7982 << ", requesting more" << dendl;
7983 osdmap_subscribe(osdmap->get_epoch()+1, false);
7984 }
7985 else if (is_preboot()) {
7986 if (m->get_source().is_mon())
7987 _preboot(m->oldest_map, m->newest_map);
7988 else
7989 start_boot();
7990 }
7991 else if (do_restart)
7992 start_boot();
7993
7994 }
7995
7996 void OSD::check_osdmap_features(ObjectStore *fs)
7997 {
7998 // adjust required feature bits?
7999
8000 // we have to be a bit careful here, because we are accessing the
8001 // Policy structures without taking any lock. in particular, only
8002 // modify integer values that can safely be read by a racing CPU.
8003 // since we are only accessing existing Policy structures at their
8004 // current memory location, and setting or clearing bits in integer
8005 // fields, and we are the only writer, this is not a problem.
8006
8007 {
8008 Messenger::Policy p = client_messenger->get_default_policy();
8009 uint64_t mask;
8010 uint64_t features = osdmap->get_features(entity_name_t::TYPE_CLIENT, &mask);
8011 if ((p.features_required & mask) != features) {
8012 dout(0) << "crush map has features " << features
8013 << ", adjusting msgr requires for clients" << dendl;
8014 p.features_required = (p.features_required & ~mask) | features;
8015 client_messenger->set_default_policy(p);
8016 }
8017 }
8018 {
8019 Messenger::Policy p = client_messenger->get_policy(entity_name_t::TYPE_MON);
8020 uint64_t mask;
8021 uint64_t features = osdmap->get_features(entity_name_t::TYPE_MON, &mask);
8022 if ((p.features_required & mask) != features) {
8023 dout(0) << "crush map has features " << features
8024 << " was " << p.features_required
8025 << ", adjusting msgr requires for mons" << dendl;
8026 p.features_required = (p.features_required & ~mask) | features;
8027 client_messenger->set_policy(entity_name_t::TYPE_MON, p);
8028 }
8029 }
8030 {
8031 Messenger::Policy p = cluster_messenger->get_policy(entity_name_t::TYPE_OSD);
8032 uint64_t mask;
8033 uint64_t features = osdmap->get_features(entity_name_t::TYPE_OSD, &mask);
8034
8035 if ((p.features_required & mask) != features) {
8036 dout(0) << "crush map has features " << features
8037 << ", adjusting msgr requires for osds" << dendl;
8038 p.features_required = (p.features_required & ~mask) | features;
8039 cluster_messenger->set_policy(entity_name_t::TYPE_OSD, p);
8040 }
8041
8042 if ((features & CEPH_FEATURE_OSD_ERASURE_CODES) &&
8043 !superblock.compat_features.incompat.contains(CEPH_OSD_FEATURE_INCOMPAT_SHARDS)) {
8044 dout(0) << __func__ << " enabling on-disk ERASURE CODES compat feature" << dendl;
8045 superblock.compat_features.incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_SHARDS);
8046 ObjectStore::Transaction t;
8047 write_superblock(t);
8048 int err = store->queue_transaction(service.meta_osr.get(), std::move(t), NULL);
8049 assert(err == 0);
8050 }
8051 }
8052 }
8053
8054 bool OSD::advance_pg(
8055 epoch_t osd_epoch, PG *pg,
8056 ThreadPool::TPHandle &handle,
8057 PG::RecoveryCtx *rctx,
8058 set<PGRef> *new_pgs)
8059 {
8060 assert(pg->is_locked());
8061 epoch_t next_epoch = pg->get_osdmap()->get_epoch() + 1;
8062 OSDMapRef lastmap = pg->get_osdmap();
8063
8064 if (lastmap->get_epoch() == osd_epoch)
8065 return true;
8066 assert(lastmap->get_epoch() < osd_epoch);
8067
8068 epoch_t min_epoch = service.get_min_pg_epoch();
8069 epoch_t max;
8070 if (min_epoch) {
8071 max = min_epoch + cct->_conf->osd_map_max_advance;
8072 } else {
8073 max = next_epoch + cct->_conf->osd_map_max_advance;
8074 }
8075
8076 for (;
8077 next_epoch <= osd_epoch && next_epoch <= max;
8078 ++next_epoch) {
8079 OSDMapRef nextmap = service.try_get_map(next_epoch);
8080 if (!nextmap) {
8081 dout(20) << __func__ << " missing map " << next_epoch << dendl;
8082 // make sure max is bumped up so that we can get past any
8083 // gap in maps
8084 max = MAX(max, next_epoch + cct->_conf->osd_map_max_advance);
8085 continue;
8086 }
8087
8088 vector<int> newup, newacting;
8089 int up_primary, acting_primary;
8090 nextmap->pg_to_up_acting_osds(
8091 pg->info.pgid.pgid,
8092 &newup, &up_primary,
8093 &newacting, &acting_primary);
8094 pg->handle_advance_map(
8095 nextmap, lastmap, newup, up_primary,
8096 newacting, acting_primary, rctx);
8097
8098 // Check for split!
8099 set<spg_t> children;
8100 spg_t parent(pg->info.pgid);
8101 if (parent.is_split(
8102 lastmap->get_pg_num(pg->pool.id),
8103 nextmap->get_pg_num(pg->pool.id),
8104 &children)) {
8105 service.mark_split_in_progress(pg->info.pgid, children);
8106 split_pgs(
8107 pg, children, new_pgs, lastmap, nextmap,
8108 rctx);
8109 }
8110
8111 lastmap = nextmap;
8112 handle.reset_tp_timeout();
8113 }
8114 service.pg_update_epoch(pg->info.pgid, lastmap->get_epoch());
8115 pg->handle_activate_map(rctx);
8116 if (next_epoch <= osd_epoch) {
8117 dout(10) << __func__ << " advanced to max " << max
8118 << " past min epoch " << min_epoch
8119 << " ... will requeue " << *pg << dendl;
8120 return false;
8121 }
8122 return true;
8123 }
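// Pacing example: if the slowest PG on this OSD is at epoch 100 and
// osd_map_max_advance is 40, a single pass advances this PG through at
// most epoch 140; if osd_epoch is further ahead we return false so the
// PG is requeued instead of monopolizing a worker thread.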
8124
8125 void OSD::consume_map()
8126 {
8127 assert(osd_lock.is_locked());
8128 dout(7) << "consume_map version " << osdmap->get_epoch() << dendl;
8129
8130 int num_pg_primary = 0, num_pg_replica = 0, num_pg_stray = 0;
8131 list<PGRef> to_remove;
8132
8133 // scan pg's
8134 {
8135 RWLock::RLocker l(pg_map_lock);
8136 for (ceph::unordered_map<spg_t,PG*>::iterator it = pg_map.begin();
8137 it != pg_map.end();
8138 ++it) {
8139 PG *pg = it->second;
8140 pg->lock();
8141 if (pg->is_primary())
8142 num_pg_primary++;
8143 else if (pg->is_replica())
8144 num_pg_replica++;
8145 else
8146 num_pg_stray++;
8147
8148 if (!osdmap->have_pg_pool(pg->info.pgid.pool())) {
8149 //pool is deleted!
8150 to_remove.push_back(PGRef(pg));
8151 } else {
8152 service.init_splits_between(it->first, service.get_osdmap(), osdmap);
8153 }
8154
8155 pg->unlock();
8156 }
8157 }
8158
8159 for (list<PGRef>::iterator i = to_remove.begin();
8160 i != to_remove.end();
8161 to_remove.erase(i++)) {
8162 RWLock::WLocker locker(pg_map_lock);
8163 (*i)->lock();
8164 _remove_pg(&**i);
8165 (*i)->unlock();
8166 }
8167
8168 service.expand_pg_num(service.get_osdmap(), osdmap);
8169
8170 service.pre_publish_map(osdmap);
8171 service.await_reserved_maps();
8172 service.publish_map(osdmap);
8173
8174 service.maybe_inject_dispatch_delay();
8175
8176 dispatch_sessions_waiting_on_map();
8177
8178 service.maybe_inject_dispatch_delay();
8179
8180 // remove any PGs which we no longer host from the session waiting_for_pg lists
8181 dout(20) << __func__ << " checking waiting_for_pg" << dendl;
8182 op_shardedwq.prune_pg_waiters(osdmap, whoami);
8183
8184 service.maybe_inject_dispatch_delay();
8185
8186 // scan pg's
8187 {
8188 RWLock::RLocker l(pg_map_lock);
8189 for (ceph::unordered_map<spg_t,PG*>::iterator it = pg_map.begin();
8190 it != pg_map.end();
8191 ++it) {
8192 PG *pg = it->second;
8193 pg->lock();
8194 pg->queue_null(osdmap->get_epoch(), osdmap->get_epoch());
8195 pg->unlock();
8196 }
8197
8198 logger->set(l_osd_pg, pg_map.size());
8199 }
8200 logger->set(l_osd_pg_primary, num_pg_primary);
8201 logger->set(l_osd_pg_replica, num_pg_replica);
8202 logger->set(l_osd_pg_stray, num_pg_stray);
8203 }
8204
8205 void OSD::activate_map()
8206 {
8207 assert(osd_lock.is_locked());
8208
8209 dout(7) << "activate_map version " << osdmap->get_epoch() << dendl;
8210
8211 if (!osdmap->test_flag(CEPH_OSDMAP_SORTBITWISE)) {
8212 derr << __func__ << " SORTBITWISE flag is not set" << dendl;
8213 ceph_abort();
8214 }
8215
8216 if (osdmap->test_flag(CEPH_OSDMAP_FULL)) {
8217 dout(10) << " osdmap flagged full, doing onetime osdmap subscribe" << dendl;
8218 osdmap_subscribe(osdmap->get_epoch() + 1, false);
8219 }
8220
8221 // norecover?
8222 if (osdmap->test_flag(CEPH_OSDMAP_NORECOVER)) {
8223 if (!service.recovery_is_paused()) {
8224 dout(1) << "pausing recovery (NORECOVER flag set)" << dendl;
8225 service.pause_recovery();
8226 }
8227 } else {
8228 if (service.recovery_is_paused()) {
8229 dout(1) << "unpausing recovery (NORECOVER flag unset)" << dendl;
8230 service.unpause_recovery();
8231 }
8232 }
8233
8234 service.activate_map();
8235
8236 // process waiters
8237 take_waiters(waiting_for_osdmap);
8238 }
8239
8240 bool OSD::require_mon_peer(const Message *m)
8241 {
8242 if (!m->get_connection()->peer_is_mon()) {
8243 dout(0) << "require_mon_peer received from non-mon "
8244 << m->get_connection()->get_peer_addr()
8245 << " " << *m << dendl;
8246 return false;
8247 }
8248 return true;
8249 }
8250
8251 bool OSD::require_mon_or_mgr_peer(const Message *m)
8252 {
8253 if (!m->get_connection()->peer_is_mon() &&
8254 !m->get_connection()->peer_is_mgr()) {
8255 dout(0) << "require_mon_or_mgr_peer received from non-mon, non-mgr "
8256 << m->get_connection()->get_peer_addr()
8257 << " " << *m << dendl;
8258 return false;
8259 }
8260 return true;
8261 }
8262
8263 bool OSD::require_osd_peer(const Message *m)
8264 {
8265 if (!m->get_connection()->peer_is_osd()) {
8266 dout(0) << "require_osd_peer received from non-osd "
8267 << m->get_connection()->get_peer_addr()
8268 << " " << *m << dendl;
8269 return false;
8270 }
8271 return true;
8272 }
8273
8274 bool OSD::require_self_aliveness(const Message *m, epoch_t epoch)
8275 {
8276 epoch_t up_epoch = service.get_up_epoch();
8277 if (epoch < up_epoch) {
8278 dout(7) << "from pre-up epoch " << epoch << " < " << up_epoch << dendl;
8279 return false;
8280 }
8281
8282 if (!is_active()) {
8283 dout(7) << "still in boot state, dropping message " << *m << dendl;
8284 return false;
8285 }
8286
8287 return true;
8288 }
8289
8290 bool OSD::require_same_peer_instance(const Message *m, OSDMapRef& map,
8291 bool is_fast_dispatch)
8292 {
8293 int from = m->get_source().num();
8294
8295 if (map->is_down(from) ||
8296 (map->get_cluster_addr(from) != m->get_source_inst().addr)) {
8297 dout(5) << "from dead osd." << from << ", marking down, "
8298 << " msg was " << m->get_source_inst().addr
8299 << " expected " << (map->is_up(from) ?
8300 map->get_cluster_addr(from) : entity_addr_t())
8301 << dendl;
8302 ConnectionRef con = m->get_connection();
8303 con->mark_down();
8304 Session *s = static_cast<Session*>(con->get_priv());
8305 if (s) {
8306 if (!is_fast_dispatch)
8307 s->session_dispatch_lock.Lock();
8308 clear_session_waiting_on_map(s);
8309 con->set_priv(NULL); // break ref <-> session cycle, if any
8310 if (!is_fast_dispatch)
8311 s->session_dispatch_lock.Unlock();
8312 s->put();
8313 }
8314 return false;
8315 }
8316 return true;
8317 }
8318
8319
8320 /*
8321 * require that we have same (or newer) map, and that
8322 * the source is the pg primary.
8323 */
8324 bool OSD::require_same_or_newer_map(OpRequestRef& op, epoch_t epoch,
8325 bool is_fast_dispatch)
8326 {
8327 const Message *m = op->get_req();
8328 dout(15) << "require_same_or_newer_map " << epoch
8329 << " (i am " << osdmap->get_epoch() << ") " << m << dendl;
8330
8331 assert(osd_lock.is_locked());
8332
8333 // do they have a newer map?
8334 if (epoch > osdmap->get_epoch()) {
8335 dout(7) << "waiting for newer map epoch " << epoch
8336 << " > my " << osdmap->get_epoch() << " with " << m << dendl;
8337 wait_for_new_map(op);
8338 return false;
8339 }
8340
8341 if (!require_self_aliveness(op->get_req(), epoch)) {
8342 return false;
8343 }
8344
8345 // ok, our map is same or newer.. do they still exist?
8346 if (m->get_connection()->get_messenger() == cluster_messenger &&
8347 !require_same_peer_instance(op->get_req(), osdmap, is_fast_dispatch)) {
8348 return false;
8349 }
8350
8351 return true;
8352 }
8353
8354
8355
8356
8357
8358 // ----------------------------------------
8359 // pg creation
8360
8361 void OSD::split_pgs(
8362 PG *parent,
8363 const set<spg_t> &childpgids, set<PGRef> *out_pgs,
8364 OSDMapRef curmap,
8365 OSDMapRef nextmap,
8366 PG::RecoveryCtx *rctx)
8367 {
8368 unsigned pg_num = nextmap->get_pg_num(
8369 parent->pool.id);
8370 parent->update_snap_mapper_bits(
8371 parent->info.pgid.get_split_bits(pg_num)
8372 );
8373
8374 vector<object_stat_sum_t> updated_stats(childpgids.size() + 1);
8375 parent->info.stats.stats.sum.split(updated_stats);
8376
8377 vector<object_stat_sum_t>::iterator stat_iter = updated_stats.begin();
8378 for (set<spg_t>::const_iterator i = childpgids.begin();
8379 i != childpgids.end();
8380 ++i, ++stat_iter) {
8381 assert(stat_iter != updated_stats.end());
8382 dout(10) << "Splitting " << *parent << " into " << *i << dendl;
8383 assert(service.splitting(*i));
8384 PG* child = _make_pg(nextmap, *i);
8385 child->lock(true);
8386 out_pgs->insert(child);
8387 rctx->created_pgs.insert(child);
8388
8389 unsigned split_bits = i->get_split_bits(pg_num);
8390 dout(10) << "pg_num is " << pg_num << dendl;
8391 dout(10) << "m_seed " << i->ps() << dendl;
8392 dout(10) << "split_bits is " << split_bits << dendl;
8393
8394 parent->split_colls(
8395 *i,
8396 split_bits,
8397 i->ps(),
8398 &child->pool.info,
8399 rctx->transaction);
8400 parent->split_into(
8401 i->pgid,
8402 child,
8403 split_bits);
8404 child->info.stats.stats.sum = *stat_iter;
8405
8406 child->write_if_dirty(*(rctx->transaction));
8407 child->unlock();
8408 }
8409 assert(stat_iter != updated_stats.end());
8410 parent->info.stats.stats.sum = *stat_iter;
8411 parent->write_if_dirty(*(rctx->transaction));
8412 }
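// Split arithmetic sketch (illustrative pgids): when a pool's pg_num
// doubles from 8 to 16, pg 1.3 gains one hash bit and spawns child 1.b
// (0x3 + 0x8); split_colls()/split_into() then move the objects whose
// hash matches the child seed into the new collection.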
8413
8414 /*
8415 * holding osd_lock
8416 */
8417 void OSD::handle_pg_create(OpRequestRef op)
8418 {
8419 const MOSDPGCreate *m = static_cast<const MOSDPGCreate*>(op->get_req());
8420 assert(m->get_type() == MSG_OSD_PG_CREATE);
8421
8422 dout(10) << "handle_pg_create " << *m << dendl;
8423
8424 if (!require_mon_peer(op->get_req())) {
8425 return;
8426 }
8427
8428 if (!require_same_or_newer_map(op, m->epoch, false))
8429 return;
8430
8431 op->mark_started();
8432
8433 map<pg_t,utime_t>::const_iterator ci = m->ctimes.begin();
8434 for (map<pg_t,pg_create_t>::const_iterator p = m->mkpg.begin();
8435 p != m->mkpg.end();
8436 ++p, ++ci) {
8437 assert(ci != m->ctimes.end() && ci->first == p->first);
8438 epoch_t created = p->second.created;
8439 if (p->second.split_bits) // Skip split pgs
8440 continue;
8441 pg_t on = p->first;
8442
8443 if (on.preferred() >= 0) {
8444 dout(20) << "ignoring localized pg " << on << dendl;
8445 continue;
8446 }
8447
8448 if (!osdmap->have_pg_pool(on.pool())) {
8449 dout(20) << "ignoring pg on deleted pool " << on << dendl;
8450 continue;
8451 }
8452
8453 dout(20) << "mkpg " << on << " e" << created << "@" << ci->second << dendl;
8454
8455 // is it still ours?
8456 vector<int> up, acting;
8457 int up_primary = -1;
8458 int acting_primary = -1;
8459 osdmap->pg_to_up_acting_osds(on, &up, &up_primary, &acting, &acting_primary);
8460 int role = osdmap->calc_pg_role(whoami, acting, acting.size());
8461
8462 if (acting_primary != whoami) {
8463 dout(10) << "mkpg " << on << " not acting_primary (" << acting_primary
8464 << "), my role=" << role << ", skipping" << dendl;
8465 continue;
8466 }
8467
8468 spg_t pgid;
8469 bool mapped = osdmap->get_primary_shard(on, &pgid);
8470 assert(mapped);
8471
8472 PastIntervals pi(
8473 osdmap->get_pools().at(pgid.pool()).ec_pool(),
8474 *osdmap);
8475 pg_history_t history;
8476 build_initial_pg_history(pgid, created, ci->second, &history, &pi);
8477
8478 // The mon won't resend unless the primary changed, so
8479 // we ignore same_interval_since. We'll pass this history
8480 // to handle_pg_peering_evt with the current epoch as the
8481 // event -- the project_pg_history check in
8482 // handle_pg_peering_evt will be a noop.
8483 if (history.same_primary_since > m->epoch) {
8484 dout(10) << __func__ << ": got obsolete pg create on pgid "
8485 << pgid << " from epoch " << m->epoch
8486 << ", primary changed in " << history.same_primary_since
8487 << dendl;
8488 continue;
8489 }
8490
8491 if (handle_pg_peering_evt(
8492 pgid,
8493 history,
8494 pi,
8495 osdmap->get_epoch(),
8496 PG::CephPeeringEvtRef(
8497 new PG::CephPeeringEvt(
8498 osdmap->get_epoch(),
8499 osdmap->get_epoch(),
8500 PG::NullEvt()))
8501 ) == -EEXIST) {
8502 service.send_pg_created(pgid.pgid);
8503 }
8504 }
8505 last_pg_create_epoch = m->epoch;
8506
8507 maybe_update_heartbeat_peers();
8508 }
8509
8510
8511 // ----------------------------------------
8512 // peering and recovery
8513
8514 PG::RecoveryCtx OSD::create_context()
8515 {
8516 ObjectStore::Transaction *t = new ObjectStore::Transaction;
8517 C_Contexts *on_applied = new C_Contexts(cct);
8518 C_Contexts *on_safe = new C_Contexts(cct);
8519 map<int, map<spg_t,pg_query_t> > *query_map =
8520 new map<int, map<spg_t, pg_query_t> >;
8521 map<int,vector<pair<pg_notify_t, PastIntervals> > > *notify_list =
8522 new map<int, vector<pair<pg_notify_t, PastIntervals> > >;
8523 map<int,vector<pair<pg_notify_t, PastIntervals> > > *info_map =
8524 new map<int,vector<pair<pg_notify_t, PastIntervals> > >;
8525 PG::RecoveryCtx rctx(query_map, info_map, notify_list,
8526 on_applied, on_safe, t);
8527 return rctx;
8528 }
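// The RecoveryCtx owns these heap allocations; dispatch_context() always
// deletes the three message maps, and either deletes the transaction and
// contexts (when there is nothing to queue) or hands them off to
// queue_transaction().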
8529
8530 struct C_OpenPGs : public Context {
8531 set<PGRef> pgs;
8532 ObjectStore *store;
8533 OSD *osd;
8534 C_OpenPGs(set<PGRef>& p, ObjectStore *s, OSD* o) : store(s), osd(o) {
8535 pgs.swap(p);
8536 }
8537 void finish(int r) override {
8538 RWLock::RLocker l(osd->pg_map_lock);
8539 for (auto p : pgs) {
8540 if (osd->pg_map.count(p->info.pgid)) {
8541 p->ch = store->open_collection(p->coll);
8542 assert(p->ch);
8543 }
8544 }
8545 }
8546 };
8547
8548 void OSD::dispatch_context_transaction(PG::RecoveryCtx &ctx, PG *pg,
8549 ThreadPool::TPHandle *handle)
8550 {
8551 if (!ctx.transaction->empty()) {
8552 if (!ctx.created_pgs.empty()) {
8553 ctx.on_applied->add(new C_OpenPGs(ctx.created_pgs, store, this));
8554 }
8555 int tr = store->queue_transaction(
8556 pg->osr.get(),
8557 std::move(*ctx.transaction), ctx.on_applied, ctx.on_safe, NULL,
8558 TrackedOpRef(), handle);
8559 delete (ctx.transaction);
8560 assert(tr == 0);
8561 ctx.transaction = new ObjectStore::Transaction;
8562 ctx.on_applied = new C_Contexts(cct);
8563 ctx.on_safe = new C_Contexts(cct);
8564 }
8565 }
8566
8567 void OSD::dispatch_context(PG::RecoveryCtx &ctx, PG *pg, OSDMapRef curmap,
8568 ThreadPool::TPHandle *handle)
8569 {
8570 if (service.get_osdmap()->is_up(whoami) &&
8571 is_active()) {
8572 do_notifies(*ctx.notify_list, curmap);
8573 do_queries(*ctx.query_map, curmap);
8574 do_infos(*ctx.info_map, curmap);
8575 }
8576 delete ctx.notify_list;
8577 delete ctx.query_map;
8578 delete ctx.info_map;
8579 if ((ctx.on_applied->empty() &&
8580 ctx.on_safe->empty() &&
8581 ctx.transaction->empty() &&
8582 ctx.created_pgs.empty()) || !pg) {
8583 delete ctx.transaction;
8584 delete ctx.on_applied;
8585 delete ctx.on_safe;
8586 assert(ctx.created_pgs.empty());
8587 } else {
8588 if (!ctx.created_pgs.empty()) {
8589 ctx.on_applied->add(new C_OpenPGs(ctx.created_pgs, store, this));
8590 }
8591 int tr = store->queue_transaction(
8592 pg->osr.get(),
8593 std::move(*ctx.transaction), ctx.on_applied, ctx.on_safe, NULL, TrackedOpRef(),
8594 handle);
8595 delete (ctx.transaction);
8596 assert(tr == 0);
8597 }
8598 }
8599
8600 /** do_notifies
8601 * Send an MOSDPGNotify to a primary, listing the PGs for which I have
8602 * content and for which that OSD is the primary.
8603 */
8604
8605 void OSD::do_notifies(
8606 map<int,vector<pair<pg_notify_t,PastIntervals> > >& notify_list,
8607 OSDMapRef curmap)
8608 {
8609 for (map<int,
8610 vector<pair<pg_notify_t,PastIntervals> > >::iterator it =
8611 notify_list.begin();
8612 it != notify_list.end();
8613 ++it) {
8614 if (!curmap->is_up(it->first)) {
8615 dout(20) << __func__ << " skipping down osd." << it->first << dendl;
8616 continue;
8617 }
8618 ConnectionRef con = service.get_con_osd_cluster(
8619 it->first, curmap->get_epoch());
8620 if (!con) {
8621 dout(20) << __func__ << " skipping osd." << it->first
8622 << " (NULL con)" << dendl;
8623 continue;
8624 }
8625 service.share_map_peer(it->first, con.get(), curmap);
8626 dout(7) << __func__ << " osd " << it->first
8627 << " on " << it->second.size() << " PGs" << dendl;
8628 MOSDPGNotify *m = new MOSDPGNotify(curmap->get_epoch(),
8629 it->second);
8630 con->send_message(m);
8631 }
8632 }
8633
8634
8635 /** do_queries
8636 * send out pending queries for info | summaries
8637 */
8638 void OSD::do_queries(map<int, map<spg_t,pg_query_t> >& query_map,
8639 OSDMapRef curmap)
8640 {
8641 for (map<int, map<spg_t,pg_query_t> >::iterator pit = query_map.begin();
8642 pit != query_map.end();
8643 ++pit) {
8644 if (!curmap->is_up(pit->first)) {
8645 dout(20) << __func__ << " skipping down osd." << pit->first << dendl;
8646 continue;
8647 }
8648 int who = pit->first;
8649 ConnectionRef con = service.get_con_osd_cluster(who, curmap->get_epoch());
8650 if (!con) {
8651 dout(20) << __func__ << " skipping osd." << who
8652 << " (NULL con)" << dendl;
8653 continue;
8654 }
8655 service.share_map_peer(who, con.get(), curmap);
8656 dout(7) << __func__ << " querying osd." << who
8657 << " on " << pit->second.size() << " PGs" << dendl;
8658 MOSDPGQuery *m = new MOSDPGQuery(curmap->get_epoch(), pit->second);
8659 con->send_message(m);
8660 }
8661 }
8662
8663
8664 void OSD::do_infos(map<int,
8665 vector<pair<pg_notify_t, PastIntervals> > >& info_map,
8666 OSDMapRef curmap)
8667 {
8668 for (map<int,
8669 vector<pair<pg_notify_t, PastIntervals> > >::iterator p =
8670 info_map.begin();
8671 p != info_map.end();
8672 ++p) {
8673 if (!curmap->is_up(p->first)) {
8674 dout(20) << __func__ << " skipping down osd." << p->first << dendl;
8675 continue;
8676 }
8677 for (vector<pair<pg_notify_t,PastIntervals> >::iterator i = p->second.begin();
8678 i != p->second.end();
8679 ++i) {
8680 dout(20) << __func__ << " sending info " << i->first.info
8681 << " to shard " << p->first << dendl;
8682 }
8683 ConnectionRef con = service.get_con_osd_cluster(
8684 p->first, curmap->get_epoch());
8685 if (!con) {
8686 dout(20) << __func__ << " skipping osd." << p->first
8687 << " (NULL con)" << dendl;
8688 continue;
8689 }
8690 service.share_map_peer(p->first, con.get(), curmap);
8691 MOSDPGInfo *m = new MOSDPGInfo(curmap->get_epoch());
8692 m->pg_list = p->second;
8693 con->send_message(m);
8694 }
8695 info_map.clear();
8696 }
8697
8698
8699 /** PGNotify
8700 * from non-primary to primary
8701 * includes pg_info_t.
8702 * NOTE: called with opqueue active.
8703 */
8704 void OSD::handle_pg_notify(OpRequestRef op)
8705 {
8706 const MOSDPGNotify *m = static_cast<const MOSDPGNotify*>(op->get_req());
8707 assert(m->get_type() == MSG_OSD_PG_NOTIFY);
8708
8709 dout(7) << "handle_pg_notify from " << m->get_source() << dendl;
8710 int from = m->get_source().num();
8711
8712 if (!require_osd_peer(op->get_req()))
8713 return;
8714
8715 if (!require_same_or_newer_map(op, m->get_epoch(), false))
8716 return;
8717
8718 op->mark_started();
8719
8720 for (auto it = m->get_pg_list().begin();
8721 it != m->get_pg_list().end();
8722 ++it) {
8723 if (it->first.info.pgid.preferred() >= 0) {
8724 dout(20) << "ignoring localized pg " << it->first.info.pgid << dendl;
8725 continue;
8726 }
8727
8728 handle_pg_peering_evt(
8729 spg_t(it->first.info.pgid.pgid, it->first.to),
8730 it->first.info.history, it->second,
8731 it->first.query_epoch,
8732 PG::CephPeeringEvtRef(
8733 new PG::CephPeeringEvt(
8734 it->first.epoch_sent, it->first.query_epoch,
8735 PG::MNotifyRec(pg_shard_t(from, it->first.from), it->first,
8736 op->get_req()->get_connection()->get_features())))
8737 );
8738 }
8739 }
8740
8741 void OSD::handle_pg_log(OpRequestRef op)
8742 {
8743 MOSDPGLog *m = static_cast<MOSDPGLog*>(op->get_nonconst_req());
8744 assert(m->get_type() == MSG_OSD_PG_LOG);
8745 dout(7) << "handle_pg_log " << *m << " from " << m->get_source() << dendl;
8746
8747 if (!require_osd_peer(op->get_req()))
8748 return;
8749
8750 int from = m->get_source().num();
8751 if (!require_same_or_newer_map(op, m->get_epoch(), false))
8752 return;
8753
8754 if (m->info.pgid.preferred() >= 0) {
8755 dout(10) << "ignoring localized pg " << m->info.pgid << dendl;
8756 return;
8757 }
8758
8759 op->mark_started();
8760 handle_pg_peering_evt(
8761 spg_t(m->info.pgid.pgid, m->to),
8762 m->info.history, m->past_intervals, m->get_epoch(),
8763 PG::CephPeeringEvtRef(
8764 new PG::CephPeeringEvt(
8765 m->get_epoch(), m->get_query_epoch(),
8766 PG::MLogRec(pg_shard_t(from, m->from), m)))
8767 );
8768 }
8769
8770 void OSD::handle_pg_info(OpRequestRef op)
8771 {
8772 const MOSDPGInfo *m = static_cast<const MOSDPGInfo *>(op->get_req());
8773 assert(m->get_type() == MSG_OSD_PG_INFO);
8774 dout(7) << "handle_pg_info " << *m << " from " << m->get_source() << dendl;
8775
8776 if (!require_osd_peer(op->get_req()))
8777 return;
8778
8779 int from = m->get_source().num();
8780 if (!require_same_or_newer_map(op, m->get_epoch(), false))
8781 return;
8782
8783 op->mark_started();
8784
8785 for (auto p = m->pg_list.begin();
8786 p != m->pg_list.end();
8787 ++p) {
8788 if (p->first.info.pgid.preferred() >= 0) {
8789 dout(10) << "ignoring localized pg " << p->first.info.pgid << dendl;
8790 continue;
8791 }
8792
8793 handle_pg_peering_evt(
8794 spg_t(p->first.info.pgid.pgid, p->first.to),
8795 p->first.info.history, p->second, p->first.epoch_sent,
8796 PG::CephPeeringEvtRef(
8797 new PG::CephPeeringEvt(
8798 p->first.epoch_sent, p->first.query_epoch,
8799 PG::MInfoRec(
8800 pg_shard_t(
8801 from, p->first.from), p->first.info, p->first.epoch_sent)))
8802 );
8803 }
8804 }
8805
8806 void OSD::handle_pg_trim(OpRequestRef op)
8807 {
8808 const MOSDPGTrim *m = static_cast<const MOSDPGTrim*>(op->get_req());
8809 assert(m->get_type() == MSG_OSD_PG_TRIM);
8810
8811 dout(7) << "handle_pg_trim " << *m << " from " << m->get_source() << dendl;
8812
8813 if (!require_osd_peer(op->get_req()))
8814 return;
8815
8816 int from = m->get_source().num();
8817 if (!require_same_or_newer_map(op, m->epoch, false))
8818 return;
8819
8820 if (m->pgid.preferred() >= 0) {
8821 dout(10) << "ignoring localized pg " << m->pgid << dendl;
8822 return;
8823 }
8824
8825 op->mark_started();
8826
8827 PG *pg = _lookup_lock_pg(m->pgid);
8828 if (!pg) {
8829 dout(10) << " don't have pg " << m->pgid << dendl;
8830 return;
8831 }
8832
8833 if (m->epoch < pg->info.history.same_interval_since) {
8834 dout(10) << *pg << " got old trim to " << m->trim_to << ", ignoring" << dendl;
8835 pg->unlock();
8836 return;
8837 }
8838
8839 if (pg->is_primary()) {
8840 // peer is informing us of their last_complete_ondisk
8841 dout(10) << *pg << " replica osd." << from << " lcod " << m->trim_to << dendl;
8842 pg->peer_last_complete_ondisk[pg_shard_t(from, m->pgid.shard)] =
8843 m->trim_to;
8844 // trim log when the pg is recovered
8845 pg->calc_min_last_complete_ondisk();
8846 } else {
8847 // primary is instructing us to trim
8848 ObjectStore::Transaction t;
8849 pg->pg_log.trim(m->trim_to, pg->info);
8850 pg->dirty_info = true;
8851 pg->write_if_dirty(t);
8852 int tr = store->queue_transaction(pg->osr.get(), std::move(t), NULL);
8853 assert(tr == 0);
8854 }
8855 pg->unlock();
8856 }
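// MOSDPGTrim flows in both directions (versions below are hypothetical): a
// replica sends trim_to = its last_complete_ondisk, which the primary above
// only records in peer_last_complete_ondisk; the primary sends trim_to = the
// minimum over all peers, which a replica applies directly to its pg log.
// E.g. if replicas report lcod 120'30 and 120'28, the primary may later
// instruct trimming to at most 120'28, never past the slowest peer.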
8857
8858 void OSD::handle_pg_backfill_reserve(OpRequestRef op)
8859 {
8860 const MBackfillReserve *m = static_cast<const MBackfillReserve*>(op->get_req());
8861 assert(m->get_type() == MSG_OSD_BACKFILL_RESERVE);
8862
8863 if (!require_osd_peer(op->get_req()))
8864 return;
8865 if (!require_same_or_newer_map(op, m->query_epoch, false))
8866 return;
8867
8868 PG::CephPeeringEvtRef evt;
8869 if (m->type == MBackfillReserve::REQUEST) {
8870 evt = PG::CephPeeringEvtRef(
8871 new PG::CephPeeringEvt(
8872 m->query_epoch,
8873 m->query_epoch,
8874 PG::RequestBackfillPrio(m->priority)));
8875 } else if (m->type == MBackfillReserve::GRANT) {
8876 evt = PG::CephPeeringEvtRef(
8877 new PG::CephPeeringEvt(
8878 m->query_epoch,
8879 m->query_epoch,
8880 PG::RemoteBackfillReserved()));
8881 } else if (m->type == MBackfillReserve::REJECT) {
8882 evt = PG::CephPeeringEvtRef(
8883 new PG::CephPeeringEvt(
8884 m->query_epoch,
8885 m->query_epoch,
8886 PG::RemoteReservationRejected()));
8887 } else {
8888 ceph_abort();
8889 }
8890
8891 if (service.splitting(m->pgid)) {
8892 peering_wait_for_split[m->pgid].push_back(evt);
8893 return;
8894 }
8895
8896 PG *pg = _lookup_lock_pg(m->pgid);
8897 if (!pg) {
8898 dout(10) << " don't have pg " << m->pgid << dendl;
8899 return;
8900 }
8901
8902 pg->queue_peering_event(evt);
8903 pg->unlock();
8904 }
8905
8906 void OSD::handle_pg_recovery_reserve(OpRequestRef op)
8907 {
8908 const MRecoveryReserve *m = static_cast<const MRecoveryReserve*>(op->get_req());
8909 assert(m->get_type() == MSG_OSD_RECOVERY_RESERVE);
8910
8911 if (!require_osd_peer(op->get_req()))
8912 return;
8913 if (!require_same_or_newer_map(op, m->query_epoch, false))
8914 return;
8915
8916 PG::CephPeeringEvtRef evt;
8917 if (m->type == MRecoveryReserve::REQUEST) {
8918 evt = PG::CephPeeringEvtRef(
8919 new PG::CephPeeringEvt(
8920 m->query_epoch,
8921 m->query_epoch,
8922 PG::RequestRecovery()));
8923 } else if (m->type == MRecoveryReserve::GRANT) {
8924 evt = PG::CephPeeringEvtRef(
8925 new PG::CephPeeringEvt(
8926 m->query_epoch,
8927 m->query_epoch,
8928 PG::RemoteRecoveryReserved()));
8929 } else if (m->type == MRecoveryReserve::RELEASE) {
8930 evt = PG::CephPeeringEvtRef(
8931 new PG::CephPeeringEvt(
8932 m->query_epoch,
8933 m->query_epoch,
8934 PG::RecoveryDone()));
8935 } else {
8936 ceph_abort();
8937 }
8938
8939 if (service.splitting(m->pgid)) {
8940 peering_wait_for_split[m->pgid].push_back(evt);
8941 return;
8942 }
8943
8944 PG *pg = _lookup_lock_pg(m->pgid);
8945 if (!pg) {
8946 dout(10) << " don't have pg " << m->pgid << dendl;
8947 return;
8948 }
8949
8950 pg->queue_peering_event(evt);
8951 pg->unlock();
8952 }
8953
8954 void OSD::handle_force_recovery(Message *m)
8955 {
8956 MOSDForceRecovery *msg = static_cast<MOSDForceRecovery*>(m);
8957 assert(msg->get_type() == MSG_OSD_FORCE_RECOVERY);
8958 RWLock::RLocker l(pg_map_lock);
8959
8960 vector<PG*> local_pgs;
8961 local_pgs.reserve(msg->forced_pgs.size());
8962
8963 for (auto& i : msg->forced_pgs) {
8964 spg_t locpg;
8965 if (osdmap->get_primary_shard(i, &locpg)) {
8966 auto pg_map_entry = pg_map.find(locpg);
8967 if (pg_map_entry != pg_map.end()) {
8968 local_pgs.push_back(pg_map_entry->second);
8969 }
8970 }
8971 }
8972
8973 if (!local_pgs.empty()) {
8974 service.adjust_pg_priorities(local_pgs, msg->options);
8975 }
8976
8977 msg->put();
8978 }
8979
8980 /** PGQuery
8981 * from primary to replica | stray
8982 * NOTE: called with opqueue active.
8983 */
8984 void OSD::handle_pg_query(OpRequestRef op)
8985 {
8986 assert(osd_lock.is_locked());
8987
8988 const MOSDPGQuery *m = static_cast<const MOSDPGQuery*>(op->get_req());
8989 assert(m->get_type() == MSG_OSD_PG_QUERY);
8990
8991 if (!require_osd_peer(op->get_req()))
8992 return;
8993
8994 dout(7) << "handle_pg_query from " << m->get_source() << " epoch " << m->get_epoch() << dendl;
8995 int from = m->get_source().num();
8996
8997 if (!require_same_or_newer_map(op, m->get_epoch(), false))
8998 return;
8999
9000 op->mark_started();
9001
9002 map< int, vector<pair<pg_notify_t, PastIntervals> > > notify_list;
9003
9004 for (auto it = m->pg_list.begin();
9005 it != m->pg_list.end();
9006 ++it) {
9007 spg_t pgid = it->first;
9008
9009 if (pgid.preferred() >= 0) {
9010 dout(10) << "ignoring localized pg " << pgid << dendl;
9011 continue;
9012 }
9013
9014 if (service.splitting(pgid)) {
9015 peering_wait_for_split[pgid].push_back(
9016 PG::CephPeeringEvtRef(
9017 new PG::CephPeeringEvt(
9018 it->second.epoch_sent, it->second.epoch_sent,
9019 PG::MQuery(pg_shard_t(from, it->second.from),
9020 it->second, it->second.epoch_sent))));
9021 continue;
9022 }
9023
9024 {
9025 RWLock::RLocker l(pg_map_lock);
9026 if (pg_map.count(pgid)) {
9027 PG *pg = _lookup_lock_pg_with_map_lock_held(pgid);
9029 pg->queue_query(
9030 it->second.epoch_sent, it->second.epoch_sent,
9031 pg_shard_t(from, it->second.from), it->second);
9032 pg->unlock();
9033 continue;
9034 }
9035 }
9036
9037 if (!osdmap->have_pg_pool(pgid.pool()))
9038 continue;
9039
9040 // get active crush mapping
9041 int up_primary, acting_primary;
9042 vector<int> up, acting;
9043 osdmap->pg_to_up_acting_osds(
9044 pgid.pgid, &up, &up_primary, &acting, &acting_primary);
9045
9046 // same primary?
9047 pg_history_t history = it->second.history;
9048 bool valid_history = project_pg_history(
9049 pgid, history, it->second.epoch_sent,
9050 up, up_primary, acting, acting_primary);
9051
9052 if (!valid_history ||
9053 it->second.epoch_sent < history.same_interval_since) {
9054 dout(10) << " pg " << pgid << " dne, and pg has changed in "
9055 << history.same_interval_since
9056 << " (msg from " << it->second.epoch_sent << ")" << dendl;
9057 continue;
9058 }
9059
9060 dout(10) << " pg " << pgid << " dne" << dendl;
9061 pg_info_t empty(spg_t(pgid.pgid, it->second.to));
9062 /* This is racy, but that should be ok: if we complete the deletion
9063 * before the pg is recreated, we'll just start it off backfilling
9064 * instead of starting it off empty */
9065 if (service.deleting_pgs.lookup(pgid))
9066 empty.set_last_backfill(hobject_t());
9067 if (it->second.type == pg_query_t::LOG ||
9068 it->second.type == pg_query_t::FULLLOG) {
9069 ConnectionRef con = service.get_con_osd_cluster(from, osdmap->get_epoch());
9070 if (con) {
9071 MOSDPGLog *mlog = new MOSDPGLog(
9072 it->second.from, it->second.to,
9073 osdmap->get_epoch(), empty,
9074 it->second.epoch_sent);
9075 service.share_map_peer(from, con.get(), osdmap);
9076 con->send_message(mlog);
9077 }
9078 } else {
9079 notify_list[from].push_back(
9080 make_pair(
9081 pg_notify_t(
9082 it->second.from, it->second.to,
9083 it->second.epoch_sent,
9084 osdmap->get_epoch(),
9085 empty),
9086 PastIntervals(
9087 osdmap->get_pools().at(pgid.pool()).ec_pool(),
9088 *osdmap)));
9089 }
9090 }
9091 do_notifies(notify_list, osdmap);
9092 }
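// Summary of the reply policy above for PGs we do not have: LOG and FULLLOG
// queries are answered immediately with an empty MOSDPGLog on the cluster
// connection, while every other query type is batched into notify_list and
// answered with an empty pg_notify_t via do_notifies() at the end.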
9093
9094
9095 void OSD::handle_pg_remove(OpRequestRef op)
9096 {
9097 const MOSDPGRemove *m = static_cast<const MOSDPGRemove *>(op->get_req());
9098 assert(m->get_type() == MSG_OSD_PG_REMOVE);
9099 assert(osd_lock.is_locked());
9100
9101 if (!require_osd_peer(op->get_req()))
9102 return;
9103
9104 dout(7) << "handle_pg_remove from " << m->get_source() << " on "
9105 << m->pg_list.size() << " pgs" << dendl;
9106
9107 if (!require_same_or_newer_map(op, m->get_epoch(), false))
9108 return;
9109
9110 op->mark_started();
9111
9112 for (auto it = m->pg_list.begin();
9113 it != m->pg_list.end();
9114 ++it) {
9115 spg_t pgid = *it;
9116 if (pgid.preferred() >= 0) {
9117 dout(10) << "ignoring localized pg " << pgid << dendl;
9118 continue;
9119 }
9120
9121 RWLock::WLocker l(pg_map_lock);
9122 if (pg_map.count(pgid) == 0) {
9123 dout(10) << " don't have pg " << pgid << dendl;
9124 continue;
9125 }
9126 dout(5) << "queue_pg_for_deletion: " << pgid << dendl;
9127 PG *pg = _lookup_lock_pg_with_map_lock_held(pgid);
9128 pg_history_t history = pg->info.history;
9129 int up_primary, acting_primary;
9130 vector<int> up, acting;
9131 osdmap->pg_to_up_acting_osds(
9132 pgid.pgid, &up, &up_primary, &acting, &acting_primary);
9133 bool valid_history = project_pg_history(
9134 pg->info.pgid, history, pg->get_osdmap()->get_epoch(),
9135 up, up_primary, acting, acting_primary);
9136 if (valid_history &&
9137 history.same_interval_since <= m->get_epoch()) {
9138 assert(pg->get_primary().osd == m->get_source().num());
9139 PGRef _pg(pg);
9140 _remove_pg(pg);
9141 pg->unlock();
9142 } else {
9143 dout(10) << *pg << " ignoring remove request, pg changed in epoch "
9144 << history.same_interval_since
9145 << " > " << m->get_epoch() << dendl;
9146 pg->unlock();
9147 }
9148 }
9149 }
9150
9151 void OSD::_remove_pg(PG *pg)
9152 {
9153 ObjectStore::Transaction rmt;
9154
9155 // on_removal, which calls remove_watchers_and_notifies, and the erasure from
9156 // the pg_map must be done together without unlocking the pg lock,
9157 // to avoid racing with watcher cleanup in ms_handle_reset
9158 // and handle_notify_timeout
9159 pg->on_removal(&rmt);
9160
9161 service.cancel_pending_splits_for_parent(pg->info.pgid);
9162 int tr = store->queue_transaction(
9163 pg->osr.get(), std::move(rmt), NULL,
9164 new ContainerContext<
9165 SequencerRef>(pg->osr));
9166 assert(tr == 0);
9167
9168 DeletingStateRef deleting = service.deleting_pgs.lookup_or_create(
9169 pg->info.pgid,
9170 make_pair(
9171 pg->info.pgid,
9172 PGRef(pg))
9173 );
9174 remove_wq.queue(make_pair(PGRef(pg), deleting));
9175
9176 service.pg_remove_epoch(pg->info.pgid);
9177
9178 // dereference from op_wq
9179 op_shardedwq.clear_pg_pointer(pg->info.pgid);
9180
9181 // remove from map
9182 pg_map.erase(pg->info.pgid);
9183 pg->put("PGMap"); // since we've taken it out of map
9184 }
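// Ordering note for the teardown above: on_removal() and the pg_map erase
// happen under a single hold of the pg lock, the removal transaction is
// queued on the pg's own sequencer so it serializes behind in-flight I/O,
// and the actual object deletion is deferred to remove_wq via DeletingState.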
9185
9186
9187 // =========================================================
9188 // RECOVERY
9189
9190 void OSDService::_maybe_queue_recovery() {
9191 assert(recovery_lock.is_locked_by_me());
9192 uint64_t available_pushes;
9193 while (!awaiting_throttle.empty() &&
9194 _recover_now(&available_pushes)) {
9195 uint64_t to_start = MIN(
9196 available_pushes,
9197 cct->_conf->osd_recovery_max_single_start);
9198 _queue_for_recovery(awaiting_throttle.front(), to_start);
9199 awaiting_throttle.pop_front();
9200 recovery_ops_reserved += to_start;
9201 }
9202 }
9203
9204 bool OSDService::_recover_now(uint64_t *available_pushes)
9205 {
9206 if (available_pushes)
9207 *available_pushes = 0;
9208
9209 if (ceph_clock_now() < defer_recovery_until) {
9210 dout(15) << __func__ << " defer until " << defer_recovery_until << dendl;
9211 return false;
9212 }
9213
9214 if (recovery_paused) {
9215 dout(15) << __func__ << " paused" << dendl;
9216 return false;
9217 }
9218
9219 uint64_t max = cct->_conf->osd_recovery_max_active;
9220 if (max <= recovery_ops_active + recovery_ops_reserved) {
9221 dout(15) << __func__ << " active " << recovery_ops_active
9222 << " + reserved " << recovery_ops_reserved
9223 << " >= max " << max << dendl;
9224 return false;
9225 }
9226
9227 if (available_pushes)
9228 *available_pushes = max - recovery_ops_active - recovery_ops_reserved;
9229
9230 return true;
9231 }
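// A minimal sketch of the throttle arithmetic above (max_active, active and
// reserved are hypothetical stand-ins for osd_recovery_max_active and the
// two counters _recover_now() checks):
//
//   uint64_t available(uint64_t max_active, uint64_t active,
//                      uint64_t reserved) {
//     return max_active <= active + reserved
//       ? 0                                  // throttled: nothing may start
//       : max_active - active - reserved;
//   }
//
// e.g. max_active=3, active=1, reserved=1 -> one more push may start now.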
9232
9233
9234 void OSDService::adjust_pg_priorities(vector<PG*> pgs, int newflags)
9235 {
9236 if (pgs.empty() || !(newflags & (OFR_BACKFILL | OFR_RECOVERY)))
9237 return;
9238 int newstate = 0;
9239
9240 Mutex::Locker l(recovery_lock);
9241
9242 if (newflags & OFR_BACKFILL) {
9243 newstate = PG_STATE_FORCED_BACKFILL;
9244 } else if (newflags & OFR_RECOVERY) {
9245 newstate = PG_STATE_FORCED_RECOVERY;
9246 }
9247
9248 // this debug output may get large; generate it only when the debug level
9249 // is at least 10, and keep it short by printing bare pg ids
9250 if (cct->_conf->subsys.should_gather(ceph_subsys_osd, 10)) {
9251 stringstream ss;
9252
9253 for (auto& i : pgs) {
9254 ss << i->get_pgid() << " ";
9255 }
9256
9257 dout(10) << __func__ << " working on " << ss.str() << dendl;
9258 }
9259
9260 if (newflags & OFR_CANCEL) {
9261 for (auto& i : pgs) {
9262 i->change_recovery_force_mode(newstate, true);
9263 }
9264 } else {
9265 for (auto& i : pgs) {
9266 // make sure the PG is in a state that allows forcing backfill or recovery;
9267 // otherwise the PG would keep its FORCE_* flag forever, and clearing it
9268 // would require an osd restart or another forced recovery/backfill.
9269 int pgstate = i->get_state();
9270 if ( ((newstate == PG_STATE_FORCED_RECOVERY) && (pgstate & (PG_STATE_DEGRADED | PG_STATE_RECOVERY_WAIT | PG_STATE_RECOVERING))) ||
9271 ((newstate == PG_STATE_FORCED_BACKFILL) && (pgstate & (PG_STATE_DEGRADED | PG_STATE_BACKFILL_WAIT | PG_STATE_BACKFILL))) )
9272 i->change_recovery_force_mode(newstate, false);
9273 }
9274 }
9275 }
9276
9277 void OSD::do_recovery(
9278 PG *pg, epoch_t queued, uint64_t reserved_pushes,
9279 ThreadPool::TPHandle &handle)
9280 {
9281 uint64_t started = 0;
9282
9283 /*
9284 * When the value of osd_recovery_sleep is set greater than zero, recovery
9285 * ops are scheduled after osd_recovery_sleep amount of time from the previous
9286 * recovery event's schedule time. This is done by adding a
9287 * recovery_requeue_callback event, which re-queues the recovery op using
9288 * queue_recovery_after_sleep.
9289 */
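// For example (hypothetical times): with osd_recovery_sleep = 0.1s and the
// previous event scheduled at t=5.00s, an op dequeued at t=5.02s is scheduled
// for t=5.10s, while one dequeued at t=6.00s resets the base and runs at
// t=6.10s; i.e. schedule_time = max(now, schedule_time) + recovery_sleep.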
9290 float recovery_sleep = get_osd_recovery_sleep();
9291 if (recovery_sleep > 0 && service.recovery_needs_sleep) {
9292 PGRef pgref(pg);
9293 auto recovery_requeue_callback = new FunctionContext([this, pgref, queued, reserved_pushes](int r) {
9294 dout(20) << "do_recovery wake up at "
9295 << ceph_clock_now()
9296 << ", re-queuing recovery" << dendl;
9297 service.recovery_needs_sleep = false;
9298 service.queue_recovery_after_sleep(pgref.get(), queued, reserved_pushes);
9299 });
9300 Mutex::Locker l(service.recovery_sleep_lock);
9301
9302 // This is true for the first recovery op, and when the previous recovery
9303 // op was scheduled in the past. In that case the next recovery op is
9304 // scheduled to run after the sleep interval, counted from now.
9305 if (service.recovery_schedule_time < ceph_clock_now()) {
9306 service.recovery_schedule_time = ceph_clock_now();
9307 }
9308 service.recovery_schedule_time += recovery_sleep;
9309 service.recovery_sleep_timer.add_event_at(service.recovery_schedule_time,
9310 recovery_requeue_callback);
9311 dout(20) << "Recovery event scheduled at "
9312 << service.recovery_schedule_time << dendl;
9313 return;
9314 }
9315
9316 {
9317 service.recovery_needs_sleep = true;
9318 if (pg->pg_has_reset_since(queued)) {
9319 goto out;
9320 }
9321
9322 assert(!pg->deleting);
9323 assert(pg->is_peered() && pg->is_primary());
9324
9325 assert(pg->recovery_queued);
9326 pg->recovery_queued = false;
9327
9328 dout(10) << "do_recovery starting " << reserved_pushes << " " << *pg << dendl;
9329 #ifdef DEBUG_RECOVERY_OIDS
9330 dout(20) << " active was " << service.recovery_oids[pg->info.pgid] << dendl;
9331 #endif
9332
9333 bool more = pg->start_recovery_ops(reserved_pushes, handle, &started);
9334 dout(10) << "do_recovery started " << started << "/" << reserved_pushes
9335 << " on " << *pg << dendl;
9336
9337 // If no recovery op is started, don't bother to manipulate the RecoveryCtx
9338 if (!started && (more || !pg->have_unfound())) {
9339 goto out;
9340 }
9341
9342 PG::RecoveryCtx rctx = create_context();
9343 rctx.handle = &handle;
9344
9345 /*
9346 * if we couldn't start any recovery ops and things are still
9347 * unfound, see if we can discover more missing object locations.
9348 * It may be that our initial locations were bad and we errored
9349 * out while trying to pull.
9350 */
9351 if (!more && pg->have_unfound()) {
9352 pg->discover_all_missing(*rctx.query_map);
9353 if (rctx.query_map->empty()) {
9354 string action;
9355 if (pg->state_test(PG_STATE_BACKFILL)) {
9356 auto evt = PG::CephPeeringEvtRef(new PG::CephPeeringEvt(
9357 queued,
9358 queued,
9359 PG::CancelBackfill()));
9360 pg->queue_peering_event(evt);
9361 action = "in backfill";
9362 } else if (pg->state_test(PG_STATE_RECOVERING)) {
9363 auto evt = PG::CephPeeringEvtRef(new PG::CephPeeringEvt(
9364 queued,
9365 queued,
9366 PG::CancelRecovery()));
9367 pg->queue_peering_event(evt);
9368 action = "in recovery";
9369 } else {
9370 action = "already out of recovery/backfill";
9371 }
9372 dout(10) << __func__ << ": no luck, giving up on this pg for now (" << action << ")" << dendl;
9373 } else {
9374 dout(10) << __func__ << ": no luck, giving up on this pg for now (queue_recovery)" << dendl;
9375 pg->queue_recovery();
9376 }
9377 }
9378
9379 pg->write_if_dirty(*rctx.transaction);
9380 OSDMapRef curmap = pg->get_osdmap();
9381 dispatch_context(rctx, pg, curmap);
9382 }
9383
9384 out:
9385 assert(started <= reserved_pushes);
9386 service.release_reserved_pushes(reserved_pushes);
9387 }
9388
9389 void OSDService::start_recovery_op(PG *pg, const hobject_t& soid)
9390 {
9391 Mutex::Locker l(recovery_lock);
9392 dout(10) << "start_recovery_op " << *pg << " " << soid
9393 << " (" << recovery_ops_active << "/"
9394 << cct->_conf->osd_recovery_max_active << " rops)"
9395 << dendl;
9396 recovery_ops_active++;
9397
9398 #ifdef DEBUG_RECOVERY_OIDS
9399 dout(20) << " active was " << recovery_oids[pg->info.pgid] << dendl;
9400 assert(recovery_oids[pg->info.pgid].count(soid) == 0);
9401 recovery_oids[pg->info.pgid].insert(soid);
9402 #endif
9403 }
9404
9405 void OSDService::finish_recovery_op(PG *pg, const hobject_t& soid, bool dequeue)
9406 {
9407 Mutex::Locker l(recovery_lock);
9408 dout(10) << "finish_recovery_op " << *pg << " " << soid
9409 << " dequeue=" << dequeue
9410 << " (" << recovery_ops_active << "/" << cct->_conf->osd_recovery_max_active << " rops)"
9411 << dendl;
9412
9413 // adjust count
9414 assert(recovery_ops_active > 0);
9415 recovery_ops_active--;
9416
9417 #ifdef DEBUG_RECOVERY_OIDS
9418 dout(20) << " active oids was " << recovery_oids[pg->info.pgid] << dendl;
9419 assert(recovery_oids[pg->info.pgid].count(soid));
9420 recovery_oids[pg->info.pgid].erase(soid);
9421 #endif
9422
9423 _maybe_queue_recovery();
9424 }
9425
9426 bool OSDService::is_recovery_active()
9427 {
9428 Mutex::Locker l(recovery_lock);
9429 return recovery_ops_active > 0;
9430 }
9431
9432 // =========================================================
9433 // OPS
9434
9435 bool OSD::op_is_discardable(const MOSDOp *op)
9436 {
9437 // drop the client request if the client is no longer connected and
9438 // can't receive the reply anyway.
9439 if (!op->get_connection()->is_connected()) {
9440 return true;
9441 }
9442 return false;
9443 }
9444
9445 void OSD::enqueue_op(spg_t pg, OpRequestRef& op, epoch_t epoch)
9446 {
9447 utime_t latency = ceph_clock_now() - op->get_req()->get_recv_stamp();
9448 dout(15) << "enqueue_op " << op << " prio " << op->get_req()->get_priority()
9449 << " cost " << op->get_req()->get_cost()
9450 << " latency " << latency
9451 << " epoch " << epoch
9452 << " " << *(op->get_req()) << dendl;
9453 op->osd_trace.event("enqueue op");
9454 op->osd_trace.keyval("priority", op->get_req()->get_priority());
9455 op->osd_trace.keyval("cost", op->get_req()->get_cost());
9456 op->mark_queued_for_pg();
9457 logger->tinc(l_osd_op_before_queue_op_lat, latency);
9458 op_shardedwq.queue(make_pair(pg, PGQueueable(op, epoch)));
9459 }
9460
9461
9462
9463 /*
9464 * NOTE: dequeue called in worker thread, with pg lock
9465 */
9466 void OSD::dequeue_op(
9467 PGRef pg, OpRequestRef op,
9468 ThreadPool::TPHandle &handle)
9469 {
9470 FUNCTRACE();
9471 OID_EVENT_TRACE_WITH_MSG(op->get_req(), "DEQUEUE_OP_BEGIN", false);
9472
9473 utime_t now = ceph_clock_now();
9474 op->set_dequeued_time(now);
9475 utime_t latency = now - op->get_req()->get_recv_stamp();
9476 dout(10) << "dequeue_op " << op << " prio " << op->get_req()->get_priority()
9477 << " cost " << op->get_req()->get_cost()
9478 << " latency " << latency
9479 << " " << *(op->get_req())
9480 << " pg " << *pg << dendl;
9481
9482 logger->tinc(l_osd_op_before_dequeue_op_lat, latency);
9483
9484 Session *session = static_cast<Session *>(
9485 op->get_req()->get_connection()->get_priv());
9486 if (session) {
9487 maybe_share_map(session, op, pg->get_osdmap());
9488 session->put();
9489 }
9490
9491 if (pg->deleting)
9492 return;
9493
9494 op->mark_reached_pg();
9495 op->osd_trace.event("dequeue_op");
9496
9497 pg->do_request(op, handle);
9498
9499 // finish
9500 dout(10) << "dequeue_op " << op << " finish" << dendl;
9501 OID_EVENT_TRACE_WITH_MSG(op->get_req(), "DEQUEUE_OP_END", false);
9502 }
9503
9504
9505 struct C_CompleteSplits : public Context {
9506 OSD *osd;
9507 set<PGRef> pgs;
9508 C_CompleteSplits(OSD *osd, const set<PGRef> &in)
9509 : osd(osd), pgs(in) {}
9510 void finish(int r) override {
9511 Mutex::Locker l(osd->osd_lock);
9512 if (osd->is_stopping())
9513 return;
9514 PG::RecoveryCtx rctx = osd->create_context();
9515 for (set<PGRef>::iterator i = pgs.begin();
9516 i != pgs.end();
9517 ++i) {
9518 osd->pg_map_lock.get_write();
9519 (*i)->lock();
9520 PG *pg = i->get();
9521 osd->add_newly_split_pg(pg, &rctx);
9522 if (!((*i)->deleting)) {
9523 set<spg_t> to_complete;
9524 to_complete.insert((*i)->info.pgid);
9525 osd->service.complete_split(to_complete);
9526 }
9527 osd->pg_map_lock.put_write();
9528 osd->dispatch_context_transaction(rctx, pg);
9529 osd->wake_pg_waiters(*i);
9530 (*i)->unlock();
9531 }
9532
9533 osd->dispatch_context(rctx, 0, osd->service.get_osdmap());
9534 }
9535 };
9536
9537 void OSD::process_peering_events(
9538 const list<PG*> &pgs,
9539 ThreadPool::TPHandle &handle
9540 )
9541 {
9542 bool need_up_thru = false;
9543 epoch_t same_interval_since = 0;
9544 OSDMapRef curmap;
9545 PG::RecoveryCtx rctx = create_context();
9546 rctx.handle = &handle;
9547 for (list<PG*>::const_iterator i = pgs.begin();
9548 i != pgs.end();
9549 ++i) {
9550 set<PGRef> split_pgs;
9551 PG *pg = *i;
9552 pg->lock_suspend_timeout(handle);
9553 curmap = service.get_osdmap();
9554 if (pg->deleting) {
9555 pg->unlock();
9556 continue;
9557 }
9558 if (!advance_pg(curmap->get_epoch(), pg, handle, &rctx, &split_pgs)) {
9559 // we need to requeue the PG explicitly since we didn't actually
9560 // handle an event
9561 peering_wq.queue(pg);
9562 } else {
9563 assert(!pg->peering_queue.empty());
9564 PG::CephPeeringEvtRef evt = pg->peering_queue.front();
9565 pg->peering_queue.pop_front();
9566 pg->handle_peering_event(evt, &rctx);
9567 }
9568 need_up_thru = pg->need_up_thru || need_up_thru;
9569 same_interval_since = MAX(pg->info.history.same_interval_since,
9570 same_interval_since);
9571 pg->write_if_dirty(*rctx.transaction);
9572 if (!split_pgs.empty()) {
9573 rctx.on_applied->add(new C_CompleteSplits(this, split_pgs));
9574 split_pgs.clear();
9575 }
9576 dispatch_context_transaction(rctx, pg, &handle);
9577 pg->unlock();
9578 }
9579 if (need_up_thru)
9580 queue_want_up_thru(same_interval_since);
9581 dispatch_context(rctx, 0, curmap, &handle);
9582
9583 service.send_pg_temp();
9584 }
9585
9586 // --------------------------------
9587
9588 const char** OSD::get_tracked_conf_keys() const
9589 {
9590 static const char* KEYS[] = {
9591 "osd_max_backfills",
9592 "osd_min_recovery_priority",
9593 "osd_max_trimming_pgs",
9594 "osd_op_complaint_time",
9595 "osd_op_log_threshold",
9596 "osd_op_history_size",
9597 "osd_op_history_duration",
9598 "osd_op_history_slow_op_size",
9599 "osd_op_history_slow_op_threshold",
9600 "osd_enable_op_tracker",
9601 "osd_map_cache_size",
9602 "osd_map_max_advance",
9603 "osd_pg_epoch_persisted_max_stale",
9604 "osd_disk_thread_ioprio_class",
9605 "osd_disk_thread_ioprio_priority",
9606 // clog & admin clog
9607 "clog_to_monitors",
9608 "clog_to_syslog",
9609 "clog_to_syslog_facility",
9610 "clog_to_syslog_level",
9611 "osd_objectstore_fuse",
9612 "clog_to_graylog",
9613 "clog_to_graylog_host",
9614 "clog_to_graylog_port",
9615 "host",
9616 "fsid",
9617 "osd_recovery_delay_start",
9618 "osd_client_message_size_cap",
9619 "osd_client_message_cap",
9620 "osd_heartbeat_min_size",
9621 "osd_heartbeat_interval",
9622 NULL
9623 };
9624 return KEYS;
9625 }
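// A minimal sketch of the observer contract used here (hypothetical MyObs;
// OSD fills this role via md_config_obs_t): the config subsystem intersects
// each config update with get_tracked_conf_keys() and, when the overlap is
// non-empty, calls handle_conf_change() with the changed key names:
//
//   struct MyObs : public md_config_obs_t {
//     const char** get_tracked_conf_keys() const override {
//       static const char* KEYS[] = { "osd_max_backfills", NULL };
//       return KEYS;
//     }
//     void handle_conf_change(const struct md_config_t *conf,
//                             const std::set<std::string> &changed) override {
//       if (changed.count("osd_max_backfills")) {
//         // re-read conf->osd_max_backfills and apply it
//       }
//     }
//   };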
9626
9627 void OSD::handle_conf_change(const struct md_config_t *conf,
9628 const std::set <std::string> &changed)
9629 {
9630 if (changed.count("osd_max_backfills")) {
9631 service.local_reserver.set_max(cct->_conf->osd_max_backfills);
9632 service.remote_reserver.set_max(cct->_conf->osd_max_backfills);
9633 }
9634 if (changed.count("osd_min_recovery_priority")) {
9635 service.local_reserver.set_min_priority(cct->_conf->osd_min_recovery_priority);
9636 service.remote_reserver.set_min_priority(cct->_conf->osd_min_recovery_priority);
9637 }
9638 if (changed.count("osd_max_trimming_pgs")) {
9639 service.snap_reserver.set_max(cct->_conf->osd_max_trimming_pgs);
9640 }
9641 if (changed.count("osd_op_complaint_time") ||
9642 changed.count("osd_op_log_threshold")) {
9643 op_tracker.set_complaint_and_threshold(cct->_conf->osd_op_complaint_time,
9644 cct->_conf->osd_op_log_threshold);
9645 }
9646 if (changed.count("osd_op_history_size") ||
9647 changed.count("osd_op_history_duration")) {
9648 op_tracker.set_history_size_and_duration(cct->_conf->osd_op_history_size,
9649 cct->_conf->osd_op_history_duration);
9650 }
9651 if (changed.count("osd_op_history_slow_op_size") ||
9652 changed.count("osd_op_history_slow_op_threshold")) {
9653 op_tracker.set_history_slow_op_size_and_threshold(cct->_conf->osd_op_history_slow_op_size,
9654 cct->_conf->osd_op_history_slow_op_threshold);
9655 }
9656 if (changed.count("osd_enable_op_tracker")) {
9657 op_tracker.set_tracking(cct->_conf->osd_enable_op_tracker);
9658 }
9659 if (changed.count("osd_disk_thread_ioprio_class") ||
9660 changed.count("osd_disk_thread_ioprio_priority")) {
9661 set_disk_tp_priority();
9662 }
9663 if (changed.count("osd_map_cache_size")) {
9664 service.map_cache.set_size(cct->_conf->osd_map_cache_size);
9665 service.map_bl_cache.set_size(cct->_conf->osd_map_cache_size);
9666 service.map_bl_inc_cache.set_size(cct->_conf->osd_map_cache_size);
9667 }
9668 if (changed.count("clog_to_monitors") ||
9669 changed.count("clog_to_syslog") ||
9670 changed.count("clog_to_syslog_level") ||
9671 changed.count("clog_to_syslog_facility") ||
9672 changed.count("clog_to_graylog") ||
9673 changed.count("clog_to_graylog_host") ||
9674 changed.count("clog_to_graylog_port") ||
9675 changed.count("host") ||
9676 changed.count("fsid")) {
9677 update_log_config();
9678 }
9679
9680 #ifdef HAVE_LIBFUSE
9681 if (changed.count("osd_objectstore_fuse")) {
9682 if (store) {
9683 enable_disable_fuse(false);
9684 }
9685 }
9686 #endif
9687
9688 if (changed.count("osd_recovery_delay_start")) {
9689 service.defer_recovery(cct->_conf->osd_recovery_delay_start);
9690 service.kick_recovery_queue();
9691 }
9692
9693 if (changed.count("osd_client_message_cap")) {
9694 uint64_t newval = cct->_conf->osd_client_message_cap;
9695 Messenger::Policy pol = client_messenger->get_policy(entity_name_t::TYPE_CLIENT);
9696 if (pol.throttler_messages && newval > 0) {
9697 pol.throttler_messages->reset_max(newval);
9698 }
9699 }
9700 if (changed.count("osd_client_message_size_cap")) {
9701 uint64_t newval = cct->_conf->osd_client_message_size_cap;
9702 Messenger::Policy pol = client_messenger->get_policy(entity_name_t::TYPE_CLIENT);
9703 if (pol.throttler_bytes && newval > 0) {
9704 pol.throttler_bytes->reset_max(newval);
9705 }
9706 }
9707
9708 check_config();
9709 }
9710
9711 void OSD::update_log_config()
9712 {
9713 map<string,string> log_to_monitors;
9714 map<string,string> log_to_syslog;
9715 map<string,string> log_channel;
9716 map<string,string> log_prio;
9717 map<string,string> log_to_graylog;
9718 map<string,string> log_to_graylog_host;
9719 map<string,string> log_to_graylog_port;
9720 uuid_d fsid;
9721 string host;
9722
9723 if (parse_log_client_options(cct, log_to_monitors, log_to_syslog,
9724 log_channel, log_prio, log_to_graylog,
9725 log_to_graylog_host, log_to_graylog_port,
9726 fsid, host) == 0)
9727 clog->update_config(log_to_monitors, log_to_syslog,
9728 log_channel, log_prio, log_to_graylog,
9729 log_to_graylog_host, log_to_graylog_port,
9730 fsid, host);
9731 derr << "log_to_monitors " << log_to_monitors << dendl;
9732 }
9733
9734 void OSD::check_config()
9735 {
9736 // some sanity checks
9737 if (cct->_conf->osd_map_cache_size <= cct->_conf->osd_map_max_advance + 2) {
9738 clog->warn() << "osd_map_cache_size (" << cct->_conf->osd_map_cache_size << ")"
9739 << " is not > osd_map_max_advance ("
9740 << cct->_conf->osd_map_max_advance << ")";
9741 }
9742 if (cct->_conf->osd_map_cache_size <= (int)cct->_conf->osd_pg_epoch_persisted_max_stale + 2) {
9743 clog->warn() << "osd_map_cache_size (" << cct->_conf->osd_map_cache_size << ")"
9744 << " is not > osd_pg_epoch_persisted_max_stale ("
9745 << cct->_conf->osd_pg_epoch_persisted_max_stale << ")";
9746 }
9747 }
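// Worked example of the checks above (hypothetical values): with
// osd_map_max_advance = 150, osd_map_cache_size must be strictly greater
// than 152, so 200 passes silently while 150 draws the cluster-log warning;
// the osd_pg_epoch_persisted_max_stale check has the same shape.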
9748
9749 void OSD::set_disk_tp_priority()
9750 {
9751 dout(10) << __func__
9752 << " class " << cct->_conf->osd_disk_thread_ioprio_class
9753 << " priority " << cct->_conf->osd_disk_thread_ioprio_priority
9754 << dendl;
9755 if (cct->_conf->osd_disk_thread_ioprio_class.empty() ||
9756 cct->_conf->osd_disk_thread_ioprio_priority < 0)
9757 return;
9758 int cls =
9759 ceph_ioprio_string_to_class(cct->_conf->osd_disk_thread_ioprio_class);
9760 if (cls < 0)
9761 derr << __func__ << " " << cpp_strerror(cls) << ": "
9762 << "osd_disk_thread_ioprio_class is " << cct->_conf->osd_disk_thread_ioprio_class
9763 << " but only the following values are allowed: idle, be or rt" << dendl;
9764 else
9765 disk_tp.set_ioprio(cls, cct->_conf->osd_disk_thread_ioprio_priority);
9766 }
9767
9768 // --------------------------------
9769
9770 void OSD::get_latest_osdmap()
9771 {
9772 dout(10) << __func__ << " -- start" << dendl;
9773
9774 C_SaferCond cond;
9775 service.objecter->wait_for_latest_osdmap(&cond);
9776 cond.wait();
9777
9778 dout(10) << __func__ << " -- finish" << dendl;
9779 }
9780
9781 // --------------------------------
9782
9783 int OSD::init_op_flags(OpRequestRef& op)
9784 {
9785 const MOSDOp *m = static_cast<const MOSDOp*>(op->get_req());
9786 vector<OSDOp>::const_iterator iter;
9787
9788 // client flags have no bearing on whether an op is a read, write, etc.
9789 op->rmw_flags = 0;
9790
9791 if (m->has_flag(CEPH_OSD_FLAG_RWORDERED)) {
9792 op->set_force_rwordered();
9793 }
9794
9795 // set bits based on op codes and called class methods.
9796 for (iter = m->ops.begin(); iter != m->ops.end(); ++iter) {
9797 if ((iter->op.op == CEPH_OSD_OP_WATCH &&
9798 iter->op.watch.op == CEPH_OSD_WATCH_OP_PING)) {
9799 /* This is a bit odd.  PING isn't actually a write.  It can't
9800 * result in an update to the object_info.  PINGs also aren't
9801 * resent, so there's no reason to write out a log entry
9802 *
9803 * However, we pipeline them behind writes, so let's force
9804 * the write_ordered flag.
9805 */
9806 op->set_force_rwordered();
9807 } else {
9808 if (ceph_osd_op_mode_modify(iter->op.op))
9809 op->set_write();
9810 }
9811 if (ceph_osd_op_mode_read(iter->op.op))
9812 op->set_read();
9813
9814 // set READ flag if there are src_oids
9815 if (iter->soid.oid.name.length())
9816 op->set_read();
9817
9818 // set PGOP flag if there are PG ops
9819 if (ceph_osd_op_type_pg(iter->op.op))
9820 op->set_pg_op();
9821
9822 if (ceph_osd_op_mode_cache(iter->op.op))
9823 op->set_cache();
9824
9825 // check for ec base pool
9826 int64_t poolid = m->get_pg().pool();
9827 const pg_pool_t *pool = osdmap->get_pg_pool(poolid);
9828 if (pool && pool->is_tier()) {
9829 const pg_pool_t *base_pool = osdmap->get_pg_pool(pool->tier_of);
9830 if (base_pool && base_pool->require_rollback()) {
9831 if ((iter->op.op != CEPH_OSD_OP_READ) &&
9832 (iter->op.op != CEPH_OSD_OP_CHECKSUM) &&
9833 (iter->op.op != CEPH_OSD_OP_CMPEXT) &&
9834 (iter->op.op != CEPH_OSD_OP_STAT) &&
9835 (iter->op.op != CEPH_OSD_OP_ISDIRTY) &&
9836 (iter->op.op != CEPH_OSD_OP_UNDIRTY) &&
9837 (iter->op.op != CEPH_OSD_OP_GETXATTR) &&
9838 (iter->op.op != CEPH_OSD_OP_GETXATTRS) &&
9839 (iter->op.op != CEPH_OSD_OP_CMPXATTR) &&
9840 (iter->op.op != CEPH_OSD_OP_ASSERT_VER) &&
9841 (iter->op.op != CEPH_OSD_OP_LIST_WATCHERS) &&
9842 (iter->op.op != CEPH_OSD_OP_LIST_SNAPS) &&
9843 (iter->op.op != CEPH_OSD_OP_SETALLOCHINT) &&
9844 (iter->op.op != CEPH_OSD_OP_WRITEFULL) &&
9845 (iter->op.op != CEPH_OSD_OP_ROLLBACK) &&
9846 (iter->op.op != CEPH_OSD_OP_CREATE) &&
9847 (iter->op.op != CEPH_OSD_OP_DELETE) &&
9848 (iter->op.op != CEPH_OSD_OP_SETXATTR) &&
9849 (iter->op.op != CEPH_OSD_OP_RMXATTR) &&
9850 (iter->op.op != CEPH_OSD_OP_STARTSYNC) &&
9851 (iter->op.op != CEPH_OSD_OP_COPY_GET) &&
9852 (iter->op.op != CEPH_OSD_OP_COPY_FROM)) {
9853 op->set_promote();
9854 }
9855 }
9856 }
9857
9858 switch (iter->op.op) {
9859 case CEPH_OSD_OP_CALL:
9860 {
9861 bufferlist::iterator bp = const_cast<bufferlist&>(iter->indata).begin();
9862 int is_write, is_read;
9863 string cname, mname;
9864 bp.copy(iter->op.cls.class_len, cname);
9865 bp.copy(iter->op.cls.method_len, mname);
9866
9867 ClassHandler::ClassData *cls;
9868 int r = class_handler->open_class(cname, &cls);
9869 if (r) {
9870 derr << "class " << cname << " open got " << cpp_strerror(r) << dendl;
9871 if (r == -ENOENT)
9872 r = -EOPNOTSUPP;
9873 else if (r != -EPERM) // propagate permission errors
9874 r = -EIO;
9875 return r;
9876 }
9877 int flags = cls->get_method_flags(mname.c_str());
9878 if (flags < 0) {
9879 if (flags == -ENOENT)
9880 r = -EOPNOTSUPP;
9881 else
9882 r = flags;
9883 return r;
9884 }
9885 is_read = flags & CLS_METHOD_RD;
9886 is_write = flags & CLS_METHOD_WR;
9887 bool is_promote = flags & CLS_METHOD_PROMOTE;
9888
9889 dout(10) << "class " << cname << " method " << mname << " "
9890 << "flags=" << (is_read ? "r" : "")
9891 << (is_write ? "w" : "")
9892 << (is_promote ? "p" : "")
9893 << dendl;
9894 if (is_read)
9895 op->set_class_read();
9896 if (is_write)
9897 op->set_class_write();
9898 if (is_promote)
9899 op->set_promote();
9900 op->add_class(cname, is_read, is_write, cls->whitelisted);
9901 break;
9902 }
9903
9904 case CEPH_OSD_OP_WATCH:
9905 // force the read bit for watch since it depends on previous
9906 // watch state (and may return early if the watch exists) or, in
9907 // the case of ping, is simply a read op.
9908 op->set_read();
9909 // fall through
9910 case CEPH_OSD_OP_NOTIFY:
9911 case CEPH_OSD_OP_NOTIFY_ACK:
9912 {
9913 op->set_promote();
9914 break;
9915 }
9916
9917 case CEPH_OSD_OP_DELETE:
9918 // if we get a delete with FAILOK we can skip handle cache. without
9919 // FAILOK we still need to promote (or do something smarter) to
9920 // determine whether to return ENOENT or 0.
9921 if (iter == m->ops.begin() &&
9922 iter->op.flags == CEPH_OSD_OP_FLAG_FAILOK) {
9923 op->set_skip_handle_cache();
9924 }
9925 // skip promotion when proxying a delete op
9926 if (m->ops.size() == 1) {
9927 op->set_skip_promote();
9928 }
9929 break;
9930
9931 case CEPH_OSD_OP_CACHE_TRY_FLUSH:
9932 case CEPH_OSD_OP_CACHE_FLUSH:
9933 case CEPH_OSD_OP_CACHE_EVICT:
9934 // If try_flush/flush/evict is the only op, can skip handle cache.
9935 if (m->ops.size() == 1) {
9936 op->set_skip_handle_cache();
9937 }
9938 break;
9939
9940 case CEPH_OSD_OP_READ:
9941 case CEPH_OSD_OP_SYNC_READ:
9942 case CEPH_OSD_OP_SPARSE_READ:
9943 case CEPH_OSD_OP_CHECKSUM:
9944 case CEPH_OSD_OP_WRITEFULL:
9945 if (m->ops.size() == 1 &&
9946 (iter->op.flags & CEPH_OSD_OP_FLAG_FADVISE_NOCACHE ||
9947 iter->op.flags & CEPH_OSD_OP_FLAG_FADVISE_DONTNEED)) {
9948 op->set_skip_promote();
9949 }
9950 break;
9951
9952 // force promotion when pinning an object in the cache tier
9953 case CEPH_OSD_OP_CACHE_PIN:
9954 op->set_promote();
9955 break;
9956
9957 default:
9958 break;
9959 }
9960 }
9961
9962 if (op->rmw_flags == 0)
9963 return -EINVAL;
9964
9965 return 0;
9966 }
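// Worked example of the flag derivation above (hypothetical ops): a single
// CEPH_OSD_OP_READ carrying CEPH_OSD_OP_FLAG_FADVISE_DONTNEED yields
// set_read() plus set_skip_promote(), so a cache tier may proxy it without
// promoting; a CEPH_OSD_OP_CALL into a class method whose flags include
// CLS_METHOD_RD|CLS_METHOD_WR sets both class_read and class_write; and an
// op that establishes no rmw bit at all is rejected with -EINVAL.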
9967
9968 void OSD::PeeringWQ::_dequeue(list<PG*> *out) {
9969 for (list<PG*>::iterator i = peering_queue.begin();
9970 i != peering_queue.end() &&
9971 out->size() < osd->cct->_conf->osd_peering_wq_batch_size;
9972 ) {
9973 if (in_use.count(*i)) {
9974 ++i;
9975 } else {
9976 out->push_back(*i);
9977 peering_queue.erase(i++);
9978 }
9979 }
9980 in_use.insert(out->begin(), out->end());
9981 }
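// Descriptive note on the loop above: entries already present in 'in_use'
// stay on peering_queue and are skipped, guaranteeing that a PG handed to
// one peering worker thread is never concurrently batched to another; the
// batch size itself is capped by osd_peering_wq_batch_size.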
9982
9983
9984 // =============================================================
9985
9986 #undef dout_context
9987 #define dout_context osd->cct
9988 #undef dout_prefix
9989 #define dout_prefix *_dout << "osd." << osd->whoami << " op_wq "
9990
9991 void OSD::ShardedOpWQ::wake_pg_waiters(spg_t pgid)
9992 {
9993 uint32_t shard_index = pgid.hash_to_shard(shard_list.size());
9994 auto sdata = shard_list[shard_index];
9995 bool queued = false;
9996 unsigned pushes_to_free = 0;
9997 {
9998 Mutex::Locker l(sdata->sdata_op_ordering_lock);
9999 auto p = sdata->pg_slots.find(pgid);
10000 if (p != sdata->pg_slots.end()) {
10001 dout(20) << __func__ << " " << pgid
10002 << " to_process " << p->second.to_process
10003 << " waiting_for_pg=" << (int)p->second.waiting_for_pg << dendl;
10004 for (auto i = p->second.to_process.rbegin();
10005 i != p->second.to_process.rend();
10006 ++i) {
10007 sdata->_enqueue_front(make_pair(pgid, *i), osd->op_prio_cutoff);
10008 }
10009 for (auto& q : p->second.to_process) {
10010 pushes_to_free += q.get_reserved_pushes();
10011 }
10012 p->second.to_process.clear();
10013 p->second.waiting_for_pg = false;
10014 ++p->second.requeue_seq;
10015 queued = true;
10016 }
10017 }
10018 if (pushes_to_free > 0) {
10019 osd->service.release_reserved_pushes(pushes_to_free);
10020 }
10021 if (queued) {
10022 sdata->sdata_lock.Lock();
10023 sdata->sdata_cond.SignalOne();
10024 sdata->sdata_lock.Unlock();
10025 }
10026 }
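// Descriptive note: the requeue_seq bump above is how _process() detects
// that this wake-up raced with it: a worker that recorded the old seq sees
// the mismatch after retaking sdata_op_ordering_lock and backs out rather
// than re-processing items that were just pushed back into the pqueue.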
10027
10028 void OSD::ShardedOpWQ::prune_pg_waiters(OSDMapRef osdmap, int whoami)
10029 {
10030 unsigned pushes_to_free = 0;
10031 for (auto sdata : shard_list) {
10032 Mutex::Locker l(sdata->sdata_op_ordering_lock);
10033 sdata->waiting_for_pg_osdmap = osdmap;
10034 auto p = sdata->pg_slots.begin();
10035 while (p != sdata->pg_slots.end()) {
10036 ShardData::pg_slot& slot = p->second;
10037 if (!slot.to_process.empty() && slot.num_running == 0) {
10038 if (osdmap->is_up_acting_osd_shard(p->first, whoami)) {
10039 dout(20) << __func__ << " " << p->first << " maps to us, keeping"
10040 << dendl;
10041 ++p;
10042 continue;
10043 }
10044 while (!slot.to_process.empty() &&
10045 slot.to_process.front().get_map_epoch() <= osdmap->get_epoch()) {
10046 auto& qi = slot.to_process.front();
10047 dout(20) << __func__ << " " << p->first
10048 << " item " << qi
10049 << " epoch " << qi.get_map_epoch()
10050 << " <= " << osdmap->get_epoch()
10051 << ", stale, dropping" << dendl;
10052 pushes_to_free += qi.get_reserved_pushes();
10053 slot.to_process.pop_front();
10054 }
10055 }
10056 if (slot.to_process.empty() &&
10057 slot.num_running == 0 &&
10058 !slot.pg) {
10059 dout(20) << __func__ << " " << p->first << " empty, pruning" << dendl;
10060 p = sdata->pg_slots.erase(p);
10061 } else {
10062 ++p;
10063 }
10064 }
10065 }
10066 if (pushes_to_free > 0) {
10067 osd->service.release_reserved_pushes(pushes_to_free);
10068 }
10069 }
10070
10071 void OSD::ShardedOpWQ::clear_pg_pointer(spg_t pgid)
10072 {
10073 uint32_t shard_index = pgid.hash_to_shard(shard_list.size());
10074 auto sdata = shard_list[shard_index];
10075 Mutex::Locker l(sdata->sdata_op_ordering_lock);
10076 auto p = sdata->pg_slots.find(pgid);
10077 if (p != sdata->pg_slots.end()) {
10078 auto& slot = p->second;
10079 dout(20) << __func__ << " " << pgid << " pg " << slot.pg << dendl;
10080 assert(!slot.pg || slot.pg->deleting);
10081 slot.pg = nullptr;
10082 }
10083 }
10084
10085 void OSD::ShardedOpWQ::clear_pg_slots()
10086 {
10087 for (auto sdata : shard_list) {
10088 Mutex::Locker l(sdata->sdata_op_ordering_lock);
10089 sdata->pg_slots.clear();
10090 sdata->waiting_for_pg_osdmap.reset();
10091 // don't bother with reserved pushes; we are shutting down
10092 }
10093 }
10094
10095 #undef dout_prefix
10096 #define dout_prefix *_dout << "osd." << osd->whoami << " op_wq(" << shard_index << ") "
10097
10098 void OSD::ShardedOpWQ::_process(uint32_t thread_index, heartbeat_handle_d *hb)
10099 {
10100 uint32_t shard_index = thread_index % num_shards;
10101 ShardData *sdata = shard_list[shard_index];
10102 assert(NULL != sdata);
10103
10104 // peek at spg_t
10105 sdata->sdata_op_ordering_lock.Lock();
10106 if (sdata->pqueue->empty()) {
10107 dout(20) << __func__ << " empty q, waiting" << dendl;
10108 // optimistically sleep a moment; maybe another work item will come along.
10109 osd->cct->get_heartbeat_map()->reset_timeout(hb,
10110 osd->cct->_conf->threadpool_default_timeout, 0);
10111 sdata->sdata_lock.Lock();
10112 sdata->sdata_op_ordering_lock.Unlock();
10113 sdata->sdata_cond.WaitInterval(sdata->sdata_lock,
10114 utime_t(osd->cct->_conf->threadpool_empty_queue_max_wait, 0));
10115 sdata->sdata_lock.Unlock();
10116 sdata->sdata_op_ordering_lock.Lock();
10117 if (sdata->pqueue->empty()) {
10118 sdata->sdata_op_ordering_lock.Unlock();
10119 return;
10120 }
10121 }
10122 pair<spg_t, PGQueueable> item = sdata->pqueue->dequeue();
10123 if (osd->is_stopping()) {
10124 sdata->sdata_op_ordering_lock.Unlock();
10125 return; // OSD shutdown, discard.
10126 }
10127 PGRef pg;
10128 uint64_t requeue_seq;
10129 {
10130 auto& slot = sdata->pg_slots[item.first];
10131 dout(30) << __func__ << " " << item.first
10132 << " to_process " << slot.to_process
10133 << " waiting_for_pg=" << (int)slot.waiting_for_pg << dendl;
10134 slot.to_process.push_back(item.second);
10135 // note the requeue seq now...
10136 requeue_seq = slot.requeue_seq;
10137 if (slot.waiting_for_pg) {
10138 // save ourselves a bit of effort
10139 dout(20) << __func__ << " " << item.first << " item " << item.second
10140 << " queued, waiting_for_pg" << dendl;
10141 sdata->sdata_op_ordering_lock.Unlock();
10142 return;
10143 }
10144 pg = slot.pg;
10145 dout(20) << __func__ << " " << item.first << " item " << item.second
10146 << " queued" << dendl;
10147 ++slot.num_running;
10148 }
10149 sdata->sdata_op_ordering_lock.Unlock();
10150
10151 osd->service.maybe_inject_dispatch_delay();
10152
10153 // [lookup +] lock pg (if we have it)
10154 if (!pg) {
10155 pg = osd->_lookup_lock_pg(item.first);
10156 } else {
10157 pg->lock();
10158 }
10159
10160 osd->service.maybe_inject_dispatch_delay();
10161
10162 boost::optional<PGQueueable> qi;
10163
10164 // we don't use a Mutex::Locker here because of the
10165 // osd->service.release_reserved_pushes() call below
10166 sdata->sdata_op_ordering_lock.Lock();
10167
10168 auto q = sdata->pg_slots.find(item.first);
10169 assert(q != sdata->pg_slots.end());
10170 auto& slot = q->second;
10171 --slot.num_running;
10172
10173 if (slot.to_process.empty()) {
10174 // raced with wake_pg_waiters or prune_pg_waiters
10175 dout(20) << __func__ << " " << item.first << " nothing queued" << dendl;
10176 if (pg) {
10177 pg->unlock();
10178 }
10179 sdata->sdata_op_ordering_lock.Unlock();
10180 return;
10181 }
10182 if (requeue_seq != slot.requeue_seq) {
10183 dout(20) << __func__ << " " << item.first
10184 << " requeue_seq " << slot.requeue_seq << " > our "
10185 << requeue_seq << ", we raced with wake_pg_waiters"
10186 << dendl;
10187 if (pg) {
10188 pg->unlock();
10189 }
10190 sdata->sdata_op_ordering_lock.Unlock();
10191 return;
10192 }
10193 if (pg && !slot.pg && !pg->deleting) {
10194 dout(20) << __func__ << " " << item.first << " set pg to " << pg << dendl;
10195 slot.pg = pg;
10196 }
10197 dout(30) << __func__ << " " << item.first << " to_process " << slot.to_process
10198 << " waiting_for_pg=" << (int)slot.waiting_for_pg << dendl;
10199
10200 // make sure we're not already waiting for this pg
10201 if (slot.waiting_for_pg) {
10202 dout(20) << __func__ << " " << item.first << " item " << item.second
10203 << " slot is waiting_for_pg" << dendl;
10204 if (pg) {
10205 pg->unlock();
10206 }
10207 sdata->sdata_op_ordering_lock.Unlock();
10208 return;
10209 }
10210
10211 // take next item
10212 qi = slot.to_process.front();
10213 slot.to_process.pop_front();
10214 dout(20) << __func__ << " " << item.first << " item " << *qi
10215 << " pg " << pg << dendl;
10216
10217 if (!pg) {
10218 // should this pg shard exist on this osd in this (or a later) epoch?
10219 OSDMapRef osdmap = sdata->waiting_for_pg_osdmap;
10220 if (osdmap->is_up_acting_osd_shard(item.first, osd->whoami)) {
10221 dout(20) << __func__ << " " << item.first
10222 << " no pg, should exist, will wait" << " on " << *qi << dendl;
10223 slot.to_process.push_front(*qi);
10224 slot.waiting_for_pg = true;
10225 } else if (qi->get_map_epoch() > osdmap->get_epoch()) {
10226 dout(20) << __func__ << " " << item.first << " no pg, item epoch is "
10227 << qi->get_map_epoch() << " > " << osdmap->get_epoch()
10228 << ", will wait on " << *qi << dendl;
10229 slot.to_process.push_front(*qi);
10230 slot.waiting_for_pg = true;
10231 } else {
10232 dout(20) << __func__ << " " << item.first << " no pg, shouldn't exist,"
10233 << " dropping " << *qi << dendl;
10234 // share map with client?
10235 if (boost::optional<OpRequestRef> _op = qi->maybe_get_op()) {
10236 Session *session = static_cast<Session *>(
10237 (*_op)->get_req()->get_connection()->get_priv());
10238 if (session) {
10239 osd->maybe_share_map(session, *_op, sdata->waiting_for_pg_osdmap);
10240 session->put();
10241 }
10242 }
10243 unsigned pushes_to_free = qi->get_reserved_pushes();
10244 if (pushes_to_free > 0) {
10245 sdata->sdata_op_ordering_lock.Unlock();
10246 osd->service.release_reserved_pushes(pushes_to_free);
10247 return;
10248 }
10249 }
10250 sdata->sdata_op_ordering_lock.Unlock();
10251 return;
10252 }
10253 sdata->sdata_op_ordering_lock.Unlock();
10254
10255
10256 // osd_opwq_process marks the point at which an operation has been dequeued
10257 // and will begin to be handled by a worker thread.
10258 {
10259 #ifdef WITH_LTTNG
10260 osd_reqid_t reqid;
10261 if (boost::optional<OpRequestRef> _op = qi->maybe_get_op()) {
10262 reqid = (*_op)->get_reqid();
10263 }
10264 #endif
10265 tracepoint(osd, opwq_process_start, reqid.name._type,
10266 reqid.name._num, reqid.tid, reqid.inc);
10267 }
10268
10269 lgeneric_subdout(osd->cct, osd, 30) << "dequeue status: ";
10270 Formatter *f = Formatter::create("json");
10271 f->open_object_section("q");
10272 dump(f);
10273 f->close_section();
10274 f->flush(*_dout);
10275 delete f;
10276 *_dout << dendl;
10277
10278 ThreadPool::TPHandle tp_handle(osd->cct, hb, timeout_interval,
10279 suicide_interval);
10280 qi->run(osd, pg, tp_handle);
10281
10282 {
10283 #ifdef WITH_LTTNG
10284 osd_reqid_t reqid;
10285 if (boost::optional<OpRequestRef> _op = qi->maybe_get_op()) {
10286 reqid = (*_op)->get_reqid();
10287 }
10288 #endif
10289 tracepoint(osd, opwq_process_finish, reqid.name._type,
10290 reqid.name._num, reqid.tid, reqid.inc);
10291 }
10292
10293 pg->unlock();
10294 }
10295
10296 void OSD::ShardedOpWQ::_enqueue(pair<spg_t, PGQueueable> item) {
10297 uint32_t shard_index =
10298 item.first.hash_to_shard(shard_list.size());
10299
10300 ShardData* sdata = shard_list[shard_index];
10301 assert (NULL != sdata);
10302 unsigned priority = item.second.get_priority();
10303 unsigned cost = item.second.get_cost();
10304 sdata->sdata_op_ordering_lock.Lock();
10305
10306 dout(20) << __func__ << " " << item.first << " " << item.second << dendl;
10307 if (priority >= osd->op_prio_cutoff)
10308 sdata->pqueue->enqueue_strict(
10309 item.second.get_owner(), priority, item);
10310 else
10311 sdata->pqueue->enqueue(
10312 item.second.get_owner(),
10313 priority, cost, item);
10314 sdata->sdata_op_ordering_lock.Unlock();
10315
10316 sdata->sdata_lock.Lock();
10317 sdata->sdata_cond.SignalOne();
10318 sdata->sdata_lock.Unlock();
10319
10320 }
10321
10322 void OSD::ShardedOpWQ::_enqueue_front(pair<spg_t, PGQueueable> item)
10323 {
10324 uint32_t shard_index = item.first.hash_to_shard(shard_list.size());
10325 ShardData* sdata = shard_list[shard_index];
10326 assert (NULL != sdata);
10327 sdata->sdata_op_ordering_lock.Lock();
10328 auto p = sdata->pg_slots.find(item.first);
10329 if (p != sdata->pg_slots.end() && !p->second.to_process.empty()) {
10330 // we may be racing with _process, which has dequeued a new item
10331 // from pqueue, put it on to_process, and is now busy taking the
10332 // pg lock. ensure this old requeued item is ordered before any
10333 // such newer item in to_process.
10334 p->second.to_process.push_front(item.second);
10335 item.second = p->second.to_process.back();
10336 p->second.to_process.pop_back();
10337 dout(20) << __func__ << " " << item.first
10338 << " " << p->second.to_process.front()
10339 << " shuffled w/ " << item.second << dendl;
10340 } else {
10341 dout(20) << __func__ << " " << item.first << " " << item.second << dendl;
10342 }
10343 sdata->_enqueue_front(item, osd->op_prio_cutoff);
10344 sdata->sdata_op_ordering_lock.Unlock();
10345 sdata->sdata_lock.Lock();
10346 sdata->sdata_cond.SignalOne();
10347 sdata->sdata_lock.Unlock();
10348 }
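// Worked example of the shuffle above (hypothetical items A and B): if
// to_process already holds a newer item B staged by _process(), requeueing
// an older item A leaves A at the head of to_process and pushes B onto the
// front of the shard's pqueue instead, so A is still handled before B once
// the slot drains.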
10349
10350 namespace ceph {
10351 namespace osd_cmds {
10352
10353 int heap(CephContext& cct, cmdmap_t& cmdmap, Formatter& f, std::ostream& os)
10354 {
10355 if (!ceph_using_tcmalloc()) {
10356 os << "could not issue heap profiler command -- not using tcmalloc!";
10357 return -EOPNOTSUPP;
10358 }
10359
10360 string cmd;
10361 if (!cmd_getval(&cct, cmdmap, "heapcmd", cmd)) {
10362 os << "unable to get value for command \"" << cmd << "\"";
10363 return -EINVAL;
10364 }
10365
10366 std::vector<std::string> cmd_vec;
10367 get_str_vec(cmd, cmd_vec);
10368
10369 ceph_heap_profiler_handle_command(cmd_vec, os);
10370
10371 return 0;
10372 }
10373
10374 }} // namespace ceph::osd_cmds
10375
10376
10377 std::ostream& operator<<(std::ostream& out, const OSD::io_queue& q) {
10378 switch(q) {
10379 case OSD::io_queue::prioritized:
10380 out << "prioritized";
10381 break;
10382 case OSD::io_queue::weightedpriority:
10383 out << "weightedpriority";
10384 break;
10385 case OSD::io_queue::mclock_opclass:
10386 out << "mclock_opclass";
10387 break;
10388 case OSD::io_queue::mclock_client:
10389 out << "mclock_client";
10390 break;
10391 }
10392 return out;
10393 }