// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 */

#include "msg/Dispatcher.h"

#include "common/Timer.h"
#include "common/WorkQueue.h"
#include "common/AsyncReserver.h"
#include "common/ceph_context.h"
#include "common/config_cacher.h"
#include "common/zipkin_trace.h"
#include "common/ceph_timer.h"

#include "mgr/MgrClient.h"

#include "os/ObjectStore.h"

#include "auth/KeyRing.h"

#include "osd/ClassHandler.h"

#include "include/CompatSet.h"
#include "include/common_fwd.h"

#include "OpRequest.h"

#include "osd/scheduler/OpScheduler.h"

#include "include/unordered_map.h"

#include "common/shared_cache.hpp"
#include "common/simple_cache.hpp"
#include "common/sharedptr_registry.hpp"
#include "common/WeightedPriorityQueue.h"
#include "common/PrioritizedQueue.h"
#include "messages/MOSDOp.h"
#include "common/EventTrace.h"
#include "osd/osd_perf_counters.h"
#define CEPH_OSD_PROTOCOL 10 /* cluster internal */

/*
  lock ordering for pg map
*/
class TestOpsSocketHook;
struct C_FinishSplits;
class MOSDForceRecovery;
class MMonGetPurgedSnapsReply;

class OSDService {
public:
  using OpSchedulerItem = ceph::osd::scheduler::OpSchedulerItem;
  OSD *osd;
  CephContext *cct;
  ObjectStore::CollectionHandle meta_ch;
  const int whoami;
  LogClient &log_client;
  PGRecoveryStats &pg_recovery_stats;

  Messenger *&cluster_messenger;
  Messenger *&client_messenger;

  PerfCounters *&logger;
  PerfCounters *&recoverystate_perf;

  md_config_cacher_t<Option::size_t> osd_max_object_size;
  md_config_cacher_t<bool> osd_skip_data_digest;

  void enqueue_back(OpSchedulerItem&& qi);
  void enqueue_front(OpSchedulerItem&& qi);
  void maybe_inject_dispatch_delay() {
    if (g_conf()->osd_debug_inject_dispatch_delay_probability > 0) {
      if (rand() % 10000 <
          g_conf()->osd_debug_inject_dispatch_delay_probability * 10000) {
        utime_t t;
        t.set_from_double(g_conf()->osd_debug_inject_dispatch_delay_duration);
        t.sleep();
      }
    }
  }

  ceph::signedspan get_mnow();
  ceph::mutex publish_lock, pre_publish_lock;  // pre-publish orders before publish
  OSDSuperblock superblock;

  OSDSuperblock get_superblock() {
    std::lock_guard l(publish_lock);
    return superblock;
  }
  void publish_superblock(const OSDSuperblock &block) {
    std::lock_guard l(publish_lock);
    superblock = block;
  }

  int get_nodeid() const { return whoami; }

  std::atomic<epoch_t> max_oldest_map;
  OSDMapRef osdmap;

  OSDMapRef get_osdmap() {
    std::lock_guard l(publish_lock);
    return osdmap;
  }
  epoch_t get_osdmap_epoch() {
    std::lock_guard l(publish_lock);
    return osdmap ? osdmap->get_epoch() : 0;
  }
  void publish_map(OSDMapRef map) {
    std::lock_guard l(publish_lock);
    osdmap = std::move(map);
  }
  /*
   * osdmap - current published map
   * next_osdmap - pre_published map that is about to be published.
   *
   * We use the next_osdmap to send messages and initiate connections,
   * but only if the target is the same instance as the one in the map
   * epoch the current user is working from (i.e., the result is
   * equivalent to what is in next_osdmap).
   *
   * This allows the helpers to start ignoring osds that are about to
   * go down, and let OSD::handle_osd_map()/note_down_osd() mark them
   * down, without worrying about reopening connections from threads
   * working from old maps.
   */
  OSDMapRef next_osdmap;
  ceph::condition_variable pre_publish_cond;
  int pre_publish_waiter = 0;
  void pre_publish_map(OSDMapRef map) {
    std::lock_guard l(pre_publish_lock);
    next_osdmap = std::move(map);
  }

  /// map epochs reserved below
  map<epoch_t, unsigned> map_reservations;
  /// gets ref to next_osdmap and registers the epoch as reserved
  OSDMapRef get_nextmap_reserved() {
    std::lock_guard l(pre_publish_lock);
    epoch_t e = next_osdmap->get_epoch();
    map<epoch_t, unsigned>::iterator i =
      map_reservations.insert(make_pair(e, 0)).first;
    i->second++;
    return next_osdmap;
  }

  /// releases reservation on map
  void release_map(OSDMapRef osdmap) {
    std::lock_guard l(pre_publish_lock);
    map<epoch_t, unsigned>::iterator i =
      map_reservations.find(osdmap->get_epoch());
    ceph_assert(i != map_reservations.end());
    ceph_assert(i->second > 0);
    if (--(i->second) == 0) {
      map_reservations.erase(i);
    }
    if (pre_publish_waiter) {
      pre_publish_cond.notify_all();
    }
  }
  /// blocks until there are no reserved maps prior to next_osdmap
  void await_reserved_maps() {
    std::unique_lock l{pre_publish_lock};
    ceph_assert(next_osdmap);
    pre_publish_waiter++;
    pre_publish_cond.wait(l, [this] {
      auto i = map_reservations.cbegin();
      return (i == map_reservations.cend() ||
              i->first >= next_osdmap->get_epoch());
    });
    pre_publish_waiter--;
  }
  OSDMapRef get_next_osdmap() {
    std::lock_guard l(pre_publish_lock);
    return next_osdmap;
  }
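
  // Illustrative usage sketch (a hedged example, not part of this header):
  // a caller that must keep working from a consistent future map reserves
  // it, uses it outside the lock, and then releases the reservation so
  // that await_reserved_maps() can make progress.
  //
  //   OSDMapRef nextmap = service.get_nextmap_reserved();
  //   use_map(nextmap);              // hypothetical helper
  //   service.release_map(nextmap);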
  void maybe_share_map(Connection *con,
                       const OSDMapRef& osdmap,
                       epoch_t peer_epoch_lb=0);

  void send_map(class MOSDMap *m, Connection *con);
  void send_incremental_map(epoch_t since, Connection *con,
                            const OSDMapRef& osdmap);
  MOSDMap *build_incremental_map_msg(epoch_t from, epoch_t to,
                                     OSDSuperblock& superblock);

  ConnectionRef get_con_osd_cluster(int peer, epoch_t from_epoch);
  pair<ConnectionRef,ConnectionRef> get_con_osd_hb(int peer, epoch_t from_epoch);  // (back, front)
  void send_message_osd_cluster(int peer, Message *m, epoch_t from_epoch);
  void send_message_osd_cluster(std::vector<std::pair<int, Message*>>& messages, epoch_t from_epoch);
  void send_message_osd_cluster(Message *m, Connection *con) {
    con->send_message(m);
  }
  void send_message_osd_cluster(Message *m, const ConnectionRef& con) {
    con->send_message(m);
  }
  void send_message_osd_client(Message *m, const ConnectionRef& con) {
    con->send_message(m);
  }
  entity_name_t get_cluster_msgr_name() const;
  // -- scrub scheduling --
  ceph::mutex sched_scrub_lock = ceph::make_mutex("OSDService::sched_scrub_lock");

  struct ScrubJob {
    CephContext* cct;
    /// pg to be scrubbed
    spg_t pgid;
    /// a time scheduled for scrub; the scrub could be delayed if system
    /// load is too high or it fails to fall in the scrub hours
    utime_t sched_time;
    /// the hard upper bound of scrub time
    utime_t deadline;
    ScrubJob() : cct(nullptr) {}
    explicit ScrubJob(CephContext* cct, const spg_t& pg,
                      const utime_t& timestamp,
                      double pool_scrub_min_interval = 0,
                      double pool_scrub_max_interval = 0, bool must = true);
    /// order the jobs by sched_time
    bool operator<(const ScrubJob& rhs) const;
  };
  set<ScrubJob> sched_scrub_pg;

  /// @returns the scrub_reg_stamp used for unregistering the scrub job
  utime_t reg_pg_scrub(spg_t pgid, utime_t t, double pool_scrub_min_interval,
                       double pool_scrub_max_interval, bool must) {
    ScrubJob scrub(cct, pgid, t, pool_scrub_min_interval, pool_scrub_max_interval,
                   must);
    std::lock_guard l(sched_scrub_lock);
    sched_scrub_pg.insert(scrub);
    return scrub.sched_time;
  }
  void unreg_pg_scrub(spg_t pgid, utime_t t) {
    std::lock_guard l(sched_scrub_lock);
    size_t removed = sched_scrub_pg.erase(ScrubJob(cct, pgid, t));
    ceph_assert(removed);
  }
  bool first_scrub_stamp(ScrubJob *out) {
    std::lock_guard l(sched_scrub_lock);
    if (sched_scrub_pg.empty())
      return false;
    set<ScrubJob>::iterator iter = sched_scrub_pg.begin();
    *out = *iter;
    return true;
  }
  bool next_scrub_stamp(const ScrubJob& next,
                        ScrubJob *out) {
    std::lock_guard l(sched_scrub_lock);
    if (sched_scrub_pg.empty())
      return false;
    set<ScrubJob>::const_iterator iter = sched_scrub_pg.lower_bound(next);
    if (iter == sched_scrub_pg.cend())
      return false;
    iter++;
    if (iter == sched_scrub_pg.cend())
      return false;
    *out = *iter;
    return true;
  }
  void dumps_scrub(Formatter *f) {
    ceph_assert(f != nullptr);
    std::lock_guard l(sched_scrub_lock);

    f->open_array_section("scrubs");
    for (const auto &i : sched_scrub_pg) {
      f->open_object_section("scrub");
      f->dump_stream("pgid") << i.pgid;
      f->dump_stream("sched_time") << i.sched_time;
      f->dump_stream("deadline") << i.deadline;
      f->dump_bool("forced", i.sched_time == PG::Scrubber::scrub_must_stamp());
      f->close_section();
    }
    f->close_section();
  }
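
  // For reference, dumps_scrub() emits JSON of roughly this shape (the
  // values below are illustrative only):
  //
  //   {"scrubs": [
  //     {"pgid": "1.0",
  //      "sched_time": "...",
  //      "deadline": "...",
  //      "forced": false}
  //   ]}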
  bool can_inc_scrubs();
  bool inc_scrubs_local();
  void dec_scrubs_local();
  bool inc_scrubs_remote();
  void dec_scrubs_remote();
  void dump_scrub_reservations(Formatter *f);

  void reply_op_error(OpRequestRef op, int err);
  void reply_op_error(OpRequestRef op, int err, eversion_t v, version_t uv,
                      vector<pg_log_op_return_item_t> op_returns);
  void handle_misdirected_op(PG *pg, OpRequestRef op);
  // -- agent shared state --
  ceph::mutex agent_lock = ceph::make_mutex("OSDService::agent_lock");
  ceph::condition_variable agent_cond;
  map<uint64_t, set<PGRef> > agent_queue;
  set<PGRef>::iterator agent_queue_pos;
  bool agent_valid_iterator;
  int agent_ops;
  int flush_mode_high_count;  // once one pg is in FLUSH_MODE_HIGH, flush objects at high speed
  set<hobject_t> agent_oids;

  struct AgentThread : public Thread {
    OSDService *osd;
    explicit AgentThread(OSDService *o) : osd(o) {}
    void *entry() override {
      osd->agent_entry();
      return NULL;
    }
  } agent_thread;
  bool agent_stop_flag;
  ceph::mutex agent_timer_lock = ceph::make_mutex("OSDService::agent_timer_lock");
  SafeTimer agent_timer;
  void _enqueue(PG *pg, uint64_t priority) {
    if (!agent_queue.empty() &&
        agent_queue.rbegin()->first < priority)
      agent_valid_iterator = false;  // inserting a higher-priority queue
    set<PGRef>& nq = agent_queue[priority];
    nq.insert(pg);
    agent_cond.notify_all();
  }

  void _dequeue(PG *pg, uint64_t old_priority) {
    set<PGRef>& oq = agent_queue[old_priority];
    set<PGRef>::iterator p = oq.find(pg);
    ceph_assert(p != oq.end());
    if (p == agent_queue_pos)
      ++agent_queue_pos;
    oq.erase(p);
    if (oq.empty()) {
      if (agent_queue.rbegin()->first == old_priority)
        agent_valid_iterator = false;
      agent_queue.erase(old_priority);
    }
  }
  /// enable agent for a pg
  void agent_enable_pg(PG *pg, uint64_t priority) {
    std::lock_guard l(agent_lock);
    _enqueue(pg, priority);
  }

  /// adjust priority for an enabled pg
  void agent_adjust_pg(PG *pg, uint64_t old_priority, uint64_t new_priority) {
    std::lock_guard l(agent_lock);
    ceph_assert(new_priority != old_priority);
    _enqueue(pg, new_priority);
    _dequeue(pg, old_priority);
  }

  /// disable agent for a pg
  void agent_disable_pg(PG *pg, uint64_t old_priority) {
    std::lock_guard l(agent_lock);
    _dequeue(pg, old_priority);
  }

  /// note start of an async (evict) op
  void agent_start_evict_op() {
    std::lock_guard l(agent_lock);
    agent_ops++;
  }

  /// note finish or cancellation of an async (evict) op
  void agent_finish_evict_op() {
    std::lock_guard l(agent_lock);
    ceph_assert(agent_ops > 0);
    agent_ops--;
    agent_cond.notify_all();
  }

  /// note start of an async (flush) op
  void agent_start_op(const hobject_t& oid) {
    std::lock_guard l(agent_lock);
    agent_ops++;
    ceph_assert(agent_oids.count(oid) == 0);
    agent_oids.insert(oid);
  }

  /// note finish or cancellation of an async (flush) op
  void agent_finish_op(const hobject_t& oid) {
    std::lock_guard l(agent_lock);
    ceph_assert(agent_ops > 0);
    agent_ops--;
    ceph_assert(agent_oids.count(oid) == 1);
    agent_oids.erase(oid);
    agent_cond.notify_all();
  }

  /// check if we are operating on an object
  bool agent_is_active_oid(const hobject_t& oid) {
    std::lock_guard l(agent_lock);
    return agent_oids.count(oid);
  }

  /// get count of active agent ops
  int agent_get_num_ops() {
    std::lock_guard l(agent_lock);
    return agent_ops;
  }

  void agent_inc_high_count() {
    std::lock_guard l(agent_lock);
    flush_mode_high_count++;
  }

  void agent_dec_high_count() {
    std::lock_guard l(agent_lock);
    flush_mode_high_count--;
  }
  /// throttle promotion attempts
  std::atomic<unsigned int> promote_probability_millis{1000};  ///< probability in thousandths; one word
  PromoteCounter promote_counter;
  utime_t last_recalibrate;
  unsigned long promote_max_objects, promote_max_bytes;

  bool promote_throttle() {
    // NOTE: lockless!  we rely on the probability being a single word.
    promote_counter.attempt();
    if ((unsigned)rand() % 1000 > promote_probability_millis)
      return true;  // yes throttle (no promote)
    if (promote_max_objects &&
        promote_counter.objects > promote_max_objects)
      return true;  // yes throttle
    if (promote_max_bytes &&
        promote_counter.bytes > promote_max_bytes)
      return true;  // yes throttle
    return false;   // no throttle (promote)
  }
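
  // A minimal sketch (illustrative, not header code) of the lockless
  // probability gate above: with promote_probability_millis == p, roughly
  // p out of every 1000 attempts pass the first check.
  //
  //   unsigned p = promote_probability_millis;        // single-word read
  //   bool throttled = ((unsigned)rand() % 1000 > p); // same test as above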
  void promote_finish(uint64_t bytes) {
    promote_counter.finish(bytes);
  }
  void promote_throttle_recalibrate();
  unsigned get_num_shards() const {
    return m_objecter_finishers;
  }
  Finisher* get_objecter_finisher(int shard) {
    return objecter_finishers[shard].get();
  }

  // -- Objecter, for tiering reads/writes from/to other OSDs --
  std::unique_ptr<Objecter> objecter;
  int m_objecter_finishers;
  std::vector<std::unique_ptr<Finisher>> objecter_finishers;
  ceph::mutex watch_lock = ceph::make_mutex("OSDService::watch_lock");
  SafeTimer watch_timer;
  uint64_t next_notif_id;
  uint64_t get_next_id(epoch_t cur_epoch) {
    std::lock_guard l(watch_lock);
    return (((uint64_t)cur_epoch) << 32) | ((uint64_t)(next_notif_id++));
  }
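
  // Illustrative sketch (not header code) of the notify-id layout built
  // above: the high 32 bits hold the epoch, the low 32 bits a local
  // counter, keeping ids unique across epochs.
  //
  //   uint64_t id    = get_next_id(e);
  //   epoch_t  epoch = (epoch_t)(id >> 32);          // == e
  //   uint32_t count = (uint32_t)(id & 0xffffffff);  // counter part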
  // -- Recovery/Backfill Request Scheduling --
  ceph::mutex recovery_request_lock = ceph::make_mutex("OSDService::recovery_request_lock");
  SafeTimer recovery_request_timer;

  // For async recovery sleep
  bool recovery_needs_sleep = true;
  ceph::real_clock::time_point recovery_schedule_time;

  // For recovery & scrub & snap
  ceph::mutex sleep_lock = ceph::make_mutex("OSDService::sleep_lock");
  SafeTimer sleep_timer;

  std::atomic<unsigned int> last_tid{0};
  ceph_tid_t get_tid() {
    return (ceph_tid_t)last_tid++;
  }

  // -- backfill_reservation --
  Finisher reserver_finisher;
  AsyncReserver<spg_t> local_reserver;
  AsyncReserver<spg_t> remote_reserver;
  ceph::mutex merge_lock = ceph::make_mutex("OSD::merge_lock");
  map<pg_t,eversion_t> ready_to_merge_source;   // pg -> version
  map<pg_t,std::tuple<eversion_t,epoch_t,epoch_t>> ready_to_merge_target;  // pg -> (version,les,lec)
  set<pg_t> not_ready_to_merge_source;
  map<pg_t,pg_t> not_ready_to_merge_target;
  set<pg_t> sent_ready_to_merge_source;

  void set_ready_to_merge_source(PG *pg,
                                 eversion_t version);
  void set_ready_to_merge_target(PG *pg,
                                 eversion_t version,
                                 epoch_t last_epoch_started,
                                 epoch_t last_epoch_clean);
  void set_not_ready_to_merge_source(pg_t source);
  void set_not_ready_to_merge_target(pg_t target, pg_t source);
  void clear_ready_to_merge(PG *pg);
  void send_ready_to_merge();
  void _send_ready_to_merge();
  void clear_sent_ready_to_merge();
  void prune_sent_ready_to_merge(const OSDMapRef& osdmap);
  // -- pg_temp --
  ceph::mutex pg_temp_lock = ceph::make_mutex("OSDService::pg_temp_lock");
  struct pg_temp_t {
    vector<int> acting;
    bool forced = false;
  };
  map<pg_t, pg_temp_t> pg_temp_wanted;
  map<pg_t, pg_temp_t> pg_temp_pending;
  void _sent_pg_temp();
  friend std::ostream& operator<<(std::ostream&, const pg_temp_t&);

  void queue_want_pg_temp(pg_t pgid, const vector<int>& want,
                          bool forced = false);
  void remove_want_pg_temp(pg_t pgid);
  void requeue_pg_temp();

  ceph::mutex pg_created_lock = ceph::make_mutex("OSDService::pg_created_lock");
  set<pg_t> pg_created;
  void send_pg_created(pg_t pgid);
  void prune_pg_created();
  void send_pg_created();
  AsyncReserver<spg_t> snap_reserver;
  void queue_recovery_context(PG *pg, GenContext<ThreadPool::TPHandle&> *c);
  void queue_for_snap_trim(PG *pg);
  void queue_for_scrub(PG *pg, bool with_high_priority);
  void queue_for_pg_delete(spg_t pgid, epoch_t e);
  bool try_finish_pg_delete(PG *pg, unsigned old_pg_num);
  // -- pg recovery and associated throttling --
  ceph::mutex recovery_lock = ceph::make_mutex("OSDService::recovery_lock");
  list<pair<epoch_t, PGRef> > awaiting_throttle;

  utime_t defer_recovery_until;
  uint64_t recovery_ops_active;
  uint64_t recovery_ops_reserved;
  bool recovery_paused;
#ifdef DEBUG_RECOVERY_OIDS
  map<spg_t, set<hobject_t> > recovery_oids;
#endif

  bool _recover_now(uint64_t *available_pushes);
  void _maybe_queue_recovery();
  void _queue_for_recovery(
    pair<epoch_t, PGRef> p, uint64_t reserved_pushes);
  void start_recovery_op(PG *pg, const hobject_t& soid);
  void finish_recovery_op(PG *pg, const hobject_t& soid, bool dequeue);
  bool is_recovery_active();
  void release_reserved_pushes(uint64_t pushes);
  void defer_recovery(float defer_for) {
    defer_recovery_until = ceph_clock_now();
    defer_recovery_until += defer_for;
  }
  void pause_recovery() {
    std::lock_guard l(recovery_lock);
    recovery_paused = true;
  }
  bool recovery_is_paused() {
    std::lock_guard l(recovery_lock);
    return recovery_paused;
  }
  void unpause_recovery() {
    std::lock_guard l(recovery_lock);
    recovery_paused = false;
    _maybe_queue_recovery();
  }
  void kick_recovery_queue() {
    std::lock_guard l(recovery_lock);
    _maybe_queue_recovery();
  }
  void clear_queued_recovery(PG *pg) {
    std::lock_guard l(recovery_lock);
    awaiting_throttle.remove_if(
      [pg](decltype(awaiting_throttle)::const_reference awaiting) {
        return awaiting.second.get() == pg;
      });
  }

  unsigned get_target_pg_log_entries() const;
  // delayed pg activation
  void queue_for_recovery(PG *pg) {
    std::lock_guard l(recovery_lock);

    if (pg->is_forced_recovery_or_backfill()) {
      awaiting_throttle.push_front(make_pair(pg->get_osdmap()->get_epoch(), pg));
    } else {
      awaiting_throttle.push_back(make_pair(pg->get_osdmap()->get_epoch(), pg));
    }
    _maybe_queue_recovery();
  }
  void queue_recovery_after_sleep(PG *pg, epoch_t queued, uint64_t reserved_pushes) {
    std::lock_guard l(recovery_lock);
    _queue_for_recovery(make_pair(queued, pg), reserved_pushes);
  }

  void queue_check_readable(spg_t spgid,
                            epoch_t lpr,
                            ceph::signedspan delay = ceph::signedspan::zero());
  // osd map cache (past osd maps)
  ceph::mutex map_cache_lock = ceph::make_mutex("OSDService::map_cache_lock");
  SharedLRU<epoch_t, const OSDMap> map_cache;
  SimpleLRU<epoch_t, bufferlist> map_bl_cache;
  SimpleLRU<epoch_t, bufferlist> map_bl_inc_cache;
  OSDMapRef try_get_map(epoch_t e);
  OSDMapRef get_map(epoch_t e) {
    OSDMapRef ret(try_get_map(e));
    ceph_assert(ret);
    return ret;
  }
  OSDMapRef add_map(OSDMap *o) {
    std::lock_guard l(map_cache_lock);
    return _add_map(o);
  }
  OSDMapRef _add_map(OSDMap *o);

  void _add_map_bl(epoch_t e, bufferlist& bl);
  bool get_map_bl(epoch_t e, bufferlist& bl) {
    std::lock_guard l(map_cache_lock);
    return _get_map_bl(e, bl);
  }
  bool _get_map_bl(epoch_t e, bufferlist& bl);

  void _add_map_inc_bl(epoch_t e, bufferlist& bl);
  bool get_inc_map_bl(epoch_t e, bufferlist& bl);
  /// identify split child pgids over an osdmap interval
  void identify_splits_and_merges(
    OSDMapRef old_map,
    OSDMapRef new_map,
    spg_t pgid,
    set<pair<spg_t,epoch_t>> *new_children,
    set<pair<spg_t,epoch_t>> *merge_pgs);

  void need_heartbeat_peer_update();

  void start_shutdown();
  void shutdown_reserver();
  // -- stats --
  ceph::mutex stat_lock = ceph::make_mutex("OSDService::stat_lock");
  osd_stat_t osd_stat;
  uint32_t seq = 0;

  void set_statfs(const struct store_statfs_t &stbuf,
                  osd_alert_list_t& alerts);
  osd_stat_t set_osd_stat(vector<int>& hb_peers, int num_pgs);
  void inc_osd_stat_repaired(void);
  float compute_adjusted_ratio(osd_stat_t new_stat, float *pratio, uint64_t adjust_used = 0);
  osd_stat_t get_osd_stat() {
    std::lock_guard l(stat_lock);
    ++seq;
    osd_stat.up_from = up_epoch;
    osd_stat.seq = ((uint64_t)osd_stat.up_from << 32) + seq;
    return osd_stat;
  }
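
  // Illustrative decoding of the stat seq packed above (a sketch, not
  // header code): the high 32 bits carry up_from, the low 32 bits the
  // per-boot counter, so the seq is monotonic across restarts.
  //
  //   uint64_t s       = service.get_osd_stat_seq();
  //   epoch_t  up_from = (epoch_t)(s >> 32);
  //   uint32_t count   = (uint32_t)(s & 0xffffffff);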
  uint64_t get_osd_stat_seq() {
    std::lock_guard l(stat_lock);
    return osd_stat.seq;
  }
  void get_hb_pingtime(map<int, osd_stat_t::Interfaces> *pp)
  {
    std::lock_guard l(stat_lock);
    *pp = osd_stat.hb_pingtime;
  }
  // -- OSD Full Status --
  friend TestOpsSocketHook;
  mutable ceph::mutex full_status_lock = ceph::make_mutex("OSDService::full_status_lock");
  enum s_names { INVALID = -1, NONE, NEARFULL, BACKFILLFULL, FULL, FAILSAFE } cur_state;  // ascending
  const char *get_full_state_name(s_names s) const {
    switch (s) {
    case NONE: return "none";
    case NEARFULL: return "nearfull";
    case BACKFILLFULL: return "backfillfull";
    case FULL: return "full";
    case FAILSAFE: return "failsafe";
    default: return "???";
    }
  }
  s_names get_full_state(string type) const {
    if (type == "none")
      return NONE;
    else if (type == "failsafe")
      return FAILSAFE;
    else if (type == "full")
      return FULL;
    else if (type == "backfillfull")
      return BACKFILLFULL;
    else if (type == "nearfull")
      return NEARFULL;
    else
      return INVALID;
  }
  double cur_ratio, physical_ratio;  ///< current utilization
  mutable int64_t injectfull = 0;
  s_names injectfull_state = NONE;
  float get_failsafe_full_ratio();
  bool _check_inject_full(DoutPrefixProvider *dpp, s_names type) const;
  bool _check_full(DoutPrefixProvider *dpp, s_names type) const;

  void check_full_status(float ratio, float pratio);
  s_names recalc_full_state(float ratio, float pratio, string &inject);
  bool _tentative_full(DoutPrefixProvider *dpp, s_names type, uint64_t adjust_used, osd_stat_t);
  bool check_failsafe_full(DoutPrefixProvider *dpp) const;
  bool check_full(DoutPrefixProvider *dpp) const;
  bool tentative_backfill_full(DoutPrefixProvider *dpp, uint64_t adjust_used, osd_stat_t);
  bool check_backfill_full(DoutPrefixProvider *dpp) const;
  bool check_nearfull(DoutPrefixProvider *dpp) const;
  bool is_failsafe_full() const;
  bool is_full() const;
  bool is_backfillfull() const;
  bool is_nearfull() const;
  bool need_fullness_update();  ///< osdmap state needs update
  void set_injectfull(s_names type, int64_t count);
  // protects access to boot_epoch, up_epoch, bind_epoch
  mutable ceph::mutex epoch_lock = ceph::make_mutex("OSDService::epoch_lock");
  epoch_t boot_epoch;  // _first_ epoch we were marked up (after this process started)
  epoch_t up_epoch;    // _most_recent_ epoch we were marked up
  epoch_t bind_epoch;  // epoch we last did a bind to new ip:ports

  /**
   * Retrieve the boot_, up_, and bind_ epochs the OSD has set.  The params
   * can be NULL if you don't care about them.
   */
  void retrieve_epochs(epoch_t *_boot_epoch, epoch_t *_up_epoch,
                       epoch_t *_bind_epoch) const;
  /**
   * Set the boot, up, and bind epochs.  Any NULL params will not be set.
   */
  void set_epochs(const epoch_t *_boot_epoch, const epoch_t *_up_epoch,
                  const epoch_t *_bind_epoch);

  epoch_t get_boot_epoch() const {
    epoch_t ret;
    retrieve_epochs(&ret, NULL, NULL);
    return ret;
  }
  epoch_t get_up_epoch() const {
    epoch_t ret;
    retrieve_epochs(NULL, &ret, NULL);
    return ret;
  }
  epoch_t get_bind_epoch() const {
    epoch_t ret;
    retrieve_epochs(NULL, NULL, &ret);
    return ret;
  }

  void request_osdmap_update(epoch_t e);
  ceph::mutex hb_stamp_lock = ceph::make_mutex("OSDService::hb_stamp_lock");

  /// osd -> heartbeat stamps
  vector<HeartbeatStampsRef> hb_stamps;

  /// get or create a ref for a peer's HeartbeatStamps
  HeartbeatStampsRef get_hb_stamps(unsigned osd);

  // Timer for readable leases
  ceph::timer<ceph::mono_clock> mono_timer = ceph::timer<ceph::mono_clock>{ceph::construct_suspended};

  void queue_renew_lease(epoch_t epoch, spg_t spgid);
  // -- stopping --
  ceph::mutex is_stopping_lock = ceph::make_mutex("OSDService::is_stopping_lock");
  ceph::condition_variable is_stopping_cond;
  enum {
    NOT_STOPPING,
    PREPARING_TO_STOP,
    STOPPING
  };
  std::atomic<int> state{NOT_STOPPING};

  int get_state() const {
    return state;
  }
  void set_state(int s) {
    state = s;
  }
  bool is_stopping() const {
    return state == STOPPING;
  }
  bool is_preparing_to_stop() const {
    return state == PREPARING_TO_STOP;
  }
  bool prepare_to_stop();
#ifdef PG_DEBUG_REFS
  ceph::mutex pgid_lock = ceph::make_mutex("OSDService::pgid_lock");
  map<spg_t, int> pgid_tracker;
  map<spg_t, PG*> live_pgs;
  void add_pgid(spg_t pgid, PG *pg);
  void remove_pgid(spg_t pgid, PG *pg);
  void dump_live_pgids();
#endif

  explicit OSDService(OSD *osd);
  ~OSDService() = default;
};
/*

  Each PG slot includes queues for events that are processing and/or waiting
  for a PG to be materialized in the slot.

  These are the constraints:

  - client ops must remain ordered by client, regardless of map epoch
  - peering messages/events from peers must remain ordered by peer
  - peering messages and client ops need not be ordered relative to each other

  - some peering events can create a pg (e.g., notify)
  - the query peering event can proceed when a PG doesn't exist

  Implementation notes:

  - everybody waits for split.  If the OSD has the parent PG it will instantiate
    the PGSlot early and mark it waiting_for_split.  Everything will wait until
    the parent is able to commit the split operation and the child PGs are
    materialized in the child slots.

  - every event has an epoch property and will wait for the OSDShard to catch
    up to that epoch.  For example, if we get a peering event from a future
    epoch, the event will wait in the slot until the local OSD has caught up.
    (We should be judicious in specifying the required epoch [by, e.g., setting
    it to the same_interval_since epoch] so that we don't wait for epochs that
    don't affect the given PG.)

  - we maintain two separate wait lists, *waiting* and *waiting_peering*.  The
    OpSchedulerItem has an is_peering() bool to determine which we use.  Waiting
    peering events are queued up by the epoch they require.

  - when we wake a PG slot (e.g., we finished split, or got a newer osdmap, or
    materialized the PG), we wake *all* waiting items.  (This could probably be
    optimized, but we don't bother.)  We always requeue peering items ahead of
    client ops.

  - some peering events are marked !peering_requires_pg (PGQuery).  If we do
    not have a PG, these are processed immediately (under the shard lock).

  - if we do not have a PG present, we check if the slot maps to the current
    host.  If so, we either queue the item and wait for the PG to materialize,
    or (if the event is a pg-creating event like PGNotify) we materialize the PG.

  - when we advance the osdmap on the OSDShard, we scan pg slots and
    discard any slots with no pg (and not waiting_for_split) that no
    longer map to the current host.

*/
struct OSDShardPGSlot {
  using OpSchedulerItem = ceph::osd::scheduler::OpSchedulerItem;
  PGRef pg;                           ///< pg reference
  deque<OpSchedulerItem> to_process;  ///< order items for this slot
  int num_running = 0;                ///< _process threads doing pg lookup/lock

  deque<OpSchedulerItem> waiting;     ///< waiting for pg (or map + pg)

  /// waiting for map (peering evt)
  map<epoch_t,deque<OpSchedulerItem>> waiting_peering;

  /// incremented by wake_pg_waiters; indicates racing _process threads
  /// should bail out (their op has been requeued)
  uint64_t requeue_seq = 0;

  /// waiting for split child to materialize in these epoch(s)
  set<epoch_t> waiting_for_split;

  epoch_t epoch = 0;
  boost::intrusive::set_member_hook<> pg_epoch_item;

  /// waiting for a merge (source or target) by this epoch
  epoch_t waiting_for_merge_epoch = 0;
};
;
982 string sdata_wait_lock_name
;
983 ceph::mutex sdata_wait_lock
;
984 ceph::condition_variable sdata_cond
;
986 ceph::mutex osdmap_lock
; ///< protect shard_osdmap updates vs users w/o shard_lock
987 OSDMapRef shard_osdmap
;
989 OSDMapRef
get_osdmap() {
990 std::lock_guard
l(osdmap_lock
);
  string shard_lock_name;
  ceph::mutex shard_lock;   ///< protects remaining members below

  /// map of slots for each spg_t.  maintains ordering of items dequeued
  /// from the scheduler while the _process thread drops the shard lock to
  /// acquire the pg lock.  stale slots are removed by consume_map.
  unordered_map<spg_t,unique_ptr<OSDShardPGSlot>> pg_slots;

  struct pg_slot_compare_by_epoch {
    bool operator()(const OSDShardPGSlot& l, const OSDShardPGSlot& r) const {
      return l.epoch < r.epoch;
    }
  };

  /// maintain an ordering of pg slots by pg epoch
  boost::intrusive::multiset<
    OSDShardPGSlot,
    boost::intrusive::member_hook<
      OSDShardPGSlot,
      boost::intrusive::set_member_hook<>,
      &OSDShardPGSlot::pg_epoch_item>,
    boost::intrusive::compare<pg_slot_compare_by_epoch>> pg_slots_by_epoch;
  int waiting_for_min_pg_epoch = 0;
  ceph::condition_variable min_pg_epoch_cond;
  ceph::osd::scheduler::OpSchedulerRef scheduler;

  bool stop_waiting = false;

  ContextQueue context_queue;

  void _attach_pg(OSDShardPGSlot *slot, PG *pg);
  void _detach_pg(OSDShardPGSlot *slot);

  void update_pg_epoch(OSDShardPGSlot *slot, epoch_t epoch);
  epoch_t get_min_pg_epoch();
  void wait_min_pg_epoch(epoch_t need);

  /// return newest epoch we are waiting for
  epoch_t get_max_waiting_epoch();

  /// push osdmap into shard
  void consume_map(
    const OSDMapRef& osdmap,
    unsigned *pushes_to_free);

  void _wake_pg_slot(spg_t pgid, OSDShardPGSlot *slot);
  void identify_splits_and_merges(
    const OSDMapRef& as_of_osdmap,
    set<pair<spg_t,epoch_t>> *split_children,
    set<pair<spg_t,epoch_t>> *merge_pgs);
  void _prime_splits(set<pair<spg_t,epoch_t>> *pgids);
  void prime_splits(const OSDMapRef& as_of_osdmap,
                    set<pair<spg_t,epoch_t>> *pgids);
  void prime_merges(const OSDMapRef& as_of_osdmap,
                    set<pair<spg_t,epoch_t>> *merge_pgs);
  void register_and_wake_split_child(PG *pg);
  void unprime_split_children(spg_t parent, unsigned old_pg_num);
};
class OSD : public Dispatcher,
            public md_config_obs_t {
  using OpSchedulerItem = ceph::osd::scheduler::OpSchedulerItem;

  ceph::mutex osd_lock = ceph::make_mutex("OSD::osd_lock");
  SafeTimer tick_timer;    // safe timer (osd_lock)

  // Tick timer for the stuff that does not need osd_lock
  ceph::mutex tick_timer_lock = ceph::make_mutex("OSD::tick_timer_lock");
  SafeTimer tick_timer_without_osd_lock;
  std::string gss_ktfile_client{};
public:
  // config observer bits
  const char** get_tracked_conf_keys() const override;
  void handle_conf_change(const ConfigProxy& conf,
                          const std::set<std::string> &changed) override;
  void update_log_config();
  void check_config();

protected:
  const double OSD_TICK_INTERVAL = { 1.0 };
  double get_tick_interval() const;
  Messenger *cluster_messenger;
  Messenger *client_messenger;
  Messenger *objecter_messenger;
  MonClient *monc;  // check the "monc helpers" list before accessing directly

  PerfCounters *logger;
  PerfCounters *recoverystate_perf;

  FuseStore *fuse_store = nullptr;

  LogClient log_client;

  std::string dev_path, journal_path;

  ceph_release_t last_require_osd_release{ceph_release_t::unknown};

  size_t numa_cpu_set_size = 0;
  cpu_set_t numa_cpu_set;

  bool store_is_rotational = true;
  bool journal_is_rotational = true;

  ZTracer::Endpoint trace_endpoint;
  void create_logger();
  void create_recoverystate_perf();
  void tick_without_osd_lock();
  void _dispatch(Message *m);
  void dispatch_op(OpRequestRef op);

  void check_osdmap_features();

  // asok
  friend class OSDSocketHook;
  class OSDSocketHook *asok_hook;
  void asok_command(
    std::string_view prefix,
    const cmdmap_t& cmdmap,
    Formatter *f,
    const bufferlist& inbl,
    std::function<void(int,const std::string&,bufferlist&)> on_finish);
public:
  int get_nodeid() { return whoami; }

  static ghobject_t get_osdmap_pobject_name(epoch_t epoch) {
    char foo[20];
    snprintf(foo, sizeof(foo), "osdmap.%d", epoch);
    return ghobject_t(hobject_t(sobject_t(object_t(foo), 0)));
  }
  static ghobject_t get_inc_osdmap_pobject_name(epoch_t epoch) {
    char foo[22];
    snprintf(foo, sizeof(foo), "inc_osdmap.%d", epoch);
    return ghobject_t(hobject_t(sobject_t(object_t(foo), 0)));
  }

  static ghobject_t make_snapmapper_oid() {
    return ghobject_t(hobject_t(
      sobject_t(
        object_t("snapmapper"),
        0)));
  }
  static ghobject_t make_purged_snaps_oid() {
    return ghobject_t(hobject_t(
      sobject_t(
        object_t("purged_snaps"),
        0)));
  }

  static ghobject_t make_pg_log_oid(spg_t pg) {
    stringstream ss;
    ss << "pglog_" << pg;
    string s;
    getline(ss, s);
    return ghobject_t(hobject_t(sobject_t(object_t(s.c_str()), 0)));
  }
  static ghobject_t make_pg_biginfo_oid(spg_t pg) {
    stringstream ss;
    ss << "pginfo_" << pg;
    string s;
    getline(ss, s);
    return ghobject_t(hobject_t(sobject_t(object_t(s.c_str()), 0)));
  }
  static ghobject_t make_infos_oid() {
    hobject_t oid(sobject_t("infos", CEPH_NOSNAP));
    return ghobject_t(oid);
  }

  static ghobject_t make_final_pool_info_oid(int64_t pool) {
    return ghobject_t(
      hobject_t(
        sobject_t(
          object_t(string("final_pool_") + stringify(pool)),
          CEPH_NOSNAP)));
  }

  static ghobject_t make_pg_num_history_oid() {
    return ghobject_t(hobject_t(sobject_t("pg_num_history", CEPH_NOSNAP)));
  }

  static void recursive_remove_collection(CephContext* cct,
                                          ObjectStore *store,
                                          spg_t pgid,
                                          coll_t tmp);
  /**
   * get_osd_initial_compat_set()
   *
   * Get the initial feature set for this OSD.  Features
   * here are automatically upgraded.
   *
   * Return value: Initial osd CompatSet
   */
  static CompatSet get_osd_initial_compat_set();

  /**
   * get_osd_compat_set()
   *
   * Get all features supported by this OSD
   *
   * Return value: CompatSet of all supported features
   */
  static CompatSet get_osd_compat_set();

private:
  class C_Tick_WithoutOSDLock;

  // -- config settings --
  float m_osd_pg_epoch_max_lag_factor;

  // -- superblock --
  OSDSuperblock superblock;

  void write_superblock();
  void write_superblock(ObjectStore::Transaction& t);
  int read_superblock();

  void clear_temp_objects();

  CompatSet osd_compat;
  // -- state --
  enum {
    STATE_INITIALIZING = 1,
    STATE_PREBOOT,
    STATE_BOOTING,
    STATE_ACTIVE,
    STATE_STOPPING,
    STATE_WAITING_FOR_HEALTHY
  };

  static const char *get_state_name(int s) {
    switch (s) {
    case STATE_INITIALIZING: return "initializing";
    case STATE_PREBOOT: return "preboot";
    case STATE_BOOTING: return "booting";
    case STATE_ACTIVE: return "active";
    case STATE_STOPPING: return "stopping";
    case STATE_WAITING_FOR_HEALTHY: return "waiting_for_healthy";
    default: return "???";
    }
  }

  std::atomic<int> state{STATE_INITIALIZING};

  int get_state() const {
    return state;
  }
  void set_state(int s) {
    state = s;
  }
  bool is_initializing() const {
    return state == STATE_INITIALIZING;
  }
  bool is_preboot() const {
    return state == STATE_PREBOOT;
  }
  bool is_booting() const {
    return state == STATE_BOOTING;
  }
  bool is_active() const {
    return state == STATE_ACTIVE;
  }
  bool is_stopping() const {
    return state == STATE_STOPPING;
  }
  bool is_waiting_for_healthy() const {
    return state == STATE_WAITING_FOR_HEALTHY;
  }
  ShardedThreadPool osd_op_tp;

  void get_latest_osdmap();

  // -- sessions --
  void dispatch_session_waiting(const ceph::ref_t<Session>& session, OSDMapRef osdmap);

  ceph::mutex session_waiting_lock = ceph::make_mutex("OSD::session_waiting_lock");
  set<ceph::ref_t<Session>> session_waiting_for_map;

  /// Caller assumes refs for included Sessions
  void get_sessions_waiting_for_map(set<ceph::ref_t<Session>> *out) {
    std::lock_guard l(session_waiting_lock);
    out->swap(session_waiting_for_map);
  }
  void register_session_waiting_on_map(const ceph::ref_t<Session>& session) {
    std::lock_guard l(session_waiting_lock);
    session_waiting_for_map.insert(session);
  }
  void clear_session_waiting_on_map(const ceph::ref_t<Session>& session) {
    std::lock_guard l(session_waiting_lock);
    session_waiting_for_map.erase(session);
  }
  void dispatch_sessions_waiting_on_map() {
    set<ceph::ref_t<Session>> sessions_to_check;
    get_sessions_waiting_for_map(&sessions_to_check);
    for (auto i = sessions_to_check.begin();
         i != sessions_to_check.end();
         sessions_to_check.erase(i++)) {
      std::lock_guard l{(*i)->session_dispatch_lock};
      dispatch_session_waiting(*i, get_osdmap());
    }
  }
  void session_handle_reset(const ceph::ref_t<Session>& session) {
    std::lock_guard l(session->session_dispatch_lock);
    clear_session_waiting_on_map(session);

    session->clear_backoffs();

    /* Messages have connection refs, we need to clear the
     * connection->session->message->connection
     * cycles which result.
     */
    session->waiting_on_map.clear_and_dispose(TrackedOp::Putter());
  }
  /**
   * @defgroup monc helpers
   * @{
   * Right now we only have the one
   */

  /**
   * Ask the Monitors for a sequence of OSDMaps.
   *
   * @param epoch The epoch to start with when replying
   * @param force_request True if this request forces a new subscription to
   * the monitors; false if an outstanding request that encompasses it is
   * sufficient.
   */
  void osdmap_subscribe(version_t epoch, bool force_request);
  /** @} monc helpers */

  ceph::mutex osdmap_subscribe_lock = ceph::make_mutex("OSD::osdmap_subscribe_lock");
  epoch_t latest_subscribed_epoch{0};
  /// information about a heartbeat peer
  struct HeartbeatInfo {
    ConnectionRef con_front;   ///< peer connection (front)
    ConnectionRef con_back;    ///< peer connection (back)
    utime_t first_tx;          ///< time we sent our first ping request
    utime_t last_tx;           ///< last time we sent a ping request
    utime_t last_rx_front;     ///< last time we got a ping reply on the front side
    utime_t last_rx_back;      ///< last time we got a ping reply on the back side
    epoch_t epoch;             ///< most recent epoch we wanted this peer
    /// number of connections we send and receive heartbeat pings/replies
    static constexpr int HEARTBEAT_MAX_CONN = 2;
    /// history of inflight pings, arranged by the timestamp we sent them
    /// send time -> deadline -> remaining replies
    map<utime_t, pair<utime_t, int>> ping_history;

    utime_t hb_interval_start;
    uint32_t hb_average_count = 0;
    uint32_t hb_index = 0;

    uint32_t hb_total_back = 0;
    uint32_t hb_min_back = UINT_MAX;
    uint32_t hb_max_back = 0;
    vector<uint32_t> hb_back_pingtime;
    vector<uint32_t> hb_back_min;
    vector<uint32_t> hb_back_max;

    uint32_t hb_total_front = 0;
    uint32_t hb_min_front = UINT_MAX;
    uint32_t hb_max_front = 0;
    vector<uint32_t> hb_front_pingtime;
    vector<uint32_t> hb_front_min;
    vector<uint32_t> hb_front_max;
    bool is_stale(utime_t stale) const {
      if (ping_history.empty()) {
        return false;
      }
      utime_t oldest_deadline = ping_history.begin()->second.first;
      return oldest_deadline <= stale;
    }

    bool is_unhealthy(utime_t now) const {
      if (ping_history.empty()) {
        /// we haven't sent a ping yet, or we have got all the replies;
        /// either way we are safe and healthy for now
        return false;
      }
      utime_t oldest_deadline = ping_history.begin()->second.first;
      return now > oldest_deadline;
    }

    bool is_healthy(utime_t now) const {
      if (last_rx_front == utime_t() || last_rx_back == utime_t()) {
        // don't declare ourselves healthy until we have received the first
        // replies from both the front and back connections
        return false;
      }
      return !is_unhealthy(now);
    }

    void clear_mark_down(Connection *except = nullptr) {
      if (con_back && con_back != except) {
        con_back->mark_down();
        con_back->clear_priv();
        con_back.reset(nullptr);
      }
      if (con_front && con_front != except) {
        con_front->mark_down();
        con_front->clear_priv();
        con_front.reset(nullptr);
      }
    }
  };
= ceph::make_mutex("OSD::heartbeat_lock");
1437 map
<int, int> debug_heartbeat_drops_remaining
;
1438 ceph::condition_variable heartbeat_cond
;
1439 bool heartbeat_stop
;
1440 std::atomic
<bool> heartbeat_need_update
;
1441 map
<int,HeartbeatInfo
> heartbeat_peers
; ///< map of osd id to HeartbeatInfo
1442 utime_t last_mon_heartbeat
;
1443 Messenger
*hb_front_client_messenger
;
1444 Messenger
*hb_back_client_messenger
;
1445 Messenger
*hb_front_server_messenger
;
1446 Messenger
*hb_back_server_messenger
;
1447 utime_t last_heartbeat_resample
; ///< last time we chose random peers in waiting-for-healthy state
1448 double daily_loadavg
;
1449 ceph::mono_time startup_time
;
1451 // Track ping repsonse times using vector as a circular buffer
1452 // MUST BE A POWER OF 2
1453 const uint32_t hb_vector_size
= 16;
  void _add_heartbeat_peer(int p);
  void _remove_heartbeat_peer(int p);
  bool heartbeat_reset(Connection *con);
  void maybe_update_heartbeat_peers();
  void reset_heartbeat_peers(bool all);
  bool heartbeat_peers_need_update() {
    return heartbeat_need_update.load();
  }
  void heartbeat_set_peers_need_update() {
    heartbeat_need_update.store(true);
  }
  void heartbeat_clear_peers_need_update() {
    heartbeat_need_update.store(false);
  }
  void heartbeat_check();
  void heartbeat_entry();
  void need_heartbeat_peer_update();

  void heartbeat_kick() {
    std::lock_guard l(heartbeat_lock);
    heartbeat_cond.notify_all();
  }

  struct T_Heartbeat : public Thread {
    OSD *osd;
    explicit T_Heartbeat(OSD *o) : osd(o) {}
    void *entry() override {
      osd->heartbeat_entry();
      return 0;
    }
  } heartbeat_thread;

  bool heartbeat_dispatch(Message *m);
  struct HeartbeatDispatcher : public Dispatcher {
    OSD *osd;
    explicit HeartbeatDispatcher(OSD *o) : Dispatcher(o->cct), osd(o) {}

    bool ms_can_fast_dispatch_any() const override { return true; }
    bool ms_can_fast_dispatch(const Message *m) const override {
      switch (m->get_type()) {
      case CEPH_MSG_PING:
      case MSG_OSD_PING:
        return true;
      default:
        return false;
      }
    }
    void ms_fast_dispatch(Message *m) override {
      osd->heartbeat_dispatch(m);
    }
    bool ms_dispatch(Message *m) override {
      return osd->heartbeat_dispatch(m);
    }
    bool ms_handle_reset(Connection *con) override {
      return osd->heartbeat_reset(con);
    }
    void ms_handle_remote_reset(Connection *con) override {}
    bool ms_handle_refused(Connection *con) override {
      return osd->ms_handle_refused(con);
    }
    int ms_handle_authentication(Connection *con) override {
      return true;
    }
  } heartbeat_dispatcher;
  list<OpRequestRef> finished;

  void take_waiters(list<OpRequestRef>& ls) {
    ceph_assert(ceph_mutex_is_locked(osd_lock));
    finished.splice(finished.end(), ls);
  }

  // -- op tracking --
  OpTracker op_tracker;
  void test_ops(std::string command, std::string args, ostream& ss);
  friend class TestOpsSocketHook;
  TestOpsSocketHook *test_ops_hook;
  friend struct C_FinishSplits;
  friend struct C_OpenPGs;
  /*
   * The ordered op delivery chain is:
   *
   *   fast dispatch -> scheduler back
   *                    scheduler front <-> to_process back
   *                                        to_process front -> RunVis(item)
   *
   * The scheduler is per-shard, and to_process is per pg_slot.  Items can be
   * pushed back up into to_process and/or the scheduler while order is
   * preserved.
   *
   * Multiple worker threads can operate on each shard.
   *
   * Under normal circumstances, num_running == to_process.size().  There are
   * two times when that is not true: (1) when waiting_for_pg == true and
   * to_process is accumulating requests that are waiting for the pg to be
   * instantiated; in that case they will all get requeued together by
   * wake_pg_waiters, and (2) when wake_pg_waiters just ran and already
   * requeued the items.
   */
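
  // Illustrative sketch (an assumption, not header code) of the retry path
  // in the chain above: a dequeued item that cannot make progress is pushed
  // back to the front so the original order is preserved.
  //
  //   // worker thread, holding the shard lock:
  //   OpSchedulerItem item = std::move(slot->to_process.front());
  //   slot->to_process.pop_front();
  //   if (!can_run(item))                             // hypothetical check
  //     slot->to_process.push_front(std::move(item)); // requeue at front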
  friend class ceph::osd::scheduler::PGOpItem;
  friend class ceph::osd::scheduler::PGPeeringItem;
  friend class ceph::osd::scheduler::PGRecovery;
  friend class ceph::osd::scheduler::PGDelete;
  class ShardedOpWQ
    : public ShardedThreadPool::ShardedWQ<OpSchedulerItem>
  {
    OSD *osd;

  public:
    ShardedOpWQ(OSD *o,
                time_t ti,
                time_t si,
                ShardedThreadPool* tp)
      : ShardedThreadPool::ShardedWQ<OpSchedulerItem>(ti, si, tp),
        osd(o) {
    }

    void _add_slot_waiter(
      spg_t pgid,
      OSDShardPGSlot *slot,
      OpSchedulerItem&& qi);

    /// try to do some work
    void _process(uint32_t thread_index, heartbeat_handle_d *hb) override;

    /// enqueue a new item
    void _enqueue(OpSchedulerItem&& item) override;

    /// requeue an old item (at the front of the line)
    void _enqueue_front(OpSchedulerItem&& item) override;
    void return_waiting_threads() override {
      for (uint32_t i = 0; i < osd->num_shards; i++) {
        OSDShard* sdata = osd->shards[i];
        assert(NULL != sdata);
        std::scoped_lock l{sdata->sdata_wait_lock};
        sdata->stop_waiting = true;
        sdata->sdata_cond.notify_all();
      }
    }

    void stop_return_waiting_threads() override {
      for (uint32_t i = 0; i < osd->num_shards; i++) {
        OSDShard* sdata = osd->shards[i];
        assert(NULL != sdata);
        std::scoped_lock l{sdata->sdata_wait_lock};
        sdata->stop_waiting = false;
      }
    }
*f
) {
1616 for(uint32_t i
= 0; i
< osd
->num_shards
; i
++) {
1617 auto &&sdata
= osd
->shards
[i
];
1619 char queue_name
[32] = {0};
1620 snprintf(queue_name
, sizeof(queue_name
), "%s%" PRIu32
, "OSD:ShardedOpWQ:", i
);
1621 ceph_assert(NULL
!= sdata
);
1623 std::scoped_lock l
{sdata
->shard_lock
};
1624 f
->open_object_section(queue_name
);
1625 sdata
->scheduler
->dump(*f
);
1630 bool is_shard_empty(uint32_t thread_index
) override
{
1631 uint32_t shard_index
= thread_index
% osd
->num_shards
;
1632 auto &&sdata
= osd
->shards
[shard_index
];
1634 std::lock_guard
l(sdata
->shard_lock
);
1635 if (thread_index
< osd
->num_shards
) {
1636 return sdata
->scheduler
->empty() && sdata
->context_queue
.empty();
1638 return sdata
->scheduler
->empty();
1642 void handle_oncommits(list
<Context
*>& oncommits
) {
1643 for (auto p
: oncommits
) {
  void enqueue_op(spg_t pg, OpRequestRef&& op, epoch_t epoch);
  void dequeue_op(
    PGRef pg, OpRequestRef op,
    ThreadPool::TPHandle &handle);

  void enqueue_peering_evt(
    spg_t pgid,
    PGPeeringEventRef ref);
  void dequeue_peering_evt(
    OSDShard *sdata,
    PG *pg,
    PGPeeringEventRef ref,
    ThreadPool::TPHandle& handle);

  void dequeue_delete(
    OSDShard *sdata,
    PG *pg,
    epoch_t epoch,
    ThreadPool::TPHandle& handle);

  friend class OSDShard;
  friend class PrimaryLogPG;
  // TODO: switch to std::atomic<OSDMapRef> when C++20 is available.
  OSDMapRef _osdmap;
  void set_osdmap(OSDMapRef osdmap) {
    std::atomic_store(&_osdmap, osdmap);
  }
  OSDMapRef get_osdmap() const {
    return std::atomic_load(&_osdmap);
  }
  epoch_t get_osdmap_epoch() const {
    // XXX: performance?
    auto osdmap = get_osdmap();
    return osdmap ? osdmap->get_epoch() : 0;
  }
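
  // A minimal sketch of the C++20 form hinted at in the TODO above (an
  // assumption about the eventual shape, not current code):
  //
  //   std::atomic<std::shared_ptr<const OSDMap>> _osdmap;
  //   void set_osdmap(OSDMapRef o) { _osdmap.store(std::move(o)); }
  //   OSDMapRef get_osdmap() const { return _osdmap.load(); }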
  pool_pg_num_history_t pg_num_history;

  ceph::shared_mutex map_lock = ceph::make_shared_mutex("OSD::map_lock");
  list<OpRequestRef> waiting_for_osdmap;
  deque<utime_t> osd_markdown_log;

  friend struct send_map_on_destruct;

  void wait_for_new_map(OpRequestRef op);
  void handle_osd_map(class MOSDMap *m);
  void _committed_osd_maps(epoch_t first, epoch_t last, class MOSDMap *m);
  void trim_maps(epoch_t oldest, int nreceived, bool skip_maps);
  void note_down_osd(int osd);
  void note_up_osd(int osd);
  friend class C_OnMapCommit;

  bool advance_pg(
    epoch_t advance_to,
    PG *pg,
    ThreadPool::TPHandle &handle,
    PeeringCtx &rctx);
  void consume_map();
  void activate_map();
  // osd map cache (past osd maps)
  OSDMapRef get_map(epoch_t e) {
    return service.get_map(e);
  }
  OSDMapRef add_map(OSDMap *o) {
    return service.add_map(o);
  }
  bool get_map_bl(epoch_t e, bufferlist& bl) {
    return service.get_map_bl(e, bl);
  }

  // -- shards --
  vector<OSDShard*> shards;
  uint32_t num_shards = 0;

  void inc_num_pgs() {
    ++num_pgs;
  }
  void dec_num_pgs() {
    --num_pgs;
  }
  int get_num_pgs() const {
    return num_pgs;
  }
  ceph::mutex merge_lock = ceph::make_mutex("OSD::merge_lock");
  /// merge epoch -> target pgid -> source pgid -> pg
  map<epoch_t,map<spg_t,map<spg_t,PGRef>>> merge_waiters;

  bool add_merge_waiter(OSDMapRef nextmap, spg_t target, PGRef source,
                        unsigned need);
  // -- placement groups --
  std::atomic<size_t> num_pgs = {0};

  std::mutex pending_creates_lock;
  using create_from_osd_t = std::pair<spg_t, bool /* is primary */>;
  std::set<create_from_osd_t> pending_creates_from_osd;
  unsigned pending_creates_from_mon = 0;

  PGRecoveryStats pg_recovery_stats;

  PGRef _lookup_pg(spg_t pgid);
  PGRef _lookup_lock_pg(spg_t pgid);
  void register_pg(PGRef pg);
  bool try_finish_pg_delete(PG *pg, unsigned old_pg_num);

  void _get_pgs(vector<PGRef> *v, bool clear_too=false);
  void _get_pgids(vector<spg_t> *v);

  PGRef lookup_lock_pg(spg_t pgid);

  std::set<int64_t> get_mapped_pools();

  PG* _make_pg(OSDMapRef createmap, spg_t pgid);

  bool maybe_wait_for_max_pg(const OSDMapRef& osdmap,
                             spg_t pgid, bool is_mon_create);
  void resume_creating_pg();
  /// build initial pg history and intervals on create
  void build_initial_pg_history(
    spg_t pgid,
    epoch_t created,
    utime_t created_stamp,
    pg_history_t *h,
    PastIntervals *pi);

  epoch_t last_pg_create_epoch;

  void handle_pg_create(OpRequestRef op);

  void split_pgs(
    PG *parent,
    const set<spg_t> &childpgids, set<PGRef> *out_pgs,
    OSDMapRef curmap,
    OSDMapRef nextmap,
    PeeringCtx &rctx);
  void _finish_splits(set<PGRef>& pgs);
  // == monitor interaction ==
  ceph::mutex mon_report_lock = ceph::make_mutex("OSD::mon_report_lock");
  utime_t last_mon_report;
  Finisher boot_finisher;

  // -- boot --
  void _got_mon_epochs(epoch_t oldest, epoch_t newest);
  void _preboot(epoch_t oldest, epoch_t newest);

  void _collect_metadata(map<string,string> *pmeta);
  void _get_purged_snaps();
  void handle_get_purged_snaps_reply(MMonGetPurgedSnapsReply *r);

  void start_waiting_for_healthy();

  void send_full_update();

  friend struct C_OSD_GetVersion;

  // -- alive --
  epoch_t up_thru_wanted;

  void queue_want_up_thru(epoch_t want);
  // -- full map requests --
  epoch_t requested_full_first, requested_full_last;

  void request_full_map(epoch_t first, epoch_t last);
  void rerequest_full_maps() {
    epoch_t first = requested_full_first;
    epoch_t last = requested_full_last;
    requested_full_first = 0;
    requested_full_last = 0;
    request_full_map(first, last);
  }

  void got_full_map(epoch_t e);
  map<int,utime_t> failure_queue;
  map<int,pair<utime_t,entity_addrvec_t> > failure_pending;

  void requeue_failures();
  void send_failures();
  void send_still_alive(epoch_t epoch, int osd, const entity_addrvec_t &addrs);
  void cancel_pending_failures();

  ceph::coarse_mono_clock::time_point last_sent_beacon;
  ceph::mutex min_last_epoch_clean_lock = ceph::make_mutex("OSD::min_last_epoch_clean_lock");
  epoch_t min_last_epoch_clean = 0;
  // which pgs were scanned for min_lec
  std::vector<pg_t> min_last_epoch_clean_pgs;
  void send_beacon(const ceph::coarse_mono_clock::time_point& now);

  ceph_tid_t get_tid() {
    return service.get_tid();
  }

  double scrub_sleep_time(bool must_scrub);
  // -- generic pg peering --
  PeeringCtx create_context();
  void dispatch_context(PeeringCtx &ctx, PG *pg, OSDMapRef curmap,
                        ThreadPool::TPHandle *handle = NULL);

  bool require_mon_peer(const Message *m);
  bool require_mon_or_mgr_peer(const Message *m);
  bool require_osd_peer(const Message *m);
  /***
   * Verifies that we were alive in the given epoch, and that
   * we still are alive.
   */
  bool require_self_aliveness(const Message *m, epoch_t alive_since);
  /**
   * Verifies that the OSD who sent the given op has the same
   * address as in the given map.
   * @pre op was sent by an OSD using the cluster messenger
   */
  bool require_same_peer_instance(const Message *m, const OSDMapRef& map,
                                  bool is_fast_dispatch);

  bool require_same_or_newer_map(OpRequestRef& op, epoch_t e,
                                 bool is_fast_dispatch);
  void handle_fast_pg_create(MOSDPGCreate2 *m);
  void handle_fast_pg_query(MOSDPGQuery *m);
  void handle_pg_query_nopg(const MQuery& q);
  void handle_fast_pg_notify(MOSDPGNotify *m);
  void handle_pg_notify_nopg(const MNotifyRec& q);
  void handle_fast_pg_info(MOSDPGInfo *m);
  void handle_fast_pg_remove(MOSDPGRemove *m);

  // used by OSDShard
  PGRef handle_pg_create_info(const OSDMapRef& osdmap, const PGCreateInfo *info);

  void handle_fast_force_recovery(MOSDForceRecovery *m);

  void handle_command(class MCommand *m);
  // -- pg recovery --
  void do_recovery(PG *pg, epoch_t epoch_queued, uint64_t pushes_reserved,
                   ThreadPool::TPHandle &handle);

  // -- scrubbing --
  void resched_all_scrubs();
  bool scrub_random_backoff();
  bool scrub_load_below_threshold();
  bool scrub_time_permit(utime_t now);

  // -- status reporting --
  MPGStats *collect_pg_stats();
  std::vector<DaemonHealthMetric> get_health_metrics();
  bool ms_can_fast_dispatch_any() const override { return true; }
  bool ms_can_fast_dispatch(const Message *m) const override {
    switch (m->get_type()) {
    case CEPH_MSG_PING:
    case CEPH_MSG_OSD_OP:
    case CEPH_MSG_OSD_BACKOFF:
    case MSG_OSD_SCRUB2:
    case MSG_OSD_FORCE_RECOVERY:
    case MSG_MON_COMMAND:
    case MSG_OSD_PG_CREATE2:
    case MSG_OSD_PG_QUERY:
    case MSG_OSD_PG_QUERY2:
    case MSG_OSD_PG_INFO:
    case MSG_OSD_PG_INFO2:
    case MSG_OSD_PG_NOTIFY:
    case MSG_OSD_PG_NOTIFY2:
    case MSG_OSD_PG_LOG:
    case MSG_OSD_PG_TRIM:
    case MSG_OSD_PG_REMOVE:
    case MSG_OSD_BACKFILL_RESERVE:
    case MSG_OSD_RECOVERY_RESERVE:
    case MSG_OSD_REPOP:
    case MSG_OSD_REPOPREPLY:
    case MSG_OSD_PG_PUSH:
    case MSG_OSD_PG_PULL:
    case MSG_OSD_PG_PUSH_REPLY:
    case MSG_OSD_PG_SCAN:
    case MSG_OSD_PG_BACKFILL:
    case MSG_OSD_PG_BACKFILL_REMOVE:
    case MSG_OSD_EC_WRITE:
    case MSG_OSD_EC_WRITE_REPLY:
    case MSG_OSD_EC_READ:
    case MSG_OSD_EC_READ_REPLY:
    case MSG_OSD_SCRUB_RESERVE:
    case MSG_OSD_REP_SCRUB:
    case MSG_OSD_REP_SCRUBMAP:
    case MSG_OSD_PG_UPDATE_LOG_MISSING:
    case MSG_OSD_PG_UPDATE_LOG_MISSING_REPLY:
    case MSG_OSD_PG_RECOVERY_DELETE:
    case MSG_OSD_PG_RECOVERY_DELETE_REPLY:
    case MSG_OSD_PG_LEASE:
    case MSG_OSD_PG_LEASE_ACK:
      return true;
    default:
      return false;
    }
  }
  void ms_fast_dispatch(Message *m) override;
  bool ms_dispatch(Message *m) override;
  void ms_handle_connect(Connection *con) override;
  void ms_handle_fast_connect(Connection *con) override;
  void ms_handle_fast_accept(Connection *con) override;
  int ms_handle_authentication(Connection *con) override;
  bool ms_handle_reset(Connection *con) override;
  void ms_handle_remote_reset(Connection *con) override {}
  bool ms_handle_refused(Connection *con) override;
public:
  /* internal and external can point to the same messenger; they will still
   * be cleaned up properly */
  OSD(CephContext *cct_,
      ObjectStore *store_,
      int id,
      Messenger *internal,
      Messenger *external,
      Messenger *hb_front_client,
      Messenger *hb_back_client,
      Messenger *hb_front_server,
      Messenger *hb_back_server,
      Messenger *osdc_messenger,
      MonClient *mc, const std::string &dev, const std::string &jdev);
  ~OSD() override;

  // static bits
  static int mkfs(CephContext *cct, ObjectStore *store, uuid_d fsid, int whoami, std::string osdspec_affinity);
  /* remove any non-user xattrs from a map of them */
  void filter_xattrs(map<string, bufferptr>& attrs) {
    for (map<string, bufferptr>::iterator iter = attrs.begin();
         iter != attrs.end();
         ) {
      if (('_' != iter->first.at(0)) || (iter->first.size() == 1))
        attrs.erase(iter++);
      else
        ++iter;
    }
  }
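
  // Usage sketch (illustrative): only attribute names that start with '_'
  // and are longer than one character survive the filter above.
  //
  //   map<string, bufferptr> attrs;  // e.g. {"_user_key", "rados.internal"}
  //   filter_xattrs(attrs);          // "rados.internal" is erased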
private:
  int mon_cmd_maybe_osd_create(string &cmd);
  int update_crush_device_class();
  int update_crush_location();

  static int write_meta(CephContext *cct,
                        ObjectStore *store,
                        uuid_d& cluster_fsid, uuid_d& osd_fsid, int whoami, std::string& osdspec_affinity);

  void handle_scrub(struct MOSDScrub *m);
  void handle_fast_scrub(struct MOSDScrub2 *m);
  void handle_osd_ping(class MOSDPing *m);
  size_t get_num_cache_shards();
  int get_num_op_shards();
  int get_num_op_threads();

  float get_osd_recovery_sleep();
  float get_osd_delete_sleep();
  float get_osd_snap_trim_sleep();

  int get_recovery_max_active();

  void scrub_purged_snaps();
  void probe_smart(const string& devid, ostream& ss);
public:
  static int peek_meta(ObjectStore *store,
                       std::string *magic,
                       uuid_d *cluster_fsid,
                       uuid_d *osd_fsid,
                       int *whoami,
                       ceph_release_t *min_osd_release);

  int enable_disable_fuse(bool stop);
  int set_numa_affinity();

  void suicide(int exitcode);

  void handle_signal(int signum);
  /// check if we can throw out op from a disconnected client
  static bool op_is_discardable(const MOSDOp *m);

public:
  OSDService service;
  friend class OSDService;

private:
  void set_perf_queries(const ConfigPayload &config_payload);
  MetricPayload get_perf_reports();

  ceph::mutex m_perf_queries_lock = ceph::make_mutex("OSD::m_perf_queries_lock");
  std::list<OSDPerfMetricQuery> m_perf_queries;
  std::map<OSDPerfMetricQuery, OSDPerfMetricLimits> m_perf_limits;
};
// compatibility of the executable
extern const CompatSet::Feature ceph_osd_feature_compat[];
extern const CompatSet::Feature ceph_osd_feature_ro_compat[];
extern const CompatSet::Feature ceph_osd_feature_incompat[];

#endif // CEPH_OSD_H