// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */

#ifndef CEPH_OSD_H
#define CEPH_OSD_H

#include "msg/Dispatcher.h"

#include "common/Mutex.h"
#include "common/RWLock.h"
#include "common/Timer.h"
#include "common/WorkQueue.h"
#include "common/AsyncReserver.h"
#include "common/ceph_context.h"
#include "common/zipkin_trace.h"

#include "mgr/MgrClient.h"

#include "os/ObjectStore.h"

#include "auth/KeyRing.h"
#include "osd/ClassHandler.h"

#include "include/CompatSet.h"

#include "OpRequest.h"

#include "osd/PGQueueable.h"

#include "include/memory.h"

#include "include/unordered_map.h"

#include "common/shared_cache.hpp"
#include "common/simple_cache.hpp"
#include "common/sharedptr_registry.hpp"
#include "common/WeightedPriorityQueue.h"
#include "common/PrioritizedQueue.h"
#include "osd/mClockOpClassQueue.h"
#include "osd/mClockClientQueue.h"
#include "messages/MOSDOp.h"
#include "include/Spinlock.h"
#include "common/EventTrace.h"

#define CEPH_OSD_PROTOCOL    10 /* cluster internal */
  l_osd_op_r_lat_outb_hist,
  l_osd_op_r_process_lat,
  l_osd_op_r_prepare_lat,
  l_osd_op_w_lat_inb_hist,
  l_osd_op_w_process_lat,
  l_osd_op_w_prepare_lat,
  l_osd_op_rw_lat_inb_hist,
  l_osd_op_rw_lat_outb_hist,
  l_osd_op_rw_process_lat,
  l_osd_op_rw_prepare_lat,
  l_osd_op_before_queue_op_lat,
  l_osd_op_before_dequeue_op_lat,
  l_osd_history_alloc_bytes,
  l_osd_history_alloc_num,
  l_osd_cached_crc_adjusted,
  l_osd_waiting_for_map,
  l_osd_map_cache_miss,
  l_osd_map_cache_miss_low,
  l_osd_map_cache_miss_low_avg,
  l_osd_map_bl_cache_hit,
  l_osd_map_bl_cache_miss,
  l_osd_stat_bytes_used,
  l_osd_stat_bytes_avail,
  l_osd_tier_flush_fail,
  l_osd_tier_try_flush,
  l_osd_tier_try_flush_fail,
  l_osd_tier_proxy_read,
  l_osd_tier_proxy_write,
  l_osd_object_ctx_cache_hit,
  l_osd_object_ctx_cache_total,
  l_osd_tier_flush_lat,
  l_osd_tier_promote_lat,
// RecoveryState perf counters
  rs_backfilling_latency,
  rs_waitremotebackfillreserved_latency,
  rs_waitlocalbackfillreserved_latency,
  rs_notbackfilling_latency,
  rs_repnotrecovering_latency,
  rs_repwaitrecoveryreserved_latency,
  rs_repwaitbackfillreserved_latency,
  rs_reprecovering_latency,
  rs_activating_latency,
  rs_waitlocalrecoveryreserved_latency,
  rs_waitremoterecoveryreserved_latency,
  rs_recovering_latency,
  rs_recovered_latency,
  rs_replicaactive_latency,
  rs_waitactingchange_latency,
  rs_incomplete_latency,
  rs_getmissing_latency,
  rs_waitupthru_latency,
  rs_notrecovering_latency,
class AuthAuthorizeHandlerRegistry;
class TestOpsSocketHook;
struct C_CompleteSplits;

typedef ceph::shared_ptr<ObjectStore::Sequencer> SequencerRef;
class DeletingState {
  Mutex lock;
  Cond cond;
  enum {
    QUEUED,
    CLEARING_DIR,
    CLEARING_WAITING,
    DELETING_DIR,
    DELETED_DIR,
    CANCELED,
  } status;
  bool stop_deleting;
public:
  const spg_t pgid;
  const PGRef old_pg_state;
  explicit DeletingState(const pair<spg_t, PGRef> &in) :
    lock("DeletingState::lock"), status(QUEUED), stop_deleting(false),
    pgid(in.first), old_pg_state(in.second) {}

  /// transition status to CLEARING_WAITING
  bool pause_clearing() {
    Mutex::Locker l(lock);
    assert(status == CLEARING_DIR);
    if (stop_deleting) {
      status = CANCELED;
      cond.Signal();
      return false;
    }
    status = CLEARING_WAITING;
    return true;
  } ///< @return false if we should cancel deletion

  /// start or resume the clearing - transition the status to CLEARING_DIR
  bool start_or_resume_clearing() {
    Mutex::Locker l(lock);
    assert(
      status == QUEUED ||
      status == DELETED_DIR ||
      status == CLEARING_WAITING);
    if (stop_deleting) {
      status = CANCELED;
      cond.Signal();
      return false;
    }
    status = CLEARING_DIR;
    return true;
  } ///< @return false if we should cancel the deletion

  /// transition status to CLEARING_DIR
  bool resume_clearing() {
    Mutex::Locker l(lock);
    assert(status == CLEARING_WAITING);
    if (stop_deleting) {
      status = CANCELED;
      cond.Signal();
      return false;
    }
    status = CLEARING_DIR;
    return true;
  } ///< @return false if we should cancel deletion

  /// transition status to DELETING_DIR
  bool start_deleting() {
    Mutex::Locker l(lock);
    assert(status == CLEARING_DIR);
    if (stop_deleting) {
      status = CANCELED;
      cond.Signal();
      return false;
    }
    status = DELETING_DIR;
    return true;
  } ///< @return false if we should cancel deletion

  /// signal collection removal queued
  void finish_deleting() {
    Mutex::Locker l(lock);
    assert(status == DELETING_DIR);
    status = DELETED_DIR;
    cond.Signal();
  }

  /// try to halt the deletion
  bool try_stop_deletion() {
    Mutex::Locker l(lock);
    stop_deleting = true;
    /**
     * If we are in DELETING_DIR or CLEARING_DIR, there are in-progress
     * operations we have to wait for before continuing on.  States
     * CLEARING_WAITING and QUEUED indicate that the remover will check
     * stop_deleting before queueing any further operations.  CANCELED
     * indicates that the remover has already halted.  DELETED_DIR
     * indicates that the deletion has been fully queued.
     */
    while (status == DELETING_DIR || status == CLEARING_DIR)
      cond.Wait(lock);
    return status != DELETED_DIR;
  } ///< @return true if we don't need to recreate the collection
};
typedef ceph::shared_ptr<DeletingState> DeletingStateRef;
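
// Illustrative sketch (not part of the original header), assuming a
// hypothetical remover loop: this is roughly how a removal thread is
// expected to drive the DeletingState state machine, checking for
// cancellation at each transition.
//
//   DeletingStateRef ds = ...;             // from the deleting_pgs registry
//   if (!ds->start_or_resume_clearing())
//     return;                              // deletion was canceled
//   while (more_objects_to_clear) {
//     ... clear a chunk of objects ...
//     if (!ds->pause_clearing())           // yield; false => canceled
//       return;
//     if (!ds->resume_clearing())
//       return;
//   }
//   if (!ds->start_deleting())
//     return;
//   ... queue the collection removal ...
//   ds->finish_deleting();                 // status becomes DELETED_DIR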
class OSDService {
public:
  OSD *osd;
  CephContext *cct;
  SharedPtrRegistry<spg_t, ObjectStore::Sequencer> osr_registry;
  ceph::shared_ptr<ObjectStore::Sequencer> meta_osr;
  SharedPtrRegistry<spg_t, DeletingState> deleting_pgs;
  const int whoami;
  ObjectStore *&store;
  LogClient &log_client;
  PGRecoveryStats &pg_recovery_stats;
  Messenger *&cluster_messenger;
  Messenger *&client_messenger;
  PerfCounters *&logger;
  PerfCounters *&recoverystate_perf;
  ThreadPool::BatchWorkQueue<PG> &peering_wq;
  GenContextWQ recovery_gen_wq;
  ClassHandler *&class_handler;

  void enqueue_back(spg_t pgid, PGQueueable qi);
  void enqueue_front(spg_t pgid, PGQueueable qi);

  void maybe_inject_dispatch_delay() {
    if (g_conf->osd_debug_inject_dispatch_delay_probability > 0) {
      if (rand() % 10000 <
          g_conf->osd_debug_inject_dispatch_delay_probability * 10000) {
        utime_t t;
        t.set_from_double(g_conf->osd_debug_inject_dispatch_delay_duration);
        t.sleep();
      }
    }
  }
  // -- map epoch lower bound --
  Mutex pg_epoch_lock;
  multiset<epoch_t> pg_epochs;
  map<spg_t,epoch_t> pg_epoch;

  void pg_add_epoch(spg_t pgid, epoch_t epoch) {
    Mutex::Locker l(pg_epoch_lock);
    map<spg_t,epoch_t>::iterator t = pg_epoch.find(pgid);
    assert(t == pg_epoch.end());
    pg_epoch[pgid] = epoch;
    pg_epochs.insert(epoch);
  }
  void pg_update_epoch(spg_t pgid, epoch_t epoch) {
    Mutex::Locker l(pg_epoch_lock);
    map<spg_t,epoch_t>::iterator t = pg_epoch.find(pgid);
    assert(t != pg_epoch.end());
    pg_epochs.erase(pg_epochs.find(t->second));
    t->second = epoch;
    pg_epochs.insert(epoch);
  }
  void pg_remove_epoch(spg_t pgid) {
    Mutex::Locker l(pg_epoch_lock);
    map<spg_t,epoch_t>::iterator t = pg_epoch.find(pgid);
    if (t != pg_epoch.end()) {
      pg_epochs.erase(pg_epochs.find(t->second));
      pg_epoch.erase(t);
    }
  }
  epoch_t get_min_pg_epoch() {
    Mutex::Locker l(pg_epoch_lock);
    if (pg_epochs.empty())
      return 0;
    else
      return *pg_epochs.begin();
  }
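
  // Illustrative note (not part of the original header): pg_epoch (per-pg)
  // and pg_epochs (a multiset of the same values) are kept in lockstep so
  // that *pg_epochs.begin() is always the oldest map epoch any PG is still
  // working from, e.g.
  //
  //   pg_add_epoch(a, 10);      // pg_epochs = {10}
  //   pg_add_epoch(b, 12);      // pg_epochs = {10, 12}
  //   pg_update_epoch(a, 13);   // pg_epochs = {12, 13}
  //   get_min_pg_epoch();       // -> 12; a lower bound on maps still in use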
private:
  Mutex publish_lock, pre_publish_lock;  // pre-publish orders before publish
  OSDSuperblock superblock;

public:
  OSDSuperblock get_superblock() {
    Mutex::Locker l(publish_lock);
    return superblock;
  }
  void publish_superblock(const OSDSuperblock &block) {
    Mutex::Locker l(publish_lock);
    superblock = block;
  }

  int get_nodeid() const { return whoami; }

  std::atomic<epoch_t> max_oldest_map;
private:
  OSDMapRef osdmap;

public:
  OSDMapRef get_osdmap() {
    Mutex::Locker l(publish_lock);
    return osdmap;
  }
  epoch_t get_osdmap_epoch() {
    Mutex::Locker l(publish_lock);
    return osdmap ? osdmap->get_epoch() : 0;
  }
  void publish_map(OSDMapRef map) {
    Mutex::Locker l(publish_lock);
    osdmap = map;
  }

  /*
   * osdmap - current published map
   * next_osdmap - pre_published map that is about to be published.
   *
   * We use the next_osdmap to send messages and initiate connections,
   * but only if the target is the same instance as the one in the map
   * epoch the current user is working from (i.e., the result is
   * equivalent to what is in next_osdmap).
   *
   * This allows the helpers to start ignoring osds that are about to
   * go down, and let OSD::handle_osd_map()/note_down_osd() mark them
   * down, without worrying about reopening connections from threads
   * working from old maps.
   */
private:
  OSDMapRef next_osdmap;
  Cond pre_publish_cond;

public:
  void pre_publish_map(OSDMapRef map) {
    Mutex::Locker l(pre_publish_lock);
    next_osdmap = std::move(map);
  }
  /// map epochs reserved below
  map<epoch_t, unsigned> map_reservations;

  /// gets ref to next_osdmap and registers the epoch as reserved
  OSDMapRef get_nextmap_reserved() {
    Mutex::Locker l(pre_publish_lock);
    if (!next_osdmap)
      return OSDMapRef();
    epoch_t e = next_osdmap->get_epoch();
    map<epoch_t, unsigned>::iterator i =
      map_reservations.insert(make_pair(e, 0)).first;
    i->second++;
    return next_osdmap;
  }
  /// releases reservation on map
  void release_map(OSDMapRef osdmap) {
    Mutex::Locker l(pre_publish_lock);
    map<epoch_t, unsigned>::iterator i =
      map_reservations.find(osdmap->get_epoch());
    assert(i != map_reservations.end());
    assert(i->second > 0);
    if (--(i->second) == 0) {
      map_reservations.erase(i);
    }
    pre_publish_cond.Signal();
  }
  /// blocks until there are no reserved maps prior to next_osdmap
  void await_reserved_maps() {
    Mutex::Locker l(pre_publish_lock);
    while (true) {
      map<epoch_t, unsigned>::const_iterator i = map_reservations.cbegin();
      if (i == map_reservations.cend() ||
          i->first >= next_osdmap->get_epoch()) {
        break;
      }
      pre_publish_cond.Wait(pre_publish_lock);
    }
  }
private:
  Mutex peer_map_epoch_lock;
  map<int, epoch_t> peer_map_epoch;
public:
  epoch_t get_peer_epoch(int p);
  epoch_t note_peer_epoch(int p, epoch_t e);
  void forget_peer_epoch(int p, epoch_t e);

  void send_map(class MOSDMap *m, Connection *con);
  void send_incremental_map(epoch_t since, Connection *con, OSDMapRef& osdmap);
  MOSDMap *build_incremental_map_msg(epoch_t from, epoch_t to,
                                     OSDSuperblock& superblock);
  bool should_share_map(entity_name_t name, Connection *con, epoch_t epoch,
                        const OSDMapRef& osdmap, const epoch_t *sent_epoch_p);
  void share_map(entity_name_t name, Connection *con, epoch_t epoch,
                 OSDMapRef& osdmap, epoch_t *sent_epoch_p);
  void share_map_peer(int peer, Connection *con,
                      OSDMapRef map = OSDMapRef());

  ConnectionRef get_con_osd_cluster(int peer, epoch_t from_epoch);
  pair<ConnectionRef,ConnectionRef> get_con_osd_hb(int peer, epoch_t from_epoch);  // (back, front)
  void send_message_osd_cluster(int peer, Message *m, epoch_t from_epoch);
  void send_message_osd_cluster(Message *m, Connection *con) {
    con->send_message(m);
  }
  void send_message_osd_cluster(Message *m, const ConnectionRef& con) {
    con->send_message(m);
  }
  void send_message_osd_client(Message *m, Connection *con) {
    con->send_message(m);
  }
  void send_message_osd_client(Message *m, const ConnectionRef& con) {
    con->send_message(m);
  }
  entity_name_t get_cluster_msgr_name() {
    return cluster_messenger->get_myname();
  }
  // -- scrub scheduling --
  Mutex sched_scrub_lock;
  int scrubs_pending;
  int scrubs_active;

  struct ScrubJob {
    CephContext* cct;
    /// pg to be scrubbed
    spg_t pgid;
    /// the time the scrub is scheduled for; it may be delayed if system
    /// load is too high or it falls outside the configured scrub hours
    utime_t sched_time;
    /// the hard upper bound of scrub time
    utime_t deadline;
    ScrubJob() : cct(nullptr) {}
    explicit ScrubJob(CephContext* cct, const spg_t& pg,
                      const utime_t& timestamp,
                      double pool_scrub_min_interval = 0,
                      double pool_scrub_max_interval = 0, bool must = true);
    /// order the jobs by sched_time
    bool operator<(const ScrubJob& rhs) const;
  };
  set<ScrubJob> sched_scrub_pg;

  /// @returns the scrub_reg_stamp used to unregister the scrub job
  utime_t reg_pg_scrub(spg_t pgid, utime_t t, double pool_scrub_min_interval,
                       double pool_scrub_max_interval, bool must) {
    ScrubJob scrub(cct, pgid, t, pool_scrub_min_interval,
                   pool_scrub_max_interval, must);
    Mutex::Locker l(sched_scrub_lock);
    sched_scrub_pg.insert(scrub);
    return scrub.sched_time;
  }
  void unreg_pg_scrub(spg_t pgid, utime_t t) {
    Mutex::Locker l(sched_scrub_lock);
    size_t removed = sched_scrub_pg.erase(ScrubJob(cct, pgid, t));
    assert(removed);
  }
  bool first_scrub_stamp(ScrubJob *out) {
    Mutex::Locker l(sched_scrub_lock);
    if (sched_scrub_pg.empty())
      return false;
    set<ScrubJob>::iterator iter = sched_scrub_pg.begin();
    *out = *iter;
    return true;
  }
  bool next_scrub_stamp(const ScrubJob& next,
                        ScrubJob *out) {
    Mutex::Locker l(sched_scrub_lock);
    if (sched_scrub_pg.empty())
      return false;
    set<ScrubJob>::const_iterator iter = sched_scrub_pg.lower_bound(next);
    if (iter == sched_scrub_pg.cend())
      return false;
    ++iter;
    if (iter == sched_scrub_pg.cend())
      return false;
    *out = *iter;
    return true;
  }

  void dumps_scrub(Formatter *f) {
    assert(f != nullptr);
    Mutex::Locker l(sched_scrub_lock);

    f->open_array_section("scrubs");
    for (const auto &i : sched_scrub_pg) {
      f->open_object_section("scrub");
      f->dump_stream("pgid") << i.pgid;
      f->dump_stream("sched_time") << i.sched_time;
      f->dump_stream("deadline") << i.deadline;
      f->dump_bool("forced", i.sched_time == i.deadline);
      f->close_section();
    }
    f->close_section();
  }

  bool can_inc_scrubs_pending();
  bool inc_scrubs_pending();
  void inc_scrubs_active(bool reserved);
  void dec_scrubs_pending();
  void dec_scrubs_active();
  void reply_op_error(OpRequestRef op, int err);
  void reply_op_error(OpRequestRef op, int err, eversion_t v, version_t uv);
  void handle_misdirected_op(PG *pg, OpRequestRef op);
  // -- agent shared state --
  Mutex agent_lock;
  Cond agent_cond;
  map<uint64_t, set<PGRef> > agent_queue;
  set<PGRef>::iterator agent_queue_pos;
  bool agent_valid_iterator;
  int agent_ops;
  int flush_mode_high_count;  // nonzero once any pg is in FLUSH_MODE_HIGH; flush objects at high speed
  set<hobject_t> agent_oids;
  bool agent_active;
  struct AgentThread : public Thread {
    OSDService *osd;
    explicit AgentThread(OSDService *o) : osd(o) {}
    void *entry() override {
      osd->agent_entry();
      return NULL;
    }
  } agent_thread;
  bool agent_stop_flag;
  Mutex agent_timer_lock;
  SafeTimer agent_timer;

  void agent_entry();
  void agent_stop();

  void _enqueue(PG *pg, uint64_t priority) {
    if (!agent_queue.empty() &&
        agent_queue.rbegin()->first < priority)
      agent_valid_iterator = false;  // inserting higher-priority queue
    set<PGRef>& nq = agent_queue[priority];
    if (nq.empty())
      agent_cond.Signal();
    nq.insert(pg);
  }

  void _dequeue(PG *pg, uint64_t old_priority) {
    set<PGRef>& oq = agent_queue[old_priority];
    set<PGRef>::iterator p = oq.find(pg);
    assert(p != oq.end());
    if (p == agent_queue_pos)
      ++agent_queue_pos;
    oq.erase(p);
    if (oq.empty()) {
      if (agent_queue.rbegin()->first == old_priority)
        agent_valid_iterator = false;
      agent_queue.erase(old_priority);
    }
  }

  /// enable agent for a pg
  void agent_enable_pg(PG *pg, uint64_t priority) {
    Mutex::Locker l(agent_lock);
    _enqueue(pg, priority);
  }

  /// adjust priority for an enabled pg
  void agent_adjust_pg(PG *pg, uint64_t old_priority, uint64_t new_priority) {
    Mutex::Locker l(agent_lock);
    assert(new_priority != old_priority);
    _enqueue(pg, new_priority);
    _dequeue(pg, old_priority);
  }

  /// disable agent for a pg
  void agent_disable_pg(PG *pg, uint64_t old_priority) {
    Mutex::Locker l(agent_lock);
    _dequeue(pg, old_priority);
  }

  /// note start of an async (evict) op
  void agent_start_evict_op() {
    Mutex::Locker l(agent_lock);
    ++agent_ops;
  }

  /// note finish or cancellation of an async (evict) op
  void agent_finish_evict_op() {
    Mutex::Locker l(agent_lock);
    assert(agent_ops > 0);
    --agent_ops;
    agent_cond.Signal();
  }

  /// note start of an async (flush) op
  void agent_start_op(const hobject_t& oid) {
    Mutex::Locker l(agent_lock);
    ++agent_ops;
    assert(agent_oids.count(oid) == 0);
    agent_oids.insert(oid);
  }

  /// note finish or cancellation of an async (flush) op
  void agent_finish_op(const hobject_t& oid) {
    Mutex::Locker l(agent_lock);
    assert(agent_ops > 0);
    --agent_ops;
    assert(agent_oids.count(oid) == 1);
    agent_oids.erase(oid);
    agent_cond.Signal();
  }

  /// check if we are operating on an object
  bool agent_is_active_oid(const hobject_t& oid) {
    Mutex::Locker l(agent_lock);
    return agent_oids.count(oid);
  }

  /// get count of active agent ops
  int agent_get_num_ops() {
    Mutex::Locker l(agent_lock);
    return agent_ops;
  }

  void agent_inc_high_count() {
    Mutex::Locker l(agent_lock);
    flush_mode_high_count++;
  }

  void agent_dec_high_count() {
    Mutex::Locker l(agent_lock);
    flush_mode_high_count--;
  }
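
  // Illustrative note (not part of the original header): agent_adjust_pg
  // enqueues under the new priority before dequeuing the old entry, both
  // under agent_lock, presumably so the pg is never absent from
  // agent_queue mid-adjustment and _dequeue's empty-bucket cleanup always
  // sees a consistent queue.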
  /// throttle promotion attempts
  std::atomic_uint promote_probability_millis{1000};  ///< probability, in thousandths; a single word
  PromoteCounter promote_counter;
  utime_t last_recalibrate;
  unsigned long promote_max_objects, promote_max_bytes;

  bool promote_throttle() {
    // NOTE: lockless!  we rely on the probability being a single word.
    promote_counter.attempt();
    if ((unsigned)rand() % 1000 > promote_probability_millis)
      return true;  // yes throttle (no promote)
    if (promote_max_objects &&
        promote_counter.objects > promote_max_objects)
      return true;  // yes throttle
    if (promote_max_bytes &&
        promote_counter.bytes > promote_max_bytes)
      return true;  // yes throttle
    return false;   // no throttle (promote)
  }
  void promote_finish(uint64_t bytes) {
    promote_counter.finish(bytes);
  }
  void promote_throttle_recalibrate();
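
  // Illustrative note (not part of the original header): rand() % 1000
  // yields 0..999, so with the default promote_probability_millis of 1000
  // the probability test never throttles; recalibrating it down to e.g.
  // 500 makes roughly half of the attempts return true (throttled), all
  // without taking a lock.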
  // -- Objecter, for tiering reads/writes from/to other OSDs --
  Objecter *objecter;
  Finisher objecter_finisher;

  // -- Watch --
  Mutex watch_lock;
  SafeTimer watch_timer;
  uint64_t next_notif_id;
  uint64_t get_next_id(epoch_t cur_epoch) {
    Mutex::Locker l(watch_lock);
    return (((uint64_t)cur_epoch) << 32) | ((uint64_t)(next_notif_id++));
  }
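
  // Illustrative note (not part of the original header): notify ids pack
  // the epoch into the high 32 bits and a per-OSD counter into the low 32
  // bits, e.g. cur_epoch=5, next_notif_id=7 gives
  // (5ull << 32) | 7 = 0x0000000500000007, so ids stay unique across
  // epoch changes.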
  // -- Recovery/Backfill Request Scheduling --
  Mutex recovery_request_lock;
  SafeTimer recovery_request_timer;

  // For async recovery sleep
  bool recovery_needs_sleep = true;
  utime_t recovery_schedule_time = utime_t();

  Mutex recovery_sleep_lock;
  SafeTimer recovery_sleep_timer;

  // -- tids --
  std::atomic_uint last_tid{0};
  ceph_tid_t get_tid() {
    return (ceph_tid_t)last_tid++;
  }
  // -- backfill_reservation --
  Finisher reserver_finisher;
  AsyncReserver<spg_t> local_reserver;
  AsyncReserver<spg_t> remote_reserver;

  // -- pg_temp --
  struct pg_temp_t {
    pg_temp_t() {}
    pg_temp_t(vector<int> v, bool f)
      : acting{v}, forced{f}
    {}
    vector<int> acting;
    bool forced = false;
  };
  map<pg_t, pg_temp_t> pg_temp_wanted;
  map<pg_t, pg_temp_t> pg_temp_pending;
  void _sent_pg_temp();
  friend std::ostream& operator<<(std::ostream&, const pg_temp_t&);

  void queue_want_pg_temp(pg_t pgid, const vector<int>& want,
                          bool forced = false);
  void remove_want_pg_temp(pg_t pgid);
  void requeue_pg_temp();
  void send_pg_created(pg_t pgid);

  void queue_for_peering(PG *pg);

  Mutex snap_sleep_lock;
  SafeTimer snap_sleep_timer;

  Mutex scrub_sleep_lock;
  SafeTimer scrub_sleep_timer;

  AsyncReserver<spg_t> snap_reserver;
  void queue_for_snap_trim(PG *pg);

  void queue_for_scrub(PG *pg, bool with_high_priority) {
    unsigned scrub_queue_priority = pg->scrubber.priority;
    if (with_high_priority &&
        scrub_queue_priority < cct->_conf->osd_client_op_priority) {
      scrub_queue_priority = cct->_conf->osd_client_op_priority;
    }
    enqueue_back(
      pg->info.pgid,
      PGQueueable(
        PGScrub(pg->get_osdmap()->get_epoch()),
        cct->_conf->osd_scrub_cost,
        scrub_queue_priority,
        ceph_clock_now(),
        entity_inst_t(),
        pg->get_osdmap()->get_epoch()));
  }
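
  // Illustrative note (not part of the original header): an
  // operator-requested scrub passes with_high_priority=true, which lifts
  // the queued item to at least osd_client_op_priority so it competes
  // with client I/O instead of waiting behind it.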
  // -- pg recovery and associated throttling --
  Mutex recovery_lock;
  list<pair<epoch_t, PGRef> > awaiting_throttle;

  utime_t defer_recovery_until;
  uint64_t recovery_ops_active;
  uint64_t recovery_ops_reserved;
  bool recovery_paused;
#ifdef DEBUG_RECOVERY_OIDS
  map<spg_t, set<hobject_t> > recovery_oids;
#endif
  bool _recover_now(uint64_t *available_pushes);
  void _maybe_queue_recovery();
  void _queue_for_recovery(
    pair<epoch_t, PGRef> p, uint64_t reserved_pushes) {
    assert(recovery_lock.is_locked_by_me());
    enqueue_back(
      p.second->info.pgid,
      PGQueueable(
        PGRecovery(p.first, reserved_pushes),
        cct->_conf->osd_recovery_cost,
        cct->_conf->osd_recovery_priority,
        ceph_clock_now(),
        entity_inst_t(),
        p.first));
  }
public:
  void start_recovery_op(PG *pg, const hobject_t& soid);
  void finish_recovery_op(PG *pg, const hobject_t& soid, bool dequeue);
  bool is_recovery_active();
  void release_reserved_pushes(uint64_t pushes) {
    Mutex::Locker l(recovery_lock);
    assert(recovery_ops_reserved >= pushes);
    recovery_ops_reserved -= pushes;
    _maybe_queue_recovery();
  }
  void defer_recovery(float defer_for) {
    defer_recovery_until = ceph_clock_now();
    defer_recovery_until += defer_for;
  }
  void pause_recovery() {
    Mutex::Locker l(recovery_lock);
    recovery_paused = true;
  }
  bool recovery_is_paused() {
    Mutex::Locker l(recovery_lock);
    return recovery_paused;
  }
  void unpause_recovery() {
    Mutex::Locker l(recovery_lock);
    recovery_paused = false;
    _maybe_queue_recovery();
  }
  void kick_recovery_queue() {
    Mutex::Locker l(recovery_lock);
    _maybe_queue_recovery();
  }
  void clear_queued_recovery(PG *pg) {
    Mutex::Locker l(recovery_lock);
    for (list<pair<epoch_t, PGRef> >::iterator i = awaiting_throttle.begin();
         i != awaiting_throttle.end();
         ++i) {
      if (i->second.get() == pg) {
        awaiting_throttle.erase(i);
        return;
      }
    }
  }
  // delayed pg activation
  void queue_for_recovery(PG *pg) {
    Mutex::Locker l(recovery_lock);
    if (pg->get_state() & (PG_STATE_FORCED_RECOVERY | PG_STATE_FORCED_BACKFILL)) {
      awaiting_throttle.push_front(make_pair(pg->get_osdmap()->get_epoch(), pg));
    } else {
      awaiting_throttle.push_back(make_pair(pg->get_osdmap()->get_epoch(), pg));
    }
    _maybe_queue_recovery();
  }
  void queue_recovery_after_sleep(PG *pg, epoch_t queued, uint64_t reserved_pushes) {
    Mutex::Locker l(recovery_lock);
    _queue_for_recovery(make_pair(queued, pg), reserved_pushes);
  }

  void adjust_pg_priorities(const vector<PGRef>& pgs, int newflags);
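
  // Illustrative sketch (not part of the original header): the recovery
  // throttle round-trip.  _maybe_queue_recovery() drains awaiting_throttle
  // into the op queue while reserved pushes are available, and PGs return
  // their reservations when done:
  //
  //   queue_for_recovery(pg);             // join awaiting_throttle
  //   ... do_recovery() runs with some reserved_pushes ...
  //   release_reserved_pushes(pushes);    // may queue the next waiting PG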
  // osd map cache (past osd maps)
  Mutex map_cache_lock;
  SharedLRU<epoch_t, const OSDMap> map_cache;
  SimpleLRU<epoch_t, bufferlist> map_bl_cache;
  SimpleLRU<epoch_t, bufferlist> map_bl_inc_cache;

  OSDMapRef try_get_map(epoch_t e);
  OSDMapRef get_map(epoch_t e) {
    OSDMapRef ret(try_get_map(e));
    assert(ret);
    return ret;
  }
  OSDMapRef add_map(OSDMap *o) {
    Mutex::Locker l(map_cache_lock);
    return _add_map(o);
  }
  OSDMapRef _add_map(OSDMap *o);

  void add_map_bl(epoch_t e, bufferlist& bl) {
    Mutex::Locker l(map_cache_lock);
    return _add_map_bl(e, bl);
  }
  void pin_map_bl(epoch_t e, bufferlist &bl);
  void _add_map_bl(epoch_t e, bufferlist& bl);
  bool get_map_bl(epoch_t e, bufferlist& bl) {
    Mutex::Locker l(map_cache_lock);
    return _get_map_bl(e, bl);
  }
  bool _get_map_bl(epoch_t e, bufferlist& bl);

  void add_map_inc_bl(epoch_t e, bufferlist& bl) {
    Mutex::Locker l(map_cache_lock);
    return _add_map_inc_bl(e, bl);
  }
  void pin_map_inc_bl(epoch_t e, bufferlist &bl);
  void _add_map_inc_bl(epoch_t e, bufferlist& bl);
  bool get_inc_map_bl(epoch_t e, bufferlist& bl);

  void clear_map_bl_cache_pins(epoch_t e);
  void need_heartbeat_peer_update();

  void pg_stat_queue_enqueue(PG *pg);
  void pg_stat_queue_dequeue(PG *pg);

  void start_shutdown();
  void shutdown_reserver();

  // -- split --
private:
  Mutex in_progress_split_lock;
  map<spg_t, spg_t> pending_splits;            // child -> parent
  map<spg_t, set<spg_t> > rev_pending_splits;  // parent -> [children]
  set<spg_t> in_progress_splits;               // child

public:
  void _start_split(spg_t parent, const set<spg_t> &children);
  void start_split(spg_t parent, const set<spg_t> &children) {
    Mutex::Locker l(in_progress_split_lock);
    return _start_split(parent, children);
  }
  void mark_split_in_progress(spg_t parent, const set<spg_t> &pgs);
  void complete_split(const set<spg_t> &pgs);
  void cancel_pending_splits_for_parent(spg_t parent);
  void _cancel_pending_splits_for_parent(spg_t parent);
  bool splitting(spg_t pgid);
  void expand_pg_num(OSDMapRef old_map,
                     OSDMapRef new_map);
  void _maybe_split_pgid(OSDMapRef old_map,
                         OSDMapRef new_map,
                         spg_t pgid);
  void init_splits_between(spg_t pgid, OSDMapRef frommap, OSDMapRef tomap);
  // -- stats --
  Mutex stat_lock;
  osd_stat_t osd_stat;
  uint32_t seq = 0;

  void update_osd_stat(vector<int>& hb_peers);
  osd_stat_t set_osd_stat(const struct store_statfs_t &stbuf,
                          vector<int>& hb_peers,
                          int num_pgs);
  osd_stat_t get_osd_stat() {
    Mutex::Locker l(stat_lock);
    ++seq;
    osd_stat.up_from = up_epoch;
    osd_stat.seq = ((uint64_t)osd_stat.up_from << 32) + seq;
    return osd_stat;
  }
  uint64_t get_osd_stat_seq() {
    Mutex::Locker l(stat_lock);
    return osd_stat.seq;
  }
  // -- OSD Full Status --
private:
  friend TestOpsSocketHook;
  mutable Mutex full_status_lock;
  enum s_names { INVALID = -1, NONE, NEARFULL, BACKFILLFULL, FULL, FAILSAFE } cur_state;  // ascending
  const char *get_full_state_name(s_names s) const {
    switch (s) {
    case NONE: return "none";
    case NEARFULL: return "nearfull";
    case BACKFILLFULL: return "backfillfull";
    case FULL: return "full";
    case FAILSAFE: return "failsafe";
    default: return "???";
    }
  }
  s_names get_full_state(string type) const {
    if (type == "none")
      return NONE;
    else if (type == "failsafe")
      return FAILSAFE;
    else if (type == "full")
      return FULL;
    else if (type == "backfillfull")
      return BACKFILLFULL;
    else if (type == "nearfull")
      return NEARFULL;
    else
      return INVALID;
  }
  double cur_ratio;  ///< current utilization
  mutable int64_t injectfull = 0;
  s_names injectfull_state = NONE;
  float get_failsafe_full_ratio();
  void check_full_status(float ratio);
  bool _check_full(s_names type, ostream &ss) const;
public:
  bool check_failsafe_full(ostream &ss) const;
  bool check_full(ostream &ss) const;
  bool check_backfill_full(ostream &ss) const;
  bool check_nearfull(ostream &ss) const;
  bool is_failsafe_full() const;
  bool is_full() const;
  bool is_backfillfull() const;
  bool is_nearfull() const;
  bool need_fullness_update();  ///< osdmap state needs update
  void set_injectfull(s_names type, int64_t count);
  bool check_osdmap_full(const set<pg_shard_t> &missing_on);
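
  // Illustrative note (not part of the original header): s_names ascends
  // by severity (NONE < NEARFULL < BACKFILLFULL < FULL < FAILSAFE), so a
  // single comparison such as `cur_state >= BACKFILLFULL` covers that
  // state and everything worse.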
  // -- epochs --
private:
  mutable Mutex epoch_lock;  // protects access to boot_epoch, up_epoch, bind_epoch
  epoch_t boot_epoch;  // _first_ epoch we were marked up (after this process started)
  epoch_t up_epoch;    // _most_recent_ epoch we were marked up
  epoch_t bind_epoch;  // epoch we last did a bind to new ip:ports
public:
  /**
   * Retrieve the boot_, up_, and bind_ epochs the OSD has set.  The params
   * can be NULL if you don't care about them.
   */
  void retrieve_epochs(epoch_t *_boot_epoch, epoch_t *_up_epoch,
                       epoch_t *_bind_epoch) const;
  /**
   * Set the boot, up, and bind epochs.  Any NULL params will not be set.
   */
  void set_epochs(const epoch_t *_boot_epoch, const epoch_t *_up_epoch,
                  const epoch_t *_bind_epoch);
  epoch_t get_boot_epoch() const {
    epoch_t ret;
    retrieve_epochs(&ret, NULL, NULL);
    return ret;
  }
  epoch_t get_up_epoch() const {
    epoch_t ret;
    retrieve_epochs(NULL, &ret, NULL);
    return ret;
  }
  epoch_t get_bind_epoch() const {
    epoch_t ret;
    retrieve_epochs(NULL, NULL, &ret);
    return ret;
  }

  void request_osdmap_update(epoch_t e);
  // -- stopping --
  Mutex is_stopping_lock;
  Cond is_stopping_cond;
  enum {
    NOT_STOPPING,
    PREPARING_TO_STOP,
    STOPPING
  };
  std::atomic_int state{NOT_STOPPING};
  int get_state() const {
    return state;
  }
  void set_state(int s) {
    state = s;
  }
  bool is_stopping() const {
    return state == STOPPING;
  }
  bool is_preparing_to_stop() const {
    return state == PREPARING_TO_STOP;
  }
  bool prepare_to_stop();
  void got_stop_ack();

#ifdef PG_DEBUG_REFS
  Mutex pgid_lock;
  map<spg_t, int> pgid_tracker;
  map<spg_t, PG*> live_pgs;
  void add_pgid(spg_t pgid, PG *pg);
  void remove_pgid(spg_t pgid, PG *pg);
  void dump_live_pgids();
#endif

  explicit OSDService(OSD *osd);
  ~OSDService();
};
class OSD : public Dispatcher,
            public md_config_obs_t {
  /** OSD **/
  Mutex osd_lock;        // global lock
  SafeTimer tick_timer;  // safe timer (osd_lock)

  // tick timer for stuff that does not need osd_lock
  Mutex tick_timer_lock;
  SafeTimer tick_timer_without_osd_lock;

  // config observer bits
  const char** get_tracked_conf_keys() const override;
  void handle_conf_change(const struct md_config_t *conf,
                          const std::set<std::string> &changed) override;
  void update_log_config();
  void check_config();

public:
  static const double OSD_TICK_INTERVAL;  // tick interval for tick_timer and tick_timer_without_osd_lock
private:
  AuthAuthorizeHandlerRegistry *authorize_handler_cluster_registry;
  AuthAuthorizeHandlerRegistry *authorize_handler_service_registry;

  Messenger   *cluster_messenger;
  Messenger   *client_messenger;
  Messenger   *objecter_messenger;
  MonClient   *monc;  // check the "monc helpers" list before accessing directly

  PerfCounters *logger;
  PerfCounters *recoverystate_perf;

  FuseStore *fuse_store = nullptr;

  LogClient log_client;

  int whoami;
  std::string dev_path, journal_path;

  bool store_is_rotational = true;
  bool journal_is_rotational = true;

  ZTracer::Endpoint trace_endpoint;
  void create_logger();
  void create_recoverystate_perf();
  void tick_without_osd_lock();
  void _dispatch(Message *m);
  void dispatch_op(OpRequestRef op);

  void check_osdmap_features(ObjectStore *store);

  // asok
  friend class OSDSocketHook;
  class OSDSocketHook *asok_hook;
  bool asok_command(string admin_command, cmdmap_t& cmdmap, string format, ostream& ss);

public:
  ClassHandler  *class_handler = nullptr;
  int get_nodeid() { return whoami; }
  static ghobject_t get_osdmap_pobject_name(epoch_t epoch) {
    char foo[20];
    snprintf(foo, sizeof(foo), "osdmap.%d", epoch);
    return ghobject_t(hobject_t(sobject_t(object_t(foo), 0)));
  }
  static ghobject_t get_inc_osdmap_pobject_name(epoch_t epoch) {
    char foo[22];
    snprintf(foo, sizeof(foo), "inc_osdmap.%d", epoch);
    return ghobject_t(hobject_t(sobject_t(object_t(foo), 0)));
  }

  static ghobject_t make_snapmapper_oid() {
    return ghobject_t(hobject_t(
      sobject_t(
        object_t("snapmapper"),
        0)));
  }

  static ghobject_t make_pg_log_oid(spg_t pg) {
    stringstream ss;
    ss << "pglog_" << pg;
    string s;
    getline(ss, s);
    return ghobject_t(hobject_t(sobject_t(object_t(s.c_str()), 0)));
  }
  static ghobject_t make_pg_biginfo_oid(spg_t pg) {
    stringstream ss;
    ss << "pginfo_" << pg;
    string s;
    getline(ss, s);
    return ghobject_t(hobject_t(sobject_t(object_t(s.c_str()), 0)));
  }
  static ghobject_t make_infos_oid() {
    hobject_t oid(sobject_t("infos", CEPH_NOSNAP));
    return ghobject_t(oid);
  }
  static void recursive_remove_collection(CephContext* cct,
                                          ObjectStore *store,
                                          spg_t pgid,
                                          coll_t tmp);
  /**
   * get_osd_initial_compat_set()
   *
   * Get the initial feature set for this OSD.  Features
   * here are automatically upgraded.
   *
   * Return value: Initial osd CompatSet
   */
  static CompatSet get_osd_initial_compat_set();

  /**
   * get_osd_compat_set()
   *
   * Get all features supported by this OSD
   *
   * Return value: CompatSet of all supported features
   */
  static CompatSet get_osd_compat_set();

private:
  class C_Tick_WithoutOSDLock;

  // -- superblock --
  OSDSuperblock superblock;

  void write_superblock();
  void write_superblock(ObjectStore::Transaction& t);
  int read_superblock();

  void clear_temp_objects();

  CompatSet osd_compat;
  // -- state --
public:
  enum {
    STATE_INITIALIZING = 1,
    STATE_PREBOOT,
    STATE_BOOTING,
    STATE_ACTIVE,
    STATE_STOPPING,
    STATE_WAITING_FOR_HEALTHY
  };

  static const char *get_state_name(int s) {
    switch (s) {
    case STATE_INITIALIZING: return "initializing";
    case STATE_PREBOOT: return "preboot";
    case STATE_BOOTING: return "booting";
    case STATE_ACTIVE: return "active";
    case STATE_STOPPING: return "stopping";
    case STATE_WAITING_FOR_HEALTHY: return "waiting_for_healthy";
    default: return "???";
    }
  }

private:
  std::atomic_int state{STATE_INITIALIZING};
  bool waiting_for_luminous_mons = false;

public:
  int get_state() const {
    return state;
  }
  void set_state(int s) {
    state = s;
  }
  bool is_initializing() const {
    return state == STATE_INITIALIZING;
  }
  bool is_preboot() const {
    return state == STATE_PREBOOT;
  }
  bool is_booting() const {
    return state == STATE_BOOTING;
  }
  bool is_active() const {
    return state == STATE_ACTIVE;
  }
  bool is_stopping() const {
    return state == STATE_STOPPING;
  }
  bool is_waiting_for_healthy() const {
    return state == STATE_WAITING_FOR_HEALTHY;
  }
private:
  ThreadPool peering_tp;
  ShardedThreadPool osd_op_tp;
  ThreadPool disk_tp;
  ThreadPool command_tp;

  void set_disk_tp_priority();
  void get_latest_osdmap();

  // -- sessions --
  void dispatch_session_waiting(Session *session, OSDMapRef osdmap);
  void maybe_share_map(Session *session, OpRequestRef op, OSDMapRef osdmap);

  Mutex session_waiting_lock;
  set<Session*> session_waiting_for_map;
  /// Caller assumes refs for included Sessions
  void get_sessions_waiting_for_map(set<Session*> *out) {
    Mutex::Locker l(session_waiting_lock);
    out->swap(session_waiting_for_map);
  }
  void register_session_waiting_on_map(Session *session) {
    Mutex::Locker l(session_waiting_lock);
    if (session_waiting_for_map.insert(session).second) {
      session->get();
    }
  }
  void clear_session_waiting_on_map(Session *session) {
    Mutex::Locker l(session_waiting_lock);
    set<Session*>::iterator i = session_waiting_for_map.find(session);
    if (i != session_waiting_for_map.end()) {
      (*i)->put();
      session_waiting_for_map.erase(i);
    }
  }
  void dispatch_sessions_waiting_on_map() {
    set<Session*> sessions_to_check;
    get_sessions_waiting_for_map(&sessions_to_check);
    for (set<Session*>::iterator i = sessions_to_check.begin();
         i != sessions_to_check.end();
         sessions_to_check.erase(i++)) {
      (*i)->session_dispatch_lock.Lock();
      dispatch_session_waiting(*i, osdmap);
      (*i)->session_dispatch_lock.Unlock();
      (*i)->put();
    }
  }
  void session_handle_reset(Session *session) {
    Mutex::Locker l(session->session_dispatch_lock);
    clear_session_waiting_on_map(session);

    session->clear_backoffs();

    /* Messages have connection refs, we need to clear the
     * connection->session->message->connection
     * cycles which result.
     */
    session->waiting_on_map.clear_and_dispose(TrackedOp::Putter());
  }
  /**
   * @defgroup monc helpers
   * @{
   * Right now we only have the one
   */

  /**
   * Ask the Monitors for a sequence of OSDMaps.
   *
   * @param epoch The epoch to start with when replying
   * @param force_request True if this request forces a new subscription to
   * the monitors; false if an outstanding request that encompasses it is
   * sufficient.
   */
  void osdmap_subscribe(version_t epoch, bool force_request);
  /** @} monc helpers */

  Mutex osdmap_subscribe_lock;
  epoch_t latest_subscribed_epoch{0};
  // -- heartbeat --
  /// information about a heartbeat peer
  struct HeartbeatInfo {
    int peer;                 ///< peer
    ConnectionRef con_front;  ///< peer connection (front)
    ConnectionRef con_back;   ///< peer connection (back)
    utime_t first_tx;         ///< time we sent our first ping request
    utime_t last_tx;          ///< last time we sent a ping request
    utime_t last_rx_front;    ///< last time we got a ping reply on the front side
    utime_t last_rx_back;     ///< last time we got a ping reply on the back side
    epoch_t epoch;            ///< most recent epoch we wanted this peer

    bool is_unhealthy(utime_t cutoff) const {
      return
        ! ((last_rx_front > cutoff ||
            (last_rx_front == utime_t() && (last_tx == utime_t() ||
                                            first_tx > cutoff))) &&
           (last_rx_back > cutoff ||
            (last_rx_back == utime_t() && (last_tx == utime_t() ||
                                           first_tx > cutoff))));
    }
    bool is_healthy(utime_t cutoff) const {
      return last_rx_front > cutoff && last_rx_back > cutoff;
    }
  };
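
  // Illustrative note (not part of the original header):
  // HeartbeatInfo::is_unhealthy() is deliberately not the negation of
  // is_healthy().  A peer we have never heard from is not flagged
  // unhealthy until our first ping to it is older than the cutoff, so
  // freshly added peers are not instantly failed, while is_healthy()
  // demands a recent reply on both the front and back interfaces.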
  /// state attached to outgoing heartbeat connections
  struct HeartbeatSession : public RefCountedObject {
    int peer;
    explicit HeartbeatSession(int p) : peer(p) {}
  };
  Mutex heartbeat_lock;
  map<int, int> debug_heartbeat_drops_remaining;
  Cond heartbeat_cond;
  bool heartbeat_stop;
  std::atomic_bool heartbeat_need_update;
  map<int,HeartbeatInfo> heartbeat_peers;  ///< map of osd id to HeartbeatInfo
  utime_t last_mon_heartbeat;
  Messenger *hb_front_client_messenger;
  Messenger *hb_back_client_messenger;
  Messenger *hb_front_server_messenger;
  Messenger *hb_back_server_messenger;
  utime_t last_heartbeat_resample;  ///< last time we chose random peers in waiting-for-healthy state
  double daily_loadavg;

  void _add_heartbeat_peer(int p);
  void _remove_heartbeat_peer(int p);
  bool heartbeat_reset(Connection *con);
  void maybe_update_heartbeat_peers();
  void reset_heartbeat_peers();
  bool heartbeat_peers_need_update() {
    return heartbeat_need_update.load();
  }
  void heartbeat_set_peers_need_update() {
    heartbeat_need_update.store(true);
  }
  void heartbeat_clear_peers_need_update() {
    heartbeat_need_update.store(false);
  }
  void heartbeat_check();
  void heartbeat_entry();
  void need_heartbeat_peer_update();

  void heartbeat_kick() {
    Mutex::Locker l(heartbeat_lock);
    heartbeat_cond.Signal();
  }
  struct T_Heartbeat : public Thread {
    OSD *osd;
    explicit T_Heartbeat(OSD *o) : osd(o) {}
    void *entry() override {
      osd->heartbeat_entry();
      return 0;
    }
  } heartbeat_thread;
public:
  bool heartbeat_dispatch(Message *m);

  struct HeartbeatDispatcher : public Dispatcher {
    OSD *osd;
    explicit HeartbeatDispatcher(OSD *o) : Dispatcher(o->cct), osd(o) {}

    bool ms_can_fast_dispatch_any() const override { return true; }
    bool ms_can_fast_dispatch(const Message *m) const override {
      switch (m->get_type()) {
      case CEPH_MSG_PING:
      case MSG_OSD_PING:
        return true;
      default:
        return false;
      }
    }
    void ms_fast_dispatch(Message *m) override {
      osd->heartbeat_dispatch(m);
    }
    bool ms_dispatch(Message *m) override {
      return osd->heartbeat_dispatch(m);
    }
    bool ms_handle_reset(Connection *con) override {
      return osd->heartbeat_reset(con);
    }
    void ms_handle_remote_reset(Connection *con) override {}
    bool ms_handle_refused(Connection *con) override {
      return osd->ms_handle_refused(con);
    }
    bool ms_verify_authorizer(Connection *con, int peer_type,
                              int protocol, bufferlist& authorizer_data, bufferlist& authorizer_reply,
                              bool& isvalid, CryptoKey& session_key,
                              std::unique_ptr<AuthAuthorizerChallenge> *challenge) override {
      isvalid = true;
      return true;
    }
  } heartbeat_dispatcher;
private:
  // -- waiters --
  list<OpRequestRef> finished;

  void take_waiters(list<OpRequestRef>& ls) {
    assert(osd_lock.is_locked());
    finished.splice(finished.end(), ls);
  }

  // -- op tracking --
  OpTracker op_tracker;
  void check_ops_in_flight();
  void test_ops(std::string command, std::string args, ostream& ss);
  friend class TestOpsSocketHook;
  TestOpsSocketHook *test_ops_hook;
  friend struct C_CompleteSplits;
  friend struct C_OpenPGs;
  enum class io_queue {
    prioritized,
    weightedpriority,
    mclock_opclass,
    mclock_client,
  };
  friend std::ostream& operator<<(std::ostream& out, const OSD::io_queue& q);

  const io_queue op_queue;
  const unsigned int op_prio_cutoff;
  /*
   * The ordered op delivery chain is:
   *
   *   fast dispatch -> pqueue back
   *                    pqueue front <-> to_process back
   *                                     to_process front -> RunVis(item)
   *
   * The pqueue is per-shard, and to_process is per pg_slot.  Items can be
   * pushed back up into to_process and/or pqueue while order is preserved.
   *
   * Multiple worker threads can operate on each shard.
   *
   * Under normal circumstances, num_running == to_process.size().  There are
   * two times when that is not true: (1) when waiting_for_pg == true and
   * to_process is accumulating requests that are waiting for the pg to be
   * instantiated; in that case they will all get requeued together by
   * wake_pg_waiters, and (2) when wake_pg_waiters just ran, waiting_for_pg
   * is false, and it already requeued the items.
   */
  friend class PGQueueable;
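
  // Illustrative note (not part of the original header): a _process
  // thread must drop the shard lock to take the PG lock, so after
  // reacquiring it, it compares the pg_slot's requeue_seq against the
  // value it sampled earlier; if wake_pg_waiters bumped it in the
  // interim, the op was already requeued and the thread bails out rather
  // than running the item twice.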
  class ShardedOpWQ
    : public ShardedThreadPool::ShardedWQ<pair<spg_t,PGQueueable>>
  {
    struct ShardData {
      Mutex sdata_lock;
      Cond sdata_cond;

      Mutex sdata_op_ordering_lock;  ///< protects all members below

      OSDMapRef waiting_for_pg_osdmap;
      struct pg_slot {
        PGRef pg;                      ///< cached pg reference [optional]
        list<PGQueueable> to_process;  ///< order items for this slot
        int num_running = 0;           ///< _process threads doing pg lookup/lock

        /// true if pg does/did not exist; if so, all new items go directly
        /// to to_process.  cleared by prune_pg_waiters.
        bool waiting_for_pg = false;

        /// incremented by wake_pg_waiters; indicates racing _process threads
        /// should bail out (their op has been requeued)
        uint64_t requeue_seq = 0;
      };

      /// map of slots for each spg_t.  maintains ordering of items dequeued
      /// from pqueue while _process thread drops shard lock to acquire the
      /// pg lock.  slots are removed only by prune_pg_waiters.
      unordered_map<spg_t,pg_slot> pg_slots;

      std::unique_ptr<OpQueue<pair<spg_t, PGQueueable>, entity_inst_t>> pqueue;

      void _enqueue_front(pair<spg_t, PGQueueable> item, unsigned cutoff) {
        unsigned priority = item.second.get_priority();
        unsigned cost = item.second.get_cost();
        if (priority >= cutoff)
          pqueue->enqueue_strict_front(
            item.second.get_owner(),
            priority, item);
        else
          pqueue->enqueue_front(
            item.second.get_owner(),
            priority, cost, item);
      }

      ShardData(
        string lock_name, string ordering_lock,
        uint64_t max_tok_per_prio, uint64_t min_cost, CephContext *cct,
        io_queue opqueue)
        : sdata_lock(lock_name.c_str(), false, true, false, cct),
          sdata_op_ordering_lock(ordering_lock.c_str(), false, true,
                                 false, cct) {
        if (opqueue == io_queue::weightedpriority) {
          pqueue = std::unique_ptr
            <WeightedPriorityQueue<pair<spg_t,PGQueueable>,entity_inst_t>>(
              new WeightedPriorityQueue<pair<spg_t,PGQueueable>,entity_inst_t>(
                max_tok_per_prio, min_cost));
        } else if (opqueue == io_queue::prioritized) {
          pqueue = std::unique_ptr
            <PrioritizedQueue<pair<spg_t,PGQueueable>,entity_inst_t>>(
              new PrioritizedQueue<pair<spg_t,PGQueueable>,entity_inst_t>(
                max_tok_per_prio, min_cost));
        } else if (opqueue == io_queue::mclock_opclass) {
          pqueue = std::unique_ptr
            <ceph::mClockOpClassQueue>(new ceph::mClockOpClassQueue(cct));
        } else if (opqueue == io_queue::mclock_client) {
          pqueue = std::unique_ptr
            <ceph::mClockClientQueue>(new ceph::mClockClientQueue(cct));
        }
      }
    };  // struct ShardData

    vector<ShardData*> shard_list;
    OSD *osd;
    uint32_t num_shards;
  public:
    ShardedOpWQ(uint32_t pnum_shards,
                OSD *o,
                time_t ti,
                time_t si,
                ShardedThreadPool* tp)
      : ShardedThreadPool::ShardedWQ<pair<spg_t,PGQueueable>>(ti, si, tp),
        osd(o),
        num_shards(pnum_shards) {
      for (uint32_t i = 0; i < num_shards; i++) {
        char lock_name[32] = {0};
        snprintf(lock_name, sizeof(lock_name), "%s.%d", "OSD:ShardedOpWQ:", i);
        char order_lock[32] = {0};
        snprintf(order_lock, sizeof(order_lock), "%s.%d",
                 "OSD:ShardedOpWQ:order:", i);
        ShardData* one_shard = new ShardData(
          lock_name, order_lock,
          osd->cct->_conf->osd_op_pq_max_tokens_per_priority,
          osd->cct->_conf->osd_op_pq_min_cost, osd->cct, osd->op_queue);
        shard_list.push_back(one_shard);
      }
    }
    ~ShardedOpWQ() override {
      while (!shard_list.empty()) {
        delete shard_list.back();
        shard_list.pop_back();
      }
    }
    /// wake any pg waiters after a PG is created/instantiated
    void wake_pg_waiters(spg_t pgid);

    /// prune ops (and possibly pg_slots) for pgs that shouldn't be here
    void prune_pg_waiters(OSDMapRef osdmap, int whoami);

    /// clear cached PGRef on pg deletion
    void clear_pg_pointer(spg_t pgid);

    /// clear pg_slots on shutdown
    void clear_pg_slots();

    /// try to do some work
    void _process(uint32_t thread_index, heartbeat_handle_d *hb) override;

    /// enqueue a new item
    void _enqueue(pair<spg_t, PGQueueable> item) override;

    /// requeue an old item (at the front of the line)
    void _enqueue_front(pair<spg_t, PGQueueable> item) override;
    void return_waiting_threads() override {
      for (uint32_t i = 0; i < num_shards; i++) {
        ShardData* sdata = shard_list[i];
        assert(NULL != sdata);
        sdata->sdata_lock.Lock();
        sdata->sdata_cond.Signal();
        sdata->sdata_lock.Unlock();
      }
    }

    void dump(Formatter *f) {
      for (uint32_t i = 0; i < num_shards; i++) {
        ShardData* sdata = shard_list[i];
        char lock_name[32] = {0};
        snprintf(lock_name, sizeof(lock_name), "%s%d", "OSD:ShardedOpWQ:", i);
        assert(NULL != sdata);
        sdata->sdata_op_ordering_lock.Lock();
        f->open_object_section(lock_name);
        sdata->pqueue->dump(f);
        f->close_section();
        sdata->sdata_op_ordering_lock.Unlock();
      }
    }
    /// Must be called on ops queued back to front
    struct Pred {
      spg_t pgid;
      list<OpRequestRef> *out_ops;
      uint64_t reserved_pushes_to_free;
      Pred(spg_t pg, list<OpRequestRef> *out_ops = 0)
        : pgid(pg), out_ops(out_ops), reserved_pushes_to_free(0) {}
      void accumulate(const PGQueueable &op) {
        reserved_pushes_to_free += op.get_reserved_pushes();
        if (out_ops) {
          boost::optional<OpRequestRef> mop = op.maybe_get_op();
          if (mop)
            out_ops->push_front(*mop);
        }
      }
      bool operator()(const pair<spg_t, PGQueueable> &op) {
        if (op.first == pgid) {
          accumulate(op.second);
          return true;
        } else {
          return false;
        }
      }
      uint64_t get_reserved_pushes_to_free() const {
        return reserved_pushes_to_free;
      }
    };
    bool is_shard_empty(uint32_t thread_index) override {
      uint32_t shard_index = thread_index % num_shards;
      ShardData* sdata = shard_list[shard_index];
      assert(NULL != sdata);
      Mutex::Locker l(sdata->sdata_op_ordering_lock);
      return sdata->pqueue->empty();
    }
  } op_shardedwq;
  void enqueue_op(spg_t pg, OpRequestRef& op, epoch_t epoch);
  void dequeue_op(
    PGRef pg, OpRequestRef op,
    ThreadPool::TPHandle &handle);
  // -- peering queue --
  struct PeeringWQ : public ThreadPool::BatchWorkQueue<PG> {
    list<PG*> peering_queue;
    OSD *osd;
    set<PG*> in_use;
    PeeringWQ(OSD *o, time_t ti, time_t si, ThreadPool *tp)
      : ThreadPool::BatchWorkQueue<PG>(
        "OSD::PeeringWQ", ti, si, tp), osd(o) {}

    void _dequeue(PG *pg) override {
      for (list<PG*>::iterator i = peering_queue.begin();
           i != peering_queue.end();
           ) {
        if (*i == pg) {
          peering_queue.erase(i++);
          pg->put("PeeringWQ");
        } else {
          ++i;
        }
      }
    }
    bool _enqueue(PG *pg) override {
      pg->get("PeeringWQ");
      peering_queue.push_back(pg);
      return true;
    }
    bool _empty() override {
      return peering_queue.empty();
    }
    void _dequeue(list<PG*> *out) override;
    void _process(
      const list<PG*> &pgs,
      ThreadPool::TPHandle &handle) override {
      assert(!pgs.empty());
      osd->process_peering_events(pgs, handle);
      for (list<PG*>::const_iterator i = pgs.begin();
           i != pgs.end();
           ++i) {
        (*i)->put("PeeringWQ");
      }
    }
    void _process_finish(const list<PG*> &pgs) override {
      for (list<PG*>::const_iterator i = pgs.begin();
           i != pgs.end();
           ++i) {
        in_use.erase(*i);
      }
    }
    void _clear() override {
      assert(peering_queue.empty());
    }
  } peering_wq;

  void process_peering_events(
    const list<PG*> &pg,
    ThreadPool::TPHandle &handle);
  friend class PrimaryLogPG;

protected:
  // -- osd map --
  OSDMapRef osdmap;
  OSDMapRef get_osdmap() {
    return osdmap;
  }
  epoch_t get_osdmap_epoch() const {
    return osdmap ? osdmap->get_epoch() : 0;
  }

  utime_t had_map_since;

  list<OpRequestRef> waiting_for_osdmap;
  deque<utime_t> osd_markdown_log;
  friend struct send_map_on_destruct;

  void wait_for_new_map(OpRequestRef op);
  void handle_osd_map(class MOSDMap *m);
  void _committed_osd_maps(epoch_t first, epoch_t last, class MOSDMap *m);
  void trim_maps(epoch_t oldest, int nreceived, bool skip_maps);
  void note_down_osd(int osd);
  void note_up_osd(int osd);
  friend class C_OnMapCommit;

  bool advance_pg(
    epoch_t advance_to, PG *pg,
    ThreadPool::TPHandle &handle,
    PG::RecoveryCtx *rctx,
    set<PGRef> *split_pgs);
  void activate_map();
  // osd map cache (past osd maps)
  OSDMapRef get_map(epoch_t e) {
    return service.get_map(e);
  }
  OSDMapRef add_map(OSDMap *o) {
    return service.add_map(o);
  }
  void add_map_bl(epoch_t e, bufferlist& bl) {
    return service.add_map_bl(e, bl);
  }
  void pin_map_bl(epoch_t e, bufferlist& bl) {
    return service.pin_map_bl(e, bl);
  }
  bool get_map_bl(epoch_t e, bufferlist& bl) {
    return service.get_map_bl(e, bl);
  }
  void add_map_inc_bl(epoch_t e, bufferlist& bl) {
    return service.add_map_inc_bl(e, bl);
  }
  void pin_map_inc_bl(epoch_t e, bufferlist& bl) {
    return service.pin_map_inc_bl(e, bl);
  }
protected:
  // -- placement groups --
  RWLock pg_map_lock;  // this lock orders *above* individual PG _locks
  ceph::unordered_map<spg_t, PG*> pg_map;  // protected by pg_map lock

  std::mutex pending_creates_lock;
  using create_from_osd_t = std::pair<pg_t, bool /* is primary */>;
  std::set<create_from_osd_t> pending_creates_from_osd;
  unsigned pending_creates_from_mon = 0;

  map<spg_t, list<PG::CephPeeringEvtRef> > peering_wait_for_split;
  PGRecoveryStats pg_recovery_stats;

  PGPool _get_pool(int id, OSDMapRef createmap);

  PG *_lookup_lock_pg_with_map_lock_held(spg_t pgid);
  PG *_lookup_lock_pg(spg_t pgid);

public:
  PG *lookup_lock_pg(spg_t pgid);

  int get_num_pgs() {
    RWLock::RLocker l(pg_map_lock);
    return pg_map.size();
  }

protected:
  PG *_open_lock_pg(OSDMapRef createmap,
                    spg_t pg, bool no_lockdep_check=false);
  enum res_result {
    RES_PARENT,  // resurrected a parent
    RES_SELF,    // resurrected self
    RES_NONE     // nothing relevant deleting
  };
  res_result _try_resurrect_pg(
    OSDMapRef curmap, spg_t pgid, spg_t *resurrected, PGRef *old_pg_state);

  PG *_create_lock_pg(
    OSDMapRef createmap,
    spg_t pgid,
    bool hold_map_lock,
    bool backfill,
    int role,
    vector<int>& up, int up_primary,
    vector<int>& acting, int acting_primary,
    pg_history_t history,
    const PastIntervals& pi,
    ObjectStore::Transaction& t);

  PG* _make_pg(OSDMapRef createmap, spg_t pgid);
  void add_newly_split_pg(PG *pg,
                          PG::RecoveryCtx *rctx);

  int handle_pg_peering_evt(
    spg_t pgid,
    const pg_history_t& orig_history,
    const PastIntervals& pi,
    epoch_t epoch,
    PG::CephPeeringEvtRef evt);
  bool maybe_wait_for_max_pg(spg_t pgid, bool is_mon_create);
  void resume_creating_pg();

  void build_past_intervals_parallel();
  /// build initial pg history and intervals on create
  void build_initial_pg_history(
    spg_t pgid,
    epoch_t created,
    utime_t created_stamp,
    pg_history_t *h,
    PastIntervals *pi);

  /// project pg history from epoch 'from' to now
  bool project_pg_history(
    spg_t pgid, pg_history_t& h, epoch_t from,
    const vector<int>& lastup,
    int lastupprimary,
    const vector<int>& lastacting,
    int lastactingprimary
    );  ///< @return false if there was a map gap between from and now
  // this must be called with pg->lock held on any pg addition to pg_map
  void wake_pg_waiters(PGRef pg) {
    assert(pg->is_locked());
    op_shardedwq.wake_pg_waiters(pg->info.pgid);
  }
  epoch_t last_pg_create_epoch;

  void handle_pg_create(OpRequestRef op);

  void split_pgs(
    PG *parent,
    const set<spg_t> &childpgids, set<PGRef> *out_pgs,
    OSDMapRef curmap,
    OSDMapRef nextmap,
    PG::RecoveryCtx *rctx);
  // == monitor interaction ==
  Mutex mon_report_lock;
  utime_t last_mon_report;
  utime_t last_pg_stats_sent;

  /* if our monitor dies, we want to notice it and reconnect.
   *  So we keep track of when it last acked our stat updates,
   *  and if too much time passes (and we've been sending
   *  more updates) then we can call it dead and reconnect
   *  elsewhere.
   */
  utime_t last_pg_stats_ack;
  float stats_ack_timeout;
  set<uint64_t> outstanding_pg_stats;  // how many stat updates haven't been acked yet
  void _got_mon_epochs(epoch_t oldest, epoch_t newest);
  void _preboot(epoch_t oldest, epoch_t newest);

  void _collect_metadata(map<string,string> *pmeta);

  void start_waiting_for_healthy();

  void send_full_update();

  friend struct C_OSD_GetVersion;

  // -- alive --
  epoch_t up_thru_wanted;

  void queue_want_up_thru(epoch_t want);
  // -- full map requests --
  epoch_t requested_full_first, requested_full_last;

  void request_full_map(epoch_t first, epoch_t last);
  void rerequest_full_maps() {
    epoch_t first = requested_full_first;
    epoch_t last = requested_full_last;
    requested_full_first = 0;
    requested_full_last = 0;
    request_full_map(first, last);
  }

  void got_full_map(epoch_t e);
  // -- failures --
  map<int,utime_t> failure_queue;
  map<int,pair<utime_t,entity_inst_t> > failure_pending;

  void requeue_failures();
  void send_failures();
  void send_still_alive(epoch_t epoch, const entity_inst_t &i);
  // -- pg stats --
  Mutex pg_stat_queue_lock;
  Cond pg_stat_queue_cond;
  xlist<PG*> pg_stat_queue;
  bool osd_stat_updated;
  uint64_t pg_stat_tid, pg_stat_tid_flushed;

  void send_pg_stats(const utime_t &now);
  void handle_pg_stats_ack(class MPGStatsAck *ack);
  void flush_pg_stats();

  ceph::coarse_mono_clock::time_point last_sent_beacon;
  Mutex min_last_epoch_clean_lock{"OSD::min_last_epoch_clean_lock"};
  epoch_t min_last_epoch_clean = 0;
  // which pgs were scanned for min_lec
  std::vector<pg_t> min_last_epoch_clean_pgs;
  void send_beacon(const ceph::coarse_mono_clock::time_point& now);
  void pg_stat_queue_enqueue(PG *pg) {
    pg_stat_queue_lock.Lock();
    if (pg->is_primary() && !pg->stat_queue_item.is_on_list()) {
      pg->get("pg_stat_queue");
      pg_stat_queue.push_back(&pg->stat_queue_item);
    }
    osd_stat_updated = true;
    pg_stat_queue_lock.Unlock();
  }
  void pg_stat_queue_dequeue(PG *pg) {
    pg_stat_queue_lock.Lock();
    if (pg->stat_queue_item.remove_myself())
      pg->put("pg_stat_queue");
    pg_stat_queue_lock.Unlock();
  }
  void clear_pg_stat_queue() {
    pg_stat_queue_lock.Lock();
    while (!pg_stat_queue.empty()) {
      PG *pg = pg_stat_queue.front();
      pg_stat_queue.pop_front();
      pg->put("pg_stat_queue");
    }
    pg_stat_queue_lock.Unlock();
  }
  void clear_outstanding_pg_stats() {
    Mutex::Locker l(pg_stat_queue_lock);
    outstanding_pg_stats.clear();
  }

  ceph_tid_t get_tid() {
    return service.get_tid();
  }
  // -- generic pg peering --
  PG::RecoveryCtx create_context();
  void dispatch_context(PG::RecoveryCtx &ctx, PG *pg, OSDMapRef curmap,
                        ThreadPool::TPHandle *handle = NULL);
  void dispatch_context_transaction(PG::RecoveryCtx &ctx, PG *pg,
                                    ThreadPool::TPHandle *handle = NULL);
  void do_notifies(map<int,
                       vector<pair<pg_notify_t, PastIntervals> > >&
                       notify_list,
                   OSDMapRef map);
  void do_queries(map<int, map<spg_t,pg_query_t> >& query_map,
                  OSDMapRef map);
  void do_infos(map<int,
                    vector<pair<pg_notify_t, PastIntervals> > >& info_map,
                OSDMapRef map);

  bool require_mon_peer(const Message *m);
  bool require_mon_or_mgr_peer(const Message *m);
  bool require_osd_peer(const Message *m);
  /**
   * Verifies that we were alive in the given epoch, and that
   * we still are.
   */
  bool require_self_aliveness(const Message *m, epoch_t alive_since);
  /**
   * Verifies that the OSD who sent the given op has the same
   * address as in the given map.
   * @pre op was sent by an OSD using the cluster messenger
   */
  bool require_same_peer_instance(const Message *m, OSDMapRef& map,
                                  bool is_fast_dispatch);

  bool require_same_or_newer_map(OpRequestRef& op, epoch_t e,
                                 bool is_fast_dispatch);
  void handle_pg_query(OpRequestRef op);
  void handle_pg_notify(OpRequestRef op);
  void handle_pg_log(OpRequestRef op);
  void handle_pg_info(OpRequestRef op);
  void handle_pg_trim(OpRequestRef op);

  void handle_pg_backfill_reserve(OpRequestRef op);
  void handle_pg_recovery_reserve(OpRequestRef op);

  void handle_force_recovery(Message *m);

  void handle_pg_remove(OpRequestRef op);
  void _remove_pg(PG *pg);
  // -- commands --
  struct Command {
    vector<string> cmd;
    ceph_tid_t tid;
    bufferlist indata;
    ConnectionRef con;

    Command(vector<string>& c, ceph_tid_t t, bufferlist& bl, Connection *co)
      : cmd(c), tid(t), indata(bl), con(co) {}
  };
  list<Command*> command_queue;
  struct CommandWQ : public ThreadPool::WorkQueue<Command> {
    OSD *osd;
    CommandWQ(OSD *o, time_t ti, time_t si, ThreadPool *tp)
      : ThreadPool::WorkQueue<Command>("OSD::CommandWQ", ti, si, tp), osd(o) {}

    bool _empty() override {
      return osd->command_queue.empty();
    }
    bool _enqueue(Command *c) override {
      osd->command_queue.push_back(c);
      return true;
    }
    void _dequeue(Command *pg) override {
      ceph_abort();  // not needed
    }
    Command *_dequeue() override {
      if (osd->command_queue.empty())
        return NULL;
      Command *c = osd->command_queue.front();
      osd->command_queue.pop_front();
      return c;
    }
    void _process(Command *c, ThreadPool::TPHandle &) override {
      osd->osd_lock.Lock();
      if (osd->is_stopping()) {
        osd->osd_lock.Unlock();
        delete c;
        return;
      }
      osd->do_command(c->con.get(), c->tid, c->cmd, c->indata);
      osd->osd_lock.Unlock();
      delete c;
    }
    void _clear() override {
      while (!osd->command_queue.empty()) {
        Command *c = osd->command_queue.front();
        osd->command_queue.pop_front();
        delete c;
      }
    }
  } command_wq;

  void handle_command(class MMonCommand *m);
  void handle_command(class MCommand *m);
  void do_command(Connection *con, ceph_tid_t tid, vector<string>& cmd, bufferlist& data);
  // -- pg recovery --
  void do_recovery(PG *pg, epoch_t epoch_queued, uint64_t pushes_reserved,
                   ThreadPool::TPHandle &handle);

  // -- scrubbing --
  bool scrub_random_backoff();
  bool scrub_load_below_threshold();
  bool scrub_time_permit(utime_t now);
  // -- removing --
  struct RemoveWQ :
    public ThreadPool::WorkQueueVal<pair<PGRef, DeletingStateRef> > {
    CephContext* cct;
    ObjectStore *&store;
    list<pair<PGRef, DeletingStateRef> > remove_queue;
    RemoveWQ(CephContext* cct, ObjectStore *&o, time_t ti, time_t si,
             ThreadPool *tp)
      : ThreadPool::WorkQueueVal<pair<PGRef, DeletingStateRef> >(
        "OSD::RemoveWQ", ti, si, tp), cct(cct), store(o) {}

    bool _empty() override {
      return remove_queue.empty();
    }
    void _enqueue(pair<PGRef, DeletingStateRef> item) override {
      remove_queue.push_back(item);
    }
    void _enqueue_front(pair<PGRef, DeletingStateRef> item) override {
      remove_queue.push_front(item);
    }
    bool _dequeue(pair<PGRef, DeletingStateRef> item) {
      ceph_abort();
    }
    pair<PGRef, DeletingStateRef> _dequeue() override {
      assert(!remove_queue.empty());
      pair<PGRef, DeletingStateRef> item = remove_queue.front();
      remove_queue.pop_front();
      return item;
    }
    void _process(pair<PGRef, DeletingStateRef>,
                  ThreadPool::TPHandle &) override;
    void _clear() override {
      remove_queue.clear();
    }
    int get_remove_queue_len() {
      lock();
      int r = remove_queue.size();
      unlock();
      return r;
    }
  } remove_wq;
  // -- status reporting --
  MPGStats *collect_pg_stats();
  std::vector<OSDHealthMetric> get_health_metrics();
private:
  bool ms_can_fast_dispatch_any() const override { return true; }
  bool ms_can_fast_dispatch(const Message *m) const override {
    switch (m->get_type()) {
    case CEPH_MSG_OSD_OP:
    case CEPH_MSG_OSD_BACKOFF:
    case MSG_OSD_SUBOP:
    case MSG_OSD_REPOP:
    case MSG_OSD_SUBOPREPLY:
    case MSG_OSD_REPOPREPLY:
    case MSG_OSD_PG_PUSH:
    case MSG_OSD_PG_PULL:
    case MSG_OSD_PG_PUSH_REPLY:
    case MSG_OSD_PG_SCAN:
    case MSG_OSD_PG_BACKFILL:
    case MSG_OSD_PG_BACKFILL_REMOVE:
    case MSG_OSD_EC_WRITE:
    case MSG_OSD_EC_WRITE_REPLY:
    case MSG_OSD_EC_READ:
    case MSG_OSD_EC_READ_REPLY:
    case MSG_OSD_SCRUB_RESERVE:
    case MSG_OSD_REP_SCRUB:
    case MSG_OSD_REP_SCRUBMAP:
    case MSG_OSD_PG_UPDATE_LOG_MISSING:
    case MSG_OSD_PG_UPDATE_LOG_MISSING_REPLY:
    case MSG_OSD_PG_RECOVERY_DELETE:
    case MSG_OSD_PG_RECOVERY_DELETE_REPLY:
      return true;
    default:
      return false;
    }
  }
  void ms_fast_dispatch(Message *m) override;
  void ms_fast_preprocess(Message *m) override;
  bool ms_dispatch(Message *m) override;
  bool ms_get_authorizer(int dest_type, AuthAuthorizer **authorizer, bool force_new) override;
  bool ms_verify_authorizer(Connection *con, int peer_type,
                            int protocol, bufferlist& authorizer, bufferlist& authorizer_reply,
                            bool& isvalid, CryptoKey& session_key,
                            std::unique_ptr<AuthAuthorizerChallenge> *challenge) override;
  void ms_handle_connect(Connection *con) override;
  void ms_handle_fast_connect(Connection *con) override;
  void ms_handle_fast_accept(Connection *con) override;
  bool ms_handle_reset(Connection *con) override;
  void ms_handle_remote_reset(Connection *con) override {}
  bool ms_handle_refused(Connection *con) override;
  io_queue get_io_queue() const {
    if (cct->_conf->osd_op_queue == "debug_random") {
      static io_queue index_lookup[] = { io_queue::prioritized,
                                         io_queue::weightedpriority,
                                         io_queue::mclock_opclass,
                                         io_queue::mclock_client };
      srand(time(NULL));
      unsigned which = rand() % (sizeof(index_lookup) / sizeof(index_lookup[0]));
      return index_lookup[which];
    } else if (cct->_conf->osd_op_queue == "prioritized") {
      return io_queue::prioritized;
    } else if (cct->_conf->osd_op_queue == "mclock_opclass") {
      return io_queue::mclock_opclass;
    } else if (cct->_conf->osd_op_queue == "mclock_client") {
      return io_queue::mclock_client;
    } else {
      // default / catch-all is 'wpq'
      return io_queue::weightedpriority;
    }
  }
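
  // Illustrative note (not part of the original header): the queue class
  // is chosen once at startup from the osd_op_queue option, e.g. in
  // ceph.conf:
  //
  //   [osd]
  //   osd_op_queue = wpq          # unrecognized values also fall back here
  //   osd_op_queue_cut_off = high
  //
  // "debug_random" exists for testing and picks an implementation at
  // random.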
  unsigned int get_io_prio_cut() const {
    if (cct->_conf->osd_op_queue_cut_off == "debug_random") {
      srand(time(NULL));
      return (rand() % 2 < 1) ? CEPH_MSG_PRIO_HIGH : CEPH_MSG_PRIO_LOW;
    } else if (cct->_conf->osd_op_queue_cut_off == "high") {
      return CEPH_MSG_PRIO_HIGH;
    } else {
      // default / catch-all is 'low'
      return CEPH_MSG_PRIO_LOW;
    }
  }
public:
  /* internal and external can point to the same messenger; they will still
   * be cleaned up properly */
  OSD(CephContext *cct_,
      ObjectStore *store_,
      int id,
      Messenger *internal,
      Messenger *external,
      Messenger *hb_front_client,
      Messenger *hb_back_client,
      Messenger *hb_front_server,
      Messenger *hb_back_server,
      Messenger *osdc_messenger,
      MonClient *mc, const std::string &dev, const std::string &jdev);

  // static bits
  static int mkfs(CephContext *cct, ObjectStore *store,
                  const string& dev,
                  uuid_d fsid, int whoami);
  /* remove any non-user xattrs from a map of them */
  void filter_xattrs(map<string, bufferptr>& attrs) {
    for (map<string, bufferptr>::iterator iter = attrs.begin();
         iter != attrs.end();
         ) {
      if (('_' != iter->first.at(0)) || (iter->first.size() == 1))
        attrs.erase(iter++);
      else
        ++iter;
    }
  }
private:
  int mon_cmd_maybe_osd_create(string &cmd);
  int update_crush_device_class();
  int update_crush_location();

  static int write_meta(CephContext *cct,
                        ObjectStore *store,
                        uuid_d& cluster_fsid, uuid_d& osd_fsid, int whoami);

  void handle_pg_scrub(struct MOSDScrub *m, PG* pg);
  void handle_scrub(struct MOSDScrub *m);
  void handle_osd_ping(class MOSDPing *m);

  int init_op_flags(OpRequestRef& op);

  int get_num_op_shards();
  int get_num_op_threads();

  float get_osd_recovery_sleep();

public:
  static int peek_meta(ObjectStore *store, string& magic,
                       uuid_d& cluster_fsid, uuid_d& osd_fsid, int& whoami);

  int enable_disable_fuse(bool stop);

  void suicide(int exitcode);

  void handle_signal(int signum);
  /// check if we can throw out op from a disconnected client
  static bool op_is_discardable(const MOSDOp *m);

public:
  OSDService service;
  friend class OSDService;
};

std::ostream& operator<<(std::ostream& out, const OSD::io_queue& q);
//compatibility of the executable
extern const CompatSet::Feature ceph_osd_feature_compat[];
extern const CompatSet::Feature ceph_osd_feature_ro_compat[];
extern const CompatSet::Feature ceph_osd_feature_incompat[];

#endif // CEPH_OSD_H