// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 */
#include "msg/Dispatcher.h"

#include "common/Mutex.h"
#include "common/RWLock.h"
#include "common/Timer.h"
#include "common/WorkQueue.h"
#include "common/AsyncReserver.h"
#include "common/ceph_context.h"
#include "common/zipkin_trace.h"

#include "mgr/MgrClient.h"

#include "os/ObjectStore.h"

#include "auth/KeyRing.h"
#include "osd/ClassHandler.h"

#include "include/CompatSet.h"

#include "OpRequest.h"

#include "osd/PGQueueable.h"

#include "include/memory.h"

#include "include/unordered_map.h"

#include "common/shared_cache.hpp"
#include "common/simple_cache.hpp"
#include "common/sharedptr_registry.hpp"
#include "common/WeightedPriorityQueue.h"
#include "common/PrioritizedQueue.h"
#include "osd/mClockOpClassQueue.h"
#include "osd/mClockClientQueue.h"
#include "messages/MOSDOp.h"
#include "include/Spinlock.h"
#include "common/EventTrace.h"

#define CEPH_OSD_PROTOCOL 10 /* cluster internal */
  l_osd_op_r_lat_outb_hist,
  l_osd_op_r_process_lat,
  l_osd_op_r_prepare_lat,

  l_osd_op_w_lat_inb_hist,
  l_osd_op_w_process_lat,
  l_osd_op_w_prepare_lat,

  l_osd_op_rw_lat_inb_hist,
  l_osd_op_rw_lat_outb_hist,
  l_osd_op_rw_process_lat,
  l_osd_op_rw_prepare_lat,

  l_osd_op_before_queue_op_lat,
  l_osd_op_before_dequeue_op_lat,

  l_osd_history_alloc_bytes,
  l_osd_history_alloc_num,

  l_osd_cached_crc_adjusted,

  l_osd_waiting_for_map,

  l_osd_map_cache_miss,
  l_osd_map_cache_miss_low,
  l_osd_map_cache_miss_low_avg,
  l_osd_map_bl_cache_hit,
  l_osd_map_bl_cache_miss,

  l_osd_stat_bytes_used,
  l_osd_stat_bytes_avail,

  l_osd_tier_flush_fail,
  l_osd_tier_try_flush,
  l_osd_tier_try_flush_fail,

  l_osd_tier_proxy_read,
  l_osd_tier_proxy_write,

  l_osd_object_ctx_cache_hit,
  l_osd_object_ctx_cache_total,

  l_osd_tier_flush_lat,
  l_osd_tier_promote_lat,

// RecoveryState perf counters
  rs_backfilling_latency,
  rs_waitremotebackfillreserved_latency,
  rs_waitlocalbackfillreserved_latency,
  rs_notbackfilling_latency,
  rs_repnotrecovering_latency,
  rs_repwaitrecoveryreserved_latency,
  rs_repwaitbackfillreserved_latency,
  rs_reprecovering_latency,
  rs_activating_latency,
  rs_waitlocalrecoveryreserved_latency,
  rs_waitremoterecoveryreserved_latency,
  rs_recovering_latency,
  rs_recovered_latency,
  rs_replicaactive_latency,
  rs_waitactingchange_latency,
  rs_incomplete_latency,
  rs_getmissing_latency,
  rs_waitupthru_latency,
  rs_notrecovering_latency,
class AuthAuthorizeHandlerRegistry;
class TestOpsSocketHook;
struct C_CompleteSplits;

typedef ceph::shared_ptr<ObjectStore::Sequencer> SequencerRef;
class DeletingState {
  const PGRef old_pg_state;
  explicit DeletingState(const pair<spg_t, PGRef> &in) :
    lock("DeletingState::lock"), status(QUEUED), stop_deleting(false),
    pgid(in.first), old_pg_state(in.second) {

  /// transition status to CLEARING_WAITING
  bool pause_clearing() {
    Mutex::Locker l(lock);
    assert(status == CLEARING_DIR);
    status = CLEARING_WAITING;
  } ///< @return false if we should cancel deletion

  /// start or resume the clearing - transition the status to CLEARING_DIR
  bool start_or_resume_clearing() {
    Mutex::Locker l(lock);
    status == DELETED_DIR ||
    status == CLEARING_WAITING);
    status = CLEARING_DIR;
  } ///< @return false if we should cancel the deletion

  /// transition status to CLEARING_DIR
  bool resume_clearing() {
    Mutex::Locker l(lock);
    assert(status == CLEARING_WAITING);
    status = CLEARING_DIR;
  } ///< @return false if we should cancel deletion

  /// transition status to deleting
  bool start_deleting() {
    Mutex::Locker l(lock);
    assert(status == CLEARING_DIR);
    status = DELETING_DIR;
  } ///< @return false if we should cancel deletion

  /// signal collection removal queued
  void finish_deleting() {
    Mutex::Locker l(lock);
    assert(status == DELETING_DIR);
    status = DELETED_DIR;

  /// try to halt the deletion
  bool try_stop_deletion() {
    Mutex::Locker l(lock);
    stop_deleting = true;
    /*
     * If we are in DELETING_DIR or CLEARING_DIR, there are in progress
     * operations we have to wait for before continuing on.  States
     * CLEARING_WAITING and QUEUED indicate that the remover will check
     * stop_deleting before queueing any further operations.  CANCELED
     * indicates that the remover has already halted.  DELETED_DIR
     * indicates that the deletion has been fully queued.
     */
    while (status == DELETING_DIR || status == CLEARING_DIR)
    return status != DELETED_DIR;
  } ///< @return true if we don't need to recreate the collection

typedef ceph::shared_ptr<DeletingState> DeletingStateRef;
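
/*
 * Illustrative sketch only (not part of the original header): a removal
 * worker is expected to drive the state machine declared above roughly as
 *
 *   if (!dstate->start_or_resume_clearing()) return;   // deletion canceled
 *   ... remove some objects, then optionally yield ...
 *   if (!dstate->pause_clearing()) return;
 *   if (!dstate->resume_clearing()) return;
 *   if (!dstate->start_deleting()) return;
 *   ... queue the collection removal ...
 *   dstate->finish_deleting();
 *
 * while try_stop_deletion() is the other side: it sets stop_deleting and
 * waits out any in-progress CLEARING_DIR/DELETING_DIR work.
 */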
  SharedPtrRegistry<spg_t, ObjectStore::Sequencer> osr_registry;
  ceph::shared_ptr<ObjectStore::Sequencer> meta_osr;
  SharedPtrRegistry<spg_t, DeletingState> deleting_pgs;

  LogClient &log_client;

  PGRecoveryStats &pg_recovery_stats;

  Messenger *&cluster_messenger;
  Messenger *&client_messenger;

  PerfCounters *&logger;
  PerfCounters *&recoverystate_perf;

  ThreadPool::BatchWorkQueue<PG> &peering_wq;
  GenContextWQ recovery_gen_wq;
  ClassHandler *&class_handler;

  void enqueue_back(spg_t pgid, PGQueueable qi);
  void enqueue_front(spg_t pgid, PGQueueable qi);

  void maybe_inject_dispatch_delay() {
    if (g_conf->osd_debug_inject_dispatch_delay_probability > 0) {
          g_conf->osd_debug_inject_dispatch_delay_probability * 10000) {
        t.set_from_double(g_conf->osd_debug_inject_dispatch_delay_duration);
  // -- map epoch lower bound --
  multiset<epoch_t> pg_epochs;
  map<spg_t,epoch_t> pg_epoch;

  void pg_add_epoch(spg_t pgid, epoch_t epoch) {
    Mutex::Locker l(pg_epoch_lock);
    map<spg_t,epoch_t>::iterator t = pg_epoch.find(pgid);
    assert(t == pg_epoch.end());
    pg_epoch[pgid] = epoch;
    pg_epochs.insert(epoch);
  void pg_update_epoch(spg_t pgid, epoch_t epoch) {
    Mutex::Locker l(pg_epoch_lock);
    map<spg_t,epoch_t>::iterator t = pg_epoch.find(pgid);
    assert(t != pg_epoch.end());
    pg_epochs.erase(pg_epochs.find(t->second));
    pg_epochs.insert(epoch);
  void pg_remove_epoch(spg_t pgid) {
    Mutex::Locker l(pg_epoch_lock);
    map<spg_t,epoch_t>::iterator t = pg_epoch.find(pgid);
    if (t != pg_epoch.end()) {
      pg_epochs.erase(pg_epochs.find(t->second));
  epoch_t get_min_pg_epoch() {
    Mutex::Locker l(pg_epoch_lock);
    if (pg_epochs.empty())
    return *pg_epochs.begin();
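
  // Illustrative note (not in the original header): pg_epoch records, per pg,
  // the oldest map epoch that pg still needs, and pg_epochs mirrors those
  // values in a multiset so get_min_pg_epoch() is simply *pg_epochs.begin().
  // E.g. pgs at epochs {12, 15, 15} give a minimum of 12; updating the first
  // pg to 16 erases one 12 and inserts 16, making the new minimum 15.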
  Mutex publish_lock, pre_publish_lock; // pre-publish orders before publish
  OSDSuperblock superblock;

  OSDSuperblock get_superblock() {
    Mutex::Locker l(publish_lock);
  void publish_superblock(const OSDSuperblock &block) {
    Mutex::Locker l(publish_lock);
  int get_nodeid() const { return whoami; }

  std::atomic<epoch_t> max_oldest_map;

  OSDMapRef get_osdmap() {
    Mutex::Locker l(publish_lock);
  epoch_t get_osdmap_epoch() {
    Mutex::Locker l(publish_lock);
    return osdmap ? osdmap->get_epoch() : 0;
  void publish_map(OSDMapRef map) {
    Mutex::Locker l(publish_lock);

  /*
   * osdmap - current published map
   * next_osdmap - pre_published map that is about to be published.
   *
   * We use the next_osdmap to send messages and initiate connections,
   * but only if the target is the same instance as the one in the map
   * epoch the current user is working from (i.e., the result is
   * equivalent to what is in next_osdmap).
   *
   * This allows the helpers to start ignoring osds that are about to
   * go down, and let OSD::handle_osd_map()/note_down_osd() mark them
   * down, without worrying about reopening connections from threads
   * working from old maps.
   */
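
  /*
   * Illustrative sketch only (not part of the original header), showing the
   * intended calling pattern for the reservation helpers declared below,
   * assuming some peer osd id `peer` and Message `m`:
   *
   *   OSDMapRef next = get_nextmap_reserved();   // pins next_osdmap's epoch
   *   if (next->is_up(peer))
   *     send_message_osd_cluster(peer, m, next->get_epoch());
   *   release_map(next);                         // drops the reservation
   *
   * await_reserved_maps() can then be used to wait until no reservations
   * remain on epochs older than next_osdmap.
   */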
  OSDMapRef next_osdmap;
  Cond pre_publish_cond;

  void pre_publish_map(OSDMapRef map) {
    Mutex::Locker l(pre_publish_lock);
    next_osdmap = std::move(map);

  /// map epochs reserved below
  map<epoch_t, unsigned> map_reservations;

  /// gets ref to next_osdmap and registers the epoch as reserved
  OSDMapRef get_nextmap_reserved() {
    Mutex::Locker l(pre_publish_lock);
    epoch_t e = next_osdmap->get_epoch();
    map<epoch_t, unsigned>::iterator i =
      map_reservations.insert(make_pair(e, 0)).first;

  /// releases reservation on map
  void release_map(OSDMapRef osdmap) {
    Mutex::Locker l(pre_publish_lock);
    map<epoch_t, unsigned>::iterator i =
      map_reservations.find(osdmap->get_epoch());
    assert(i != map_reservations.end());
    assert(i->second > 0);
    if (--(i->second) == 0) {
      map_reservations.erase(i);
    pre_publish_cond.Signal();

  /// blocks until there are no reserved maps prior to next_osdmap
  void await_reserved_maps() {
    Mutex::Locker l(pre_publish_lock);
      map<epoch_t, unsigned>::const_iterator i = map_reservations.cbegin();
      if (i == map_reservations.cend() || i->first >= next_osdmap->get_epoch()) {
      pre_publish_cond.Wait(pre_publish_lock);
  Mutex peer_map_epoch_lock;
  map<int, epoch_t> peer_map_epoch;

  epoch_t get_peer_epoch(int p);
  epoch_t note_peer_epoch(int p, epoch_t e);
  void forget_peer_epoch(int p, epoch_t e);

  void send_map(class MOSDMap *m, Connection *con);
  void send_incremental_map(epoch_t since, Connection *con, OSDMapRef& osdmap);
  MOSDMap *build_incremental_map_msg(epoch_t from, epoch_t to,
                                     OSDSuperblock& superblock);
  bool should_share_map(entity_name_t name, Connection *con, epoch_t epoch,
                        const OSDMapRef& osdmap, const epoch_t *sent_epoch_p);
  void share_map(entity_name_t name, Connection *con, epoch_t epoch,
                 OSDMapRef& osdmap, epoch_t *sent_epoch_p);
  void share_map_peer(int peer, Connection *con,
                      OSDMapRef map = OSDMapRef());

  ConnectionRef get_con_osd_cluster(int peer, epoch_t from_epoch);
  pair<ConnectionRef,ConnectionRef> get_con_osd_hb(int peer, epoch_t from_epoch);  // (back, front)
  void send_message_osd_cluster(int peer, Message *m, epoch_t from_epoch);
  void send_message_osd_cluster(Message *m, Connection *con) {
    con->send_message(m);
  void send_message_osd_cluster(Message *m, const ConnectionRef& con) {
    con->send_message(m);
  void send_message_osd_client(Message *m, Connection *con) {
    con->send_message(m);
  void send_message_osd_client(Message *m, const ConnectionRef& con) {
    con->send_message(m);
  entity_name_t get_cluster_msgr_name() {
    return cluster_messenger->get_myname();
  // -- scrub scheduling --
  Mutex sched_scrub_lock;

    /// pg to be scrubbed
    /// a time scheduled for scrub. but the scrub could be delayed if system
    /// load is too high or it fails to fall in the scrub hours
    /// the hard upper bound of scrub time
    ScrubJob() : cct(nullptr) {}
    explicit ScrubJob(CephContext* cct, const spg_t& pg,
                      const utime_t& timestamp,
                      double pool_scrub_min_interval = 0,
                      double pool_scrub_max_interval = 0, bool must = true);
    /// order the jobs by sched_time
    bool operator<(const ScrubJob& rhs) const;
  set<ScrubJob> sched_scrub_pg;

  /// @returns the scrub_reg_stamp used for unregister the scrub job
  utime_t reg_pg_scrub(spg_t pgid, utime_t t, double pool_scrub_min_interval,
                       double pool_scrub_max_interval, bool must) {
    ScrubJob scrub(cct, pgid, t, pool_scrub_min_interval, pool_scrub_max_interval,
    Mutex::Locker l(sched_scrub_lock);
    sched_scrub_pg.insert(scrub);
    return scrub.sched_time;
  void unreg_pg_scrub(spg_t pgid, utime_t t) {
    Mutex::Locker l(sched_scrub_lock);
    size_t removed = sched_scrub_pg.erase(ScrubJob(cct, pgid, t));
  bool first_scrub_stamp(ScrubJob *out) {
    Mutex::Locker l(sched_scrub_lock);
    if (sched_scrub_pg.empty())
    set<ScrubJob>::iterator iter = sched_scrub_pg.begin();
  bool next_scrub_stamp(const ScrubJob& next,
    Mutex::Locker l(sched_scrub_lock);
    if (sched_scrub_pg.empty())
    set<ScrubJob>::const_iterator iter = sched_scrub_pg.lower_bound(next);
    if (iter == sched_scrub_pg.cend())
    if (iter == sched_scrub_pg.cend())

  void dumps_scrub(Formatter *f) {
    assert(f != nullptr);
    Mutex::Locker l(sched_scrub_lock);
    f->open_array_section("scrubs");
    for (const auto &i : sched_scrub_pg) {
      f->open_object_section("scrub");
      f->dump_stream("pgid") << i.pgid;
      f->dump_stream("sched_time") << i.sched_time;
      f->dump_stream("deadline") << i.deadline;
      f->dump_bool("forced", i.sched_time == i.deadline);

  bool can_inc_scrubs_pending();
  bool inc_scrubs_pending();
  void inc_scrubs_active(bool reserved);
  void dec_scrubs_pending();
  void dec_scrubs_active();

  void reply_op_error(OpRequestRef op, int err);
  void reply_op_error(OpRequestRef op, int err, eversion_t v, version_t uv);
  void handle_misdirected_op(PG *pg, OpRequestRef op);
  // -- agent shared state --
  map<uint64_t, set<PGRef> > agent_queue;
  set<PGRef>::iterator agent_queue_pos;
  bool agent_valid_iterator;
  int flush_mode_high_count; // once one pg is in FLUSH_MODE_HIGH, flush objects at high speed
  set<hobject_t> agent_oids;

  struct AgentThread : public Thread {
    explicit AgentThread(OSDService *o) : osd(o) {}
    void *entry() override {
  bool agent_stop_flag;
  Mutex agent_timer_lock;
  SafeTimer agent_timer;

  void _enqueue(PG *pg, uint64_t priority) {
    if (!agent_queue.empty() &&
        agent_queue.rbegin()->first < priority)
      agent_valid_iterator = false;  // inserting higher-priority queue
    set<PGRef>& nq = agent_queue[priority];

  void _dequeue(PG *pg, uint64_t old_priority) {
    set<PGRef>& oq = agent_queue[old_priority];
    set<PGRef>::iterator p = oq.find(pg);
    assert(p != oq.end());
    if (p == agent_queue_pos)
    if (agent_queue.rbegin()->first == old_priority)
      agent_valid_iterator = false;
    agent_queue.erase(old_priority);

  /// enable agent for a pg
  void agent_enable_pg(PG *pg, uint64_t priority) {
    Mutex::Locker l(agent_lock);
    _enqueue(pg, priority);

  /// adjust priority for an enabled pg
  void agent_adjust_pg(PG *pg, uint64_t old_priority, uint64_t new_priority) {
    Mutex::Locker l(agent_lock);
    assert(new_priority != old_priority);
    _enqueue(pg, new_priority);
    _dequeue(pg, old_priority);

  /// disable agent for a pg
  void agent_disable_pg(PG *pg, uint64_t old_priority) {
    Mutex::Locker l(agent_lock);
    _dequeue(pg, old_priority);

  /// note start of an async (evict) op
  void agent_start_evict_op() {
    Mutex::Locker l(agent_lock);

  /// note finish or cancellation of an async (evict) op
  void agent_finish_evict_op() {
    Mutex::Locker l(agent_lock);
    assert(agent_ops > 0);

  /// note start of an async (flush) op
  void agent_start_op(const hobject_t& oid) {
    Mutex::Locker l(agent_lock);
    assert(agent_oids.count(oid) == 0);
    agent_oids.insert(oid);

  /// note finish or cancellation of an async (flush) op
  void agent_finish_op(const hobject_t& oid) {
    Mutex::Locker l(agent_lock);
    assert(agent_ops > 0);
    assert(agent_oids.count(oid) == 1);
    agent_oids.erase(oid);

  /// check if we are operating on an object
  bool agent_is_active_oid(const hobject_t& oid) {
    Mutex::Locker l(agent_lock);
    return agent_oids.count(oid);

  /// get count of active agent ops
  int agent_get_num_ops() {
    Mutex::Locker l(agent_lock);

  void agent_inc_high_count() {
    Mutex::Locker l(agent_lock);
    flush_mode_high_count++;

  void agent_dec_high_count() {
    Mutex::Locker l(agent_lock);
    flush_mode_high_count--;
  /// throttle promotion attempts
  std::atomic_uint promote_probability_millis{1000}; ///< probability thousandths. one word.
  PromoteCounter promote_counter;
  utime_t last_recalibrate;
  unsigned long promote_max_objects, promote_max_bytes;

  bool promote_throttle() {
    // NOTE: lockless!  we rely on the probability being a single word.
    promote_counter.attempt();
    if ((unsigned)rand() % 1000 > promote_probability_millis)
      return true;  // yes throttle (no promote)
    if (promote_max_objects &&
        promote_counter.objects > promote_max_objects)
      return true;  // yes throttle
    if (promote_max_bytes &&
        promote_counter.bytes > promote_max_bytes)
      return true;  // yes throttle
    return false;   // no throttle (promote)
  void promote_finish(uint64_t bytes) {
    promote_counter.finish(bytes);
  void promote_throttle_recalibrate();
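
  /*
   * Illustrative note (not part of the original header): promote_throttle()
   * is the lockless gate a read path consults before promoting an object
   * into the cache tier, e.g.
   *
   *   if (!promote_throttle()) {
   *     ... do the promotion ...
   *     promote_finish(object_size_in_bytes);   // feeds the recalibration
   *   }
   *
   * A random draw against promote_probability_millis (per-mille) plus the
   * promote_max_objects/promote_max_bytes caps decides whether to skip the
   * promote; promote_throttle_recalibrate() periodically retunes the
   * probability from PromoteCounter.
   */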
  // -- Objecter, for tiering reads/writes from/to other OSDs --
  Finisher objecter_finisher;

  SafeTimer watch_timer;
  uint64_t next_notif_id;
  uint64_t get_next_id(epoch_t cur_epoch) {
    Mutex::Locker l(watch_lock);
    return (((uint64_t)cur_epoch) << 32) | ((uint64_t)(next_notif_id++));
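
  // Illustrative note (not in the original header): the notify id packs the
  // current epoch into the high 32 bits and a per-OSD counter into the low
  // 32 bits, e.g. cur_epoch = 7, next_notif_id = 3 -> 0x0000000700000003,
  // so ids stay unique across epochs without extra coordination.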
  // -- Recovery/Backfill Request Scheduling --
  Mutex recovery_request_lock;
  SafeTimer recovery_request_timer;

  // For async recovery sleep
  bool recovery_needs_sleep = true;
  utime_t recovery_schedule_time = utime_t();

  Mutex recovery_sleep_lock;
  SafeTimer recovery_sleep_timer;

  std::atomic_uint last_tid{0};
  ceph_tid_t get_tid() {
    return (ceph_tid_t)last_tid++;

  // -- backfill_reservation --
  Finisher reserver_finisher;
  AsyncReserver<spg_t> local_reserver;
  AsyncReserver<spg_t> remote_reserver;

  map<pg_t, vector<int> > pg_temp_wanted;
  map<pg_t, vector<int> > pg_temp_pending;
  void _sent_pg_temp();
  void queue_want_pg_temp(pg_t pgid, vector<int>& want);
  void remove_want_pg_temp(pg_t pgid);
  void requeue_pg_temp();

  void send_pg_created(pg_t pgid);

  void queue_for_peering(PG *pg);

  Mutex snap_sleep_lock;
  SafeTimer snap_sleep_timer;

  Mutex scrub_sleep_lock;
  SafeTimer scrub_sleep_timer;

  AsyncReserver<spg_t> snap_reserver;
  void queue_for_snap_trim(PG *pg);

  void queue_for_scrub(PG *pg, bool with_high_priority) {
    unsigned scrub_queue_priority = pg->scrubber.priority;
    if (with_high_priority && scrub_queue_priority < cct->_conf->osd_client_op_priority) {
      scrub_queue_priority = cct->_conf->osd_client_op_priority;
          PGScrub(pg->get_osdmap()->get_epoch()),
          cct->_conf->osd_scrub_cost,
          scrub_queue_priority,
          pg->get_osdmap()->get_epoch()));
  // -- pg recovery and associated throttling --
  list<pair<epoch_t, PGRef> > awaiting_throttle;

  utime_t defer_recovery_until;
  uint64_t recovery_ops_active;
  uint64_t recovery_ops_reserved;
  bool recovery_paused;
#ifdef DEBUG_RECOVERY_OIDS
  map<spg_t, set<hobject_t> > recovery_oids;
  bool _recover_now(uint64_t *available_pushes);
  void _maybe_queue_recovery();
  void _queue_for_recovery(
    pair<epoch_t, PGRef> p, uint64_t reserved_pushes) {
    assert(recovery_lock.is_locked_by_me());
        PGRecovery(p.first, reserved_pushes),
        cct->_conf->osd_recovery_cost,
        cct->_conf->osd_recovery_priority,

  void start_recovery_op(PG *pg, const hobject_t& soid);
  void finish_recovery_op(PG *pg, const hobject_t& soid, bool dequeue);
  bool is_recovery_active();
  void release_reserved_pushes(uint64_t pushes) {
    Mutex::Locker l(recovery_lock);
    assert(recovery_ops_reserved >= pushes);
    recovery_ops_reserved -= pushes;
    _maybe_queue_recovery();
  void defer_recovery(float defer_for) {
    defer_recovery_until = ceph_clock_now();
    defer_recovery_until += defer_for;
  void pause_recovery() {
    Mutex::Locker l(recovery_lock);
    recovery_paused = true;
  bool recovery_is_paused() {
    Mutex::Locker l(recovery_lock);
    return recovery_paused;
  void unpause_recovery() {
    Mutex::Locker l(recovery_lock);
    recovery_paused = false;
    _maybe_queue_recovery();
  void kick_recovery_queue() {
    Mutex::Locker l(recovery_lock);
    _maybe_queue_recovery();
  void clear_queued_recovery(PG *pg) {
    Mutex::Locker l(recovery_lock);
    for (list<pair<epoch_t, PGRef> >::iterator i = awaiting_throttle.begin();
         i != awaiting_throttle.end();
      if (i->second.get() == pg) {
        awaiting_throttle.erase(i);
  // delayed pg activation
  void queue_for_recovery(PG *pg) {
    Mutex::Locker l(recovery_lock);
    if (pg->get_state() & (PG_STATE_FORCED_RECOVERY | PG_STATE_FORCED_BACKFILL)) {
      awaiting_throttle.push_front(make_pair(pg->get_osdmap()->get_epoch(), pg));
      awaiting_throttle.push_back(make_pair(pg->get_osdmap()->get_epoch(), pg));
    _maybe_queue_recovery();
  void queue_recovery_after_sleep(PG *pg, epoch_t queued, uint64_t reserved_pushes) {
    Mutex::Locker l(recovery_lock);
    _queue_for_recovery(make_pair(queued, pg), reserved_pushes);

  void adjust_pg_priorities(const vector<PGRef>& pgs, int newflags);
  // osd map cache (past osd maps)
  Mutex map_cache_lock;
  SharedLRU<epoch_t, const OSDMap> map_cache;
  SimpleLRU<epoch_t, bufferlist> map_bl_cache;
  SimpleLRU<epoch_t, bufferlist> map_bl_inc_cache;

  OSDMapRef try_get_map(epoch_t e);
  OSDMapRef get_map(epoch_t e) {
    OSDMapRef ret(try_get_map(e));
  OSDMapRef add_map(OSDMap *o) {
    Mutex::Locker l(map_cache_lock);
  OSDMapRef _add_map(OSDMap *o);

  void add_map_bl(epoch_t e, bufferlist& bl) {
    Mutex::Locker l(map_cache_lock);
    return _add_map_bl(e, bl);
  void pin_map_bl(epoch_t e, bufferlist &bl);
  void _add_map_bl(epoch_t e, bufferlist& bl);
  bool get_map_bl(epoch_t e, bufferlist& bl) {
    Mutex::Locker l(map_cache_lock);
    return _get_map_bl(e, bl);
  bool _get_map_bl(epoch_t e, bufferlist& bl);

  void add_map_inc_bl(epoch_t e, bufferlist& bl) {
    Mutex::Locker l(map_cache_lock);
    return _add_map_inc_bl(e, bl);
  void pin_map_inc_bl(epoch_t e, bufferlist &bl);
  void _add_map_inc_bl(epoch_t e, bufferlist& bl);
  bool get_inc_map_bl(epoch_t e, bufferlist& bl);

  void clear_map_bl_cache_pins(epoch_t e);
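
  // Illustrative note (not in the original header): three caches are kept
  // per epoch -- map_cache holds decoded OSDMap objects, map_bl_cache holds
  // encoded full-map bufferlists, and map_bl_inc_cache holds encoded
  // incremental-map bufferlists -- so a map can be re-sent to a peer without
  // being re-decoded or re-encoded.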
  void need_heartbeat_peer_update();

  void pg_stat_queue_enqueue(PG *pg);
  void pg_stat_queue_dequeue(PG *pg);

  void start_shutdown();
  void shutdown_reserver();

  Mutex in_progress_split_lock;
  map<spg_t, spg_t> pending_splits;            // child -> parent
  map<spg_t, set<spg_t> > rev_pending_splits;  // parent -> [children]
  set<spg_t> in_progress_splits;               // child

  void _start_split(spg_t parent, const set<spg_t> &children);
  void start_split(spg_t parent, const set<spg_t> &children) {
    Mutex::Locker l(in_progress_split_lock);
    return _start_split(parent, children);
  void mark_split_in_progress(spg_t parent, const set<spg_t> &pgs);
  void complete_split(const set<spg_t> &pgs);
  void cancel_pending_splits_for_parent(spg_t parent);
  void _cancel_pending_splits_for_parent(spg_t parent);
  bool splitting(spg_t pgid);
  void expand_pg_num(OSDMapRef old_map,
  void _maybe_split_pgid(OSDMapRef old_map,
  void init_splits_between(spg_t pgid, OSDMapRef frommap, OSDMapRef tomap);

  osd_stat_t osd_stat;
  void update_osd_stat(vector<int>& hb_peers);
  osd_stat_t set_osd_stat(const struct store_statfs_t &stbuf,
                          vector<int>& hb_peers);
  osd_stat_t get_osd_stat() {
    Mutex::Locker l(stat_lock);
    osd_stat.up_from = up_epoch;
    osd_stat.seq = ((uint64_t)osd_stat.up_from << 32) + seq;
  uint64_t get_osd_stat_seq() {
    Mutex::Locker l(stat_lock);
    return osd_stat.seq;
  // -- OSD Full Status --
  friend TestOpsSocketHook;
  mutable Mutex full_status_lock;
  enum s_names { INVALID = -1, NONE, NEARFULL, BACKFILLFULL, FULL, FAILSAFE } cur_state;  // ascending
  const char *get_full_state_name(s_names s) const {
    case NONE: return "none";
    case NEARFULL: return "nearfull";
    case BACKFILLFULL: return "backfillfull";
    case FULL: return "full";
    case FAILSAFE: return "failsafe";
    default: return "???";
  s_names get_full_state(string type) const {
    else if (type == "failsafe")
    else if (type == "full")
    else if (type == "backfillfull")
      return BACKFILLFULL;
    else if (type == "nearfull")
  double cur_ratio;  ///< current utilization
  mutable int64_t injectfull = 0;
  s_names injectfull_state = NONE;
  float get_failsafe_full_ratio();
  void check_full_status(float ratio);
  bool _check_full(s_names type, ostream &ss) const;
  bool check_failsafe_full(ostream &ss) const;
  bool check_full(ostream &ss) const;
  bool check_backfill_full(ostream &ss) const;
  bool check_nearfull(ostream &ss) const;
  bool is_failsafe_full() const;
  bool is_full() const;
  bool is_backfillfull() const;
  bool is_nearfull() const;
  bool need_fullness_update();  ///< osdmap state needs update
  void set_injectfull(s_names type, int64_t count);
  bool check_osdmap_full(const set<pg_shard_t> &missing_on);
  mutable Mutex epoch_lock;  // protects access to boot_epoch, up_epoch, bind_epoch
  epoch_t boot_epoch;  // _first_ epoch we were marked up (after this process started)
  epoch_t up_epoch;    // _most_recent_ epoch we were marked up
  epoch_t bind_epoch;  // epoch we last did a bind to new ip:ports

  /**
   * Retrieve the boot_, up_, and bind_ epochs the OSD has set. The params
   * can be NULL if you don't care about them.
   */
  void retrieve_epochs(epoch_t *_boot_epoch, epoch_t *_up_epoch,
                       epoch_t *_bind_epoch) const;
  /**
   * Set the boot, up, and bind epochs. Any NULL params will not be set.
   */
  void set_epochs(const epoch_t *_boot_epoch, const epoch_t *_up_epoch,
                  const epoch_t *_bind_epoch);
  epoch_t get_boot_epoch() const {
    retrieve_epochs(&ret, NULL, NULL);
  epoch_t get_up_epoch() const {
    retrieve_epochs(NULL, &ret, NULL);
  epoch_t get_bind_epoch() const {
    retrieve_epochs(NULL, NULL, &ret);

  Mutex is_stopping_lock;
  Cond is_stopping_cond;
  std::atomic_int state{NOT_STOPPING};
  void set_state(int s) {
  bool is_stopping() const {
    return state == STOPPING;
  bool is_preparing_to_stop() const {
    return state == PREPARING_TO_STOP;
  bool prepare_to_stop();
  void got_stop_ack();

#ifdef PG_DEBUG_REFS
  map<spg_t, int> pgid_tracker;
  map<spg_t, PG *> live_pgs;
  void add_pgid(spg_t pgid, PG *pg);
  void remove_pgid(spg_t pgid, PG *pg);
  void dump_live_pgids();

  explicit OSDService(OSD *osd);
class OSD : public Dispatcher,
            public md_config_obs_t {
  Mutex osd_lock;        // global lock
  SafeTimer tick_timer;  // safe timer (osd_lock)

  // Tick timer for the stuff that does not need osd_lock
  Mutex tick_timer_lock;
  SafeTimer tick_timer_without_osd_lock;

  // config observer bits
  const char** get_tracked_conf_keys() const override;
  void handle_conf_change(const struct md_config_t *conf,
                          const std::set<std::string> &changed) override;
  void update_log_config();
  void check_config();

  static const double OSD_TICK_INTERVAL;  // tick interval for tick_timer and tick_timer_without_osd_lock

  AuthAuthorizeHandlerRegistry *authorize_handler_cluster_registry;
  AuthAuthorizeHandlerRegistry *authorize_handler_service_registry;

  Messenger *cluster_messenger;
  Messenger *client_messenger;
  Messenger *objecter_messenger;
  MonClient *monc;  // check the "monc helpers" list before accessing directly

  PerfCounters *logger;
  PerfCounters *recoverystate_perf;

  FuseStore *fuse_store = nullptr;

  LogClient log_client;

  std::string dev_path, journal_path;

  bool store_is_rotational = true;
  bool journal_is_rotational = true;

  ZTracer::Endpoint trace_endpoint;
  void create_logger();
  void create_recoverystate_perf();
  void tick_without_osd_lock();
  void _dispatch(Message *m);
  void dispatch_op(OpRequestRef op);

  void check_osdmap_features(ObjectStore *store);

  friend class OSDSocketHook;
  class OSDSocketHook *asok_hook;
  bool asok_command(string admin_command, cmdmap_t& cmdmap, string format, ostream& ss);

  ClassHandler *class_handler = nullptr;
  int get_nodeid() { return whoami; }
  static ghobject_t get_osdmap_pobject_name(epoch_t epoch) {
    snprintf(foo, sizeof(foo), "osdmap.%d", epoch);
    return ghobject_t(hobject_t(sobject_t(object_t(foo), 0)));
  static ghobject_t get_inc_osdmap_pobject_name(epoch_t epoch) {
    snprintf(foo, sizeof(foo), "inc_osdmap.%d", epoch);
    return ghobject_t(hobject_t(sobject_t(object_t(foo), 0)));
  static ghobject_t make_snapmapper_oid() {
    return ghobject_t(hobject_t(
      object_t("snapmapper"),
  static ghobject_t make_pg_log_oid(spg_t pg) {
    ss << "pglog_" << pg;
    return ghobject_t(hobject_t(sobject_t(object_t(s.c_str()), 0)));
  static ghobject_t make_pg_biginfo_oid(spg_t pg) {
    ss << "pginfo_" << pg;
    return ghobject_t(hobject_t(sobject_t(object_t(s.c_str()), 0)));
  static ghobject_t make_infos_oid() {
    hobject_t oid(sobject_t("infos", CEPH_NOSNAP));
    return ghobject_t(oid);

  static void recursive_remove_collection(CephContext* cct,

  /**
   * get_osd_initial_compat_set()
   *
   * Get the initial feature set for this OSD. Features
   * here are automatically upgraded.
   *
   * Return value: Initial osd CompatSet
   */
  static CompatSet get_osd_initial_compat_set();

  /**
   * get_osd_compat_set()
   *
   * Get all features supported by this OSD
   *
   * Return value: CompatSet of all supported features
   */
  static CompatSet get_osd_compat_set();
  class C_Tick_WithoutOSDLock;

  OSDSuperblock superblock;

  void write_superblock();
  void write_superblock(ObjectStore::Transaction& t);
  int read_superblock();

  void clear_temp_objects();

  CompatSet osd_compat;

    STATE_INITIALIZING = 1,
    STATE_WAITING_FOR_HEALTHY

  static const char *get_state_name(int s) {
    case STATE_INITIALIZING: return "initializing";
    case STATE_PREBOOT: return "preboot";
    case STATE_BOOTING: return "booting";
    case STATE_ACTIVE: return "active";
    case STATE_STOPPING: return "stopping";
    case STATE_WAITING_FOR_HEALTHY: return "waiting_for_healthy";
    default: return "???";

  std::atomic_int state{STATE_INITIALIZING};
  bool waiting_for_luminous_mons = false;

  int get_state() const {
  void set_state(int s) {
  bool is_initializing() const {
    return state == STATE_INITIALIZING;
  bool is_preboot() const {
    return state == STATE_PREBOOT;
  bool is_booting() const {
    return state == STATE_BOOTING;
  bool is_active() const {
    return state == STATE_ACTIVE;
  bool is_stopping() const {
    return state == STATE_STOPPING;
  bool is_waiting_for_healthy() const {
    return state == STATE_WAITING_FOR_HEALTHY;
  ThreadPool peering_tp;
  ShardedThreadPool osd_op_tp;
  ThreadPool command_tp;

  void set_disk_tp_priority();
  void get_latest_osdmap();

  void dispatch_session_waiting(Session *session, OSDMapRef osdmap);
  void maybe_share_map(Session *session, OpRequestRef op, OSDMapRef osdmap);

  Mutex session_waiting_lock;
  set<Session*> session_waiting_for_map;

  /// Caller assumes refs for included Sessions
  void get_sessions_waiting_for_map(set<Session*> *out) {
    Mutex::Locker l(session_waiting_lock);
    out->swap(session_waiting_for_map);
  void register_session_waiting_on_map(Session *session) {
    Mutex::Locker l(session_waiting_lock);
    if (session_waiting_for_map.insert(session).second) {
  void clear_session_waiting_on_map(Session *session) {
    Mutex::Locker l(session_waiting_lock);
    set<Session*>::iterator i = session_waiting_for_map.find(session);
    if (i != session_waiting_for_map.end()) {
      session_waiting_for_map.erase(i);
  void dispatch_sessions_waiting_on_map() {
    set<Session*> sessions_to_check;
    get_sessions_waiting_for_map(&sessions_to_check);
    for (set<Session*>::iterator i = sessions_to_check.begin();
         i != sessions_to_check.end();
         sessions_to_check.erase(i++)) {
      (*i)->session_dispatch_lock.Lock();
      dispatch_session_waiting(*i, osdmap);
      (*i)->session_dispatch_lock.Unlock();
  void session_handle_reset(Session *session) {
    Mutex::Locker l(session->session_dispatch_lock);
    clear_session_waiting_on_map(session);
    session->clear_backoffs();
    /* Messages have connection refs, we need to clear the
     * connection->session->message->connection
     * cycles which result.
     */
    session->waiting_on_map.clear_and_dispose(TrackedOp::Putter());

  /**
   * @defgroup monc helpers
   *
   * Right now we only have the one
   */

  /**
   * Ask the Monitors for a sequence of OSDMaps.
   *
   * @param epoch The epoch to start with when replying
   * @param force_request True if this request forces a new subscription to
   * the monitors; false if an outstanding request that encompasses it is
   */
  void osdmap_subscribe(version_t epoch, bool force_request);
  /** @} monc helpers */
  /// information about a heartbeat peer
  struct HeartbeatInfo {
    ConnectionRef con_front;  ///< peer connection (front)
    ConnectionRef con_back;   ///< peer connection (back)
    utime_t first_tx;         ///< time we sent our first ping request
    utime_t last_tx;          ///< last time we sent a ping request
    utime_t last_rx_front;    ///< last time we got a ping reply on the front side
    utime_t last_rx_back;     ///< last time we got a ping reply on the back side
    epoch_t epoch;            ///< most recent epoch we wanted this peer

    bool is_unhealthy(utime_t cutoff) const {
      ! ((last_rx_front > cutoff ||
          (last_rx_front == utime_t() && (last_tx == utime_t() ||
                                          first_tx > cutoff))) &&
         (last_rx_back > cutoff ||
          (last_rx_back == utime_t() && (last_tx == utime_t() ||
                                         first_tx > cutoff))));
    bool is_healthy(utime_t cutoff) const {
      return last_rx_front > cutoff && last_rx_back > cutoff;
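
    // Illustrative note (not in the original header): is_healthy() demands a
    // recent ping reply on *both* the front and back interfaces, while
    // is_unhealthy() only fires once we have actually been waiting past the
    // cutoff -- a brand-new peer (no replies yet, first ping sent after the
    // cutoff) is neither healthy nor unhealthy yet.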
  /// state attached to outgoing heartbeat connections
  struct HeartbeatSession : public RefCountedObject {
    explicit HeartbeatSession(int p) : peer(p) {}
  Mutex heartbeat_lock;
  map<int, int> debug_heartbeat_drops_remaining;
  Cond heartbeat_cond;
  bool heartbeat_stop;
  std::atomic_bool heartbeat_need_update;
  map<int,HeartbeatInfo> heartbeat_peers;  ///< map of osd id to HeartbeatInfo
  utime_t last_mon_heartbeat;
  Messenger *hb_front_client_messenger;
  Messenger *hb_back_client_messenger;
  Messenger *hb_front_server_messenger;
  Messenger *hb_back_server_messenger;
  utime_t last_heartbeat_resample;  ///< last time we chose random peers in waiting-for-healthy state
  double daily_loadavg;

  void _add_heartbeat_peer(int p);
  void _remove_heartbeat_peer(int p);
  bool heartbeat_reset(Connection *con);
  void maybe_update_heartbeat_peers();
  void reset_heartbeat_peers();
  bool heartbeat_peers_need_update() {
    return heartbeat_need_update.load();
  void heartbeat_set_peers_need_update() {
    heartbeat_need_update.store(true);
  void heartbeat_clear_peers_need_update() {
    heartbeat_need_update.store(false);
  void heartbeat_check();
  void heartbeat_entry();
  void need_heartbeat_peer_update();

  void heartbeat_kick() {
    Mutex::Locker l(heartbeat_lock);
    heartbeat_cond.Signal();

  struct T_Heartbeat : public Thread {
    explicit T_Heartbeat(OSD *o) : osd(o) {}
    void *entry() override {
      osd->heartbeat_entry();

  bool heartbeat_dispatch(Message *m);

  struct HeartbeatDispatcher : public Dispatcher {
    explicit HeartbeatDispatcher(OSD *o) : Dispatcher(o->cct), osd(o) {}

    bool ms_can_fast_dispatch_any() const override { return true; }
    bool ms_can_fast_dispatch(const Message *m) const override {
      switch (m->get_type()) {
    void ms_fast_dispatch(Message *m) override {
      osd->heartbeat_dispatch(m);
    bool ms_dispatch(Message *m) override {
      return osd->heartbeat_dispatch(m);
    bool ms_handle_reset(Connection *con) override {
      return osd->heartbeat_reset(con);
    void ms_handle_remote_reset(Connection *con) override {}
    bool ms_handle_refused(Connection *con) override {
      return osd->ms_handle_refused(con);
    bool ms_verify_authorizer(Connection *con, int peer_type,
                              int protocol, bufferlist& authorizer_data, bufferlist& authorizer_reply,
                              bool& isvalid, CryptoKey& session_key) override {
  } heartbeat_dispatcher;

  list<OpRequestRef> finished;
  void take_waiters(list<OpRequestRef>& ls) {
    assert(osd_lock.is_locked());
    finished.splice(finished.end(), ls);
  // -- op tracking --
  OpTracker op_tracker;
  void check_ops_in_flight();
  void test_ops(std::string command, std::string args, ostream& ss);
  friend class TestOpsSocketHook;
  TestOpsSocketHook *test_ops_hook;
  friend struct C_CompleteSplits;
  friend struct C_OpenPGs;

  enum class io_queue {
  friend std::ostream& operator<<(std::ostream& out, const OSD::io_queue& q);

  const io_queue op_queue;
  const unsigned int op_prio_cutoff;

  /*
   * The ordered op delivery chain is:
   *
   *   fast dispatch -> pqueue back
   *                    pqueue front <-> to_process back
   *                                     to_process front -> RunVis(item)
   *
   * The pqueue is per-shard, and to_process is per pg_slot.  Items can be
   * pushed back up into to_process and/or pqueue while order is preserved.
   *
   * Multiple worker threads can operate on each shard.
   *
   * Under normal circumstances, num_running == to_process.size().  There are
   * two times when that is not true: (1) when waiting_for_pg == true and
   * to_process is accumulating requests that are waiting for the pg to be
   * instantiated; in that case they will all get requeued together by
   * wake_pg_waiters, and (2) when wake_pg_waiters just ran, waiting_for_pg
   * and already requeued the items.
   */
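
  /*
   * Illustrative sketch only (not in the original header) of one _process()
   * pass under the scheme above, using just the members documented here:
   *
   *   lock sdata_op_ordering_lock
   *   move the front pqueue item onto pg_slots[pgid].to_process
   *   remember pg_slots[pgid].requeue_seq and bump num_running
   *   unlock, take the PG lock (another thread may requeue here)
   *   relock and bail out if requeue_seq changed or waiting_for_pg is set,
   *   otherwise run the item at the front of to_process
   */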
  friend class PGQueueable;

    : public ShardedThreadPool::ShardedWQ<pair<spg_t,PGQueueable>>
      Mutex sdata_op_ordering_lock;  ///< protects all members below
      OSDMapRef waiting_for_pg_osdmap;
        PGRef pg;                      ///< cached pg reference [optional]
        list<PGQueueable> to_process;  ///< order items for this slot
        int num_running = 0;           ///< _process threads doing pg lookup/lock

        /// true if pg does/did not exist. if so all new items go directly to
        /// to_process.  cleared by prune_pg_waiters.
        bool waiting_for_pg = false;

        /// incremented by wake_pg_waiters; indicates racing _process threads
        /// should bail out (their op has been requeued)
        uint64_t requeue_seq = 0;

      /// map of slots for each spg_t.  maintains ordering of items dequeued
      /// from pqueue while _process thread drops shard lock to acquire the
      /// pg lock.  slots are removed only by prune_pg_waiters.
      unordered_map<spg_t,pg_slot> pg_slots;

      std::unique_ptr<OpQueue<pair<spg_t, PGQueueable>, entity_inst_t>> pqueue;

      void _enqueue_front(pair<spg_t, PGQueueable> item, unsigned cutoff) {
        unsigned priority = item.second.get_priority();
        unsigned cost = item.second.get_cost();
        if (priority >= cutoff)
          pqueue->enqueue_strict_front(
            item.second.get_owner(),
          pqueue->enqueue_front(
            item.second.get_owner(),
            priority, cost, item);

                string lock_name, string ordering_lock,
                uint64_t max_tok_per_prio, uint64_t min_cost, CephContext *cct,
        : sdata_lock(lock_name.c_str(), false, true, false, cct),
          sdata_op_ordering_lock(ordering_lock.c_str(), false, true,
        if (opqueue == io_queue::weightedpriority) {
          pqueue = std::unique_ptr
            <WeightedPriorityQueue<pair<spg_t,PGQueueable>,entity_inst_t>>(
              new WeightedPriorityQueue<pair<spg_t,PGQueueable>,entity_inst_t>(
                max_tok_per_prio, min_cost));
        } else if (opqueue == io_queue::prioritized) {
          pqueue = std::unique_ptr
            <PrioritizedQueue<pair<spg_t,PGQueueable>,entity_inst_t>>(
              new PrioritizedQueue<pair<spg_t,PGQueueable>,entity_inst_t>(
                max_tok_per_prio, min_cost));
        } else if (opqueue == io_queue::mclock_opclass) {
          pqueue = std::unique_ptr
            <ceph::mClockOpClassQueue>(new ceph::mClockOpClassQueue(cct));
        } else if (opqueue == io_queue::mclock_client) {
          pqueue = std::unique_ptr
            <ceph::mClockClientQueue>(new ceph::mClockClientQueue(cct));
    };  // struct ShardData
    vector<ShardData*> shard_list;
    uint32_t num_shards;

    ShardedOpWQ(uint32_t pnum_shards,
                ShardedThreadPool* tp)
      : ShardedThreadPool::ShardedWQ<pair<spg_t,PGQueueable>>(ti, si, tp),
        num_shards(pnum_shards) {
      for (uint32_t i = 0; i < num_shards; i++) {
        char lock_name[32] = {0};
        snprintf(lock_name, sizeof(lock_name), "%s.%d", "OSD:ShardedOpWQ:", i);
        char order_lock[32] = {0};
        snprintf(order_lock, sizeof(order_lock), "%s.%d",
                 "OSD:ShardedOpWQ:order:", i);
        ShardData* one_shard = new ShardData(
          lock_name, order_lock,
          osd->cct->_conf->osd_op_pq_max_tokens_per_priority,
          osd->cct->_conf->osd_op_pq_min_cost, osd->cct, osd->op_queue);
        shard_list.push_back(one_shard);
    ~ShardedOpWQ() override {
      while (!shard_list.empty()) {
        delete shard_list.back();
        shard_list.pop_back();

    /// wake any pg waiters after a PG is created/instantiated
    void wake_pg_waiters(spg_t pgid);

    /// prune ops (and possibly pg_slots) for pgs that shouldn't be here
    void prune_pg_waiters(OSDMapRef osdmap, int whoami);

    /// clear cached PGRef on pg deletion
    void clear_pg_pointer(spg_t pgid);

    /// clear pg_slots on shutdown
    void clear_pg_slots();

    /// try to do some work
    void _process(uint32_t thread_index, heartbeat_handle_d *hb) override;

    /// enqueue a new item
    void _enqueue(pair<spg_t, PGQueueable> item) override;

    /// requeue an old item (at the front of the line)
    void _enqueue_front(pair<spg_t, PGQueueable> item) override;

    void return_waiting_threads() override {
      for (uint32_t i = 0; i < num_shards; i++) {
        ShardData* sdata = shard_list[i];
        assert(NULL != sdata);
        sdata->sdata_lock.Lock();
        sdata->sdata_cond.Signal();
        sdata->sdata_lock.Unlock();

    void dump(Formatter *f) {
      for (uint32_t i = 0; i < num_shards; i++) {
        ShardData* sdata = shard_list[i];
        char lock_name[32] = {0};
        snprintf(lock_name, sizeof(lock_name), "%s%d", "OSD:ShardedOpWQ:", i);
        assert(NULL != sdata);
        sdata->sdata_op_ordering_lock.Lock();
        f->open_object_section(lock_name);
        sdata->pqueue->dump(f);
        sdata->sdata_op_ordering_lock.Unlock();

    /// Must be called on ops queued back to front
      list<OpRequestRef> *out_ops;
      uint64_t reserved_pushes_to_free;
      Pred(spg_t pg, list<OpRequestRef> *out_ops = 0)
        : pgid(pg), out_ops(out_ops), reserved_pushes_to_free(0) {}
      void accumulate(const PGQueueable &op) {
        reserved_pushes_to_free += op.get_reserved_pushes();
        boost::optional<OpRequestRef> mop = op.maybe_get_op();
          out_ops->push_front(*mop);
      bool operator()(const pair<spg_t, PGQueueable> &op) {
        if (op.first == pgid) {
          accumulate(op.second);
      uint64_t get_reserved_pushes_to_free() const {
        return reserved_pushes_to_free;

    bool is_shard_empty(uint32_t thread_index) override {
      uint32_t shard_index = thread_index % num_shards;
      ShardData* sdata = shard_list[shard_index];
      assert(NULL != sdata);
      Mutex::Locker l(sdata->sdata_op_ordering_lock);
      return sdata->pqueue->empty();

  void enqueue_op(spg_t pg, OpRequestRef& op, epoch_t epoch);
    PGRef pg, OpRequestRef op,
    ThreadPool::TPHandle &handle);
  // -- peering queue --
  struct PeeringWQ : public ThreadPool::BatchWorkQueue<PG> {
    list<PG*> peering_queue;

    PeeringWQ(OSD *o, time_t ti, time_t si, ThreadPool *tp)
      : ThreadPool::BatchWorkQueue<PG>(
        "OSD::PeeringWQ", ti, si, tp), osd(o) {}

    void _dequeue(PG *pg) override {
      for (list<PG*>::iterator i = peering_queue.begin();
           i != peering_queue.end();
          peering_queue.erase(i++);
          pg->put("PeeringWQ");
    bool _enqueue(PG *pg) override {
      pg->get("PeeringWQ");
      peering_queue.push_back(pg);
    bool _empty() override {
      return peering_queue.empty();
    void _dequeue(list<PG*> *out) override;
      const list<PG*> &pgs,
      ThreadPool::TPHandle &handle) override {
      assert(!pgs.empty());
      osd->process_peering_events(pgs, handle);
      for (list<PG*>::const_iterator i = pgs.begin();
        (*i)->put("PeeringWQ");
    void _process_finish(const list<PG*> &pgs) override {
      for (list<PG*>::const_iterator i = pgs.begin();
    void _clear() override {
      assert(peering_queue.empty());

  void process_peering_events(
    const list<PG*> &pg,
    ThreadPool::TPHandle &handle);
  friend class PrimaryLogPG;

  OSDMapRef get_osdmap() {
  epoch_t get_osdmap_epoch() const {
    return osdmap ? osdmap->get_epoch() : 0;

  utime_t had_map_since;

  list<OpRequestRef> waiting_for_osdmap;
  deque<utime_t> osd_markdown_log;

  friend struct send_map_on_destruct;

  void wait_for_new_map(OpRequestRef op);
  void handle_osd_map(class MOSDMap *m);
  void _committed_osd_maps(epoch_t first, epoch_t last, class MOSDMap *m);
  void trim_maps(epoch_t oldest, int nreceived, bool skip_maps);
  void note_down_osd(int osd);
  void note_up_osd(int osd);
  friend class C_OnMapCommit;

    epoch_t advance_to, PG *pg,
    ThreadPool::TPHandle &handle,
    PG::RecoveryCtx *rctx,
    set<PGRef> *split_pgs
  void activate_map();

  // osd map cache (past osd maps)
  OSDMapRef get_map(epoch_t e) {
    return service.get_map(e);
  OSDMapRef add_map(OSDMap *o) {
    return service.add_map(o);
  void add_map_bl(epoch_t e, bufferlist& bl) {
    return service.add_map_bl(e, bl);
  void pin_map_bl(epoch_t e, bufferlist &bl) {
    return service.pin_map_bl(e, bl);
  bool get_map_bl(epoch_t e, bufferlist& bl) {
    return service.get_map_bl(e, bl);
  void add_map_inc_bl(epoch_t e, bufferlist& bl) {
    return service.add_map_inc_bl(e, bl);
  void pin_map_inc_bl(epoch_t e, bufferlist &bl) {
    return service.pin_map_inc_bl(e, bl);
  // -- placement groups --
  RWLock pg_map_lock;  // this lock orders *above* individual PG _locks
  ceph::unordered_map<spg_t, PG*> pg_map;  // protected by pg_map lock

  map<spg_t, list<PG::CephPeeringEvtRef> > peering_wait_for_split;
  PGRecoveryStats pg_recovery_stats;

  PGPool _get_pool(int id, OSDMapRef createmap);

  PG *_lookup_lock_pg_with_map_lock_held(spg_t pgid);
  PG *_lookup_lock_pg(spg_t pgid);

  PG *lookup_lock_pg(spg_t pgid);

  PG *_open_lock_pg(OSDMapRef createmap,
                    spg_t pg, bool no_lockdep_check=false);
    RES_PARENT,  // resurrected a parent
    RES_SELF,    // resurrected self
    RES_NONE     // nothing relevant deleting
  res_result _try_resurrect_pg(
    OSDMapRef curmap, spg_t pgid, spg_t *resurrected, PGRef *old_pg_state);

  PG *_create_lock_pg(
    OSDMapRef createmap,
    vector<int>& up, int up_primary,
    vector<int>& acting, int acting_primary,
    pg_history_t history,
    const PastIntervals& pi,
    ObjectStore::Transaction& t);

  PG* _make_pg(OSDMapRef createmap, spg_t pgid);
  void add_newly_split_pg(PG *pg,
                          PG::RecoveryCtx *rctx);

  int handle_pg_peering_evt(
    const pg_history_t& orig_history,
    const PastIntervals& pi,
    PG::CephPeeringEvtRef evt);

  void build_past_intervals_parallel();

  /// build initial pg history and intervals on create
  void build_initial_pg_history(
    utime_t created_stamp,

  /// project pg history from from to now
  bool project_pg_history(
    spg_t pgid, pg_history_t& h, epoch_t from,
    const vector<int>& lastup,
    const vector<int>& lastacting,
    int lastactingprimary
    );  ///< @return false if there was a map gap between from and now

  // this must be called with pg->lock held on any pg addition to pg_map
  void wake_pg_waiters(PGRef pg) {
    assert(pg->is_locked());
    op_shardedwq.wake_pg_waiters(pg->info.pgid);
  epoch_t last_pg_create_epoch;

  void handle_pg_create(OpRequestRef op);

    const set<spg_t> &childpgids, set<PGRef> *out_pgs,
    PG::RecoveryCtx *rctx);
  // == monitor interaction ==
  Mutex mon_report_lock;
  utime_t last_mon_report;
  utime_t last_pg_stats_sent;

  /* if our monitor dies, we want to notice it and reconnect.
   * So we keep track of when it last acked our stat updates,
   * and if too much time passes (and we've been sending
   * more updates) then we can call it dead and reconnect
   */
  utime_t last_pg_stats_ack;
  float stats_ack_timeout;
  set<uint64_t> outstanding_pg_stats;  // how many stat updates haven't been acked yet

  void _got_mon_epochs(epoch_t oldest, epoch_t newest);
  void _preboot(epoch_t oldest, epoch_t newest);

  void _collect_metadata(map<string,string> *pmeta);

  void start_waiting_for_healthy();

  void send_full_update();

  friend struct C_OSD_GetVersion;

  epoch_t up_thru_wanted;

  void queue_want_up_thru(epoch_t want);

  // -- full map requests --
  epoch_t requested_full_first, requested_full_last;

  void request_full_map(epoch_t first, epoch_t last);
  void rerequest_full_maps() {
    epoch_t first = requested_full_first;
    epoch_t last = requested_full_last;
    requested_full_first = 0;
    requested_full_last = 0;
    request_full_map(first, last);
  void got_full_map(epoch_t e);

  map<int,utime_t> failure_queue;
  map<int,pair<utime_t,entity_inst_t> > failure_pending;

  void requeue_failures();
  void send_failures();
  void send_still_alive(epoch_t epoch, const entity_inst_t &i);

  Mutex pg_stat_queue_lock;
  Cond pg_stat_queue_cond;
  xlist<PG*> pg_stat_queue;
  bool osd_stat_updated;
  uint64_t pg_stat_tid, pg_stat_tid_flushed;

  void send_pg_stats(const utime_t &now);
  void handle_pg_stats_ack(class MPGStatsAck *ack);
  void flush_pg_stats();

  ceph::coarse_mono_clock::time_point last_sent_beacon;
  Mutex min_last_epoch_clean_lock{"OSD::min_last_epoch_clean_lock"};
  epoch_t min_last_epoch_clean = 0;
  // which pgs were scanned for min_lec
  std::vector<pg_t> min_last_epoch_clean_pgs;
  void send_beacon(const ceph::coarse_mono_clock::time_point& now);

  void pg_stat_queue_enqueue(PG *pg) {
    pg_stat_queue_lock.Lock();
    if (pg->is_primary() && !pg->stat_queue_item.is_on_list()) {
      pg->get("pg_stat_queue");
      pg_stat_queue.push_back(&pg->stat_queue_item);
    osd_stat_updated = true;
    pg_stat_queue_lock.Unlock();
  void pg_stat_queue_dequeue(PG *pg) {
    pg_stat_queue_lock.Lock();
    if (pg->stat_queue_item.remove_myself())
      pg->put("pg_stat_queue");
    pg_stat_queue_lock.Unlock();
  void clear_pg_stat_queue() {
    pg_stat_queue_lock.Lock();
    while (!pg_stat_queue.empty()) {
      PG *pg = pg_stat_queue.front();
      pg_stat_queue.pop_front();
      pg->put("pg_stat_queue");
    pg_stat_queue_lock.Unlock();
  void clear_outstanding_pg_stats() {
    Mutex::Locker l(pg_stat_queue_lock);
    outstanding_pg_stats.clear();

  ceph_tid_t get_tid() {
    return service.get_tid();
  // -- generic pg peering --
  PG::RecoveryCtx create_context();
  void dispatch_context(PG::RecoveryCtx &ctx, PG *pg, OSDMapRef curmap,
                        ThreadPool::TPHandle *handle = NULL);
  void dispatch_context_transaction(PG::RecoveryCtx &ctx, PG *pg,
                                    ThreadPool::TPHandle *handle = NULL);
  void do_notifies(map<int,
                       vector<pair<pg_notify_t, PastIntervals> > >&
  void do_queries(map<int, map<spg_t,pg_query_t> >& query_map,
  void do_infos(map<int,
                    vector<pair<pg_notify_t, PastIntervals> > >& info_map,

  bool require_mon_peer(const Message *m);
  bool require_mon_or_mgr_peer(const Message *m);
  bool require_osd_peer(const Message *m);
  /**
   * Verifies that we were alive in the given epoch, and that
   */
  bool require_self_aliveness(const Message *m, epoch_t alive_since);
  /**
   * Verifies that the OSD who sent the given op has the same
   * address as in the given map.
   * @pre op was sent by an OSD using the cluster messenger
   */
  bool require_same_peer_instance(const Message *m, OSDMapRef& map,
                                  bool is_fast_dispatch);

  bool require_same_or_newer_map(OpRequestRef& op, epoch_t e,
                                 bool is_fast_dispatch);

  void handle_pg_query(OpRequestRef op);
  void handle_pg_notify(OpRequestRef op);
  void handle_pg_log(OpRequestRef op);
  void handle_pg_info(OpRequestRef op);
  void handle_pg_trim(OpRequestRef op);

  void handle_pg_backfill_reserve(OpRequestRef op);
  void handle_pg_recovery_reserve(OpRequestRef op);

  void handle_force_recovery(Message *m);

  void handle_pg_remove(OpRequestRef op);
  void _remove_pg(PG *pg);
    Command(vector<string>& c, ceph_tid_t t, bufferlist& bl, Connection *co)
      : cmd(c), tid(t), indata(bl), con(co) {}

  list<Command*> command_queue;
  struct CommandWQ : public ThreadPool::WorkQueue<Command> {
    CommandWQ(OSD *o, time_t ti, time_t si, ThreadPool *tp)
      : ThreadPool::WorkQueue<Command>("OSD::CommandWQ", ti, si, tp), osd(o) {}

    bool _empty() override {
      return osd->command_queue.empty();
    bool _enqueue(Command *c) override {
      osd->command_queue.push_back(c);
    void _dequeue(Command *pg) override {
    Command *_dequeue() override {
      if (osd->command_queue.empty())
      Command *c = osd->command_queue.front();
      osd->command_queue.pop_front();
    void _process(Command *c, ThreadPool::TPHandle &) override {
      osd->osd_lock.Lock();
      if (osd->is_stopping()) {
        osd->osd_lock.Unlock();
      osd->do_command(c->con.get(), c->tid, c->cmd, c->indata);
      osd->osd_lock.Unlock();
    void _clear() override {
      while (!osd->command_queue.empty()) {
        Command *c = osd->command_queue.front();
        osd->command_queue.pop_front();

  void handle_command(class MMonCommand *m);
  void handle_command(class MCommand *m);
  void do_command(Connection *con, ceph_tid_t tid, vector<string>& cmd, bufferlist& data);
  // -- pg recovery --
  void do_recovery(PG *pg, epoch_t epoch_queued, uint64_t pushes_reserved,
                   ThreadPool::TPHandle &handle);

  bool scrub_random_backoff();
  bool scrub_load_below_threshold();
  bool scrub_time_permit(utime_t now);
    public ThreadPool::WorkQueueVal<pair<PGRef, DeletingStateRef> > {
    ObjectStore *&store;
    list<pair<PGRef, DeletingStateRef> > remove_queue;
    RemoveWQ(CephContext* cct, ObjectStore *&o, time_t ti, time_t si,
      : ThreadPool::WorkQueueVal<pair<PGRef, DeletingStateRef> >(
        "OSD::RemoveWQ", ti, si, tp), cct(cct), store(o) {}

    bool _empty() override {
      return remove_queue.empty();
    void _enqueue(pair<PGRef, DeletingStateRef> item) override {
      remove_queue.push_back(item);
    void _enqueue_front(pair<PGRef, DeletingStateRef> item) override {
      remove_queue.push_front(item);
    bool _dequeue(pair<PGRef, DeletingStateRef> item) {
    pair<PGRef, DeletingStateRef> _dequeue() override {
      assert(!remove_queue.empty());
      pair<PGRef, DeletingStateRef> item = remove_queue.front();
      remove_queue.pop_front();
    void _process(pair<PGRef, DeletingStateRef>,
                  ThreadPool::TPHandle &) override;
    void _clear() override {
      remove_queue.clear();
  bool ms_can_fast_dispatch_any() const override { return true; }
  bool ms_can_fast_dispatch(const Message *m) const override {
    switch (m->get_type()) {
    case CEPH_MSG_OSD_OP:
    case CEPH_MSG_OSD_BACKOFF:
    case MSG_OSD_SUBOPREPLY:
    case MSG_OSD_REPOPREPLY:
    case MSG_OSD_PG_PUSH:
    case MSG_OSD_PG_PULL:
    case MSG_OSD_PG_PUSH_REPLY:
    case MSG_OSD_PG_SCAN:
    case MSG_OSD_PG_BACKFILL:
    case MSG_OSD_PG_BACKFILL_REMOVE:
    case MSG_OSD_EC_WRITE:
    case MSG_OSD_EC_WRITE_REPLY:
    case MSG_OSD_EC_READ:
    case MSG_OSD_EC_READ_REPLY:
    case MSG_OSD_SCRUB_RESERVE:
    case MSG_OSD_REP_SCRUB:
    case MSG_OSD_REP_SCRUBMAP:
    case MSG_OSD_PG_UPDATE_LOG_MISSING:
    case MSG_OSD_PG_UPDATE_LOG_MISSING_REPLY:
    case MSG_OSD_PG_RECOVERY_DELETE:
    case MSG_OSD_PG_RECOVERY_DELETE_REPLY:

  void ms_fast_dispatch(Message *m) override;
  void ms_fast_preprocess(Message *m) override;
  bool ms_dispatch(Message *m) override;
  bool ms_get_authorizer(int dest_type, AuthAuthorizer **authorizer, bool force_new) override;
  bool ms_verify_authorizer(Connection *con, int peer_type,
                            int protocol, bufferlist& authorizer, bufferlist& authorizer_reply,
                            bool& isvalid, CryptoKey& session_key) override;
  void ms_handle_connect(Connection *con) override;
  void ms_handle_fast_connect(Connection *con) override;
  void ms_handle_fast_accept(Connection *con) override;
  bool ms_handle_reset(Connection *con) override;
  void ms_handle_remote_reset(Connection *con) override {}
  bool ms_handle_refused(Connection *con) override;
  io_queue get_io_queue() const {
    if (cct->_conf->osd_op_queue == "debug_random") {
      static io_queue index_lookup[] = { io_queue::prioritized,
                                         io_queue::weightedpriority,
                                         io_queue::mclock_opclass,
                                         io_queue::mclock_client };
      unsigned which = rand() % (sizeof(index_lookup) / sizeof(index_lookup[0]));
      return index_lookup[which];
    } else if (cct->_conf->osd_op_queue == "prioritized") {
      return io_queue::prioritized;
    } else if (cct->_conf->osd_op_queue == "mclock_opclass") {
      return io_queue::mclock_opclass;
    } else if (cct->_conf->osd_op_queue == "mclock_client") {
      return io_queue::mclock_client;
      // default / catch-all is 'wpq'
      return io_queue::weightedpriority;
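
  // Illustrative note (not in the original header): the queue type comes
  // from the osd_op_queue config option, e.g. in ceph.conf
  //
  //   [osd]
  //   osd op queue = wpq    # or prioritized, mclock_opclass,
  //                         # mclock_client, debug_random
  //
  // and anything unrecognized falls through to the weighted-priority queue.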
  unsigned int get_io_prio_cut() const {
    if (cct->_conf->osd_op_queue_cut_off == "debug_random") {
      return (rand() % 2 < 1) ? CEPH_MSG_PRIO_HIGH : CEPH_MSG_PRIO_LOW;
    } else if (cct->_conf->osd_op_queue_cut_off == "high") {
      return CEPH_MSG_PRIO_HIGH;
      // default / catch-all is 'low'
      return CEPH_MSG_PRIO_LOW;
  /* internal and external can point to the same messenger, they will still
   * be cleaned up properly */
  OSD(CephContext *cct_,
      ObjectStore *store_,
      Messenger *internal,
      Messenger *external,
      Messenger *hb_front_client,
      Messenger *hb_back_client,
      Messenger *hb_front_server,
      Messenger *hb_back_server,
      Messenger *osdc_messenger,
      MonClient *mc, const std::string &dev, const std::string &jdev);

  static int mkfs(CephContext *cct, ObjectStore *store,
                  uuid_d fsid, int whoami);
  /* remove any non-user xattrs from a map of them */
  void filter_xattrs(map<string, bufferptr>& attrs) {
    for (map<string, bufferptr>::iterator iter = attrs.begin();
         iter != attrs.end();
      if (('_' != iter->first.at(0)) || (iter->first.size() == 1))
        attrs.erase(iter++);
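
  // Illustrative note (not in the original header): user xattrs are the ones
  // prefixed with '_' and longer than the bare prefix, so e.g. "_foo" is
  // kept while "ceph.snapset" and a lone "_" are erased by the loop above.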
  int mon_cmd_maybe_osd_create(string &cmd);
  int update_crush_device_class();
  int update_crush_location();

  static int write_meta(ObjectStore *store,
                        uuid_d& cluster_fsid, uuid_d& osd_fsid, int whoami);

  void handle_pg_scrub(struct MOSDScrub *m, PG* pg);
  void handle_scrub(struct MOSDScrub *m);
  void handle_osd_ping(class MOSDPing *m);

  int init_op_flags(OpRequestRef& op);

  int get_num_op_shards();
  int get_num_op_threads();

  float get_osd_recovery_sleep();

  static int peek_meta(ObjectStore *store, string& magic,
                       uuid_d& cluster_fsid, uuid_d& osd_fsid, int& whoami);

  int enable_disable_fuse(bool stop);

  void suicide(int exitcode);

  void handle_signal(int signum);

  /// check if we can throw out op from a disconnected client
  static bool op_is_discardable(const MOSDOp *m);

  friend class OSDService;

std::ostream& operator<<(std::ostream& out, const OSD::io_queue& q);

// compatibility of the executable
extern const CompatSet::Feature ceph_osd_feature_compat[];
extern const CompatSet::Feature ceph_osd_feature_ro_compat[];
extern const CompatSet::Feature ceph_osd_feature_incompat[];

#endif // CEPH_OSD_H