// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 */
20 #include "msg/Dispatcher.h"
22 #include "common/Mutex.h"
23 #include "common/RWLock.h"
24 #include "common/Timer.h"
25 #include "common/WorkQueue.h"
26 #include "common/AsyncReserver.h"
27 #include "common/ceph_context.h"
28 #include "common/zipkin_trace.h"
30 #include "mgr/MgrClient.h"
32 #include "os/ObjectStore.h"
35 #include "auth/KeyRing.h"
36 #include "osd/ClassHandler.h"
38 #include "include/CompatSet.h"
40 #include "OpRequest.h"
43 #include "osd/PGQueueable.h"
48 #include "include/memory.h"
51 #include "include/unordered_map.h"
53 #include "common/shared_cache.hpp"
54 #include "common/simple_cache.hpp"
55 #include "common/sharedptr_registry.hpp"
56 #include "common/WeightedPriorityQueue.h"
57 #include "common/PrioritizedQueue.h"
58 #include "osd/mClockOpClassQueue.h"
59 #include "osd/mClockClientQueue.h"
60 #include "messages/MOSDOp.h"
61 #include "include/Spinlock.h"
62 #include "common/EventTrace.h"
64 #define CEPH_OSD_PROTOCOL 10 /* cluster internal */
  l_osd_op_r_lat_outb_hist,
  l_osd_op_r_process_lat,
  l_osd_op_r_prepare_lat,

  l_osd_op_w_lat_inb_hist,
  l_osd_op_w_process_lat,
  l_osd_op_w_prepare_lat,

  l_osd_op_rw_lat_inb_hist,
  l_osd_op_rw_lat_outb_hist,
  l_osd_op_rw_process_lat,
  l_osd_op_rw_prepare_lat,

  l_osd_op_before_queue_op_lat,
  l_osd_op_before_dequeue_op_lat,

  l_osd_history_alloc_bytes,
  l_osd_history_alloc_num,

  l_osd_cached_crc_adjusted,

  l_osd_waiting_for_map,

  l_osd_map_cache_miss,
  l_osd_map_cache_miss_low,
  l_osd_map_cache_miss_low_avg,
  l_osd_map_bl_cache_hit,
  l_osd_map_bl_cache_miss,

  l_osd_stat_bytes_used,
  l_osd_stat_bytes_avail,

  l_osd_tier_flush_fail,
  l_osd_tier_try_flush,
  l_osd_tier_try_flush_fail,

  l_osd_tier_proxy_read,
  l_osd_tier_proxy_write,

  l_osd_object_ctx_cache_hit,
  l_osd_object_ctx_cache_total,

  l_osd_tier_flush_lat,
  l_osd_tier_promote_lat,

// RecoveryState perf counters

  rs_backfilling_latency,
  rs_waitremotebackfillreserved_latency,
  rs_waitlocalbackfillreserved_latency,
  rs_notbackfilling_latency,
  rs_repnotrecovering_latency,
  rs_repwaitrecoveryreserved_latency,
  rs_repwaitbackfillreserved_latency,
  rs_reprecovering_latency,
  rs_activating_latency,
  rs_waitlocalrecoveryreserved_latency,
  rs_waitremoterecoveryreserved_latency,
  rs_recovering_latency,
  rs_recovered_latency,

  rs_replicaactive_latency,

  rs_waitactingchange_latency,
  rs_incomplete_latency,

  rs_getmissing_latency,
  rs_waitupthru_latency,
  rs_notrecovering_latency,
class AuthAuthorizeHandlerRegistry;

class TestOpsSocketHook;
struct C_CompleteSplits;

typedef ceph::shared_ptr<ObjectStore::Sequencer> SequencerRef;
class DeletingState {
  Mutex lock;
  Cond cond;
  enum {
    QUEUED,
    CLEARING_DIR,
    CLEARING_WAITING,
    DELETING_DIR,
    DELETED_DIR,
    CANCELED,
  } status;
  bool stop_deleting;
public:
  const spg_t pgid;
  const PGRef old_pg_state;
  explicit DeletingState(const pair<spg_t, PGRef> &in) :
    lock("DeletingState::lock"), status(QUEUED), stop_deleting(false),
    pgid(in.first), old_pg_state(in.second) {
  }

  /// transition status to CLEARING_WAITING
  bool pause_clearing() {
    Mutex::Locker l(lock);
    assert(status == CLEARING_DIR);
    if (stop_deleting) {
      status = CANCELED;
      cond.Signal();
      return false;
    }
    status = CLEARING_WAITING;
    return true;
  } ///< @return false if we should cancel deletion

  /// start or resume the clearing - transition the status to CLEARING_DIR
  bool start_or_resume_clearing() {
    Mutex::Locker l(lock);
    assert(
      status == QUEUED ||
      status == DELETED_DIR ||
      status == CLEARING_WAITING);
    if (stop_deleting) {
      status = CANCELED;
      cond.Signal();
      return false;
    }
    status = CLEARING_DIR;
    return true;
  } ///< @return false if we should cancel the deletion

  /// transition status to CLEARING_DIR
  bool resume_clearing() {
    Mutex::Locker l(lock);
    assert(status == CLEARING_WAITING);
    if (stop_deleting) {
      status = CANCELED;
      cond.Signal();
      return false;
    }
    status = CLEARING_DIR;
    return true;
  } ///< @return false if we should cancel deletion

  /// transition status to DELETING_DIR
  bool start_deleting() {
    Mutex::Locker l(lock);
    assert(status == CLEARING_DIR);
    if (stop_deleting) {
      status = CANCELED;
      cond.Signal();
      return false;
    }
    status = DELETING_DIR;
    return true;
  } ///< @return false if we should cancel deletion

  /// signal collection removal queued
  void finish_deleting() {
    Mutex::Locker l(lock);
    assert(status == DELETING_DIR);
    status = DELETED_DIR;
    cond.Signal();
  }

  /// try to halt the deletion
  bool try_stop_deletion() {
    Mutex::Locker l(lock);
    stop_deleting = true;
    /**
     * If we are in DELETING_DIR or CLEARING_DIR, there are in-progress
     * operations we have to wait for before continuing on.  States
     * CLEARING_WAITING and QUEUED indicate that the remover will check
     * stop_deleting before queueing any further operations.  CANCELED
     * indicates that the remover has already halted.  DELETED_DIR
     * indicates that the deletion has been fully queued.
     */
    while (status == DELETING_DIR || status == CLEARING_DIR)
      cond.Wait(lock);
    return status != DELETED_DIR;
  } ///< @return true if we don't need to recreate the collection
};
typedef ceph::shared_ptr<DeletingState> DeletingStateRef;
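
/*
 * A sketch of how a removal worker drives the DeletingState machine above
 * (illustrative only; the real driver lives in the PG removal path):
 *
 *   DeletingStateRef d = ...;            // one per queued pg deletion
 *   if (!d->start_or_resume_clearing())  // QUEUED -> CLEARING_DIR
 *     return;                            // canceled; bail out
 *   // ... clear some objects ...
 *   if (!d->pause_clearing())            // CLEARING_DIR -> CLEARING_WAITING
 *     return;                            // canceled while clearing
 *   // ... yield, get requeued ...
 *   if (!d->resume_clearing())           // CLEARING_WAITING -> CLEARING_DIR
 *     return;
 *   if (!d->start_deleting())            // CLEARING_DIR -> DELETING_DIR
 *     return;
 *   // ... queue collection removal ...
 *   d->finish_deleting();                // DELETING_DIR -> DELETED_DIR
 */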
class OSDService {
public:
  OSD *osd;
  CephContext *cct;
  SharedPtrRegistry<spg_t, ObjectStore::Sequencer> osr_registry;
  ceph::shared_ptr<ObjectStore::Sequencer> meta_osr;
  SharedPtrRegistry<spg_t, DeletingState> deleting_pgs;

  LogClient &log_client;

  PGRecoveryStats &pg_recovery_stats;

  Messenger *&cluster_messenger;
  Messenger *&client_messenger;

  PerfCounters *&logger;
  PerfCounters *&recoverystate_perf;

  ThreadPool::BatchWorkQueue<PG> &peering_wq;
  GenContextWQ recovery_gen_wq;
  ClassHandler *&class_handler;

  void enqueue_back(spg_t pgid, PGQueueable qi);
  void enqueue_front(spg_t pgid, PGQueueable qi);
  void maybe_inject_dispatch_delay() {
    if (g_conf->osd_debug_inject_dispatch_delay_probability > 0) {
      if (rand() % 10000 <
	  g_conf->osd_debug_inject_dispatch_delay_probability * 10000) {
	utime_t t;
	t.set_from_double(g_conf->osd_debug_inject_dispatch_delay_duration);
	t.sleep();
      }
    }
  }
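  // e.g. with osd_debug_inject_dispatch_delay_probability = 0.1, the check
  // above (rand() % 10000 < 0.1 * 10000) fires on roughly 10% of dispatches,
  // each sleeping osd_debug_inject_dispatch_delay_duration seconds.  This is
  // a debug aid only; with the probability at its default of 0 it is a no-op.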
  // -- map epoch lower bound --
  Mutex pg_epoch_lock;
  multiset<epoch_t> pg_epochs;
  map<spg_t,epoch_t> pg_epoch;

  void pg_add_epoch(spg_t pgid, epoch_t epoch) {
    Mutex::Locker l(pg_epoch_lock);
    map<spg_t,epoch_t>::iterator t = pg_epoch.find(pgid);
    assert(t == pg_epoch.end());
    pg_epoch[pgid] = epoch;
    pg_epochs.insert(epoch);
  }
  void pg_update_epoch(spg_t pgid, epoch_t epoch) {
    Mutex::Locker l(pg_epoch_lock);
    map<spg_t,epoch_t>::iterator t = pg_epoch.find(pgid);
    assert(t != pg_epoch.end());
    pg_epochs.erase(pg_epochs.find(t->second));
    t->second = epoch;
    pg_epochs.insert(epoch);
  }
  void pg_remove_epoch(spg_t pgid) {
    Mutex::Locker l(pg_epoch_lock);
    map<spg_t,epoch_t>::iterator t = pg_epoch.find(pgid);
    if (t != pg_epoch.end()) {
      pg_epochs.erase(pg_epochs.find(t->second));
      pg_epoch.erase(t);
    }
  }
  epoch_t get_min_pg_epoch() {
    Mutex::Locker l(pg_epoch_lock);
    if (pg_epochs.empty())
      return 0;
    else
      return *pg_epochs.begin();
  }
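  // The multiset mirrors the values of pg_epoch so the minimum over all PGs
  // is an O(1) lookup.  e.g. with pg_epochs = {10, 12, 12},
  // get_min_pg_epoch() == 10; after pg_update_epoch() moves the pg at 10 to
  // 13, pg_epochs = {12, 12, 13} and the minimum becomes 12.  Note the
  // erases above go through find() so that only one instance of a
  // duplicated epoch is removed at a time.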
  Mutex publish_lock, pre_publish_lock; // pre-publish orders before publish
  OSDSuperblock superblock;

  OSDSuperblock get_superblock() {
    Mutex::Locker l(publish_lock);
    return superblock;
  }
  void publish_superblock(const OSDSuperblock &block) {
    Mutex::Locker l(publish_lock);
    superblock = block;
  }

  int get_nodeid() const { return whoami; }

  std::atomic<epoch_t> max_oldest_map;
  OSDMapRef osdmap;

  OSDMapRef get_osdmap() {
    Mutex::Locker l(publish_lock);
    return osdmap;
  }
  epoch_t get_osdmap_epoch() {
    Mutex::Locker l(publish_lock);
    return osdmap ? osdmap->get_epoch() : 0;
  }
  void publish_map(OSDMapRef map) {
    Mutex::Locker l(publish_lock);
    osdmap = std::move(map);
  }
  /*
   * osdmap - current published map
   * next_osdmap - pre_published map that is about to be published.
   *
   * We use the next_osdmap to send messages and initiate connections,
   * but only if the target is the same instance as the one in the map
   * epoch the current user is working from (i.e., the result is
   * equivalent to what is in next_osdmap).
   *
   * This allows the helpers to start ignoring osds that are about to
   * go down, and let OSD::handle_osd_map()/note_down_osd() mark them
   * down, without worrying about reopening connections from threads
   * working from old maps.
   */
  OSDMapRef next_osdmap;
  Cond pre_publish_cond;

  void pre_publish_map(OSDMapRef map) {
    Mutex::Locker l(pre_publish_lock);
    next_osdmap = std::move(map);
  }
  /// map epochs reserved below
  map<epoch_t, unsigned> map_reservations;

  /// gets ref to next_osdmap and registers the epoch as reserved
  OSDMapRef get_nextmap_reserved() {
    Mutex::Locker l(pre_publish_lock);
    if (!next_osdmap)
      return OSDMapRef();
    epoch_t e = next_osdmap->get_epoch();
    map<epoch_t, unsigned>::iterator i =
      map_reservations.insert(make_pair(e, 0)).first;
    i->second++;
    return next_osdmap;
  }
  /// releases reservation on map
  void release_map(OSDMapRef osdmap) {
    Mutex::Locker l(pre_publish_lock);
    map<epoch_t, unsigned>::iterator i =
      map_reservations.find(osdmap->get_epoch());
    assert(i != map_reservations.end());
    assert(i->second > 0);
    if (--(i->second) == 0) {
      map_reservations.erase(i);
    }
    pre_publish_cond.Signal();
  }
  /// blocks until there are no reserved maps prior to next_osdmap
  void await_reserved_maps() {
    Mutex::Locker l(pre_publish_lock);
    assert(next_osdmap);
    while (true) {
      map<epoch_t, unsigned>::const_iterator i = map_reservations.cbegin();
      if (i == map_reservations.cend() ||
	  i->first >= next_osdmap->get_epoch()) {
	break;
      } else {
	pre_publish_cond.Wait(pre_publish_lock);
      }
    }
  }
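  // Typical reservation pattern (a sketch; a caller is a thread working from
  // an older map that needs a stable view of the about-to-be-published map):
  //
  //   OSDMapRef next = get_nextmap_reserved();  // pins next_osdmap's epoch
  //   if (next) {
  //     // ... use next to decide whom to message ...
  //     release_map(next);                      // wakes await_reserved_maps()
  //   }
  //
  // pre_publish_map() installs a newer next_osdmap, after which the publisher
  // can call await_reserved_maps() to wait for reservations on older epochs
  // to drain.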
  Mutex peer_map_epoch_lock;
  map<int, epoch_t> peer_map_epoch;

  epoch_t get_peer_epoch(int p);
  epoch_t note_peer_epoch(int p, epoch_t e);
  void forget_peer_epoch(int p, epoch_t e);

  void send_map(class MOSDMap *m, Connection *con);
  void send_incremental_map(epoch_t since, Connection *con, OSDMapRef& osdmap);
  MOSDMap *build_incremental_map_msg(epoch_t from, epoch_t to,
				     OSDSuperblock& superblock);
  bool should_share_map(entity_name_t name, Connection *con, epoch_t epoch,
			const OSDMapRef& osdmap, const epoch_t *sent_epoch_p);
  void share_map(entity_name_t name, Connection *con, epoch_t epoch,
		 OSDMapRef& osdmap, epoch_t *sent_epoch_p);
  void share_map_peer(int peer, Connection *con,
		      OSDMapRef map = OSDMapRef());

  ConnectionRef get_con_osd_cluster(int peer, epoch_t from_epoch);
  pair<ConnectionRef,ConnectionRef> get_con_osd_hb(int peer, epoch_t from_epoch);  // (back, front)
  void send_message_osd_cluster(int peer, Message *m, epoch_t from_epoch);
  void send_message_osd_cluster(Message *m, Connection *con) {
    con->send_message(m);
  }
  void send_message_osd_cluster(Message *m, const ConnectionRef& con) {
    con->send_message(m);
  }
  void send_message_osd_client(Message *m, Connection *con) {
    con->send_message(m);
  }
  void send_message_osd_client(Message *m, const ConnectionRef& con) {
    con->send_message(m);
  }
  entity_name_t get_cluster_msgr_name() {
    return cluster_messenger->get_myname();
  }
  // -- scrub scheduling --
  Mutex sched_scrub_lock;

  struct ScrubJob {
    CephContext* cct;
    /// pg to be scrubbed
    spg_t pgid;
    /// a time scheduled for scrub. but the scrub could be delayed if system
    /// load is too high or it fails to fall in the scrub hours
    utime_t sched_time;
    /// the hard upper bound of scrub time
    utime_t deadline;
    ScrubJob() : cct(nullptr) {}
    explicit ScrubJob(CephContext* cct, const spg_t& pg,
		      const utime_t& timestamp,
		      double pool_scrub_min_interval = 0,
		      double pool_scrub_max_interval = 0, bool must = true);
    /// order the jobs by sched_time
    bool operator<(const ScrubJob& rhs) const;
  };
  set<ScrubJob> sched_scrub_pg;

  /// @returns the scrub_reg_stamp used for unregistering the scrub job
  utime_t reg_pg_scrub(spg_t pgid, utime_t t, double pool_scrub_min_interval,
		       double pool_scrub_max_interval, bool must) {
    ScrubJob scrub(cct, pgid, t, pool_scrub_min_interval, pool_scrub_max_interval,
		   must);
    Mutex::Locker l(sched_scrub_lock);
    sched_scrub_pg.insert(scrub);
    return scrub.sched_time;
  }
  void unreg_pg_scrub(spg_t pgid, utime_t t) {
    Mutex::Locker l(sched_scrub_lock);
    size_t removed = sched_scrub_pg.erase(ScrubJob(cct, pgid, t));
    assert(removed);
  }
  bool first_scrub_stamp(ScrubJob *out) {
    Mutex::Locker l(sched_scrub_lock);
    if (sched_scrub_pg.empty())
      return false;
    set<ScrubJob>::iterator iter = sched_scrub_pg.begin();
    *out = *iter;
    return true;
  }
  bool next_scrub_stamp(const ScrubJob& next,
			ScrubJob *out) {
    Mutex::Locker l(sched_scrub_lock);
    if (sched_scrub_pg.empty())
      return false;
    set<ScrubJob>::const_iterator iter = sched_scrub_pg.lower_bound(next);
    if (iter == sched_scrub_pg.cend())
      return false;
    ++iter;
    if (iter == sched_scrub_pg.cend())
      return false;
    *out = *iter;
    return true;
  }

  void dumps_scrub(Formatter *f) {
    assert(f != nullptr);
    Mutex::Locker l(sched_scrub_lock);

    f->open_array_section("scrubs");
    for (const auto &i : sched_scrub_pg) {
      f->open_object_section("scrub");
      f->dump_stream("pgid") << i.pgid;
      f->dump_stream("sched_time") << i.sched_time;
      f->dump_stream("deadline") << i.deadline;
      f->dump_bool("forced", i.sched_time == i.deadline);
      f->close_section();
    }
    f->close_section();
  }

  bool can_inc_scrubs_pending();
  bool inc_scrubs_pending();
  void inc_scrubs_active(bool reserved);
  void dec_scrubs_pending();
  void dec_scrubs_active();

  void reply_op_error(OpRequestRef op, int err);
  void reply_op_error(OpRequestRef op, int err, eversion_t v, version_t uv);
  void handle_misdirected_op(PG *pg, OpRequestRef op);
  // -- agent shared state --
  Mutex agent_lock;
  Cond agent_cond;
  map<uint64_t, set<PGRef> > agent_queue;
  set<PGRef>::iterator agent_queue_pos;
  bool agent_valid_iterator;
  int agent_ops;
  int flush_mode_high_count; // once we have one pg in FLUSH_MODE_HIGH, flush objects at high speed
  set<hobject_t> agent_oids;

  struct AgentThread : public Thread {
    OSDService *osd;
    explicit AgentThread(OSDService *o) : osd(o) {}
    void *entry() override {
      osd->agent_entry();
      return NULL;
    }
  } agent_thread;
  bool agent_stop_flag;
  Mutex agent_timer_lock;
  SafeTimer agent_timer;

  void _enqueue(PG *pg, uint64_t priority) {
    if (!agent_queue.empty() &&
	agent_queue.rbegin()->first < priority)
      agent_valid_iterator = false;  // inserting higher-priority queue
    set<PGRef>& nq = agent_queue[priority];
    nq.insert(pg);
    agent_cond.Signal();
  }

  void _dequeue(PG *pg, uint64_t old_priority) {
    set<PGRef>& oq = agent_queue[old_priority];
    set<PGRef>::iterator p = oq.find(pg);
    assert(p != oq.end());
    if (p == agent_queue_pos)
      ++agent_queue_pos;
    oq.erase(p);
    if (oq.empty()) {
      if (agent_queue.rbegin()->first == old_priority)
	agent_valid_iterator = false;
      agent_queue.erase(old_priority);
    }
  }
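  // agent_valid_iterator guards agent_queue_pos: the agent walks the
  // highest-priority set via agent_queue.rbegin(), so inserting a new
  // higher-priority bucket (_enqueue) or erasing the current highest bucket
  // (_dequeue above) invalidates that position and forces the agent to
  // restart its scan.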
  /// enable agent for a pg
  void agent_enable_pg(PG *pg, uint64_t priority) {
    Mutex::Locker l(agent_lock);
    _enqueue(pg, priority);
  }

  /// adjust priority for an enabled pg
  void agent_adjust_pg(PG *pg, uint64_t old_priority, uint64_t new_priority) {
    Mutex::Locker l(agent_lock);
    assert(new_priority != old_priority);
    _enqueue(pg, new_priority);
    _dequeue(pg, old_priority);
  }

  /// disable agent for a pg
  void agent_disable_pg(PG *pg, uint64_t old_priority) {
    Mutex::Locker l(agent_lock);
    _dequeue(pg, old_priority);
  }

  /// note start of an async (evict) op
  void agent_start_evict_op() {
    Mutex::Locker l(agent_lock);
    ++agent_ops;
  }

  /// note finish or cancellation of an async (evict) op
  void agent_finish_evict_op() {
    Mutex::Locker l(agent_lock);
    assert(agent_ops > 0);
    --agent_ops;
    agent_cond.Signal();
  }

  /// note start of an async (flush) op
  void agent_start_op(const hobject_t& oid) {
    Mutex::Locker l(agent_lock);
    ++agent_ops;
    assert(agent_oids.count(oid) == 0);
    agent_oids.insert(oid);
  }

  /// note finish or cancellation of an async (flush) op
  void agent_finish_op(const hobject_t& oid) {
    Mutex::Locker l(agent_lock);
    assert(agent_ops > 0);
    --agent_ops;
    assert(agent_oids.count(oid) == 1);
    agent_oids.erase(oid);
    agent_cond.Signal();
  }

  /// check if we are operating on an object
  bool agent_is_active_oid(const hobject_t& oid) {
    Mutex::Locker l(agent_lock);
    return agent_oids.count(oid);
  }

  /// get count of active agent ops
  int agent_get_num_ops() {
    Mutex::Locker l(agent_lock);
    return agent_ops;
  }

  void agent_inc_high_count() {
    Mutex::Locker l(agent_lock);
    flush_mode_high_count++;
  }

  void agent_dec_high_count() {
    Mutex::Locker l(agent_lock);
    flush_mode_high_count--;
  }

  /// throttle promotion attempts
  std::atomic_uint promote_probability_millis{1000}; ///< probability thousandths. one word.
  PromoteCounter promote_counter;
  utime_t last_recalibrate;
  unsigned long promote_max_objects, promote_max_bytes;

  bool promote_throttle() {
    // NOTE: lockless!  we rely on the probability being a single word.
    promote_counter.attempt();
    if ((unsigned)rand() % 1000 > promote_probability_millis)
      return true;  // yes throttle (no promote)
    if (promote_max_objects &&
	promote_counter.objects > promote_max_objects)
      return true;  // yes throttle
    if (promote_max_bytes &&
	promote_counter.bytes > promote_max_bytes)
      return true;  // yes throttle
    return false;   // no throttle (promote)
  }
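  // Worked example: promote_probability_millis is a probability in
  // thousandths.  At its initial value of 1000 the test
  // "rand() % 1000 > 1000" above is never true, so promotion is never
  // throttled on probability alone; if promote_throttle_recalibrate()
  // lowers it to 250, roughly 75% of attempts return true (throttled) and
  // only ~25% go on to the object/byte caps.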
  void promote_finish(uint64_t bytes) {
    promote_counter.finish(bytes);
  }
  void promote_throttle_recalibrate();

  // -- Objecter, for tiering reads/writes from/to other OSDs --
  Objecter *objecter;
  Finisher objecter_finisher;

  // -- Watch --
  Mutex watch_lock;
  SafeTimer watch_timer;
  uint64_t next_notif_id;
  uint64_t get_next_id(epoch_t cur_epoch) {
    Mutex::Locker l(watch_lock);
    return (((uint64_t)cur_epoch) << 32) | ((uint64_t)(next_notif_id++));
  }
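  // e.g. cur_epoch = 100, next_notif_id = 7 yields
  // (100ull << 32) | 7 == 0x0000006400000007: the epoch in the high 32 bits
  // keeps ids from different epochs distinct, while next_notif_id++ keeps
  // them unique within an epoch.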
  // -- Recovery/Backfill Request Scheduling --
  Mutex recovery_request_lock;
  SafeTimer recovery_request_timer;

  // For async recovery sleep
  bool recovery_needs_sleep = true;
  utime_t recovery_schedule_time = utime_t();

  Mutex recovery_sleep_lock;
  SafeTimer recovery_sleep_timer;

  // -- tids --
  std::atomic_uint last_tid{0};
  ceph_tid_t get_tid() {
    return (ceph_tid_t)last_tid++;
  }
  // -- backfill_reservation --
  Finisher reserver_finisher;
  AsyncReserver<spg_t> local_reserver;
  AsyncReserver<spg_t> remote_reserver;

  // -- pg_temp --
  map<pg_t, vector<int> > pg_temp_wanted;
  map<pg_t, vector<int> > pg_temp_pending;
  void _sent_pg_temp();

  void queue_want_pg_temp(pg_t pgid, vector<int>& want);
  void remove_want_pg_temp(pg_t pgid);
  void requeue_pg_temp();

  void send_pg_created(pg_t pgid);

  void queue_for_peering(PG *pg);

  Mutex snap_sleep_lock;
  SafeTimer snap_sleep_timer;

  Mutex scrub_sleep_lock;
  SafeTimer scrub_sleep_timer;

  AsyncReserver<spg_t> snap_reserver;
  void queue_for_snap_trim(PG *pg);

  void queue_for_scrub(PG *pg, bool with_high_priority) {
    unsigned scrub_queue_priority = pg->scrubber.priority;
    if (with_high_priority && scrub_queue_priority < cct->_conf->osd_client_op_priority) {
      scrub_queue_priority = cct->_conf->osd_client_op_priority;
    }
    enqueue_back(
      pg->info.pgid,
      PGQueueable(
	PGScrub(pg->get_osdmap()->get_epoch()),
	cct->_conf->osd_scrub_cost,
	scrub_queue_priority,
	ceph_clock_now(),
	entity_inst_t(),
	pg->get_osdmap()->get_epoch()));
  }
  // -- pg recovery and associated throttling --
  Mutex recovery_lock;
  list<pair<epoch_t, PGRef> > awaiting_throttle;

  utime_t defer_recovery_until;
  uint64_t recovery_ops_active;
  uint64_t recovery_ops_reserved;
  bool recovery_paused;
#ifdef DEBUG_RECOVERY_OIDS
  map<spg_t, set<hobject_t> > recovery_oids;
#endif

  bool _recover_now(uint64_t *available_pushes);
  void _maybe_queue_recovery();
  void _queue_for_recovery(
    pair<epoch_t, PGRef> p, uint64_t reserved_pushes) {
    assert(recovery_lock.is_locked_by_me());
    enqueue_back(
      p.second->info.pgid,
      PGQueueable(
	PGRecovery(p.first, reserved_pushes),
	cct->_conf->osd_recovery_cost,
	cct->_conf->osd_recovery_priority,
	ceph_clock_now(),
	entity_inst_t(),
	p.first));
  }

  void start_recovery_op(PG *pg, const hobject_t& soid);
  void finish_recovery_op(PG *pg, const hobject_t& soid, bool dequeue);
  bool is_recovery_active();
  void release_reserved_pushes(uint64_t pushes) {
    Mutex::Locker l(recovery_lock);
    assert(recovery_ops_reserved >= pushes);
    recovery_ops_reserved -= pushes;
    _maybe_queue_recovery();
  }
  void defer_recovery(float defer_for) {
    defer_recovery_until = ceph_clock_now();
    defer_recovery_until += defer_for;
  }
  void pause_recovery() {
    Mutex::Locker l(recovery_lock);
    recovery_paused = true;
  }
  bool recovery_is_paused() {
    Mutex::Locker l(recovery_lock);
    return recovery_paused;
  }
  void unpause_recovery() {
    Mutex::Locker l(recovery_lock);
    recovery_paused = false;
    _maybe_queue_recovery();
  }
  void kick_recovery_queue() {
    Mutex::Locker l(recovery_lock);
    _maybe_queue_recovery();
  }
  void clear_queued_recovery(PG *pg) {
    Mutex::Locker l(recovery_lock);
    for (list<pair<epoch_t, PGRef> >::iterator i = awaiting_throttle.begin();
	 i != awaiting_throttle.end();
	 ++i) {
      if (i->second.get() == pg) {
	awaiting_throttle.erase(i);
	return;
      }
    }
  }
  // delayed pg activation
  void queue_for_recovery(PG *pg) {
    Mutex::Locker l(recovery_lock);

    if (pg->get_state() & (PG_STATE_FORCED_RECOVERY | PG_STATE_FORCED_BACKFILL)) {
      awaiting_throttle.push_front(make_pair(pg->get_osdmap()->get_epoch(), pg));
    } else {
      awaiting_throttle.push_back(make_pair(pg->get_osdmap()->get_epoch(), pg));
    }
    _maybe_queue_recovery();
  }
  void queue_recovery_after_sleep(PG *pg, epoch_t queued, uint64_t reserved_pushes) {
    Mutex::Locker l(recovery_lock);
    _queue_for_recovery(make_pair(queued, pg), reserved_pushes);
  }

  void adjust_pg_priorities(const vector<PGRef>& pgs, int newflags);
  // osd map cache (past osd maps)
  Mutex map_cache_lock;
  SharedLRU<epoch_t, const OSDMap> map_cache;
  SimpleLRU<epoch_t, bufferlist> map_bl_cache;
  SimpleLRU<epoch_t, bufferlist> map_bl_inc_cache;

  OSDMapRef try_get_map(epoch_t e);
  OSDMapRef get_map(epoch_t e) {
    OSDMapRef ret(try_get_map(e));
    assert(ret);
    return ret;
  }
  OSDMapRef add_map(OSDMap *o) {
    Mutex::Locker l(map_cache_lock);
    return _add_map(o);
  }
  OSDMapRef _add_map(OSDMap *o);

  void add_map_bl(epoch_t e, bufferlist& bl) {
    Mutex::Locker l(map_cache_lock);
    return _add_map_bl(e, bl);
  }
  void pin_map_bl(epoch_t e, bufferlist &bl);
  void _add_map_bl(epoch_t e, bufferlist& bl);
  bool get_map_bl(epoch_t e, bufferlist& bl) {
    Mutex::Locker l(map_cache_lock);
    return _get_map_bl(e, bl);
  }
  bool _get_map_bl(epoch_t e, bufferlist& bl);

  void add_map_inc_bl(epoch_t e, bufferlist& bl) {
    Mutex::Locker l(map_cache_lock);
    return _add_map_inc_bl(e, bl);
  }
  void pin_map_inc_bl(epoch_t e, bufferlist &bl);
  void _add_map_inc_bl(epoch_t e, bufferlist& bl);
  bool get_inc_map_bl(epoch_t e, bufferlist& bl);

  void clear_map_bl_cache_pins(epoch_t e);

  void need_heartbeat_peer_update();

  void pg_stat_queue_enqueue(PG *pg);
  void pg_stat_queue_dequeue(PG *pg);

  void start_shutdown();
  void shutdown_reserver();
  Mutex in_progress_split_lock;
  map<spg_t, spg_t> pending_splits;           // child -> parent
  map<spg_t, set<spg_t> > rev_pending_splits; // parent -> [children]
  set<spg_t> in_progress_splits;              // child

  void _start_split(spg_t parent, const set<spg_t> &children);
  void start_split(spg_t parent, const set<spg_t> &children) {
    Mutex::Locker l(in_progress_split_lock);
    return _start_split(parent, children);
  }
  void mark_split_in_progress(spg_t parent, const set<spg_t> &pgs);
  void complete_split(const set<spg_t> &pgs);
  void cancel_pending_splits_for_parent(spg_t parent);
  void _cancel_pending_splits_for_parent(spg_t parent);
  bool splitting(spg_t pgid);
  void expand_pg_num(OSDMapRef old_map,
		     OSDMapRef new_map);
  void _maybe_split_pgid(OSDMapRef old_map,
			 OSDMapRef new_map,
			 spg_t pgid);
  void init_splits_between(spg_t pgid, OSDMapRef frommap, OSDMapRef tomap);
  // -- stats --
  Mutex stat_lock;
  osd_stat_t osd_stat;
  uint32_t seq = 0;

  void update_osd_stat(vector<int>& hb_peers);
  osd_stat_t set_osd_stat(const struct store_statfs_t &stbuf,
			  vector<int>& hb_peers,
			  int num_pgs);
  osd_stat_t get_osd_stat() {
    Mutex::Locker l(stat_lock);
    ++seq;
    osd_stat.up_from = up_epoch;
    osd_stat.seq = ((uint64_t)osd_stat.up_from << 32) + seq;
    return osd_stat;
  }
  uint64_t get_osd_stat_seq() {
    Mutex::Locker l(stat_lock);
    return osd_stat.seq;
  }
  // -- OSD Full Status --
  friend TestOpsSocketHook;
  mutable Mutex full_status_lock;
  enum s_names { INVALID = -1, NONE, NEARFULL, BACKFILLFULL, FULL, FAILSAFE } cur_state;  // ascending
  const char *get_full_state_name(s_names s) const {
    switch (s) {
    case NONE: return "none";
    case NEARFULL: return "nearfull";
    case BACKFILLFULL: return "backfillfull";
    case FULL: return "full";
    case FAILSAFE: return "failsafe";
    default: return "???";
    }
  }
  s_names get_full_state(string type) const {
    if (type == "none")
      return NONE;
    else if (type == "failsafe")
      return FAILSAFE;
    else if (type == "full")
      return FULL;
    else if (type == "backfillfull")
      return BACKFILLFULL;
    else if (type == "nearfull")
      return NEARFULL;
    else
      return INVALID;
  }
  double cur_ratio;  ///< current utilization
  mutable int64_t injectfull = 0;
  s_names injectfull_state = NONE;
  float get_failsafe_full_ratio();
  void check_full_status(float ratio);
  bool _check_full(s_names type, ostream &ss) const;

  bool check_failsafe_full(ostream &ss) const;
  bool check_full(ostream &ss) const;
  bool check_backfill_full(ostream &ss) const;
  bool check_nearfull(ostream &ss) const;
  bool is_failsafe_full() const;
  bool is_full() const;
  bool is_backfillfull() const;
  bool is_nearfull() const;
  bool need_fullness_update();  ///< osdmap state needs update
  void set_injectfull(s_names type, int64_t count);
  bool check_osdmap_full(const set<pg_shard_t> &missing_on);
  mutable Mutex epoch_lock; // protects access to boot_epoch, up_epoch, bind_epoch
  epoch_t boot_epoch;  // _first_ epoch we were marked up (after this process started)
  epoch_t up_epoch;    // _most_recent_ epoch we were marked up
  epoch_t bind_epoch;  // epoch we last did a bind to new ip:ports

  /**
   * Retrieve the boot_, up_, and bind_ epochs the OSD has set.  The params
   * can be NULL if you don't care about them.
   */
  void retrieve_epochs(epoch_t *_boot_epoch, epoch_t *_up_epoch,
		       epoch_t *_bind_epoch) const;
  /**
   * Set the boot, up, and bind epochs.  Any NULL params will not be set.
   */
  void set_epochs(const epoch_t *_boot_epoch, const epoch_t *_up_epoch,
		  const epoch_t *_bind_epoch);
  epoch_t get_boot_epoch() const {
    epoch_t ret;
    retrieve_epochs(&ret, NULL, NULL);
    return ret;
  }
  epoch_t get_up_epoch() const {
    epoch_t ret;
    retrieve_epochs(NULL, &ret, NULL);
    return ret;
  }
  epoch_t get_bind_epoch() const {
    epoch_t ret;
    retrieve_epochs(NULL, NULL, &ret);
    return ret;
  }
  // -- stopping --
  Mutex is_stopping_lock;
  Cond is_stopping_cond;
  enum {
    NOT_STOPPING,
    PREPARING_TO_STOP,
    STOPPING
  };
  std::atomic_int state{NOT_STOPPING};

  void set_state(int s) {
    state = s;
  }
  bool is_stopping() const {
    return state == STOPPING;
  }
  bool is_preparing_to_stop() const {
    return state == PREPARING_TO_STOP;
  }
  bool prepare_to_stop();
  void got_stop_ack();

#ifdef PG_DEBUG_REFS
  Mutex pgid_lock;
  map<spg_t, int> pgid_tracker;
  map<spg_t, PG*> live_pgs;
  void add_pgid(spg_t pgid, PG *pg);
  void remove_pgid(spg_t pgid, PG *pg);
  void dump_live_pgids();
#endif

  explicit OSDService(OSD *osd);
  ~OSDService();
};
class OSD : public Dispatcher,
	    public md_config_obs_t {
  Mutex osd_lock;          // global lock
  SafeTimer tick_timer;    // safe timer (osd_lock)

  // Tick timer for work that does not need osd_lock
  Mutex tick_timer_lock;
  SafeTimer tick_timer_without_osd_lock;

  // config observer bits
  const char** get_tracked_conf_keys() const override;
  void handle_conf_change(const struct md_config_t *conf,
			  const std::set<std::string> &changed) override;
  void update_log_config();
  void check_config();

  static const double OSD_TICK_INTERVAL; // tick interval for tick_timer and tick_timer_without_osd_lock

  AuthAuthorizeHandlerRegistry *authorize_handler_cluster_registry;
  AuthAuthorizeHandlerRegistry *authorize_handler_service_registry;

  Messenger   *cluster_messenger;
  Messenger   *client_messenger;
  Messenger   *objecter_messenger;
  MonClient   *monc; // check the "monc helpers" list before accessing directly

  PerfCounters      *logger;
  PerfCounters      *recoverystate_perf;

  FuseStore *fuse_store = nullptr;

  LogClient log_client;

  std::string dev_path, journal_path;

  bool store_is_rotational = true;
  bool journal_is_rotational = true;

  ZTracer::Endpoint trace_endpoint;
  void create_logger();
  void create_recoverystate_perf();
  void tick_without_osd_lock();
  void _dispatch(Message *m);
  void dispatch_op(OpRequestRef op);

  void check_osdmap_features(ObjectStore *store);

  friend class OSDSocketHook;
  class OSDSocketHook *asok_hook;
  bool asok_command(string admin_command, cmdmap_t& cmdmap, string format, ostream& ss);

  ClassHandler  *class_handler = nullptr;
  int get_nodeid() { return whoami; }
  static ghobject_t get_osdmap_pobject_name(epoch_t epoch) {
    char foo[20];
    snprintf(foo, sizeof(foo), "osdmap.%d", epoch);
    return ghobject_t(hobject_t(sobject_t(object_t(foo), 0)));
  }
  static ghobject_t get_inc_osdmap_pobject_name(epoch_t epoch) {
    char foo[22];
    snprintf(foo, sizeof(foo), "inc_osdmap.%d", epoch);
    return ghobject_t(hobject_t(sobject_t(object_t(foo), 0)));
  }

  static ghobject_t make_snapmapper_oid() {
    return ghobject_t(hobject_t(
      sobject_t(
	object_t("snapmapper"),
	0)));
  }

  static ghobject_t make_pg_log_oid(spg_t pg) {
    stringstream ss;
    ss << "pglog_" << pg;
    string s;
    getline(ss, s);
    return ghobject_t(hobject_t(sobject_t(object_t(s.c_str()), 0)));
  }

  static ghobject_t make_pg_biginfo_oid(spg_t pg) {
    stringstream ss;
    ss << "pginfo_" << pg;
    string s;
    getline(ss, s);
    return ghobject_t(hobject_t(sobject_t(object_t(s.c_str()), 0)));
  }
  static ghobject_t make_infos_oid() {
    hobject_t oid(sobject_t("infos", CEPH_NOSNAP));
    return ghobject_t(oid);
  }
  static void recursive_remove_collection(CephContext* cct,
					  ObjectStore *store,
					  spg_t pgid,
					  coll_t tmp);

  /**
   * get_osd_initial_compat_set()
   *
   * Get the initial feature set for this OSD.  Features
   * here are automatically upgraded.
   *
   * Return value: Initial osd CompatSet
   */
  static CompatSet get_osd_initial_compat_set();

  /**
   * get_osd_compat_set()
   *
   * Get all features supported by this OSD
   *
   * Return value: CompatSet of all supported features
   */
  static CompatSet get_osd_compat_set();
  class C_Tick_WithoutOSDLock;

  OSDSuperblock superblock;

  void write_superblock();
  void write_superblock(ObjectStore::Transaction& t);
  int read_superblock();

  void clear_temp_objects();

  CompatSet osd_compat;

  // -- state --
  enum {
    STATE_INITIALIZING = 1,
    STATE_PREBOOT,
    STATE_BOOTING,
    STATE_ACTIVE,
    STATE_STOPPING,
    STATE_WAITING_FOR_HEALTHY
  };

  static const char *get_state_name(int s) {
    switch (s) {
    case STATE_INITIALIZING: return "initializing";
    case STATE_PREBOOT: return "preboot";
    case STATE_BOOTING: return "booting";
    case STATE_ACTIVE: return "active";
    case STATE_STOPPING: return "stopping";
    case STATE_WAITING_FOR_HEALTHY: return "waiting_for_healthy";
    default: return "???";
    }
  }

  std::atomic_int state{STATE_INITIALIZING};
  bool waiting_for_luminous_mons = false;

  int get_state() const {
    return state;
  }
  void set_state(int s) {
    state = s;
  }
  bool is_initializing() const {
    return state == STATE_INITIALIZING;
  }
  bool is_preboot() const {
    return state == STATE_PREBOOT;
  }
  bool is_booting() const {
    return state == STATE_BOOTING;
  }
  bool is_active() const {
    return state == STATE_ACTIVE;
  }
  bool is_stopping() const {
    return state == STATE_STOPPING;
  }
  bool is_waiting_for_healthy() const {
    return state == STATE_WAITING_FOR_HEALTHY;
  }
  ThreadPool peering_tp;
  ShardedThreadPool osd_op_tp;
  ThreadPool command_tp;

  void set_disk_tp_priority();
  void get_latest_osdmap();

  void dispatch_session_waiting(Session *session, OSDMapRef osdmap);
  void maybe_share_map(Session *session, OpRequestRef op, OSDMapRef osdmap);

  Mutex session_waiting_lock;
  set<Session*> session_waiting_for_map;

  /// Caller assumes refs for included Sessions
  void get_sessions_waiting_for_map(set<Session*> *out) {
    Mutex::Locker l(session_waiting_lock);
    out->swap(session_waiting_for_map);
  }
  void register_session_waiting_on_map(Session *session) {
    Mutex::Locker l(session_waiting_lock);
    if (session_waiting_for_map.insert(session).second) {
      session->get();
    }
  }
  void clear_session_waiting_on_map(Session *session) {
    Mutex::Locker l(session_waiting_lock);
    set<Session*>::iterator i = session_waiting_for_map.find(session);
    if (i != session_waiting_for_map.end()) {
      (*i)->put();
      session_waiting_for_map.erase(i);
    }
  }
  void dispatch_sessions_waiting_on_map() {
    set<Session*> sessions_to_check;
    get_sessions_waiting_for_map(&sessions_to_check);
    for (set<Session*>::iterator i = sessions_to_check.begin();
	 i != sessions_to_check.end();
	 sessions_to_check.erase(i++)) {
      (*i)->session_dispatch_lock.Lock();
      dispatch_session_waiting(*i, osdmap);
      (*i)->session_dispatch_lock.Unlock();
      (*i)->put();
    }
  }
  void session_handle_reset(Session *session) {
    Mutex::Locker l(session->session_dispatch_lock);
    clear_session_waiting_on_map(session);

    session->clear_backoffs();

    /* Messages have connection refs, we need to clear the
     * connection->session->message->connection
     * cycles which result.
     */
    session->waiting_on_map.clear_and_dispose(TrackedOp::Putter());
  }
  /**
   * @defgroup monc helpers
   * @{
   * Right now we only have the one
   */

  /**
   * Ask the Monitors for a sequence of OSDMaps.
   *
   * @param epoch The epoch to start with when replying
   * @param force_request True if this request forces a new subscription to
   * the monitors; false if an outstanding request that encompasses it is
   * sufficient.
   */
  void osdmap_subscribe(version_t epoch, bool force_request);
  /** @} monc helpers */
  /// information about a heartbeat peer
  struct HeartbeatInfo {
    int peer;                  ///< peer
    ConnectionRef con_front;   ///< peer connection (front)
    ConnectionRef con_back;    ///< peer connection (back)
    utime_t first_tx;          ///< time we sent our first ping request
    utime_t last_tx;           ///< last time we sent a ping request
    utime_t last_rx_front;     ///< last time we got a ping reply on the front side
    utime_t last_rx_back;      ///< last time we got a ping reply on the back side
    epoch_t epoch;             ///< most recent epoch we wanted this peer

    bool is_unhealthy(utime_t cutoff) const {
      return
	! ((last_rx_front > cutoff ||
	    (last_rx_front == utime_t() && (last_tx == utime_t() ||
					    first_tx > cutoff))) &&
	   (last_rx_back > cutoff ||
	    (last_rx_back == utime_t() && (last_tx == utime_t() ||
					   first_tx > cutoff))));
    }
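    // In words: a peer is unhealthy unless *both* the front and back
    // channels look fresh, where "fresh" means either (a) we got a reply
    // after the cutoff, or (b) we have never gotten a reply on that channel
    // but also have not been pinging since before the cutoff (no ping sent
    // yet, or first_tx > cutoff).  e.g. a peer whose back replies stopped
    // before the cutoff is unhealthy even if its front channel is fine.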
    bool is_healthy(utime_t cutoff) const {
      return last_rx_front > cutoff && last_rx_back > cutoff;
    }
  };

  /// state attached to outgoing heartbeat connections
  struct HeartbeatSession : public RefCountedObject {
    int peer;
    explicit HeartbeatSession(int p) : peer(p) {}
  };
  Mutex heartbeat_lock;
  map<int, int> debug_heartbeat_drops_remaining;
  Cond heartbeat_cond;
  bool heartbeat_stop;
  std::atomic_bool heartbeat_need_update;
  map<int,HeartbeatInfo> heartbeat_peers;  ///< map of osd id to HeartbeatInfo
  utime_t last_mon_heartbeat;
  Messenger *hb_front_client_messenger;
  Messenger *hb_back_client_messenger;
  Messenger *hb_front_server_messenger;
  Messenger *hb_back_server_messenger;
  utime_t last_heartbeat_resample;   ///< last time we chose random peers in waiting-for-healthy state
  double daily_loadavg;

  void _add_heartbeat_peer(int p);
  void _remove_heartbeat_peer(int p);
  bool heartbeat_reset(Connection *con);
  void maybe_update_heartbeat_peers();
  void reset_heartbeat_peers();
  bool heartbeat_peers_need_update() {
    return heartbeat_need_update.load();
  }
  void heartbeat_set_peers_need_update() {
    heartbeat_need_update.store(true);
  }
  void heartbeat_clear_peers_need_update() {
    heartbeat_need_update.store(false);
  }
  void heartbeat_check();
  void heartbeat_entry();
  void need_heartbeat_peer_update();

  void heartbeat_kick() {
    Mutex::Locker l(heartbeat_lock);
    heartbeat_cond.Signal();
  }

  struct T_Heartbeat : public Thread {
    OSD *osd;
    explicit T_Heartbeat(OSD *o) : osd(o) {}
    void *entry() override {
      osd->heartbeat_entry();
      return 0;
    }
  } heartbeat_thread;

  bool heartbeat_dispatch(Message *m);
  struct HeartbeatDispatcher : public Dispatcher {
    OSD *osd;
    explicit HeartbeatDispatcher(OSD *o) : Dispatcher(o->cct), osd(o) {}

    bool ms_can_fast_dispatch_any() const override { return true; }
    bool ms_can_fast_dispatch(const Message *m) const override {
      switch (m->get_type()) {
      case CEPH_MSG_PING:
      case MSG_OSD_PING:
	return true;
      default:
	return false;
      }
    }
    void ms_fast_dispatch(Message *m) override {
      osd->heartbeat_dispatch(m);
    }
    bool ms_dispatch(Message *m) override {
      return osd->heartbeat_dispatch(m);
    }
    bool ms_handle_reset(Connection *con) override {
      return osd->heartbeat_reset(con);
    }
    void ms_handle_remote_reset(Connection *con) override {}
    bool ms_handle_refused(Connection *con) override {
      return osd->ms_handle_refused(con);
    }
    bool ms_verify_authorizer(Connection *con, int peer_type,
			      int protocol, bufferlist& authorizer_data, bufferlist& authorizer_reply,
			      bool& isvalid, CryptoKey& session_key) override {
      isvalid = true;
      return true;
    }
  } heartbeat_dispatcher;
  list<OpRequestRef> finished;

  void take_waiters(list<OpRequestRef>& ls) {
    assert(osd_lock.is_locked());
    finished.splice(finished.end(), ls);
  }

  // -- op tracking --
  OpTracker op_tracker;
  void check_ops_in_flight();
  void test_ops(std::string command, std::string args, ostream& ss);
  friend class TestOpsSocketHook;
  TestOpsSocketHook *test_ops_hook;
  friend struct C_CompleteSplits;
  friend struct C_OpenPGs;
  enum class io_queue {
    prioritized,
    weightedpriority,
    mclock_opclass,
    mclock_client,
  };
  friend std::ostream& operator<<(std::ostream& out, const OSD::io_queue& q);

  const io_queue op_queue;
  const unsigned int op_prio_cutoff;
  /*
   * The ordered op delivery chain is:
   *
   *   fast dispatch -> pqueue back
   *                    pqueue front <-> to_process back
   *                                     to_process front -> RunVis(item)
   *
   * The pqueue is per-shard, and to_process is per pg_slot.  Items can be
   * pushed back up into to_process and/or pqueue while order is preserved.
   *
   * Multiple worker threads can operate on each shard.
   *
   * Under normal circumstances, num_running == to_process.size().  There are
   * two times when that is not true: (1) when waiting_for_pg == true and
   * to_process is accumulating requests that are waiting for the pg to be
   * instantiated; in that case they will all get requeued together by
   * wake_pg_waiters, and (2) when wake_pg_waiters just ran, cleared
   * waiting_for_pg, and already requeued the items.
   */
  friend class PGQueueable;

  class ShardedOpWQ
    : public ShardedThreadPool::ShardedWQ<pair<spg_t,PGQueueable>>
  {
    struct ShardData {
      Mutex sdata_lock;
      Cond sdata_cond;

      Mutex sdata_op_ordering_lock;   ///< protects all members below

      OSDMapRef waiting_for_pg_osdmap;
      struct pg_slot {
	PGRef pg;                     ///< cached pg reference [optional]
	list<PGQueueable> to_process; ///< order items for this slot
	int num_running = 0;          ///< _process threads doing pg lookup/lock

	/// true if pg does/did not exist.  if so all new items go directly
	/// to to_process.  cleared by prune_pg_waiters.
	bool waiting_for_pg = false;

	/// incremented by wake_pg_waiters; indicates racing _process threads
	/// should bail out (their op has been requeued)
	uint64_t requeue_seq = 0;
      };

      /// map of slots for each spg_t.  maintains ordering of items dequeued
      /// from pqueue while _process thread drops shard lock to acquire the
      /// pg lock.  slots are removed only by prune_pg_waiters.
      unordered_map<spg_t,pg_slot> pg_slots;
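      /// The requeue_seq protocol, roughly (a sketch of how _process uses
      /// these slots):
      ///   1. _process dequeues an item for a pg, records the slot's
      ///      requeue_seq, bumps num_running, then drops the shard's
      ///      ordering lock to take the pg lock.
      ///   2. wake_pg_waiters meanwhile requeues the slot's items and
      ///      increments requeue_seq.
      ///   3. _process reacquires the shard lock; if requeue_seq changed,
      ///      its item was already requeued, so it bails out rather than
      ///      running the op twice.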
      std::unique_ptr<OpQueue<pair<spg_t, PGQueueable>, entity_inst_t>> pqueue;

      void _enqueue_front(pair<spg_t, PGQueueable> item, unsigned cutoff) {
	unsigned priority = item.second.get_priority();
	unsigned cost = item.second.get_cost();
	if (priority >= cutoff)
	  pqueue->enqueue_strict_front(
	    item.second.get_owner(),
	    priority, item);
	else
	  pqueue->enqueue_front(
	    item.second.get_owner(),
	    priority, cost, item);
      }

      ShardData(
	string lock_name, string ordering_lock,
	uint64_t max_tok_per_prio, uint64_t min_cost, CephContext *cct,
	io_queue opqueue)
	: sdata_lock(lock_name.c_str(), false, true, false, cct),
	  sdata_op_ordering_lock(ordering_lock.c_str(), false, true,
				 false, cct) {
	if (opqueue == io_queue::weightedpriority) {
	  pqueue = std::unique_ptr
	    <WeightedPriorityQueue<pair<spg_t,PGQueueable>,entity_inst_t>>(
	      new WeightedPriorityQueue<pair<spg_t,PGQueueable>,entity_inst_t>(
		max_tok_per_prio, min_cost));
	} else if (opqueue == io_queue::prioritized) {
	  pqueue = std::unique_ptr
	    <PrioritizedQueue<pair<spg_t,PGQueueable>,entity_inst_t>>(
	      new PrioritizedQueue<pair<spg_t,PGQueueable>,entity_inst_t>(
		max_tok_per_prio, min_cost));
	} else if (opqueue == io_queue::mclock_opclass) {
	  pqueue = std::unique_ptr
	    <ceph::mClockOpClassQueue>(new ceph::mClockOpClassQueue(cct));
	} else if (opqueue == io_queue::mclock_client) {
	  pqueue = std::unique_ptr
	    <ceph::mClockClientQueue>(new ceph::mClockClientQueue(cct));
	}
      }
    }; // struct ShardData
    vector<ShardData*> shard_list;
    OSD *osd;
    uint32_t num_shards;

  public:
    ShardedOpWQ(uint32_t pnum_shards,
		OSD *o,
		time_t ti,
		time_t si,
		ShardedThreadPool* tp)
      : ShardedThreadPool::ShardedWQ<pair<spg_t,PGQueueable>>(ti, si, tp),
	osd(o),
	num_shards(pnum_shards) {
      for (uint32_t i = 0; i < num_shards; i++) {
	char lock_name[32] = {0};
	snprintf(lock_name, sizeof(lock_name), "%s.%d", "OSD:ShardedOpWQ:", i);
	char order_lock[32] = {0};
	snprintf(order_lock, sizeof(order_lock), "%s.%d",
		 "OSD:ShardedOpWQ:order:", i);
	ShardData* one_shard = new ShardData(
	  lock_name, order_lock,
	  osd->cct->_conf->osd_op_pq_max_tokens_per_priority,
	  osd->cct->_conf->osd_op_pq_min_cost, osd->cct, osd->op_queue);
	shard_list.push_back(one_shard);
      }
    }
    ~ShardedOpWQ() override {
      while (!shard_list.empty()) {
	delete shard_list.back();
	shard_list.pop_back();
      }
    }
    /// wake any pg waiters after a PG is created/instantiated
    void wake_pg_waiters(spg_t pgid);

    /// prune ops (and possibly pg_slots) for pgs that shouldn't be here
    void prune_pg_waiters(OSDMapRef osdmap, int whoami);

    /// clear cached PGRef on pg deletion
    void clear_pg_pointer(spg_t pgid);

    /// clear pg_slots on shutdown
    void clear_pg_slots();

    /// try to do some work
    void _process(uint32_t thread_index, heartbeat_handle_d *hb) override;

    /// enqueue a new item
    void _enqueue(pair<spg_t, PGQueueable> item) override;

    /// requeue an old item (at the front of the line)
    void _enqueue_front(pair<spg_t, PGQueueable> item) override;
    void return_waiting_threads() override {
      for (uint32_t i = 0; i < num_shards; i++) {
	ShardData* sdata = shard_list[i];
	assert(NULL != sdata);
	sdata->sdata_lock.Lock();
	sdata->sdata_cond.Signal();
	sdata->sdata_lock.Unlock();
      }
    }

    void dump(Formatter *f) {
      for (uint32_t i = 0; i < num_shards; i++) {
	ShardData* sdata = shard_list[i];
	char lock_name[32] = {0};
	snprintf(lock_name, sizeof(lock_name), "%s%d", "OSD:ShardedOpWQ:", i);
	assert(NULL != sdata);
	sdata->sdata_op_ordering_lock.Lock();
	f->open_object_section(lock_name);
	sdata->pqueue->dump(f);
	f->close_section();
	sdata->sdata_op_ordering_lock.Unlock();
      }
    }
    /// Must be called on ops queued back to front
    struct Pred {
      spg_t pgid;
      list<OpRequestRef> *out_ops;
      uint64_t reserved_pushes_to_free;
      Pred(spg_t pg, list<OpRequestRef> *out_ops = 0)
	: pgid(pg), out_ops(out_ops), reserved_pushes_to_free(0) {}
      void accumulate(const PGQueueable &op) {
	reserved_pushes_to_free += op.get_reserved_pushes();
	if (out_ops) {
	  boost::optional<OpRequestRef> mop = op.maybe_get_op();
	  if (mop)
	    out_ops->push_front(*mop);
	}
      }
      bool operator()(const pair<spg_t, PGQueueable> &op) {
	if (op.first == pgid) {
	  accumulate(op.second);
	  return true;
	} else {
	  return false;
	}
      }
      uint64_t get_reserved_pushes_to_free() const {
	return reserved_pushes_to_free;
      }
    };
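    /// e.g. (illustrative) draining every queued item for one pg while
    /// collecting its ops in submission order: Pred both filters and
    /// accumulates, and because items are visited back to front, the
    /// push_front() in accumulate() rebuilds the original order.
    ///
    ///   list<OpRequestRef> ops;
    ///   Pred pred(pgid, &ops);
    ///   // apply pred to each queued pair<spg_t,PGQueueable>, removing
    ///   // those for which it returns true ...
    ///   uint64_t pushes = pred.get_reserved_pushes_to_free();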
    bool is_shard_empty(uint32_t thread_index) override {
      uint32_t shard_index = thread_index % num_shards;
      ShardData* sdata = shard_list[shard_index];
      assert(NULL != sdata);
      Mutex::Locker l(sdata->sdata_op_ordering_lock);
      return sdata->pqueue->empty();
    }
  } op_shardedwq;

  void enqueue_op(spg_t pg, OpRequestRef& op, epoch_t epoch);
  void dequeue_op(
    PGRef pg, OpRequestRef op,
    ThreadPool::TPHandle &handle);
  // -- peering queue --
  struct PeeringWQ : public ThreadPool::BatchWorkQueue<PG> {
    list<PG*> peering_queue;
    OSD *osd;
    set<PG*> in_use;
    PeeringWQ(OSD *o, time_t ti, time_t si, ThreadPool *tp)
      : ThreadPool::BatchWorkQueue<PG>(
	"OSD::PeeringWQ", ti, si, tp), osd(o) {}

    void _dequeue(PG *pg) override {
      for (list<PG*>::iterator i = peering_queue.begin();
	   i != peering_queue.end();
	   ) {
	if (*i == pg) {
	  peering_queue.erase(i++);
	  pg->put("PeeringWQ");
	} else {
	  ++i;
	}
      }
    }
    bool _enqueue(PG *pg) override {
      pg->get("PeeringWQ");
      peering_queue.push_back(pg);
      return true;
    }
    bool _empty() override {
      return peering_queue.empty();
    }
    void _dequeue(list<PG*> *out) override;
    void _process(
      const list<PG*> &pgs,
      ThreadPool::TPHandle &handle) override {
      assert(!pgs.empty());
      osd->process_peering_events(pgs, handle);
      for (list<PG*>::const_iterator i = pgs.begin();
	   i != pgs.end();
	   ++i) {
	(*i)->put("PeeringWQ");
      }
    }
    void _process_finish(const list<PG*> &pgs) override {
      for (list<PG*>::const_iterator i = pgs.begin();
	   i != pgs.end();
	   ++i) {
	in_use.erase(*i);
      }
    }
    void _clear() override {
      assert(peering_queue.empty());
    }
  } peering_wq;

  void process_peering_events(
    const list<PG*> &pg,
    ThreadPool::TPHandle &handle);

  friend class PrimaryLogPG;
  OSDMapRef get_osdmap() {
    return osdmap;
  }
  epoch_t get_osdmap_epoch() const {
    return osdmap ? osdmap->get_epoch() : 0;
  }

  utime_t had_map_since;

  list<OpRequestRef> waiting_for_osdmap;
  deque<utime_t> osd_markdown_log;

  friend struct send_map_on_destruct;

  void wait_for_new_map(OpRequestRef op);
  void handle_osd_map(class MOSDMap *m);
  void _committed_osd_maps(epoch_t first, epoch_t last, class MOSDMap *m);
  void trim_maps(epoch_t oldest, int nreceived, bool skip_maps);
  void note_down_osd(int osd);
  void note_up_osd(int osd);
  friend class C_OnMapCommit;

  bool advance_pg(
    epoch_t advance_to, PG *pg,
    ThreadPool::TPHandle &handle,
    PG::RecoveryCtx *rctx,
    set<PGRef> *split_pgs
    );
  void activate_map();

  // osd map cache (past osd maps)
  OSDMapRef get_map(epoch_t e) {
    return service.get_map(e);
  }
  OSDMapRef add_map(OSDMap *o) {
    return service.add_map(o);
  }
  void add_map_bl(epoch_t e, bufferlist& bl) {
    return service.add_map_bl(e, bl);
  }
  void pin_map_bl(epoch_t e, bufferlist &bl) {
    return service.pin_map_bl(e, bl);
  }
  bool get_map_bl(epoch_t e, bufferlist& bl) {
    return service.get_map_bl(e, bl);
  }
  void add_map_inc_bl(epoch_t e, bufferlist& bl) {
    return service.add_map_inc_bl(e, bl);
  }
  void pin_map_inc_bl(epoch_t e, bufferlist &bl) {
    return service.pin_map_inc_bl(e, bl);
  }
  // -- placement groups --
  RWLock pg_map_lock; // this lock orders *above* individual PG _locks
  ceph::unordered_map<spg_t, PG*> pg_map; // protected by pg_map lock

  map<spg_t, list<PG::CephPeeringEvtRef> > peering_wait_for_split;
  PGRecoveryStats pg_recovery_stats;

  PGPool _get_pool(int id, OSDMapRef createmap);

  PG   *_lookup_lock_pg_with_map_lock_held(spg_t pgid);
  PG   *_lookup_lock_pg(spg_t pgid);

  PG   *lookup_lock_pg(spg_t pgid);

  int get_num_pgs() {
    RWLock::RLocker l(pg_map_lock);
    return pg_map.size();
  }

  PG   *_open_lock_pg(OSDMapRef createmap,
		      spg_t pg, bool no_lockdep_check=false);
  enum res_result {
    RES_PARENT,    // resurrected a parent
    RES_SELF,      // resurrected self
    RES_NONE       // nothing relevant deleting
  };
  res_result _try_resurrect_pg(
    OSDMapRef curmap, spg_t pgid, spg_t *resurrected, PGRef *old_pg_state);

  PG   *_create_lock_pg(
    OSDMapRef createmap,
    spg_t pgid,
    bool hold_map_lock,
    bool backfill,
    int role,
    vector<int>& up, int up_primary,
    vector<int>& acting, int acting_primary,
    pg_history_t history,
    const PastIntervals& pi,
    ObjectStore::Transaction& t);

  PG* _make_pg(OSDMapRef createmap, spg_t pgid);
  void add_newly_split_pg(PG *pg,
			  PG::RecoveryCtx *rctx);

  int handle_pg_peering_evt(
    spg_t pgid,
    const pg_history_t& orig_history,
    const PastIntervals& pi,
    epoch_t epoch,
    PG::CephPeeringEvtRef evt);

  void build_past_intervals_parallel();

  /// build initial pg history and intervals on create
  void build_initial_pg_history(
    spg_t pgid,
    epoch_t created,
    utime_t created_stamp,
    pg_history_t *h,
    PastIntervals *pi);

  /// project pg history from epoch `from` to now
  bool project_pg_history(
    spg_t pgid, pg_history_t& h, epoch_t from,
    const vector<int>& lastup,
    int lastupprimary,
    const vector<int>& lastacting,
    int lastactingprimary
    ); ///< @return false if there was a map gap between from and now

  // this must be called with pg->lock held on any pg addition to pg_map
  void wake_pg_waiters(PGRef pg) {
    assert(pg->is_locked());
    op_shardedwq.wake_pg_waiters(pg->info.pgid);
  }
  epoch_t last_pg_create_epoch;

  void handle_pg_create(OpRequestRef op);

  void split_pgs(
    PG *parent,
    const set<spg_t> &childpgids, set<PGRef> *out_pgs,
    OSDMapRef curmap,
    OSDMapRef nextmap,
    PG::RecoveryCtx *rctx);
  // == monitor interaction ==
  Mutex mon_report_lock;
  utime_t last_mon_report;
  utime_t last_pg_stats_sent;

  /* if our monitor dies, we want to notice it and reconnect.
   *  So we keep track of when it last acked our stat updates,
   *  and if too much time passes (and we've been sending
   *  more updates) then we can call it dead and reconnect
   *  elsewhere.
   */
  utime_t last_pg_stats_ack;
  float stats_ack_timeout;
  set<uint64_t> outstanding_pg_stats; // how many stat updates haven't been acked yet

  void _got_mon_epochs(epoch_t oldest, epoch_t newest);
  void _preboot(epoch_t oldest, epoch_t newest);
  void _collect_metadata(map<string,string> *pmeta);

  void start_waiting_for_healthy();

  void send_full_update();

  friend struct C_OSD_GetVersion;

  // -- alive --
  epoch_t up_thru_wanted;

  void queue_want_up_thru(epoch_t want);

  // -- full map requests --
  epoch_t requested_full_first, requested_full_last;

  void request_full_map(epoch_t first, epoch_t last);
  void rerequest_full_maps() {
    epoch_t first = requested_full_first;
    epoch_t last = requested_full_last;
    requested_full_first = 0;
    requested_full_last = 0;
    request_full_map(first, last);
  }
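  // rerequest_full_maps() above re-arms the full-map request, e.g. after a
  // mon connection reset: the recorded [requested_full_first,
  // requested_full_last] range is cleared first so request_full_map() treats
  // the whole range as a fresh request rather than one already outstanding.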
  void got_full_map(epoch_t e);

  // -- failures --
  map<int,utime_t> failure_queue;
  map<int,pair<utime_t,entity_inst_t> > failure_pending;

  void requeue_failures();
  void send_failures();
  void send_still_alive(epoch_t epoch, const entity_inst_t &i);

  // -- pg stats --
  Mutex pg_stat_queue_lock;
  Cond pg_stat_queue_cond;
  xlist<PG*> pg_stat_queue;
  bool osd_stat_updated;
  uint64_t pg_stat_tid, pg_stat_tid_flushed;

  void send_pg_stats(const utime_t &now);
  void handle_pg_stats_ack(class MPGStatsAck *ack);
  void flush_pg_stats();

  ceph::coarse_mono_clock::time_point last_sent_beacon;
  Mutex min_last_epoch_clean_lock{"OSD::min_last_epoch_clean_lock"};
  epoch_t min_last_epoch_clean = 0;
  // which pgs were scanned for min_lec
  std::vector<pg_t> min_last_epoch_clean_pgs;
  void send_beacon(const ceph::coarse_mono_clock::time_point& now);

  void pg_stat_queue_enqueue(PG *pg) {
    pg_stat_queue_lock.Lock();
    if (pg->is_primary() && !pg->stat_queue_item.is_on_list()) {
      pg->get("pg_stat_queue");
      pg_stat_queue.push_back(&pg->stat_queue_item);
    }
    osd_stat_updated = true;
    pg_stat_queue_lock.Unlock();
  }
  void pg_stat_queue_dequeue(PG *pg) {
    pg_stat_queue_lock.Lock();
    if (pg->stat_queue_item.remove_myself())
      pg->put("pg_stat_queue");
    pg_stat_queue_lock.Unlock();
  }
  void clear_pg_stat_queue() {
    pg_stat_queue_lock.Lock();
    while (!pg_stat_queue.empty()) {
      PG *pg = pg_stat_queue.front();
      pg_stat_queue.pop_front();
      pg->put("pg_stat_queue");
    }
    pg_stat_queue_lock.Unlock();
  }
  void clear_outstanding_pg_stats() {
    Mutex::Locker l(pg_stat_queue_lock);
    outstanding_pg_stats.clear();
  }

  ceph_tid_t get_tid() {
    return service.get_tid();
  }
  // -- generic pg peering --
  PG::RecoveryCtx create_context();
  void dispatch_context(PG::RecoveryCtx &ctx, PG *pg, OSDMapRef curmap,
			ThreadPool::TPHandle *handle = NULL);
  void dispatch_context_transaction(PG::RecoveryCtx &ctx, PG *pg,
				    ThreadPool::TPHandle *handle = NULL);
  void do_notifies(map<int,
		       vector<pair<pg_notify_t, PastIntervals> > >&
		       notify_list,
		   OSDMapRef map);
  void do_queries(map<int, map<spg_t,pg_query_t> >& query_map,
		  OSDMapRef map);
  void do_infos(map<int,
		    vector<pair<pg_notify_t, PastIntervals> > >& info_map,
		OSDMapRef map);

  bool require_mon_peer(const Message *m);
  bool require_mon_or_mgr_peer(const Message *m);
  bool require_osd_peer(const Message *m);
  /**
   * Verifies that we were alive in the given epoch, and that
   * we still are.
   */
  bool require_self_aliveness(const Message *m, epoch_t alive_since);
  /**
   * Verifies that the OSD who sent the given op has the same
   * address as in the given map.
   * @pre op was sent by an OSD using the cluster messenger
   */
  bool require_same_peer_instance(const Message *m, OSDMapRef& map,
				  bool is_fast_dispatch);

  bool require_same_or_newer_map(OpRequestRef& op, epoch_t e,
				 bool is_fast_dispatch);

  void handle_pg_query(OpRequestRef op);
  void handle_pg_notify(OpRequestRef op);
  void handle_pg_log(OpRequestRef op);
  void handle_pg_info(OpRequestRef op);
  void handle_pg_trim(OpRequestRef op);

  void handle_pg_backfill_reserve(OpRequestRef op);
  void handle_pg_recovery_reserve(OpRequestRef op);

  void handle_force_recovery(Message *m);

  void handle_pg_remove(OpRequestRef op);
  void _remove_pg(PG *pg);
  // -- commands --
  struct Command {
    vector<string> cmd;
    ceph_tid_t tid;
    bufferlist indata;
    ConnectionRef con;
    Command(vector<string>& c, ceph_tid_t t, bufferlist& bl, Connection *co)
      : cmd(c), tid(t), indata(bl), con(co) {}
  };
  list<Command*> command_queue;
  struct CommandWQ : public ThreadPool::WorkQueue<Command> {
    OSD *osd;
    CommandWQ(OSD *o, time_t ti, time_t si, ThreadPool *tp)
      : ThreadPool::WorkQueue<Command>("OSD::CommandWQ", ti, si, tp), osd(o) {}

    bool _empty() override {
      return osd->command_queue.empty();
    }
    bool _enqueue(Command *c) override {
      osd->command_queue.push_back(c);
      return true;
    }
    void _dequeue(Command *pg) override {
      ceph_abort();
    }
    Command *_dequeue() override {
      if (osd->command_queue.empty())
	return NULL;
      Command *c = osd->command_queue.front();
      osd->command_queue.pop_front();
      return c;
    }
    void _process(Command *c, ThreadPool::TPHandle &) override {
      osd->osd_lock.Lock();
      if (osd->is_stopping()) {
	osd->osd_lock.Unlock();
	delete c;
	return;
      }
      osd->do_command(c->con.get(), c->tid, c->cmd, c->indata);
      osd->osd_lock.Unlock();
      delete c;
    }
    void _clear() override {
      while (!osd->command_queue.empty()) {
	Command *c = osd->command_queue.front();
	osd->command_queue.pop_front();
	delete c;
      }
    }
  } command_wq;

  void handle_command(class MMonCommand *m);
  void handle_command(class MCommand *m);
  void do_command(Connection *con, ceph_tid_t tid, vector<string>& cmd, bufferlist& data);
  // -- pg recovery --
  void do_recovery(PG *pg, epoch_t epoch_queued, uint64_t pushes_reserved,
		   ThreadPool::TPHandle &handle);

  // -- scrubbing --
  bool scrub_random_backoff();
  bool scrub_load_below_threshold();
  bool scrub_time_permit(utime_t now);

  // -- removing --
  struct RemoveWQ :
    public ThreadPool::WorkQueueVal<pair<PGRef, DeletingStateRef> > {
    CephContext* cct;
    ObjectStore *&store;
    list<pair<PGRef, DeletingStateRef> > remove_queue;
    RemoveWQ(CephContext* cct, ObjectStore *&o, time_t ti, time_t si,
	     ThreadPool *tp)
      : ThreadPool::WorkQueueVal<pair<PGRef, DeletingStateRef> >(
	"OSD::RemoveWQ", ti, si, tp), cct(cct), store(o) {}

    bool _empty() override {
      return remove_queue.empty();
    }
    void _enqueue(pair<PGRef, DeletingStateRef> item) override {
      remove_queue.push_back(item);
    }
    void _enqueue_front(pair<PGRef, DeletingStateRef> item) override {
      remove_queue.push_front(item);
    }
    bool _dequeue(pair<PGRef, DeletingStateRef> item) {
      ceph_abort();
    }
    pair<PGRef, DeletingStateRef> _dequeue() override {
      assert(!remove_queue.empty());
      pair<PGRef, DeletingStateRef> item = remove_queue.front();
      remove_queue.pop_front();
      return item;
    }
    void _process(pair<PGRef, DeletingStateRef>,
		  ThreadPool::TPHandle &) override;
    void _clear() override {
      remove_queue.clear();
    }
  } remove_wq;
  bool ms_can_fast_dispatch_any() const override { return true; }
  bool ms_can_fast_dispatch(const Message *m) const override {
    switch (m->get_type()) {
    case CEPH_MSG_OSD_OP:
    case CEPH_MSG_OSD_BACKOFF:
    case MSG_OSD_SUBOPREPLY:
    case MSG_OSD_REPOPREPLY:
    case MSG_OSD_PG_PUSH:
    case MSG_OSD_PG_PULL:
    case MSG_OSD_PG_PUSH_REPLY:
    case MSG_OSD_PG_SCAN:
    case MSG_OSD_PG_BACKFILL:
    case MSG_OSD_PG_BACKFILL_REMOVE:
    case MSG_OSD_EC_WRITE:
    case MSG_OSD_EC_WRITE_REPLY:
    case MSG_OSD_EC_READ:
    case MSG_OSD_EC_READ_REPLY:
    case MSG_OSD_SCRUB_RESERVE:
    case MSG_OSD_REP_SCRUB:
    case MSG_OSD_REP_SCRUBMAP:
    case MSG_OSD_PG_UPDATE_LOG_MISSING:
    case MSG_OSD_PG_UPDATE_LOG_MISSING_REPLY:
    case MSG_OSD_PG_RECOVERY_DELETE:
    case MSG_OSD_PG_RECOVERY_DELETE_REPLY:
      return true;
    default:
      return false;
    }
  }
  void ms_fast_dispatch(Message *m) override;
  void ms_fast_preprocess(Message *m) override;
  bool ms_dispatch(Message *m) override;
  bool ms_get_authorizer(int dest_type, AuthAuthorizer **authorizer, bool force_new) override;
  bool ms_verify_authorizer(Connection *con, int peer_type,
			    int protocol, bufferlist& authorizer, bufferlist& authorizer_reply,
			    bool& isvalid, CryptoKey& session_key) override;
  void ms_handle_connect(Connection *con) override;
  void ms_handle_fast_connect(Connection *con) override;
  void ms_handle_fast_accept(Connection *con) override;
  bool ms_handle_reset(Connection *con) override;
  void ms_handle_remote_reset(Connection *con) override {}
  bool ms_handle_refused(Connection *con) override;
  io_queue get_io_queue() const {
    if (cct->_conf->osd_op_queue == "debug_random") {
      static io_queue index_lookup[] = { io_queue::prioritized,
					 io_queue::weightedpriority,
					 io_queue::mclock_opclass,
					 io_queue::mclock_client };
      unsigned which = rand() % (sizeof(index_lookup) / sizeof(index_lookup[0]));
      return index_lookup[which];
    } else if (cct->_conf->osd_op_queue == "prioritized") {
      return io_queue::prioritized;
    } else if (cct->_conf->osd_op_queue == "mclock_opclass") {
      return io_queue::mclock_opclass;
    } else if (cct->_conf->osd_op_queue == "mclock_client") {
      return io_queue::mclock_client;
    } else {
      // default / catch-all is 'wpq'
      return io_queue::weightedpriority;
    }
  }

  unsigned int get_io_prio_cut() const {
    if (cct->_conf->osd_op_queue_cut_off == "debug_random") {
      return (rand() % 2 < 1) ? CEPH_MSG_PRIO_HIGH : CEPH_MSG_PRIO_LOW;
    } else if (cct->_conf->osd_op_queue_cut_off == "high") {
      return CEPH_MSG_PRIO_HIGH;
    } else {
      // default / catch-all is 'low'
      return CEPH_MSG_PRIO_LOW;
    }
  }
  /* internal and external can point to the same messenger; they will still
   * be cleaned up properly */
  OSD(CephContext *cct_,
      ObjectStore *store_,
      int id,
      Messenger *internal,
      Messenger *external,
      Messenger *hb_front_client,
      Messenger *hb_back_client,
      Messenger *hb_front_server,
      Messenger *hb_back_server,
      Messenger *osdc_messenger,
      MonClient *mc, const std::string &dev, const std::string &jdev);
  ~OSD() override;

  static int mkfs(CephContext *cct, ObjectStore *store,
		  const string& dev,
		  uuid_d fsid, int whoami);

  /* remove any non-user xattrs from a map of them */
  void filter_xattrs(map<string, bufferptr>& attrs) {
    for (map<string, bufferptr>::iterator iter = attrs.begin();
	 iter != attrs.end();
	 ) {
      if (('_' != iter->first.at(0)) || (iter->first.size() == 1))
	attrs.erase(iter++);
      else
	++iter;
    }
  }
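  // Note the erase(iter++) idiom above: the iterator advances before erase()
  // invalidates it.  e.g. from {"_user", "ceph._", "_"} this keeps "_user"
  // (leading '_' and length > 1) and erases the other two.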
  int mon_cmd_maybe_osd_create(string &cmd);
  int update_crush_device_class();
  int update_crush_location();

  static int write_meta(ObjectStore *store,
			uuid_d& cluster_fsid, uuid_d& osd_fsid, int whoami);

  void handle_pg_scrub(struct MOSDScrub *m, PG* pg);
  void handle_scrub(struct MOSDScrub *m);
  void handle_osd_ping(class MOSDPing *m);

  int init_op_flags(OpRequestRef& op);

  int get_num_op_shards();
  int get_num_op_threads();

  float get_osd_recovery_sleep();

  static int peek_meta(ObjectStore *store, string& magic,
		       uuid_d& cluster_fsid, uuid_d& osd_fsid, int& whoami);

  int enable_disable_fuse(bool stop);

  void suicide(int exitcode);

  void handle_signal(int signum);

  /// check if we can throw out op from a disconnected client
  static bool op_is_discardable(const MOSDOp *m);

  OSDService service;
  friend class OSDService;
};

std::ostream& operator<<(std::ostream& out, const OSD::io_queue& q);

// compatibility of the executable
extern const CompatSet::Feature ceph_osd_feature_compat[];
extern const CompatSet::Feature ceph_osd_feature_ro_compat[];
extern const CompatSet::Feature ceph_osd_feature_incompat[];

#endif // CEPH_OSD_H