// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */

#ifndef CEPH_OSD_H
#define CEPH_OSD_H

#include "PG.h"

#include "msg/Dispatcher.h"

#include "common/async/context_pool.h"
#include "common/Timer.h"
#include "common/WorkQueue.h"
#include "common/AsyncReserver.h"
#include "common/ceph_context.h"
#include "common/config_cacher.h"
#include "common/zipkin_trace.h"
#include "common/ceph_timer.h"

#include "mgr/MgrClient.h"

#include "os/ObjectStore.h"

#include "include/CompatSet.h"
#include "include/common_fwd.h"

#include "OpRequest.h"
#include "Session.h"

#include "osd/scheduler/OpScheduler.h"

#include <atomic>
#include <map>
#include <memory>
#include <string>

#include "include/unordered_map.h"

#include "common/shared_cache.hpp"
#include "common/simple_cache.hpp"
#include "messages/MOSDOp.h"
#include "common/EventTrace.h"
#include "osd/osd_perf_counters.h"
#include "common/Finisher.h"

#define CEPH_OSD_PROTOCOL 10 /* cluster internal */

/*

  lock ordering for pg map

    PG::lock
      ShardData::lock
        OSD::pg_map_lock

 */
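
// A minimal sketch (illustrative, not code from this header) of honoring
// the ordering above: always take PG::lock before any shard or pg_map
// lock, and never the reverse, or an ABBA deadlock becomes possible:
//
//   pg->lock();                              // PG::lock first
//   {
//     std::lock_guard l(shard->shard_lock);  // ShardData::lock second
//     // ... pg_map updates guarded by OSD::pg_map_lock go last ...
//   }
//   pg->unlock();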

class Messenger;
class Message;
class MonClient;
class ObjectStore;
class FuseStore;
class OSDMap;
class MLog;
class Objecter;
class KeyStore;

class Watch;
class PrimaryLogPG;

class TestOpsSocketHook;
struct C_FinishSplits;
struct C_OpenPGs;
class LogChannel;

class MOSDPGCreate2;
class MOSDPGQuery;
class MOSDPGNotify;
class MOSDPGInfo;
class MOSDPGRemove;
class MOSDForceRecovery;
class MMonGetPurgedSnapsReply;

class OSD;

class OSDService {
  using OpSchedulerItem = ceph::osd::scheduler::OpSchedulerItem;
public:
  OSD *osd;
  CephContext *cct;
  ObjectStore::CollectionHandle meta_ch;
  const int whoami;
  ObjectStore *&store;
  LogClient &log_client;
  LogChannelRef clog;
  PGRecoveryStats &pg_recovery_stats;
private:
  Messenger *&cluster_messenger;
  Messenger *&client_messenger;
public:
  PerfCounters *&logger;
  PerfCounters *&recoverystate_perf;
  MonClient *&monc;

  md_config_cacher_t<Option::size_t> osd_max_object_size;
  md_config_cacher_t<bool> osd_skip_data_digest;

  void enqueue_back(OpSchedulerItem&& qi);
  void enqueue_front(OpSchedulerItem&& qi);

  void maybe_inject_dispatch_delay() {
    if (g_conf()->osd_debug_inject_dispatch_delay_probability > 0) {
      if (rand() % 10000 <
          g_conf()->osd_debug_inject_dispatch_delay_probability * 10000) {
        utime_t t;
        t.set_from_double(g_conf()->osd_debug_inject_dispatch_delay_duration);
        t.sleep();
      }
    }
  }
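
  // Example (illustrative numbers): with
  // osd_debug_inject_dispatch_delay_probability = 0.1 and
  // osd_debug_inject_dispatch_delay_duration = 0.05, roughly 1 in 10
  // dispatches sleeps for 50 ms before proceeding.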

  ceph::signedspan get_mnow();

private:
  // -- superblock --
  ceph::mutex publish_lock, pre_publish_lock; // pre-publish orders before publish
  OSDSuperblock superblock;

public:
  OSDSuperblock get_superblock() {
    std::lock_guard l(publish_lock);
    return superblock;
  }
  void publish_superblock(const OSDSuperblock &block) {
    std::lock_guard l(publish_lock);
    superblock = block;
  }

  int get_nodeid() const { return whoami; }

  std::atomic<epoch_t> max_oldest_map;
private:
  OSDMapRef osdmap;

public:
  OSDMapRef get_osdmap() {
    std::lock_guard l(publish_lock);
    return osdmap;
  }
  epoch_t get_osdmap_epoch() {
    std::lock_guard l(publish_lock);
    return osdmap ? osdmap->get_epoch() : 0;
  }
  void publish_map(OSDMapRef map) {
    std::lock_guard l(publish_lock);
    osdmap = map;
  }

  /*
   * osdmap - current published map
   * next_osdmap - pre-published map that is about to be published.
   *
   * We use the next_osdmap to send messages and initiate connections,
   * but only if the target is the same instance as the one in the map
   * epoch the current user is working from (i.e., the result is
   * equivalent to what is in next_osdmap).
   *
   * This allows the helpers to start ignoring osds that are about to
   * go down, and let OSD::handle_osd_map()/note_down_osd() mark them
   * down, without worrying about reopening connections from threads
   * working from old maps.
   */
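
  // A minimal usage sketch (illustrative only): pair every
  // get_nextmap_reserved() with a release_map() so the reservation on
  // that epoch is dropped and await_reserved_maps() can make progress.
  //
  //   OSDMapRef nextmap = service.get_nextmap_reserved();
  //   if (nextmap->is_up(peer)) {
  //     // ... send to peer using an epoch consistent with nextmap ...
  //   }
  //   service.release_map(nextmap);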
private:
  OSDMapRef next_osdmap;
  ceph::condition_variable pre_publish_cond;
  int pre_publish_waiter = 0;

public:
  void pre_publish_map(OSDMapRef map) {
    std::lock_guard l(pre_publish_lock);
    next_osdmap = std::move(map);
  }

  void activate_map();
  /// map epochs reserved below
  std::map<epoch_t, unsigned> map_reservations;

  /// gets ref to next_osdmap and registers the epoch as reserved
  OSDMapRef get_nextmap_reserved() {
    std::lock_guard l(pre_publish_lock);
    epoch_t e = next_osdmap->get_epoch();
    std::map<epoch_t, unsigned>::iterator i =
      map_reservations.insert(std::make_pair(e, 0)).first;
    i->second++;
    return next_osdmap;
  }
  /// releases reservation on map
  void release_map(OSDMapRef osdmap) {
    std::lock_guard l(pre_publish_lock);
    std::map<epoch_t, unsigned>::iterator i =
      map_reservations.find(osdmap->get_epoch());
    ceph_assert(i != map_reservations.end());
    ceph_assert(i->second > 0);
    if (--(i->second) == 0) {
      map_reservations.erase(i);
    }
    if (pre_publish_waiter) {
      pre_publish_cond.notify_all();
    }
  }
  /// blocks until there are no reserved maps prior to next_osdmap
  void await_reserved_maps() {
    std::unique_lock l{pre_publish_lock};
    ceph_assert(next_osdmap);
    pre_publish_waiter++;
    pre_publish_cond.wait(l, [this] {
      auto i = map_reservations.cbegin();
      return (i == map_reservations.cend() ||
              i->first >= next_osdmap->get_epoch());
    });
    pre_publish_waiter--;
  }
  OSDMapRef get_next_osdmap() {
    std::lock_guard l(pre_publish_lock);
    return next_osdmap;
  }

  void maybe_share_map(Connection *con,
                       const OSDMapRef& osdmap,
                       epoch_t peer_epoch_lb=0);

  void send_map(class MOSDMap *m, Connection *con);
  void send_incremental_map(epoch_t since, Connection *con,
                            const OSDMapRef& osdmap);
  MOSDMap *build_incremental_map_msg(epoch_t from, epoch_t to,
                                     OSDSuperblock& superblock);

  ConnectionRef get_con_osd_cluster(int peer, epoch_t from_epoch);
  std::pair<ConnectionRef,ConnectionRef> get_con_osd_hb(int peer, epoch_t from_epoch);  // (back, front)
  void send_message_osd_cluster(int peer, Message *m, epoch_t from_epoch);
  void send_message_osd_cluster(std::vector<std::pair<int, Message*>>& messages, epoch_t from_epoch);
  void send_message_osd_cluster(MessageRef m, Connection *con) {
    con->send_message2(std::move(m));
  }
  void send_message_osd_cluster(Message *m, const ConnectionRef& con) {
    con->send_message(m);
  }
  void send_message_osd_client(Message *m, const ConnectionRef& con) {
    con->send_message(m);
  }
  entity_name_t get_cluster_msgr_name() const;

private:
  // -- scrub scheduling --
  ceph::mutex sched_scrub_lock = ceph::make_mutex("OSDService::sched_scrub_lock");
  int scrubs_local;
  int scrubs_remote;

public:
  struct ScrubJob {
    CephContext* cct;
    /// pg to be scrubbed
    spg_t pgid;
    /// the time the scrub is scheduled for; the scrub may still be delayed
    /// if system load is too high or the time falls outside the configured
    /// scrub hours
    utime_t sched_time;
    /// the hard upper bound of scrub time
    utime_t deadline;
    ScrubJob() : cct(nullptr) {}
    explicit ScrubJob(CephContext* cct, const spg_t& pg,
                      const utime_t& timestamp,
                      double pool_scrub_min_interval = 0,
                      double pool_scrub_max_interval = 0, bool must = true);
    /// order the jobs by sched_time
    bool operator<(const ScrubJob& rhs) const;
  };
  std::set<ScrubJob> sched_scrub_pg;

  /// @returns the scrub_reg_stamp used for unregistering the scrub job
  utime_t reg_pg_scrub(spg_t pgid,
                       utime_t t,
                       double pool_scrub_min_interval,
                       double pool_scrub_max_interval,
                       bool must) {
    ScrubJob scrub_job(cct, pgid, t, pool_scrub_min_interval, pool_scrub_max_interval,
                       must);
    std::lock_guard l(OSDService::sched_scrub_lock);
    sched_scrub_pg.insert(scrub_job);
    return scrub_job.sched_time;
  }

  void unreg_pg_scrub(spg_t pgid, utime_t t) {
    std::lock_guard l(sched_scrub_lock);
    size_t removed = sched_scrub_pg.erase(ScrubJob(cct, pgid, t));
    ceph_assert(removed);
  }

  bool first_scrub_stamp(ScrubJob *out) {
    std::lock_guard l(sched_scrub_lock);
    if (sched_scrub_pg.empty())
      return false;
    std::set<ScrubJob>::iterator iter = sched_scrub_pg.begin();
    *out = *iter;
    return true;
  }
  bool next_scrub_stamp(const ScrubJob& next,
                        ScrubJob *out) {
    std::lock_guard l(sched_scrub_lock);
    if (sched_scrub_pg.empty())
      return false;
    std::set<ScrubJob>::const_iterator iter = sched_scrub_pg.upper_bound(next);
    if (iter == sched_scrub_pg.cend())
      return false;
    *out = *iter;
    return true;
  }
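
  // Illustrative sketch (not part of this class): walking the scrub
  // queue in sched_time order with the first/next pair above.
  //
  //   OSDService::ScrubJob job;
  //   for (bool ok = svc.first_scrub_stamp(&job);
  //        ok;
  //        ok = svc.next_scrub_stamp(job, &job)) {
  //     // ... inspect job.pgid / job.sched_time / job.deadline ...
  //   }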

  void dumps_scrub(ceph::Formatter* f);

  bool can_inc_scrubs();
  bool inc_scrubs_local();
  void dec_scrubs_local();
  bool inc_scrubs_remote();
  void dec_scrubs_remote();
  void dump_scrub_reservations(ceph::Formatter *f);

  void reply_op_error(OpRequestRef op, int err);
  void reply_op_error(OpRequestRef op, int err, eversion_t v, version_t uv,
                      std::vector<pg_log_op_return_item_t> op_returns);
  void handle_misdirected_op(PG *pg, OpRequestRef op);


private:
  // -- agent shared state --
  ceph::mutex agent_lock = ceph::make_mutex("OSDService::agent_lock");
  ceph::condition_variable agent_cond;
  std::map<uint64_t, std::set<PGRef> > agent_queue;
  std::set<PGRef>::iterator agent_queue_pos;
  bool agent_valid_iterator;
  int agent_ops;
  int flush_mode_high_count; ///< once any pg is in FLUSH_MODE_HIGH, flush objects at high speed
  std::set<hobject_t> agent_oids;
  bool agent_active;
  struct AgentThread : public Thread {
    OSDService *osd;
    explicit AgentThread(OSDService *o) : osd(o) {}
    void *entry() override {
      osd->agent_entry();
      return NULL;
    }
  } agent_thread;
  bool agent_stop_flag;
  ceph::mutex agent_timer_lock = ceph::make_mutex("OSDService::agent_timer_lock");
  SafeTimer agent_timer;

public:
  void agent_entry();
  void agent_stop();

  void _enqueue(PG *pg, uint64_t priority) {
    if (!agent_queue.empty() &&
        agent_queue.rbegin()->first < priority)
      agent_valid_iterator = false;  // inserting higher-priority queue
    std::set<PGRef>& nq = agent_queue[priority];
    if (nq.empty())
      agent_cond.notify_all();
    nq.insert(pg);
  }

  void _dequeue(PG *pg, uint64_t old_priority) {
    std::set<PGRef>& oq = agent_queue[old_priority];
    std::set<PGRef>::iterator p = oq.find(pg);
    ceph_assert(p != oq.end());
    if (p == agent_queue_pos)
      ++agent_queue_pos;
    oq.erase(p);
    if (oq.empty()) {
      if (agent_queue.rbegin()->first == old_priority)
        agent_valid_iterator = false;
      agent_queue.erase(old_priority);
    }
  }

  /// enable agent for a pg
  void agent_enable_pg(PG *pg, uint64_t priority) {
    std::lock_guard l(agent_lock);
    _enqueue(pg, priority);
  }

  /// adjust priority for an enabled pg
  void agent_adjust_pg(PG *pg, uint64_t old_priority, uint64_t new_priority) {
    std::lock_guard l(agent_lock);
    ceph_assert(new_priority != old_priority);
    _enqueue(pg, new_priority);
    _dequeue(pg, old_priority);
  }

  /// disable agent for a pg
  void agent_disable_pg(PG *pg, uint64_t old_priority) {
    std::lock_guard l(agent_lock);
    _dequeue(pg, old_priority);
  }

  /// note start of an async (evict) op
  void agent_start_evict_op() {
    std::lock_guard l(agent_lock);
    ++agent_ops;
  }

  /// note finish or cancellation of an async (evict) op
  void agent_finish_evict_op() {
    std::lock_guard l(agent_lock);
    ceph_assert(agent_ops > 0);
    --agent_ops;
    agent_cond.notify_all();
  }

  /// note start of an async (flush) op
  void agent_start_op(const hobject_t& oid) {
    std::lock_guard l(agent_lock);
    ++agent_ops;
    ceph_assert(agent_oids.count(oid) == 0);
    agent_oids.insert(oid);
  }

  /// note finish or cancellation of an async (flush) op
  void agent_finish_op(const hobject_t& oid) {
    std::lock_guard l(agent_lock);
    ceph_assert(agent_ops > 0);
    --agent_ops;
    ceph_assert(agent_oids.count(oid) == 1);
    agent_oids.erase(oid);
    agent_cond.notify_all();
  }

  /// check if we are operating on an object
  bool agent_is_active_oid(const hobject_t& oid) {
    std::lock_guard l(agent_lock);
    return agent_oids.count(oid);
  }

  /// get count of active agent ops
  int agent_get_num_ops() {
    std::lock_guard l(agent_lock);
    return agent_ops;
  }

  void agent_inc_high_count() {
    std::lock_guard l(agent_lock);
    flush_mode_high_count++;
  }

  void agent_dec_high_count() {
    std::lock_guard l(agent_lock);
    flush_mode_high_count--;
  }

private:
  /// throttle promotion attempts
  std::atomic<unsigned int> promote_probability_millis{1000}; ///< probability in parts per thousand; a single word
  PromoteCounter promote_counter;
  utime_t last_recalibrate;
  unsigned long promote_max_objects, promote_max_bytes;

public:
  bool promote_throttle() {
    // NOTE: lockless! we rely on the probability being a single word.
    promote_counter.attempt();
    if ((unsigned)rand() % 1000 > promote_probability_millis)
      return true;  // yes throttle (no promote)
    if (promote_max_objects &&
        promote_counter.objects > promote_max_objects)
      return true;  // yes throttle
    if (promote_max_bytes &&
        promote_counter.bytes > promote_max_bytes)
      return true;  // yes throttle
    return false;   // no throttle (promote)
  }
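
  // Example (illustrative numbers): with promote_probability_millis == 250,
  // rand() % 1000 exceeds 250 about 75% of the time, so roughly one in four
  // promotion attempts proceeds; promote_throttle_recalibrate() periodically
  // adjusts the probability.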
  void promote_finish(uint64_t bytes) {
    promote_counter.finish(bytes);
  }
  void promote_throttle_recalibrate();
  unsigned get_num_shards() const {
    return m_objecter_finishers;
  }
  Finisher* get_objecter_finisher(int shard) {
    return objecter_finishers[shard].get();
  }

  // -- Objecter, for tiering reads/writes from/to other OSDs --
  ceph::async::io_context_pool& poolctx;
  std::unique_ptr<Objecter> objecter;
  int m_objecter_finishers;
  std::vector<std::unique_ptr<Finisher>> objecter_finishers;

  // -- Watch --
  ceph::mutex watch_lock = ceph::make_mutex("OSDService::watch_lock");
  SafeTimer watch_timer;
  uint64_t next_notif_id;
  uint64_t get_next_id(epoch_t cur_epoch) {
    std::lock_guard l(watch_lock);
    return (((uint64_t)cur_epoch) << 32) | ((uint64_t)(next_notif_id++));
  }
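
  // Layout sketch of the 64-bit id built above (illustrative values):
  // the map epoch occupies the high 32 bits and a per-OSD counter the
  // low 32, keeping ids unique and roughly time-ordered, e.g.
  //
  //   cur_epoch = 0x1234, next_notif_id = 7
  //   id = (0x1234ULL << 32) | 7  ==  0x0000'1234'0000'0007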

  // -- Recovery/Backfill Request Scheduling --
  ceph::mutex recovery_request_lock = ceph::make_mutex("OSDService::recovery_request_lock");
  SafeTimer recovery_request_timer;

  // For async recovery sleep
  bool recovery_needs_sleep = true;
  ceph::real_clock::time_point recovery_schedule_time;

  // For recovery & scrub & snap
  ceph::mutex sleep_lock = ceph::make_mutex("OSDService::sleep_lock");
  SafeTimer sleep_timer;

  // -- tids --
  // for ops i issue
  std::atomic<unsigned int> last_tid{0};
  ceph_tid_t get_tid() {
    return (ceph_tid_t)last_tid++;
  }

  // -- backfill_reservation --
  Finisher reserver_finisher;
  AsyncReserver<spg_t, Finisher> local_reserver;
  AsyncReserver<spg_t, Finisher> remote_reserver;

  // -- pg merge --
  ceph::mutex merge_lock = ceph::make_mutex("OSD::merge_lock");
  std::map<pg_t,eversion_t> ready_to_merge_source;   // pg -> version
  std::map<pg_t,std::tuple<eversion_t,epoch_t,epoch_t>> ready_to_merge_target;  // pg -> (version,les,lec)
  std::set<pg_t> not_ready_to_merge_source;
  std::map<pg_t,pg_t> not_ready_to_merge_target;
  std::set<pg_t> sent_ready_to_merge_source;

  void set_ready_to_merge_source(PG *pg,
                                 eversion_t version);
  void set_ready_to_merge_target(PG *pg,
                                 eversion_t version,
                                 epoch_t last_epoch_started,
                                 epoch_t last_epoch_clean);
  void set_not_ready_to_merge_source(pg_t source);
  void set_not_ready_to_merge_target(pg_t target, pg_t source);
  void clear_ready_to_merge(PG *pg);
  void send_ready_to_merge();
  void _send_ready_to_merge();
  void clear_sent_ready_to_merge();
  void prune_sent_ready_to_merge(const OSDMapRef& osdmap);

  // -- pg_temp --
private:
  ceph::mutex pg_temp_lock = ceph::make_mutex("OSDService::pg_temp_lock");
  struct pg_temp_t {
    std::vector<int> acting;
    bool forced = false;
  };
  std::map<pg_t, pg_temp_t> pg_temp_wanted;
  std::map<pg_t, pg_temp_t> pg_temp_pending;
  void _sent_pg_temp();
  friend std::ostream& operator<<(std::ostream&, const pg_temp_t&);
public:
  void queue_want_pg_temp(pg_t pgid, const std::vector<int>& want,
                          bool forced = false);
  void remove_want_pg_temp(pg_t pgid);
  void requeue_pg_temp();
  void send_pg_temp();

  ceph::mutex pg_created_lock = ceph::make_mutex("OSDService::pg_created_lock");
  std::set<pg_t> pg_created;
  void send_pg_created(pg_t pgid);
  void prune_pg_created();
  void send_pg_created();

  AsyncReserver<spg_t, Finisher> snap_reserver;
  void queue_recovery_context(PG *pg, GenContext<ThreadPool::TPHandle&> *c);
  void queue_for_snap_trim(PG *pg);
  void queue_for_scrub(PG* pg, Scrub::scrub_prio_t with_priority);
  void queue_scrub_after_repair(PG* pg, Scrub::scrub_prio_t with_priority);

  /// queue the message (-> event) that all replicas reserved scrub resources for us
  void queue_for_scrub_granted(PG* pg, Scrub::scrub_prio_t with_priority);

  /// queue the message (-> event) that some replicas denied our scrub resources request
  void queue_for_scrub_denied(PG* pg, Scrub::scrub_prio_t with_priority);

  /// Signals either (a) the end of a sleep period, or (b) a recheck of the availability
  /// of the primary map being created by the backend.
  void queue_for_scrub_resched(PG* pg, Scrub::scrub_prio_t with_priority);

  /// Signals a change in the number of in-flight recovery writes
  void queue_scrub_pushes_update(PG* pg, Scrub::scrub_prio_t with_priority);

  /// Signals that all pending updates were applied
  void queue_scrub_applied_update(PG* pg, Scrub::scrub_prio_t with_priority);

  /// Signals that the block-range that was locked and prevented the scrubbing is now freed
  void queue_scrub_unblocking(PG* pg, Scrub::scrub_prio_t with_priority);

  /// Signals that all write OPs are done
  void queue_scrub_digest_update(PG* pg, Scrub::scrub_prio_t with_priority);

  /// Signals that we (the Primary) got all waited-for scrub-maps from our replicas
  void queue_scrub_got_repl_maps(PG* pg, Scrub::scrub_prio_t with_priority);

  void queue_for_rep_scrub(PG* pg,
                           Scrub::scrub_prio_t with_high_priority,
                           unsigned int qu_priority);

  /// Signals a change in the number of in-flight recovery writes
  void queue_scrub_replica_pushes(PG *pg, Scrub::scrub_prio_t with_priority);

  void queue_for_rep_scrub_resched(PG* pg,
                                   Scrub::scrub_prio_t with_high_priority,
                                   unsigned int qu_priority);

  void queue_for_pg_delete(spg_t pgid, epoch_t e);
  bool try_finish_pg_delete(PG *pg, unsigned old_pg_num);

private:
  // -- pg recovery and associated throttling --
  ceph::mutex recovery_lock = ceph::make_mutex("OSDService::recovery_lock");
  std::list<std::pair<epoch_t, PGRef> > awaiting_throttle;

  /// queue a scrub-related message for a PG
  template <class MSG_TYPE>
  void queue_scrub_event_msg(PG* pg,
                             Scrub::scrub_prio_t with_priority,
                             unsigned int qu_priority);

  /// An alternative version of queue_scrub_event_msg(), in which the queuing priority is
  /// provided by the executing scrub (i.e. taken from PgScrubber::m_flags)
  template <class MSG_TYPE>
  void queue_scrub_event_msg(PG* pg, Scrub::scrub_prio_t with_priority);

  utime_t defer_recovery_until;
  uint64_t recovery_ops_active;
  uint64_t recovery_ops_reserved;
  bool recovery_paused;
#ifdef DEBUG_RECOVERY_OIDS
  std::map<spg_t, std::set<hobject_t> > recovery_oids;
#endif
  bool _recover_now(uint64_t *available_pushes);
  void _maybe_queue_recovery();
  void _queue_for_recovery(
    std::pair<epoch_t, PGRef> p, uint64_t reserved_pushes);
public:
  void start_recovery_op(PG *pg, const hobject_t& soid);
  void finish_recovery_op(PG *pg, const hobject_t& soid, bool dequeue);
  bool is_recovery_active();
  void release_reserved_pushes(uint64_t pushes);
  void defer_recovery(float defer_for) {
    defer_recovery_until = ceph_clock_now();
    defer_recovery_until += defer_for;
  }
  void pause_recovery() {
    std::lock_guard l(recovery_lock);
    recovery_paused = true;
  }
  bool recovery_is_paused() {
    std::lock_guard l(recovery_lock);
    return recovery_paused;
  }
  void unpause_recovery() {
    std::lock_guard l(recovery_lock);
    recovery_paused = false;
    _maybe_queue_recovery();
  }
  void kick_recovery_queue() {
    std::lock_guard l(recovery_lock);
    _maybe_queue_recovery();
  }
  void clear_queued_recovery(PG *pg) {
    std::lock_guard l(recovery_lock);
    awaiting_throttle.remove_if(
      [pg](decltype(awaiting_throttle)::const_reference awaiting) {
        return awaiting.second.get() == pg;
      });
  }

  unsigned get_target_pg_log_entries() const;

  // delayed pg activation
  void queue_for_recovery(PG *pg) {
    std::lock_guard l(recovery_lock);

    if (pg->is_forced_recovery_or_backfill()) {
      awaiting_throttle.push_front(std::make_pair(pg->get_osdmap()->get_epoch(), pg));
    } else {
      awaiting_throttle.push_back(std::make_pair(pg->get_osdmap()->get_epoch(), pg));
    }
    _maybe_queue_recovery();
  }
  void queue_recovery_after_sleep(PG *pg, epoch_t queued, uint64_t reserved_pushes) {
    std::lock_guard l(recovery_lock);
    _queue_for_recovery(std::make_pair(queued, pg), reserved_pushes);
  }

  void queue_check_readable(spg_t spgid,
                            epoch_t lpr,
                            ceph::signedspan delay = ceph::signedspan::zero());

  // osd map cache (past osd maps)
  ceph::mutex map_cache_lock = ceph::make_mutex("OSDService::map_cache_lock");
  SharedLRU<epoch_t, const OSDMap> map_cache;
  SimpleLRU<epoch_t, ceph::buffer::list> map_bl_cache;
  SimpleLRU<epoch_t, ceph::buffer::list> map_bl_inc_cache;

  OSDMapRef try_get_map(epoch_t e);
  OSDMapRef get_map(epoch_t e) {
    OSDMapRef ret(try_get_map(e));
    ceph_assert(ret);
    return ret;
  }
  OSDMapRef add_map(OSDMap *o) {
    std::lock_guard l(map_cache_lock);
    return _add_map(o);
  }
  OSDMapRef _add_map(OSDMap *o);

  void _add_map_bl(epoch_t e, ceph::buffer::list& bl);
  bool get_map_bl(epoch_t e, ceph::buffer::list& bl) {
    std::lock_guard l(map_cache_lock);
    return _get_map_bl(e, bl);
  }
  bool _get_map_bl(epoch_t e, ceph::buffer::list& bl);

  void _add_map_inc_bl(epoch_t e, ceph::buffer::list& bl);
  bool get_inc_map_bl(epoch_t e, ceph::buffer::list& bl);

  /// identify split child pgids over an osdmap interval
  void identify_splits_and_merges(
    OSDMapRef old_map,
    OSDMapRef new_map,
    spg_t pgid,
    std::set<std::pair<spg_t,epoch_t>> *new_children,
    std::set<std::pair<spg_t,epoch_t>> *merge_pgs);

  void need_heartbeat_peer_update();

  void init();
  void final_init();
  void start_shutdown();
  void shutdown_reserver();
  void shutdown();

  // -- stats --
  ceph::mutex stat_lock = ceph::make_mutex("OSDService::stat_lock");
  osd_stat_t osd_stat;
  uint32_t seq = 0;

  void set_statfs(const struct store_statfs_t &stbuf,
                  osd_alert_list_t& alerts);
  osd_stat_t set_osd_stat(std::vector<int>& hb_peers, int num_pgs);
  void inc_osd_stat_repaired(void);
  float compute_adjusted_ratio(osd_stat_t new_stat, float *pratio, uint64_t adjust_used = 0);
  osd_stat_t get_osd_stat() {
    std::lock_guard l(stat_lock);
    ++seq;
    osd_stat.up_from = up_epoch;
    osd_stat.seq = ((uint64_t)osd_stat.up_from << 32) + seq;
    return osd_stat;
  }
  uint64_t get_osd_stat_seq() {
    std::lock_guard l(stat_lock);
    return osd_stat.seq;
  }
  void get_hb_pingtime(std::map<int, osd_stat_t::Interfaces> *pp)
  {
    std::lock_guard l(stat_lock);
    *pp = osd_stat.hb_pingtime;
    return;
  }

  // -- OSD Full Status --
private:
  friend TestOpsSocketHook;
  mutable ceph::mutex full_status_lock = ceph::make_mutex("OSDService::full_status_lock");
  enum s_names { INVALID = -1, NONE, NEARFULL, BACKFILLFULL, FULL, FAILSAFE } cur_state;  // ascending
  const char *get_full_state_name(s_names s) const {
    switch (s) {
    case NONE: return "none";
    case NEARFULL: return "nearfull";
    case BACKFILLFULL: return "backfillfull";
    case FULL: return "full";
    case FAILSAFE: return "failsafe";
    default: return "???";
    }
  }
  s_names get_full_state(std::string type) const {
    if (type == "none")
      return NONE;
    else if (type == "failsafe")
      return FAILSAFE;
    else if (type == "full")
      return FULL;
    else if (type == "backfillfull")
      return BACKFILLFULL;
    else if (type == "nearfull")
      return NEARFULL;
    else
      return INVALID;
  }
  double cur_ratio, physical_ratio;  ///< current utilization
  mutable int64_t injectfull = 0;
  s_names injectfull_state = NONE;
  float get_failsafe_full_ratio();
  bool _check_inject_full(DoutPrefixProvider *dpp, s_names type) const;
  bool _check_full(DoutPrefixProvider *dpp, s_names type) const;
public:
  void check_full_status(float ratio, float pratio);
  s_names recalc_full_state(float ratio, float pratio, std::string &inject);
  bool _tentative_full(DoutPrefixProvider *dpp, s_names type, uint64_t adjust_used, osd_stat_t);
  bool check_failsafe_full(DoutPrefixProvider *dpp) const;
  bool check_full(DoutPrefixProvider *dpp) const;
  bool tentative_backfill_full(DoutPrefixProvider *dpp, uint64_t adjust_used, osd_stat_t);
  bool check_backfill_full(DoutPrefixProvider *dpp) const;
  bool check_nearfull(DoutPrefixProvider *dpp) const;
  bool is_failsafe_full() const;
  bool is_full() const;
  bool is_backfillfull() const;
  bool is_nearfull() const;
  bool need_fullness_update();  ///< osdmap state needs update
  void set_injectfull(s_names type, int64_t count);


  // -- epochs --
private:
  // protects access to boot_epoch, up_epoch, bind_epoch
  mutable ceph::mutex epoch_lock = ceph::make_mutex("OSDService::epoch_lock");
  epoch_t boot_epoch;  // _first_ epoch we were marked up (after this process started)
  epoch_t up_epoch;    // _most_recent_ epoch we were marked up
  epoch_t bind_epoch;  // epoch we last did a bind to new ip:ports
public:
  /**
   * Retrieve the boot_, up_, and bind_ epochs the OSD has set. The params
   * can be NULL if you don't care about them.
   */
  void retrieve_epochs(epoch_t *_boot_epoch, epoch_t *_up_epoch,
                       epoch_t *_bind_epoch) const;
  /**
   * Set the boot, up, and bind epochs. Any NULL params will not be set.
   */
  void set_epochs(const epoch_t *_boot_epoch, const epoch_t *_up_epoch,
                  const epoch_t *_bind_epoch);
  epoch_t get_boot_epoch() const {
    epoch_t ret;
    retrieve_epochs(&ret, NULL, NULL);
    return ret;
  }
  epoch_t get_up_epoch() const {
    epoch_t ret;
    retrieve_epochs(NULL, &ret, NULL);
    return ret;
  }
  epoch_t get_bind_epoch() const {
    epoch_t ret;
    retrieve_epochs(NULL, NULL, &ret);
    return ret;
  }

  void request_osdmap_update(epoch_t e);

  // -- heartbeats --
  ceph::mutex hb_stamp_lock = ceph::make_mutex("OSDService::hb_stamp_lock");

  /// osd -> heartbeat stamps
  std::vector<HeartbeatStampsRef> hb_stamps;

  /// get or create a ref for a peer's HeartbeatStamps
  HeartbeatStampsRef get_hb_stamps(unsigned osd);


  // Timer for readable leases
  ceph::timer<ceph::mono_clock> mono_timer = ceph::timer<ceph::mono_clock>{ceph::construct_suspended};

  void queue_renew_lease(epoch_t epoch, spg_t spgid);

  // -- stopping --
  ceph::mutex is_stopping_lock = ceph::make_mutex("OSDService::is_stopping_lock");
  ceph::condition_variable is_stopping_cond;
  enum {
    NOT_STOPPING,
    PREPARING_TO_STOP,
    STOPPING };
  std::atomic<int> state{NOT_STOPPING};
  int get_state() const {
    return state;
  }
  void set_state(int s) {
    state = s;
  }
  bool is_stopping() const {
    return state == STOPPING;
  }
  bool is_preparing_to_stop() const {
    return state == PREPARING_TO_STOP;
  }
  bool prepare_to_stop();
  void got_stop_ack();


#ifdef PG_DEBUG_REFS
  ceph::mutex pgid_lock = ceph::make_mutex("OSDService::pgid_lock");
  std::map<spg_t, int> pgid_tracker;
  std::map<spg_t, PG*> live_pgs;
  void add_pgid(spg_t pgid, PG *pg);
  void remove_pgid(spg_t pgid, PG *pg);
  void dump_live_pgids();
#endif

  explicit OSDService(OSD *osd, ceph::async::io_context_pool& poolctx);
  ~OSDService() = default;
};

/*

  Each PG slot includes queues for events that are processing and/or waiting
  for a PG to be materialized in the slot.

  These are the constraints:

  - client ops must remain ordered by client, regardless of map epoch
  - peering messages/events from peers must remain ordered by peer
  - peering messages and client ops need not be ordered relative to each other

  - some peering events can create a pg (e.g., notify)
  - the query peering event can proceed when a PG doesn't exist

  Implementation notes:

  - everybody waits for split.  If the OSD has the parent PG it will instantiate
    the PGSlot early and mark it waiting_for_split.  Everything will wait until
    the parent is able to commit the split operation and the child PG's are
    materialized in the child slots.

  - every event has an epoch property and will wait for the OSDShard to catch
    up to that epoch.  For example, if we get a peering event from a future
    epoch, the event will wait in the slot until the local OSD has caught up.
    (We should be judicious in specifying the required epoch [by, e.g., setting
    it to the same_interval_since epoch] so that we don't wait for epochs that
    don't affect the given PG.)

  - we maintain two separate wait lists, *waiting* and *waiting_peering*.  The
    OpSchedulerItem has an is_peering() bool to determine which we use.  Waiting
    peering events are queued up by epoch required.

  - when we wake a PG slot (e.g., we finished split, or got a newer osdmap, or
    materialized the PG), we wake *all* waiting items.  (This could be optimized,
    probably, but we don't bother.)  We always requeue peering items ahead of
    client ops.

  - some peering events are marked !peering_requires_pg (PGQuery).  if we do
    not have a PG these are processed immediately (under the shard lock).

  - if we do not have a PG present, we check if the slot maps to the current
    host.  if so, we either queue the item and wait for the PG to materialize,
    or (if the event is a pg creating event like PGNotify), we materialize the
    PG.

  - when we advance the osdmap on the OSDShard, we scan pg slots and
    discard any slots with no pg (and not waiting_for_split) that no
    longer map to the current host.

 */
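
// A rough routing sketch (illustrative pseudocode, not the actual
// _process() implementation) for an item dequeued into a PG slot under
// the constraints above:
//
//   if (slot->waiting_for_split covers item's epoch) -> park in waiting
//   else if (item.is_peering()) {
//     if (item epoch > shard_osdmap epoch)     -> park in waiting_peering[epoch]
//     else if (no pg && !peering_requires_pg)  -> process now (shard lock held)
//     else if (no pg)                          -> create pg, or park in waiting
//     else                                     -> run against slot->pg
//   } else {  // client op
//     if (no pg) -> park in waiting
//     else       -> run against slot->pg
//   }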

struct OSDShardPGSlot {
  using OpSchedulerItem = ceph::osd::scheduler::OpSchedulerItem;
  PGRef pg;                               ///< pg reference
  std::deque<OpSchedulerItem> to_process; ///< order items for this slot
  int num_running = 0;                    ///< _process threads doing pg lookup/lock

  std::deque<OpSchedulerItem> waiting;    ///< waiting for pg (or map + pg)

  /// waiting for map (peering evt)
  std::map<epoch_t,std::deque<OpSchedulerItem>> waiting_peering;

  /// incremented by wake_pg_waiters; indicates racing _process threads
  /// should bail out (their op has been requeued)
  uint64_t requeue_seq = 0;

  /// waiting for split child to materialize in these epoch(s)
  std::set<epoch_t> waiting_for_split;

  epoch_t epoch = 0;
  boost::intrusive::set_member_hook<> pg_epoch_item;

  /// waiting for a merge (source or target) by this epoch
  epoch_t waiting_for_merge_epoch = 0;
};

struct OSDShard {
  const unsigned shard_id;
  CephContext *cct;
  OSD *osd;

  std::string shard_name;

  std::string sdata_wait_lock_name;
  ceph::mutex sdata_wait_lock;
  ceph::condition_variable sdata_cond;
  int waiting_threads = 0;

  ceph::mutex osdmap_lock;  ///< protect shard_osdmap updates vs users w/o shard_lock
  OSDMapRef shard_osdmap;

  OSDMapRef get_osdmap() {
    std::lock_guard l(osdmap_lock);
    return shard_osdmap;
  }

  std::string shard_lock_name;
  ceph::mutex shard_lock;   ///< protects remaining members below

  /// map of slots for each spg_t.  maintains ordering of items dequeued
  /// from scheduler while _process thread drops shard lock to acquire the
  /// pg lock.  stale slots are removed by consume_map.
  std::unordered_map<spg_t,std::unique_ptr<OSDShardPGSlot>> pg_slots;

  struct pg_slot_compare_by_epoch {
    bool operator()(const OSDShardPGSlot& l, const OSDShardPGSlot& r) const {
      return l.epoch < r.epoch;
    }
  };

  /// maintain an ordering of pg slots by pg epoch
  boost::intrusive::multiset<
    OSDShardPGSlot,
    boost::intrusive::member_hook<
      OSDShardPGSlot,
      boost::intrusive::set_member_hook<>,
      &OSDShardPGSlot::pg_epoch_item>,
    boost::intrusive::compare<pg_slot_compare_by_epoch>> pg_slots_by_epoch;
  int waiting_for_min_pg_epoch = 0;
  ceph::condition_variable min_pg_epoch_cond;

  /// priority queue
  ceph::osd::scheduler::OpSchedulerRef scheduler;

  bool stop_waiting = false;

  ContextQueue context_queue;

  void _attach_pg(OSDShardPGSlot *slot, PG *pg);
  void _detach_pg(OSDShardPGSlot *slot);

  void update_pg_epoch(OSDShardPGSlot *slot, epoch_t epoch);
  epoch_t get_min_pg_epoch();
  void wait_min_pg_epoch(epoch_t need);

  /// return newest epoch we are waiting for
  epoch_t get_max_waiting_epoch();

  /// push osdmap into shard
  void consume_map(
    const OSDMapRef& osdmap,
    unsigned *pushes_to_free);

  void _wake_pg_slot(spg_t pgid, OSDShardPGSlot *slot);

  void identify_splits_and_merges(
    const OSDMapRef& as_of_osdmap,
    std::set<std::pair<spg_t,epoch_t>> *split_children,
    std::set<std::pair<spg_t,epoch_t>> *merge_pgs);
  void _prime_splits(std::set<std::pair<spg_t,epoch_t>> *pgids);
  void prime_splits(const OSDMapRef& as_of_osdmap,
                    std::set<std::pair<spg_t,epoch_t>> *pgids);
  void prime_merges(const OSDMapRef& as_of_osdmap,
                    std::set<std::pair<spg_t,epoch_t>> *merge_pgs);
  void register_and_wake_split_child(PG *pg);
  void unprime_split_children(spg_t parent, unsigned old_pg_num);

  OSDShard(
    int id,
    CephContext *cct,
    OSD *osd);
};

class OSD : public Dispatcher,
            public md_config_obs_t {
  using OpSchedulerItem = ceph::osd::scheduler::OpSchedulerItem;

  /** OSD **/
  // global lock
  ceph::mutex osd_lock = ceph::make_mutex("OSD::osd_lock");
  SafeTimer tick_timer;    // safe timer (osd_lock)

  // Tick timer for the things that do not need osd_lock
  ceph::mutex tick_timer_lock = ceph::make_mutex("OSD::tick_timer_lock");
  SafeTimer tick_timer_without_osd_lock;
  std::string gss_ktfile_client{};

public:
  // config observer bits
  const char** get_tracked_conf_keys() const override;
  void handle_conf_change(const ConfigProxy& conf,
                          const std::set <std::string> &changed) override;
  void update_log_config();
  void check_config();

protected:

  const double OSD_TICK_INTERVAL = { 1.0 };
  double get_tick_interval() const;

  Messenger *cluster_messenger;
  Messenger *client_messenger;
  Messenger *objecter_messenger;
  MonClient *monc; // check the "monc helpers" list before accessing directly
  MgrClient mgrc;
  PerfCounters *logger;
  PerfCounters *recoverystate_perf;
  ObjectStore *store;
#ifdef HAVE_LIBFUSE
  FuseStore *fuse_store = nullptr;
#endif
  LogClient log_client;
  LogChannelRef clog;

  int whoami;
  std::string dev_path, journal_path;

  ceph_release_t last_require_osd_release{ceph_release_t::unknown};

  int numa_node = -1;
  size_t numa_cpu_set_size = 0;
  cpu_set_t numa_cpu_set;

  bool store_is_rotational = true;
  bool journal_is_rotational = true;

  ZTracer::Endpoint trace_endpoint;
  PerfCounters* create_logger();
  PerfCounters* create_recoverystate_perf();
  void tick();
  void tick_without_osd_lock();
  void _dispatch(Message *m);
  void dispatch_op(OpRequestRef op);

  void check_osdmap_features();

  // asok
  friend class OSDSocketHook;
  class OSDSocketHook *asok_hook;
  void asok_command(
    std::string_view prefix,
    const cmdmap_t& cmdmap,
    ceph::Formatter *f,
    const ceph::buffer::list& inbl,
    std::function<void(int,const std::string&,ceph::buffer::list&)> on_finish);

public:
  int get_nodeid() { return whoami; }

  static ghobject_t get_osdmap_pobject_name(epoch_t epoch) {
    char foo[20];
    snprintf(foo, sizeof(foo), "osdmap.%d", epoch);
    return ghobject_t(hobject_t(sobject_t(object_t(foo), 0)));
  }
  static ghobject_t get_inc_osdmap_pobject_name(epoch_t epoch) {
    char foo[22];
    snprintf(foo, sizeof(foo), "inc_osdmap.%d", epoch);
    return ghobject_t(hobject_t(sobject_t(object_t(foo), 0)));
  }

  static ghobject_t make_snapmapper_oid() {
    return ghobject_t(hobject_t(
      sobject_t(
        object_t("snapmapper"),
        0)));
  }
  static ghobject_t make_purged_snaps_oid() {
    return ghobject_t(hobject_t(
      sobject_t(
        object_t("purged_snaps"),
        0)));
  }

  static ghobject_t make_pg_log_oid(spg_t pg) {
    std::stringstream ss;
    ss << "pglog_" << pg;
    std::string s;
    getline(ss, s);
    return ghobject_t(hobject_t(sobject_t(object_t(s.c_str()), 0)));
  }

  static ghobject_t make_pg_biginfo_oid(spg_t pg) {
    std::stringstream ss;
    ss << "pginfo_" << pg;
    std::string s;
    getline(ss, s);
    return ghobject_t(hobject_t(sobject_t(object_t(s.c_str()), 0)));
  }
  static ghobject_t make_infos_oid() {
    hobject_t oid(sobject_t("infos", CEPH_NOSNAP));
    return ghobject_t(oid);
  }

  static ghobject_t make_final_pool_info_oid(int64_t pool) {
    return ghobject_t(
      hobject_t(
        sobject_t(
          object_t(std::string("final_pool_") + stringify(pool)),
          CEPH_NOSNAP)));
  }

  static ghobject_t make_pg_num_history_oid() {
    return ghobject_t(hobject_t(sobject_t("pg_num_history", CEPH_NOSNAP)));
  }

  static void recursive_remove_collection(CephContext* cct,
                                          ObjectStore *store,
                                          spg_t pgid,
                                          coll_t tmp);

  /**
   * get_osd_initial_compat_set()
   *
   * Get the initial feature set for this OSD.  Features
   * here are automatically upgraded.
   *
   * Return value: Initial osd CompatSet
   */
  static CompatSet get_osd_initial_compat_set();

  /**
   * get_osd_compat_set()
   *
   * Get all features supported by this OSD
   *
   * Return value: CompatSet of all supported features
   */
  static CompatSet get_osd_compat_set();

private:
  class C_Tick;
  class C_Tick_WithoutOSDLock;

  // -- config settings --
  float m_osd_pg_epoch_max_lag_factor;

  // -- superblock --
  OSDSuperblock superblock;

  void write_superblock();
  void write_superblock(ObjectStore::Transaction& t);
  int read_superblock();

  void clear_temp_objects();

  CompatSet osd_compat;

  // -- state --
public:
  typedef enum {
    STATE_INITIALIZING = 1,
    STATE_PREBOOT,
    STATE_BOOTING,
    STATE_ACTIVE,
    STATE_STOPPING,
    STATE_WAITING_FOR_HEALTHY
  } osd_state_t;

  static const char *get_state_name(int s) {
    switch (s) {
    case STATE_INITIALIZING: return "initializing";
    case STATE_PREBOOT: return "preboot";
    case STATE_BOOTING: return "booting";
    case STATE_ACTIVE: return "active";
    case STATE_STOPPING: return "stopping";
    case STATE_WAITING_FOR_HEALTHY: return "waiting_for_healthy";
    default: return "???";
    }
  }

private:
  std::atomic<int> state{STATE_INITIALIZING};

public:
  int get_state() const {
    return state;
  }
  void set_state(int s) {
    state = s;
  }
  bool is_initializing() const {
    return state == STATE_INITIALIZING;
  }
  bool is_preboot() const {
    return state == STATE_PREBOOT;
  }
  bool is_booting() const {
    return state == STATE_BOOTING;
  }
  bool is_active() const {
    return state == STATE_ACTIVE;
  }
  bool is_stopping() const {
    return state == STATE_STOPPING;
  }
  bool is_waiting_for_healthy() const {
    return state == STATE_WAITING_FOR_HEALTHY;
  }

private:

  ShardedThreadPool osd_op_tp;

  void get_latest_osdmap();

  // -- sessions --
private:
  void dispatch_session_waiting(const ceph::ref_t<Session>& session, OSDMapRef osdmap);

  ceph::mutex session_waiting_lock = ceph::make_mutex("OSD::session_waiting_lock");
  std::set<ceph::ref_t<Session>> session_waiting_for_map;

  /// Caller assumes refs for included Sessions
  void get_sessions_waiting_for_map(std::set<ceph::ref_t<Session>> *out) {
    std::lock_guard l(session_waiting_lock);
    out->swap(session_waiting_for_map);
  }
  void register_session_waiting_on_map(const ceph::ref_t<Session>& session) {
    std::lock_guard l(session_waiting_lock);
    session_waiting_for_map.insert(session);
  }
  void clear_session_waiting_on_map(const ceph::ref_t<Session>& session) {
    std::lock_guard l(session_waiting_lock);
    session_waiting_for_map.erase(session);
  }
  void dispatch_sessions_waiting_on_map() {
    std::set<ceph::ref_t<Session>> sessions_to_check;
    get_sessions_waiting_for_map(&sessions_to_check);
    for (auto i = sessions_to_check.begin();
         i != sessions_to_check.end();
         sessions_to_check.erase(i++)) {
      std::lock_guard l{(*i)->session_dispatch_lock};
      dispatch_session_waiting(*i, get_osdmap());
    }
  }
  void session_handle_reset(const ceph::ref_t<Session>& session) {
    std::lock_guard l(session->session_dispatch_lock);
    clear_session_waiting_on_map(session);

    session->clear_backoffs();

    /* Messages have connection refs, we need to clear the
     * connection->session->message->connection
     * cycles which result.
     * Bug #12338
     */
    session->waiting_on_map.clear_and_dispose(TrackedOp::Putter());
  }

private:
  /**
   * @defgroup monc helpers
   * @{
   * Right now we only have the one
   */

  /**
   * Ask the Monitors for a sequence of OSDMaps.
   *
   * @param epoch The epoch to start with when replying
   * @param force_request True if this request forces a new subscription to
   * the monitors; false if an outstanding request that encompasses it is
   * sufficient.
   */
  void osdmap_subscribe(version_t epoch, bool force_request);
  /** @} monc helpers */

  ceph::mutex osdmap_subscribe_lock = ceph::make_mutex("OSD::osdmap_subscribe_lock");
  epoch_t latest_subscribed_epoch{0};

  // -- heartbeat --
  /// information about a heartbeat peer
  struct HeartbeatInfo {
    int peer;                  ///< peer
    ConnectionRef con_front;   ///< peer connection (front)
    ConnectionRef con_back;    ///< peer connection (back)
    utime_t first_tx;          ///< time we sent our first ping request
    utime_t last_tx;           ///< last time we sent a ping request
    utime_t last_rx_front;     ///< last time we got a ping reply on the front side
    utime_t last_rx_back;      ///< last time we got a ping reply on the back side
    epoch_t epoch;             ///< most recent epoch we wanted this peer
    /// number of connections we send and receive heartbeat pings/replies
    static constexpr int HEARTBEAT_MAX_CONN = 2;
    /// history of in-flight pings, arranged by the timestamp we sent:
    /// send time -> (deadline, remaining reply count)
    std::map<utime_t, std::pair<utime_t, int>> ping_history;
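
    // Illustrative entry (not actual code): a ping sent at t=100.0 with
    // a 20 s grace period and both front+back replies still outstanding
    // would sit in ping_history as
    //
    //   ping_history[100.0] = std::make_pair(120.0 /* deadline */,
    //                                        2     /* replies pending */);
    //
    // the entry is erased once both replies arrive; if now > deadline,
    // is_unhealthy() reports true.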

    utime_t hb_interval_start;
    uint32_t hb_average_count = 0;
    uint32_t hb_index = 0;

    uint32_t hb_total_back = 0;
    uint32_t hb_min_back = UINT_MAX;
    uint32_t hb_max_back = 0;
    std::vector<uint32_t> hb_back_pingtime;
    std::vector<uint32_t> hb_back_min;
    std::vector<uint32_t> hb_back_max;

    uint32_t hb_total_front = 0;
    uint32_t hb_min_front = UINT_MAX;
    uint32_t hb_max_front = 0;
    std::vector<uint32_t> hb_front_pingtime;
    std::vector<uint32_t> hb_front_min;
    std::vector<uint32_t> hb_front_max;

    bool is_stale(utime_t stale) const {
      if (ping_history.empty()) {
        return false;
      }
      utime_t oldest_deadline = ping_history.begin()->second.first;
      return oldest_deadline <= stale;
    }

    bool is_unhealthy(utime_t now) const {
      if (ping_history.empty()) {
        /// we haven't sent a ping yet, or we have received all replies;
        /// either way we are safe and healthy for now
        return false;
      }

      utime_t oldest_deadline = ping_history.begin()->second.first;
      return now > oldest_deadline;
    }

    bool is_healthy(utime_t now) const {
      if (last_rx_front == utime_t() || last_rx_back == utime_t()) {
        // do not declare ourselves healthy until we have received the
        // first replies from both front/back connections
        return false;
      }
      return !is_unhealthy(now);
    }

    void clear_mark_down(Connection *except = nullptr) {
      if (con_back && con_back != except) {
        con_back->mark_down();
        con_back->clear_priv();
        con_back.reset(nullptr);
      }
      if (con_front && con_front != except) {
        con_front->mark_down();
        con_front->clear_priv();
        con_front.reset(nullptr);
      }
    }
  };

  ceph::mutex heartbeat_lock = ceph::make_mutex("OSD::heartbeat_lock");
  std::map<int, int> debug_heartbeat_drops_remaining;
  ceph::condition_variable heartbeat_cond;
  bool heartbeat_stop;
  std::atomic<bool> heartbeat_need_update;
  std::map<int,HeartbeatInfo> heartbeat_peers;  ///< map of osd id to HeartbeatInfo
  utime_t last_mon_heartbeat;
  Messenger *hb_front_client_messenger;
  Messenger *hb_back_client_messenger;
  Messenger *hb_front_server_messenger;
  Messenger *hb_back_server_messenger;
  utime_t last_heartbeat_resample;  ///< last time we chose random peers in waiting-for-healthy state
  double daily_loadavg;
  ceph::mono_time startup_time;

  // Track ping response times using a vector as a circular buffer
  // MUST BE A POWER OF 2
  const uint32_t hb_vector_size = 16;

  void _add_heartbeat_peer(int p);
  void _remove_heartbeat_peer(int p);
  bool heartbeat_reset(Connection *con);
  void maybe_update_heartbeat_peers();
  void reset_heartbeat_peers(bool all);
  bool heartbeat_peers_need_update() {
    return heartbeat_need_update.load();
  }
  void heartbeat_set_peers_need_update() {
    heartbeat_need_update.store(true);
  }
  void heartbeat_clear_peers_need_update() {
    heartbeat_need_update.store(false);
  }
  void heartbeat();
  void heartbeat_check();
  void heartbeat_entry();
  void need_heartbeat_peer_update();

  void heartbeat_kick() {
    std::lock_guard l(heartbeat_lock);
    heartbeat_cond.notify_all();
  }

  struct T_Heartbeat : public Thread {
    OSD *osd;
    explicit T_Heartbeat(OSD *o) : osd(o) {}
    void *entry() override {
      osd->heartbeat_entry();
      return 0;
    }
  } heartbeat_thread;

public:
  bool heartbeat_dispatch(Message *m);

  struct HeartbeatDispatcher : public Dispatcher {
    OSD *osd;
    explicit HeartbeatDispatcher(OSD *o) : Dispatcher(o->cct), osd(o) {}

    bool ms_can_fast_dispatch_any() const override { return true; }
    bool ms_can_fast_dispatch(const Message *m) const override {
      switch (m->get_type()) {
      case CEPH_MSG_PING:
      case MSG_OSD_PING:
        return true;
      default:
        return false;
      }
    }
    void ms_fast_dispatch(Message *m) override {
      osd->heartbeat_dispatch(m);
    }
    bool ms_dispatch(Message *m) override {
      return osd->heartbeat_dispatch(m);
    }
    bool ms_handle_reset(Connection *con) override {
      return osd->heartbeat_reset(con);
    }
    void ms_handle_remote_reset(Connection *con) override {}
    bool ms_handle_refused(Connection *con) override {
      return osd->ms_handle_refused(con);
    }
    int ms_handle_authentication(Connection *con) override {
      return true;
    }
  } heartbeat_dispatcher;

private:
  // -- waiters --
  std::list<OpRequestRef> finished;

  void take_waiters(std::list<OpRequestRef>& ls) {
    ceph_assert(ceph_mutex_is_locked(osd_lock));
    finished.splice(finished.end(), ls);
  }
  void do_waiters();

  // -- op tracking --
  OpTracker op_tracker;
  void test_ops(std::string command, std::string args, std::ostream& ss);
  friend class TestOpsSocketHook;
  TestOpsSocketHook *test_ops_hook;
  friend struct C_FinishSplits;
  friend struct C_OpenPGs;

protected:

1571 /*
1572 * The ordered op delivery chain is:
1573 *
9f95a23c
TL
1574 * fast dispatch -> scheduler back
1575 * scheduler front <-> to_process back
7c673cae
FG
1576 * to_process front -> RunVis(item)
1577 * <- queue_front()
1578 *
9f95a23c
TL
1579 * The scheduler is per-shard, and to_process is per pg_slot. Items can be
1580 * pushed back up into to_process and/or scheduler while order is preserved.
7c673cae
FG
1581 *
1582 * Multiple worker threads can operate on each shard.
1583 *
11fdf7f2 1584 * Under normal circumstances, num_running == to_process.size(). There are
7c673cae
FG
1585 * two times when that is not true: (1) when waiting_for_pg == true and
1586 * to_process is accumulating requests that are waiting for the pg to be
1587 * instantiated; in that case they will all get requeued together by
1588 * wake_pg_waiters, and (2) when wake_pg_waiters just ran, waiting_for_pg
1589 * and already requeued the items.
1590 */
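  /*
   * Rough shape of the slot logic described above, as a hedged sketch (the
   * real logic lives in ShardedOpWQ::_process; the locals here are
   * illustrative only):
   *
   *   OpSchedulerItem item = sdata->scheduler->dequeue();
   *   OSDShardPGSlot& slot = *sdata->pg_slots[item.get_ordering_token()];
   *   slot.to_process.push_back(std::move(item));
   *   if (slot.waiting_for_pg) {
   *     return;  // accumulate; wake_pg_waiters requeues all of these
   *              // together, preserving arrival order
   *   }
   *   auto qi = std::move(slot.to_process.front());
   *   slot.to_process.pop_front();
   *   qi.run(osd, sdata, pg, tp_handle);   // RunVis(item)
   */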
  friend class ceph::osd::scheduler::PGOpItem;
  friend class ceph::osd::scheduler::PGPeeringItem;
  friend class ceph::osd::scheduler::PGRecovery;
  friend class ceph::osd::scheduler::PGRecoveryMsg;
  friend class ceph::osd::scheduler::PGDelete;

  class ShardedOpWQ
    : public ShardedThreadPool::ShardedWQ<OpSchedulerItem>
  {
    OSD *osd;

  public:
    ShardedOpWQ(OSD *o,
                ceph::timespan ti,
                ceph::timespan si,
                ShardedThreadPool* tp)
      : ShardedThreadPool::ShardedWQ<OpSchedulerItem>(ti, si, tp),
        osd(o) {
    }

    void _add_slot_waiter(
      spg_t token,
      OSDShardPGSlot *slot,
      OpSchedulerItem&& qi);

    /// try to do some work
    void _process(uint32_t thread_index, ceph::heartbeat_handle_d *hb) override;

    /// enqueue a new item
    void _enqueue(OpSchedulerItem&& item) override;

    /// requeue an old item (at the front of the line)
    void _enqueue_front(OpSchedulerItem&& item) override;

    void return_waiting_threads() override {
      for (uint32_t i = 0; i < osd->num_shards; i++) {
        OSDShard* sdata = osd->shards[i];
        assert(NULL != sdata);
        std::scoped_lock l{sdata->sdata_wait_lock};
        sdata->stop_waiting = true;
        sdata->sdata_cond.notify_all();
      }
    }

    void stop_return_waiting_threads() override {
      for (uint32_t i = 0; i < osd->num_shards; i++) {
        OSDShard* sdata = osd->shards[i];
        assert(NULL != sdata);
        std::scoped_lock l{sdata->sdata_wait_lock};
        sdata->stop_waiting = false;
      }
    }

    void dump(ceph::Formatter *f) {
      for (uint32_t i = 0; i < osd->num_shards; i++) {
        auto &&sdata = osd->shards[i];

        char queue_name[32] = {0};
        snprintf(queue_name, sizeof(queue_name), "%s%" PRIu32, "OSD:ShardedOpWQ:", i);
        ceph_assert(NULL != sdata);

        std::scoped_lock l{sdata->shard_lock};
        f->open_object_section(queue_name);
        sdata->scheduler->dump(*f);
        f->close_section();
      }
    }

    bool is_shard_empty(uint32_t thread_index) override {
      uint32_t shard_index = thread_index % osd->num_shards;
      auto &&sdata = osd->shards[shard_index];
      ceph_assert(sdata);
      std::lock_guard l(sdata->shard_lock);
      // the first thread assigned to each shard (thread_index < num_shards)
      // also drains the shard's context_queue, so it counts that queue when
      // reporting emptiness
      if (thread_index < osd->num_shards) {
        return sdata->scheduler->empty() && sdata->context_queue.empty();
      } else {
        return sdata->scheduler->empty();
      }
    }

    void handle_oncommits(std::list<Context*>& oncommits) {
      for (auto p : oncommits) {
        p->complete(0);
      }
    }
  } op_shardedwq;


  void enqueue_op(spg_t pg, OpRequestRef&& op, epoch_t epoch);
  void dequeue_op(
    PGRef pg, OpRequestRef op,
    ThreadPool::TPHandle &handle);

  void enqueue_peering_evt(
    spg_t pgid,
    PGPeeringEventRef ref);
  void dequeue_peering_evt(
    OSDShard *sdata,
    PG *pg,
    PGPeeringEventRef ref,
    ThreadPool::TPHandle& handle);

  void dequeue_delete(
    OSDShard *sdata,
    PG *pg,
    epoch_t epoch,
    ThreadPool::TPHandle& handle);

  friend class PG;
  friend struct OSDShard;
  friend class PrimaryLogPG;
  friend class PgScrubber;

protected:

  // -- osd map --
  // TODO: switch to std::atomic<OSDMapRef> once C++20 is available.
  OSDMapRef _osdmap;
  void set_osdmap(OSDMapRef osdmap) {
    std::atomic_store(&_osdmap, osdmap);
  }
  OSDMapRef get_osdmap() const {
    return std::atomic_load(&_osdmap);
  }
  epoch_t get_osdmap_epoch() const {
    // XXX: performance?
    auto osdmap = get_osdmap();
    return osdmap ? osdmap->get_epoch() : 0;
  }
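  /*
   * Sketch of the C++20 form the TODO above refers to (assumes a standard
   * library that implements std::atomic<std::shared_ptr<T>>); the
   * atomic_load/atomic_store free functions used here provide the same
   * publication semantics for shared_ptr before C++20:
   *
   *   std::atomic<OSDMapRef> _osdmap;
   *   void set_osdmap(OSDMapRef osdmap) {
   *     _osdmap.store(std::move(osdmap));
   *   }
   *   OSDMapRef get_osdmap() const {
   *     return _osdmap.load();
   *   }
   */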

  pool_pg_num_history_t pg_num_history;

  ceph::shared_mutex map_lock = ceph::make_shared_mutex("OSD::map_lock");
  std::list<OpRequestRef> waiting_for_osdmap;
  std::deque<utime_t> osd_markdown_log;

  friend struct send_map_on_destruct;

  void wait_for_new_map(OpRequestRef op);
  void handle_osd_map(class MOSDMap *m);
  void _committed_osd_maps(epoch_t first, epoch_t last, class MOSDMap *m);
  void trim_maps(epoch_t oldest, int nreceived, bool skip_maps);
  void note_down_osd(int osd);
  void note_up_osd(int osd);
  friend struct C_OnMapCommit;

  bool advance_pg(
    epoch_t advance_to,
    PG *pg,
    ThreadPool::TPHandle &handle,
    PeeringCtx &rctx);
  void consume_map();
  void activate_map();

  // osd map cache (past osd maps)
  OSDMapRef get_map(epoch_t e) {
    return service.get_map(e);
  }
  OSDMapRef add_map(OSDMap *o) {
    return service.add_map(o);
  }
  bool get_map_bl(epoch_t e, ceph::buffer::list& bl) {
    return service.get_map_bl(e, bl);
  }

public:
  // -- shards --
  std::vector<OSDShard*> shards;
  uint32_t num_shards = 0;

  void inc_num_pgs() {
    ++num_pgs;
  }
  void dec_num_pgs() {
    --num_pgs;
  }
  int get_num_pgs() const {
    return num_pgs;
  }

protected:
  ceph::mutex merge_lock = ceph::make_mutex("OSD::merge_lock");
  /// merge epoch -> target pgid -> source pgid -> pg
  std::map<epoch_t,std::map<spg_t,std::map<spg_t,PGRef>>> merge_waiters;

  bool add_merge_waiter(OSDMapRef nextmap, spg_t target, PGRef source,
                        unsigned need);

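  /*
   * Illustrative walk of the nested merge_waiters structure above (hedged
   * sketch only; epoch, target, source, and need are hypothetical locals,
   * not members):
   *
   *   std::lock_guard l(merge_lock);
   *   std::map<spg_t,PGRef>& sources = merge_waiters[epoch][target];
   *   sources[source->pg_id] = source;              // gather one source pg
   *   bool all_gathered = (sources.size() >= need); // ready to merge?
   */
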
  // -- placement groups --
  std::atomic<size_t> num_pgs = {0};

  std::mutex pending_creates_lock;
  using create_from_osd_t = std::pair<spg_t, bool /* is primary */>;
  std::set<create_from_osd_t> pending_creates_from_osd;
  unsigned pending_creates_from_mon = 0;

  PGRecoveryStats pg_recovery_stats;

  PGRef _lookup_pg(spg_t pgid);
  PGRef _lookup_lock_pg(spg_t pgid);
  void register_pg(PGRef pg);
  bool try_finish_pg_delete(PG *pg, unsigned old_pg_num);

  void _get_pgs(std::vector<PGRef> *v, bool clear_too=false);
  void _get_pgids(std::vector<spg_t> *v);

public:
  PGRef lookup_lock_pg(spg_t pgid);

  std::set<int64_t> get_mapped_pools();

protected:
  PG* _make_pg(OSDMapRef createmap, spg_t pgid);

  bool maybe_wait_for_max_pg(const OSDMapRef& osdmap,
                             spg_t pgid, bool is_mon_create);
  void resume_creating_pg();

  void load_pgs();

  /// build initial pg history and intervals on create
  void build_initial_pg_history(
    spg_t pgid,
    epoch_t created,
    utime_t created_stamp,
    pg_history_t *h,
    PastIntervals *pi);

  epoch_t last_pg_create_epoch;

  void handle_pg_create(OpRequestRef op);

  void split_pgs(
    PG *parent,
    const std::set<spg_t> &childpgids, std::set<PGRef> *out_pgs,
    OSDMapRef curmap,
    OSDMapRef nextmap,
    PeeringCtx &rctx);
  void _finish_splits(std::set<PGRef>& pgs);

  // == monitor interaction ==
  ceph::mutex mon_report_lock = ceph::make_mutex("OSD::mon_report_lock");
  utime_t last_mon_report;
  Finisher boot_finisher;

  // -- boot --
  void start_boot();
  void _got_mon_epochs(epoch_t oldest, epoch_t newest);
  void _preboot(epoch_t oldest, epoch_t newest);
  void _send_boot();
  void _collect_metadata(std::map<std::string,std::string> *pmeta);
  void _get_purged_snaps();
  void handle_get_purged_snaps_reply(MMonGetPurgedSnapsReply *r);

  void start_waiting_for_healthy();
  bool _is_healthy();

  void send_full_update();

  friend struct CB_OSD_GetVersion;

  // -- alive --
  epoch_t up_thru_wanted;

  void queue_want_up_thru(epoch_t want);
  void send_alive();

  // -- full map requests --
  epoch_t requested_full_first, requested_full_last;

  void request_full_map(epoch_t first, epoch_t last);
  void rerequest_full_maps() {
    epoch_t first = requested_full_first;
    epoch_t last = requested_full_last;
    requested_full_first = 0;
    requested_full_last = 0;
    request_full_map(first, last);
  }
  void got_full_map(epoch_t e);

  // -- failures --
  std::map<int,utime_t> failure_queue;
  std::map<int,std::pair<utime_t,entity_addrvec_t> > failure_pending;

  void requeue_failures();
  void send_failures();
  void send_still_alive(epoch_t epoch, int osd, const entity_addrvec_t &addrs);
  void cancel_pending_failures();

  ceph::coarse_mono_clock::time_point last_sent_beacon;
  ceph::mutex min_last_epoch_clean_lock = ceph::make_mutex("OSD::min_last_epoch_clean_lock");
  epoch_t min_last_epoch_clean = 0;
  // which pgs were scanned for min_lec
  std::vector<pg_t> min_last_epoch_clean_pgs;
  void send_beacon(const ceph::coarse_mono_clock::time_point& now);

  ceph_tid_t get_tid() {
    return service.get_tid();
  }

  double scrub_sleep_time(bool must_scrub);

  // -- generic pg peering --
  PeeringCtx create_context();
  void dispatch_context(PeeringCtx &ctx, PG *pg, OSDMapRef curmap,
                        ThreadPool::TPHandle *handle = NULL);

  bool require_mon_peer(const Message *m);
  bool require_mon_or_mgr_peer(const Message *m);
  bool require_osd_peer(const Message *m);
  /**
   * Verifies that we were alive in the given epoch, and that we
   * still are.
   */
  bool require_self_aliveness(const Message *m, epoch_t alive_since);
  /**
   * Verifies that the OSD who sent the given op has the same
   * address as in the given OSDMap.
   * @pre op was sent by an OSD using the cluster messenger
   */
  bool require_same_peer_instance(const Message *m, const OSDMapRef& map,
                                  bool is_fast_dispatch);

  bool require_same_or_newer_map(OpRequestRef& op, epoch_t e,
                                 bool is_fast_dispatch);

  void handle_fast_pg_create(MOSDPGCreate2 *m);
  void handle_fast_pg_query(MOSDPGQuery *m);
  void handle_pg_query_nopg(const MQuery& q);
  void handle_fast_pg_notify(MOSDPGNotify *m);
  void handle_pg_notify_nopg(const MNotifyRec& q);
  void handle_fast_pg_info(MOSDPGInfo *m);
  void handle_fast_pg_remove(MOSDPGRemove *m);

public:
  // used by OSDShard
  PGRef handle_pg_create_info(const OSDMapRef& osdmap, const PGCreateInfo *info);
protected:

  void handle_fast_force_recovery(MOSDForceRecovery *m);

  // -- commands --
  void handle_command(class MCommand *m);

  // -- pg recovery --
  void do_recovery(PG *pg, epoch_t epoch_queued, uint64_t pushes_reserved,
                   ThreadPool::TPHandle &handle);

  // -- scrubbing --
  void sched_scrub();
  void resched_all_scrubs();
  bool scrub_random_backoff();
  bool scrub_load_below_threshold();
  bool scrub_time_permit(utime_t now);

  // -- status reporting --
  MPGStats *collect_pg_stats();
  std::vector<DaemonHealthMetric> get_health_metrics();

private:
  bool ms_can_fast_dispatch_any() const override { return true; }
  bool ms_can_fast_dispatch(const Message *m) const override {
    switch (m->get_type()) {
    case CEPH_MSG_PING:
    case CEPH_MSG_OSD_OP:
    case CEPH_MSG_OSD_BACKOFF:
    case MSG_OSD_SCRUB2:
    case MSG_OSD_FORCE_RECOVERY:
    case MSG_MON_COMMAND:
    case MSG_OSD_PG_CREATE2:
    case MSG_OSD_PG_QUERY:
    case MSG_OSD_PG_QUERY2:
    case MSG_OSD_PG_INFO:
    case MSG_OSD_PG_INFO2:
    case MSG_OSD_PG_NOTIFY:
    case MSG_OSD_PG_NOTIFY2:
    case MSG_OSD_PG_LOG:
    case MSG_OSD_PG_TRIM:
    case MSG_OSD_PG_REMOVE:
    case MSG_OSD_BACKFILL_RESERVE:
    case MSG_OSD_RECOVERY_RESERVE:
    case MSG_OSD_REPOP:
    case MSG_OSD_REPOPREPLY:
    case MSG_OSD_PG_PUSH:
    case MSG_OSD_PG_PULL:
    case MSG_OSD_PG_PUSH_REPLY:
    case MSG_OSD_PG_SCAN:
    case MSG_OSD_PG_BACKFILL:
    case MSG_OSD_PG_BACKFILL_REMOVE:
    case MSG_OSD_EC_WRITE:
    case MSG_OSD_EC_WRITE_REPLY:
    case MSG_OSD_EC_READ:
    case MSG_OSD_EC_READ_REPLY:
    case MSG_OSD_SCRUB_RESERVE:
    case MSG_OSD_REP_SCRUB:
    case MSG_OSD_REP_SCRUBMAP:
    case MSG_OSD_PG_UPDATE_LOG_MISSING:
    case MSG_OSD_PG_UPDATE_LOG_MISSING_REPLY:
    case MSG_OSD_PG_RECOVERY_DELETE:
    case MSG_OSD_PG_RECOVERY_DELETE_REPLY:
    case MSG_OSD_PG_LEASE:
    case MSG_OSD_PG_LEASE_ACK:
      return true;
    default:
      return false;
    }
  }
  void ms_fast_dispatch(Message *m) override;
  bool ms_dispatch(Message *m) override;
  void ms_handle_connect(Connection *con) override;
  void ms_handle_fast_connect(Connection *con) override;
  void ms_handle_fast_accept(Connection *con) override;
  int ms_handle_authentication(Connection *con) override;
  bool ms_handle_reset(Connection *con) override;
  void ms_handle_remote_reset(Connection *con) override {}
  bool ms_handle_refused(Connection *con) override;

public:
  /* internal and external can point to the same messenger; they will still
   * be cleaned up properly */
  OSD(CephContext *cct_,
      ObjectStore *store_,
      int id,
      Messenger *internal,
      Messenger *external,
      Messenger *hb_front_client,
      Messenger *hb_back_client,
      Messenger *hb_front_server,
      Messenger *hb_back_server,
      Messenger *osdc_messenger,
      MonClient *mc, const std::string &dev, const std::string &jdev,
      ceph::async::io_context_pool& poolctx);
  ~OSD() override;

  // static bits
  static int mkfs(CephContext *cct, ObjectStore *store, uuid_d fsid,
                  int whoami, std::string osdspec_affinity);

  /* remove any non-user xattrs from a std::map of them */
  void filter_xattrs(std::map<std::string, ceph::buffer::ptr>& attrs) {
    for (std::map<std::string, ceph::buffer::ptr>::iterator iter = attrs.begin();
         iter != attrs.end();
         ) {
      // user xattrs are stored with a leading '_' and must be longer than
      // the bare prefix; erase via post-increment so the iterator stays valid
      if (('_' != iter->first.at(0)) || (iter->first.size() == 1))
        attrs.erase(iter++);
      else ++iter;
    }
  }
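  /*
   * Usage sketch (hedged example; load_attrs() is a hypothetical helper):
   * given attrs containing {"_user.ceph.foo", "snapset", "_"}, only
   * "_user.ceph.foo" survives -- the other keys fail the prefix/length test:
   *
   *   std::map<std::string, ceph::buffer::ptr> attrs = load_attrs(obj);
   *   osd->filter_xattrs(attrs);   // non-user xattrs are erased in place
   */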

private:
  int mon_cmd_maybe_osd_create(std::string &cmd);
  int update_crush_device_class();
  int update_crush_location();

  static int write_meta(CephContext *cct,
                        ObjectStore *store,
                        uuid_d& cluster_fsid, uuid_d& osd_fsid,
                        int whoami, std::string& osdspec_affinity);

  void handle_scrub(class MOSDScrub *m);
  void handle_fast_scrub(class MOSDScrub2 *m);
  void handle_osd_ping(class MOSDPing *m);

  size_t get_num_cache_shards();
  int get_num_op_shards();
  int get_num_op_threads();

  float get_osd_recovery_sleep();
  float get_osd_delete_sleep();
  float get_osd_snap_trim_sleep();

  int get_recovery_max_active();
  bool maybe_override_options_for_qos();

  void scrub_purged_snaps();
  void probe_smart(const std::string& devid, std::ostream& ss);

public:
  static int peek_meta(ObjectStore *store,
                       std::string *magic,
                       uuid_d *cluster_fsid,
                       uuid_d *osd_fsid,
                       int *whoami,
                       ceph_release_t *min_osd_release);

  // startup/shutdown
  int pre_init();
  int init();
  void final_init();

  int enable_disable_fuse(bool stop);
  int set_numa_affinity();

  void suicide(int exitcode);
  int shutdown();

  void handle_signal(int signum);

  /// check if we can throw out an op from a disconnected client
  static bool op_is_discardable(const MOSDOp *m);

public:
  OSDService service;
  friend class OSDService;

private:
  void set_perf_queries(const ConfigPayload &config_payload);
  MetricPayload get_perf_reports();

  ceph::mutex m_perf_queries_lock = ceph::make_mutex("OSD::m_perf_queries_lock");
  std::list<OSDPerfMetricQuery> m_perf_queries;
  std::map<OSDPerfMetricQuery, OSDPerfMetricLimits> m_perf_limits;
};


// compatibility of the executable
extern const CompatSet::Feature ceph_osd_feature_compat[];
extern const CompatSet::Feature ceph_osd_feature_ro_compat[];
extern const CompatSet::Feature ceph_osd_feature_incompat[];

#endif // CEPH_OSD_H