// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2013,2014 Inktank Storage, Inc.
 * Copyright (C) 2013,2014 Cloudwatt <libre.licensing@cloudwatt.com>
 *
 * Author: Loic Dachary <loic@dachary.org>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */

#ifndef PGBACKEND_H
#define PGBACKEND_H

#include "osd_types.h"
#include "common/WorkQueue.h"
#include "include/Context.h"
#include "os/ObjectStore.h"
#include "common/LogClient.h"
#include <string>
#include "PGTransaction.h"
#include "common/ostream_temp.h"

namespace Scrub {
  class Store;
}
struct shard_info_wrapper;
struct inconsistent_obj_wrapper;

// forward declarations
class OSDMap;
class PGLog;
typedef std::shared_ptr<const OSDMap> OSDMapRef;

/**
 * PGBackend
 *
 * PGBackend defines an interface for logic handling IO and
 * replication on RADOS objects.  The PGBackend implementation
 * is responsible for:
 *
 * 1) Handling client operations
 * 2) Handling object recovery
 * 3) Handling object access
 * 4) Handling scrub, deep-scrub, repair
 */
class PGBackend {
public:
  CephContext* cct;
protected:
  ObjectStore *store;
  const coll_t coll;
  ObjectStore::CollectionHandle &ch;
public:
  /**
   * Provides interfaces for PGBackend callbacks
   *
   * The intention is that the parent calls into the PGBackend
   * implementation holding a lock and that the callbacks are
   * called under the same locks.
   */
  class Listener {
  public:
    /// Debugging
    virtual DoutPrefixProvider *get_dpp() = 0;

    /// Recovery

    /**
     * Called with the transaction recovering oid
     */
    virtual void on_local_recover(
      const hobject_t &oid,
      const ObjectRecoveryInfo &recovery_info,
      ObjectContextRef obc,
      bool is_delete,
      ObjectStore::Transaction *t
      ) = 0;

    /**
     * Called when transaction recovering oid is durable and
     * applied on all replicas
     */
    virtual void on_global_recover(
      const hobject_t &oid,
      const object_stat_sum_t &stat_diff,
      bool is_delete
      ) = 0;

    /**
     * Called when peer is recovered
     */
    virtual void on_peer_recover(
      pg_shard_t peer,
      const hobject_t &oid,
      const ObjectRecoveryInfo &recovery_info
      ) = 0;

    virtual void begin_peer_recover(
      pg_shard_t peer,
      const hobject_t oid) = 0;

    virtual void apply_stats(
      const hobject_t &soid,
      const object_stat_sum_t &delta_stats) = 0;

    /**
     * Called when a read from a set of replicas/primary fails
     */
    virtual void on_failed_pull(
      const std::set<pg_shard_t> &from,
      const hobject_t &soid,
      const eversion_t &v
      ) = 0;

    /**
     * Called when a pull on soid cannot be completed due to
     * down peers
     */
    virtual void cancel_pull(
      const hobject_t &soid) = 0;

    /**
     * Called to remove an object.
     */
    virtual void remove_missing_object(
      const hobject_t &oid,
      eversion_t v,
      Context *on_complete) = 0;

    /**
     * Bless a context
     *
     * Wraps a context in whatever outer layers the parent usually
     * uses to call into the PGBackend
     */
    virtual Context *bless_context(Context *c) = 0;
    virtual GenContext<ThreadPool::TPHandle&> *bless_gencontext(
      GenContext<ThreadPool::TPHandle&> *c) = 0;
    virtual GenContext<ThreadPool::TPHandle&> *bless_unlocked_gencontext(
      GenContext<ThreadPool::TPHandle&> *c) = 0;
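
    // Illustrative sketch (not part of the interface): a backend
    // implementation typically blesses its internal completions before
    // registering them on a transaction, so that they run under the parent's
    // usual locking and interval checks.  C_MyRecoveryDone is a hypothetical
    // callback type used only for this example.
    //
    //   Context *fin = get_parent()->bless_context(
    //     new C_MyRecoveryDone(this, oid));
    //   t->register_on_commit(fin);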

    virtual void send_message(int to_osd, Message *m) = 0;
    virtual void queue_transaction(
      ObjectStore::Transaction&& t,
      OpRequestRef op = OpRequestRef()
      ) = 0;
    virtual void queue_transactions(
      std::vector<ObjectStore::Transaction>& tls,
      OpRequestRef op = OpRequestRef()
      ) = 0;
    virtual epoch_t get_interval_start_epoch() const = 0;
    virtual epoch_t get_last_peering_reset_epoch() const = 0;

    virtual const std::set<pg_shard_t> &get_acting_recovery_backfill_shards() const = 0;
    virtual const std::set<pg_shard_t> &get_acting_shards() const = 0;
    virtual const std::set<pg_shard_t> &get_backfill_shards() const = 0;

    virtual std::ostream& gen_dbg_prefix(std::ostream& out) const = 0;

    virtual const std::map<hobject_t, std::set<pg_shard_t>> &get_missing_loc_shards()
      const = 0;

    virtual const pg_missing_tracker_t &get_local_missing() const = 0;
    virtual void add_local_next_event(const pg_log_entry_t& e) = 0;
    virtual const std::map<pg_shard_t, pg_missing_t> &get_shard_missing()
      const = 0;
    virtual const pg_missing_const_i * maybe_get_shard_missing(
      pg_shard_t peer) const {
      if (peer == primary_shard()) {
        return &get_local_missing();
      } else {
        std::map<pg_shard_t, pg_missing_t>::const_iterator i =
          get_shard_missing().find(peer);
        if (i == get_shard_missing().end()) {
          return nullptr;
        } else {
          return &(i->second);
        }
      }
    }
    virtual const pg_missing_const_i &get_shard_missing(pg_shard_t peer) const {
      auto m = maybe_get_shard_missing(peer);
      ceph_assert(m);
      return *m;
    }
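
    // Example (illustrative only, not in the original header): a backend can
    // consult these accessors to decide whether a peer still needs an object
    // before pushing to it, e.g.
    //
    //   if (get_shard_missing(peer).is_missing(soid)) {
    //     // peer still needs soid recovered
    //   }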

    virtual const std::map<pg_shard_t, pg_info_t> &get_shard_info() const = 0;
    virtual const pg_info_t &get_shard_info(pg_shard_t peer) const {
      if (peer == primary_shard()) {
        return get_info();
      } else {
        std::map<pg_shard_t, pg_info_t>::const_iterator i =
          get_shard_info().find(peer);
        ceph_assert(i != get_shard_info().end());
        return i->second;
      }
    }

    virtual const PGLog &get_log() const = 0;
    virtual bool pgb_is_primary() const = 0;
    virtual const OSDMapRef& pgb_get_osdmap() const = 0;
    virtual epoch_t pgb_get_osdmap_epoch() const = 0;
    virtual const pg_info_t &get_info() const = 0;
    virtual const pg_pool_t &get_pool() const = 0;

    virtual ObjectContextRef get_obc(
      const hobject_t &hoid,
      const std::map<std::string, ceph::buffer::list, std::less<>> &attrs) = 0;

    virtual bool try_lock_for_read(
      const hobject_t &hoid,
      ObcLockManager &manager) = 0;

    virtual void release_locks(ObcLockManager &manager) = 0;

    virtual void op_applied(
      const eversion_t &applied_version) = 0;

    virtual bool should_send_op(
      pg_shard_t peer,
      const hobject_t &hoid) = 0;

    virtual bool pg_is_undersized() const = 0;
    virtual bool pg_is_repair() const = 0;

    virtual void log_operation(
      std::vector<pg_log_entry_t>&& logv,
      const std::optional<pg_hit_set_history_t> &hset_history,
      const eversion_t &trim_to,
      const eversion_t &roll_forward_to,
      const eversion_t &min_last_complete_ondisk,
      bool transaction_applied,
      ObjectStore::Transaction &t,
      bool async = false) = 0;

    virtual void pgb_set_object_snap_mapping(
      const hobject_t &soid,
      const std::set<snapid_t> &snaps,
      ObjectStore::Transaction *t) = 0;

    virtual void pgb_clear_object_snap_mapping(
      const hobject_t &soid,
      ObjectStore::Transaction *t) = 0;

    virtual void update_peer_last_complete_ondisk(
      pg_shard_t fromosd,
      eversion_t lcod) = 0;

    virtual void update_last_complete_ondisk(
      eversion_t lcod) = 0;

    virtual void update_stats(
      const pg_stat_t &stat) = 0;

    virtual void schedule_recovery_work(
      GenContext<ThreadPool::TPHandle&> *c) = 0;

    virtual pg_shard_t whoami_shard() const = 0;
    int whoami() const {
      return whoami_shard().osd;
    }
    spg_t whoami_spg_t() const {
      return get_info().pgid;
    }

    virtual spg_t primary_spg_t() const = 0;
    virtual pg_shard_t primary_shard() const = 0;
    virtual uint64_t min_peer_features() const = 0;
    virtual uint64_t min_upacting_features() const = 0;
    virtual hobject_t get_temp_recovery_object(const hobject_t& target,
                                               eversion_t version) = 0;

    virtual void send_message_osd_cluster(
      int peer, Message *m, epoch_t from_epoch) = 0;
    virtual void send_message_osd_cluster(
      std::vector<std::pair<int, Message*>>& messages, epoch_t from_epoch) = 0;
    virtual void send_message_osd_cluster(
      MessageRef, Connection *con) = 0;
    virtual void send_message_osd_cluster(
      Message *m, const ConnectionRef& con) = 0;
    virtual ConnectionRef get_con_osd_cluster(int peer, epoch_t from_epoch) = 0;
    virtual entity_name_t get_cluster_msgr_name() = 0;

    virtual PerfCounters *get_logger() = 0;

    virtual ceph_tid_t get_tid() = 0;

    virtual OstreamTemp clog_error() = 0;
    virtual OstreamTemp clog_warn() = 0;

    virtual bool check_failsafe_full() = 0;

    virtual bool pg_is_repair() = 0;
    virtual void inc_osd_stat_repaired() = 0;
    virtual bool pg_is_remote_backfilling() = 0;
    virtual void pg_add_local_num_bytes(int64_t num_bytes) = 0;
    virtual void pg_sub_local_num_bytes(int64_t num_bytes) = 0;
    virtual void pg_add_num_bytes(int64_t num_bytes) = 0;
    virtual void pg_sub_num_bytes(int64_t num_bytes) = 0;
    virtual bool maybe_preempt_replica_scrub(const hobject_t& oid) = 0;
    virtual ~Listener() {}
  };
  Listener *parent;
  Listener *get_parent() const { return parent; }
  PGBackend(CephContext* cct, Listener *l, ObjectStore *store, const coll_t &coll,
            ObjectStore::CollectionHandle &ch) :
    cct(cct),
    store(store),
    coll(coll),
    ch(ch),
    parent(l) {}
  bool is_primary() const { return get_parent()->pgb_is_primary(); }
  const OSDMapRef& get_osdmap() const { return get_parent()->pgb_get_osdmap(); }
  epoch_t get_osdmap_epoch() const { return get_parent()->pgb_get_osdmap_epoch(); }
  const pg_info_t &get_info() { return get_parent()->get_info(); }

  std::ostream& gen_prefix(std::ostream& out) const {
    return parent->gen_dbg_prefix(out);
  }

  /**
   * RecoveryHandle
   *
   * We may want to recover multiple objects in the same set of
   * messages.  RecoveryHandle is an interface for the opaque
   * object used by the implementation to store the details of
   * the pending recovery operations.
   */
  struct RecoveryHandle {
    bool cache_dont_need;
    std::map<pg_shard_t, std::vector<std::pair<hobject_t, eversion_t> > > deletes;

    RecoveryHandle(): cache_dont_need(false) {}
    virtual ~RecoveryHandle() {}
  };

  /// Get a fresh recovery operation
  virtual RecoveryHandle *open_recovery_op() = 0;

  /// run_recovery_op: finish the operation represented by h
  virtual void run_recovery_op(
    RecoveryHandle *h,  ///< [in] op to finish
    int priority        ///< [in] msg priority
    ) = 0;

  void recover_delete_object(const hobject_t &oid, eversion_t v,
                             RecoveryHandle *h);
  void send_recovery_deletes(int prio,
                             const std::map<pg_shard_t, std::vector<std::pair<hobject_t, eversion_t> > > &deletes);

  /**
   * recover_object
   *
   * Triggers a recovery operation on the specified hobject_t
   * onreadable must be called before onwriteable
   *
   * On each replica (primary included), get_parent()->on_not_missing()
   * must be called when the transaction finalizing the recovery
   * is queued.  Similarly, get_parent()->on_readable() must be called
   * when the transaction is applied in the backing store.
   *
   * get_parent()->on_not_degraded() should be called on the primary
   * when writes can resume on the object.
   *
   * obc may be NULL if the primary lacks the object.
   *
   * head may be NULL only if the head/snapdir is missing
   *
   * @param missing [in] set of info, missing pairs for queried nodes
   * @param overlaps [in] mapping of object to file offset overlaps
   */
  virtual int recover_object(
    const hobject_t &hoid, ///< [in] object to recover
    eversion_t v,          ///< [in] version to recover
    ObjectContextRef head, ///< [in] context of the head/snapdir object
    ObjectContextRef obc,  ///< [in] context of the object
    RecoveryHandle *h      ///< [in,out] handle to attach recovery op to
    ) = 0;
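
  // Illustrative sketch (assumption, not part of this header): on the primary,
  // recovery of several objects is typically batched through one handle so the
  // implementation can combine them into a single set of recovery messages.
  // "to_recover", "need", "head_obc", "obc" and "priority" are hypothetical
  // variables used only for this example.
  //
  //   RecoveryHandle *h = open_recovery_op();
  //   for (const hobject_t &soid : to_recover)
  //     recover_object(soid, need, head_obc, obc, h);
  //   run_recovery_op(h, priority);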

  /**
   * true if PGBackend can handle this message while inactive
   *
   * If it returns true, handle_message *must* also return true
   */
  virtual bool can_handle_while_inactive(OpRequestRef op) = 0;

  /// gives PGBackend a crack at an incoming message
  bool handle_message(
    OpRequestRef op ///< [in] message received
    ); ///< @return true if the message was handled

  /// the variant of handle_message that is overridden by child classes
  virtual bool _handle_message(OpRequestRef op) = 0;
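
  // Illustrative dispatch (hypothetical caller code, not from this file): the
  // parent offers each incoming op to the backend first and only handles it
  // itself when the backend declines.
  //
  //   if (!pgbackend->handle_message(op))
  //     do_request_locally(op);   // hypothetical parent-side fallback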

  virtual void check_recovery_sources(const OSDMapRef& osdmap) = 0;


  /**
   * clean up any temporary on-disk state due to a pg interval change
   */
  void on_change_cleanup(ObjectStore::Transaction *t);
  /**
   * implementation should clear itself, contexts blessed prior to on_change
   * won't be called after on_change()
   */
  virtual void on_change() = 0;
  virtual void clear_recovery_state() = 0;

  virtual IsPGRecoverablePredicate *get_is_recoverable_predicate() const = 0;
  virtual IsPGReadablePredicate *get_is_readable_predicate() const = 0;
  virtual int get_ec_data_chunk_count() const { return 0; };
  virtual int get_ec_stripe_chunk_size() const { return 0; };

  virtual void dump_recovery_info(ceph::Formatter *f) const = 0;

private:
  std::set<hobject_t> temp_contents;
public:
  // Track contents of temp collection, clear on reset
  void add_temp_obj(const hobject_t &oid) {
    temp_contents.insert(oid);
  }
  void add_temp_objs(const std::set<hobject_t> &oids) {
    temp_contents.insert(oids.begin(), oids.end());
  }
  void clear_temp_obj(const hobject_t &oid) {
    temp_contents.erase(oid);
  }
  void clear_temp_objs(const std::set<hobject_t> &oids) {
    for (std::set<hobject_t>::const_iterator i = oids.begin();
         i != oids.end();
         ++i) {
      temp_contents.erase(*i);
    }
  }
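
  // Illustrative sketch (assumption): during a push, an implementation
  // allocates a temporary object from the parent, tracks it here, and renames
  // it over the target once the data is complete, e.g.
  //
  //   hobject_t tmp = get_parent()->get_temp_recovery_object(target, version);
  //   add_temp_obj(tmp);
  //   // ... write the pushed data into tmp ...
  //   t->collection_move_rename(coll, ghobject_t(tmp), coll, ghobject_t(target));
  //   clear_temp_obj(tmp);
  //
  // Anything still tracked at interval change is removed by on_change_cleanup().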

  virtual ~PGBackend() {}

  /// execute implementation specific transaction
  virtual void submit_transaction(
    const hobject_t &hoid,               ///< [in] object
    const object_stat_sum_t &delta_stats,///< [in] stat change
    const eversion_t &at_version,        ///< [in] version
    PGTransactionUPtr &&t,               ///< [in] trans to execute (move)
    const eversion_t &trim_to,           ///< [in] trim log to here
    const eversion_t &min_last_complete_ondisk, ///< [in] lower bound on
                                                ///  committed version
    std::vector<pg_log_entry_t>&& log_entries,  ///< [in] log entries for t
    /// [in] hitset history (if updated with this transaction)
    std::optional<pg_hit_set_history_t> &hset_history,
    Context *on_all_commit,              ///< [in] called when all commit
    ceph_tid_t tid,                      ///< [in] tid
    osd_reqid_t reqid,                   ///< [in] reqid
    OpRequestRef op                      ///< [in] op
    ) = 0;
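
  // Illustrative call (hypothetical values, not from this file): the parent
  // typically packages a client write as a PGTransaction plus its log entries
  // and hands both off in one call; on_all_commit fires once every shard has
  // committed.
  //
  //   submit_transaction(obc->obs.oi.soid, delta_stats, at_version,
  //                      std::move(txn), trim_to, min_last_complete_ondisk,
  //                      std::move(log_entries), hset_history,
  //                      on_all_commit, tid, reqid, op);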

  /// submit callback to be called in order with pending writes
  virtual void call_write_ordered(std::function<void(void)> &&cb) = 0;

  void try_stash(
    const hobject_t &hoid,
    version_t v,
    ObjectStore::Transaction *t);

  void rollback(
    const pg_log_entry_t &entry,
    ObjectStore::Transaction *t);
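
  // (Descriptive note, an interpretation rather than part of the original
  // header: rollback() is expected to walk the rollback information recorded
  // with the log entry and undo it via the rollback_* helpers declared below,
  // while trim()/rollforward() discard that information once it can no longer
  // be needed.)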

  friend class LRBTrimmer;
  void rollforward(
    const pg_log_entry_t &entry,
    ObjectStore::Transaction *t);

  void trim(
    const pg_log_entry_t &entry,
    ObjectStore::Transaction *t);

  void remove(
    const hobject_t &hoid,
    ObjectStore::Transaction *t);

protected:

  void handle_recovery_delete(OpRequestRef op);
  void handle_recovery_delete_reply(OpRequestRef op);

  /// Reapply old attributes
  void rollback_setattrs(
    const hobject_t &hoid,
    std::map<std::string, std::optional<ceph::buffer::list> > &old_attrs,
    ObjectStore::Transaction *t);

  /// Truncate object to rollback append
  virtual void rollback_append(
    const hobject_t &hoid,
    uint64_t old_size,
    ObjectStore::Transaction *t);

  /// Unstash object to rollback stash
  void rollback_stash(
    const hobject_t &hoid,
    version_t old_version,
    ObjectStore::Transaction *t);

  /// Unstash object to rollback stash
  void rollback_try_stash(
    const hobject_t &hoid,
    version_t old_version,
    ObjectStore::Transaction *t);

  /// Delete object to rollback create
  void rollback_create(
    const hobject_t &hoid,
    ObjectStore::Transaction *t) {
    remove(hoid, t);
  }

  /// Clone the extents back into place
  void rollback_extents(
    version_t gen,
    const std::vector<std::pair<uint64_t, uint64_t> > &extents,
    const hobject_t &hoid,
    ObjectStore::Transaction *t);
public:

  /// Trim object stashed at version
  void trim_rollback_object(
    const hobject_t &hoid,
    version_t gen,
    ObjectStore::Transaction *t);

  /// List objects in collection
  int objects_list_partial(
    const hobject_t &begin,
    int min,
    int max,
    std::vector<hobject_t> *ls,
    hobject_t *next);
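
  // Example (illustrative sketch, not part of this header): paging through a
  // PG's objects in bounded chunks; handle_batch is a hypothetical callback.
  //
  //   hobject_t cur, next;
  //   do {
  //     std::vector<hobject_t> ls;
  //     int r = objects_list_partial(cur, 64, 128, &ls, &next);
  //     if (r < 0)
  //       break;
  //     handle_batch(ls);
  //     cur = next;
  //   } while (!next.is_max());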

  int objects_list_range(
    const hobject_t &start,
    const hobject_t &end,
    std::vector<hobject_t> *ls,
    std::vector<ghobject_t> *gen_obs=0);

  int objects_get_attr(
    const hobject_t &hoid,
    const std::string &attr,
    ceph::buffer::list *out);

  virtual int objects_get_attrs(
    const hobject_t &hoid,
    std::map<std::string, ceph::buffer::list, std::less<>> *out);

  virtual int objects_read_sync(
    const hobject_t &hoid,
    uint64_t off,
    uint64_t len,
    uint32_t op_flags,
    ceph::buffer::list *bl) = 0;

  virtual int objects_readv_sync(
    const hobject_t &hoid,
    std::map<uint64_t, uint64_t>&& m,
    uint32_t op_flags,
    ceph::buffer::list *bl) {
    return -EOPNOTSUPP;
  }

  virtual void objects_read_async(
    const hobject_t &hoid,
    const std::list<std::pair<boost::tuple<uint64_t, uint64_t, uint32_t>,
                    std::pair<ceph::buffer::list*, Context*> > > &to_read,
    Context *on_complete, bool fast_read = false) = 0;

  virtual bool auto_repair_supported() const = 0;
  int be_scan_list(
    ScrubMap &map,
    ScrubMapBuilder &pos);
  bool be_compare_scrub_objects(
    pg_shard_t auth_shard,
    const ScrubMap::object &auth,
    const object_info_t& auth_oi,
    const ScrubMap::object &candidate,
    shard_info_wrapper& shard_error,
    inconsistent_obj_wrapper &result,
    std::ostream &errorstream,
    bool has_snapset);
  std::map<pg_shard_t, ScrubMap *>::const_iterator be_select_auth_object(
    const hobject_t &obj,
    const std::map<pg_shard_t,ScrubMap*> &maps,
    object_info_t *auth_oi,
    std::map<pg_shard_t, shard_info_wrapper> &shard_map,
    bool &digest_match,
    spg_t pgid,
    std::ostream &errorstream);
  void be_compare_scrubmaps(
    const std::map<pg_shard_t,ScrubMap*> &maps,
    const std::set<hobject_t> &master_set,
    bool repair,
    std::map<hobject_t, std::set<pg_shard_t>> &missing,
    std::map<hobject_t, std::set<pg_shard_t>> &inconsistent,
    std::map<hobject_t, std::list<pg_shard_t>> &authoritative,
    std::map<hobject_t, std::pair<std::optional<uint32_t>,
                                  std::optional<uint32_t>>> &missing_digest,
    int &shallow_errors, int &deep_errors,
    Scrub::Store *store,
    const spg_t& pgid,
    const std::vector<int> &acting,
    std::ostream &errorstream);
  virtual uint64_t be_get_ondisk_size(
    uint64_t logical_size) = 0;
  virtual int be_deep_scrub(
    const hobject_t &oid,
    ScrubMap &map,
    ScrubMapBuilder &pos,
    ScrubMap::object &o) = 0;
  void be_omap_checks(
    const std::map<pg_shard_t,ScrubMap*> &maps,
    const std::set<hobject_t> &master_set,
    omap_stat_t& omap_stats,
    std::ostream &warnstream) const;

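  /// Factory: construct the backend implementation appropriate for the pool's
  /// type (replicated vs. erasure-coded); for erasure-coded pools the
  /// erasure-code profile is supplied via @p profile.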
  static PGBackend *build_pg_backend(
    const pg_pool_t &pool,
    const std::map<std::string,std::string>& profile,
    Listener *l,
    coll_t coll,
    ObjectStore::CollectionHandle &ch,
    ObjectStore *store,
    CephContext *cct);
};

#endif