// Source: ceph.git — ceph/src/osd/PGBackend.h (sources updated to v12.1.1)
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3 /*
4 * Ceph - scalable distributed file system
5 *
6 * Copyright (C) 2013,2014 Inktank Storage, Inc.
7 * Copyright (C) 2013,2014 Cloudwatt <libre.licensing@cloudwatt.com>
8 *
9 * Author: Loic Dachary <loic@dachary.org>
10 *
11 * This is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License version 2.1, as published by the Free Software
14 * Foundation. See file COPYING.
15 *
16 */
17
18 #ifndef PGBACKEND_H
19 #define PGBACKEND_H
20
21 #include "osd_types.h"
22 #include "common/WorkQueue.h"
23 #include "include/Context.h"
24 #include "os/ObjectStore.h"
25 #include "common/LogClient.h"
26 #include <string>
27 #include "PGTransaction.h"
28
// Forward declarations — full definitions live elsewhere in the tree.
namespace Scrub {
  class Store;   // persistent store for scrub results
}
// Wrapper types used to report per-shard / per-object scrub inconsistencies.
struct shard_info_wrapper;
struct inconsistent_obj_wrapper;

//forward declaration
class OSDMap;
class PGLog;
// Shared, immutable snapshot of the cluster map.
typedef ceph::shared_ptr<const OSDMap> OSDMapRef;
39
40 /**
41 * PGBackend
42 *
43 * PGBackend defines an interface for logic handling IO and
44 * replication on RADOS objects. The PGBackend implementation
45 * is responsible for:
46 *
47 * 1) Handling client operations
48 * 2) Handling object recovery
49 * 3) Handling object access
50 * 4) Handling scrub, deep-scrub, repair
51 */
class PGBackend {
public:
  CephContext* cct;
protected:
  ObjectStore *store;                 // backing object store; not owned (never freed here)
  const coll_t coll;                  // collection holding this PG's objects
  ObjectStore::CollectionHandle &ch;  // reference: the handle must outlive this backend
public:
  /**
   * Provides interfaces for PGBackend callbacks
   *
   * The intention is that the parent calls into the PGBackend
   * implementation holding a lock and that the callbacks are
   * called under the same locks.
   */
  class Listener {
  public:
    /// Debugging
    virtual DoutPrefixProvider *get_dpp() = 0;

    /// Recovery

    /**
     * Called with the transaction recovering oid
     */
    virtual void on_local_recover(
      const hobject_t &oid,
      const ObjectRecoveryInfo &recovery_info,
      ObjectContextRef obc,
      ObjectStore::Transaction *t
      ) = 0;

    /**
     * Called when transaction recovering oid is durable and
     * applied on all replicas
     */
    virtual void on_global_recover(
      const hobject_t &oid,
      const object_stat_sum_t &stat_diff
      ) = 0;

    /**
     * Called when peer is recovered
     */
    virtual void on_peer_recover(
      pg_shard_t peer,
      const hobject_t &oid,
      const ObjectRecoveryInfo &recovery_info
      ) = 0;

    /// Called as recovery of oid on peer begins
    virtual void begin_peer_recover(
      pg_shard_t peer,
      const hobject_t oid) = 0;

    /// Recovery push of soid involving the given shards failed
    virtual void failed_push(const list<pg_shard_t> &from, const hobject_t &soid) = 0;
    virtual void primary_failed(const hobject_t &soid) = 0;
    virtual bool primary_error(const hobject_t& soid, eversion_t v) = 0;

    /// Cancel an in-flight pull of soid
    virtual void cancel_pull(const hobject_t &soid) = 0;

    /// Fold delta_stats for soid into the PG's statistics
    virtual void apply_stats(
      const hobject_t &soid,
      const object_stat_sum_t &delta_stats) = 0;

    /**
     * Called when a read on the primary fails when pushing
     */
    virtual void on_primary_error(
      const hobject_t &oid,
      eversion_t v
      ) = 0;


    /**
     * Bless a context
     *
     * Wraps a context in whatever outer layers the parent usually
     * uses to call into the PGBackend
     */
    virtual Context *bless_context(Context *c) = 0;
    virtual GenContext<ThreadPool::TPHandle&> *bless_gencontext(
      GenContext<ThreadPool::TPHandle&> *c) = 0;

    /// Messaging and transaction plumbing supplied by the parent
    virtual void send_message(int to_osd, Message *m) = 0;
    virtual void queue_transaction(
      ObjectStore::Transaction&& t,
      OpRequestRef op = OpRequestRef()
      ) = 0;
    virtual void queue_transactions(
      vector<ObjectStore::Transaction>& tls,
      OpRequestRef op = OpRequestRef()
      ) = 0;

    /// Epoch accessors
    virtual epoch_t get_epoch() const = 0;
    virtual epoch_t get_interval_start_epoch() const = 0;
    virtual epoch_t get_last_peering_reset_epoch() const = 0;

    /// Shard-set accessors
    virtual const set<pg_shard_t> &get_actingbackfill_shards() const = 0;
    virtual const set<pg_shard_t> &get_acting_shards() const = 0;
    virtual const set<pg_shard_t> &get_backfill_shards() const = 0;

    /// Prefix used for debug output
    virtual std::string gen_dbg_prefix() const = 0;

    /// Map of missing objects to the set of shards missing them
    virtual const map<hobject_t, set<pg_shard_t>> &get_missing_loc_shards()
      const = 0;

    virtual const pg_missing_tracker_t &get_local_missing() const = 0;
    virtual const map<pg_shard_t, pg_missing_t> &get_shard_missing()
      const = 0;
    /// Missing set for peer, or an empty optional if peer is unknown
    virtual boost::optional<const pg_missing_const_i &> maybe_get_shard_missing(
      pg_shard_t peer) const {
      if (peer == primary_shard()) {
        return get_local_missing();
      } else {
        map<pg_shard_t, pg_missing_t>::const_iterator i =
          get_shard_missing().find(peer);
        if (i == get_shard_missing().end()) {
          return boost::optional<const pg_missing_const_i &>();
        } else {
          return i->second;
        }
      }
    }
    /// Missing set for peer; asserts that peer is known
    virtual const pg_missing_const_i &get_shard_missing(pg_shard_t peer) const {
      auto m = maybe_get_shard_missing(peer);
      assert(m);
      return *m;
    }

    virtual const map<pg_shard_t, pg_info_t> &get_shard_info() const = 0;
    /// pg_info_t for peer; asserts that peer is known
    virtual const pg_info_t &get_shard_info(pg_shard_t peer) const {
      if (peer == primary_shard()) {
        return get_info();
      } else {
        map<pg_shard_t, pg_info_t>::const_iterator i =
          get_shard_info().find(peer);
        assert(i != get_shard_info().end());
        return i->second;
      }
    }

    virtual const PGLog &get_log() const = 0;
    virtual bool pgb_is_primary() const = 0;
    virtual OSDMapRef pgb_get_osdmap() const = 0;
    virtual const pg_info_t &get_info() const = 0;
    virtual const pg_pool_t &get_pool() const = 0;

    /// Obtain an object context for hoid given its attrs
    virtual ObjectContextRef get_obc(
      const hobject_t &hoid,
      const map<string, bufferlist> &attrs) = 0;

    /// Attempt to take a read lock on hoid via manager; @return success
    virtual bool try_lock_for_read(
      const hobject_t &hoid,
      ObcLockManager &manager) = 0;

    /// Release all locks held by manager
    virtual void release_locks(ObcLockManager &manager) = 0;

    /// Notification that applied_version has been applied locally
    virtual void op_applied(
      const eversion_t &applied_version) = 0;

    /// Whether an op for hoid should be sent to peer
    virtual bool should_send_op(
      pg_shard_t peer,
      const hobject_t &hoid) = 0;

    /// Record log entries logv (and optional hitset history) as part of t
    virtual void log_operation(
      const vector<pg_log_entry_t> &logv,
      const boost::optional<pg_hit_set_history_t> &hset_history,
      const eversion_t &trim_to,
      const eversion_t &roll_forward_to,
      bool transaction_applied,
      ObjectStore::Transaction &t) = 0;

    /// Record the object->snaps mapping for soid as part of t
    virtual void pgb_set_object_snap_mapping(
      const hobject_t &soid,
      const set<snapid_t> &snaps,
      ObjectStore::Transaction *t) = 0;

    /// Clear any object->snaps mapping for soid as part of t
    virtual void pgb_clear_object_snap_mapping(
      const hobject_t &soid,
      ObjectStore::Transaction *t) = 0;

    /// Record last_complete_ondisk (lcod) reported by fromosd
    virtual void update_peer_last_complete_ondisk(
      pg_shard_t fromosd,
      eversion_t lcod) = 0;

    /// Record our own last_complete_ondisk
    virtual void update_last_complete_ondisk(
      eversion_t lcod) = 0;

    /// Publish updated PG stats
    virtual void update_stats(
      const pg_stat_t &stat) = 0;

    /// Schedule c to run as deferred recovery work
    virtual void schedule_recovery_work(
      GenContext<ThreadPool::TPHandle&> *c) = 0;

    /// Identity of this OSD/shard
    virtual pg_shard_t whoami_shard() const = 0;
    int whoami() const {
      return whoami_shard().osd;
    }
    spg_t whoami_spg_t() const {
      return get_info().pgid;
    }

    virtual spg_t primary_spg_t() const = 0;
    virtual pg_shard_t primary_shard() const = 0;

    /// Feature bits supported by all current peers
    virtual uint64_t min_peer_features() const = 0;

    /// Name of a temporary object to use while recovering target at version
    virtual hobject_t get_temp_recovery_object(const hobject_t& target,
                                               eversion_t version) = 0;

    /// Cluster-messenger send/connection helpers
    virtual void send_message_osd_cluster(
      int peer, Message *m, epoch_t from_epoch) = 0;
    virtual void send_message_osd_cluster(
      Message *m, Connection *con) = 0;
    virtual void send_message_osd_cluster(
      Message *m, const ConnectionRef& con) = 0;
    virtual ConnectionRef get_con_osd_cluster(int peer, epoch_t from_epoch) = 0;
    virtual entity_name_t get_cluster_msgr_name() = 0;

    virtual PerfCounters *get_logger() = 0;

    /// Allocate a new transaction id
    virtual ceph_tid_t get_tid() = 0;

    /// Stream for logging errors to the cluster log
    virtual LogClientTemp clog_error() = 0;

    /// Failsafe-full check; details may be appended to ss
    // NOTE(review): return polarity (true == full?) is defined by the
    // implementation — confirm against PrimaryLogPG before relying on it.
    virtual bool check_failsafe_full(ostream &ss) = 0;

    /// Fullness check against the osdmap for the given shards
    virtual bool check_osdmap_full(const set<pg_shard_t> &missing_on) = 0;

    virtual ~Listener() {}
  };
  Listener *parent;   // callback sink (the owning PG); not owned
  Listener *get_parent() const { return parent; }
  PGBackend(CephContext* cct, Listener *l, ObjectStore *store, coll_t coll,
            ObjectStore::CollectionHandle &ch) :
    cct(cct),
    store(store),
    coll(coll),
    ch(ch),
    parent(l) {}
  bool is_primary() const { return get_parent()->pgb_is_primary(); }
  OSDMapRef get_osdmap() const { return get_parent()->pgb_get_osdmap(); }
  const pg_info_t &get_info() { return get_parent()->get_info(); }

  /// Debug-log prefix, delegated to the parent
  std::string gen_prefix() const {
    return parent->gen_dbg_prefix();
  }

  /**
   * RecoveryHandle
   *
   * We may want to recover multiple objects in the same set of
   * messages.  RecoveryHandle is an interface for the opaque
   * object used by the implementation to store the details of
   * the pending recovery operations.
   */
  struct RecoveryHandle {
    // when true, recovery I/O should hint the store to avoid caching
    bool cache_dont_need;

    RecoveryHandle(): cache_dont_need(false) {}
    virtual ~RecoveryHandle() {}
  };

  /// Get a fresh recovery operation
  virtual RecoveryHandle *open_recovery_op() = 0;

  /// run_recovery_op: finish the operation represented by h
  virtual void run_recovery_op(
    RecoveryHandle *h,     ///< [in] op to finish
    int priority           ///< [in] msg priority
    ) = 0;

  /**
   * recover_object
   *
   * Triggers a recovery operation on the specified hobject_t
   * onreadable must be called before onwriteable
   *
   * On each replica (primary included), get_parent()->on_not_missing()
   * must be called when the transaction finalizing the recovery
   * is queued.  Similarly, get_parent()->on_readable() must be called
   * when the transaction is applied in the backing store.
   *
   * get_parent()->on_not_degraded() should be called on the primary
   * when writes can resume on the object.
   *
   * obc may be NULL if the primary lacks the object.
   *
   * head may be NULL only if the head/snapdir is missing
   *
   * @param missing [in] set of info, missing pairs for queried nodes
   * @param overlaps [in] mapping of object to file offset overlaps
   */
  virtual int recover_object(
    const hobject_t &hoid, ///< [in] object to recover
    eversion_t v,          ///< [in] version to recover
    ObjectContextRef head,  ///< [in] context of the head/snapdir object
    ObjectContextRef obc,  ///< [in] context of the object
    RecoveryHandle *h      ///< [in,out] handle to attach recovery op to
    ) = 0;

  /**
   * true if PGBackend can handle this message while inactive
   *
   * If it returns true, handle_message *must* also return true
   */
  virtual bool can_handle_while_inactive(OpRequestRef op) = 0;

  /// gives PGBackend a crack at an incoming message
  virtual bool handle_message(
    OpRequestRef op ///< [in] message received
    ) = 0; ///< @return true if the message was handled

  /// Re-evaluate in-flight recovery sources against the new osdmap
  virtual void check_recovery_sources(const OSDMapRef& osdmap) = 0;


  /**
   * clean up any temporary on-disk state due to a pg interval change
   */
  void on_change_cleanup(ObjectStore::Transaction *t);
  /**
   * implementation should clear itself, contexts blessed prior to on_change
   * won't be called after on_change()
   */
  virtual void on_change() = 0;
  virtual void clear_recovery_state() = 0;

  virtual void on_flushed() = 0;

  /// Predicates used to decide recoverability/readability for a shard set
  virtual IsPGRecoverablePredicate *get_is_recoverable_predicate() = 0;
  virtual IsPGReadablePredicate *get_is_readable_predicate() = 0;

  /// Dump implementation-specific recovery state into f
  virtual void dump_recovery_info(Formatter *f) const = 0;

private:
  set<hobject_t> temp_contents;   // objects currently in the temp collection
public:
  // Track contents of temp collection, clear on reset
  void add_temp_obj(const hobject_t &oid) {
    temp_contents.insert(oid);
  }
  void add_temp_objs(const set<hobject_t> &oids) {
    temp_contents.insert(oids.begin(), oids.end());
  }
  void clear_temp_obj(const hobject_t &oid) {
    temp_contents.erase(oid);
  }
  void clear_temp_objs(const set<hobject_t> &oids) {
    for (set<hobject_t>::const_iterator i = oids.begin();
         i != oids.end();
         ++i) {
      temp_contents.erase(*i);
    }
  }

  virtual ~PGBackend() {}

  /// execute implementation specific transaction
  virtual void submit_transaction(
    const hobject_t &hoid,               ///< [in] object
    const object_stat_sum_t &delta_stats,///< [in] stat change
    const eversion_t &at_version,        ///< [in] version
    PGTransactionUPtr &&t,               ///< [in] trans to execute (move)
    const eversion_t &trim_to,           ///< [in] trim log to here
    const eversion_t &roll_forward_to,   ///< [in] trim rollback info to here
    const vector<pg_log_entry_t> &log_entries, ///< [in] log entries for t
    /// [in] hitset history (if updated with this transaction)
    boost::optional<pg_hit_set_history_t> &hset_history,
    Context *on_local_applied_sync,      ///< [in] called when applied locally
    Context *on_all_applied,             ///< [in] called when all acked
    Context *on_all_commit,              ///< [in] called when all commit
    ceph_tid_t tid,                      ///< [in] tid
    osd_reqid_t reqid,                   ///< [in] reqid
    OpRequestRef op                      ///< [in] op
    ) = 0;

  /// submit callback to be called in order with pending writes
  virtual void call_write_ordered(std::function<void(void)> &&cb) = 0;

  /// Stash hoid at version v as part of t
  void try_stash(
    const hobject_t &hoid,
    version_t v,
    ObjectStore::Transaction *t);

  /// Undo the effects of entry using its recorded rollback info
  void rollback(
    const pg_log_entry_t &entry,
    ObjectStore::Transaction *t);

  friend class LRBTrimmer;   // granted access to the protected rollback helpers
  /// Make the side effects recorded in entry irreversible
  void rollforward(
    const pg_log_entry_t &entry,
    ObjectStore::Transaction *t);

  /// Discard rollback state associated with entry
  void trim(
    const pg_log_entry_t &entry,
    ObjectStore::Transaction *t);

  /// Remove hoid as part of t
  void remove(
    const hobject_t &hoid,
    ObjectStore::Transaction *t);

protected:
  /// Reapply old attributes
  void rollback_setattrs(
    const hobject_t &hoid,
    map<string, boost::optional<bufferlist> > &old_attrs,
    ObjectStore::Transaction *t);

  /// Truncate object to rollback append
  virtual void rollback_append(
    const hobject_t &hoid,
    uint64_t old_size,
    ObjectStore::Transaction *t);

  /// Unstash object to rollback stash
  void rollback_stash(
    const hobject_t &hoid,
    version_t old_version,
    ObjectStore::Transaction *t);

  /// Unstash object to rollback try_stash
  void rollback_try_stash(
    const hobject_t &hoid,
    version_t old_version,
    ObjectStore::Transaction *t);

  /// Delete object to rollback create
  void rollback_create(
    const hobject_t &hoid,
    ObjectStore::Transaction *t) {
    remove(hoid, t);
  }

  /// Clone the extents back into place
  void rollback_extents(
    version_t gen,
    const vector<pair<uint64_t, uint64_t> > &extents,
    const hobject_t &hoid,
    ObjectStore::Transaction *t);
public:

  /// Trim object stashed at version
  void trim_rollback_object(
    const hobject_t &hoid,
    version_t gen,
    ObjectStore::Transaction *t);

  /// List objects in collection
  int objects_list_partial(
    const hobject_t &begin,  ///< [in] list objects >= begin
    int min,                 ///< [in] min to list
    int max,                 ///< [in] max to list
    vector<hobject_t> *ls,   ///< [out] objects listed
    hobject_t *next);        ///< [out] next position to list from

  /// List objects in [start, end)
  int objects_list_range(
    const hobject_t &start,
    const hobject_t &end,
    snapid_t seq,
    vector<hobject_t> *ls,
    vector<ghobject_t> *gen_obs=0);

  /// Read a single object attribute into out
  int objects_get_attr(
    const hobject_t &hoid,
    const string &attr,
    bufferlist *out);

  /// Read all attributes of hoid into out
  virtual int objects_get_attrs(
    const hobject_t &hoid,
    map<string, bufferlist> *out);

  /// Synchronous object read of [off, off+len)
  virtual int objects_read_sync(
    const hobject_t &hoid,
    uint64_t off,
    uint64_t len,
    uint32_t op_flags,
    bufferlist *bl) = 0;

  /// Asynchronous object read; on_complete fires when all extents are read
  virtual void objects_read_async(
    const hobject_t &hoid,
    const list<pair<boost::tuple<uint64_t, uint64_t, uint32_t>,
                    pair<bufferlist*, Context*> > > &to_read,
    Context *on_complete, bool fast_read = false) = 0;

  /// Scrub support
  virtual bool scrub_supported() = 0;
  virtual bool auto_repair_supported() const = 0;
  /// Populate map with scrub info for the objects in ls (deep if requested)
  void be_scan_list(
    ScrubMap &map, const vector<hobject_t> &ls, bool deep, uint32_t seed,
    ThreadPool::TPHandle &handle);
  /// Compare candidate against the authoritative copy, recording errors
  bool be_compare_scrub_objects(
    pg_shard_t auth_shard,
    const ScrubMap::object &auth,
    const object_info_t& auth_oi,
    const ScrubMap::object &candidate,
    shard_info_wrapper& shard_error,
    inconsistent_obj_wrapper &result,
    ostream &errorstream);
  /// Choose the authoritative shard for obj among maps
  map<pg_shard_t, ScrubMap *>::const_iterator be_select_auth_object(
    const hobject_t &obj,
    const map<pg_shard_t,ScrubMap*> &maps,
    object_info_t *auth_oi,
    map<pg_shard_t, shard_info_wrapper> &shard_map,
    inconsistent_obj_wrapper &object_error);
  /// Cross-check all shard scrub maps, accumulating inconsistencies
  void be_compare_scrubmaps(
    const map<pg_shard_t,ScrubMap*> &maps,
    bool repair,
    map<hobject_t, set<pg_shard_t>> &missing,
    map<hobject_t, set<pg_shard_t>> &inconsistent,
    map<hobject_t, list<pg_shard_t>> &authoritative,
    map<hobject_t, pair<uint32_t,uint32_t>> &missing_digest,
    int &shallow_errors, int &deep_errors,
    Scrub::Store *store,
    const spg_t& pgid,
    const vector<int> &acting,
    ostream &errorstream);
  /// On-disk size corresponding to a logical size (backend-specific)
  virtual uint64_t be_get_ondisk_size(
    uint64_t logical_size) = 0;
  /// Deep-scrub a single object, filling o
  virtual void be_deep_scrub(
    const hobject_t &poid,
    uint32_t seed,
    ScrubMap::object &o,
    ThreadPool::TPHandle &handle) = 0;

  /// Factory: construct the backend implementation appropriate for pool
  static PGBackend *build_pg_backend(
    const pg_pool_t &pool,
    const OSDMapRef curmap,
    Listener *l,
    coll_t coll,
    ObjectStore::CollectionHandle &ch,
    ObjectStore *store,
    CephContext *cct);
};
583
584 #endif