// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2013,2014 Inktank Storage, Inc.
 * Copyright (C) 2013,2014 Cloudwatt <libre.licensing@cloudwatt.com>
 *
 * Author: Loic Dachary <loic@dachary.org>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 *
 */

#ifndef PGBACKEND_H
#define PGBACKEND_H

#include "osd_types.h"
#include "common/WorkQueue.h"
#include "include/Context.h"
#include "os/ObjectStore.h"
#include "common/LogClient.h"
#include <string>
#include "PGTransaction.h"

namespace Scrub {
  class Store;
}
struct shard_info_wrapper;
struct inconsistent_obj_wrapper;

// forward declarations
class OSDMap;
class PGLog;
typedef ceph::shared_ptr<const OSDMap> OSDMapRef;

/**
 * PGBackend
 *
 * PGBackend defines an interface for logic handling IO and
 * replication on RADOS objects. The PGBackend implementation
 * is responsible for:
 *
 * 1) Handling client operations
 * 2) Handling object recovery
 * 3) Handling object access
 * 4) Handling scrub, deep-scrub, repair
 */
class PGBackend {
public:
  CephContext* cct;
protected:
  ObjectStore *store;
  const coll_t coll;
  ObjectStore::CollectionHandle &ch;
public:
  /**
   * Provides interfaces for PGBackend callbacks
   *
   * The intention is that the parent calls into the PGBackend
   * implementation holding a lock and that the callbacks are
   * called under the same locks.
   */
  class Listener {
  public:
    /// Debugging
    virtual DoutPrefixProvider *get_dpp() = 0;

    /// Recovery

    /**
     * Called with the transaction that will recover oid
     */
    virtual void on_local_recover(
      const hobject_t &oid,
      const ObjectRecoveryInfo &recovery_info,
      ObjectContextRef obc,
      bool is_delete,
      ObjectStore::Transaction *t
      ) = 0;

    /**
     * Called when the transaction recovering oid is durable and
     * has been applied on all replicas
     */
    virtual void on_global_recover(
      const hobject_t &oid,
      const object_stat_sum_t &stat_diff,
      bool is_delete
      ) = 0;

    /**
     * Called when a peer has recovered the object
     */
    virtual void on_peer_recover(
      pg_shard_t peer,
      const hobject_t &oid,
      const ObjectRecoveryInfo &recovery_info
      ) = 0;

    virtual void begin_peer_recover(
      pg_shard_t peer,
      const hobject_t oid) = 0;

    virtual void failed_push(const list<pg_shard_t> &from, const hobject_t &soid) = 0;
    virtual void primary_failed(const hobject_t &soid) = 0;
    virtual bool primary_error(const hobject_t& soid, eversion_t v) = 0;
    virtual void cancel_pull(const hobject_t &soid) = 0;

    virtual void apply_stats(
      const hobject_t &soid,
      const object_stat_sum_t &delta_stats) = 0;

    /**
     * Called when a read on the primary fails while pushing
     */
    virtual void on_primary_error(
      const hobject_t &oid,
      eversion_t v
      ) = 0;

    virtual void remove_missing_object(const hobject_t &oid,
                                       eversion_t v,
                                       Context *on_complete) = 0;

    /**
     * Bless a context
     *
     * Wraps a context in whatever outer layers the parent usually
     * uses to call into the PGBackend
     */
    virtual Context *bless_context(Context *c) = 0;
    virtual GenContext<ThreadPool::TPHandle&> *bless_gencontext(
      GenContext<ThreadPool::TPHandle&> *c) = 0;
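
    // Example (hypothetical sketch, not part of the interface): a backend
    // implementation would typically wrap its own completion callbacks with
    // bless_context() before registering them, so that the parent runs them
    // under its usual locks and epoch checks; C_OnRecoveryApplied below is
    // illustrative only:
    //
    //   Context *c = get_parent()->bless_context(
    //     new C_OnRecoveryApplied(this, oid));
    //   t->register_on_applied(c);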

    virtual void send_message(int to_osd, Message *m) = 0;
    virtual void queue_transaction(
      ObjectStore::Transaction&& t,
      OpRequestRef op = OpRequestRef()
      ) = 0;
    virtual void queue_transactions(
      vector<ObjectStore::Transaction>& tls,
      OpRequestRef op = OpRequestRef()
      ) = 0;
    virtual epoch_t get_epoch() const = 0;
    virtual epoch_t get_interval_start_epoch() const = 0;
    virtual epoch_t get_last_peering_reset_epoch() const = 0;

    virtual const set<pg_shard_t> &get_actingbackfill_shards() const = 0;
    virtual const set<pg_shard_t> &get_acting_shards() const = 0;
    virtual const set<pg_shard_t> &get_backfill_shards() const = 0;

    virtual std::string gen_dbg_prefix() const = 0;

    virtual const map<hobject_t, set<pg_shard_t>> &get_missing_loc_shards()
      const = 0;

    virtual const pg_missing_tracker_t &get_local_missing() const = 0;
    virtual const map<pg_shard_t, pg_missing_t> &get_shard_missing()
      const = 0;
    virtual boost::optional<const pg_missing_const_i &> maybe_get_shard_missing(
      pg_shard_t peer) const {
      if (peer == primary_shard()) {
        return get_local_missing();
      } else {
        map<pg_shard_t, pg_missing_t>::const_iterator i =
          get_shard_missing().find(peer);
        if (i == get_shard_missing().end()) {
          return boost::optional<const pg_missing_const_i &>();
        } else {
          return i->second;
        }
      }
    }
    virtual const pg_missing_const_i &get_shard_missing(pg_shard_t peer) const {
      auto m = maybe_get_shard_missing(peer);
      assert(m);
      return *m;
    }

    virtual const map<pg_shard_t, pg_info_t> &get_shard_info() const = 0;
    virtual const pg_info_t &get_shard_info(pg_shard_t peer) const {
      if (peer == primary_shard()) {
        return get_info();
      } else {
        map<pg_shard_t, pg_info_t>::const_iterator i =
          get_shard_info().find(peer);
        assert(i != get_shard_info().end());
        return i->second;
      }
    }

    virtual const PGLog &get_log() const = 0;
    virtual bool pgb_is_primary() const = 0;
    virtual OSDMapRef pgb_get_osdmap() const = 0;
    virtual const pg_info_t &get_info() const = 0;
    virtual const pg_pool_t &get_pool() const = 0;

    virtual ObjectContextRef get_obc(
      const hobject_t &hoid,
      const map<string, bufferlist> &attrs) = 0;

    virtual bool try_lock_for_read(
      const hobject_t &hoid,
      ObcLockManager &manager) = 0;

    virtual void release_locks(ObcLockManager &manager) = 0;

    virtual void op_applied(
      const eversion_t &applied_version) = 0;

    virtual bool should_send_op(
      pg_shard_t peer,
      const hobject_t &hoid) = 0;

    virtual void log_operation(
      const vector<pg_log_entry_t> &logv,
      const boost::optional<pg_hit_set_history_t> &hset_history,
      const eversion_t &trim_to,
      const eversion_t &roll_forward_to,
      bool transaction_applied,
      ObjectStore::Transaction &t) = 0;

    virtual void pgb_set_object_snap_mapping(
      const hobject_t &soid,
      const set<snapid_t> &snaps,
      ObjectStore::Transaction *t) = 0;

    virtual void pgb_clear_object_snap_mapping(
      const hobject_t &soid,
      ObjectStore::Transaction *t) = 0;

    virtual void update_peer_last_complete_ondisk(
      pg_shard_t fromosd,
      eversion_t lcod) = 0;

    virtual void update_last_complete_ondisk(
      eversion_t lcod) = 0;

    virtual void update_stats(
      const pg_stat_t &stat) = 0;

    virtual void schedule_recovery_work(
      GenContext<ThreadPool::TPHandle&> *c) = 0;

    virtual pg_shard_t whoami_shard() const = 0;
    int whoami() const {
      return whoami_shard().osd;
    }
    spg_t whoami_spg_t() const {
      return get_info().pgid;
    }

    virtual spg_t primary_spg_t() const = 0;
    virtual pg_shard_t primary_shard() const = 0;

    virtual uint64_t min_peer_features() const = 0;

    virtual hobject_t get_temp_recovery_object(const hobject_t& target,
                                               eversion_t version) = 0;

    virtual void send_message_osd_cluster(
      int peer, Message *m, epoch_t from_epoch) = 0;
    virtual void send_message_osd_cluster(
      Message *m, Connection *con) = 0;
    virtual void send_message_osd_cluster(
      Message *m, const ConnectionRef& con) = 0;
    virtual ConnectionRef get_con_osd_cluster(int peer, epoch_t from_epoch) = 0;
    virtual entity_name_t get_cluster_msgr_name() = 0;

    virtual PerfCounters *get_logger() = 0;

    virtual ceph_tid_t get_tid() = 0;

    virtual LogClientTemp clog_error() = 0;
    virtual LogClientTemp clog_warn() = 0;

    virtual bool check_failsafe_full(ostream &ss) = 0;

    virtual bool check_osdmap_full(const set<pg_shard_t> &missing_on) = 0;

    virtual ~Listener() {}
  };
  Listener *parent;
  Listener *get_parent() const { return parent; }
  PGBackend(CephContext* cct, Listener *l, ObjectStore *store, coll_t coll,
            ObjectStore::CollectionHandle &ch) :
    cct(cct),
    store(store),
    coll(coll),
    ch(ch),
    parent(l) {}
  bool is_primary() const { return get_parent()->pgb_is_primary(); }
  OSDMapRef get_osdmap() const { return get_parent()->pgb_get_osdmap(); }
  const pg_info_t &get_info() { return get_parent()->get_info(); }

  std::string gen_prefix() const {
    return parent->gen_dbg_prefix();
  }

  /**
   * RecoveryHandle
   *
   * We may want to recover multiple objects in the same set of
   * messages. RecoveryHandle is an interface for the opaque
   * object used by the implementation to store the details of
   * the pending recovery operations.
   */
  struct RecoveryHandle {
    bool cache_dont_need;
    map<pg_shard_t, vector<pair<hobject_t, eversion_t> > > deletes;

    RecoveryHandle(): cache_dont_need(false) {}
    virtual ~RecoveryHandle() {}
  };

  /// Get a fresh recovery operation
  virtual RecoveryHandle *open_recovery_op() = 0;

  /// run_recovery_op: finish the operation represented by h
  virtual void run_recovery_op(
    RecoveryHandle *h,  ///< [in] op to finish
    int priority        ///< [in] msg priority
    ) = 0;

  void recover_delete_object(const hobject_t &oid, eversion_t v,
                             RecoveryHandle *h);
  void send_recovery_deletes(int prio,
                             const map<pg_shard_t, vector<pair<hobject_t, eversion_t> > > &deletes);

  /**
   * recover_object
   *
   * Triggers a recovery operation on the specified hobject_t.
   * onreadable must be called before onwriteable
   *
   * On each replica (primary included), get_parent()->on_not_missing()
   * must be called when the transaction finalizing the recovery
   * is queued. Similarly, get_parent()->on_readable() must be called
   * when the transaction is applied in the backing store.
   *
   * get_parent()->on_not_degraded() should be called on the primary
   * when writes can resume on the object.
   *
   * obc may be NULL if the primary lacks the object.
   *
   * head may be NULL only if the head/snapdir is missing
   *
   * @param missing [in] set of info, missing pairs for queried nodes
   * @param overlaps [in] mapping of object to file offset overlaps
   */
  virtual int recover_object(
    const hobject_t &hoid, ///< [in] object to recover
    eversion_t v,          ///< [in] version to recover
    ObjectContextRef head, ///< [in] context of the head/snapdir object
    ObjectContextRef obc,  ///< [in] context of the object
    RecoveryHandle *h      ///< [in,out] handle to attach recovery op to
    ) = 0;
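
  /*
   * Example (hypothetical sketch): how the parent might drive recovery of a
   * single object through this interface; names such as "pgbackend", "soid"
   * and "v" are illustrative, not part of this header:
   *
   *   PGBackend::RecoveryHandle *h = pgbackend->open_recovery_op();
   *   pgbackend->recover_object(soid, v, head_obc, obc, h);
   *   pgbackend->run_recovery_op(h, priority);
   *
   * Progress is then reported back through Listener::on_local_recover() on
   * each shard and Listener::on_global_recover() once every shard is durable.
   */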

  /**
   * true if PGBackend can handle this message while inactive
   *
   * If it returns true, handle_message *must* also return true
   */
  virtual bool can_handle_while_inactive(OpRequestRef op) = 0;

  /// gives PGBackend a crack at an incoming message
  bool handle_message(
    OpRequestRef op ///< [in] message received
    ); ///< @return true if the message was handled

  /// the variant of handle_message that is overridden by child classes
  virtual bool _handle_message(OpRequestRef op) = 0;
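
  // Example (hypothetical sketch): the parent typically offers each incoming
  // op to the backend first and only handles it itself if the backend
  // declines, roughly:
  //
  //   if (pgbackend->handle_message(op))
  //     return;   // consumed by the backend (push, pull, EC read, ...)
  //   // ...otherwise process the op in the parent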

  virtual void check_recovery_sources(const OSDMapRef& osdmap) = 0;


  /**
   * clean up any temporary on-disk state due to a pg interval change
   */
  void on_change_cleanup(ObjectStore::Transaction *t);
  /**
   * implementation should clear itself; contexts blessed prior to on_change
   * won't be called after on_change()
   */
  virtual void on_change() = 0;
  virtual void clear_recovery_state() = 0;

  virtual void on_flushed() = 0;

  virtual IsPGRecoverablePredicate *get_is_recoverable_predicate() = 0;
  virtual IsPGReadablePredicate *get_is_readable_predicate() = 0;

  virtual void dump_recovery_info(Formatter *f) const = 0;

private:
  set<hobject_t> temp_contents;
public:
  // Track contents of the temp collection; cleared on reset
  void add_temp_obj(const hobject_t &oid) {
    temp_contents.insert(oid);
  }
  void add_temp_objs(const set<hobject_t> &oids) {
    temp_contents.insert(oids.begin(), oids.end());
  }
  void clear_temp_obj(const hobject_t &oid) {
    temp_contents.erase(oid);
  }
  void clear_temp_objs(const set<hobject_t> &oids) {
    for (set<hobject_t>::const_iterator i = oids.begin();
         i != oids.end();
         ++i) {
      temp_contents.erase(*i);
    }
  }
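
  // Example (hypothetical sketch): during a push an implementation might
  // stage incoming data in a temp object and register it here, then clear it
  // in the transaction that moves the data into place; roughly:
  //
  //   hobject_t tmp = get_parent()->get_temp_recovery_object(soid, v);
  //   add_temp_obj(tmp);
  //   // ... write chunks to tmp ...
  //   clear_temp_obj(tmp);  // final transaction renames tmp into soid
  //
  // Anything still registered when the PG interval changes is cleaned up by
  // on_change_cleanup().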

  virtual ~PGBackend() {}

  /// execute implementation-specific transaction
  virtual void submit_transaction(
    const hobject_t &hoid,                ///< [in] object
    const object_stat_sum_t &delta_stats, ///< [in] stat change
    const eversion_t &at_version,         ///< [in] version
    PGTransactionUPtr &&t,                ///< [in] trans to execute (move)
    const eversion_t &trim_to,            ///< [in] trim log to here
    const eversion_t &roll_forward_to,    ///< [in] trim rollback info to here
    const vector<pg_log_entry_t> &log_entries, ///< [in] log entries for t
    /// [in] hitset history (if updated with this transaction)
    boost::optional<pg_hit_set_history_t> &hset_history,
    Context *on_local_applied_sync,       ///< [in] called when applied locally
    Context *on_all_applied,              ///< [in] called when all acked
    Context *on_all_commit,               ///< [in] called when all commit
    ceph_tid_t tid,                       ///< [in] tid
    osd_reqid_t reqid,                    ///< [in] reqid
    OpRequestRef op                       ///< [in] op
    ) = 0;
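
  /*
   * Example (hypothetical sketch): a caller would normally build a
   * PGTransaction describing the update, generate matching log entries, and
   * hand both to submit_transaction() together with completion contexts.
   * The snippet below is illustrative only and elides how delta_stats,
   * at_version, log_entries, etc. are produced:
   *
   *   PGTransactionUPtr t(new PGTransaction);
   *   t->write(soid, off, bl.length(), bl, 0);
   *   submit_transaction(
   *     soid, delta_stats, at_version, std::move(t),
   *     trim_to, roll_forward_to, log_entries, hset_history,
   *     on_local_applied_sync, on_all_applied, on_all_commit,
   *     tid, reqid, op);
   */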

  /// submit callback to be called in order with pending writes
  virtual void call_write_ordered(std::function<void(void)> &&cb) = 0;

  void try_stash(
    const hobject_t &hoid,
    version_t v,
    ObjectStore::Transaction *t);

  void rollback(
    const pg_log_entry_t &entry,
    ObjectStore::Transaction *t);

  friend class LRBTrimmer;
  void rollforward(
    const pg_log_entry_t &entry,
    ObjectStore::Transaction *t);

  void trim(
    const pg_log_entry_t &entry,
    ObjectStore::Transaction *t);

  void remove(
    const hobject_t &hoid,
    ObjectStore::Transaction *t);

protected:

  void handle_recovery_delete(OpRequestRef op);
  void handle_recovery_delete_reply(OpRequestRef op);

  /// Reapply old attributes
  void rollback_setattrs(
    const hobject_t &hoid,
    map<string, boost::optional<bufferlist> > &old_attrs,
    ObjectStore::Transaction *t);

  /// Truncate object to roll back an append
  virtual void rollback_append(
    const hobject_t &hoid,
    uint64_t old_size,
    ObjectStore::Transaction *t);

  /// Unstash object to roll back a stash
  void rollback_stash(
    const hobject_t &hoid,
    version_t old_version,
    ObjectStore::Transaction *t);

  /// Unstash object to roll back a stash
  void rollback_try_stash(
    const hobject_t &hoid,
    version_t old_version,
    ObjectStore::Transaction *t);

  /// Delete object to roll back a create
  void rollback_create(
    const hobject_t &hoid,
    ObjectStore::Transaction *t) {
    remove(hoid, t);
  }

  /// Clone the extents back into place
  void rollback_extents(
    version_t gen,
    const vector<pair<uint64_t, uint64_t> > &extents,
    const hobject_t &hoid,
    ObjectStore::Transaction *t);
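
  // Note (editorial, hedged): rollback() above is expected to inspect the log
  // entry's recorded mod description and undo it with the helpers in this
  // section, e.g. restoring saved attrs via rollback_setattrs(), truncating
  // an appended object via rollback_append(), or deleting a newly created
  // object via rollback_create().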
public:

  /// Trim object stashed at version
  void trim_rollback_object(
    const hobject_t &hoid,
    version_t gen,
    ObjectStore::Transaction *t);

  /// List objects in collection
  int objects_list_partial(
    const hobject_t &begin,
    int min,
    int max,
    vector<hobject_t> *ls,
    hobject_t *next);

  int objects_list_range(
    const hobject_t &start,
    const hobject_t &end,
    snapid_t seq,
    vector<hobject_t> *ls,
    vector<ghobject_t> *gen_obs=0);
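
  // Example (hypothetical sketch): paging through the PG's objects with
  // objects_list_partial(), using *next as the resume cursor; the batch size
  // of 64 is arbitrary:
  //
  //   hobject_t current, next;
  //   do {
  //     vector<hobject_t> ls;
  //     int r = objects_list_partial(current, 64, 64, &ls, &next);
  //     if (r < 0)
  //       break;
  //     // ... process ls ...
  //     current = next;
  //   } while (!next.is_max());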

  int objects_get_attr(
    const hobject_t &hoid,
    const string &attr,
    bufferlist *out);

  virtual int objects_get_attrs(
    const hobject_t &hoid,
    map<string, bufferlist> *out);

  virtual int objects_read_sync(
    const hobject_t &hoid,
    uint64_t off,
    uint64_t len,
    uint32_t op_flags,
    bufferlist *bl) = 0;

  virtual void objects_read_async(
    const hobject_t &hoid,
    const list<pair<boost::tuple<uint64_t, uint64_t, uint32_t>,
                    pair<bufferlist*, Context*> > > &to_read,
    Context *on_complete, bool fast_read = false) = 0;
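
  // Example (hypothetical sketch): a synchronous read of one extent versus
  // queueing the same extent as an asynchronous read with a completion
  // context; soid, off, len and on_complete are illustrative:
  //
  //   bufferlist bl;
  //   int r = objects_read_sync(soid, off, len, 0, &bl);
  //
  //   list<pair<boost::tuple<uint64_t, uint64_t, uint32_t>,
  //             pair<bufferlist*, Context*> > > to_read;
  //   to_read.push_back(make_pair(boost::make_tuple(off, len, (uint32_t)0),
  //                               make_pair(&bl, (Context*)nullptr)));
  //   objects_read_async(soid, to_read, on_complete);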

  virtual bool scrub_supported() = 0;
  virtual bool auto_repair_supported() const = 0;
  void be_scan_list(
    ScrubMap &map, const vector<hobject_t> &ls, bool deep, uint32_t seed,
    ThreadPool::TPHandle &handle);
  bool be_compare_scrub_objects(
    pg_shard_t auth_shard,
    const ScrubMap::object &auth,
    const object_info_t& auth_oi,
    const ScrubMap::object &candidate,
    shard_info_wrapper& shard_error,
    inconsistent_obj_wrapper &result,
    ostream &errorstream);
  map<pg_shard_t, ScrubMap *>::const_iterator be_select_auth_object(
    const hobject_t &obj,
    const map<pg_shard_t,ScrubMap*> &maps,
    object_info_t *auth_oi,
    map<pg_shard_t, shard_info_wrapper> &shard_map,
    inconsistent_obj_wrapper &object_error);
  void be_compare_scrubmaps(
    const map<pg_shard_t,ScrubMap*> &maps,
    bool repair,
    map<hobject_t, set<pg_shard_t>> &missing,
    map<hobject_t, set<pg_shard_t>> &inconsistent,
    map<hobject_t, list<pg_shard_t>> &authoritative,
    map<hobject_t, pair<uint32_t,uint32_t>> &missing_digest,
    int &shallow_errors, int &deep_errors,
    Scrub::Store *store,
    const spg_t& pgid,
    const vector<int> &acting,
    ostream &errorstream);
  virtual uint64_t be_get_ondisk_size(
    uint64_t logical_size) = 0;
  virtual void be_deep_scrub(
    const hobject_t &poid,
    uint32_t seed,
    ScrubMap::object &o,
    ThreadPool::TPHandle &handle) = 0;
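
  // Note (editorial, hedged): in a typical scrub, each shard builds a
  // ScrubMap for its chunk with be_scan_list() (be_deep_scrub() adds the
  // data/omap digests for deep scrub), and the primary then calls
  // be_compare_scrubmaps(), which uses be_select_auth_object() to pick an
  // authoritative copy and be_compare_scrub_objects() to classify each
  // peer's copy as clean, inconsistent, or missing.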

  static PGBackend *build_pg_backend(
    const pg_pool_t &pool,
    const OSDMapRef curmap,
    Listener *l,
    coll_t coll,
    ObjectStore::CollectionHandle &ch,
    ObjectStore *store,
    CephContext *cct);
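
  // Example (hypothetical sketch): the parent PG would normally create its
  // backend through this factory, with the pool type selecting the concrete
  // implementation (e.g. a replicated vs an erasure-coded backend):
  //
  //   PGBackend *pgbackend = PGBackend::build_pg_backend(
  //     pool_info, get_osdmap(), this /* Listener */, coll, ch, store, cct);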
};

#endif