ceph/src/osd/PGLog.h
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3 /*
4 * Ceph - scalable distributed file system
5 *
6 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
7 * Copyright (C) 2013 Cloudwatt <libre.licensing@cloudwatt.com>
8 *
9 * Author: Loic Dachary <loic@dachary.org>
10 *
11 * This is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License version 2.1, as published by the Free Software
14 * Foundation. See file COPYING.
15 *
16 */
17 #ifndef CEPH_PG_LOG_H
18 #define CEPH_PG_LOG_H
19
20 // re-include our assert to clobber boost's
21 #include "include/assert.h"
22 #include "osd_types.h"
23 #include "os/ObjectStore.h"
24 #include <list>
25 using namespace std;
26
27 #define PGLOG_INDEXED_OBJECTS (1 << 0)
28 #define PGLOG_INDEXED_CALLER_OPS (1 << 1)
29 #define PGLOG_INDEXED_EXTRA_CALLER_OPS (1 << 2)
30 #define PGLOG_INDEXED_ALL (PGLOG_INDEXED_OBJECTS | PGLOG_INDEXED_CALLER_OPS | PGLOG_INDEXED_EXTRA_CALLER_OPS)
31
32 class CephContext;
33
34 struct PGLog : DoutPrefixProvider {
35 DoutPrefixProvider *prefix_provider;
36 string gen_prefix() const override {
37 return prefix_provider ? prefix_provider->gen_prefix() : "";
38 }
39 unsigned get_subsys() const override {
40 return prefix_provider ? prefix_provider->get_subsys() :
41 (unsigned)ceph_subsys_osd;
42 }
43 CephContext *get_cct() const override {
44 return cct;
45 }
46
47 ////////////////////////////// sub classes //////////////////////////////
48 struct LogEntryHandler {
49 virtual void rollback(
50 const pg_log_entry_t &entry) = 0;
51 virtual void rollforward(
52 const pg_log_entry_t &entry) = 0;
53 virtual void trim(
54 const pg_log_entry_t &entry) = 0;
55 virtual void remove(
56 const hobject_t &hoid) = 0;
57 virtual void try_stash(
58 const hobject_t &hoid,
59 version_t v) = 0;
60 virtual ~LogEntryHandler() {}
61 };
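  /*
   * A minimal sketch of a LogEntryHandler implementation (the class name and
   * counter below are illustrative only, not taken from the OSD code): the
   * trim/rollback paths invoke these hooks once per affected log entry.
   *
   *   struct CountingHandler : public LogEntryHandler {
   *     unsigned trimmed = 0;
   *     void rollback(const pg_log_entry_t &) override {}
   *     void rollforward(const pg_log_entry_t &) override {}
   *     void trim(const pg_log_entry_t &) override { ++trimmed; }
   *     void remove(const hobject_t &) override {}
   *     void try_stash(const hobject_t &, version_t) override {}
   *   };
   */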
62
63 /* Exceptions */
64 class read_log_and_missing_error : public buffer::error {
65 public:
66 explicit read_log_and_missing_error(const char *what) {
67 snprintf(buf, sizeof(buf), "read_log_and_missing_error: %s", what);
68 }
69 const char *what() const throw () override {
70 return buf;
71 }
72 private:
73 char buf[512];
74 };
75
76 public:
77 /**
78  * IndexedLog - adds an in-memory index of the log, by oid, plus some
79  * methods to manipulate it all (a usage sketch follows the struct below).
80  */
81 struct IndexedLog : public pg_log_t {
82 mutable ceph::unordered_map<hobject_t,pg_log_entry_t*> objects; // ptrs into log. be careful!
83 mutable ceph::unordered_map<osd_reqid_t,pg_log_entry_t*> caller_ops;
84 mutable ceph::unordered_multimap<osd_reqid_t,pg_log_entry_t*> extra_caller_ops;
85
86 // recovery pointers
87 list<pg_log_entry_t>::iterator complete_to; // not inclusive of referenced item
88 version_t last_requested = 0; // last object requested by primary
89
90 //
91 private:
92 mutable __u16 indexed_data = 0;
93 /**
94 * rollback_info_trimmed_to_riter points to the first log entry <=
95 * rollback_info_trimmed_to
96 *
97 * It's a reverse_iterator because rend() is a natural representation for
98 * tail, and rbegin() works nicely for head.
99 */
100 mempool::osd::list<pg_log_entry_t>::reverse_iterator
101 rollback_info_trimmed_to_riter;
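    /*
     * Worked illustration (the versions are hypothetical): for a log holding
     * entries 1'1, 1'2, 1'3 with rollback_info_trimmed_to = 1'2,
     * reset_rollback_info_trimmed_to_riter() starts at rbegin() (1'3) and
     * stops at 1'2, the newest entry <= rollback_info_trimmed_to; with
     * rollback_info_trimmed_to = head it stays at rbegin(), and for an empty
     * log it equals rend().
     */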
102
103 template <typename F>
104 void advance_can_rollback_to(eversion_t to, F &&f) {
105 if (to > can_rollback_to)
106 can_rollback_to = to;
107
108 if (to > rollback_info_trimmed_to)
109 rollback_info_trimmed_to = to;
110
111 while (rollback_info_trimmed_to_riter != log.rbegin()) {
112 --rollback_info_trimmed_to_riter;
113 if (rollback_info_trimmed_to_riter->version > rollback_info_trimmed_to) {
114 ++rollback_info_trimmed_to_riter;
115 break;
116 }
117 f(*rollback_info_trimmed_to_riter);
118 }
119 }
120
121 void reset_rollback_info_trimmed_to_riter() {
122 rollback_info_trimmed_to_riter = log.rbegin();
123 while (rollback_info_trimmed_to_riter != log.rend() &&
124 rollback_info_trimmed_to_riter->version > rollback_info_trimmed_to)
125 ++rollback_info_trimmed_to_riter;
126 }
127
128 // indexes objects, caller ops and extra caller ops
129 public:
130 IndexedLog() :
131 complete_to(log.end()),
132 last_requested(0),
133 indexed_data(0),
134 rollback_info_trimmed_to_riter(log.rbegin())
135 {}
136
137 template <typename... Args>
138 IndexedLog(Args&&... args) :
139 pg_log_t(std::forward<Args>(args)...),
140 complete_to(log.end()),
141 last_requested(0),
142 indexed_data(0),
143 rollback_info_trimmed_to_riter(log.rbegin()) {
144 reset_rollback_info_trimmed_to_riter();
145 index();
146 }
147
148 IndexedLog(const IndexedLog &rhs) :
149 pg_log_t(rhs),
150 complete_to(log.end()),
151 last_requested(rhs.last_requested),
152 indexed_data(0),
153 rollback_info_trimmed_to_riter(log.rbegin()) {
154 reset_rollback_info_trimmed_to_riter();
155 index(rhs.indexed_data);
156 }
157 IndexedLog &operator=(const IndexedLog &rhs) {
158 this->~IndexedLog();
159 new (this) IndexedLog(rhs);
160 return *this;
161 }
162
163 void trim_rollback_info_to(eversion_t to, LogEntryHandler *h) {
164 advance_can_rollback_to(
165 to,
166 [&](pg_log_entry_t &entry) {
167 h->trim(entry);
168 });
169 }
170 void roll_forward_to(eversion_t to, LogEntryHandler *h) {
171 advance_can_rollback_to(
172 to,
173 [&](pg_log_entry_t &entry) {
174 h->rollforward(entry);
175 });
176 }
177
178 void skip_can_rollback_to_to_head() {
179 advance_can_rollback_to(head, [&](const pg_log_entry_t &entry) {});
180 }
181
182 mempool::osd::list<pg_log_entry_t> rewind_from_head(eversion_t newhead) {
183 auto divergent = pg_log_t::rewind_from_head(newhead);
184 index();
185 reset_rollback_info_trimmed_to_riter();
186 return divergent;
187 }
188
189 template <typename T>
190 void scan_log_after(
191 const eversion_t &bound, ///< [in] scan entries > bound
192 T &&f) const {
193 auto iter = log.rbegin();
194 while (iter != log.rend() && iter->version > bound)
195 ++iter;
196
197 while (true) {
198 if (iter == log.rbegin())
199 break;
200 f(*(--iter));
201 }
202 }
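    /*
     * Usage sketch (ilog, bound and newer are placeholder names): collect
     * every entry strictly newer than bound, oldest first.
     *
     *   mempool::osd::list<pg_log_entry_t> newer;
     *   ilog.scan_log_after(bound, [&](const pg_log_entry_t &e) {
     *     newer.push_back(e);
     *   });
     */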
203
204 /****/
205 void claim_log_and_clear_rollback_info(const pg_log_t& o) {
206 // we must have already trimmed the old entries
207 assert(rollback_info_trimmed_to == head);
208 assert(rollback_info_trimmed_to_riter == log.rbegin());
209
210 *this = IndexedLog(o);
211
212 skip_can_rollback_to_to_head();
213 index();
214 }
215
216 void split_out_child(
217 pg_t child_pgid,
218 unsigned split_bits,
219 IndexedLog *target);
220
221 void zero() {
222 // we must have already trimmed the old entries
223 assert(rollback_info_trimmed_to == head);
224 assert(rollback_info_trimmed_to_riter == log.rbegin());
225
226 unindex();
227 pg_log_t::clear();
228 rollback_info_trimmed_to_riter = log.rbegin();
229 reset_recovery_pointers();
230 }
231 void clear() {
232 skip_can_rollback_to_to_head();
233 zero();
234 }
235 void reset_recovery_pointers() {
236 complete_to = log.end();
237 last_requested = 0;
238 }
239
240 bool logged_object(const hobject_t& oid) const {
241 if (!(indexed_data & PGLOG_INDEXED_OBJECTS)) {
242 index_objects();
243 }
244 return objects.count(oid);
245 }
246
247 bool logged_req(const osd_reqid_t &r) const {
248 if (!(indexed_data & PGLOG_INDEXED_CALLER_OPS)) {
249 index_caller_ops();
250 }
251 if (!caller_ops.count(r)) {
252 if (!(indexed_data & PGLOG_INDEXED_EXTRA_CALLER_OPS)) {
253 index_extra_caller_ops();
254 }
255 return extra_caller_ops.count(r);
256 }
257 return true;
258 }
259
260 bool get_request(
261 const osd_reqid_t &r,
262 eversion_t *version,
263 version_t *user_version,
264 int *return_code) const {
265 assert(version);
266 assert(user_version);
267 assert(return_code);
268 ceph::unordered_map<osd_reqid_t,pg_log_entry_t*>::const_iterator p;
269 if (!(indexed_data & PGLOG_INDEXED_CALLER_OPS)) {
270 index_caller_ops();
271 }
272 p = caller_ops.find(r);
273 if (p != caller_ops.end()) {
274 *version = p->second->version;
275 *user_version = p->second->user_version;
276 *return_code = p->second->return_code;
277 return true;
278 }
279
280 // warning: we will return *a* request for this reqid, but not
281 // necessarily the most recent.
282 if (!(indexed_data & PGLOG_INDEXED_EXTRA_CALLER_OPS)) {
283 index_extra_caller_ops();
284 }
285 p = extra_caller_ops.find(r);
286 if (p != extra_caller_ops.end()) {
287 for (vector<pair<osd_reqid_t, version_t> >::const_iterator i =
288 p->second->extra_reqids.begin();
289 i != p->second->extra_reqids.end();
290 ++i) {
291 if (i->first == r) {
292 *version = p->second->version;
293 *user_version = i->second;
294 *return_code = p->second->return_code;
295 return true;
296 }
297 }
298 assert(0 == "in extra_caller_ops but not extra_reqids");
299 }
300 return false;
301 }
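    /*
     * Usage sketch for replayed-op detection (reqid and the out-params are
     * placeholder names; the surrounding flow is assumed, not taken from the
     * OSD code):
     *
     *   eversion_t replay_version;
     *   version_t user_version;
     *   int return_code;
     *   if (log.get_request(reqid, &replay_version, &user_version,
     *                       &return_code)) {
     *     // duplicate: reply with the recorded versions and return code
     *   }
     */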
302
303 /// get a (bounded) list of recent reqids for the given object
304 void get_object_reqids(const hobject_t& oid, unsigned max,
305 vector<pair<osd_reqid_t, version_t> > *pls) const {
306 // make sure object is present at least once before we do an
307 // O(n) search.
308 if (!(indexed_data & PGLOG_INDEXED_OBJECTS)) {
309 index_objects();
310 }
311 if (objects.count(oid) == 0)
312 return;
313 for (list<pg_log_entry_t>::const_reverse_iterator i = log.rbegin();
314 i != log.rend();
315 ++i) {
316 if (i->soid == oid) {
317 if (i->reqid_is_indexed())
318 pls->push_back(make_pair(i->reqid, i->user_version));
319 pls->insert(pls->end(), i->extra_reqids.begin(), i->extra_reqids.end());
320 if (pls->size() >= max) {
321 if (pls->size() > max) {
322 pls->resize(max);
323 }
324 return;
325 }
326 }
327 }
328 }
329
330 void index(__u16 to_index = PGLOG_INDEXED_ALL) const {
331 if (to_index & PGLOG_INDEXED_OBJECTS)
332 objects.clear();
333 if (to_index & PGLOG_INDEXED_CALLER_OPS)
334 caller_ops.clear();
335 if (to_index & PGLOG_INDEXED_EXTRA_CALLER_OPS)
336 extra_caller_ops.clear();
337
338 for (list<pg_log_entry_t>::const_iterator i = log.begin();
339 i != log.end();
340 ++i) {
341 if (to_index & PGLOG_INDEXED_OBJECTS) {
342 if (i->object_is_indexed()) {
343 objects[i->soid] = const_cast<pg_log_entry_t*>(&(*i));
344 }
345 }
346
347 if (to_index & PGLOG_INDEXED_CALLER_OPS) {
348 if (i->reqid_is_indexed()) {
349 caller_ops[i->reqid] = const_cast<pg_log_entry_t*>(&(*i));
350 }
351 }
352
353 if (to_index & PGLOG_INDEXED_EXTRA_CALLER_OPS) {
354 for (vector<pair<osd_reqid_t, version_t> >::const_iterator j =
355 i->extra_reqids.begin();
356 j != i->extra_reqids.end();
357 ++j) {
358 extra_caller_ops.insert(
359 make_pair(j->first, const_cast<pg_log_entry_t*>(&(*i))));
360 }
361 }
362 }
363
364 indexed_data |= to_index;
365 }
366
367 void index_objects() const {
368 index(PGLOG_INDEXED_OBJECTS);
369 }
370
371 void index_caller_ops() const {
372 index(PGLOG_INDEXED_CALLER_OPS);
373 }
374
375 void index_extra_caller_ops() const {
376 index(PGLOG_INDEXED_EXTRA_CALLER_OPS);
377 }
378
379 void index(pg_log_entry_t& e) {
380 if ((indexed_data & PGLOG_INDEXED_OBJECTS) && e.object_is_indexed()) {
381 if (objects.count(e.soid) == 0 ||
382 objects[e.soid]->version < e.version)
383 objects[e.soid] = &e;
384 }
385 if (indexed_data & PGLOG_INDEXED_CALLER_OPS) {
386 // divergent merge_log indexes new before unindexing old
387 if (e.reqid_is_indexed()) {
388 caller_ops[e.reqid] = &e;
389 }
390 }
391 if (indexed_data & PGLOG_INDEXED_EXTRA_CALLER_OPS) {
392 for (vector<pair<osd_reqid_t, version_t> >::const_iterator j =
393 e.extra_reqids.begin();
394 j != e.extra_reqids.end();
395 ++j) {
396 extra_caller_ops.insert(make_pair(j->first, &e));
397 }
398 }
399 }
400 void unindex() {
401 objects.clear();
402 caller_ops.clear();
403 extra_caller_ops.clear();
404 indexed_data = 0;
405 }
406 void unindex(pg_log_entry_t& e) {
407 // NOTE: this only works if we remove from the _tail_ of the log!
408 if (indexed_data & PGLOG_INDEXED_OBJECTS) {
409 if (objects.count(e.soid) && objects[e.soid]->version == e.version)
410 objects.erase(e.soid);
411 }
412 if (e.reqid_is_indexed()) {
413 if (indexed_data & PGLOG_INDEXED_CALLER_OPS) {
414 // divergent merge_log indexes new before unindexing old
415 if (caller_ops.count(e.reqid) && caller_ops[e.reqid] == &e)
416 caller_ops.erase(e.reqid);
417 }
418 }
419 if (indexed_data & PGLOG_INDEXED_EXTRA_CALLER_OPS) {
420 for (vector<pair<osd_reqid_t, version_t> >::const_iterator j =
421 e.extra_reqids.begin();
422 j != e.extra_reqids.end();
423 ++j) {
424 for (ceph::unordered_multimap<osd_reqid_t,pg_log_entry_t*>::iterator k =
425 extra_caller_ops.find(j->first);
426 k != extra_caller_ops.end() && k->first == j->first;
427 ++k) {
428 if (k->second == &e) {
429 extra_caller_ops.erase(k);
430 break;
431 }
432 }
433 }
434 }
435 }
436
437 // actors
438 void add(const pg_log_entry_t& e, bool applied = true) {
439 if (!applied) {
440 assert(get_can_rollback_to() == head);
441 }
442
443 // add to log
444 log.push_back(e);
445
446 // riter previously pointed to the previous entry
447 if (rollback_info_trimmed_to_riter == log.rbegin())
448 ++rollback_info_trimmed_to_riter;
449
450 assert(e.version > head);
451 assert(head.version == 0 || e.version.version > head.version);
452 head = e.version;
453
454 // to our index
455 if ((indexed_data & PGLOG_INDEXED_OBJECTS) && e.object_is_indexed()) {
456 objects[e.soid] = &(log.back());
457 }
458 if (indexed_data & PGLOG_INDEXED_CALLER_OPS) {
459 if (e.reqid_is_indexed()) {
460 caller_ops[e.reqid] = &(log.back());
461 }
462 }
463
464 if (indexed_data & PGLOG_INDEXED_EXTRA_CALLER_OPS) {
465 for (vector<pair<osd_reqid_t, version_t> >::const_iterator j =
466 e.extra_reqids.begin();
467 j != e.extra_reqids.end();
468 ++j) {
469 extra_caller_ops.insert(make_pair(j->first, &(log.back())));
470 }
471 }
472
473 if (!applied) {
474 skip_can_rollback_to_to_head();
475 }
476 }
477
478 void trim(
479 CephContext* cct,
480 eversion_t s,
481 set<eversion_t> *trimmed);
482
483 ostream& print(ostream& out) const;
484 };
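  /*
   * Usage sketch for IndexedLog (entry construction is elided and the names
   * are placeholders; this is not code from the OSD itself): entries are
   * appended with add(), which keeps head and any built indexes consistent,
   * and lookups go through the lazy index helpers.
   *
   *   IndexedLog ilog;
   *   pg_log_entry_t e;        // fully populated elsewhere, e.version > head
   *   ilog.add(e);             // indexes e.soid / e.reqid if those indexes exist
   *   if (ilog.logged_object(e.soid)) {
   *     // object has at least one entry in the in-memory log
   *   }
   *   if (ilog.logged_req(e.reqid)) {
   *     // reqid was seen, either directly or via extra_reqids
   *   }
   */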
485
486
487 protected:
488 //////////////////// data members ////////////////////
489
490 pg_missing_tracker_t missing;
491 IndexedLog log;
492
493 eversion_t dirty_to; ///< must clear/writeout all keys <= dirty_to
494 eversion_t dirty_from; ///< must clear/writeout all keys >= dirty_from
495 eversion_t writeout_from; ///< must writeout keys >= writeout_from
496 set<eversion_t> trimmed; ///< must clear keys in trimmed
497 CephContext *cct;
498 bool pg_log_debug;
499 /// Log is clean on [dirty_to, dirty_from)
500 bool touched_log;
501 bool clear_divergent_priors;
502
503 void mark_dirty_to(eversion_t to) {
504 if (to > dirty_to)
505 dirty_to = to;
506 }
507 void mark_dirty_from(eversion_t from) {
508 if (from < dirty_from)
509 dirty_from = from;
510 }
511 void mark_writeout_from(eversion_t from) {
512 if (from < writeout_from)
513 writeout_from = from;
514 }
515 public:
516 bool is_dirty() const {
517 return !touched_log ||
518 (dirty_to != eversion_t()) ||
519 (dirty_from != eversion_t::max()) ||
520 (writeout_from != eversion_t::max()) ||
521 !(trimmed.empty()) ||
522 !missing.is_clean();
523 }
524 void mark_log_for_rewrite() {
525 mark_dirty_to(eversion_t::max());
526 mark_dirty_from(eversion_t());
527 touched_log = false;
528 }
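  /*
   * Rough sketch of the dirty-tracking cycle as it looks from this header
   * (t, coll, pgmeta_oid and require_rollback are assumed to come from the
   * caller; the exact call sites live in the PG/OSD code):
   *
   *   pglog.add(entry);            // mark_writeout_from(entry.version)
   *   if (pglog.is_dirty()) {
   *     map<string,bufferlist> km;
   *     pglog.write_log_and_missing(t, &km, coll, pgmeta_oid,
   *                                 require_rollback);
   *     // the write path is expected to leave the log clean again
   *     // (see undirty())
   *   }
   */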
529 protected:
530
531 /// DEBUG
532 set<string> log_keys_debug;
533 static void clear_after(set<string> *log_keys_debug, const string &lb) {
534 if (!log_keys_debug)
535 return;
536 for (set<string>::iterator i = log_keys_debug->lower_bound(lb);
537 i != log_keys_debug->end();
538 log_keys_debug->erase(i++));
539 }
540 static void clear_up_to(set<string> *log_keys_debug, const string &ub) {
541 if (!log_keys_debug)
542 return;
543 for (set<string>::iterator i = log_keys_debug->begin();
544 i != log_keys_debug->end() && *i < ub;
545 log_keys_debug->erase(i++));
546 }
547
548 void check();
549 void undirty() {
550 dirty_to = eversion_t();
551 dirty_from = eversion_t::max();
552 touched_log = true;
553 trimmed.clear();
554 writeout_from = eversion_t::max();
555 check();
556 missing.flush();
557 }
558 public:
559 // cppcheck-suppress noExplicitConstructor
560 PGLog(CephContext *cct, DoutPrefixProvider *dpp = 0) :
561 prefix_provider(dpp),
562 dirty_from(eversion_t::max()),
563 writeout_from(eversion_t::max()),
564 cct(cct),
565 pg_log_debug(!(cct && !(cct->_conf->osd_debug_pg_log_writeout))),
566 touched_log(false),
567 clear_divergent_priors(false) {}
568
569
570 void reset_backfill();
571
572 void clear();
573
574 //////////////////// get or set missing ////////////////////
575
576 const pg_missing_tracker_t& get_missing() const { return missing; }
577 void revise_have(hobject_t oid, eversion_t have) {
578 missing.revise_have(oid, have);
579 }
580
581 void revise_need(hobject_t oid, eversion_t need) {
582 missing.revise_need(oid, need);
583 }
584
585 void missing_add(const hobject_t& oid, eversion_t need, eversion_t have) {
586 missing.add(oid, need, have);
587 }
588
589 void missing_add_event(const pg_log_entry_t &e) {
590 missing.add_next_event(e);
591 }
592
593 //////////////////// get or set log ////////////////////
594
595 const IndexedLog &get_log() const { return log; }
596
597 const eversion_t &get_tail() const { return log.tail; }
598
599 void set_tail(eversion_t tail) { log.tail = tail; }
600
601 const eversion_t &get_head() const { return log.head; }
602
603 void set_head(eversion_t head) { log.head = head; }
604
605 void set_last_requested(version_t last_requested) {
606 log.last_requested = last_requested;
607 }
608
609 void index() { log.index(); }
610
611 void unindex() { log.unindex(); }
612
613 void add(const pg_log_entry_t& e, bool applied = true) {
614 mark_writeout_from(e.version);
615 log.add(e, applied);
616 }
617
618 void reset_recovery_pointers() { log.reset_recovery_pointers(); }
619
620 static void clear_info_log(
621 spg_t pgid,
622 ObjectStore::Transaction *t);
623
624 void trim(
625 eversion_t trim_to,
626 pg_info_t &info);
627
628 void roll_forward_to(
629 eversion_t roll_forward_to,
630 LogEntryHandler *h) {
631 log.roll_forward_to(
632 roll_forward_to,
633 h);
634 }
635
636 eversion_t get_can_rollback_to() const {
637 return log.get_can_rollback_to();
638 }
639
640 void roll_forward(LogEntryHandler *h) {
641 roll_forward_to(
642 log.head,
643 h);
644 }
645
646 //////////////////// get or set log & missing ////////////////////
647
648 void reset_backfill_claim_log(const pg_log_t &o, LogEntryHandler *h) {
649 log.trim_rollback_info_to(log.head, h);
650 log.claim_log_and_clear_rollback_info(o);
651 missing.clear();
652 mark_dirty_to(eversion_t::max());
653 }
654
655 void split_into(
656 pg_t child_pgid,
657 unsigned split_bits,
658 PGLog *opg_log) {
659 log.split_out_child(child_pgid, split_bits, &opg_log->log);
660 missing.split_into(child_pgid, split_bits, &(opg_log->missing));
661 opg_log->mark_dirty_to(eversion_t::max());
662 mark_dirty_to(eversion_t::max());
663 }
664
665 void recover_got(hobject_t oid, eversion_t v, pg_info_t &info) {
666 if (missing.is_missing(oid, v)) {
667 missing.got(oid, v);
668
669 // raise last_complete?
670 if (missing.get_items().empty()) {
671 log.complete_to = log.log.end();
672 info.last_complete = info.last_update;
673 }
674 while (log.complete_to != log.log.end()) {
675 if (missing.get_items().at(
676 missing.get_rmissing().begin()->second
677 ).need <= log.complete_to->version)
678 break;
679 if (info.last_complete < log.complete_to->version)
680 info.last_complete = log.complete_to->version;
681 ++log.complete_to;
682 }
683 }
684
685 assert(log.get_can_rollback_to() >= v);
686 }
687
688 void activate_not_complete(pg_info_t &info) {
689 log.complete_to = log.log.begin();
690 while (log.complete_to->version <
691 missing.get_items().at(
692 missing.get_rmissing().begin()->second
693 ).need)
694 ++log.complete_to;
695 assert(log.complete_to != log.log.end());
696 if (log.complete_to == log.log.begin()) {
697 info.last_complete = eversion_t();
698 } else {
699 --log.complete_to;
700 info.last_complete = log.complete_to->version;
701 ++log.complete_to;
702 }
703 log.last_requested = 0;
704 }
705
706 void proc_replica_log(pg_info_t &oinfo,
707 const pg_log_t &olog,
708 pg_missing_t& omissing, pg_shard_t from) const;
709
710 protected:
711 static void split_by_object(
712 mempool::osd::list<pg_log_entry_t> &entries,
713 map<hobject_t, mempool::osd::list<pg_log_entry_t>> *out_entries) {
714 while (!entries.empty()) {
715 mempool::osd::list<pg_log_entry_t> &out_list = (*out_entries)[entries.front().soid];
716 out_list.splice(out_list.end(), entries, entries.begin());
717 }
718 }
719
720 /**
721 * _merge_object_divergent_entries
722 *
723 * There are 5 distinct cases:
724 * 1) There is a more recent update: in this case we assume we adjusted the
725 * store and missing during merge_log
726 * 2) The first entry in the divergent sequence is a create. This might
727 * either be because the object is a clone or because prior_version is
728 * eversion_t(). In this case the object does not exist and we must
729 * adjust missing and the store to match.
730 * 3) We are currently missing the object. In this case, we adjust the
731 * missing to our prior_version taking care to add a divergent_prior
732 * if necessary
733 * 4) We can rollback all of the entries. In this case, we do so using
734 * the rollbacker and return -- the object does not go into missing.
735 * 5) We cannot rollback at least 1 of the entries. In this case, we
736 * clear the object out of the store and add a missing entry at
737 * prior_version taking care to add a divergent_prior if
738 * necessary.
739 */
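/*
 * Worked illustration of cases 4) and 5) above (all versions are
 * hypothetical): suppose hoid has divergent entries 20'5 and 20'6, both
 * recorded with rollback info, and olog_can_rollback_to = 20'4.  Every
 * entry satisfies can_rollback() && version > olog_can_rollback_to, so
 * case 4) applies: the entries are rolled back newest-first (20'6, then
 * 20'5) and hoid never enters missing.  If instead olog_can_rollback_to
 * were 20'5, entry 20'5 could no longer be rolled back, so case 5)
 * applies: the object is removed from the store (unless the last divergent
 * entry was a delete) and missing gets hoid at prior_version.
 */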
740 template <typename missing_type>
741 static void _merge_object_divergent_entries(
742 const IndexedLog &log, ///< [in] log to merge against
743 const hobject_t &hoid, ///< [in] object we are merging
744 const mempool::osd::list<pg_log_entry_t> &entries, ///< [in] entries for hoid to merge
745 const pg_info_t &info, ///< [in] info for merging entries
746 eversion_t olog_can_rollback_to, ///< [in] rollback boundary
747 missing_type &missing, ///< [in,out] missing to adjust, use
748 LogEntryHandler *rollbacker, ///< [in] optional rollbacker object
749 const DoutPrefixProvider *dpp ///< [in] logging provider
750 ) {
751 ldpp_dout(dpp, 20) << __func__ << ": merging hoid " << hoid
752 << " entries: " << entries << dendl;
753
754 if (hoid > info.last_backfill) {
755 ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid << " after last_backfill"
756 << dendl;
757 return;
758 }
759
760 // entries is non-empty
761 assert(!entries.empty());
762 eversion_t last;
763 for (list<pg_log_entry_t>::const_iterator i = entries.begin();
764 i != entries.end();
765 ++i) {
766 // all entries are on hoid
767 assert(i->soid == hoid);
768 if (i != entries.begin() && i->prior_version != eversion_t()) {
769 // in increasing order of version
770 assert(i->version > last);
771 // prior_version correct
772 assert(i->prior_version == last);
773 }
774 last = i->version;
775 }
776
777 const eversion_t prior_version = entries.begin()->prior_version;
778 const eversion_t first_divergent_update = entries.begin()->version;
779 const eversion_t last_divergent_update = entries.rbegin()->version;
780 const bool object_not_in_store =
781 !missing.is_missing(hoid) &&
782 entries.rbegin()->is_delete();
783 ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid
784 << " prior_version: " << prior_version
785 << " first_divergent_update: " << first_divergent_update
786 << " last_divergent_update: " << last_divergent_update
787 << dendl;
788
789 ceph::unordered_map<hobject_t, pg_log_entry_t*>::const_iterator objiter =
790 log.objects.find(hoid);
791 if (objiter != log.objects.end() &&
792 objiter->second->version >= first_divergent_update) {
793 /// Case 1)
794 ldpp_dout(dpp, 10) << __func__ << ": more recent entry found: "
795 << *objiter->second << ", already merged" << dendl;
796
797 assert(objiter->second->version > last_divergent_update);
798
799 // ensure missing has been updated appropriately
800 if (objiter->second->is_update()) {
801 assert(missing.is_missing(hoid) &&
802 missing.get_items().at(hoid).need == objiter->second->version);
803 } else {
804 assert(!missing.is_missing(hoid));
805 }
806 missing.revise_have(hoid, eversion_t());
807 if (rollbacker) {
808 if (!object_not_in_store) {
809 rollbacker->remove(hoid);
810 }
811 for (auto &&i: entries) {
812 rollbacker->trim(i);
813 }
814 }
815 return;
816 }
817
818 ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid
819 <<" has no more recent entries in log" << dendl;
820 if (prior_version == eversion_t() || entries.front().is_clone()) {
821 /// Case 2)
822 ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid
823 << " prior_version or op type indicates creation,"
824 << " deleting"
825 << dendl;
826 if (missing.is_missing(hoid))
827 missing.rm(missing.get_items().find(hoid));
828 if (rollbacker) {
829 if (!object_not_in_store) {
830 rollbacker->remove(hoid);
831 }
832 for (auto &&i: entries) {
833 rollbacker->trim(i);
834 }
835 }
836 return;
837 }
838
839 if (missing.is_missing(hoid)) {
840 /// Case 3)
841 ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid
842 << " missing, " << missing.get_items().at(hoid)
843 << " adjusting" << dendl;
844
845 if (missing.get_items().at(hoid).have == prior_version) {
846 ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid
847 << " missing.have is prior_version " << prior_version
848 << " removing from missing" << dendl;
849 missing.rm(missing.get_items().find(hoid));
850 } else {
851 ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid
852 << " missing.have is " << missing.get_items().at(hoid).have
853 << ", adjusting" << dendl;
854 missing.revise_need(hoid, prior_version);
855 if (prior_version <= info.log_tail) {
856 ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid
857 << " prior_version " << prior_version
858 << " <= info.log_tail "
859 << info.log_tail << dendl;
860 }
861 }
862 if (rollbacker) {
863 for (auto &&i: entries) {
864 rollbacker->trim(i);
865 }
866 }
867 return;
868 }
869
870 ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid
871 << " must be rolled back or recovered,"
872 << " attempting to rollback"
873 << dendl;
874 bool can_rollback = true;
875 /// Distinguish between 4) and 5)
876 for (list<pg_log_entry_t>::const_reverse_iterator i = entries.rbegin();
877 i != entries.rend();
878 ++i) {
879 if (!i->can_rollback() || i->version <= olog_can_rollback_to) {
880 ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid << " cannot rollback "
881 << *i << dendl;
882 can_rollback = false;
883 break;
884 }
885 }
886
887 if (can_rollback) {
888 /// Case 4)
889 for (list<pg_log_entry_t>::const_reverse_iterator i = entries.rbegin();
890 i != entries.rend();
891 ++i) {
892 assert(i->can_rollback() && i->version > olog_can_rollback_to);
893 ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid
894 << " rolling back " << *i << dendl;
895 if (rollbacker)
896 rollbacker->rollback(*i);
897 }
898 ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid
899 << " rolled back" << dendl;
900 return;
901 } else {
902 /// Case 5)
903 ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid << " cannot roll back, "
904 << "removing and adding to missing" << dendl;
905 if (rollbacker) {
906 if (!object_not_in_store)
907 rollbacker->remove(hoid);
908 for (auto &&i: entries) {
909 rollbacker->trim(i);
910 }
911 }
912 missing.add(hoid, prior_version, eversion_t());
913 if (prior_version <= info.log_tail) {
914 ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid
915 << " prior_version " << prior_version
916 << " <= info.log_tail "
917 << info.log_tail << dendl;
918 }
919 }
920 }
921
922 /// Merge all entries using above
923 template <typename missing_type>
924 static void _merge_divergent_entries(
925 const IndexedLog &log, ///< [in] log to merge against
926 mempool::osd::list<pg_log_entry_t> &entries, ///< [in] entries to merge
927 const pg_info_t &oinfo, ///< [in] info for merging entries
928 eversion_t olog_can_rollback_to, ///< [in] rollback boundary
929 missing_type &omissing, ///< [in,out] missing to adjust, use
930 LogEntryHandler *rollbacker, ///< [in] optional rollbacker object
931 const DoutPrefixProvider *dpp ///< [in] logging provider
932 ) {
933 map<hobject_t, mempool::osd::list<pg_log_entry_t> > split;
934 split_by_object(entries, &split);
935 for (map<hobject_t, mempool::osd::list<pg_log_entry_t>>::iterator i = split.begin();
936 i != split.end();
937 ++i) {
938 _merge_object_divergent_entries(
939 log,
940 i->first,
941 i->second,
942 oinfo,
943 olog_can_rollback_to,
944 omissing,
945 rollbacker,
946 dpp);
947 }
948 }
949
950 /**
951 * Exists for use in TestPGLog for simply testing single divergent log
952 * cases
953 */
954 void merge_old_entry(
955 ObjectStore::Transaction& t,
956 const pg_log_entry_t& oe,
957 const pg_info_t& info,
958 LogEntryHandler *rollbacker) {
959 mempool::osd::list<pg_log_entry_t> entries;
960 entries.push_back(oe);
961 _merge_object_divergent_entries(
962 log,
963 oe.soid,
964 entries,
965 info,
966 log.get_can_rollback_to(),
967 missing,
968 rollbacker,
969 this);
970 }
971 public:
972 void rewind_divergent_log(eversion_t newhead,
973 pg_info_t &info,
974 LogEntryHandler *rollbacker,
975 bool &dirty_info,
976 bool &dirty_big_info);
977
978 void merge_log(pg_info_t &oinfo,
979 pg_log_t &olog,
980 pg_shard_t from,
981 pg_info_t &info, LogEntryHandler *rollbacker,
982 bool &dirty_info, bool &dirty_big_info);
983
984 template <typename missing_type>
985 static bool append_log_entries_update_missing(
986 const hobject_t &last_backfill,
987 bool last_backfill_bitwise,
988 const mempool::osd::list<pg_log_entry_t> &entries,
989 bool maintain_rollback,
990 IndexedLog *log,
991 missing_type &missing,
992 LogEntryHandler *rollbacker,
993 const DoutPrefixProvider *dpp) {
994 bool invalidate_stats = false;
995 if (log && !entries.empty()) {
996 assert(log->head < entries.begin()->version);
997 }
998 for (list<pg_log_entry_t>::const_iterator p = entries.begin();
999 p != entries.end();
1000 ++p) {
1001 invalidate_stats = invalidate_stats || !p->is_error();
1002 if (log) {
1003 ldpp_dout(dpp, 20) << "update missing, append " << *p << dendl;
1004 log->add(*p);
1005 }
1006 if (p->soid <= last_backfill &&
1007 !p->is_error()) {
1008 missing.add_next_event(*p);
1009 if (rollbacker) {
1010 // hack to match PG::mark_all_unfound_lost
1011 if (maintain_rollback && p->is_lost_delete() && p->can_rollback()) {
1012 rollbacker->try_stash(p->soid, p->version.version);
1013 } else if (p->is_delete()) {
1014 rollbacker->remove(p->soid);
1015 }
1016 }
1017 }
1018 }
1019 return invalidate_stats;
1020 }
1021 bool append_new_log_entries(
1022 const hobject_t &last_backfill,
1023 bool last_backfill_bitwise,
1024 const mempool::osd::list<pg_log_entry_t> &entries,
1025 LogEntryHandler *rollbacker) {
1026 bool invalidate_stats = append_log_entries_update_missing(
1027 last_backfill,
1028 last_backfill_bitwise,
1029 entries,
1030 true,
1031 &log,
1032 missing,
1033 rollbacker,
1034 this);
1035 if (!entries.empty()) {
1036 mark_writeout_from(entries.begin()->version);
1037 }
1038 return invalidate_stats;
1039 }
1040
1041 void write_log_and_missing(ObjectStore::Transaction& t,
1042 map<string,bufferlist> *km,
1043 const coll_t& coll,
1044 const ghobject_t &log_oid,
1045 bool require_rollback);
1046
1047 static void write_log_and_missing_wo_missing(
1048 ObjectStore::Transaction& t,
1049 map<string,bufferlist>* km,
1050 pg_log_t &log,
1051 const coll_t& coll,
1052 const ghobject_t &log_oid, map<eversion_t, hobject_t> &divergent_priors,
1053 bool require_rollback);
1054
1055 static void write_log_and_missing(
1056 ObjectStore::Transaction& t,
1057 map<string,bufferlist>* km,
1058 pg_log_t &log,
1059 const coll_t& coll,
1060 const ghobject_t &log_oid,
1061 const pg_missing_tracker_t &missing,
1062 bool require_rollback);
1063
1064 static void _write_log_and_missing_wo_missing(
1065 ObjectStore::Transaction& t,
1066 map<string,bufferlist>* km,
1067 pg_log_t &log,
1068 const coll_t& coll, const ghobject_t &log_oid,
1069 map<eversion_t, hobject_t> &divergent_priors,
1070 eversion_t dirty_to,
1071 eversion_t dirty_from,
1072 eversion_t writeout_from,
1073 const set<eversion_t> &trimmed,
1074 bool dirty_divergent_priors,
1075 bool touch_log,
1076 bool require_rollback,
1077 set<string> *log_keys_debug
1078 );
1079
1080 static void _write_log_and_missing(
1081 ObjectStore::Transaction& t,
1082 map<string,bufferlist>* km,
1083 pg_log_t &log,
1084 const coll_t& coll, const ghobject_t &log_oid,
1085 eversion_t dirty_to,
1086 eversion_t dirty_from,
1087 eversion_t writeout_from,
1088 const set<eversion_t> &trimmed,
1089 const pg_missing_tracker_t &missing,
1090 bool touch_log,
1091 bool require_rollback,
1092 bool clear_divergent_priors,
1093 set<string> *log_keys_debug
1094 );
1095
1096 void read_log_and_missing(
1097 ObjectStore *store, coll_t pg_coll,
1098 coll_t log_coll, ghobject_t log_oid,
1099 const pg_info_t &info,
1100 ostringstream &oss,
1101 bool tolerate_divergent_missing_log,
1102 bool debug_verify_stored_missing = false
1103 ) {
1104 return read_log_and_missing(
1105 store, pg_coll, log_coll, log_oid, info,
1106 log, missing, oss,
1107 tolerate_divergent_missing_log,
1108 &clear_divergent_priors,
1109 this,
1110 (pg_log_debug ? &log_keys_debug : 0),
1111 debug_verify_stored_missing);
1112 }
1113
1114 template <typename missing_type>
1115 static void read_log_and_missing(ObjectStore *store, coll_t pg_coll,
1116 coll_t log_coll, ghobject_t log_oid,
1117 const pg_info_t &info,
1118 IndexedLog &log,
1119 missing_type &missing, ostringstream &oss,
1120 bool tolerate_divergent_missing_log,
1121 bool *clear_divergent_priors = NULL,
1122 const DoutPrefixProvider *dpp = NULL,
1123 set<string> *log_keys_debug = 0,
1124 bool debug_verify_stored_missing = false
1125 ) {
1126 ldpp_dout(dpp, 20) << "read_log_and_missing coll " << pg_coll
1127 << " log_oid " << log_oid << dendl;
1128
1129 // legacy?
1130 struct stat st;
1131 int r = store->stat(log_coll, log_oid, &st);
1132 assert(r == 0);
1133 assert(st.st_size == 0);
1134
1135 // will get overridden below if it had been recorded
1136 eversion_t on_disk_can_rollback_to = info.last_update;
1137 eversion_t on_disk_rollback_info_trimmed_to = eversion_t();
1138 ObjectMap::ObjectMapIterator p = store->get_omap_iterator(log_coll, log_oid);
1139 map<eversion_t, hobject_t> divergent_priors;
1140 bool has_divergent_priors = false;
1141 list<pg_log_entry_t> entries;
1142 if (p) {
1143 for (p->seek_to_first(); p->valid() ; p->next(false)) {
1144 // non-log pgmeta_oid keys are prefixed with _; skip those
1145 if (p->key()[0] == '_')
1146 continue;
1147 bufferlist bl = p->value(); // Copy bufferlist before creating iterator
1148 bufferlist::iterator bp = bl.begin();
1149 if (p->key() == "divergent_priors") {
1150 ::decode(divergent_priors, bp);
1151 ldpp_dout(dpp, 20) << "read_log_and_missing " << divergent_priors.size()
1152 << " divergent_priors" << dendl;
1153 has_divergent_priors = true;
1154 debug_verify_stored_missing = false;
1155 } else if (p->key() == "can_rollback_to") {
1156 ::decode(on_disk_can_rollback_to, bp);
1157 } else if (p->key() == "rollback_info_trimmed_to") {
1158 ::decode(on_disk_rollback_info_trimmed_to, bp);
1159 } else if (p->key().substr(0, 7) == string("missing")) {
1160 pair<hobject_t, pg_missing_item> p;
1161 ::decode(p, bp);
1162 missing.add(p.first, p.second.need, p.second.have);
1163 } else {
1164 pg_log_entry_t e;
1165 e.decode_with_checksum(bp);
1166 ldpp_dout(dpp, 20) << "read_log_and_missing " << e << dendl;
1167 if (!entries.empty()) {
1168 pg_log_entry_t last_e(entries.back());
1169 assert(last_e.version.version < e.version.version);
1170 assert(last_e.version.epoch <= e.version.epoch);
1171 }
1172 entries.push_back(e);
1173 if (log_keys_debug)
1174 log_keys_debug->insert(e.get_key_name());
1175 }
1176 }
1177 }
1178 log = IndexedLog(
1179 info.last_update,
1180 info.log_tail,
1181 on_disk_can_rollback_to,
1182 on_disk_rollback_info_trimmed_to,
1183 std::move(entries));
1184
1185 if (has_divergent_priors || debug_verify_stored_missing) {
1186 // build missing
1187 if (debug_verify_stored_missing || info.last_complete < info.last_update) {
1188 ldpp_dout(dpp, 10) << "read_log_and_missing checking for missing items over interval ("
1189 << info.last_complete
1190 << "," << info.last_update << "]" << dendl;
1191
1192 set<hobject_t> did;
1193 set<hobject_t> checked;
1194 set<hobject_t> skipped;
1195 for (list<pg_log_entry_t>::reverse_iterator i = log.log.rbegin();
1196 i != log.log.rend();
1197 ++i) {
1198 if (!debug_verify_stored_missing && i->version <= info.last_complete) break;
1199 if (i->soid > info.last_backfill)
1200 continue;
1201 if (i->is_error())
1202 continue;
1203 if (did.count(i->soid)) continue;
1204 did.insert(i->soid);
1205
1206 if (i->is_delete()) continue;
1207
1208 bufferlist bv;
1209 int r = store->getattr(
1210 pg_coll,
1211 ghobject_t(i->soid, ghobject_t::NO_GEN, info.pgid.shard),
1212 OI_ATTR,
1213 bv);
1214 if (r >= 0) {
1215 object_info_t oi(bv);
1216 if (oi.version < i->version) {
1217 ldpp_dout(dpp, 15) << "read_log_and_missing missing " << *i
1218 << " (have " << oi.version << ")" << dendl;
1219 if (debug_verify_stored_missing) {
1220 auto miter = missing.get_items().find(i->soid);
1221 assert(miter != missing.get_items().end());
1222 assert(miter->second.need == i->version);
1223 assert(miter->second.have == oi.version);
1224 checked.insert(i->soid);
1225 } else {
1226 missing.add(i->soid, i->version, oi.version);
1227 }
1228 }
1229 } else {
1230 ldpp_dout(dpp, 15) << "read_log_and_missing missing " << *i << dendl;
1231 if (debug_verify_stored_missing) {
1232 auto miter = missing.get_items().find(i->soid);
1233 assert(miter != missing.get_items().end());
1234 assert(miter->second.need == i->version);
1235 assert(miter->second.have == eversion_t());
1236 checked.insert(i->soid);
1237 } else {
1238 missing.add(i->soid, i->version, eversion_t());
1239 }
1240 }
1241 }
1242 if (debug_verify_stored_missing) {
1243 for (auto &&i: missing.get_items()) {
1244 if (checked.count(i.first))
1245 continue;
1246 if (i.second.need > log.tail ||
1247 i.first > info.last_backfill) {
1248 ldpp_dout(dpp, -1) << __func__ << ": invalid missing set entry found "
1249 << i.first
1250 << dendl;
1251 assert(0 == "invalid missing set entry found");
1252 }
1253 bufferlist bv;
1254 int r = store->getattr(
1255 pg_coll,
1256 ghobject_t(i.first, ghobject_t::NO_GEN, info.pgid.shard),
1257 OI_ATTR,
1258 bv);
1259 if (r >= 0) {
1260 object_info_t oi(bv);
1261 assert(oi.version == i.second.have);
1262 } else {
1263 assert(eversion_t() == i.second.have);
1264 }
1265 }
1266 } else {
1267 assert(has_divergent_priors);
1268 for (map<eversion_t, hobject_t>::reverse_iterator i =
1269 divergent_priors.rbegin();
1270 i != divergent_priors.rend();
1271 ++i) {
1272 if (i->first <= info.last_complete) break;
1273 if (i->second > info.last_backfill)
1274 continue;
1275 if (did.count(i->second)) continue;
1276 did.insert(i->second);
1277 bufferlist bv;
1278 int r = store->getattr(
1279 pg_coll,
1280 ghobject_t(i->second, ghobject_t::NO_GEN, info.pgid.shard),
1281 OI_ATTR,
1282 bv);
1283 if (r >= 0) {
1284 object_info_t oi(bv);
1285 /**
1286 * 1) we see this entry in the divergent priors mapping
1287 * 2) we didn't see an entry for this object in the log
1288 *
1289 * From 1 & 2 we know that either the object does not exist
1290 * or it is at the version specified in the divergent_priors
1291 * map since the object would have been deleted atomically
1292 * with the addition of the divergent_priors entry, an older
1293 * version would not have been recovered, and a newer version
1294 * would show up in the log above.
1295 */
1296 /**
1297 * Unfortunately the assessment above is incorrect because of
1298 * http://tracker.ceph.com/issues/17916 (we were incorrectly
1299 * not removing the divergent_priors set from disk state!),
1300 * so let's check that.
1301 */
1302 if (oi.version > i->first && tolerate_divergent_missing_log) {
1303 ldpp_dout(dpp, 0) << "read_log divergent_priors entry (" << *i
1304 << ") inconsistent with disk state (" << oi
1305 << "), assuming it is tracker.ceph.com/issues/17916"
1306 << dendl;
1307 } else {
1308 assert(oi.version == i->first);
1309 }
1310 } else {
1311 ldpp_dout(dpp, 15) << "read_log_and_missing missing " << *i << dendl;
1312 missing.add(i->second, i->first, eversion_t());
1313 }
1314 }
1315 }
1316 if (clear_divergent_priors)
1317 (*clear_divergent_priors) = true;
1318 }
1319 }
1320
1321 if (!has_divergent_priors) {
1322 if (clear_divergent_priors)
1323 (*clear_divergent_priors) = false;
1324 missing.flush();
1325 }
1326 ldpp_dout(dpp, 10) << "read_log_and_missing done" << dendl;
1327 }
1328 };
1329
1330 #endif // CEPH_PG_LOG_H