]> git.proxmox.com Git - ceph.git/blob - ceph/src/osd/PGLog.h
19405de25be67aac67edb054f9a7c6d9ecf94f2e
[ceph.git] / ceph / src / osd / PGLog.h
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3 /*
4 * Ceph - scalable distributed file system
5 *
6 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
7 * Copyright (C) 2013 Cloudwatt <libre.licensing@cloudwatt.com>
8 *
9 * Author: Loic Dachary <loic@dachary.org>
10 *
11 * This is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License version 2.1, as published by the Free Software
14 * Foundation. See file COPYING.
15 *
16 */
17 #pragma once
18
19 // re-include our assert to clobber boost's
20 #include "include/assert.h"
21 #include "osd_types.h"
22 #include "os/ObjectStore.h"
23 #include <list>
24 using namespace std;
25
26 #define PGLOG_INDEXED_OBJECTS (1 << 0)
27 #define PGLOG_INDEXED_CALLER_OPS (1 << 1)
28 #define PGLOG_INDEXED_EXTRA_CALLER_OPS (1 << 2)
29 #define PGLOG_INDEXED_DUPS (1 << 3)
30 #define PGLOG_INDEXED_ALL (PGLOG_INDEXED_OBJECTS | \
31 PGLOG_INDEXED_CALLER_OPS | \
32 PGLOG_INDEXED_EXTRA_CALLER_OPS | \
33 PGLOG_INDEXED_DUPS)
34
35 class CephContext;
36
37 struct PGLog : DoutPrefixProvider {
38 DoutPrefixProvider *prefix_provider;
  // DoutPrefixProvider interface: forward log-prefix/subsystem queries to
  // the wrapped provider (the owning PG) when one was supplied at
  // construction; otherwise fall back to neutral defaults.
  string gen_prefix() const override {
    return prefix_provider ? prefix_provider->gen_prefix() : "";
  }
  unsigned get_subsys() const override {
    return prefix_provider ? prefix_provider->get_subsys() :
      (unsigned)ceph_subsys_osd;  // default subsystem when unattached
  }
  CephContext *get_cct() const override {
    return cct;
  }
49
50 ////////////////////////////// sub classes //////////////////////////////
  // Callback interface used while advancing/trimming the log.  The owner
  // implements how rollback / rollforward / trim side effects are applied
  // to the backing object store; PGLog only drives the sequencing.
  struct LogEntryHandler {
    // undo the update described by entry (entry must still be rollbackable)
    virtual void rollback(
      const pg_log_entry_t &entry) = 0;
    // irrevocably commit entry's side effects (drop its rollback data)
    virtual void rollforward(
      const pg_log_entry_t &entry) = 0;
    // discard rollback data for entry as it is trimmed out of the log
    virtual void trim(
      const pg_log_entry_t &entry) = 0;
    // remove the object from the store
    virtual void remove(
      const hobject_t &hoid) = 0;
    // stash the object at version v rather than deleting it outright
    virtual void try_stash(
      const hobject_t &hoid,
      version_t v) = 0;
    virtual ~LogEntryHandler() {}
  };
65
66 /* Exceptions */
  // Thrown when the on-disk log/missing set cannot be read or decoded.
  // Derives from buffer::error so callers that catch decode failures
  // also catch this.
  class read_log_and_missing_error : public buffer::error {
  public:
    explicit read_log_and_missing_error(const char *what) {
      snprintf(buf, sizeof(buf), "read_log_and_missing_error: %s", what);
    }
    const char *what() const throw () override {
      return buf;
    }
  private:
    char buf[512];  // fixed-size message buffer; long messages truncate
  };
78
79 public:
80 /**
81 * IndexLog - adds in-memory index of the log, by oid.
82 * plus some methods to manipulate it all.
83 */
84 struct IndexedLog : public pg_log_t {
85 mutable ceph::unordered_map<hobject_t,pg_log_entry_t*> objects; // ptrs into log. be careful!
86 mutable ceph::unordered_map<osd_reqid_t,pg_log_entry_t*> caller_ops;
87 mutable ceph::unordered_multimap<osd_reqid_t,pg_log_entry_t*> extra_caller_ops;
88 mutable ceph::unordered_map<osd_reqid_t,pg_log_dup_t*> dup_index;
89
90 // recovery pointers
91 list<pg_log_entry_t>::iterator complete_to; // not inclusive of referenced item
92 version_t last_requested = 0; // last object requested by primary
93
94 //
95 private:
96 mutable __u16 indexed_data = 0;
97 /**
98 * rollback_info_trimmed_to_riter points to the first log entry <=
99 * rollback_info_trimmed_to
100 *
101 * It's a reverse_iterator because rend() is a natural representation for
102 * tail, and rbegin() works nicely for head.
103 */
104 mempool::osd_pglog::list<pg_log_entry_t>::reverse_iterator
105 rollback_info_trimmed_to_riter;
106
    // Advance can_rollback_to (and rollback_info_trimmed_to) forward to
    // 'to', invoking f on each entry whose rollback info is newly given
    // up.  Note: decrementing the reverse_iterator moves it toward the
    // head, so entries are visited in increasing version order.
    template <typename F>
    void advance_can_rollback_to(eversion_t to, F &&f) {
      if (to > can_rollback_to)
	can_rollback_to = to;

      if (to > rollback_info_trimmed_to)
	rollback_info_trimmed_to = to;

      while (rollback_info_trimmed_to_riter != log.rbegin()) {
	--rollback_info_trimmed_to_riter;
	if (rollback_info_trimmed_to_riter->version > rollback_info_trimmed_to) {
	  // stepped past the bound; restore position and stop
	  ++rollback_info_trimmed_to_riter;
	  break;
	}
	f(*rollback_info_trimmed_to_riter);
      }
    }
124
125 void reset_rollback_info_trimmed_to_riter() {
126 rollback_info_trimmed_to_riter = log.rbegin();
127 while (rollback_info_trimmed_to_riter != log.rend() &&
128 rollback_info_trimmed_to_riter->version > rollback_info_trimmed_to)
129 ++rollback_info_trimmed_to_riter;
130 }
131
132 // indexes objects, caller ops and extra caller ops
133 public:
    // Default: empty log, recovery pointers at their neutral positions,
    // no indices built yet.
    IndexedLog() :
      complete_to(log.end()),
      last_requested(0),
      indexed_data(0),
      rollback_info_trimmed_to_riter(log.rbegin())
    { }

    // Construct from pg_log_t constructor arguments, then rebuild the
    // trimmed-to riter and all indices for the new entry list.
    template <typename... Args>
    IndexedLog(Args&&... args) :
      pg_log_t(std::forward<Args>(args)...),
      complete_to(log.end()),
      last_requested(0),
      indexed_data(0),
      rollback_info_trimmed_to_riter(log.rbegin())
    {
      reset_rollback_info_trimmed_to_riter();
      index();
    }

    // Copy: rebuild only the indices rhs had built (the rest stay lazy).
    // complete_to is reset to end() since rhs's iterator pointed into
    // rhs's own list and would dangle here.
    IndexedLog(const IndexedLog &rhs) :
      pg_log_t(rhs),
      complete_to(log.end()),
      last_requested(rhs.last_requested),
      indexed_data(0),
      rollback_info_trimmed_to_riter(log.rbegin())
    {
      reset_rollback_info_trimmed_to_riter();
      index(rhs.indexed_data);
    }
163
164 IndexedLog &operator=(const IndexedLog &rhs) {
165 this->~IndexedLog();
166 new (this) IndexedLog(rhs);
167 return *this;
168 }
169
    // Give up rollback info for entries up to 'to'; each affected entry is
    // handed to h->trim() so its rollback data can be discarded.
    void trim_rollback_info_to(eversion_t to, LogEntryHandler *h) {
      advance_can_rollback_to(
	to,
	[&](pg_log_entry_t &entry) {
	  h->trim(entry);
	});
    }
    // Irrevocably commit entries up to 'to'; each affected entry is handed
    // to h->rollforward() to apply its deferred side effects.
    void roll_forward_to(eversion_t to, LogEntryHandler *h) {
      advance_can_rollback_to(
	to,
	[&](pg_log_entry_t &entry) {
	  h->rollforward(entry);
	});
    }

    // Move the rollback boundary all the way to head without performing
    // any per-entry action.
    void skip_can_rollback_to_to_head() {
      advance_can_rollback_to(head, [&](const pg_log_entry_t &entry) {});
    }
188
    // Rewind the log to newhead, returning the divergent entries removed.
    // Indices and the trimmed-to riter pointed into the old list, so both
    // are rebuilt afterwards.
    mempool::osd_pglog::list<pg_log_entry_t> rewind_from_head(eversion_t newhead) {
      auto divergent = pg_log_t::rewind_from_head(newhead);
      index();
      reset_rollback_info_trimmed_to_riter();
      return divergent;
    }
195
    // Invoke f on every entry with version > bound, in increasing version
    // order (oldest qualifying entry first, head last).
    template <typename T>
    void scan_log_after(
      const eversion_t &bound, ///< [in] scan entries > bound
      T &&f) const {
      // walk backwards past every entry newer than bound...
      auto iter = log.rbegin();
      while (iter != log.rend() && iter->version > bound)
	++iter;

      // ...then replay them toward the head (decrementing a
      // reverse_iterator moves it to newer entries).
      while (true) {
	if (iter == log.rbegin())
	  break;
	f(*(--iter));
      }
    }
210
211 /****/
    // Replace our contents wholesale with log o, discarding any ability to
    // roll back (the boundary is pushed to the new head).
    void claim_log_and_clear_rollback_info(const pg_log_t& o) {
      // we must have already trimmed the old entries
      assert(rollback_info_trimmed_to == head);
      assert(rollback_info_trimmed_to_riter == log.rbegin());

      *this = IndexedLog(o);

      skip_can_rollback_to_to_head();
      index();
    }
222
223 void split_out_child(
224 pg_t child_pgid,
225 unsigned split_bits,
226 IndexedLog *target);
227
    // Empty the log entirely.  The caller must already have advanced the
    // rollback boundary to head (asserted below) so no rollback state is
    // silently lost.
    void zero() {
      // we must have already trimmed the old entries
      assert(rollback_info_trimmed_to == head);
      assert(rollback_info_trimmed_to_riter == log.rbegin());

      unindex();
      pg_log_t::clear();
      rollback_info_trimmed_to_riter = log.rbegin();
      reset_recovery_pointers();
    }
    // As zero(), but first advances the rollback boundary to head so
    // zero()'s preconditions hold.
    void clear() {
      skip_can_rollback_to_to_head();
      zero();
    }
    // Reset recovery progress markers to "nothing requested yet".
    void reset_recovery_pointers() {
      complete_to = log.end();
      last_requested = 0;
    }
246
    // True if the log contains any (indexable) entry for oid.
    // The object index is built lazily on first use.
    bool logged_object(const hobject_t& oid) const {
      if (!(indexed_data & PGLOG_INDEXED_OBJECTS)) {
	index_objects();
      }
      return objects.count(oid);
    }

    // True if reqid r appears in the log, either as an entry's own reqid
    // or among some entry's extra_reqids.  Both indices build lazily; the
    // (more expensive) extra_caller_ops index is only consulted when the
    // primary index misses.
    bool logged_req(const osd_reqid_t &r) const {
      if (!(indexed_data & PGLOG_INDEXED_CALLER_OPS)) {
	index_caller_ops();
      }
      if (!caller_ops.count(r)) {
	if (!(indexed_data & PGLOG_INDEXED_EXTRA_CALLER_OPS)) {
	  index_extra_caller_ops();
	}
	return extra_caller_ops.count(r);
      }
      return true;
    }
266
    // Look up reqid r, filling in version/user_version/return_code on a
    // hit.  Checks, in order: the caller_ops index, the extra_caller_ops
    // index (piggy-backed reqids), then the dup-op index.  All three
    // indices are built lazily.  Returns false if r is unknown.
    bool get_request(
      const osd_reqid_t &r,
      eversion_t *version,
      version_t *user_version,
      int *return_code) const
    {
      assert(version);
      assert(user_version);
      assert(return_code);
      ceph::unordered_map<osd_reqid_t,pg_log_entry_t*>::const_iterator p;
      if (!(indexed_data & PGLOG_INDEXED_CALLER_OPS)) {
	index_caller_ops();
      }
      p = caller_ops.find(r);
      if (p != caller_ops.end()) {
	*version = p->second->version;
	*user_version = p->second->user_version;
	*return_code = p->second->return_code;
	return true;
      }

      // warning: we will return *a* request for this reqid, but not
      // necessarily the most recent.
      if (!(indexed_data & PGLOG_INDEXED_EXTRA_CALLER_OPS)) {
	index_extra_caller_ops();
      }
      p = extra_caller_ops.find(r);
      if (p != extra_caller_ops.end()) {
	// find the matching piggy-backed reqid to recover its user_version
	for (auto i = p->second->extra_reqids.begin();
	     i != p->second->extra_reqids.end();
	     ++i) {
	  if (i->first == r) {
	    *version = p->second->version;
	    *user_version = i->second;
	    *return_code = p->second->return_code;
	    return true;
	  }
	}
	assert(0 == "in extra_caller_ops but not extra_reqids");
      }

      // finally, check ops that were trimmed into the dup list
      if (!(indexed_data & PGLOG_INDEXED_DUPS)) {
	index_dups();
      }
      auto q = dup_index.find(r);
      if (q != dup_index.end()) {
	*version = q->second->version;
	*user_version = q->second->user_version;
	*return_code = q->second->return_code;
	return true;
      }

      return false;
    }
321
    /// get a (bounded) list of recent reqids for the given object
    /// (newest first, at most max entries appended to *pls)
    void get_object_reqids(const hobject_t& oid, unsigned max,
			   mempool::osd_pglog::vector<pair<osd_reqid_t, version_t> > *pls) const {
      // make sure object is present at least once before we do an
      // O(n) search.
      if (!(indexed_data & PGLOG_INDEXED_OBJECTS)) {
	index_objects();
      }
      if (objects.count(oid) == 0)
	return;
      // scan newest-to-oldest, collecting each matching entry's own reqid
      // plus any piggy-backed extra_reqids, until max are gathered
      for (list<pg_log_entry_t>::const_reverse_iterator i = log.rbegin();
	   i != log.rend();
	   ++i) {
	if (i->soid == oid) {
	  if (i->reqid_is_indexed())
	    pls->push_back(make_pair(i->reqid, i->user_version));
	  pls->insert(pls->end(), i->extra_reqids.begin(), i->extra_reqids.end());
	  if (pls->size() >= max) {
	    if (pls->size() > max) {
	      pls->resize(max);  // the bulk insert may have overshot
	    }
	    return;
	  }
	}
      }
    }
348
    // (Re)build the requested indices from scratch over the current log
    // and dup entries, then record them as built in indexed_data.
    void index(__u16 to_index = PGLOG_INDEXED_ALL) const {
      // if to_index is 0, no need to run any of this code, especially
      // loop below; this can happen with copy constructor for
      // IndexedLog (and indirectly through assignment operator)
      if (!to_index) return;

      if (to_index & PGLOG_INDEXED_OBJECTS)
	objects.clear();
      if (to_index & PGLOG_INDEXED_CALLER_OPS)
	caller_ops.clear();
      if (to_index & PGLOG_INDEXED_EXTRA_CALLER_OPS)
	extra_caller_ops.clear();
      if (to_index & PGLOG_INDEXED_DUPS) {
	dup_index.clear();
	for (auto& i : dups) {
	  dup_index[i.reqid] = const_cast<pg_log_dup_t*>(&i);
	}
      }

      // the three entry-based indices share one pass over the log
      constexpr __u16 any_log_entry_index =
	PGLOG_INDEXED_OBJECTS |
	PGLOG_INDEXED_CALLER_OPS |
	PGLOG_INDEXED_EXTRA_CALLER_OPS;

      if (to_index & any_log_entry_index) {
	for (list<pg_log_entry_t>::const_iterator i = log.begin();
	     i != log.end();
	     ++i) {
	  if (to_index & PGLOG_INDEXED_OBJECTS) {
	    if (i->object_is_indexed()) {
	      // later entries overwrite earlier ones, so each object maps
	      // to its most recent entry
	      objects[i->soid] = const_cast<pg_log_entry_t*>(&(*i));
	    }
	  }

	  if (to_index & PGLOG_INDEXED_CALLER_OPS) {
	    if (i->reqid_is_indexed()) {
	      caller_ops[i->reqid] = const_cast<pg_log_entry_t*>(&(*i));
	    }
	  }

	  if (to_index & PGLOG_INDEXED_EXTRA_CALLER_OPS) {
	    for (auto j = i->extra_reqids.begin();
		 j != i->extra_reqids.end();
		 ++j) {
	      extra_caller_ops.insert(
		make_pair(j->first, const_cast<pg_log_entry_t*>(&(*i))));
	    }
	  }
	}
      }

      indexed_data |= to_index;
    }
402
    // Lazy single-index builders; each simply forwards to index() with
    // the corresponding flag.
    void index_objects() const {
      index(PGLOG_INDEXED_OBJECTS);
    }

    void index_caller_ops() const {
      index(PGLOG_INDEXED_CALLER_OPS);
    }

    void index_extra_caller_ops() const {
      index(PGLOG_INDEXED_EXTRA_CALLER_OPS);
    }

    void index_dups() const {
      index(PGLOG_INDEXED_DUPS);
    }
418
    // Incrementally add one entry to whichever indices are currently
    // built (indices that are not built stay lazy and untouched).
    void index(pg_log_entry_t& e) {
      if ((indexed_data & PGLOG_INDEXED_OBJECTS) && e.object_is_indexed()) {
	// keep only the newest entry per object
	if (objects.count(e.soid) == 0 ||
	    objects[e.soid]->version < e.version)
	  objects[e.soid] = &e;
      }
      if (indexed_data & PGLOG_INDEXED_CALLER_OPS) {
	// divergent merge_log indexes new before unindexing old
	if (e.reqid_is_indexed()) {
	  caller_ops[e.reqid] = &e;
	}
      }
      if (indexed_data & PGLOG_INDEXED_EXTRA_CALLER_OPS) {
	for (auto j = e.extra_reqids.begin();
	     j != e.extra_reqids.end();
	     ++j) {
	  extra_caller_ops.insert(make_pair(j->first, &e));
	}
      }
    }
439
440 void unindex() {
441 objects.clear();
442 caller_ops.clear();
443 extra_caller_ops.clear();
444 dup_index.clear();
445 indexed_data = 0;
446 }
447
    // Incrementally remove one entry from the currently-built indices.
    void unindex(const pg_log_entry_t& e) {
      // NOTE: this only works if we remove from the _tail_ of the log!
      if (indexed_data & PGLOG_INDEXED_OBJECTS) {
	// only remove if the index actually points at this entry's version
	if (objects.count(e.soid) && objects[e.soid]->version == e.version)
	  objects.erase(e.soid);
      }
      if (e.reqid_is_indexed()) {
	if (indexed_data & PGLOG_INDEXED_CALLER_OPS) {
	  // divergent merge_log indexes new before unindexing old
	  if (caller_ops.count(e.reqid) && caller_ops[e.reqid] == &e)
	    caller_ops.erase(e.reqid);
	}
      }
      if (indexed_data & PGLOG_INDEXED_EXTRA_CALLER_OPS) {
	// multimap: erase only the mapping that points at this entry
	for (auto j = e.extra_reqids.begin();
	     j != e.extra_reqids.end();
	     ++j) {
	  for (ceph::unordered_multimap<osd_reqid_t,pg_log_entry_t*>::iterator k =
		 extra_caller_ops.find(j->first);
	       k != extra_caller_ops.end() && k->first == j->first;
	       ++k) {
	    if (k->second == &e) {
	      extra_caller_ops.erase(k);
	      break;
	    }
	  }
	}
      }
    }
477
478 void index(pg_log_dup_t& e) {
479 if (PGLOG_INDEXED_DUPS) {
480 dup_index[e.reqid] = &e;
481 }
482 }
483
484 void unindex(const pg_log_dup_t& e) {
485 if (PGLOG_INDEXED_DUPS) {
486 auto i = dup_index.find(e.reqid);
487 if (i != dup_index.end()) {
488 dup_index.erase(i);
489 }
490 }
491 }
492
493 // actors
    // Append entry e at the head of the log and update whichever indices
    // are built.  If applied is false the entry's effects are not yet on
    // disk, so the rollback boundary must already be at head and is then
    // advanced past the new entry.
    void add(const pg_log_entry_t& e, bool applied = true) {
      if (!applied) {
	assert(get_can_rollback_to() == head);
      }

      // make sure our buffers don't pin bigger buffers
      e.mod_desc.trim_bl();

      // add to log
      log.push_back(e);

      // riter previously pointed to the previous entry
      if (rollback_info_trimmed_to_riter == log.rbegin())
	++rollback_info_trimmed_to_riter;

      assert(e.version > head);
      assert(head.version == 0 || e.version.version > head.version);
      head = e.version;

      // to our index
      if ((indexed_data & PGLOG_INDEXED_OBJECTS) && e.object_is_indexed()) {
	objects[e.soid] = &(log.back());
      }
      if (indexed_data & PGLOG_INDEXED_CALLER_OPS) {
	if (e.reqid_is_indexed()) {
	  caller_ops[e.reqid] = &(log.back());
	}
      }

      if (indexed_data & PGLOG_INDEXED_EXTRA_CALLER_OPS) {
	for (auto j = e.extra_reqids.begin();
	     j != e.extra_reqids.end();
	     ++j) {
	  extra_caller_ops.insert(make_pair(j->first, &(log.back())));
	}
      }

      if (!applied) {
	skip_can_rollback_to_to_head();
      }
    } // add
535
536 void trim(
537 CephContext* cct,
538 eversion_t s,
539 set<eversion_t> *trimmed,
540 set<string>* trimmed_dups,
541 bool* dirty_dups);
542
543 ostream& print(ostream& out) const;
544 }; // IndexedLog
545
546
547 protected:
548 //////////////////// data members ////////////////////
549
550 pg_missing_tracker_t missing;
551 IndexedLog log;
552
553 eversion_t dirty_to; ///< must clear/writeout all keys <= dirty_to
554 eversion_t dirty_from; ///< must clear/writeout all keys >= dirty_from
555 eversion_t writeout_from; ///< must writout keys >= writeout_from
556 set<eversion_t> trimmed; ///< must clear keys in trimmed
557 set<string> trimmed_dups; ///< must clear keys in trimmed_dups
558 CephContext *cct;
559 bool pg_log_debug;
560 /// Log is clean on [dirty_to, dirty_from)
561 bool touched_log;
562 bool clear_divergent_priors;
563 bool dirty_dups; /// log.dups is updated
564 bool rebuilt_missing_with_deletes = false;
565
  // Widen the persisted-state dirty/writeout ranges; each call only ever
  // moves its bound outward (never shrinks the range).
  void mark_dirty_to(eversion_t to) {
    if (to > dirty_to)
      dirty_to = to;
  }
  void mark_dirty_from(eversion_t from) {
    if (from < dirty_from)
      dirty_from = from;
  }
  void mark_writeout_from(eversion_t from) {
    if (from < writeout_from)
      writeout_from = from;
  }
578 public:
  // True if any in-memory log/missing state still needs to be persisted.
  bool is_dirty() const {
    return !touched_log ||
      (dirty_to != eversion_t()) ||
      (dirty_from != eversion_t::max()) ||
      (writeout_from != eversion_t::max()) ||
      !(trimmed.empty()) ||
      !missing.is_clean() ||
      !(trimmed_dups.empty()) ||
      dirty_dups ||
      rebuilt_missing_with_deletes;
  }
  // Force a complete rewrite of the persisted log on the next write-out.
  void mark_log_for_rewrite() {
    mark_dirty_to(eversion_t::max());
    mark_dirty_from(eversion_t());
    touched_log = false;
  }
  bool get_rebuilt_missing_with_deletes() const {
    return rebuilt_missing_with_deletes;
  }
598 protected:
599
600 /// DEBUG
601 set<string> log_keys_debug;
602 static void clear_after(set<string> *log_keys_debug, const string &lb) {
603 if (!log_keys_debug)
604 return;
605 for (set<string>::iterator i = log_keys_debug->lower_bound(lb);
606 i != log_keys_debug->end();
607 log_keys_debug->erase(i++));
608 }
609 static void clear_up_to(set<string> *log_keys_debug, const string &ub) {
610 if (!log_keys_debug)
611 return;
612 for (set<string>::iterator i = log_keys_debug->begin();
613 i != log_keys_debug->end() && *i < ub;
614 log_keys_debug->erase(i++));
615 }
616
617 void check();
  // Mark all state clean without persisting it (debug/testing helper);
  // runs the consistency check() and flushes the missing tracker.
  void undirty() {
    dirty_to = eversion_t();
    dirty_from = eversion_t::max();
    touched_log = true;
    trimmed.clear();
    trimmed_dups.clear();
    writeout_from = eversion_t::max();
    check();
    missing.flush();
    dirty_dups = false;
  }
629 public:
630
  // Construct a clean (nothing-dirty) PGLog.  dpp, when supplied, provides
  // the log prefix/subsystem; otherwise defaults are used (see gen_prefix).
  // pg_log_debug enables the log_keys_debug consistency tracking unless
  // osd_debug_pg_log_writeout is set in the conf.
  // cppcheck-suppress noExplicitConstructor
  PGLog(CephContext *cct, DoutPrefixProvider *dpp = nullptr) :
    prefix_provider(dpp),
    dirty_from(eversion_t::max()),
    writeout_from(eversion_t::max()),
    cct(cct),
    pg_log_debug(!(cct && !(cct->_conf->osd_debug_pg_log_writeout))),
    touched_log(false),
    clear_divergent_priors(false),
    dirty_dups(false)
  { }
642
643 void reset_backfill();
644
645 void clear();
646
647 //////////////////// get or set missing ////////////////////
648
  const pg_missing_tracker_t& get_missing() const { return missing; }
  // Lower the recorded "have" version for oid in the missing set.
  void revise_have(hobject_t oid, eversion_t have) {
    missing.revise_have(oid, have);
  }

  // Record oid as missing: we need 'need' and currently have 'have'.
  void missing_add(const hobject_t& oid, eversion_t need, eversion_t have) {
    missing.add(oid, need, have, false);
  }
657
658 //////////////////// get or set log ////////////////////
659
  // Simple accessors onto the wrapped IndexedLog.
  const IndexedLog &get_log() const { return log; }

  const eversion_t &get_tail() const { return log.tail; }

  void set_tail(eversion_t tail) { log.tail = tail; }

  const eversion_t &get_head() const { return log.head; }

  void set_head(eversion_t head) { log.head = head; }

  void set_last_requested(version_t last_requested) {
    log.last_requested = last_requested;
  }

  void index() { log.index(); }

  void unindex() { log.unindex(); }

  // Append e and remember that keys from e.version onward must be
  // written out on the next persist.
  void add(const pg_log_entry_t& e, bool applied = true) {
    mark_writeout_from(e.version);
    log.add(e, applied);
  }

  void reset_recovery_pointers() { log.reset_recovery_pointers(); }
684
685 static void clear_info_log(
686 spg_t pgid,
687 ObjectStore::Transaction *t);
688
689 void trim(
690 eversion_t trim_to,
691 pg_info_t &info);
692
  // Advance the log's rollback boundary to roll_forward_to, applying each
  // committed entry's side effects through h.
  void roll_forward_to(
    eversion_t roll_forward_to,
    LogEntryHandler *h) {
    log.roll_forward_to(
      roll_forward_to,
      h);
  }

  eversion_t get_can_rollback_to() const {
    return log.get_can_rollback_to();
  }

  // Roll forward all the way to the current head.
  void roll_forward(LogEntryHandler *h) {
    roll_forward_to(
      log.head,
      h);
  }
710
711 //////////////////// get or set log & missing ////////////////////
712
  // Adopt log o wholesale (backfill reset): first trim our own rollback
  // info to head via h, then replace the log, clear the missing set, and
  // mark everything for rewrite.
  void reset_backfill_claim_log(const pg_log_t &o, LogEntryHandler *h) {
    log.trim_rollback_info_to(log.head, h);
    log.claim_log_and_clear_rollback_info(o);
    missing.clear();
    mark_dirty_to(eversion_t::max());
  }
719
  // Split this PG's log/missing into a child PG's log (PG split); both
  // parent and child are marked fully dirty so they get rewritten.
  void split_into(
    pg_t child_pgid,
    unsigned split_bits,
    PGLog *opg_log) {
    log.split_out_child(child_pgid, split_bits, &opg_log->log);
    missing.split_into(child_pgid, split_bits, &(opg_log->missing));
    opg_log->mark_dirty_to(eversion_t::max());
    mark_dirty_to(eversion_t::max());
    // propagate the deletes-aware missing-set property to the child
    if (missing.may_include_deletes)
      opg_log->rebuilt_missing_with_deletes = true;
  }
731
  // Record that oid@v has been recovered, and advance complete_to /
  // info.last_complete as far as the (version-ordered) missing set allows.
  void recover_got(hobject_t oid, eversion_t v, pg_info_t &info) {
    if (missing.is_missing(oid, v)) {
      missing.got(oid, v);

      // raise last_complete?
      if (missing.get_items().empty()) {
	log.complete_to = log.log.end();
	info.last_complete = info.last_update;
      }
      while (log.complete_to != log.log.end()) {
	// stop at the first entry that is still needed (rmissing is
	// ordered, so begin() is the oldest outstanding need)
	if (missing.get_items().at(
	      missing.get_rmissing().begin()->second
	      ).need <= log.complete_to->version)
	  break;
	if (info.last_complete < log.complete_to->version)
	  info.last_complete = log.complete_to->version;
	++log.complete_to;
      }
    }

    assert(log.get_can_rollback_to() >= v);
  }
754
  // Recompute complete_to: the first log entry whose version is >= the
  // oldest outstanding need.  If info is non-null, last_complete is set to
  // the version of the entry just before complete_to (or zero when
  // complete_to is at the very beginning).
  void reset_complete_to(pg_info_t *info) {
    log.complete_to = log.log.begin();
    while (!missing.get_items().empty() && log.complete_to->version <
	   missing.get_items().at(
	     missing.get_rmissing().begin()->second
	     ).need) {
      ++log.complete_to;
    }
    assert(log.complete_to != log.log.end());
    if (log.complete_to == log.log.begin()) {
      if (info)
	info->last_complete = eversion_t();
    } else {
      // peek at the preceding entry, then restore complete_to
      --log.complete_to;
      if (info)
	info->last_complete = log.complete_to->version;
      ++log.complete_to;
    }
  }
774
  // On activation with outstanding missing objects: recompute the
  // recovery pointers and restart the primary's request cursor.
  void activate_not_complete(pg_info_t &info) {
    reset_complete_to(&info);
    log.last_requested = 0;
  }
779
780 void proc_replica_log(pg_info_t &oinfo,
781 const pg_log_t &olog,
782 pg_missing_t& omissing, pg_shard_t from) const;
783
784 void rebuild_missing_set_with_deletes(ObjectStore *store,
785 coll_t pg_coll,
786 const pg_info_t &info);
787
788 protected:
  // Partition entries into per-object lists, preserving per-object order.
  // splice() moves nodes without copying; entries is drained in the process.
  static void split_by_object(
    mempool::osd_pglog::list<pg_log_entry_t> &entries,
    map<hobject_t, mempool::osd_pglog::list<pg_log_entry_t>> *out_entries) {
    while (!entries.empty()) {
      auto &out_list = (*out_entries)[entries.front().soid];
      out_list.splice(out_list.end(), entries, entries.begin());
    }
  }
797
798 /**
799 * _merge_object_divergent_entries
800 *
801 * There are 5 distinct cases:
802 * 1) There is a more recent update: in this case we assume we adjusted the
803 * store and missing during merge_log
804 * 2) The first entry in the divergent sequence is a create. This might
805 * either be because the object is a clone or because prior_version is
806 * eversion_t(). In this case the object does not exist and we must
807 * adjust missing and the store to match.
808 * 3) We are currently missing the object. In this case, we adjust the
809 * missing to our prior_version taking care to add a divergent_prior
810 * if necessary
811 * 4) We can rollback all of the entries. In this case, we do so using
812 * the rollbacker and return -- the object does not go into missing.
813 * 5) We cannot rollback at least 1 of the entries. In this case, we
814 * clear the object out of the store and add a missing entry at
815 * prior_version taking care to add a divergent_prior if
816 * necessary.
817 */
  // Merge one object's divergent entries against the authoritative log;
  // see the 5-case discussion in the comment block above.
  template <typename missing_type>
  static void _merge_object_divergent_entries(
    const IndexedLog &log,               ///< [in] log to merge against
    const hobject_t &hoid,               ///< [in] object we are merging
    const mempool::osd_pglog::list<pg_log_entry_t> &orig_entries, ///< [in] entries for hoid to merge
    const pg_info_t &info,               ///< [in] info for merging entries
    eversion_t olog_can_rollback_to,     ///< [in] rollback boundary
    missing_type &missing,               ///< [in,out] missing to adjust, use
    LogEntryHandler *rollbacker,         ///< [in] optional rollbacker object
    const DoutPrefixProvider *dpp        ///< [in] logging provider
    ) {
    ldpp_dout(dpp, 20) << __func__ << ": merging hoid " << hoid
		       << " entries: " << orig_entries << dendl;

    // objects past last_backfill are not tracked here; nothing to do
    if (hoid > info.last_backfill) {
      ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid << " after last_backfill"
			 << dendl;
      return;
    }

    // entries is non-empty
    assert(!orig_entries.empty());
    // strip out and ignore ERROR entries
    mempool::osd_pglog::list<pg_log_entry_t> entries;
    eversion_t last;
    for (list<pg_log_entry_t>::const_iterator i = orig_entries.begin();
	 i != orig_entries.end();
	 ++i) {
      // all entries are on hoid
      assert(i->soid == hoid);
      if (i != orig_entries.begin() && i->prior_version != eversion_t()) {
	// in increasing order of version
	assert(i->version > last);
	// prior_version correct (unless it is an ERROR entry)
	assert(i->prior_version == last || i->is_error());
      }
      last = i->version;
      if (i->is_error()) {
	ldpp_dout(dpp, 20) << __func__ << ": ignoring " << *i << dendl;
      } else {
	ldpp_dout(dpp, 20) << __func__ << ": keeping " << *i << dendl;
	entries.push_back(*i);
      }
    }
    if (entries.empty()) {
      ldpp_dout(dpp, 10) << __func__ << ": no non-ERROR entries" << dendl;
      return;
    }

    const eversion_t prior_version = entries.begin()->prior_version;
    const eversion_t first_divergent_update = entries.begin()->version;
    const eversion_t last_divergent_update = entries.rbegin()->version;
    // last surviving divergent op was a delete and we aren't missing the
    // object: there is nothing in the store to remove
    const bool object_not_in_store =
      !missing.is_missing(hoid) &&
      entries.rbegin()->is_delete();
    ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid
		       << " prior_version: " << prior_version
		       << " first_divergent_update: " << first_divergent_update
		       << " last_divergent_update: " << last_divergent_update
		       << dendl;

    ceph::unordered_map<hobject_t, pg_log_entry_t*>::const_iterator objiter =
      log.objects.find(hoid);
    if (objiter != log.objects.end() &&
	objiter->second->version >= first_divergent_update) {
      /// Case 1)
      ldpp_dout(dpp, 10) << __func__ << ": more recent entry found: "
			 << *objiter->second << ", already merged" << dendl;

      assert(objiter->second->version > last_divergent_update);

      // ensure missing has been updated appropriately
      if (objiter->second->is_update() ||
	  (missing.may_include_deletes && objiter->second->is_delete())) {
	assert(missing.is_missing(hoid) &&
	       missing.get_items().at(hoid).need == objiter->second->version);
      } else {
	assert(!missing.is_missing(hoid));
      }
      missing.revise_have(hoid, eversion_t());
      if (rollbacker) {
	if (!object_not_in_store) {
	  rollbacker->remove(hoid);
	}
	for (auto &&i: entries) {
	  rollbacker->trim(i);
	}
      }
      return;
    }

    ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid
		       <<" has no more recent entries in log" << dendl;
    if (prior_version == eversion_t() || entries.front().is_clone()) {
      /// Case 2)
      ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid
			 << " prior_version or op type indicates creation,"
			 << " deleting"
			 << dendl;
      if (missing.is_missing(hoid))
	missing.rm(missing.get_items().find(hoid));
      if (rollbacker) {
	if (!object_not_in_store) {
	  rollbacker->remove(hoid);
	}
	for (auto &&i: entries) {
	  rollbacker->trim(i);
	}
      }
      return;
    }

    if (missing.is_missing(hoid)) {
      /// Case 3)
      ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid
			 << " missing, " << missing.get_items().at(hoid)
			 << " adjusting" << dendl;

      if (missing.get_items().at(hoid).have == prior_version) {
	ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid
			   << " missing.have is prior_version " << prior_version
			   << " removing from missing" << dendl;
	missing.rm(missing.get_items().find(hoid));
      } else {
	ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid
			   << " missing.have is " << missing.get_items().at(hoid).have
			   << ", adjusting" << dendl;
	missing.revise_need(hoid, prior_version, false);
	if (prior_version <= info.log_tail) {
	  ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid
			     << " prior_version " << prior_version
			     << " <= info.log_tail "
			     << info.log_tail << dendl;
	}
      }
      if (rollbacker) {
	for (auto &&i: entries) {
	  rollbacker->trim(i);
	}
      }
      return;
    }

    ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid
		       << " must be rolled back or recovered,"
		       << " attempting to rollback"
		       << dendl;
    bool can_rollback = true;
    /// Distinguish between 4) and 5)
    for (list<pg_log_entry_t>::const_reverse_iterator i = entries.rbegin();
	 i != entries.rend();
	 ++i) {
      if (!i->can_rollback() || i->version <= olog_can_rollback_to) {
	ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid << " cannot rollback "
			   << *i << dendl;
	can_rollback = false;
	break;
      }
    }

    if (can_rollback) {
      /// Case 4)
      // roll back newest-first so each undo sees the state its op produced
      for (list<pg_log_entry_t>::const_reverse_iterator i = entries.rbegin();
	   i != entries.rend();
	   ++i) {
	assert(i->can_rollback() && i->version > olog_can_rollback_to);
	ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid
			   << " rolling back " << *i << dendl;
	if (rollbacker)
	  rollbacker->rollback(*i);
      }
      ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid
			 << " rolled back" << dendl;
      return;
    } else {
      /// Case 5)
      ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid << " cannot roll back, "
			 << "removing and adding to missing" << dendl;
      if (rollbacker) {
	if (!object_not_in_store)
	  rollbacker->remove(hoid);
	for (auto &&i: entries) {
	  rollbacker->trim(i);
	}
      }
      missing.add(hoid, prior_version, eversion_t(), false);
      if (prior_version <= info.log_tail) {
	ldpp_dout(dpp, 10) << __func__ << ": hoid " << hoid
			   << " prior_version " << prior_version
			   << " <= info.log_tail "
			   << info.log_tail << dendl;
      }
    }
  }
1012
1013 /// Merge all entries using above
1014 template <typename missing_type>
1015 static void _merge_divergent_entries(
1016 const IndexedLog &log, ///< [in] log to merge against
1017 mempool::osd_pglog::list<pg_log_entry_t> &entries, ///< [in] entries to merge
1018 const pg_info_t &oinfo, ///< [in] info for merging entries
1019 eversion_t olog_can_rollback_to, ///< [in] rollback boundary
1020 missing_type &omissing, ///< [in,out] missing to adjust, use
1021 LogEntryHandler *rollbacker, ///< [in] optional rollbacker object
1022 const DoutPrefixProvider *dpp ///< [in] logging provider
1023 ) {
1024 map<hobject_t, mempool::osd_pglog::list<pg_log_entry_t> > split;
1025 split_by_object(entries, &split);
1026 for (map<hobject_t, mempool::osd_pglog::list<pg_log_entry_t>>::iterator i = split.begin();
1027 i != split.end();
1028 ++i) {
1029 _merge_object_divergent_entries(
1030 log,
1031 i->first,
1032 i->second,
1033 oinfo,
1034 olog_can_rollback_to,
1035 omissing,
1036 rollbacker,
1037 dpp);
1038 }
1039 }
1040
1041 /**
1042 * Exists for use in TestPGLog for simply testing single divergent log
1043 * cases
1044 */
  // Merge a single divergent entry (wraps _merge_object_divergent_entries
  // with a one-element list); kept public-ish for TestPGLog.
  void merge_old_entry(
    ObjectStore::Transaction& t,
    const pg_log_entry_t& oe,
    const pg_info_t& info,
    LogEntryHandler *rollbacker) {
    mempool::osd_pglog::list<pg_log_entry_t> entries;
    entries.push_back(oe);
    _merge_object_divergent_entries(
      log,
      oe.soid,
      entries,
      info,
      log.get_can_rollback_to(),
      missing,
      rollbacker,
      this);
  }
1062
1063 bool merge_log_dups(const pg_log_t& olog);
1064
1065 public:
1066
1067 void rewind_divergent_log(eversion_t newhead,
1068 pg_info_t &info,
1069 LogEntryHandler *rollbacker,
1070 bool &dirty_info,
1071 bool &dirty_big_info);
1072
1073 void merge_log(pg_info_t &oinfo,
1074 pg_log_t &olog,
1075 pg_shard_t from,
1076 pg_info_t &info, LogEntryHandler *rollbacker,
1077 bool &dirty_info, bool &dirty_big_info);
1078
  // Append entries to *log (if non-null) and fold them into the missing
  // set for objects at or before last_backfill.  Returns true if the
  // appended entries invalidate the PG stats (any non-error entry does).
  template <typename missing_type>
  static bool append_log_entries_update_missing(
    const hobject_t &last_backfill,
    bool last_backfill_bitwise,
    const mempool::osd_pglog::list<pg_log_entry_t> &entries,
    bool maintain_rollback,
    IndexedLog *log,
    missing_type &missing,
    LogEntryHandler *rollbacker,
    const DoutPrefixProvider *dpp) {
    bool invalidate_stats = false;
    if (log && !entries.empty()) {
      // entries must strictly extend the log
      assert(log->head < entries.begin()->version);
    }
    for (list<pg_log_entry_t>::const_iterator p = entries.begin();
	 p != entries.end();
	 ++p) {
      invalidate_stats = invalidate_stats || !p->is_error();
      if (log) {
	ldpp_dout(dpp, 20) << "update missing, append " << *p << dendl;
	log->add(*p);
      }
      // only objects we track (<= last_backfill) affect the missing set
      if (p->soid <= last_backfill &&
	  !p->is_error()) {
	if (missing.may_include_deletes) {
	  missing.add_next_event(*p);
	} else {
	  if (p->is_delete()) {
	    missing.rm(p->soid, p->version);
	  } else {
	    missing.add_next_event(*p);
	  }
	  if (rollbacker) {
	    // hack to match PG::mark_all_unfound_lost
	    if (maintain_rollback && p->is_lost_delete() && p->can_rollback()) {
	      rollbacker->try_stash(p->soid, p->version.version);
	    } else if (p->is_delete()) {
	      rollbacker->remove(p->soid);
	    }
	  }
	}
      }
    }
    return invalidate_stats;
  }
1124 bool append_new_log_entries(
1125 const hobject_t &last_backfill,
1126 bool last_backfill_bitwise,
1127 const mempool::osd_pglog::list<pg_log_entry_t> &entries,
1128 LogEntryHandler *rollbacker) {
1129 bool invalidate_stats = append_log_entries_update_missing(
1130 last_backfill,
1131 last_backfill_bitwise,
1132 entries,
1133 true,
1134 &log,
1135 missing,
1136 rollbacker,
1137 this);
1138 if (!entries.empty()) {
1139 mark_writeout_from(entries.begin()->version);
1140 if (entries.begin()->is_lost_delete()) {
1141 // hack: since lost deletes queue recovery directly, and don't
1142 // go through activate_not_complete() again, our complete_to
1143 // iterator may still point at log.end(). Reset it to point
1144 // before these new lost_delete entries. This only occurs
1145 // when lost+delete entries are initially added, which is
1146 // always in a list of solely lost_delete entries, so it is
1147 // sufficient to check whether the first entry is a
1148 // lost_delete
1149 reset_complete_to(nullptr);
1150 }
1151 }
1152 return invalidate_stats;
1153 }
1154
  // Serialize our own log and missing set into *km and transaction t,
  // targeting log_oid in coll — TODO confirm details against the
  // definition in PGLog.cc.
  void write_log_and_missing(
    ObjectStore::Transaction& t,
    map<string,bufferlist> *km,
    const coll_t& coll,
    const ghobject_t &log_oid,
    bool require_rollback);

  // Serialize an arbitrary log (no missing set) plus divergent_priors;
  // presumably used for legacy/test paths — confirm against callers.
  static void write_log_and_missing_wo_missing(
    ObjectStore::Transaction& t,
    map<string,bufferlist>* km,
    pg_log_t &log,
    const coll_t& coll,
    const ghobject_t &log_oid, map<eversion_t, hobject_t> &divergent_priors,
    bool require_rollback,
    bool dirty_dups);

  // Serialize an arbitrary log together with a missing-set tracker;
  // *rebuilt_missing_set_with_deletes reports whether the missing set
  // was rebuilt in the deletes-aware format — TODO confirm in PGLog.cc.
  static void write_log_and_missing(
    ObjectStore::Transaction& t,
    map<string,bufferlist>* km,
    pg_log_t &log,
    const coll_t& coll,
    const ghobject_t &log_oid,
    const pg_missing_tracker_t &missing,
    bool require_rollback,
    bool dirty_dups,
    bool *rebuilt_missing_set_with_deletes);

  // Low-level writer behind write_log_and_missing_wo_missing: the
  // dirty_to/dirty_from/writeout_from eversions and trimmed key sets
  // bound which omap keys are rewritten or deleted — NOTE(review):
  // inferred from parameter names; verify against the definition.
  static void _write_log_and_missing_wo_missing(
    ObjectStore::Transaction& t,
    map<string,bufferlist>* km,
    pg_log_t &log,
    const coll_t& coll, const ghobject_t &log_oid,
    map<eversion_t, hobject_t> &divergent_priors,
    eversion_t dirty_to,
    eversion_t dirty_from,
    eversion_t writeout_from,
    const set<eversion_t> &trimmed,
    const set<string> &trimmed_dups,
    bool dirty_divergent_priors,
    bool touch_log,
    bool require_rollback,
    bool dirty_dups,
    set<string> *log_keys_debug
    );

  // Low-level writer behind write_log_and_missing (with missing set);
  // clear_divergent_priors requests removal of the on-disk
  // divergent_priors key — NOTE(review): inferred from parameter
  // names; verify against the definition.
  static void _write_log_and_missing(
    ObjectStore::Transaction& t,
    map<string,bufferlist>* km,
    pg_log_t &log,
    const coll_t& coll, const ghobject_t &log_oid,
    eversion_t dirty_to,
    eversion_t dirty_from,
    eversion_t writeout_from,
    const set<eversion_t> &trimmed,
    const set<string> &trimmed_dups,
    const pg_missing_tracker_t &missing,
    bool touch_log,
    bool require_rollback,
    bool clear_divergent_priors,
    bool dirty_dups,
    bool *rebuilt_missing_with_deletes,
    set<string> *log_keys_debug
    );

1219 void read_log_and_missing(
1220 ObjectStore *store,
1221 coll_t pg_coll,
1222 coll_t log_coll,
1223 ghobject_t log_oid,
1224 const pg_info_t &info,
1225 ostringstream &oss,
1226 bool tolerate_divergent_missing_log,
1227 bool debug_verify_stored_missing = false
1228 ) {
1229 return read_log_and_missing(
1230 store, pg_coll, log_coll, log_oid, info,
1231 log, missing, oss,
1232 tolerate_divergent_missing_log,
1233 &clear_divergent_priors,
1234 this,
1235 (pg_log_debug ? &log_keys_debug : nullptr),
1236 debug_verify_stored_missing);
1237 }
1238
  /**
   * Load the pg log, dup entries, and missing set from the omap of
   * log_oid in log_coll, rebuilding `log` and `missing` in memory.
   *
   * Recognized omap keys: "divergent_priors" (legacy), "can_rollback_to",
   * "rollback_info_trimmed_to", "may_include_deletes_in_missing",
   * "missing*" (one per missing object), "dup_*" (dup entries); any
   * other non-"_"-prefixed key is decoded as a pg_log_entry_t.
   *
   * If legacy divergent_priors were found (or verification is
   * requested), the missing set is (re)derived by probing object_info
   * attrs on disk over (last_complete, last_update].
   *
   * @param store     object store to read omap/attrs from
   * @param pg_coll   collection holding the pg's objects (attr probes)
   * @param log_coll  collection holding log_oid
   * @param log_oid   object whose omap holds the log
   * @param info      pg info providing last_update/log_tail/etc bounds
   * @param log       [out] rebuilt indexed log
   * @param missing   [out] rebuilt missing set
   * @param oss       unused here; kept for interface compatibility
   * @param tolerate_divergent_missing_log  tolerate on-disk state
   *        inconsistent with divergent_priors (tracker #17916)
   * @param clear_divergent_priors [out] set true iff legacy
   *        divergent_priors were found and folded into missing
   * @param dpp       logging provider
   * @param log_keys_debug [out] optional set of log key names read
   * @param debug_verify_stored_missing  cross-check the stored missing
   *        set against on-disk object state instead of rebuilding it
   */
  template <typename missing_type>
  static void read_log_and_missing(
    ObjectStore *store,
    coll_t pg_coll,
    coll_t log_coll,
    ghobject_t log_oid,
    const pg_info_t &info,
    IndexedLog &log,
    missing_type &missing,
    ostringstream &oss,
    bool tolerate_divergent_missing_log,
    bool *clear_divergent_priors = nullptr,
    const DoutPrefixProvider *dpp = nullptr,
    set<string> *log_keys_debug = nullptr,
    bool debug_verify_stored_missing = false
    ) {
    ldpp_dout(dpp, 20) << "read_log_and_missing coll " << pg_coll
		       << " log_oid " << log_oid << dendl;

    // legacy?
    // The log object must exist and be empty: all state lives in its
    // omap, not its data payload.
    struct stat st;
    int r = store->stat(log_coll, log_oid, &st);
    assert(r == 0);
    assert(st.st_size == 0);

    // will get overridden below if it had been recorded
    eversion_t on_disk_can_rollback_to = info.last_update;
    eversion_t on_disk_rollback_info_trimmed_to = eversion_t();
    ObjectMap::ObjectMapIterator p = store->get_omap_iterator(log_coll, log_oid);
    map<eversion_t, hobject_t> divergent_priors;
    bool has_divergent_priors = false;
    missing.may_include_deletes = false;
    list<pg_log_entry_t> entries;
    list<pg_log_dup_t> dups;
    if (p) {
      // Single pass over the omap, dispatching on key name/prefix.
      for (p->seek_to_first(); p->valid() ; p->next(false)) {
	// non-log pgmeta_oid keys are prefixed with _; skip those
	if (p->key()[0] == '_')
	  continue;
	bufferlist bl = p->value();//Copy bufferlist before creating iterator
	bufferlist::iterator bp = bl.begin();
	if (p->key() == "divergent_priors") {
	  ::decode(divergent_priors, bp);
	  ldpp_dout(dpp, 20) << "read_log_and_missing " << divergent_priors.size()
			     << " divergent_priors" << dendl;
	  has_divergent_priors = true;
	  // legacy format: the stored missing set can't be trusted, so
	  // skip verification and rebuild it below instead
	  debug_verify_stored_missing = false;
	} else if (p->key() == "can_rollback_to") {
	  ::decode(on_disk_can_rollback_to, bp);
	} else if (p->key() == "rollback_info_trimmed_to") {
	  ::decode(on_disk_rollback_info_trimmed_to, bp);
	} else if (p->key() == "may_include_deletes_in_missing") {
	  // presence of the key (not its value) carries the flag
	  missing.may_include_deletes = true;
	} else if (p->key().substr(0, 7) == string("missing")) {
	  hobject_t oid;
	  pg_missing_item item;
	  ::decode(oid, bp);
	  ::decode(item, bp);
	  if (item.is_delete()) {
	    // delete items are only legal in the deletes-aware format
	    assert(missing.may_include_deletes);
	  }
	  missing.add(oid, item.need, item.have, item.is_delete());
	} else if (p->key().substr(0, 4) == string("dup_")) {
	  pg_log_dup_t dup;
	  ::decode(dup, bp);
	  if (!dups.empty()) {
	    // dup keys sort by version; enforce strict ordering
	    assert(dups.back().version < dup.version);
	  }
	  dups.push_back(dup);
	} else {
	  // anything else is an actual log entry
	  pg_log_entry_t e;
	  e.decode_with_checksum(bp);
	  ldpp_dout(dpp, 20) << "read_log_and_missing " << e << dendl;
	  if (!entries.empty()) {
	    // entries must arrive in strictly increasing version order
	    pg_log_entry_t last_e(entries.back());
	    assert(last_e.version.version < e.version.version);
	    assert(last_e.version.epoch <= e.version.epoch);
	  }
	  entries.push_back(e);
	  if (log_keys_debug)
	    log_keys_debug->insert(e.get_key_name());
	}
      }
    }
    log = IndexedLog(
      info.last_update,
      info.log_tail,
      on_disk_can_rollback_to,
      on_disk_rollback_info_trimmed_to,
      std::move(entries),
      std::move(dups));

    if (has_divergent_priors || debug_verify_stored_missing) {
      // build missing
      if (debug_verify_stored_missing || info.last_complete < info.last_update) {
	ldpp_dout(dpp, 10)
	  << "read_log_and_missing checking for missing items over interval ("
	  << info.last_complete
	  << "," << info.last_update << "]" << dendl;

	set<hobject_t> did;
	set<hobject_t> checked;
	set<hobject_t> skipped;
	// Walk the log newest-to-oldest so the first (newest) entry per
	// object wins; `did` suppresses older entries for the same object.
	for (list<pg_log_entry_t>::reverse_iterator i = log.log.rbegin();
	     i != log.log.rend();
	     ++i) {
	  if (!debug_verify_stored_missing && i->version <= info.last_complete) break;
	  if (i->soid > info.last_backfill)
	    continue;
	  if (i->is_error())
	    continue;
	  if (did.count(i->soid)) continue;
	  did.insert(i->soid);

	  if (!missing.may_include_deletes && i->is_delete())
	    continue;

	  // Probe the on-disk object_info to learn the version we have.
	  bufferlist bv;
	  int r = store->getattr(
	    pg_coll,
	    ghobject_t(i->soid, ghobject_t::NO_GEN, info.pgid.shard),
	    OI_ATTR,
	    bv);
	  if (r >= 0) {
	    // object exists but may be stale relative to the log
	    object_info_t oi(bv);
	    if (oi.version < i->version) {
	      ldpp_dout(dpp, 15) << "read_log_and_missing  missing " << *i
				 << " (have " << oi.version << ")" << dendl;
	      if (debug_verify_stored_missing) {
		auto miter = missing.get_items().find(i->soid);
		assert(miter != missing.get_items().end());
		assert(miter->second.need == i->version);
		// the 'have' version is reset if an object is deleted,
		// then created again
		assert(miter->second.have == oi.version || miter->second.have == eversion_t());
		checked.insert(i->soid);
	      } else {
		missing.add(i->soid, i->version, oi.version, i->is_delete());
	      }
	    }
	  } else {
	    // object absent on disk entirely
	    ldpp_dout(dpp, 15) << "read_log_and_missing  missing " << *i << dendl;
	    if (debug_verify_stored_missing) {
	      auto miter = missing.get_items().find(i->soid);
	      if (i->is_delete()) {
		// a delete may legitimately be absent from missing
		assert(miter == missing.get_items().end() ||
		       (miter->second.need == i->version &&
			miter->second.have == eversion_t()));
	      } else {
		assert(miter != missing.get_items().end());
		assert(miter->second.need == i->version);
		assert(miter->second.have == eversion_t());
	      }
	      checked.insert(i->soid);
	    } else {
	      missing.add(i->soid, i->version, eversion_t(), i->is_delete());
	    }
	  }
	}
	if (debug_verify_stored_missing) {
	  // Reverse direction: every stored missing item not covered by
	  // the log walk above must still agree with on-disk state.
	  for (auto &&i: missing.get_items()) {
	    if (checked.count(i.first))
	      continue;
	    if (i.first > info.last_backfill) {
	      ldpp_dout(dpp, -1) << __func__ << ": invalid missing set entry "
				 << "found before last_backfill: "
				 << i.first << " " << i.second
				 << " last_backfill = " << info.last_backfill
				 << dendl;
	      assert(0 == "invalid missing set entry found");
	    }
	    bufferlist bv;
	    int r = store->getattr(
	      pg_coll,
	      ghobject_t(i.first, ghobject_t::NO_GEN, info.pgid.shard),
	      OI_ATTR,
	      bv);
	    if (r >= 0) {
	      object_info_t oi(bv);
	      assert(oi.version == i.second.have);
	    } else {
	      assert(i.second.is_delete() || eversion_t() == i.second.have);
	    }
	  }
	} else {
	  // Legacy path: fold divergent_priors (newest first) into the
	  // missing set, skipping objects already handled via the log.
	  assert(has_divergent_priors);
	  for (map<eversion_t, hobject_t>::reverse_iterator i =
		 divergent_priors.rbegin();
	       i != divergent_priors.rend();
	       ++i) {
	    if (i->first <= info.last_complete) break;
	    if (i->second > info.last_backfill)
	      continue;
	    if (did.count(i->second)) continue;
	    did.insert(i->second);
	    bufferlist bv;
	    int r = store->getattr(
	      pg_coll,
	      ghobject_t(i->second, ghobject_t::NO_GEN, info.pgid.shard),
	      OI_ATTR,
	      bv);
	    if (r >= 0) {
	      object_info_t oi(bv);
	      /**
	       * 1) we see this entry in the divergent priors mapping
	       * 2) we didn't see an entry for this object in the log
	       *
	       * From 1 & 2 we know that either the object does not exist
	       * or it is at the version specified in the divergent_priors
	       * map since the object would have been deleted atomically
	       * with the addition of the divergent_priors entry, an older
	       * version would not have been recovered, and a newer version
	       * would show up in the log above.
	       */
	      /**
	       * Unfortunately the assessment above is incorrect because of
	       * http://tracker.ceph.com/issues/17916 (we were incorrectly
	       * not removing the divergent_priors set from disk state!),
	       * so let's check that.
	       */
	      if (oi.version > i->first && tolerate_divergent_missing_log) {
		ldpp_dout(dpp, 0) << "read_log divergent_priors entry (" << *i
				  << ") inconsistent with disk state (" << oi
				  << "), assuming it is tracker.ceph.com/issues/17916"
				  << dendl;
	      } else {
		assert(oi.version == i->first);
	      }
	    } else {
	      ldpp_dout(dpp, 15) << "read_log_and_missing  missing " << *i << dendl;
	      missing.add(i->second, i->first, eversion_t(), false);
	    }
	  }
	}
	// Legacy priors have been folded in; ask the caller to purge
	// the on-disk divergent_priors key.
	if (clear_divergent_priors)
	  (*clear_divergent_priors) = true;
      }
    }

    if (!has_divergent_priors) {
      if (clear_divergent_priors)
	(*clear_divergent_priors) = false;
      // finalize any batched/deferred missing-set updates
      missing.flush();
    }
    ldpp_dout(dpp, 10) << "read_log_and_missing done" << dendl;
  } // static read_log_and_missing
1485 }; // struct PGLog