1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2013,2014 Inktank Storage, Inc.
 * Copyright (C) 2013,2014 Cloudwatt <libre.licensing@cloudwatt.com>
 *
 * Author: Loic Dachary <loic@dachary.org>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 *
 */
19 #include "common/errno.h"
20 #include "common/scrub_types.h"
21 #include "ReplicatedBackend.h"
22 #include "ScrubStore.h"
23 #include "ECBackend.h"
24 #include "PGBackend.h"
26 #include "erasure-code/ErasureCodePlugin.h"
29 #include "common/LogClient.h"
31 #define dout_context cct
32 #define dout_subsys ceph_subsys_osd
33 #define DOUT_PREFIX_ARGS this
35 #define dout_prefix _prefix(_dout, this)
36 static ostream
& _prefix(std::ostream
*_dout
, PGBackend
*pgb
) {
37 return *_dout
<< pgb
->get_parent()->gen_dbg_prefix();
40 void PGBackend::rollback(
41 const pg_log_entry_t
&entry
,
42 ObjectStore::Transaction
*t
)
45 struct RollbackVisitor
: public ObjectModDesc::Visitor
{
46 const hobject_t
&hoid
;
48 ObjectStore::Transaction t
;
50 const hobject_t
&hoid
,
51 PGBackend
*pg
) : hoid(hoid
), pg(pg
) {}
52 void append(uint64_t old_size
) override
{
53 ObjectStore::Transaction temp
;
54 pg
->rollback_append(hoid
, old_size
, &temp
);
58 void setattrs(map
<string
, boost::optional
<bufferlist
> > &attrs
) override
{
59 ObjectStore::Transaction temp
;
60 pg
->rollback_setattrs(hoid
, attrs
, &temp
);
64 void rmobject(version_t old_version
) override
{
65 ObjectStore::Transaction temp
;
66 pg
->rollback_stash(hoid
, old_version
, &temp
);
70 void try_rmobject(version_t old_version
) override
{
71 ObjectStore::Transaction temp
;
72 pg
->rollback_try_stash(hoid
, old_version
, &temp
);
76 void create() override
{
77 ObjectStore::Transaction temp
;
78 pg
->rollback_create(hoid
, &temp
);
82 void update_snaps(const set
<snapid_t
> &snaps
) override
{
83 ObjectStore::Transaction temp
;
84 pg
->get_parent()->pgb_set_object_snap_mapping(hoid
, snaps
, &temp
);
88 void rollback_extents(
90 const vector
<pair
<uint64_t, uint64_t> > &extents
) override
{
91 ObjectStore::Transaction temp
;
92 pg
->rollback_extents(gen
, extents
, hoid
, &temp
);
98 assert(entry
.mod_desc
.can_rollback());
99 RollbackVisitor
vis(entry
.soid
, this);
100 entry
.mod_desc
.visit(&vis
);
104 struct Trimmer
: public ObjectModDesc::Visitor
{
105 const hobject_t
&soid
;
107 ObjectStore::Transaction
*t
;
109 const hobject_t
&soid
,
111 ObjectStore::Transaction
*t
)
112 : soid(soid
), pg(pg
), t(t
) {}
113 void rmobject(version_t old_version
) override
{
114 pg
->trim_rollback_object(
119 // try_rmobject defaults to rmobject
120 void rollback_extents(
122 const vector
<pair
<uint64_t, uint64_t> > &extents
) override
{
123 pg
->trim_rollback_object(
130 void PGBackend::rollforward(
131 const pg_log_entry_t
&entry
,
132 ObjectStore::Transaction
*t
)
134 auto dpp
= get_parent()->get_dpp();
135 ldpp_dout(dpp
, 20) << __func__
<< ": entry=" << entry
<< dendl
;
136 if (!entry
.can_rollback())
138 Trimmer
trimmer(entry
.soid
, this, t
);
139 entry
.mod_desc
.visit(&trimmer
);
142 void PGBackend::trim(
143 const pg_log_entry_t
&entry
,
144 ObjectStore::Transaction
*t
)
146 if (!entry
.can_rollback())
148 Trimmer
trimmer(entry
.soid
, this, t
);
149 entry
.mod_desc
.visit(&trimmer
);
152 void PGBackend::try_stash(
153 const hobject_t
&hoid
,
155 ObjectStore::Transaction
*t
)
159 ghobject_t(hoid
, ghobject_t::NO_GEN
, get_parent()->whoami_shard().shard
),
160 ghobject_t(hoid
, v
, get_parent()->whoami_shard().shard
));
163 void PGBackend::remove(
164 const hobject_t
&hoid
,
165 ObjectStore::Transaction
*t
) {
166 assert(!hoid
.is_temp());
169 ghobject_t(hoid
, ghobject_t::NO_GEN
, get_parent()->whoami_shard().shard
));
170 get_parent()->pgb_clear_object_snap_mapping(hoid
, t
);
173 void PGBackend::on_change_cleanup(ObjectStore::Transaction
*t
)
175 dout(10) << __func__
<< dendl
;
177 for (set
<hobject_t
>::iterator i
= temp_contents
.begin();
178 i
!= temp_contents
.end();
180 dout(10) << __func__
<< ": Removing oid "
181 << *i
<< " from the temp collection" << dendl
;
184 ghobject_t(*i
, ghobject_t::NO_GEN
, get_parent()->whoami_shard().shard
));
186 temp_contents
.clear();
189 int PGBackend::objects_list_partial(
190 const hobject_t
&begin
,
193 vector
<hobject_t
> *ls
,
197 // Starts with the smallest generation to make sure the result list
198 // has the marker object (it might have multiple generations
199 // though, which would be filtered).
202 _next
= ghobject_t(begin
, 0, get_parent()->whoami_shard().shard
);
209 while (!_next
.is_max() && ls
->size() < (unsigned)min
) {
210 vector
<ghobject_t
> objects
;
211 r
= store
->collection_list(
214 ghobject_t::get_max(),
219 derr
<< __func__
<< " list collection " << ch
<< " got: " << cpp_strerror(r
) << dendl
;
222 for (vector
<ghobject_t
>::iterator i
= objects
.begin();
225 if (i
->is_pgmeta() || i
->hobj
.is_temp()) {
228 if (i
->is_no_gen()) {
229 ls
->push_back(i
->hobj
);
238 int PGBackend::objects_list_range(
239 const hobject_t
&start
,
240 const hobject_t
&end
,
242 vector
<hobject_t
> *ls
,
243 vector
<ghobject_t
> *gen_obs
)
246 vector
<ghobject_t
> objects
;
247 int r
= store
->collection_list(
249 ghobject_t(start
, ghobject_t::NO_GEN
, get_parent()->whoami_shard().shard
),
250 ghobject_t(end
, ghobject_t::NO_GEN
, get_parent()->whoami_shard().shard
),
254 ls
->reserve(objects
.size());
255 for (vector
<ghobject_t
>::iterator i
= objects
.begin();
258 if (i
->is_pgmeta() || i
->hobj
.is_temp()) {
261 if (i
->is_no_gen()) {
262 ls
->push_back(i
->hobj
);
263 } else if (gen_obs
) {
264 gen_obs
->push_back(*i
);
270 int PGBackend::objects_get_attr(
271 const hobject_t
&hoid
,
276 int r
= store
->getattr(
278 ghobject_t(hoid
, ghobject_t::NO_GEN
, get_parent()->whoami_shard().shard
),
283 out
->push_back(std::move(bp
));
288 int PGBackend::objects_get_attrs(
289 const hobject_t
&hoid
,
290 map
<string
, bufferlist
> *out
)
292 return store
->getattrs(
294 ghobject_t(hoid
, ghobject_t::NO_GEN
, get_parent()->whoami_shard().shard
),
298 void PGBackend::rollback_setattrs(
299 const hobject_t
&hoid
,
300 map
<string
, boost::optional
<bufferlist
> > &old_attrs
,
301 ObjectStore::Transaction
*t
) {
302 map
<string
, bufferlist
> to_set
;
303 assert(!hoid
.is_temp());
304 for (map
<string
, boost::optional
<bufferlist
> >::iterator i
= old_attrs
.begin();
305 i
!= old_attrs
.end();
308 to_set
[i
->first
] = i
->second
.get();
312 ghobject_t(hoid
, ghobject_t::NO_GEN
, get_parent()->whoami_shard().shard
),
318 ghobject_t(hoid
, ghobject_t::NO_GEN
, get_parent()->whoami_shard().shard
),
322 void PGBackend::rollback_append(
323 const hobject_t
&hoid
,
325 ObjectStore::Transaction
*t
) {
326 assert(!hoid
.is_temp());
329 ghobject_t(hoid
, ghobject_t::NO_GEN
, get_parent()->whoami_shard().shard
),
333 void PGBackend::rollback_stash(
334 const hobject_t
&hoid
,
335 version_t old_version
,
336 ObjectStore::Transaction
*t
) {
337 assert(!hoid
.is_temp());
340 ghobject_t(hoid
, ghobject_t::NO_GEN
, get_parent()->whoami_shard().shard
));
341 t
->collection_move_rename(
343 ghobject_t(hoid
, old_version
, get_parent()->whoami_shard().shard
),
345 ghobject_t(hoid
, ghobject_t::NO_GEN
, get_parent()->whoami_shard().shard
));
348 void PGBackend::rollback_try_stash(
349 const hobject_t
&hoid
,
350 version_t old_version
,
351 ObjectStore::Transaction
*t
) {
352 assert(!hoid
.is_temp());
355 ghobject_t(hoid
, ghobject_t::NO_GEN
, get_parent()->whoami_shard().shard
));
358 ghobject_t(hoid
, old_version
, get_parent()->whoami_shard().shard
),
359 ghobject_t(hoid
, ghobject_t::NO_GEN
, get_parent()->whoami_shard().shard
));
362 void PGBackend::rollback_extents(
364 const vector
<pair
<uint64_t, uint64_t> > &extents
,
365 const hobject_t
&hoid
,
366 ObjectStore::Transaction
*t
) {
367 auto shard
= get_parent()->whoami_shard().shard
;
368 for (auto &&extent
: extents
) {
371 ghobject_t(hoid
, gen
, shard
),
372 ghobject_t(hoid
, ghobject_t::NO_GEN
, shard
),
379 ghobject_t(hoid
, gen
, shard
));
382 void PGBackend::trim_rollback_object(
383 const hobject_t
&hoid
,
384 version_t old_version
,
385 ObjectStore::Transaction
*t
) {
386 assert(!hoid
.is_temp());
388 coll
, ghobject_t(hoid
, old_version
, get_parent()->whoami_shard().shard
));
391 PGBackend
*PGBackend::build_pg_backend(
392 const pg_pool_t
&pool
,
393 const OSDMapRef curmap
,
396 ObjectStore::CollectionHandle
&ch
,
401 case pg_pool_t::TYPE_REPLICATED
: {
402 return new ReplicatedBackend(l
, coll
, ch
, store
, cct
);
404 case pg_pool_t::TYPE_ERASURE
: {
405 ErasureCodeInterfaceRef ec_impl
;
406 ErasureCodeProfile profile
= curmap
->get_erasure_code_profile(pool
.erasure_code_profile
);
407 assert(profile
.count("plugin"));
409 ceph::ErasureCodePluginRegistry::instance().factory(
410 profile
.find("plugin")->second
,
411 cct
->_conf
->get_val
<std::string
>("erasure_code_dir"),
416 return new ECBackend(
432 * pg lock may or may not be held
434 void PGBackend::be_scan_list(
435 ScrubMap
&map
, const vector
<hobject_t
> &ls
, bool deep
, uint32_t seed
,
436 ThreadPool::TPHandle
&handle
)
438 dout(10) << __func__
<< " scanning " << ls
.size() << " objects"
439 << (deep
? " deeply" : "") << dendl
;
441 for (vector
<hobject_t
>::const_iterator p
= ls
.begin();
444 handle
.reset_tp_timeout();
451 poid
, ghobject_t::NO_GEN
, get_parent()->whoami_shard().shard
),
455 ScrubMap::object
&o
= map
.objects
[poid
];
461 poid
, ghobject_t::NO_GEN
, get_parent()->whoami_shard().shard
),
464 // calculate the CRC32 on deep scrubs
466 be_deep_scrub(*p
, seed
, o
, handle
);
469 dout(25) << __func__
<< " " << poid
<< dendl
;
470 } else if (r
== -ENOENT
) {
471 dout(25) << __func__
<< " " << poid
<< " got " << r
472 << ", skipping" << dendl
;
473 } else if (r
== -EIO
) {
474 dout(25) << __func__
<< " " << poid
<< " got " << r
475 << ", stat_error" << dendl
;
476 ScrubMap::object
&o
= map
.objects
[poid
];
479 derr
<< __func__
<< " got: " << cpp_strerror(r
) << dendl
;
485 bool PGBackend::be_compare_scrub_objects(
486 pg_shard_t auth_shard
,
487 const ScrubMap::object
&auth
,
488 const object_info_t
& auth_oi
,
489 const ScrubMap::object
&candidate
,
490 shard_info_wrapper
&shard_result
,
491 inconsistent_obj_wrapper
&obj_result
,
492 ostream
&errorstream
)
494 enum { CLEAN
, FOUND_ERROR
} error
= CLEAN
;
495 if (candidate
.stat_error
) {
496 assert(shard_result
.has_stat_error());
498 errorstream
<< "candidate had a stat error";
500 if (candidate
.read_error
|| candidate
.ec_hash_mismatch
|| candidate
.ec_size_mismatch
) {
502 errorstream
<< "candidate had a read error";
504 if (auth
.digest_present
&& candidate
.digest_present
) {
505 if (auth
.digest
!= candidate
.digest
) {
509 errorstream
<< "data_digest 0x" << std::hex
<< candidate
.digest
510 << " != data_digest 0x" << auth
.digest
<< std::dec
511 << " from shard " << auth_shard
;
512 obj_result
.set_data_digest_mismatch();
515 if (auth
.omap_digest_present
&& candidate
.omap_digest_present
) {
516 if (auth
.omap_digest
!= candidate
.omap_digest
) {
520 errorstream
<< "omap_digest 0x" << std::hex
<< candidate
.omap_digest
521 << " != omap_digest 0x" << auth
.omap_digest
<< std::dec
522 << " from shard " << auth_shard
;
523 obj_result
.set_omap_digest_mismatch();
526 if (parent
->get_pool().is_replicated()) {
527 if (auth_oi
.is_data_digest() && candidate
.digest_present
) {
528 if (auth_oi
.data_digest
!= candidate
.digest
) {
532 errorstream
<< "data_digest 0x" << std::hex
<< candidate
.digest
533 << " != data_digest 0x" << auth_oi
.data_digest
<< std::dec
534 << " from auth oi " << auth_oi
;
535 shard_result
.set_data_digest_mismatch_oi();
538 if (auth_oi
.is_omap_digest() && candidate
.omap_digest_present
) {
539 if (auth_oi
.omap_digest
!= candidate
.omap_digest
) {
543 errorstream
<< "omap_digest 0x" << std::hex
<< candidate
.omap_digest
544 << " != omap_digest 0x" << auth_oi
.omap_digest
<< std::dec
545 << " from auth oi " << auth_oi
;
546 shard_result
.set_omap_digest_mismatch_oi();
550 if (candidate
.stat_error
)
551 return error
== FOUND_ERROR
;
552 uint64_t oi_size
= be_get_ondisk_size(auth_oi
.size
);
553 if (oi_size
!= candidate
.size
) {
557 errorstream
<< "size " << candidate
.size
558 << " != size " << oi_size
559 << " from auth oi " << auth_oi
;
560 shard_result
.set_size_mismatch_oi();
562 if (auth
.size
!= candidate
.size
) {
566 errorstream
<< "size " << candidate
.size
567 << " != size " << auth
.size
568 << " from shard " << auth_shard
;
569 obj_result
.set_size_mismatch();
571 for (map
<string
,bufferptr
>::const_iterator i
= auth
.attrs
.begin();
572 i
!= auth
.attrs
.end();
574 if (!candidate
.attrs
.count(i
->first
)) {
578 errorstream
<< "attr name mismatch '" << i
->first
<< "'";
579 obj_result
.set_attr_name_mismatch();
580 } else if (candidate
.attrs
.find(i
->first
)->second
.cmp(i
->second
)) {
584 errorstream
<< "attr value mismatch '" << i
->first
<< "'";
585 obj_result
.set_attr_value_mismatch();
588 for (map
<string
,bufferptr
>::const_iterator i
= candidate
.attrs
.begin();
589 i
!= candidate
.attrs
.end();
591 if (!auth
.attrs
.count(i
->first
)) {
595 errorstream
<< "attr name mismatch '" << i
->first
<< "'";
596 obj_result
.set_attr_name_mismatch();
599 return error
== FOUND_ERROR
;
602 static int dcount(const object_info_t
&oi
)
605 if (oi
.is_data_digest())
607 if (oi
.is_omap_digest())
612 map
<pg_shard_t
, ScrubMap
*>::const_iterator
613 PGBackend::be_select_auth_object(
614 const hobject_t
&obj
,
615 const map
<pg_shard_t
,ScrubMap
*> &maps
,
616 object_info_t
*auth_oi
,
617 map
<pg_shard_t
, shard_info_wrapper
> &shard_map
,
618 inconsistent_obj_wrapper
&object_error
)
620 eversion_t auth_version
;
623 map
<pg_shard_t
, ScrubMap
*>::const_iterator auth
= maps
.end();
624 for (map
<pg_shard_t
, ScrubMap
*>::const_iterator j
= maps
.begin();
627 map
<hobject_t
, ScrubMap::object
>::iterator i
=
628 j
->second
->objects
.find(obj
);
629 if (i
== j
->second
->objects
.end()) {
633 auto& shard_info
= shard_map
[j
->first
];
634 if (i
->second
.read_error
) {
635 shard_info
.set_read_error();
636 error_string
+= " read_error";
638 if (i
->second
.ec_hash_mismatch
) {
639 shard_info
.set_ec_hash_mismatch();
640 error_string
+= " ec_hash_mismatch";
642 if (i
->second
.ec_size_mismatch
) {
643 shard_info
.set_ec_size_mismatch();
644 error_string
+= " ec_size_mismatch";
649 map
<string
, bufferptr
>::iterator k
;
651 if (i
->second
.stat_error
) {
652 shard_info
.set_stat_error();
653 error_string
+= " stat_error";
654 // With stat_error no further checking
655 // We don't need to also see a missing_object_info_attr
659 k
= i
->second
.attrs
.find(OI_ATTR
);
660 if (k
== i
->second
.attrs
.end()) {
661 // no object info on object, probably corrupt
662 shard_info
.set_oi_attr_missing();
663 error_string
+= " oi_attr_missing";
666 bl
.push_back(k
->second
);
668 bufferlist::iterator bliter
= bl
.begin();
669 ::decode(oi
, bliter
);
671 // invalid object info, probably corrupt
672 shard_info
.set_oi_attr_corrupted();
673 error_string
+= " oi_attr_corrupted";
677 if (auth_version
!= eversion_t()) {
678 if (!object_error
.has_object_info_inconsistency() && !(bl
== auth_bl
)) {
679 object_error
.set_object_info_inconsistency();
680 error_string
+= " object_info_inconsistency";
684 // Don't use this particular shard because it won't be able to repair data
685 // XXX: For now we can't pick one shard for repair and another's object info
686 if (i
->second
.read_error
|| i
->second
.ec_hash_mismatch
|| i
->second
.ec_size_mismatch
)
689 if (auth_version
== eversion_t() || oi
.version
> auth_version
||
690 (oi
.version
== auth_version
&& dcount(oi
) > dcount(*auth_oi
))) {
693 auth_version
= oi
.version
;
699 // Check error_string because some errors already generated messages
700 if (error_string
!= "") {
701 dout(10) << __func__
<< ": error(s) osd " << j
->first
702 << " for obj " << obj
703 << "," << error_string
706 // Keep scanning other shards
708 dout(10) << __func__
<< ": selecting osd " << auth
->first
709 << " for obj " << obj
710 << " with oi " << *auth_oi
715 void PGBackend::be_compare_scrubmaps(
716 const map
<pg_shard_t
,ScrubMap
*> &maps
,
718 map
<hobject_t
, set
<pg_shard_t
>> &missing
,
719 map
<hobject_t
, set
<pg_shard_t
>> &inconsistent
,
720 map
<hobject_t
, list
<pg_shard_t
>> &authoritative
,
721 map
<hobject_t
, pair
<uint32_t,uint32_t>> &missing_digest
,
722 int &shallow_errors
, int &deep_errors
,
725 const vector
<int> &acting
,
726 ostream
&errorstream
)
728 map
<hobject_t
,ScrubMap::object
>::const_iterator i
;
729 map
<pg_shard_t
, ScrubMap
*>::const_iterator j
;
730 set
<hobject_t
> master_set
;
731 utime_t now
= ceph_clock_now();
733 // Construct master set
734 for (j
= maps
.begin(); j
!= maps
.end(); ++j
) {
735 for (i
= j
->second
->objects
.begin(); i
!= j
->second
->objects
.end(); ++i
) {
736 master_set
.insert(i
->first
);
740 // Check maps against master set and each other
741 for (set
<hobject_t
>::const_iterator k
= master_set
.begin();
742 k
!= master_set
.end();
744 object_info_t auth_oi
;
745 map
<pg_shard_t
, shard_info_wrapper
> shard_map
;
747 inconsistent_obj_wrapper object_error
{*k
};
749 map
<pg_shard_t
, ScrubMap
*>::const_iterator auth
=
750 be_select_auth_object(*k
, maps
, &auth_oi
, shard_map
, object_error
);
752 list
<pg_shard_t
> auth_list
;
753 if (auth
== maps
.end()) {
754 object_error
.set_version(0);
755 object_error
.set_auth_missing(*k
, maps
, shard_map
, shallow_errors
, deep_errors
);
756 if (object_error
.has_deep_errors())
758 else if (object_error
.has_shallow_errors())
760 store
->add_object_error(k
->pool
, object_error
);
761 errorstream
<< pgid
.pgid
<< " soid " << *k
762 << ": failed to pick suitable object info\n";
765 object_error
.set_version(auth_oi
.user_version
);
766 ScrubMap::object
& auth_object
= auth
->second
->objects
[*k
];
767 set
<pg_shard_t
> cur_missing
;
768 set
<pg_shard_t
> cur_inconsistent
;
770 for (j
= maps
.begin(); j
!= maps
.end(); ++j
) {
772 shard_map
[auth
->first
].selected_oi
= true;
773 if (j
->second
->objects
.count(*k
)) {
774 shard_map
[j
->first
].set_object(j
->second
->objects
[*k
]);
777 bool found
= be_compare_scrub_objects(auth
->first
,
780 j
->second
->objects
[*k
],
784 // Some errors might have already been set in be_select_auth_object()
785 if (shard_map
[j
->first
].errors
!= 0) {
786 cur_inconsistent
.insert(j
->first
);
787 if (shard_map
[j
->first
].has_deep_errors())
791 // Only true if be_compare_scrub_objects() found errors and put something
794 errorstream
<< pgid
<< " shard " << j
->first
<< ": soid " << *k
795 << " " << ss
.str() << "\n";
797 // XXX: The auth shard might get here that we don't know
798 // that it has the "correct" data.
799 auth_list
.push_back(j
->first
);
802 cur_missing
.insert(j
->first
);
803 shard_map
[j
->first
].set_missing();
804 // Can't have any other errors if there is no information available
806 errorstream
<< pgid
<< " shard " << j
->first
<< " missing " << *k
809 object_error
.add_shard(j
->first
, shard_map
[j
->first
]);
812 if (auth_list
.empty()) {
813 errorstream
<< pgid
.pgid
<< " soid " << *k
814 << ": failed to pick suitable auth object\n";
817 if (!cur_missing
.empty()) {
818 missing
[*k
] = cur_missing
;
820 if (!cur_inconsistent
.empty()) {
821 inconsistent
[*k
] = cur_inconsistent
;
823 if (!cur_inconsistent
.empty() || !cur_missing
.empty()) {
824 authoritative
[*k
] = auth_list
;
825 } else if (parent
->get_pool().is_replicated()) {
832 if (auth_object
.digest_present
&& auth_object
.omap_digest_present
&&
833 (!auth_oi
.is_data_digest() || !auth_oi
.is_omap_digest())) {
834 dout(20) << __func__
<< " missing digest on " << *k
<< dendl
;
837 if (auth_object
.digest_present
&& auth_object
.omap_digest_present
&&
838 cct
->_conf
->osd_debug_scrub_chance_rewrite_digest
&&
839 (((unsigned)rand() % 100) >
840 cct
->_conf
->osd_debug_scrub_chance_rewrite_digest
)) {
841 dout(20) << __func__
<< " randomly updating digest on " << *k
<< dendl
;
845 // recorded digest != actual digest?
846 if (auth_oi
.is_data_digest() && auth_object
.digest_present
&&
847 auth_oi
.data_digest
!= auth_object
.digest
) {
848 assert(shard_map
[auth
->first
].has_data_digest_mismatch_oi());
849 errorstream
<< pgid
<< " recorded data digest 0x"
850 << std::hex
<< auth_oi
.data_digest
<< " != on disk 0x"
851 << auth_object
.digest
<< std::dec
<< " on " << auth_oi
.soid
856 if (auth_oi
.is_omap_digest() && auth_object
.omap_digest_present
&&
857 auth_oi
.omap_digest
!= auth_object
.omap_digest
) {
858 assert(shard_map
[auth
->first
].has_omap_digest_mismatch_oi());
859 errorstream
<< pgid
<< " recorded omap digest 0x"
860 << std::hex
<< auth_oi
.omap_digest
<< " != on disk 0x"
861 << auth_object
.omap_digest
<< std::dec
862 << " on " << auth_oi
.soid
<< "\n";
868 utime_t age
= now
- auth_oi
.local_mtime
;
869 if (update
== FORCE
||
870 age
> cct
->_conf
->osd_deep_scrub_update_digest_min_age
) {
871 dout(20) << __func__
<< " will update digest on " << *k
<< dendl
;
872 missing_digest
[*k
] = make_pair(auth_object
.digest
,
873 auth_object
.omap_digest
);
875 dout(20) << __func__
<< " missing digest but age " << age
876 << " < " << cct
->_conf
->osd_deep_scrub_update_digest_min_age
877 << " on " << *k
<< dendl
;
882 if (object_error
.has_deep_errors())
884 else if (object_error
.has_shallow_errors())
886 if (object_error
.errors
|| object_error
.union_shards
.errors
) {
887 store
->add_object_error(k
->pool
, object_error
);