1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2011 New Dream Network
 * Copyright (C) 2013,2014 Cloudwatt <libre.licensing@cloudwatt.com>
 *
 * Author: Loic Dachary <loic@dachary.org>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
18 #include <boost/assign/list_of.hpp>
20 #include "osd_types.h"
21 #include "include/ceph_features.h"
23 #include "crush/hash.h"
27 #include "PGBackend.h"
29 const char *ceph_osd_flag_name(unsigned flag
)
32 case CEPH_OSD_FLAG_ACK
: return "ack";
33 case CEPH_OSD_FLAG_ONNVRAM
: return "onnvram";
34 case CEPH_OSD_FLAG_ONDISK
: return "ondisk";
35 case CEPH_OSD_FLAG_RETRY
: return "retry";
36 case CEPH_OSD_FLAG_READ
: return "read";
37 case CEPH_OSD_FLAG_WRITE
: return "write";
38 case CEPH_OSD_FLAG_ORDERSNAP
: return "ordersnap";
39 case CEPH_OSD_FLAG_PEERSTAT_OLD
: return "peerstat_old";
40 case CEPH_OSD_FLAG_BALANCE_READS
: return "balance_reads";
41 case CEPH_OSD_FLAG_PARALLELEXEC
: return "parallelexec";
42 case CEPH_OSD_FLAG_PGOP
: return "pgop";
43 case CEPH_OSD_FLAG_EXEC
: return "exec";
44 case CEPH_OSD_FLAG_EXEC_PUBLIC
: return "exec_public";
45 case CEPH_OSD_FLAG_LOCALIZE_READS
: return "localize_reads";
46 case CEPH_OSD_FLAG_RWORDERED
: return "rwordered";
47 case CEPH_OSD_FLAG_IGNORE_CACHE
: return "ignore_cache";
48 case CEPH_OSD_FLAG_SKIPRWLOCKS
: return "skiprwlocks";
49 case CEPH_OSD_FLAG_IGNORE_OVERLAY
: return "ignore_overlay";
50 case CEPH_OSD_FLAG_FLUSH
: return "flush";
51 case CEPH_OSD_FLAG_MAP_SNAP_CLONE
: return "map_snap_clone";
52 case CEPH_OSD_FLAG_ENFORCE_SNAPC
: return "enforce_snapc";
53 case CEPH_OSD_FLAG_REDIRECTED
: return "redirected";
54 case CEPH_OSD_FLAG_KNOWN_REDIR
: return "known_if_redirected";
55 case CEPH_OSD_FLAG_FULL_TRY
: return "full_try";
56 case CEPH_OSD_FLAG_FULL_FORCE
: return "full_force";
57 case CEPH_OSD_FLAG_IGNORE_REDIRECT
: return "ignore_redirect";
58 default: return "???";
62 string
ceph_osd_flag_string(unsigned flags
)
65 for (unsigned i
=0; i
<32; ++i
) {
66 if (flags
& (1u<<i
)) {
69 s
+= ceph_osd_flag_name(1u << i
);
77 const char * ceph_osd_op_flag_name(unsigned flag
)
82 case CEPH_OSD_OP_FLAG_EXCL
:
85 case CEPH_OSD_OP_FLAG_FAILOK
:
88 case CEPH_OSD_OP_FLAG_FADVISE_RANDOM
:
89 name
= "fadvise_random";
91 case CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL
:
92 name
= "fadvise_sequential";
94 case CEPH_OSD_OP_FLAG_FADVISE_WILLNEED
:
95 name
= "favise_willneed";
97 case CEPH_OSD_OP_FLAG_FADVISE_DONTNEED
:
98 name
= "fadvise_dontneed";
100 case CEPH_OSD_OP_FLAG_FADVISE_NOCACHE
:
101 name
= "fadvise_nocache";
110 string
ceph_osd_op_flag_string(unsigned flags
)
113 for (unsigned i
=0; i
<32; ++i
) {
114 if (flags
& (1u<<i
)) {
117 s
+= ceph_osd_op_flag_name(1u << i
);
125 string
ceph_osd_alloc_hint_flag_string(unsigned flags
)
128 for (unsigned i
=0; i
<32; ++i
) {
129 if (flags
& (1u<<i
)) {
132 s
+= ceph_osd_alloc_hint_flag_name(1u << i
);
140 void pg_shard_t::encode(bufferlist
&bl
) const
142 ENCODE_START(1, 1, bl
);
147 void pg_shard_t::decode(bufferlist::iterator
&bl
)
155 ostream
&operator<<(ostream
&lhs
, const pg_shard_t
&rhs
)
157 if (rhs
.is_undefined())
159 if (rhs
.shard
== shard_id_t::NO_SHARD
)
160 return lhs
<< rhs
.get_osd();
161 return lhs
<< rhs
.get_osd() << '(' << (unsigned)(rhs
.shard
) << ')';
165 void osd_reqid_t::dump(Formatter
*f
) const
167 f
->dump_stream("name") << name
;
168 f
->dump_int("inc", inc
);
169 f
->dump_unsigned("tid", tid
);
172 void osd_reqid_t::generate_test_instances(list
<osd_reqid_t
*>& o
)
174 o
.push_back(new osd_reqid_t
);
175 o
.push_back(new osd_reqid_t(entity_name_t::CLIENT(123), 1, 45678));
178 // -- object_locator_t --
180 void object_locator_t::encode(bufferlist
& bl
) const
182 // verify that nobody's corrupted the locator
183 assert(hash
== -1 || key
.empty());
184 __u8 encode_compat
= 3;
185 ENCODE_START(6, encode_compat
, bl
);
187 int32_t preferred
= -1; // tell old code there is no preferred osd (-1).
188 ::encode(preferred
, bl
);
190 ::encode(nspace
, bl
);
193 encode_compat
= MAX(encode_compat
, 6); // need to interpret the hash
194 ENCODE_FINISH_NEW_COMPAT(bl
, encode_compat
);
197 void object_locator_t::decode(bufferlist::iterator
& p
)
199 DECODE_START_LEGACY_COMPAT_LEN(6, 3, 3, p
);
209 ::decode(preferred
, p
);
219 // verify that nobody's corrupted the locator
220 assert(hash
== -1 || key
.empty());
223 void object_locator_t::dump(Formatter
*f
) const
225 f
->dump_int("pool", pool
);
226 f
->dump_string("key", key
);
227 f
->dump_string("namespace", nspace
);
228 f
->dump_int("hash", hash
);
231 void object_locator_t::generate_test_instances(list
<object_locator_t
*>& o
)
233 o
.push_back(new object_locator_t
);
234 o
.push_back(new object_locator_t(123));
235 o
.push_back(new object_locator_t(123, 876));
236 o
.push_back(new object_locator_t(1, "n2"));
237 o
.push_back(new object_locator_t(1234, "", "key"));
238 o
.push_back(new object_locator_t(12, "n1", "key2"));
241 // -- request_redirect_t --
242 void request_redirect_t::encode(bufferlist
& bl
) const
244 ENCODE_START(1, 1, bl
);
245 ::encode(redirect_locator
, bl
);
246 ::encode(redirect_object
, bl
);
247 ::encode(osd_instructions
, bl
);
251 void request_redirect_t::decode(bufferlist::iterator
& bl
)
254 ::decode(redirect_locator
, bl
);
255 ::decode(redirect_object
, bl
);
256 ::decode(osd_instructions
, bl
);
260 void request_redirect_t::dump(Formatter
*f
) const
262 f
->dump_string("object", redirect_object
);
263 f
->open_object_section("locator");
264 redirect_locator
.dump(f
);
265 f
->close_section(); // locator
268 void request_redirect_t::generate_test_instances(list
<request_redirect_t
*>& o
)
270 object_locator_t
loc(1, "redir_obj");
271 o
.push_back(new request_redirect_t());
272 o
.push_back(new request_redirect_t(loc
, 0));
273 o
.push_back(new request_redirect_t(loc
, "redir_obj"));
274 o
.push_back(new request_redirect_t(loc
));
277 void objectstore_perf_stat_t::dump(Formatter
*f
) const
279 f
->dump_unsigned("commit_latency_ms", os_commit_latency
);
280 f
->dump_unsigned("apply_latency_ms", os_apply_latency
);
283 void objectstore_perf_stat_t::encode(bufferlist
&bl
) const
285 ENCODE_START(1, 1, bl
);
286 ::encode(os_commit_latency
, bl
);
287 ::encode(os_apply_latency
, bl
);
291 void objectstore_perf_stat_t::decode(bufferlist::iterator
&bl
)
294 ::decode(os_commit_latency
, bl
);
295 ::decode(os_apply_latency
, bl
);
299 void objectstore_perf_stat_t::generate_test_instances(std::list
<objectstore_perf_stat_t
*>& o
)
301 o
.push_back(new objectstore_perf_stat_t());
302 o
.push_back(new objectstore_perf_stat_t());
303 o
.back()->os_commit_latency
= 20;
304 o
.back()->os_apply_latency
= 30;
308 void osd_stat_t::dump(Formatter
*f
) const
310 f
->dump_unsigned("up_from", up_from
);
311 f
->dump_unsigned("seq", seq
);
312 f
->dump_unsigned("num_pgs", num_pgs
);
313 f
->dump_unsigned("kb", kb
);
314 f
->dump_unsigned("kb_used", kb_used
);
315 f
->dump_unsigned("kb_avail", kb_avail
);
316 f
->open_array_section("hb_peers");
317 for (auto p
: hb_peers
)
318 f
->dump_int("osd", p
);
320 f
->dump_int("snap_trim_queue_len", snap_trim_queue_len
);
321 f
->dump_int("num_snap_trimming", num_snap_trimming
);
322 f
->open_object_section("op_queue_age_hist");
323 op_queue_age_hist
.dump(f
);
325 f
->open_object_section("perf_stat");
326 os_perf_stat
.dump(f
);
330 void osd_stat_t::encode(bufferlist
&bl
) const
332 ENCODE_START(7, 2, bl
);
334 ::encode(kb_used
, bl
);
335 ::encode(kb_avail
, bl
);
336 ::encode(snap_trim_queue_len
, bl
);
337 ::encode(num_snap_trimming
, bl
);
338 ::encode(hb_peers
, bl
);
339 ::encode((uint32_t)0, bl
);
340 ::encode(op_queue_age_hist
, bl
);
341 ::encode(os_perf_stat
, bl
);
342 ::encode(up_from
, bl
);
344 ::encode(num_pgs
, bl
);
348 void osd_stat_t::decode(bufferlist::iterator
&bl
)
350 DECODE_START_LEGACY_COMPAT_LEN(6, 2, 2, bl
);
352 ::decode(kb_used
, bl
);
353 ::decode(kb_avail
, bl
);
354 ::decode(snap_trim_queue_len
, bl
);
355 ::decode(num_snap_trimming
, bl
);
356 ::decode(hb_peers
, bl
);
357 vector
<int> num_hb_out
;
358 ::decode(num_hb_out
, bl
);
360 ::decode(op_queue_age_hist
, bl
);
362 ::decode(os_perf_stat
, bl
);
364 ::decode(up_from
, bl
);
368 ::decode(num_pgs
, bl
);
373 void osd_stat_t::generate_test_instances(std::list
<osd_stat_t
*>& o
)
375 o
.push_back(new osd_stat_t
);
377 o
.push_back(new osd_stat_t
);
379 o
.back()->kb_used
= 2;
380 o
.back()->kb_avail
= 3;
381 o
.back()->hb_peers
.push_back(7);
382 o
.back()->snap_trim_queue_len
= 8;
383 o
.back()->num_snap_trimming
= 99;
388 int pg_t::print(char *o
, int maxlen
) const
390 if (preferred() >= 0)
391 return snprintf(o
, maxlen
, "%llu.%xp%d", (unsigned long long)pool(), ps(), preferred());
393 return snprintf(o
, maxlen
, "%llu.%x", (unsigned long long)pool(), ps());
396 bool pg_t::parse(const char *s
)
401 int r
= sscanf(s
, "%llu.%xp%d", (long long unsigned *)&ppool
, &pseed
, &pref
);
413 bool spg_t::parse(const char *s
)
415 pgid
.set_preferred(-1);
416 shard
= shard_id_t::NO_SHARD
;
421 int r
= sscanf(s
, "%llu.%x", (long long unsigned *)&ppool
, &pseed
);
424 pgid
.set_pool(ppool
);
427 const char *p
= strchr(s
, 'p');
429 r
= sscanf(p
, "p%d", &pref
);
431 pgid
.set_preferred(pref
);
439 r
= sscanf(p
, "s%d", &pshard
);
441 shard
= shard_id_t(pshard
);
449 char *spg_t::calc_name(char *buf
, const char *suffix_backwords
) const
451 while (*suffix_backwords
)
452 *--buf
= *suffix_backwords
++;
454 if (!is_no_shard()) {
455 buf
= ritoa
<uint8_t, 10>((uint8_t)shard
.id
, buf
);
459 return pgid
.calc_name(buf
, "");
462 ostream
& operator<<(ostream
& out
, const spg_t
&pg
)
464 char buf
[spg_t::calc_name_buf_size
];
465 buf
[spg_t::calc_name_buf_size
- 1] = '\0';
466 out
<< pg
.calc_name(buf
+ spg_t::calc_name_buf_size
- 1, "");
470 pg_t
pg_t::get_ancestor(unsigned old_pg_num
) const
472 int old_bits
= cbits(old_pg_num
);
473 int old_mask
= (1 << old_bits
) - 1;
475 ret
.m_seed
= ceph_stable_mod(m_seed
, old_pg_num
, old_mask
);
479 bool pg_t::is_split(unsigned old_pg_num
, unsigned new_pg_num
, set
<pg_t
> *children
) const
481 assert(m_seed
< old_pg_num
);
482 if (new_pg_num
<= old_pg_num
)
487 unsigned old_bits
= cbits(old_pg_num
);
488 unsigned old_mask
= (1 << old_bits
) - 1;
489 for (unsigned n
= 1; ; n
++) {
490 unsigned next_bit
= (n
<< (old_bits
-1));
491 unsigned s
= next_bit
| m_seed
;
493 if (s
< old_pg_num
|| s
== m_seed
)
497 if ((unsigned)ceph_stable_mod(s
, old_pg_num
, old_mask
) == m_seed
) {
500 children
->insert(pg_t(s
, m_pool
, m_preferred
));
506 int old_bits
= cbits(old_pg_num
);
507 int old_mask
= (1 << old_bits
) - 1;
508 for (unsigned x
= old_pg_num
; x
< new_pg_num
; ++x
) {
509 unsigned o
= ceph_stable_mod(x
, old_pg_num
, old_mask
);
512 children
->insert(pg_t(x
, m_pool
, m_preferred
));
519 unsigned pg_t::get_split_bits(unsigned pg_num
) const {
524 // Find unique p such that pg_num \in [2^(p-1), 2^p)
525 unsigned p
= cbits(pg_num
);
526 assert(p
); // silence coverity #751330
528 if ((m_seed
% (1<<(p
-1))) < (pg_num
% (1<<(p
-1))))
534 pg_t
pg_t::get_parent() const
536 unsigned bits
= cbits(m_seed
);
539 retval
.m_seed
&= ~((~0)<<(bits
- 1));
543 hobject_t
pg_t::get_hobj_start() const
545 return hobject_t(object_t(), string(), CEPH_NOSNAP
, m_seed
, m_pool
,
549 hobject_t
pg_t::get_hobj_end(unsigned pg_num
) const
551 // note: this assumes a bitwise sort; with the legacy nibblewise
552 // sort a PG did not always cover a single contiguous range of the
553 // (bit-reversed) hash range.
554 unsigned bits
= get_split_bits(pg_num
);
555 uint64_t rev_start
= hobject_t::_reverse_bits(m_seed
);
556 uint64_t rev_end
= (rev_start
| (0xffffffff >> bits
)) + 1;
557 if (rev_end
>= 0x100000000) {
558 assert(rev_end
== 0x100000000);
559 return hobject_t::get_max();
561 return hobject_t(object_t(), string(), CEPH_NOSNAP
,
562 hobject_t::_reverse_bits(rev_end
), m_pool
,
567 void pg_t::dump(Formatter
*f
) const
569 f
->dump_unsigned("pool", m_pool
);
570 f
->dump_unsigned("seed", m_seed
);
571 f
->dump_int("preferred_osd", m_preferred
);
574 void pg_t::generate_test_instances(list
<pg_t
*>& o
)
576 o
.push_back(new pg_t
);
577 o
.push_back(new pg_t(1, 2, -1));
578 o
.push_back(new pg_t(13123, 3, -1));
579 o
.push_back(new pg_t(131223, 4, 23));
582 char *pg_t::calc_name(char *buf
, const char *suffix_backwords
) const
584 while (*suffix_backwords
)
585 *--buf
= *suffix_backwords
++;
587 if (m_preferred
>= 0)
590 buf
= ritoa
<uint32_t, 16>(m_seed
, buf
);
594 return ritoa
<uint64_t, 10>(m_pool
, buf
);
597 ostream
& operator<<(ostream
& out
, const pg_t
&pg
)
599 char buf
[pg_t::calc_name_buf_size
];
600 buf
[pg_t::calc_name_buf_size
- 1] = '\0';
601 out
<< pg
.calc_name(buf
+ pg_t::calc_name_buf_size
- 1, "");
608 void coll_t::calc_str()
612 strcpy(_str_buff
, "meta");
616 _str_buff
[spg_t::calc_name_buf_size
- 1] = '\0';
617 _str
= pgid
.calc_name(_str_buff
+ spg_t::calc_name_buf_size
- 1, "daeh_");
620 _str_buff
[spg_t::calc_name_buf_size
- 1] = '\0';
621 _str
= pgid
.calc_name(_str_buff
+ spg_t::calc_name_buf_size
- 1, "PMET_");
624 assert(0 == "unknown collection type");
628 bool coll_t::parse(const std::string
& s
)
638 if (s
.find("_head") == s
.length() - 5 &&
639 pgid
.parse(s
.substr(0, s
.length() - 5))) {
646 if (s
.find("_TEMP") == s
.length() - 5 &&
647 pgid
.parse(s
.substr(0, s
.length() - 5))) {
657 void coll_t::encode(bufferlist
& bl
) const
659 // when changing this, remember to update encoded_size() too.
661 // can't express this as v2...
663 ::encode(struct_v
, bl
);
664 ::encode(to_str(), bl
);
667 ::encode(struct_v
, bl
);
668 ::encode((__u8
)type
, bl
);
670 snapid_t snap
= CEPH_NOSNAP
;
675 size_t coll_t::encoded_size() const
677 size_t r
= sizeof(__u8
);
690 r
+= sizeof(ceph_le32
) + 2 * sizeof(__u8
);
692 r
+= sizeof(__u8
) + sizeof(uint64_t) + 2 * sizeof(uint32_t);
696 r
+= sizeof(uint64_t);
702 void coll_t::decode(bufferlist::iterator
& bl
)
705 ::decode(struct_v
, bl
);
714 if (pgid
== spg_t() && snap
== 0) {
730 type
= (type_t
)_type
;
739 bool ok
= parse(str
);
741 throw std::domain_error(std::string("unable to parse pg ") + str
);
748 oss
<< "coll_t::decode(): don't know how to decode version "
750 throw std::domain_error(oss
.str());
755 void coll_t::dump(Formatter
*f
) const
757 f
->dump_unsigned("type_id", (unsigned)type
);
758 if (type
!= TYPE_META
)
759 f
->dump_stream("pgid") << pgid
;
760 f
->dump_string("name", to_str());
763 void coll_t::generate_test_instances(list
<coll_t
*>& o
)
765 o
.push_back(new coll_t());
766 o
.push_back(new coll_t(spg_t(pg_t(1, 0), shard_id_t::NO_SHARD
)));
767 o
.push_back(new coll_t(o
.back()->get_temp()));
768 o
.push_back(new coll_t(spg_t(pg_t(3, 2), shard_id_t(12))));
769 o
.push_back(new coll_t(o
.back()->get_temp()));
770 o
.push_back(new coll_t());
775 std::string
pg_vector_string(const vector
<int32_t> &a
)
779 for (vector
<int32_t>::const_iterator i
= a
.begin(); i
!= a
.end(); ++i
) {
782 if (*i
!= CRUSH_ITEM_NONE
)
791 std::string
pg_state_string(int state
)
794 if (state
& PG_STATE_STALE
)
796 if (state
& PG_STATE_CREATING
)
798 if (state
& PG_STATE_ACTIVE
)
800 if (state
& PG_STATE_ACTIVATING
)
801 oss
<< "activating+";
802 if (state
& PG_STATE_CLEAN
)
804 if (state
& PG_STATE_RECOVERY_WAIT
)
805 oss
<< "recovery_wait+";
806 if (state
& PG_STATE_RECOVERY_TOOFULL
)
807 oss
<< "recovery_toofull+";
808 if (state
& PG_STATE_RECOVERING
)
809 oss
<< "recovering+";
810 if (state
& PG_STATE_FORCED_RECOVERY
)
811 oss
<< "forced_recovery+";
812 if (state
& PG_STATE_DOWN
)
814 if (state
& PG_STATE_RECOVERY_UNFOUND
)
815 oss
<< "recovery_unfound+";
816 if (state
& PG_STATE_BACKFILL_UNFOUND
)
817 oss
<< "backfill_unfound+";
818 if (state
& PG_STATE_UNDERSIZED
)
819 oss
<< "undersized+";
820 if (state
& PG_STATE_DEGRADED
)
822 if (state
& PG_STATE_REMAPPED
)
824 if (state
& PG_STATE_SCRUBBING
)
826 if (state
& PG_STATE_DEEP_SCRUB
)
828 if (state
& PG_STATE_INCONSISTENT
)
829 oss
<< "inconsistent+";
830 if (state
& PG_STATE_PEERING
)
832 if (state
& PG_STATE_REPAIR
)
834 if (state
& PG_STATE_BACKFILL_WAIT
)
835 oss
<< "backfill_wait+";
836 if (state
& PG_STATE_BACKFILLING
)
837 oss
<< "backfilling+";
838 if (state
& PG_STATE_FORCED_BACKFILL
)
839 oss
<< "forced_backfill+";
840 if (state
& PG_STATE_BACKFILL_TOOFULL
)
841 oss
<< "backfill_toofull+";
842 if (state
& PG_STATE_INCOMPLETE
)
843 oss
<< "incomplete+";
844 if (state
& PG_STATE_PEERED
)
846 if (state
& PG_STATE_SNAPTRIM
)
848 if (state
& PG_STATE_SNAPTRIM_WAIT
)
849 oss
<< "snaptrim_wait+";
850 if (state
& PG_STATE_SNAPTRIM_ERROR
)
851 oss
<< "snaptrim_error+";
852 string
ret(oss
.str());
853 if (ret
.length() > 0)
854 ret
.resize(ret
.length() - 1);
860 boost::optional
<uint64_t> pg_string_state(const std::string
& state
)
862 boost::optional
<uint64_t> type
;
863 if (state
== "active")
864 type
= PG_STATE_ACTIVE
;
865 else if (state
== "clean")
866 type
= PG_STATE_CLEAN
;
867 else if (state
== "down")
868 type
= PG_STATE_DOWN
;
869 else if (state
== "recovery_unfound")
870 type
= PG_STATE_RECOVERY_UNFOUND
;
871 else if (state
== "backfill_unfound")
872 type
= PG_STATE_BACKFILL_UNFOUND
;
873 else if (state
== "scrubbing")
874 type
= PG_STATE_SCRUBBING
;
875 else if (state
== "degraded")
876 type
= PG_STATE_DEGRADED
;
877 else if (state
== "inconsistent")
878 type
= PG_STATE_INCONSISTENT
;
879 else if (state
== "peering")
880 type
= PG_STATE_PEERING
;
881 else if (state
== "repair")
882 type
= PG_STATE_REPAIR
;
883 else if (state
== "recovering")
884 type
= PG_STATE_RECOVERING
;
885 else if (state
== "forced_recovery")
886 type
= PG_STATE_FORCED_RECOVERY
;
887 else if (state
== "backfill_wait")
888 type
= PG_STATE_BACKFILL_WAIT
;
889 else if (state
== "incomplete")
890 type
= PG_STATE_INCOMPLETE
;
891 else if (state
== "stale")
892 type
= PG_STATE_STALE
;
893 else if (state
== "remapped")
894 type
= PG_STATE_REMAPPED
;
895 else if (state
== "deep_scrub")
896 type
= PG_STATE_DEEP_SCRUB
;
897 else if (state
== "backfilling")
898 type
= PG_STATE_BACKFILLING
;
899 else if (state
== "forced_backfill")
900 type
= PG_STATE_FORCED_BACKFILL
;
901 else if (state
== "backfill_toofull")
902 type
= PG_STATE_BACKFILL_TOOFULL
;
903 else if (state
== "recovery_wait")
904 type
= PG_STATE_RECOVERY_WAIT
;
905 else if (state
== "recovery_toofull")
906 type
= PG_STATE_RECOVERY_TOOFULL
;
907 else if (state
== "undersized")
908 type
= PG_STATE_UNDERSIZED
;
909 else if (state
== "activating")
910 type
= PG_STATE_ACTIVATING
;
911 else if (state
== "peered")
912 type
= PG_STATE_PEERED
;
913 else if (state
== "snaptrim")
914 type
= PG_STATE_SNAPTRIM
;
915 else if (state
== "snaptrim_wait")
916 type
= PG_STATE_SNAPTRIM_WAIT
;
917 else if (state
== "snaptrim_error")
918 type
= PG_STATE_SNAPTRIM_ERROR
;
925 string
eversion_t::get_key_name() const
928 // Below is equivalent of sprintf("%010u.%020llu");
930 ritoa
<uint64_t, 10, 20>(version
, key
+ 31);
932 ritoa
<uint32_t, 10, 10>(epoch
, key
+ 10);
937 // -- pool_snap_info_t --
938 void pool_snap_info_t::dump(Formatter
*f
) const
940 f
->dump_unsigned("snapid", snapid
);
941 f
->dump_stream("stamp") << stamp
;
942 f
->dump_string("name", name
);
945 void pool_snap_info_t::encode(bufferlist
& bl
, uint64_t features
) const
947 if ((features
& CEPH_FEATURE_PGPOOL3
) == 0) {
949 ::encode(struct_v
, bl
);
950 ::encode(snapid
, bl
);
955 ENCODE_START(2, 2, bl
);
956 ::encode(snapid
, bl
);
962 void pool_snap_info_t::decode(bufferlist::iterator
& bl
)
964 DECODE_START_LEGACY_COMPAT_LEN(2, 2, 2, bl
);
965 ::decode(snapid
, bl
);
971 void pool_snap_info_t::generate_test_instances(list
<pool_snap_info_t
*>& o
)
973 o
.push_back(new pool_snap_info_t
);
974 o
.push_back(new pool_snap_info_t
);
975 o
.back()->snapid
= 1;
976 o
.back()->stamp
= utime_t(1, 2);
977 o
.back()->name
= "foo";
982 typedef std::map
<std::string
, pool_opts_t::opt_desc_t
> opt_mapping_t
;
983 static opt_mapping_t opt_mapping
= boost::assign::map_list_of
984 ("scrub_min_interval", pool_opts_t::opt_desc_t(
985 pool_opts_t::SCRUB_MIN_INTERVAL
, pool_opts_t::DOUBLE
))
986 ("scrub_max_interval", pool_opts_t::opt_desc_t(
987 pool_opts_t::SCRUB_MAX_INTERVAL
, pool_opts_t::DOUBLE
))
988 ("deep_scrub_interval", pool_opts_t::opt_desc_t(
989 pool_opts_t::DEEP_SCRUB_INTERVAL
, pool_opts_t::DOUBLE
))
990 ("recovery_priority", pool_opts_t::opt_desc_t(
991 pool_opts_t::RECOVERY_PRIORITY
, pool_opts_t::INT
))
992 ("recovery_op_priority", pool_opts_t::opt_desc_t(
993 pool_opts_t::RECOVERY_OP_PRIORITY
, pool_opts_t::INT
))
994 ("scrub_priority", pool_opts_t::opt_desc_t(
995 pool_opts_t::SCRUB_PRIORITY
, pool_opts_t::INT
))
996 ("compression_mode", pool_opts_t::opt_desc_t(
997 pool_opts_t::COMPRESSION_MODE
, pool_opts_t::STR
))
998 ("compression_algorithm", pool_opts_t::opt_desc_t(
999 pool_opts_t::COMPRESSION_ALGORITHM
, pool_opts_t::STR
))
1000 ("compression_required_ratio", pool_opts_t::opt_desc_t(
1001 pool_opts_t::COMPRESSION_REQUIRED_RATIO
, pool_opts_t::DOUBLE
))
1002 ("compression_max_blob_size", pool_opts_t::opt_desc_t(
1003 pool_opts_t::COMPRESSION_MAX_BLOB_SIZE
, pool_opts_t::INT
))
1004 ("compression_min_blob_size", pool_opts_t::opt_desc_t(
1005 pool_opts_t::COMPRESSION_MIN_BLOB_SIZE
, pool_opts_t::INT
))
1006 ("csum_type", pool_opts_t::opt_desc_t(
1007 pool_opts_t::CSUM_TYPE
, pool_opts_t::INT
))
1008 ("csum_max_block", pool_opts_t::opt_desc_t(
1009 pool_opts_t::CSUM_MAX_BLOCK
, pool_opts_t::INT
))
1010 ("csum_min_block", pool_opts_t::opt_desc_t(
1011 pool_opts_t::CSUM_MIN_BLOCK
, pool_opts_t::INT
));
1013 bool pool_opts_t::is_opt_name(const std::string
& name
) {
1014 return opt_mapping
.count(name
);
1017 pool_opts_t::opt_desc_t
pool_opts_t::get_opt_desc(const std::string
& name
) {
1018 opt_mapping_t::iterator i
= opt_mapping
.find(name
);
1019 assert(i
!= opt_mapping
.end());
1023 bool pool_opts_t::is_set(pool_opts_t::key_t key
) const {
1024 return opts
.count(key
);
1027 const pool_opts_t::value_t
& pool_opts_t::get(pool_opts_t::key_t key
) const {
1028 opts_t::const_iterator i
= opts
.find(key
);
1029 assert(i
!= opts
.end());
1033 bool pool_opts_t::unset(pool_opts_t::key_t key
) {
1034 return opts
.erase(key
) > 0;
1037 class pool_opts_dumper_t
: public boost::static_visitor
<>
1040 pool_opts_dumper_t(const std::string
& name_
, Formatter
* f_
) :
1041 name(name_
.c_str()), f(f_
) {}
1043 void operator()(std::string s
) const {
1044 f
->dump_string(name
, s
);
1046 void operator()(int i
) const {
1047 f
->dump_int(name
, i
);
1049 void operator()(double d
) const {
1050 f
->dump_float(name
, d
);
1058 void pool_opts_t::dump(const std::string
& name
, Formatter
* f
) const
1060 const opt_desc_t
& desc
= get_opt_desc(name
);
1061 opts_t::const_iterator i
= opts
.find(desc
.key
);
1062 if (i
== opts
.end()) {
1065 boost::apply_visitor(pool_opts_dumper_t(name
, f
), i
->second
);
1068 void pool_opts_t::dump(Formatter
* f
) const
1070 for (opt_mapping_t::iterator i
= opt_mapping
.begin(); i
!= opt_mapping
.end();
1072 const std::string
& name
= i
->first
;
1073 const opt_desc_t
& desc
= i
->second
;
1074 opts_t::const_iterator j
= opts
.find(desc
.key
);
1075 if (j
== opts
.end()) {
1078 boost::apply_visitor(pool_opts_dumper_t(name
, f
), j
->second
);
1082 class pool_opts_encoder_t
: public boost::static_visitor
<>
1085 explicit pool_opts_encoder_t(bufferlist
& bl_
) : bl(bl_
) {}
1087 void operator()(std::string s
) const {
1088 ::encode(static_cast<int32_t>(pool_opts_t::STR
), bl
);
1091 void operator()(int i
) const {
1092 ::encode(static_cast<int32_t>(pool_opts_t::INT
), bl
);
1095 void operator()(double d
) const {
1096 ::encode(static_cast<int32_t>(pool_opts_t::DOUBLE
), bl
);
1104 void pool_opts_t::encode(bufferlist
& bl
) const {
1105 ENCODE_START(1, 1, bl
);
1106 uint32_t n
= static_cast<uint32_t>(opts
.size());
1108 for (opts_t::const_iterator i
= opts
.begin(); i
!= opts
.end(); ++i
) {
1109 ::encode(static_cast<int32_t>(i
->first
), bl
);
1110 boost::apply_visitor(pool_opts_encoder_t(bl
), i
->second
);
1115 void pool_opts_t::decode(bufferlist::iterator
& bl
) {
1116 DECODE_START(1, bl
);
1127 opts
[static_cast<key_t
>(k
)] = s
;
1128 } else if (t
== INT
) {
1131 opts
[static_cast<key_t
>(k
)] = i
;
1132 } else if (t
== DOUBLE
) {
1135 opts
[static_cast<key_t
>(k
)] = d
;
1137 assert(!"invalid type");
1143 ostream
& operator<<(ostream
& out
, const pool_opts_t
& opts
)
1145 for (opt_mapping_t::iterator i
= opt_mapping
.begin(); i
!= opt_mapping
.end();
1147 const std::string
& name
= i
->first
;
1148 const pool_opts_t::opt_desc_t
& desc
= i
->second
;
1149 pool_opts_t::opts_t::const_iterator j
= opts
.opts
.find(desc
.key
);
1150 if (j
== opts
.opts
.end()) {
1153 out
<< " " << name
<< " " << j
->second
;
1160 const char *pg_pool_t::APPLICATION_NAME_CEPHFS("cephfs");
1161 const char *pg_pool_t::APPLICATION_NAME_RBD("rbd");
1162 const char *pg_pool_t::APPLICATION_NAME_RGW("rgw");
1164 void pg_pool_t::dump(Formatter
*f
) const
1166 f
->dump_unsigned("flags", get_flags());
1167 f
->dump_string("flags_names", get_flags_string());
1168 f
->dump_int("type", get_type());
1169 f
->dump_int("size", get_size());
1170 f
->dump_int("min_size", get_min_size());
1171 f
->dump_int("crush_rule", get_crush_rule());
1172 f
->dump_int("object_hash", get_object_hash());
1173 f
->dump_unsigned("pg_num", get_pg_num());
1174 f
->dump_unsigned("pg_placement_num", get_pgp_num());
1175 f
->dump_unsigned("crash_replay_interval", get_crash_replay_interval());
1176 f
->dump_stream("last_change") << get_last_change();
1177 f
->dump_stream("last_force_op_resend") << get_last_force_op_resend();
1178 f
->dump_stream("last_force_op_resend_preluminous")
1179 << get_last_force_op_resend_preluminous();
1180 f
->dump_unsigned("auid", get_auid());
1181 f
->dump_string("snap_mode", is_pool_snaps_mode() ? "pool" : "selfmanaged");
1182 f
->dump_unsigned("snap_seq", get_snap_seq());
1183 f
->dump_unsigned("snap_epoch", get_snap_epoch());
1184 f
->open_array_section("pool_snaps");
1185 for (map
<snapid_t
, pool_snap_info_t
>::const_iterator p
= snaps
.begin(); p
!= snaps
.end(); ++p
) {
1186 f
->open_object_section("pool_snap_info");
1191 f
->dump_stream("removed_snaps") << removed_snaps
;
1192 f
->dump_unsigned("quota_max_bytes", quota_max_bytes
);
1193 f
->dump_unsigned("quota_max_objects", quota_max_objects
);
1194 f
->open_array_section("tiers");
1195 for (set
<uint64_t>::const_iterator p
= tiers
.begin(); p
!= tiers
.end(); ++p
)
1196 f
->dump_unsigned("pool_id", *p
);
1198 f
->dump_int("tier_of", tier_of
);
1199 f
->dump_int("read_tier", read_tier
);
1200 f
->dump_int("write_tier", write_tier
);
1201 f
->dump_string("cache_mode", get_cache_mode_name());
1202 f
->dump_unsigned("target_max_bytes", target_max_bytes
);
1203 f
->dump_unsigned("target_max_objects", target_max_objects
);
1204 f
->dump_unsigned("cache_target_dirty_ratio_micro",
1205 cache_target_dirty_ratio_micro
);
1206 f
->dump_unsigned("cache_target_dirty_high_ratio_micro",
1207 cache_target_dirty_high_ratio_micro
);
1208 f
->dump_unsigned("cache_target_full_ratio_micro",
1209 cache_target_full_ratio_micro
);
1210 f
->dump_unsigned("cache_min_flush_age", cache_min_flush_age
);
1211 f
->dump_unsigned("cache_min_evict_age", cache_min_evict_age
);
1212 f
->dump_string("erasure_code_profile", erasure_code_profile
);
1213 f
->open_object_section("hit_set_params");
1214 hit_set_params
.dump(f
);
1215 f
->close_section(); // hit_set_params
1216 f
->dump_unsigned("hit_set_period", hit_set_period
);
1217 f
->dump_unsigned("hit_set_count", hit_set_count
);
1218 f
->dump_bool("use_gmt_hitset", use_gmt_hitset
);
1219 f
->dump_unsigned("min_read_recency_for_promote", min_read_recency_for_promote
);
1220 f
->dump_unsigned("min_write_recency_for_promote", min_write_recency_for_promote
);
1221 f
->dump_unsigned("hit_set_grade_decay_rate", hit_set_grade_decay_rate
);
1222 f
->dump_unsigned("hit_set_search_last_n", hit_set_search_last_n
);
1223 f
->open_array_section("grade_table");
1224 for (unsigned i
= 0; i
< hit_set_count
; ++i
)
1225 f
->dump_unsigned("value", get_grade(i
));
1227 f
->dump_unsigned("stripe_width", get_stripe_width());
1228 f
->dump_unsigned("expected_num_objects", expected_num_objects
);
1229 f
->dump_bool("fast_read", fast_read
);
1230 f
->open_object_section("options");
1232 f
->close_section(); // options
1233 f
->open_object_section("application_metadata");
1234 for (auto &app_pair
: application_metadata
) {
1235 f
->open_object_section(app_pair
.first
.c_str());
1236 for (auto &kv_pair
: app_pair
.second
) {
1237 f
->dump_string(kv_pair
.first
.c_str(), kv_pair
.second
);
1239 f
->close_section(); // application
1241 f
->close_section(); // application_metadata
1244 void pg_pool_t::convert_to_pg_shards(const vector
<int> &from
, set
<pg_shard_t
>* to
) const {
1245 for (size_t i
= 0; i
< from
.size(); ++i
) {
1246 if (from
[i
] != CRUSH_ITEM_NONE
) {
1250 ec_pool() ? shard_id_t(i
) : shard_id_t::NO_SHARD
));
1255 void pg_pool_t::calc_pg_masks()
1257 pg_num_mask
= (1 << cbits(pg_num
-1)) - 1;
1258 pgp_num_mask
= (1 << cbits(pgp_num
-1)) - 1;
1261 unsigned pg_pool_t::get_pg_num_divisor(pg_t pgid
) const
1263 if (pg_num
== pg_num_mask
+ 1)
1264 return pg_num
; // power-of-2 split
1265 unsigned mask
= pg_num_mask
>> 1;
1266 if ((pgid
.ps() & mask
) < (pg_num
& mask
))
1267 return pg_num_mask
+ 1; // smaller bin size (already split)
1269 return (pg_num_mask
+ 1) >> 1; // bigger bin (not yet split)
1273 * we have two snap modes:
1274 * - pool global snaps
1275 * - snap existence/non-existence defined by snaps[] and snap_seq
1276 * - user managed snaps
1277 * - removal governed by removed_snaps
1279 * we know which mode we're using based on whether removed_snaps is empty.
1281 bool pg_pool_t::is_pool_snaps_mode() const
1283 return removed_snaps
.empty() && get_snap_seq() > 0;
1286 bool pg_pool_t::is_unmanaged_snaps_mode() const
1288 return removed_snaps
.size() && get_snap_seq() > 0;
1291 bool pg_pool_t::is_removed_snap(snapid_t s
) const
1293 if (is_pool_snaps_mode())
1294 return s
<= get_snap_seq() && snaps
.count(s
) == 0;
1296 return removed_snaps
.contains(s
);
1300 * build set of known-removed sets from either pool snaps or
1301 * explicit removed_snaps set.
1303 void pg_pool_t::build_removed_snaps(interval_set
<snapid_t
>& rs
) const
1305 if (is_pool_snaps_mode()) {
1307 for (snapid_t s
= 1; s
<= get_snap_seq(); s
= s
+ 1)
1308 if (snaps
.count(s
) == 0)
1315 snapid_t
pg_pool_t::snap_exists(const char *s
) const
1317 for (map
<snapid_t
,pool_snap_info_t
>::const_iterator p
= snaps
.begin();
1320 if (p
->second
.name
== s
)
1321 return p
->second
.snapid
;
1325 void pg_pool_t::add_snap(const char *n
, utime_t stamp
)
1327 assert(!is_unmanaged_snaps_mode());
1328 snapid_t s
= get_snap_seq() + 1;
1330 snaps
[s
].snapid
= s
;
1332 snaps
[s
].stamp
= stamp
;
1335 void pg_pool_t::add_unmanaged_snap(uint64_t& snapid
)
1337 if (removed_snaps
.empty()) {
1338 assert(!is_pool_snaps_mode());
1339 removed_snaps
.insert(snapid_t(1));
1342 snapid
= snap_seq
= snap_seq
+ 1;
1345 void pg_pool_t::remove_snap(snapid_t s
)
1347 assert(snaps
.count(s
));
1349 snap_seq
= snap_seq
+ 1;
1352 void pg_pool_t::remove_unmanaged_snap(snapid_t s
)
1354 assert(is_unmanaged_snaps_mode());
1355 removed_snaps
.insert(s
);
1356 snap_seq
= snap_seq
+ 1;
1357 removed_snaps
.insert(get_snap_seq());
1360 SnapContext
pg_pool_t::get_snap_context() const
1362 vector
<snapid_t
> s(snaps
.size());
1364 for (map
<snapid_t
, pool_snap_info_t
>::const_reverse_iterator p
= snaps
.rbegin();
1368 return SnapContext(get_snap_seq(), s
);
1371 uint32_t pg_pool_t::hash_key(const string
& key
, const string
& ns
) const
1374 return ceph_str_hash(object_hash
, key
.data(), key
.length());
1375 int nsl
= ns
.length();
1376 int len
= key
.length() + nsl
+ 1;
1378 memcpy(&buf
[0], ns
.data(), nsl
);
1380 memcpy(&buf
[nsl
+1], key
.data(), key
.length());
1381 return ceph_str_hash(object_hash
, &buf
[0], len
);
1384 uint32_t pg_pool_t::raw_hash_to_pg(uint32_t v
) const
1386 return ceph_stable_mod(v
, pg_num
, pg_num_mask
);
1390 * map a raw pg (with full precision ps) into an actual pg, for storage
1392 pg_t
pg_pool_t::raw_pg_to_pg(pg_t pg
) const
1394 pg
.set_ps(ceph_stable_mod(pg
.ps(), pg_num
, pg_num_mask
));
1399 * map raw pg (full precision ps) into a placement seed. include
1400 * pool id in that value so that different pools don't use the same
1403 ps_t
pg_pool_t::raw_pg_to_pps(pg_t pg
) const
1405 if (flags
& FLAG_HASHPSPOOL
) {
1406 // Hash the pool id so that pool PGs do not overlap.
1408 crush_hash32_2(CRUSH_HASH_RJENKINS1
,
1409 ceph_stable_mod(pg
.ps(), pgp_num
, pgp_num_mask
),
1412 // Legacy behavior; add ps and pool together. This is not a great
1413 // idea because the PGs from each pool will essentially overlap on
1414 // top of each other: 0.5 == 1.4 == 2.3 == ...
1416 ceph_stable_mod(pg
.ps(), pgp_num
, pgp_num_mask
) +
1421 uint32_t pg_pool_t::get_random_pg_position(pg_t pg
, uint32_t seed
) const
1423 uint32_t r
= crush_hash32_2(CRUSH_HASH_RJENKINS1
, seed
, 123);
1424 if (pg_num
== pg_num_mask
+ 1) {
1427 unsigned smaller_mask
= pg_num_mask
>> 1;
1428 if ((pg
.ps() & smaller_mask
) < (pg_num
& smaller_mask
)) {
1438 void pg_pool_t::encode(bufferlist
& bl
, uint64_t features
) const
1440 if ((features
& CEPH_FEATURE_PGPOOL3
) == 0) {
1441 // this encoding matches the old struct ceph_pg_pool
1443 ::encode(struct_v
, bl
);
1446 ::encode(crush_rule
, bl
);
1447 ::encode(object_hash
, bl
);
1448 ::encode(pg_num
, bl
);
1449 ::encode(pgp_num
, bl
);
1450 __u32 lpg_num
= 0, lpgp_num
= 0; // tell old code that there are no localized pgs.
1451 ::encode(lpg_num
, bl
);
1452 ::encode(lpgp_num
, bl
);
1453 ::encode(last_change
, bl
);
1454 ::encode(snap_seq
, bl
);
1455 ::encode(snap_epoch
, bl
);
1457 __u32 n
= snaps
.size();
1459 n
= removed_snaps
.num_intervals();
1464 ::encode_nohead(snaps
, bl
, features
);
1465 ::encode_nohead(removed_snaps
, bl
);
1469 if ((features
& CEPH_FEATURE_OSDENC
) == 0) {
1471 ::encode(struct_v
, bl
);
1474 ::encode(crush_rule
, bl
);
1475 ::encode(object_hash
, bl
);
1476 ::encode(pg_num
, bl
);
1477 ::encode(pgp_num
, bl
);
1478 __u32 lpg_num
= 0, lpgp_num
= 0; // tell old code that there are no localized pgs.
1479 ::encode(lpg_num
, bl
);
1480 ::encode(lpgp_num
, bl
);
1481 ::encode(last_change
, bl
);
1482 ::encode(snap_seq
, bl
);
1483 ::encode(snap_epoch
, bl
);
1484 ::encode(snaps
, bl
, features
);
1485 ::encode(removed_snaps
, bl
);
1487 ::encode(flags
, bl
);
1488 ::encode(crash_replay_interval
, bl
);
1492 if ((features
& CEPH_FEATURE_OSD_POOLRESEND
) == 0) {
1493 // we simply added last_force_op_resend here, which is a fully
1494 // backward compatible change. however, encoding the same map
1495 // differently between monitors triggers scrub noise (even though
1496 // they are decodable without the feature), so let's be pendantic
1498 ENCODE_START(14, 5, bl
);
1501 ::encode(crush_rule
, bl
);
1502 ::encode(object_hash
, bl
);
1503 ::encode(pg_num
, bl
);
1504 ::encode(pgp_num
, bl
);
1505 __u32 lpg_num
= 0, lpgp_num
= 0; // tell old code that there are no localized pgs.
1506 ::encode(lpg_num
, bl
);
1507 ::encode(lpgp_num
, bl
);
1508 ::encode(last_change
, bl
);
1509 ::encode(snap_seq
, bl
);
1510 ::encode(snap_epoch
, bl
);
1511 ::encode(snaps
, bl
, features
);
1512 ::encode(removed_snaps
, bl
);
1514 ::encode(flags
, bl
);
1515 ::encode(crash_replay_interval
, bl
);
1516 ::encode(min_size
, bl
);
1517 ::encode(quota_max_bytes
, bl
);
1518 ::encode(quota_max_objects
, bl
);
1519 ::encode(tiers
, bl
);
1520 ::encode(tier_of
, bl
);
1521 __u8 c
= cache_mode
;
1523 ::encode(read_tier
, bl
);
1524 ::encode(write_tier
, bl
);
1525 ::encode(properties
, bl
);
1526 ::encode(hit_set_params
, bl
);
1527 ::encode(hit_set_period
, bl
);
1528 ::encode(hit_set_count
, bl
);
1529 ::encode(stripe_width
, bl
);
1530 ::encode(target_max_bytes
, bl
);
1531 ::encode(target_max_objects
, bl
);
1532 ::encode(cache_target_dirty_ratio_micro
, bl
);
1533 ::encode(cache_target_full_ratio_micro
, bl
);
1534 ::encode(cache_min_flush_age
, bl
);
1535 ::encode(cache_min_evict_age
, bl
);
1536 ::encode(erasure_code_profile
, bl
);
1542 if (!(features
& CEPH_FEATURE_NEW_OSDOP_ENCODING
)) {
1543 // this was the first post-hammer thing we added; if it's missing, encode
1547 if (!HAVE_FEATURE(features
, SERVER_LUMINOUS
)) {
1551 ENCODE_START(v
, 5, bl
);
1554 ::encode(crush_rule
, bl
);
1555 ::encode(object_hash
, bl
);
1556 ::encode(pg_num
, bl
);
1557 ::encode(pgp_num
, bl
);
1558 __u32 lpg_num
= 0, lpgp_num
= 0; // tell old code that there are no localized pgs.
1559 ::encode(lpg_num
, bl
);
1560 ::encode(lpgp_num
, bl
);
1561 ::encode(last_change
, bl
);
1562 ::encode(snap_seq
, bl
);
1563 ::encode(snap_epoch
, bl
);
1564 ::encode(snaps
, bl
, features
);
1565 ::encode(removed_snaps
, bl
);
1567 ::encode(flags
, bl
);
1568 ::encode(crash_replay_interval
, bl
);
1569 ::encode(min_size
, bl
);
1570 ::encode(quota_max_bytes
, bl
);
1571 ::encode(quota_max_objects
, bl
);
1572 ::encode(tiers
, bl
);
1573 ::encode(tier_of
, bl
);
1574 __u8 c
= cache_mode
;
1576 ::encode(read_tier
, bl
);
1577 ::encode(write_tier
, bl
);
1578 ::encode(properties
, bl
);
1579 ::encode(hit_set_params
, bl
);
1580 ::encode(hit_set_period
, bl
);
1581 ::encode(hit_set_count
, bl
);
1582 ::encode(stripe_width
, bl
);
1583 ::encode(target_max_bytes
, bl
);
1584 ::encode(target_max_objects
, bl
);
1585 ::encode(cache_target_dirty_ratio_micro
, bl
);
1586 ::encode(cache_target_full_ratio_micro
, bl
);
1587 ::encode(cache_min_flush_age
, bl
);
1588 ::encode(cache_min_evict_age
, bl
);
1589 ::encode(erasure_code_profile
, bl
);
1590 ::encode(last_force_op_resend_preluminous
, bl
);
1591 ::encode(min_read_recency_for_promote
, bl
);
1592 ::encode(expected_num_objects
, bl
);
1594 ::encode(cache_target_dirty_high_ratio_micro
, bl
);
1597 ::encode(min_write_recency_for_promote
, bl
);
1600 ::encode(use_gmt_hitset
, bl
);
1603 ::encode(fast_read
, bl
);
1606 ::encode(hit_set_grade_decay_rate
, bl
);
1607 ::encode(hit_set_search_last_n
, bl
);
1613 ::encode(last_force_op_resend
, bl
);
1616 ::encode(application_metadata
, bl
);
1621 void pg_pool_t::decode(bufferlist::iterator
& bl
)
1623 DECODE_START_LEGACY_COMPAT_LEN(26, 5, 5, bl
);
1626 ::decode(crush_rule
, bl
);
1627 ::decode(object_hash
, bl
);
1628 ::decode(pg_num
, bl
);
1629 ::decode(pgp_num
, bl
);
1631 __u32 lpg_num
, lpgp_num
;
1632 ::decode(lpg_num
, bl
);
1633 ::decode(lpgp_num
, bl
);
1635 ::decode(last_change
, bl
);
1636 ::decode(snap_seq
, bl
);
1637 ::decode(snap_epoch
, bl
);
1639 if (struct_v
>= 3) {
1640 ::decode(snaps
, bl
);
1641 ::decode(removed_snaps
, bl
);
1648 ::decode_nohead(n
, snaps
, bl
);
1649 ::decode_nohead(m
, removed_snaps
, bl
);
1652 if (struct_v
>= 4) {
1653 ::decode(flags
, bl
);
1654 ::decode(crash_replay_interval
, bl
);
1658 // if this looks like the 'data' pool, set the
1659 // crash_replay_interval appropriately. unfortunately, we can't
1660 // be precise here. this should be good enough to preserve replay
1661 // on the data pool for the majority of cluster upgrades, though.
1662 if (crush_rule
== 0 && auid
== 0)
1663 crash_replay_interval
= 60;
1665 crash_replay_interval
= 0;
1667 if (struct_v
>= 7) {
1668 ::decode(min_size
, bl
);
1670 min_size
= size
- size
/2;
1672 if (struct_v
>= 8) {
1673 ::decode(quota_max_bytes
, bl
);
1674 ::decode(quota_max_objects
, bl
);
1676 if (struct_v
>= 9) {
1677 ::decode(tiers
, bl
);
1678 ::decode(tier_of
, bl
);
1681 cache_mode
= (cache_mode_t
)v
;
1682 ::decode(read_tier
, bl
);
1683 ::decode(write_tier
, bl
);
1685 if (struct_v
>= 10) {
1686 ::decode(properties
, bl
);
1688 if (struct_v
>= 11) {
1689 ::decode(hit_set_params
, bl
);
1690 ::decode(hit_set_period
, bl
);
1691 ::decode(hit_set_count
, bl
);
1694 hit_set_period
= def
.hit_set_period
;
1695 hit_set_count
= def
.hit_set_count
;
1697 if (struct_v
>= 12) {
1698 ::decode(stripe_width
, bl
);
1700 set_stripe_width(0);
1702 if (struct_v
>= 13) {
1703 ::decode(target_max_bytes
, bl
);
1704 ::decode(target_max_objects
, bl
);
1705 ::decode(cache_target_dirty_ratio_micro
, bl
);
1706 ::decode(cache_target_full_ratio_micro
, bl
);
1707 ::decode(cache_min_flush_age
, bl
);
1708 ::decode(cache_min_evict_age
, bl
);
1710 target_max_bytes
= 0;
1711 target_max_objects
= 0;
1712 cache_target_dirty_ratio_micro
= 0;
1713 cache_target_full_ratio_micro
= 0;
1714 cache_min_flush_age
= 0;
1715 cache_min_evict_age
= 0;
1717 if (struct_v
>= 14) {
1718 ::decode(erasure_code_profile
, bl
);
1720 if (struct_v
>= 15) {
1721 ::decode(last_force_op_resend_preluminous
, bl
);
1723 last_force_op_resend_preluminous
= 0;
1725 if (struct_v
>= 16) {
1726 ::decode(min_read_recency_for_promote
, bl
);
1728 min_read_recency_for_promote
= 1;
1730 if (struct_v
>= 17) {
1731 ::decode(expected_num_objects
, bl
);
1733 expected_num_objects
= 0;
1735 if (struct_v
>= 19) {
1736 ::decode(cache_target_dirty_high_ratio_micro
, bl
);
1738 cache_target_dirty_high_ratio_micro
= cache_target_dirty_ratio_micro
;
1740 if (struct_v
>= 20) {
1741 ::decode(min_write_recency_for_promote
, bl
);
1743 min_write_recency_for_promote
= 1;
1745 if (struct_v
>= 21) {
1746 ::decode(use_gmt_hitset
, bl
);
1748 use_gmt_hitset
= false;
1750 if (struct_v
>= 22) {
1751 ::decode(fast_read
, bl
);
1755 if (struct_v
>= 23) {
1756 ::decode(hit_set_grade_decay_rate
, bl
);
1757 ::decode(hit_set_search_last_n
, bl
);
1759 hit_set_grade_decay_rate
= 0;
1760 hit_set_search_last_n
= 1;
1762 if (struct_v
>= 24) {
1765 if (struct_v
>= 25) {
1766 ::decode(last_force_op_resend
, bl
);
1768 last_force_op_resend
= last_force_op_resend_preluminous
;
1770 if (struct_v
>= 26) {
1771 ::decode(application_metadata
, bl
);
1778 void pg_pool_t::generate_test_instances(list
<pg_pool_t
*>& o
)
1781 o
.push_back(new pg_pool_t(a
));
1783 a
.type
= TYPE_REPLICATED
;
1790 a
.last_force_op_resend
= 123823;
1791 a
.last_force_op_resend_preluminous
= 123824;
1795 a
.crash_replay_interval
= 13;
1796 a
.quota_max_bytes
= 473;
1797 a
.quota_max_objects
= 474;
1798 o
.push_back(new pg_pool_t(a
));
1800 a
.snaps
[3].name
= "asdf";
1801 a
.snaps
[3].snapid
= 3;
1802 a
.snaps
[3].stamp
= utime_t(123, 4);
1803 a
.snaps
[6].name
= "qwer";
1804 a
.snaps
[6].snapid
= 6;
1805 a
.snaps
[6].stamp
= utime_t(23423, 4);
1806 o
.push_back(new pg_pool_t(a
));
1808 a
.removed_snaps
.insert(2); // not quite valid to combine with snaps!
1809 a
.quota_max_bytes
= 2473;
1810 a
.quota_max_objects
= 4374;
1814 a
.cache_mode
= CACHEMODE_WRITEBACK
;
1817 a
.hit_set_params
= HitSet::Params(new BloomHitSet::Params
);
1818 a
.hit_set_period
= 3600;
1819 a
.hit_set_count
= 8;
1820 a
.min_read_recency_for_promote
= 1;
1821 a
.min_write_recency_for_promote
= 1;
1822 a
.hit_set_grade_decay_rate
= 50;
1823 a
.hit_set_search_last_n
= 1;
1824 a
.calc_grade_table();
1825 a
.set_stripe_width(12345);
1826 a
.target_max_bytes
= 1238132132;
1827 a
.target_max_objects
= 1232132;
1828 a
.cache_target_dirty_ratio_micro
= 187232;
1829 a
.cache_target_dirty_high_ratio_micro
= 309856;
1830 a
.cache_target_full_ratio_micro
= 987222;
1831 a
.cache_min_flush_age
= 231;
1832 a
.cache_min_evict_age
= 2321;
1833 a
.erasure_code_profile
= "profile in osdmap";
1834 a
.expected_num_objects
= 123456;
1835 a
.fast_read
= false;
1836 a
.application_metadata
= {{"rbd", {{"key", "value"}}}};
1837 o
.push_back(new pg_pool_t(a
));
1840 ostream
& operator<<(ostream
& out
, const pg_pool_t
& p
)
1842 out
<< p
.get_type_name()
1843 << " size " << p
.get_size()
1844 << " min_size " << p
.get_min_size()
1845 << " crush_rule " << p
.get_crush_rule()
1846 << " object_hash " << p
.get_object_hash_name()
1847 << " pg_num " << p
.get_pg_num()
1848 << " pgp_num " << p
.get_pgp_num()
1849 << " last_change " << p
.get_last_change();
1850 if (p
.get_last_force_op_resend() ||
1851 p
.get_last_force_op_resend_preluminous())
1852 out
<< " lfor " << p
.get_last_force_op_resend() << "/"
1853 << p
.get_last_force_op_resend_preluminous();
1855 out
<< " owner " << p
.get_auid();
1857 out
<< " flags " << p
.get_flags_string();
1858 if (p
.crash_replay_interval
)
1859 out
<< " crash_replay_interval " << p
.crash_replay_interval
;
1860 if (p
.quota_max_bytes
)
1861 out
<< " max_bytes " << p
.quota_max_bytes
;
1862 if (p
.quota_max_objects
)
1863 out
<< " max_objects " << p
.quota_max_objects
;
1864 if (!p
.tiers
.empty())
1865 out
<< " tiers " << p
.tiers
;
1867 out
<< " tier_of " << p
.tier_of
;
1868 if (p
.has_read_tier())
1869 out
<< " read_tier " << p
.read_tier
;
1870 if (p
.has_write_tier())
1871 out
<< " write_tier " << p
.write_tier
;
1873 out
<< " cache_mode " << p
.get_cache_mode_name();
1874 if (p
.target_max_bytes
)
1875 out
<< " target_bytes " << p
.target_max_bytes
;
1876 if (p
.target_max_objects
)
1877 out
<< " target_objects " << p
.target_max_objects
;
1878 if (p
.hit_set_params
.get_type() != HitSet::TYPE_NONE
) {
1879 out
<< " hit_set " << p
.hit_set_params
1880 << " " << p
.hit_set_period
<< "s"
1881 << " x" << p
.hit_set_count
<< " decay_rate "
1882 << p
.hit_set_grade_decay_rate
1883 << " search_last_n " << p
.hit_set_search_last_n
;
1885 if (p
.min_read_recency_for_promote
)
1886 out
<< " min_read_recency_for_promote " << p
.min_read_recency_for_promote
;
1887 if (p
.min_write_recency_for_promote
)
1888 out
<< " min_write_recency_for_promote " << p
.min_write_recency_for_promote
;
1889 out
<< " stripe_width " << p
.get_stripe_width();
1890 if (p
.expected_num_objects
)
1891 out
<< " expected_num_objects " << p
.expected_num_objects
;
1893 out
<< " fast_read " << p
.fast_read
;
1895 if (!p
.application_metadata
.empty()) {
1896 out
<< " application ";
1897 for (auto it
= p
.application_metadata
.begin();
1898 it
!= p
.application_metadata
.end(); ++it
) {
1899 if (it
!= p
.application_metadata
.begin())
1908 // -- object_stat_sum_t --
1910 void object_stat_sum_t::dump(Formatter
*f
) const
1912 f
->dump_int("num_bytes", num_bytes
);
1913 f
->dump_int("num_objects", num_objects
);
1914 f
->dump_int("num_object_clones", num_object_clones
);
1915 f
->dump_int("num_object_copies", num_object_copies
);
1916 f
->dump_int("num_objects_missing_on_primary", num_objects_missing_on_primary
);
1917 f
->dump_int("num_objects_missing", num_objects_missing
);
1918 f
->dump_int("num_objects_degraded", num_objects_degraded
);
1919 f
->dump_int("num_objects_misplaced", num_objects_misplaced
);
1920 f
->dump_int("num_objects_unfound", num_objects_unfound
);
1921 f
->dump_int("num_objects_dirty", num_objects_dirty
);
1922 f
->dump_int("num_whiteouts", num_whiteouts
);
1923 f
->dump_int("num_read", num_rd
);
1924 f
->dump_int("num_read_kb", num_rd_kb
);
1925 f
->dump_int("num_write", num_wr
);
1926 f
->dump_int("num_write_kb", num_wr_kb
);
1927 f
->dump_int("num_scrub_errors", num_scrub_errors
);
1928 f
->dump_int("num_shallow_scrub_errors", num_shallow_scrub_errors
);
1929 f
->dump_int("num_deep_scrub_errors", num_deep_scrub_errors
);
1930 f
->dump_int("num_objects_recovered", num_objects_recovered
);
1931 f
->dump_int("num_bytes_recovered", num_bytes_recovered
);
1932 f
->dump_int("num_keys_recovered", num_keys_recovered
);
1933 f
->dump_int("num_objects_omap", num_objects_omap
);
1934 f
->dump_int("num_objects_hit_set_archive", num_objects_hit_set_archive
);
1935 f
->dump_int("num_bytes_hit_set_archive", num_bytes_hit_set_archive
);
1936 f
->dump_int("num_flush", num_flush
);
1937 f
->dump_int("num_flush_kb", num_flush_kb
);
1938 f
->dump_int("num_evict", num_evict
);
1939 f
->dump_int("num_evict_kb", num_evict_kb
);
1940 f
->dump_int("num_promote", num_promote
);
1941 f
->dump_int("num_flush_mode_high", num_flush_mode_high
);
1942 f
->dump_int("num_flush_mode_low", num_flush_mode_low
);
1943 f
->dump_int("num_evict_mode_some", num_evict_mode_some
);
1944 f
->dump_int("num_evict_mode_full", num_evict_mode_full
);
1945 f
->dump_int("num_objects_pinned", num_objects_pinned
);
1946 f
->dump_int("num_legacy_snapsets", num_legacy_snapsets
);
1949 void object_stat_sum_t::encode(bufferlist
& bl
) const
1951 ENCODE_START(16, 14, bl
);
1952 #if defined(CEPH_LITTLE_ENDIAN)
1953 bl
.append((char *)(&num_bytes
), sizeof(object_stat_sum_t
));
1955 ::encode(num_bytes
, bl
);
1956 ::encode(num_objects
, bl
);
1957 ::encode(num_object_clones
, bl
);
1958 ::encode(num_object_copies
, bl
);
1959 ::encode(num_objects_missing_on_primary
, bl
);
1960 ::encode(num_objects_degraded
, bl
);
1961 ::encode(num_objects_unfound
, bl
);
1962 ::encode(num_rd
, bl
);
1963 ::encode(num_rd_kb
, bl
);
1964 ::encode(num_wr
, bl
);
1965 ::encode(num_wr_kb
, bl
);
1966 ::encode(num_scrub_errors
, bl
);
1967 ::encode(num_objects_recovered
, bl
);
1968 ::encode(num_bytes_recovered
, bl
);
1969 ::encode(num_keys_recovered
, bl
);
1970 ::encode(num_shallow_scrub_errors
, bl
);
1971 ::encode(num_deep_scrub_errors
, bl
);
1972 ::encode(num_objects_dirty
, bl
);
1973 ::encode(num_whiteouts
, bl
);
1974 ::encode(num_objects_omap
, bl
);
1975 ::encode(num_objects_hit_set_archive
, bl
);
1976 ::encode(num_objects_misplaced
, bl
);
1977 ::encode(num_bytes_hit_set_archive
, bl
);
1978 ::encode(num_flush
, bl
);
1979 ::encode(num_flush_kb
, bl
);
1980 ::encode(num_evict
, bl
);
1981 ::encode(num_evict_kb
, bl
);
1982 ::encode(num_promote
, bl
);
1983 ::encode(num_flush_mode_high
, bl
);
1984 ::encode(num_flush_mode_low
, bl
);
1985 ::encode(num_evict_mode_some
, bl
);
1986 ::encode(num_evict_mode_full
, bl
);
1987 ::encode(num_objects_pinned
, bl
);
1988 ::encode(num_objects_missing
, bl
);
1989 ::encode(num_legacy_snapsets
, bl
);
1994 void object_stat_sum_t::decode(bufferlist::iterator
& bl
)
1996 bool decode_finish
= false;
1997 DECODE_START(16, bl
);
1998 #if defined(CEPH_LITTLE_ENDIAN)
1999 if (struct_v
>= 16) {
2000 bl
.copy(sizeof(object_stat_sum_t
), (char*)(&num_bytes
));
2001 decode_finish
= true;
2004 if (!decode_finish
) {
2005 ::decode(num_bytes
, bl
);
2006 ::decode(num_objects
, bl
);
2007 ::decode(num_object_clones
, bl
);
2008 ::decode(num_object_copies
, bl
);
2009 ::decode(num_objects_missing_on_primary
, bl
);
2010 ::decode(num_objects_degraded
, bl
);
2011 ::decode(num_objects_unfound
, bl
);
2012 ::decode(num_rd
, bl
);
2013 ::decode(num_rd_kb
, bl
);
2014 ::decode(num_wr
, bl
);
2015 ::decode(num_wr_kb
, bl
);
2016 ::decode(num_scrub_errors
, bl
);
2017 ::decode(num_objects_recovered
, bl
);
2018 ::decode(num_bytes_recovered
, bl
);
2019 ::decode(num_keys_recovered
, bl
);
2020 ::decode(num_shallow_scrub_errors
, bl
);
2021 ::decode(num_deep_scrub_errors
, bl
);
2022 ::decode(num_objects_dirty
, bl
);
2023 ::decode(num_whiteouts
, bl
);
2024 ::decode(num_objects_omap
, bl
);
2025 ::decode(num_objects_hit_set_archive
, bl
);
2026 ::decode(num_objects_misplaced
, bl
);
2027 ::decode(num_bytes_hit_set_archive
, bl
);
2028 ::decode(num_flush
, bl
);
2029 ::decode(num_flush_kb
, bl
);
2030 ::decode(num_evict
, bl
);
2031 ::decode(num_evict_kb
, bl
);
2032 ::decode(num_promote
, bl
);
2033 ::decode(num_flush_mode_high
, bl
);
2034 ::decode(num_flush_mode_low
, bl
);
2035 ::decode(num_evict_mode_some
, bl
);
2036 ::decode(num_evict_mode_full
, bl
);
2037 ::decode(num_objects_pinned
, bl
);
2038 ::decode(num_objects_missing
, bl
);
2039 if (struct_v
>= 16) {
2040 ::decode(num_legacy_snapsets
, bl
);
2042 num_legacy_snapsets
= num_object_clones
; // upper bound
2048 void object_stat_sum_t::generate_test_instances(list
<object_stat_sum_t
*>& o
)
2050 object_stat_sum_t a
;
2054 a
.num_object_clones
= 4;
2055 a
.num_object_copies
= 5;
2056 a
.num_objects_missing_on_primary
= 6;
2057 a
.num_objects_missing
= 123;
2058 a
.num_objects_degraded
= 7;
2059 a
.num_objects_unfound
= 8;
2060 a
.num_rd
= 9; a
.num_rd_kb
= 10;
2061 a
.num_wr
= 11; a
.num_wr_kb
= 12;
2062 a
.num_objects_recovered
= 14;
2063 a
.num_bytes_recovered
= 15;
2064 a
.num_keys_recovered
= 16;
2065 a
.num_deep_scrub_errors
= 17;
2066 a
.num_shallow_scrub_errors
= 18;
2067 a
.num_scrub_errors
= a
.num_deep_scrub_errors
+ a
.num_shallow_scrub_errors
;
2068 a
.num_objects_dirty
= 21;
2069 a
.num_whiteouts
= 22;
2070 a
.num_objects_misplaced
= 1232;
2071 a
.num_objects_hit_set_archive
= 2;
2072 a
.num_bytes_hit_set_archive
= 27;
2078 a
.num_flush_mode_high
= 0;
2079 a
.num_flush_mode_low
= 1;
2080 a
.num_evict_mode_some
= 1;
2081 a
.num_evict_mode_full
= 0;
2082 a
.num_objects_pinned
= 20;
2083 o
.push_back(new object_stat_sum_t(a
));
2086 void object_stat_sum_t::add(const object_stat_sum_t
& o
)
2088 num_bytes
+= o
.num_bytes
;
2089 num_objects
+= o
.num_objects
;
2090 num_object_clones
+= o
.num_object_clones
;
2091 num_object_copies
+= o
.num_object_copies
;
2092 num_objects_missing_on_primary
+= o
.num_objects_missing_on_primary
;
2093 num_objects_missing
+= o
.num_objects_missing
;
2094 num_objects_degraded
+= o
.num_objects_degraded
;
2095 num_objects_misplaced
+= o
.num_objects_misplaced
;
2097 num_rd_kb
+= o
.num_rd_kb
;
2099 num_wr_kb
+= o
.num_wr_kb
;
2100 num_objects_unfound
+= o
.num_objects_unfound
;
2101 num_scrub_errors
+= o
.num_scrub_errors
;
2102 num_shallow_scrub_errors
+= o
.num_shallow_scrub_errors
;
2103 num_deep_scrub_errors
+= o
.num_deep_scrub_errors
;
2104 num_objects_recovered
+= o
.num_objects_recovered
;
2105 num_bytes_recovered
+= o
.num_bytes_recovered
;
2106 num_keys_recovered
+= o
.num_keys_recovered
;
2107 num_objects_dirty
+= o
.num_objects_dirty
;
2108 num_whiteouts
+= o
.num_whiteouts
;
2109 num_objects_omap
+= o
.num_objects_omap
;
2110 num_objects_hit_set_archive
+= o
.num_objects_hit_set_archive
;
2111 num_bytes_hit_set_archive
+= o
.num_bytes_hit_set_archive
;
2112 num_flush
+= o
.num_flush
;
2113 num_flush_kb
+= o
.num_flush_kb
;
2114 num_evict
+= o
.num_evict
;
2115 num_evict_kb
+= o
.num_evict_kb
;
2116 num_promote
+= o
.num_promote
;
2117 num_flush_mode_high
+= o
.num_flush_mode_high
;
2118 num_flush_mode_low
+= o
.num_flush_mode_low
;
2119 num_evict_mode_some
+= o
.num_evict_mode_some
;
2120 num_evict_mode_full
+= o
.num_evict_mode_full
;
2121 num_objects_pinned
+= o
.num_objects_pinned
;
2122 num_legacy_snapsets
+= o
.num_legacy_snapsets
;
2125 void object_stat_sum_t::sub(const object_stat_sum_t
& o
)
2127 num_bytes
-= o
.num_bytes
;
2128 num_objects
-= o
.num_objects
;
2129 num_object_clones
-= o
.num_object_clones
;
2130 num_object_copies
-= o
.num_object_copies
;
2131 num_objects_missing_on_primary
-= o
.num_objects_missing_on_primary
;
2132 num_objects_missing
-= o
.num_objects_missing
;
2133 num_objects_degraded
-= o
.num_objects_degraded
;
2134 num_objects_misplaced
-= o
.num_objects_misplaced
;
2136 num_rd_kb
-= o
.num_rd_kb
;
2138 num_wr_kb
-= o
.num_wr_kb
;
2139 num_objects_unfound
-= o
.num_objects_unfound
;
2140 num_scrub_errors
-= o
.num_scrub_errors
;
2141 num_shallow_scrub_errors
-= o
.num_shallow_scrub_errors
;
2142 num_deep_scrub_errors
-= o
.num_deep_scrub_errors
;
2143 num_objects_recovered
-= o
.num_objects_recovered
;
2144 num_bytes_recovered
-= o
.num_bytes_recovered
;
2145 num_keys_recovered
-= o
.num_keys_recovered
;
2146 num_objects_dirty
-= o
.num_objects_dirty
;
2147 num_whiteouts
-= o
.num_whiteouts
;
2148 num_objects_omap
-= o
.num_objects_omap
;
2149 num_objects_hit_set_archive
-= o
.num_objects_hit_set_archive
;
2150 num_bytes_hit_set_archive
-= o
.num_bytes_hit_set_archive
;
2151 num_flush
-= o
.num_flush
;
2152 num_flush_kb
-= o
.num_flush_kb
;
2153 num_evict
-= o
.num_evict
;
2154 num_evict_kb
-= o
.num_evict_kb
;
2155 num_promote
-= o
.num_promote
;
2156 num_flush_mode_high
-= o
.num_flush_mode_high
;
2157 num_flush_mode_low
-= o
.num_flush_mode_low
;
2158 num_evict_mode_some
-= o
.num_evict_mode_some
;
2159 num_evict_mode_full
-= o
.num_evict_mode_full
;
2160 num_objects_pinned
-= o
.num_objects_pinned
;
2161 num_legacy_snapsets
-= o
.num_legacy_snapsets
;
2164 bool operator==(const object_stat_sum_t
& l
, const object_stat_sum_t
& r
)
2167 l
.num_bytes
== r
.num_bytes
&&
2168 l
.num_objects
== r
.num_objects
&&
2169 l
.num_object_clones
== r
.num_object_clones
&&
2170 l
.num_object_copies
== r
.num_object_copies
&&
2171 l
.num_objects_missing_on_primary
== r
.num_objects_missing_on_primary
&&
2172 l
.num_objects_missing
== r
.num_objects_missing
&&
2173 l
.num_objects_degraded
== r
.num_objects_degraded
&&
2174 l
.num_objects_misplaced
== r
.num_objects_misplaced
&&
2175 l
.num_objects_unfound
== r
.num_objects_unfound
&&
2176 l
.num_rd
== r
.num_rd
&&
2177 l
.num_rd_kb
== r
.num_rd_kb
&&
2178 l
.num_wr
== r
.num_wr
&&
2179 l
.num_wr_kb
== r
.num_wr_kb
&&
2180 l
.num_scrub_errors
== r
.num_scrub_errors
&&
2181 l
.num_shallow_scrub_errors
== r
.num_shallow_scrub_errors
&&
2182 l
.num_deep_scrub_errors
== r
.num_deep_scrub_errors
&&
2183 l
.num_objects_recovered
== r
.num_objects_recovered
&&
2184 l
.num_bytes_recovered
== r
.num_bytes_recovered
&&
2185 l
.num_keys_recovered
== r
.num_keys_recovered
&&
2186 l
.num_objects_dirty
== r
.num_objects_dirty
&&
2187 l
.num_whiteouts
== r
.num_whiteouts
&&
2188 l
.num_objects_omap
== r
.num_objects_omap
&&
2189 l
.num_objects_hit_set_archive
== r
.num_objects_hit_set_archive
&&
2190 l
.num_bytes_hit_set_archive
== r
.num_bytes_hit_set_archive
&&
2191 l
.num_flush
== r
.num_flush
&&
2192 l
.num_flush_kb
== r
.num_flush_kb
&&
2193 l
.num_evict
== r
.num_evict
&&
2194 l
.num_evict_kb
== r
.num_evict_kb
&&
2195 l
.num_promote
== r
.num_promote
&&
2196 l
.num_flush_mode_high
== r
.num_flush_mode_high
&&
2197 l
.num_flush_mode_low
== r
.num_flush_mode_low
&&
2198 l
.num_evict_mode_some
== r
.num_evict_mode_some
&&
2199 l
.num_evict_mode_full
== r
.num_evict_mode_full
&&
2200 l
.num_objects_pinned
== r
.num_objects_pinned
&&
2201 l
.num_legacy_snapsets
== r
.num_legacy_snapsets
;
2204 // -- object_stat_collection_t --
2206 void object_stat_collection_t::dump(Formatter
*f
) const
2208 f
->open_object_section("stat_sum");
2213 void object_stat_collection_t::encode(bufferlist
& bl
) const
2215 ENCODE_START(2, 2, bl
);
2217 ::encode((__u32
)0, bl
);
2221 void object_stat_collection_t::decode(bufferlist::iterator
& bl
)
2223 DECODE_START_LEGACY_COMPAT_LEN(2, 2, 2, bl
);
2226 map
<string
,object_stat_sum_t
> cat_sum
;
2227 ::decode(cat_sum
, bl
);
2232 void object_stat_collection_t::generate_test_instances(list
<object_stat_collection_t
*>& o
)
2234 object_stat_collection_t a
;
2235 o
.push_back(new object_stat_collection_t(a
));
2236 list
<object_stat_sum_t
*> l
;
2237 object_stat_sum_t::generate_test_instances(l
);
2238 for (list
<object_stat_sum_t
*>::iterator p
= l
.begin(); p
!= l
.end(); ++p
) {
2240 o
.push_back(new object_stat_collection_t(a
));
2247 bool pg_stat_t::is_acting_osd(int32_t osd
, bool primary
) const
2249 if (primary
&& osd
== acting_primary
) {
2251 } else if (!primary
) {
2252 for(vector
<int32_t>::const_iterator it
= acting
.begin();
2253 it
!= acting
.end(); ++it
)
2262 void pg_stat_t::dump(Formatter
*f
) const
2264 f
->dump_stream("version") << version
;
2265 f
->dump_stream("reported_seq") << reported_seq
;
2266 f
->dump_stream("reported_epoch") << reported_epoch
;
2267 f
->dump_string("state", pg_state_string(state
));
2268 f
->dump_stream("last_fresh") << last_fresh
;
2269 f
->dump_stream("last_change") << last_change
;
2270 f
->dump_stream("last_active") << last_active
;
2271 f
->dump_stream("last_peered") << last_peered
;
2272 f
->dump_stream("last_clean") << last_clean
;
2273 f
->dump_stream("last_became_active") << last_became_active
;
2274 f
->dump_stream("last_became_peered") << last_became_peered
;
2275 f
->dump_stream("last_unstale") << last_unstale
;
2276 f
->dump_stream("last_undegraded") << last_undegraded
;
2277 f
->dump_stream("last_fullsized") << last_fullsized
;
2278 f
->dump_unsigned("mapping_epoch", mapping_epoch
);
2279 f
->dump_stream("log_start") << log_start
;
2280 f
->dump_stream("ondisk_log_start") << ondisk_log_start
;
2281 f
->dump_unsigned("created", created
);
2282 f
->dump_unsigned("last_epoch_clean", last_epoch_clean
);
2283 f
->dump_stream("parent") << parent
;
2284 f
->dump_unsigned("parent_split_bits", parent_split_bits
);
2285 f
->dump_stream("last_scrub") << last_scrub
;
2286 f
->dump_stream("last_scrub_stamp") << last_scrub_stamp
;
2287 f
->dump_stream("last_deep_scrub") << last_deep_scrub
;
2288 f
->dump_stream("last_deep_scrub_stamp") << last_deep_scrub_stamp
;
2289 f
->dump_stream("last_clean_scrub_stamp") << last_clean_scrub_stamp
;
2290 f
->dump_int("log_size", log_size
);
2291 f
->dump_int("ondisk_log_size", ondisk_log_size
);
2292 f
->dump_bool("stats_invalid", stats_invalid
);
2293 f
->dump_bool("dirty_stats_invalid", dirty_stats_invalid
);
2294 f
->dump_bool("omap_stats_invalid", omap_stats_invalid
);
2295 f
->dump_bool("hitset_stats_invalid", hitset_stats_invalid
);
2296 f
->dump_bool("hitset_bytes_stats_invalid", hitset_bytes_stats_invalid
);
2297 f
->dump_bool("pin_stats_invalid", pin_stats_invalid
);
2298 f
->dump_unsigned("snaptrimq_len", snaptrimq_len
);
2300 f
->open_array_section("up");
2301 for (vector
<int32_t>::const_iterator p
= up
.begin(); p
!= up
.end(); ++p
)
2302 f
->dump_int("osd", *p
);
2304 f
->open_array_section("acting");
2305 for (vector
<int32_t>::const_iterator p
= acting
.begin(); p
!= acting
.end(); ++p
)
2306 f
->dump_int("osd", *p
);
2308 f
->open_array_section("blocked_by");
2309 for (vector
<int32_t>::const_iterator p
= blocked_by
.begin();
2310 p
!= blocked_by
.end(); ++p
)
2311 f
->dump_int("osd", *p
);
2313 f
->dump_int("up_primary", up_primary
);
2314 f
->dump_int("acting_primary", acting_primary
);
2317 void pg_stat_t::dump_brief(Formatter
*f
) const
2319 f
->dump_string("state", pg_state_string(state
));
2320 f
->open_array_section("up");
2321 for (vector
<int32_t>::const_iterator p
= up
.begin(); p
!= up
.end(); ++p
)
2322 f
->dump_int("osd", *p
);
2324 f
->open_array_section("acting");
2325 for (vector
<int32_t>::const_iterator p
= acting
.begin(); p
!= acting
.end(); ++p
)
2326 f
->dump_int("osd", *p
);
2328 f
->dump_int("up_primary", up_primary
);
2329 f
->dump_int("acting_primary", acting_primary
);
2332 void pg_stat_t::encode(bufferlist
&bl
) const
2334 ENCODE_START(23, 22, bl
);
2335 ::encode(version
, bl
);
2336 ::encode(reported_seq
, bl
);
2337 ::encode(reported_epoch
, bl
);
2338 ::encode(state
, bl
);
2339 ::encode(log_start
, bl
);
2340 ::encode(ondisk_log_start
, bl
);
2341 ::encode(created
, bl
);
2342 ::encode(last_epoch_clean
, bl
);
2343 ::encode(parent
, bl
);
2344 ::encode(parent_split_bits
, bl
);
2345 ::encode(last_scrub
, bl
);
2346 ::encode(last_scrub_stamp
, bl
);
2347 ::encode(stats
, bl
);
2348 ::encode(log_size
, bl
);
2349 ::encode(ondisk_log_size
, bl
);
2351 ::encode(acting
, bl
);
2352 ::encode(last_fresh
, bl
);
2353 ::encode(last_change
, bl
);
2354 ::encode(last_active
, bl
);
2355 ::encode(last_clean
, bl
);
2356 ::encode(last_unstale
, bl
);
2357 ::encode(mapping_epoch
, bl
);
2358 ::encode(last_deep_scrub
, bl
);
2359 ::encode(last_deep_scrub_stamp
, bl
);
2360 ::encode(stats_invalid
, bl
);
2361 ::encode(last_clean_scrub_stamp
, bl
);
2362 ::encode(last_became_active
, bl
);
2363 ::encode(dirty_stats_invalid
, bl
);
2364 ::encode(up_primary
, bl
);
2365 ::encode(acting_primary
, bl
);
2366 ::encode(omap_stats_invalid
, bl
);
2367 ::encode(hitset_stats_invalid
, bl
);
2368 ::encode(blocked_by
, bl
);
2369 ::encode(last_undegraded
, bl
);
2370 ::encode(last_fullsized
, bl
);
2371 ::encode(hitset_bytes_stats_invalid
, bl
);
2372 ::encode(last_peered
, bl
);
2373 ::encode(last_became_peered
, bl
);
2374 ::encode(pin_stats_invalid
, bl
);
2375 ::encode(snaptrimq_len
, bl
);
2379 void pg_stat_t::decode(bufferlist::iterator
&bl
)
2382 DECODE_START(22, bl
);
2383 ::decode(version
, bl
);
2384 ::decode(reported_seq
, bl
);
2385 ::decode(reported_epoch
, bl
);
2386 ::decode(state
, bl
);
2387 ::decode(log_start
, bl
);
2388 ::decode(ondisk_log_start
, bl
);
2389 ::decode(created
, bl
);
2390 ::decode(last_epoch_clean
, bl
);
2391 ::decode(parent
, bl
);
2392 ::decode(parent_split_bits
, bl
);
2393 ::decode(last_scrub
, bl
);
2394 ::decode(last_scrub_stamp
, bl
);
2395 ::decode(stats
, bl
);
2396 ::decode(log_size
, bl
);
2397 ::decode(ondisk_log_size
, bl
);
2399 ::decode(acting
, bl
);
2400 ::decode(last_fresh
, bl
);
2401 ::decode(last_change
, bl
);
2402 ::decode(last_active
, bl
);
2403 ::decode(last_clean
, bl
);
2404 ::decode(last_unstale
, bl
);
2405 ::decode(mapping_epoch
, bl
);
2406 ::decode(last_deep_scrub
, bl
);
2407 ::decode(last_deep_scrub_stamp
, bl
);
2409 stats_invalid
= tmp
;
2410 ::decode(last_clean_scrub_stamp
, bl
);
2411 ::decode(last_became_active
, bl
);
2413 dirty_stats_invalid
= tmp
;
2414 ::decode(up_primary
, bl
);
2415 ::decode(acting_primary
, bl
);
2417 omap_stats_invalid
= tmp
;
2419 hitset_stats_invalid
= tmp
;
2420 ::decode(blocked_by
, bl
);
2421 ::decode(last_undegraded
, bl
);
2422 ::decode(last_fullsized
, bl
);
2424 hitset_bytes_stats_invalid
= tmp
;
2425 ::decode(last_peered
, bl
);
2426 ::decode(last_became_peered
, bl
);
2428 pin_stats_invalid
= tmp
;
2429 if (struct_v
>= 23) {
2430 ::decode(snaptrimq_len
, bl
);
2435 void pg_stat_t::generate_test_instances(list
<pg_stat_t
*>& o
)
2438 o
.push_back(new pg_stat_t(a
));
2440 a
.version
= eversion_t(1, 3);
2441 a
.reported_epoch
= 1;
2444 a
.mapping_epoch
= 998;
2445 a
.last_fresh
= utime_t(1002, 1);
2446 a
.last_change
= utime_t(1002, 2);
2447 a
.last_active
= utime_t(1002, 3);
2448 a
.last_clean
= utime_t(1002, 4);
2449 a
.last_unstale
= utime_t(1002, 5);
2450 a
.last_undegraded
= utime_t(1002, 7);
2451 a
.last_fullsized
= utime_t(1002, 8);
2452 a
.log_start
= eversion_t(1, 4);
2453 a
.ondisk_log_start
= eversion_t(1, 5);
2455 a
.last_epoch_clean
= 7;
2456 a
.parent
= pg_t(1, 2, 3);
2457 a
.parent_split_bits
= 12;
2458 a
.last_scrub
= eversion_t(9, 10);
2459 a
.last_scrub_stamp
= utime_t(11, 12);
2460 a
.last_deep_scrub
= eversion_t(13, 14);
2461 a
.last_deep_scrub_stamp
= utime_t(15, 16);
2462 a
.last_clean_scrub_stamp
= utime_t(17, 18);
2463 a
.snaptrimq_len
= 1048576;
2464 list
<object_stat_collection_t
*> l
;
2465 object_stat_collection_t::generate_test_instances(l
);
2466 a
.stats
= *l
.back();
2468 a
.ondisk_log_size
= 88;
2469 a
.up
.push_back(123);
2471 a
.acting
.push_back(456);
2472 a
.acting_primary
= 456;
2473 o
.push_back(new pg_stat_t(a
));
2475 a
.up
.push_back(124);
2477 a
.acting
.push_back(124);
2478 a
.acting_primary
= 124;
2479 a
.blocked_by
.push_back(155);
2480 a
.blocked_by
.push_back(156);
2481 o
.push_back(new pg_stat_t(a
));
2484 bool operator==(const pg_stat_t
& l
, const pg_stat_t
& r
)
2487 l
.version
== r
.version
&&
2488 l
.reported_seq
== r
.reported_seq
&&
2489 l
.reported_epoch
== r
.reported_epoch
&&
2490 l
.state
== r
.state
&&
2491 l
.last_fresh
== r
.last_fresh
&&
2492 l
.last_change
== r
.last_change
&&
2493 l
.last_active
== r
.last_active
&&
2494 l
.last_peered
== r
.last_peered
&&
2495 l
.last_clean
== r
.last_clean
&&
2496 l
.last_unstale
== r
.last_unstale
&&
2497 l
.last_undegraded
== r
.last_undegraded
&&
2498 l
.last_fullsized
== r
.last_fullsized
&&
2499 l
.log_start
== r
.log_start
&&
2500 l
.ondisk_log_start
== r
.ondisk_log_start
&&
2501 l
.created
== r
.created
&&
2502 l
.last_epoch_clean
== r
.last_epoch_clean
&&
2503 l
.parent
== r
.parent
&&
2504 l
.parent_split_bits
== r
.parent_split_bits
&&
2505 l
.last_scrub
== r
.last_scrub
&&
2506 l
.last_deep_scrub
== r
.last_deep_scrub
&&
2507 l
.last_scrub_stamp
== r
.last_scrub_stamp
&&
2508 l
.last_deep_scrub_stamp
== r
.last_deep_scrub_stamp
&&
2509 l
.last_clean_scrub_stamp
== r
.last_clean_scrub_stamp
&&
2510 l
.stats
== r
.stats
&&
2511 l
.stats_invalid
== r
.stats_invalid
&&
2512 l
.log_size
== r
.log_size
&&
2513 l
.ondisk_log_size
== r
.ondisk_log_size
&&
2515 l
.acting
== r
.acting
&&
2516 l
.mapping_epoch
== r
.mapping_epoch
&&
2517 l
.blocked_by
== r
.blocked_by
&&
2518 l
.last_became_active
== r
.last_became_active
&&
2519 l
.last_became_peered
== r
.last_became_peered
&&
2520 l
.dirty_stats_invalid
== r
.dirty_stats_invalid
&&
2521 l
.omap_stats_invalid
== r
.omap_stats_invalid
&&
2522 l
.hitset_stats_invalid
== r
.hitset_stats_invalid
&&
2523 l
.hitset_bytes_stats_invalid
== r
.hitset_bytes_stats_invalid
&&
2524 l
.up_primary
== r
.up_primary
&&
2525 l
.acting_primary
== r
.acting_primary
&&
2526 l
.pin_stats_invalid
== r
.pin_stats_invalid
&&
2527 l
.snaptrimq_len
== r
.snaptrimq_len
;
2530 // -- pool_stat_t --
2532 void pool_stat_t::dump(Formatter
*f
) const
2535 f
->dump_int("log_size", log_size
);
2536 f
->dump_int("ondisk_log_size", ondisk_log_size
);
2537 f
->dump_int("up", up
);
2538 f
->dump_int("acting", acting
);
2541 void pool_stat_t::encode(bufferlist
&bl
, uint64_t features
) const
2543 if ((features
& CEPH_FEATURE_OSDENC
) == 0) {
2546 ::encode(stats
, bl
);
2547 ::encode(log_size
, bl
);
2548 ::encode(ondisk_log_size
, bl
);
2552 ENCODE_START(6, 5, bl
);
2553 ::encode(stats
, bl
);
2554 ::encode(log_size
, bl
);
2555 ::encode(ondisk_log_size
, bl
);
2557 ::encode(acting
, bl
);
2561 void pool_stat_t::decode(bufferlist::iterator
&bl
)
2563 DECODE_START_LEGACY_COMPAT_LEN(6, 5, 5, bl
);
2564 if (struct_v
>= 4) {
2565 ::decode(stats
, bl
);
2566 ::decode(log_size
, bl
);
2567 ::decode(ondisk_log_size
, bl
);
2568 if (struct_v
>= 6) {
2570 ::decode(acting
, bl
);
2576 ::decode(stats
.sum
.num_bytes
, bl
);
2578 ::decode(num_kb
, bl
);
2579 ::decode(stats
.sum
.num_objects
, bl
);
2580 ::decode(stats
.sum
.num_object_clones
, bl
);
2581 ::decode(stats
.sum
.num_object_copies
, bl
);
2582 ::decode(stats
.sum
.num_objects_missing_on_primary
, bl
);
2583 ::decode(stats
.sum
.num_objects_degraded
, bl
);
2584 ::decode(log_size
, bl
);
2585 ::decode(ondisk_log_size
, bl
);
2586 if (struct_v
>= 2) {
2587 ::decode(stats
.sum
.num_rd
, bl
);
2588 ::decode(stats
.sum
.num_rd_kb
, bl
);
2589 ::decode(stats
.sum
.num_wr
, bl
);
2590 ::decode(stats
.sum
.num_wr_kb
, bl
);
2592 if (struct_v
>= 3) {
2593 ::decode(stats
.sum
.num_objects_unfound
, bl
);
2599 void pool_stat_t::generate_test_instances(list
<pool_stat_t
*>& o
)
2602 o
.push_back(new pool_stat_t(a
));
2604 list
<object_stat_collection_t
*> l
;
2605 object_stat_collection_t::generate_test_instances(l
);
2606 a
.stats
= *l
.back();
2608 a
.ondisk_log_size
= 456;
2611 o
.push_back(new pool_stat_t(a
));
2615 // -- pg_history_t --
2617 void pg_history_t::encode(bufferlist
&bl
) const
2619 ENCODE_START(9, 4, bl
);
2620 ::encode(epoch_created
, bl
);
2621 ::encode(last_epoch_started
, bl
);
2622 ::encode(last_epoch_clean
, bl
);
2623 ::encode(last_epoch_split
, bl
);
2624 ::encode(same_interval_since
, bl
);
2625 ::encode(same_up_since
, bl
);
2626 ::encode(same_primary_since
, bl
);
2627 ::encode(last_scrub
, bl
);
2628 ::encode(last_scrub_stamp
, bl
);
2629 ::encode(last_deep_scrub
, bl
);
2630 ::encode(last_deep_scrub_stamp
, bl
);
2631 ::encode(last_clean_scrub_stamp
, bl
);
2632 ::encode(last_epoch_marked_full
, bl
);
2633 ::encode(last_interval_started
, bl
);
2634 ::encode(last_interval_clean
, bl
);
2635 ::encode(epoch_pool_created
, bl
);
2639 void pg_history_t::decode(bufferlist::iterator
&bl
)
2641 DECODE_START_LEGACY_COMPAT_LEN(9, 4, 4, bl
);
2642 ::decode(epoch_created
, bl
);
2643 ::decode(last_epoch_started
, bl
);
2645 ::decode(last_epoch_clean
, bl
);
2647 last_epoch_clean
= last_epoch_started
; // careful, it's a lie!
2648 ::decode(last_epoch_split
, bl
);
2649 ::decode(same_interval_since
, bl
);
2650 ::decode(same_up_since
, bl
);
2651 ::decode(same_primary_since
, bl
);
2652 if (struct_v
>= 2) {
2653 ::decode(last_scrub
, bl
);
2654 ::decode(last_scrub_stamp
, bl
);
2656 if (struct_v
>= 5) {
2657 ::decode(last_deep_scrub
, bl
);
2658 ::decode(last_deep_scrub_stamp
, bl
);
2660 if (struct_v
>= 6) {
2661 ::decode(last_clean_scrub_stamp
, bl
);
2663 if (struct_v
>= 7) {
2664 ::decode(last_epoch_marked_full
, bl
);
2666 if (struct_v
>= 8) {
2667 ::decode(last_interval_started
, bl
);
2668 ::decode(last_interval_clean
, bl
);
2670 if (last_epoch_started
>= same_interval_since
) {
2671 last_interval_started
= same_interval_since
;
2673 last_interval_started
= last_epoch_started
; // best guess
2675 if (last_epoch_clean
>= same_interval_since
) {
2676 last_interval_clean
= same_interval_since
;
2678 last_interval_clean
= last_epoch_clean
; // best guess
2681 if (struct_v
>= 9) {
2682 ::decode(epoch_pool_created
, bl
);
2684 epoch_pool_created
= epoch_created
;
2689 void pg_history_t::dump(Formatter
*f
) const
2691 f
->dump_int("epoch_created", epoch_created
);
2692 f
->dump_int("epoch_pool_created", epoch_pool_created
);
2693 f
->dump_int("last_epoch_started", last_epoch_started
);
2694 f
->dump_int("last_interval_started", last_interval_started
);
2695 f
->dump_int("last_epoch_clean", last_epoch_clean
);
2696 f
->dump_int("last_interval_clean", last_interval_clean
);
2697 f
->dump_int("last_epoch_split", last_epoch_split
);
2698 f
->dump_int("last_epoch_marked_full", last_epoch_marked_full
);
2699 f
->dump_int("same_up_since", same_up_since
);
2700 f
->dump_int("same_interval_since", same_interval_since
);
2701 f
->dump_int("same_primary_since", same_primary_since
);
2702 f
->dump_stream("last_scrub") << last_scrub
;
2703 f
->dump_stream("last_scrub_stamp") << last_scrub_stamp
;
2704 f
->dump_stream("last_deep_scrub") << last_deep_scrub
;
2705 f
->dump_stream("last_deep_scrub_stamp") << last_deep_scrub_stamp
;
2706 f
->dump_stream("last_clean_scrub_stamp") << last_clean_scrub_stamp
;
2709 void pg_history_t::generate_test_instances(list
<pg_history_t
*>& o
)
2711 o
.push_back(new pg_history_t
);
2712 o
.push_back(new pg_history_t
);
2713 o
.back()->epoch_created
= 1;
2714 o
.back()->epoch_pool_created
= 1;
2715 o
.back()->last_epoch_started
= 2;
2716 o
.back()->last_interval_started
= 2;
2717 o
.back()->last_epoch_clean
= 3;
2718 o
.back()->last_interval_clean
= 2;
2719 o
.back()->last_epoch_split
= 4;
2720 o
.back()->same_up_since
= 5;
2721 o
.back()->same_interval_since
= 6;
2722 o
.back()->same_primary_since
= 7;
2723 o
.back()->last_scrub
= eversion_t(8, 9);
2724 o
.back()->last_scrub_stamp
= utime_t(10, 11);
2725 o
.back()->last_deep_scrub
= eversion_t(12, 13);
2726 o
.back()->last_deep_scrub_stamp
= utime_t(14, 15);
2727 o
.back()->last_clean_scrub_stamp
= utime_t(16, 17);
2728 o
.back()->last_epoch_marked_full
= 18;
2734 void pg_info_t::encode(bufferlist
&bl
) const
2736 ENCODE_START(32, 26, bl
);
2737 ::encode(pgid
.pgid
, bl
);
2738 ::encode(last_update
, bl
);
2739 ::encode(last_complete
, bl
);
2740 ::encode(log_tail
, bl
);
2741 if (last_backfill_bitwise
&& !last_backfill
.is_max()) {
2742 ::encode(hobject_t(), bl
);
2744 ::encode(last_backfill
, bl
);
2746 ::encode(stats
, bl
);
2748 ::encode(purged_snaps
, bl
);
2749 ::encode(last_epoch_started
, bl
);
2750 ::encode(last_user_version
, bl
);
2751 ::encode(hit_set
, bl
);
2752 ::encode(pgid
.shard
, bl
);
2753 ::encode(last_backfill
, bl
);
2754 ::encode(last_backfill_bitwise
, bl
);
2755 ::encode(last_interval_started
, bl
);
2759 void pg_info_t::decode(bufferlist::iterator
&bl
)
2761 DECODE_START(32, bl
);
2762 ::decode(pgid
.pgid
, bl
);
2763 ::decode(last_update
, bl
);
2764 ::decode(last_complete
, bl
);
2765 ::decode(log_tail
, bl
);
2767 hobject_t old_last_backfill
;
2768 ::decode(old_last_backfill
, bl
);
2770 ::decode(stats
, bl
);
2772 ::decode(purged_snaps
, bl
);
2773 ::decode(last_epoch_started
, bl
);
2774 ::decode(last_user_version
, bl
);
2775 ::decode(hit_set
, bl
);
2776 ::decode(pgid
.shard
, bl
);
2777 ::decode(last_backfill
, bl
);
2778 ::decode(last_backfill_bitwise
, bl
);
2779 if (struct_v
>= 32) {
2780 ::decode(last_interval_started
, bl
);
2782 last_interval_started
= last_epoch_started
;
2789 void pg_info_t::dump(Formatter
*f
) const
2791 f
->dump_stream("pgid") << pgid
;
2792 f
->dump_stream("last_update") << last_update
;
2793 f
->dump_stream("last_complete") << last_complete
;
2794 f
->dump_stream("log_tail") << log_tail
;
2795 f
->dump_int("last_user_version", last_user_version
);
2796 f
->dump_stream("last_backfill") << last_backfill
;
2797 f
->dump_int("last_backfill_bitwise", (int)last_backfill_bitwise
);
2798 f
->open_array_section("purged_snaps");
2799 for (interval_set
<snapid_t
>::const_iterator i
=purged_snaps
.begin();
2800 i
!= purged_snaps
.end();
2802 f
->open_object_section("purged_snap_interval");
2803 f
->dump_stream("start") << i
.get_start();
2804 f
->dump_stream("length") << i
.get_len();
2808 f
->open_object_section("history");
2811 f
->open_object_section("stats");
2815 f
->dump_int("empty", is_empty());
2816 f
->dump_int("dne", dne());
2817 f
->dump_int("incomplete", is_incomplete());
2818 f
->dump_int("last_epoch_started", last_epoch_started
);
2820 f
->open_object_section("hit_set_history");
2825 void pg_info_t::generate_test_instances(list
<pg_info_t
*>& o
)
2827 o
.push_back(new pg_info_t
);
2828 o
.push_back(new pg_info_t
);
2829 list
<pg_history_t
*> h
;
2830 pg_history_t::generate_test_instances(h
);
2831 o
.back()->history
= *h
.back();
2832 o
.back()->pgid
= spg_t(pg_t(1, 2, -1), shard_id_t::NO_SHARD
);
2833 o
.back()->last_update
= eversion_t(3, 4);
2834 o
.back()->last_complete
= eversion_t(5, 6);
2835 o
.back()->last_user_version
= 2;
2836 o
.back()->log_tail
= eversion_t(7, 8);
2837 o
.back()->last_backfill
= hobject_t(object_t("objname"), "key", 123, 456, -1, "");
2838 o
.back()->last_backfill_bitwise
= true;
2841 pg_stat_t::generate_test_instances(s
);
2842 o
.back()->stats
= *s
.back();
2845 list
<pg_hit_set_history_t
*> s
;
2846 pg_hit_set_history_t::generate_test_instances(s
);
2847 o
.back()->hit_set
= *s
.back();
2851 // -- pg_notify_t --
2852 void pg_notify_t::encode(bufferlist
&bl
) const
2854 ENCODE_START(2, 2, bl
);
2855 ::encode(query_epoch
, bl
);
2856 ::encode(epoch_sent
, bl
);
2863 void pg_notify_t::decode(bufferlist::iterator
&bl
)
2865 DECODE_START(2, bl
);
2866 ::decode(query_epoch
, bl
);
2867 ::decode(epoch_sent
, bl
);
2874 void pg_notify_t::dump(Formatter
*f
) const
2876 f
->dump_int("from", from
);
2877 f
->dump_int("to", to
);
2878 f
->dump_unsigned("query_epoch", query_epoch
);
2879 f
->dump_unsigned("epoch_sent", epoch_sent
);
2881 f
->open_object_section("info");
2887 void pg_notify_t::generate_test_instances(list
<pg_notify_t
*>& o
)
2889 o
.push_back(new pg_notify_t(shard_id_t(3), shard_id_t::NO_SHARD
, 1, 1, pg_info_t()));
2890 o
.push_back(new pg_notify_t(shard_id_t(0), shard_id_t(0), 3, 10, pg_info_t()));
2893 ostream
&operator<<(ostream
&lhs
, const pg_notify_t
¬ify
)
2895 lhs
<< "(query:" << notify
.query_epoch
2896 << " sent:" << notify
.epoch_sent
2897 << " " << notify
.info
;
2898 if (notify
.from
!= shard_id_t::NO_SHARD
||
2899 notify
.to
!= shard_id_t::NO_SHARD
)
2900 lhs
<< " " << (unsigned)notify
.from
2901 << "->" << (unsigned)notify
.to
;
2905 // -- pg_interval_t --
2907 void PastIntervals::pg_interval_t::encode(bufferlist
& bl
) const
2909 ENCODE_START(4, 2, bl
);
2910 ::encode(first
, bl
);
2913 ::encode(acting
, bl
);
2914 ::encode(maybe_went_rw
, bl
);
2915 ::encode(primary
, bl
);
2916 ::encode(up_primary
, bl
);
2920 void PastIntervals::pg_interval_t::decode(bufferlist::iterator
& bl
)
2922 DECODE_START_LEGACY_COMPAT_LEN(4, 2, 2, bl
);
2923 ::decode(first
, bl
);
2926 ::decode(acting
, bl
);
2927 ::decode(maybe_went_rw
, bl
);
2928 if (struct_v
>= 3) {
2929 ::decode(primary
, bl
);
2932 primary
= acting
[0];
2934 if (struct_v
>= 4) {
2935 ::decode(up_primary
, bl
);
2943 void PastIntervals::pg_interval_t::dump(Formatter
*f
) const
2945 f
->dump_unsigned("first", first
);
2946 f
->dump_unsigned("last", last
);
2947 f
->dump_int("maybe_went_rw", maybe_went_rw
? 1 : 0);
2948 f
->open_array_section("up");
2949 for (vector
<int>::const_iterator p
= up
.begin(); p
!= up
.end(); ++p
)
2950 f
->dump_int("osd", *p
);
2952 f
->open_array_section("acting");
2953 for (vector
<int>::const_iterator p
= acting
.begin(); p
!= acting
.end(); ++p
)
2954 f
->dump_int("osd", *p
);
2956 f
->dump_int("primary", primary
);
2957 f
->dump_int("up_primary", up_primary
);
2960 void PastIntervals::pg_interval_t::generate_test_instances(list
<pg_interval_t
*>& o
)
2962 o
.push_back(new pg_interval_t
);
2963 o
.push_back(new pg_interval_t
);
2964 o
.back()->up
.push_back(1);
2965 o
.back()->acting
.push_back(2);
2966 o
.back()->acting
.push_back(3);
2967 o
.back()->first
= 4;
2969 o
.back()->maybe_went_rw
= true;
2972 WRITE_CLASS_ENCODER(PastIntervals::pg_interval_t
)
2974 class pi_simple_rep
: public PastIntervals::interval_rep
{
2975 map
<epoch_t
, PastIntervals::pg_interval_t
> interval_map
;
2979 std::list
<PastIntervals::pg_interval_t
> &&intervals
) {
2980 for (auto &&i
: intervals
)
2981 add_interval(ec_pool
, i
);
2985 pi_simple_rep() = default;
2986 pi_simple_rep(const pi_simple_rep
&) = default;
2987 pi_simple_rep(pi_simple_rep
&&) = default;
2988 pi_simple_rep
&operator=(pi_simple_rep
&&) = default;
2989 pi_simple_rep
&operator=(const pi_simple_rep
&) = default;
2991 size_t size() const override
{ return interval_map
.size(); }
2992 bool empty() const override
{ return interval_map
.empty(); }
2993 void clear() override
{ interval_map
.clear(); }
2994 pair
<epoch_t
, epoch_t
> get_bounds() const override
{
2995 auto iter
= interval_map
.begin();
2996 if (iter
!= interval_map
.end()) {
2997 auto riter
= interval_map
.rbegin();
3000 riter
->second
.last
+ 1);
3002 return make_pair(0, 0);
3005 set
<pg_shard_t
> get_all_participants(
3006 bool ec_pool
) const override
{
3007 set
<pg_shard_t
> all_participants
;
3009 // We need to decide who might have unfound objects that we need
3010 auto p
= interval_map
.rbegin();
3011 auto end
= interval_map
.rend();
3012 for (; p
!= end
; ++p
) {
3013 const PastIntervals::pg_interval_t
&interval(p
->second
);
3014 // If nothing changed, we don't care about this interval.
3015 if (!interval
.maybe_went_rw
)
3019 std::vector
<int>::const_iterator a
= interval
.acting
.begin();
3020 std::vector
<int>::const_iterator a_end
= interval
.acting
.end();
3021 for (; a
!= a_end
; ++a
, ++i
) {
3022 pg_shard_t
shard(*a
, ec_pool
? shard_id_t(i
) : shard_id_t::NO_SHARD
);
3023 if (*a
!= CRUSH_ITEM_NONE
)
3024 all_participants
.insert(shard
);
3027 return all_participants
;
3031 const PastIntervals::pg_interval_t
&interval
) override
{
3032 interval_map
[interval
.first
] = interval
;
3034 unique_ptr
<PastIntervals::interval_rep
> clone() const override
{
3035 return unique_ptr
<PastIntervals::interval_rep
>(new pi_simple_rep(*this));
3037 ostream
&print(ostream
&out
) const override
{
3038 return out
<< interval_map
;
3040 void encode(bufferlist
&bl
) const override
{
3041 ::encode(interval_map
, bl
);
3043 void decode(bufferlist::iterator
&bl
) override
{
3044 ::decode(interval_map
, bl
);
3046 void dump(Formatter
*f
) const override
{
3047 f
->open_array_section("PastIntervals::compat_rep");
3048 for (auto &&i
: interval_map
) {
3049 f
->open_object_section("pg_interval_t");
3050 f
->dump_int("epoch", i
.first
);
3051 f
->open_object_section("interval");
3058 bool is_classic() const override
{
3061 static void generate_test_instances(list
<pi_simple_rep
*> &o
) {
3062 using ival
= PastIntervals::pg_interval_t
;
3063 using ivallst
= std::list
<ival
>;
3067 { ival
{{0, 1, 2}, {0, 1, 2}, 10, 20, true, 0, 0}
3068 , ival
{{ 1, 2}, { 1, 2}, 21, 30, true, 1, 1}
3069 , ival
{{ 2}, { 2}, 31, 35, false, 2, 2}
3070 , ival
{{0, 2}, {0, 2}, 36, 50, true, 0, 0}
3075 { ival
{{0, 1, 2}, {0, 1, 2}, 10, 20, true, 0, 0}
3076 , ival
{{ 1, 2}, { 1, 2}, 20, 30, true, 1, 1}
3077 , ival
{{ 2}, { 2}, 31, 35, false, 2, 2}
3078 , ival
{{0, 2}, {0, 2}, 36, 50, true, 0, 0}
3083 { ival
{{2, 1, 0}, {2, 1, 0}, 10, 20, true, 1, 1}
3084 , ival
{{ 0, 2}, { 0, 2}, 21, 30, true, 0, 0}
3085 , ival
{{ 0, 2}, {2, 0}, 31, 35, true, 2, 2}
3086 , ival
{{ 0, 2}, { 0, 2}, 36, 50, true, 0, 0}
3090 void iterate_mayberw_back_to(
3093 std::function
<void(epoch_t
, const set
<pg_shard_t
> &)> &&f
) const override
{
3094 for (auto i
= interval_map
.rbegin(); i
!= interval_map
.rend(); ++i
) {
3095 if (!i
->second
.maybe_went_rw
)
3097 if (i
->second
.last
< les
)
3099 set
<pg_shard_t
> actingset
;
3100 for (unsigned j
= 0; j
< i
->second
.acting
.size(); ++j
) {
3101 if (i
->second
.acting
[j
] == CRUSH_ITEM_NONE
)
3105 i
->second
.acting
[j
],
3106 ec_pool
? shard_id_t(j
) : shard_id_t::NO_SHARD
));
3108 f(i
->second
.first
, actingset
);
3112 bool has_full_intervals() const override
{ return true; }
3113 void iterate_all_intervals(
3114 std::function
<void(const PastIntervals::pg_interval_t
&)> &&f
3116 for (auto &&i
: interval_map
) {
3120 virtual ~pi_simple_rep() override
{}
3126 * PastIntervals only needs to be able to answer two questions:
3127 * 1) Where should the primary look for unfound objects?
3128 * 2) List a set of subsets of the OSDs such that contacting at least
3129 * one from each subset guarrantees we speak to at least one witness
3130 * of any completed write.
3132 * Crucially, 2) does not require keeping *all* past intervals. Certainly,
3133 * we don't need to keep any where maybe_went_rw would be false. We also
3134 * needn't keep two intervals where the actingset in one is a subset
3135 * of the other (only need to keep the smaller of the two sets). In order
3136 * to accurately trim the set of intervals as last_epoch_started changes
3137 * without rebuilding the set from scratch, we'll retain the larger set
3138 * if it in an older interval.
3140 struct compact_interval_t
{
3143 set
<pg_shard_t
> acting
;
3144 bool supersedes(const compact_interval_t
&other
) {
3145 for (auto &&i
: acting
) {
3146 if (!other
.acting
.count(i
))
3151 void dump(Formatter
*f
) const {
3152 f
->open_object_section("compact_interval_t");
3153 f
->dump_stream("first") << first
;
3154 f
->dump_stream("last") << last
;
3155 f
->dump_stream("acting") << acting
;
3158 void encode(bufferlist
&bl
) const {
3159 ENCODE_START(1, 1, bl
);
3160 ::encode(first
, bl
);
3162 ::encode(acting
, bl
);
3165 void decode(bufferlist::iterator
&bl
) {
3166 DECODE_START(1, bl
);
3167 ::decode(first
, bl
);
3169 ::decode(acting
, bl
);
3172 static void generate_test_instances(list
<compact_interval_t
*> & o
) {
3173 /* Not going to be used, we'll generate pi_compact_rep directly */
3176 ostream
&operator<<(ostream
&o
, const compact_interval_t
&rhs
)
3178 return o
<< "([" << rhs
.first
<< "," << rhs
.last
3179 << "] acting " << rhs
.acting
<< ")";
3181 WRITE_CLASS_ENCODER(compact_interval_t
)
3183 class pi_compact_rep
: public PastIntervals::interval_rep
{
3185 epoch_t last
= 0; // inclusive
3186 set
<pg_shard_t
> all_participants
;
3187 list
<compact_interval_t
> intervals
;
3190 std::list
<PastIntervals::pg_interval_t
> &&intervals
) {
3191 for (auto &&i
: intervals
)
3192 add_interval(ec_pool
, i
);
3195 pi_compact_rep() = default;
3196 pi_compact_rep(const pi_compact_rep
&) = default;
3197 pi_compact_rep(pi_compact_rep
&&) = default;
3198 pi_compact_rep
&operator=(const pi_compact_rep
&) = default;
3199 pi_compact_rep
&operator=(pi_compact_rep
&&) = default;
3201 size_t size() const override
{ return intervals
.size(); }
3202 bool empty() const override
{
3203 return first
> last
|| (first
== 0 && last
== 0);
3205 void clear() override
{
3206 *this = pi_compact_rep();
3208 pair
<epoch_t
, epoch_t
> get_bounds() const override
{
3209 return make_pair(first
, last
+ 1);
3211 set
<pg_shard_t
> get_all_participants(
3212 bool ec_pool
) const override
{
3213 return all_participants
;
3216 bool ec_pool
, const PastIntervals::pg_interval_t
&interval
) override
{
3218 first
= interval
.first
;
3219 assert(interval
.last
> last
);
3220 last
= interval
.last
;
3221 set
<pg_shard_t
> acting
;
3222 for (unsigned i
= 0; i
< interval
.acting
.size(); ++i
) {
3223 if (interval
.acting
[i
] == CRUSH_ITEM_NONE
)
3228 ec_pool
? shard_id_t(i
) : shard_id_t::NO_SHARD
));
3230 all_participants
.insert(acting
.begin(), acting
.end());
3231 if (!interval
.maybe_went_rw
)
3233 intervals
.push_back(
3234 compact_interval_t
{interval
.first
, interval
.last
, acting
});
3235 auto plast
= intervals
.end();
3237 for (auto cur
= intervals
.begin(); cur
!= plast
; ) {
3238 if (plast
->supersedes(*cur
)) {
3239 intervals
.erase(cur
++);
3245 unique_ptr
<PastIntervals::interval_rep
> clone() const override
{
3246 return unique_ptr
<PastIntervals::interval_rep
>(new pi_compact_rep(*this));
3248 ostream
&print(ostream
&out
) const override
{
3249 return out
<< "([" << first
<< "," << last
3250 << "] intervals=" << intervals
<< ")";
3252 void encode(bufferlist
&bl
) const override
{
3253 ENCODE_START(1, 1, bl
);
3254 ::encode(first
, bl
);
3256 ::encode(all_participants
, bl
);
3257 ::encode(intervals
, bl
);
3260 void decode(bufferlist::iterator
&bl
) override
{
3261 DECODE_START(1, bl
);
3262 ::decode(first
, bl
);
3264 ::decode(all_participants
, bl
);
3265 ::decode(intervals
, bl
);
3268 void dump(Formatter
*f
) const override
{
3269 f
->open_object_section("PastIntervals::compact_rep");
3270 f
->dump_stream("first") << first
;
3271 f
->dump_stream("last") << last
;
3272 f
->open_array_section("all_participants");
3273 for (auto& i
: all_participants
) {
3274 f
->dump_object("pg_shard", i
);
3277 f
->open_array_section("intervals");
3278 for (auto &&i
: intervals
) {
3284 bool is_classic() const override
{
3287 static void generate_test_instances(list
<pi_compact_rep
*> &o
) {
3288 using ival
= PastIntervals::pg_interval_t
;
3289 using ivallst
= std::list
<ival
>;
3293 { ival
{{0, 1, 2}, {0, 1, 2}, 10, 20, true, 0, 0}
3294 , ival
{{ 1, 2}, { 1, 2}, 21, 30, true, 1, 1}
3295 , ival
{{ 2}, { 2}, 31, 35, false, 2, 2}
3296 , ival
{{0, 2}, {0, 2}, 36, 50, true, 0, 0}
3301 { ival
{{0, 1, 2}, {0, 1, 2}, 10, 20, true, 0, 0}
3302 , ival
{{ 1, 2}, { 1, 2}, 21, 30, true, 1, 1}
3303 , ival
{{ 2}, { 2}, 31, 35, false, 2, 2}
3304 , ival
{{0, 2}, {0, 2}, 36, 50, true, 0, 0}
3309 { ival
{{2, 1, 0}, {2, 1, 0}, 10, 20, true, 1, 1}
3310 , ival
{{ 0, 2}, { 0, 2}, 21, 30, true, 0, 0}
3311 , ival
{{ 0, 2}, {2, 0}, 31, 35, true, 2, 2}
3312 , ival
{{ 0, 2}, { 0, 2}, 36, 50, true, 0, 0}
3315 void iterate_mayberw_back_to(
3318 std::function
<void(epoch_t
, const set
<pg_shard_t
> &)> &&f
) const override
{
3319 for (auto i
= intervals
.rbegin(); i
!= intervals
.rend(); ++i
) {
3322 f(i
->first
, i
->acting
);
3325 virtual ~pi_compact_rep() override
{}
3327 WRITE_CLASS_ENCODER(pi_compact_rep
)
3329 PastIntervals::PastIntervals(const PastIntervals
&rhs
)
3330 : past_intervals(rhs
.past_intervals
?
3331 rhs
.past_intervals
->clone() :
3334 PastIntervals
&PastIntervals::operator=(const PastIntervals
&rhs
)
3336 PastIntervals
other(rhs
);
3341 ostream
& operator<<(ostream
& out
, const PastIntervals
&i
)
3343 if (i
.past_intervals
) {
3344 return i
.past_intervals
->print(out
);
3346 return out
<< "(empty)";
3350 ostream
& operator<<(ostream
& out
, const PastIntervals::PriorSet
&i
)
3352 return out
<< "PriorSet("
3353 << "ec_pool: " << i
.ec_pool
3354 << ", probe: " << i
.probe
3355 << ", down: " << i
.down
3356 << ", blocked_by: " << i
.blocked_by
3357 << ", pg_down: " << i
.pg_down
3361 void PastIntervals::decode(bufferlist::iterator
&bl
)
3363 DECODE_START(1, bl
);
3370 past_intervals
.reset(new pi_simple_rep
);
3371 past_intervals
->decode(bl
);
3374 past_intervals
.reset(new pi_compact_rep
);
3375 past_intervals
->decode(bl
);
3381 void PastIntervals::decode_classic(bufferlist::iterator
&bl
)
3383 past_intervals
.reset(new pi_simple_rep
);
3384 past_intervals
->decode(bl
);
3387 void PastIntervals::generate_test_instances(list
<PastIntervals
*> &o
)
3390 list
<pi_simple_rep
*> simple
;
3391 pi_simple_rep::generate_test_instances(simple
);
3392 for (auto &&i
: simple
) {
3393 // takes ownership of contents
3394 o
.push_back(new PastIntervals(i
));
3398 list
<pi_compact_rep
*> compact
;
3399 pi_compact_rep::generate_test_instances(compact
);
3400 for (auto &&i
: compact
) {
3401 // takes ownership of contents
3402 o
.push_back(new PastIntervals(i
));
3408 void PastIntervals::update_type(bool ec_pool
, bool compact
)
3411 if (!past_intervals
) {
3412 past_intervals
.reset(new pi_simple_rep
);
3414 // we never convert from compact back to classic
3415 assert(is_classic());
3418 if (!past_intervals
) {
3419 past_intervals
.reset(new pi_compact_rep
);
3420 } else if (is_classic()) {
3421 auto old
= std::move(past_intervals
);
3422 past_intervals
.reset(new pi_compact_rep
);
3423 assert(old
->has_full_intervals());
3424 old
->iterate_all_intervals([&](const pg_interval_t
&i
) {
3425 past_intervals
->add_interval(ec_pool
, i
);
3431 void PastIntervals::update_type_from_map(bool ec_pool
, const OSDMap
&osdmap
)
3433 update_type(ec_pool
, osdmap
.require_osd_release
>= CEPH_RELEASE_LUMINOUS
);
3436 bool PastIntervals::is_new_interval(
3437 int old_acting_primary
,
3438 int new_acting_primary
,
3439 const vector
<int> &old_acting
,
3440 const vector
<int> &new_acting
,
3443 const vector
<int> &old_up
,
3444 const vector
<int> &new_up
,
3449 unsigned old_pg_num
,
3450 unsigned new_pg_num
,
3451 bool old_sort_bitwise
,
3452 bool new_sort_bitwise
,
3453 bool old_recovery_deletes
,
3454 bool new_recovery_deletes
,
3456 return old_acting_primary
!= new_acting_primary
||
3457 new_acting
!= old_acting
||
3458 old_up_primary
!= new_up_primary
||
3460 old_min_size
!= new_min_size
||
3461 old_size
!= new_size
||
3462 pgid
.is_split(old_pg_num
, new_pg_num
, 0) ||
3463 old_sort_bitwise
!= new_sort_bitwise
||
3464 old_recovery_deletes
!= new_recovery_deletes
;
3467 bool PastIntervals::is_new_interval(
3468 int old_acting_primary
,
3469 int new_acting_primary
,
3470 const vector
<int> &old_acting
,
3471 const vector
<int> &new_acting
,
3474 const vector
<int> &old_up
,
3475 const vector
<int> &new_up
,
3479 return !(lastmap
->get_pools().count(pgid
.pool())) ||
3480 is_new_interval(old_acting_primary
,
3488 lastmap
->get_pools().find(pgid
.pool())->second
.size
,
3489 osdmap
->get_pools().find(pgid
.pool())->second
.size
,
3490 lastmap
->get_pools().find(pgid
.pool())->second
.min_size
,
3491 osdmap
->get_pools().find(pgid
.pool())->second
.min_size
,
3492 lastmap
->get_pg_num(pgid
.pool()),
3493 osdmap
->get_pg_num(pgid
.pool()),
3494 lastmap
->test_flag(CEPH_OSDMAP_SORTBITWISE
),
3495 osdmap
->test_flag(CEPH_OSDMAP_SORTBITWISE
),
3496 lastmap
->test_flag(CEPH_OSDMAP_RECOVERY_DELETES
),
3497 osdmap
->test_flag(CEPH_OSDMAP_RECOVERY_DELETES
),
3501 bool PastIntervals::check_new_interval(
3502 int old_acting_primary
,
3503 int new_acting_primary
,
3504 const vector
<int> &old_acting
,
3505 const vector
<int> &new_acting
,
3508 const vector
<int> &old_up
,
3509 const vector
<int> &new_up
,
3510 epoch_t same_interval_since
,
3511 epoch_t last_epoch_clean
,
3515 IsPGRecoverablePredicate
*could_have_gone_active
,
3516 PastIntervals
*past_intervals
,
3520 * We have to be careful to gracefully deal with situations like
3521 * so. Say we have a power outage or something that takes out both
3522 * OSDs, but the monitor doesn't mark them down in the same epoch.
3523 * The history may look like
3527 * 3: let's say B dies for good, too (say, from the power spike)
3530 * which makes it look like B may have applied updates to the PG
3531 * that we need in order to proceed. This sucks...
3533 * To minimize the risk of this happening, we CANNOT go active if
3534 * _any_ OSDs in the prior set are down until we send an MOSDAlive
3535 * to the monitor such that the OSDMap sets osd_up_thru to an epoch.
3536 * Then, we have something like
3543 * -> we can ignore B, bc it couldn't have gone active (up_thru still 0).
3553 * -> we must wait for B, bc it was alive through 2, and could have
3554 * written to the pg.
3556 * If B is really dead, then an administrator will need to manually
3557 * intervene by marking the OSD as "lost."
3560 // remember past interval
3561 // NOTE: a change in the up set primary triggers an interval
3562 // change, even though the interval members in the pg_interval_t
3564 assert(past_intervals
);
3565 assert(past_intervals
->past_intervals
);
3566 if (is_new_interval(
3579 i
.first
= same_interval_since
;
3580 i
.last
= osdmap
->get_epoch() - 1;
3581 assert(i
.first
<= i
.last
);
3582 i
.acting
= old_acting
;
3584 i
.primary
= old_acting_primary
;
3585 i
.up_primary
= old_up_primary
;
3587 unsigned num_acting
= 0;
3588 for (vector
<int>::const_iterator p
= i
.acting
.begin(); p
!= i
.acting
.end();
3590 if (*p
!= CRUSH_ITEM_NONE
)
3593 assert(lastmap
->get_pools().count(pgid
.pool()));
3594 const pg_pool_t
& old_pg_pool
= lastmap
->get_pools().find(pgid
.pool())->second
;
3595 set
<pg_shard_t
> old_acting_shards
;
3596 old_pg_pool
.convert_to_pg_shards(old_acting
, &old_acting_shards
);
3600 num_acting
>= old_pg_pool
.min_size
&&
3601 (*could_have_gone_active
)(old_acting_shards
)) {
3603 *out
<< __func__
<< " " << i
3605 << " up_thru " << lastmap
->get_up_thru(i
.primary
)
3606 << " up_from " << lastmap
->get_up_from(i
.primary
)
3607 << " last_epoch_clean " << last_epoch_clean
3609 if (lastmap
->get_up_thru(i
.primary
) >= i
.first
&&
3610 lastmap
->get_up_from(i
.primary
) <= i
.first
) {
3611 i
.maybe_went_rw
= true;
3613 *out
<< __func__
<< " " << i
3614 << " : primary up " << lastmap
->get_up_from(i
.primary
)
3615 << "-" << lastmap
->get_up_thru(i
.primary
)
3616 << " includes interval"
3618 } else if (last_epoch_clean
>= i
.first
&&
3619 last_epoch_clean
<= i
.last
) {
3620 // If the last_epoch_clean is included in this interval, then
3621 // the pg must have been rw (for recovery to have completed).
3622 // This is important because we won't know the _real_
3623 // first_epoch because we stop at last_epoch_clean, and we
3624 // don't want the oldest interval to randomly have
3625 // maybe_went_rw false depending on the relative up_thru vs
3626 // last_epoch_clean timing.
3627 i
.maybe_went_rw
= true;
3629 *out
<< __func__
<< " " << i
3630 << " : includes last_epoch_clean " << last_epoch_clean
3631 << " and presumed to have been rw"
3634 i
.maybe_went_rw
= false;
3636 *out
<< __func__
<< " " << i
3637 << " : primary up " << lastmap
->get_up_from(i
.primary
)
3638 << "-" << lastmap
->get_up_thru(i
.primary
)
3639 << " does not include interval"
3643 i
.maybe_went_rw
= false;
3645 *out
<< __func__
<< " " << i
<< " : acting set is too small" << std::endl
;
3647 past_intervals
->past_intervals
->add_interval(old_pg_pool
.ec_pool(), i
);
3655 // true if the given map affects the prior set
3656 bool PastIntervals::PriorSet::affected_by_map(
3657 const OSDMap
&osdmap
,
3658 const DoutPrefixProvider
*dpp
) const
3660 for (set
<pg_shard_t
>::iterator p
= probe
.begin();
3665 // did someone in the prior set go down?
3666 if (osdmap
.is_down(o
) && down
.count(o
) == 0) {
3667 ldpp_dout(dpp
, 10) << "affected_by_map osd." << o
<< " now down" << dendl
;
3671 // did a down osd in cur get (re)marked as lost?
3672 map
<int, epoch_t
>::const_iterator r
= blocked_by
.find(o
);
3673 if (r
!= blocked_by
.end()) {
3674 if (!osdmap
.exists(o
)) {
3675 ldpp_dout(dpp
, 10) << "affected_by_map osd." << o
<< " no longer exists" << dendl
;
3678 if (osdmap
.get_info(o
).lost_at
!= r
->second
) {
3679 ldpp_dout(dpp
, 10) << "affected_by_map osd." << o
<< " (re)marked as lost" << dendl
;
3685 // did someone in the prior down set go up?
3686 for (set
<int>::const_iterator p
= down
.begin();
3691 if (osdmap
.is_up(o
)) {
3692 ldpp_dout(dpp
, 10) << "affected_by_map osd." << o
<< " now up" << dendl
;
3696 // did someone in the prior set get lost or destroyed?
3697 if (!osdmap
.exists(o
)) {
3698 ldpp_dout(dpp
, 10) << "affected_by_map osd." << o
<< " no longer exists" << dendl
;
3701 // did a down osd in down get (re)marked as lost?
3702 map
<int, epoch_t
>::const_iterator r
= blocked_by
.find(o
);
3703 if (r
!= blocked_by
.end()) {
3704 if (osdmap
.get_info(o
).lost_at
!= r
->second
) {
3705 ldpp_dout(dpp
, 10) << "affected_by_map osd." << o
<< " (re)marked as lost" << dendl
;
3714 ostream
& operator<<(ostream
& out
, const PastIntervals::pg_interval_t
& i
)
3716 out
<< "interval(" << i
.first
<< "-" << i
.last
3717 << " up " << i
.up
<< "(" << i
.up_primary
<< ")"
3718 << " acting " << i
.acting
<< "(" << i
.primary
<< ")";
3719 if (i
.maybe_went_rw
)
3720 out
<< " maybe_went_rw";
3729 void pg_query_t::encode(bufferlist
&bl
, uint64_t features
) const {
3730 ENCODE_START(3, 3, bl
);
3732 ::encode(since
, bl
);
3734 ::encode(epoch_sent
, bl
);
3740 void pg_query_t::decode(bufferlist::iterator
&bl
) {
3741 DECODE_START(3, bl
);
3743 ::decode(since
, bl
);
3745 ::decode(epoch_sent
, bl
);
3751 void pg_query_t::dump(Formatter
*f
) const
3753 f
->dump_int("from", from
);
3754 f
->dump_int("to", to
);
3755 f
->dump_string("type", get_type_name());
3756 f
->dump_stream("since") << since
;
3757 f
->dump_stream("epoch_sent") << epoch_sent
;
3758 f
->open_object_section("history");
3762 void pg_query_t::generate_test_instances(list
<pg_query_t
*>& o
)
3764 o
.push_back(new pg_query_t());
3765 list
<pg_history_t
*> h
;
3766 pg_history_t::generate_test_instances(h
);
3767 o
.push_back(new pg_query_t(pg_query_t::INFO
, shard_id_t(1), shard_id_t(2), *h
.back(), 4));
3768 o
.push_back(new pg_query_t(pg_query_t::MISSING
, shard_id_t(2), shard_id_t(3), *h
.back(), 4));
3769 o
.push_back(new pg_query_t(pg_query_t::LOG
, shard_id_t(0), shard_id_t(0),
3770 eversion_t(4, 5), *h
.back(), 4));
3771 o
.push_back(new pg_query_t(pg_query_t::FULLLOG
,
3772 shard_id_t::NO_SHARD
, shard_id_t::NO_SHARD
,
3776 // -- ObjectModDesc --
3777 void ObjectModDesc::visit(Visitor
*visitor
) const
3779 bufferlist::iterator bp
= bl
.begin();
3782 DECODE_START(max_required_version
, bp
);
3789 visitor
->append(size
);
3793 map
<string
, boost::optional
<bufferlist
> > attrs
;
3794 ::decode(attrs
, bp
);
3795 visitor
->setattrs(attrs
);
3799 version_t old_version
;
3800 ::decode(old_version
, bp
);
3801 visitor
->rmobject(old_version
);
3808 case UPDATE_SNAPS
: {
3809 set
<snapid_t
> snaps
;
3810 ::decode(snaps
, bp
);
3811 visitor
->update_snaps(snaps
);
3815 version_t old_version
;
3816 ::decode(old_version
, bp
);
3817 visitor
->try_rmobject(old_version
);
3820 case ROLLBACK_EXTENTS
: {
3821 vector
<pair
<uint64_t, uint64_t> > extents
;
3824 ::decode(extents
, bp
);
3825 visitor
->rollback_extents(gen
,extents
);
3829 assert(0 == "Invalid rollback code");
3834 assert(0 == "Invalid encoding");
3838 struct DumpVisitor
: public ObjectModDesc::Visitor
{
3840 explicit DumpVisitor(Formatter
*f
) : f(f
) {}
3841 void append(uint64_t old_size
) override
{
3842 f
->open_object_section("op");
3843 f
->dump_string("code", "APPEND");
3844 f
->dump_unsigned("old_size", old_size
);
3847 void setattrs(map
<string
, boost::optional
<bufferlist
> > &attrs
) override
{
3848 f
->open_object_section("op");
3849 f
->dump_string("code", "SETATTRS");
3850 f
->open_array_section("attrs");
3851 for (map
<string
, boost::optional
<bufferlist
> >::iterator i
= attrs
.begin();
3854 f
->dump_string("attr_name", i
->first
);
3859 void rmobject(version_t old_version
) override
{
3860 f
->open_object_section("op");
3861 f
->dump_string("code", "RMOBJECT");
3862 f
->dump_unsigned("old_version", old_version
);
3865 void try_rmobject(version_t old_version
) override
{
3866 f
->open_object_section("op");
3867 f
->dump_string("code", "TRY_RMOBJECT");
3868 f
->dump_unsigned("old_version", old_version
);
3871 void create() override
{
3872 f
->open_object_section("op");
3873 f
->dump_string("code", "CREATE");
3876 void update_snaps(const set
<snapid_t
> &snaps
) override
{
3877 f
->open_object_section("op");
3878 f
->dump_string("code", "UPDATE_SNAPS");
3879 f
->dump_stream("snaps") << snaps
;
3882 void rollback_extents(
3884 const vector
<pair
<uint64_t, uint64_t> > &extents
) override
{
3885 f
->open_object_section("op");
3886 f
->dump_string("code", "ROLLBACK_EXTENTS");
3887 f
->dump_unsigned("gen", gen
);
3888 f
->dump_stream("snaps") << extents
;
3893 void ObjectModDesc::dump(Formatter
*f
) const
3895 f
->open_object_section("object_mod_desc");
3896 f
->dump_bool("can_local_rollback", can_local_rollback
);
3897 f
->dump_bool("rollback_info_completed", rollback_info_completed
);
3899 f
->open_array_section("ops");
3907 void ObjectModDesc::generate_test_instances(list
<ObjectModDesc
*>& o
)
3909 map
<string
, boost::optional
<bufferlist
> > attrs
;
3913 o
.push_back(new ObjectModDesc());
3914 o
.back()->append(100);
3915 o
.back()->setattrs(attrs
);
3916 o
.push_back(new ObjectModDesc());
3917 o
.back()->rmobject(1001);
3918 o
.push_back(new ObjectModDesc());
3920 o
.back()->setattrs(attrs
);
3921 o
.push_back(new ObjectModDesc());
3923 o
.back()->setattrs(attrs
);
3924 o
.back()->mark_unrollbackable();
3925 o
.back()->append(1000);
3928 void ObjectModDesc::encode(bufferlist
&_bl
) const
3930 ENCODE_START(max_required_version
, max_required_version
, _bl
);
3931 ::encode(can_local_rollback
, _bl
);
3932 ::encode(rollback_info_completed
, _bl
);
3936 void ObjectModDesc::decode(bufferlist::iterator
&_bl
)
3938 DECODE_START(2, _bl
);
3939 max_required_version
= struct_v
;
3940 ::decode(can_local_rollback
, _bl
);
3941 ::decode(rollback_info_completed
, _bl
);
3943 // ensure bl does not pin a larger buffer in memory
3945 bl
.reassign_to_mempool(mempool::mempool_osd_pglog
);
3949 // -- pg_log_entry_t --
3951 string
pg_log_entry_t::get_key_name() const
3953 return version
.get_key_name();
3956 void pg_log_entry_t::encode_with_checksum(bufferlist
& bl
) const
3958 bufferlist
ebl(sizeof(*this)*2);
3960 __u32 crc
= ebl
.crc32c(0);
3965 void pg_log_entry_t::decode_with_checksum(bufferlist::iterator
& p
)
3971 if (crc
!= bl
.crc32c(0))
3972 throw buffer::malformed_input("bad checksum on pg_log_entry_t");
3973 bufferlist::iterator q
= bl
.begin();
3977 void pg_log_entry_t::encode(bufferlist
&bl
) const
3979 ENCODE_START(11, 4, bl
);
3982 ::encode(version
, bl
);
3985 * Added with reverting_to:
3986 * Previous code used prior_version to encode
3987 * what we now call reverting_to. This will
3988 * allow older code to decode reverting_to
3989 * into prior_version as expected.
3991 if (op
== LOST_REVERT
)
3992 ::encode(reverting_to
, bl
);
3994 ::encode(prior_version
, bl
);
3996 ::encode(reqid
, bl
);
3997 ::encode(mtime
, bl
);
3998 if (op
== LOST_REVERT
)
3999 ::encode(prior_version
, bl
);
4000 ::encode(snaps
, bl
);
4001 ::encode(user_version
, bl
);
4002 ::encode(mod_desc
, bl
);
4003 ::encode(extra_reqids
, bl
);
4005 ::encode(return_code
, bl
);
4009 void pg_log_entry_t::decode(bufferlist::iterator
&bl
)
4011 DECODE_START_LEGACY_COMPAT_LEN(11, 4, 4, bl
);
4015 ::decode(old_soid
, bl
);
4016 soid
.oid
= old_soid
.oid
;
4017 soid
.snap
= old_soid
.snap
;
4018 invalid_hash
= true;
4023 invalid_hash
= true;
4024 ::decode(version
, bl
);
4026 if (struct_v
>= 6 && op
== LOST_REVERT
)
4027 ::decode(reverting_to
, bl
);
4029 ::decode(prior_version
, bl
);
4031 ::decode(reqid
, bl
);
4033 ::decode(mtime
, bl
);
4035 invalid_pool
= true;
4037 if (op
== LOST_REVERT
) {
4038 if (struct_v
>= 6) {
4039 ::decode(prior_version
, bl
);
4041 reverting_to
= prior_version
;
4044 if (struct_v
>= 7 || // for v >= 7, this is for all ops.
4045 op
== CLONE
) { // for v < 7, it's only present for CLONE.
4046 ::decode(snaps
, bl
);
4047 // ensure snaps does not pin a larger buffer in memory
4049 snaps
.reassign_to_mempool(mempool::mempool_osd_pglog
);
4053 ::decode(user_version
, bl
);
4055 user_version
= version
.version
;
4058 ::decode(mod_desc
, bl
);
4060 mod_desc
.mark_unrollbackable();
4062 ::decode(extra_reqids
, bl
);
4063 if (struct_v
>= 11 && op
== ERROR
)
4064 ::decode(return_code
, bl
);
4068 void pg_log_entry_t::dump(Formatter
*f
) const
4070 f
->dump_string("op", get_op_name());
4071 f
->dump_stream("object") << soid
;
4072 f
->dump_stream("version") << version
;
4073 f
->dump_stream("prior_version") << prior_version
;
4074 f
->dump_stream("reqid") << reqid
;
4075 f
->open_array_section("extra_reqids");
4076 for (auto p
= extra_reqids
.begin();
4077 p
!= extra_reqids
.end();
4079 f
->open_object_section("extra_reqid");
4080 f
->dump_stream("reqid") << p
->first
;
4081 f
->dump_stream("user_version") << p
->second
;
4085 f
->dump_stream("mtime") << mtime
;
4086 f
->dump_int("return_code", return_code
);
4087 if (snaps
.length() > 0) {
4089 bufferlist c
= snaps
;
4090 bufferlist::iterator p
= c
.begin();
4096 f
->open_object_section("snaps");
4097 for (vector
<snapid_t
>::iterator p
= v
.begin(); p
!= v
.end(); ++p
)
4098 f
->dump_unsigned("snap", *p
);
4102 f
->open_object_section("mod_desc");
4108 void pg_log_entry_t::generate_test_instances(list
<pg_log_entry_t
*>& o
)
4110 o
.push_back(new pg_log_entry_t());
4111 hobject_t
oid(object_t("objname"), "key", 123, 456, 0, "");
4112 o
.push_back(new pg_log_entry_t(MODIFY
, oid
, eversion_t(1,2), eversion_t(3,4),
4113 1, osd_reqid_t(entity_name_t::CLIENT(777), 8, 999),
4115 o
.push_back(new pg_log_entry_t(ERROR
, oid
, eversion_t(1,2), eversion_t(3,4),
4116 1, osd_reqid_t(entity_name_t::CLIENT(777), 8, 999),
4117 utime_t(8,9), -ENOENT
));
4120 ostream
& operator<<(ostream
& out
, const pg_log_entry_t
& e
)
4122 out
<< e
.version
<< " (" << e
.prior_version
<< ") "
4123 << std::left
<< std::setw(8) << e
.get_op_name() << ' '
4124 << e
.soid
<< " by " << e
.reqid
<< " " << e
.mtime
4125 << " " << e
.return_code
;
4126 if (e
.snaps
.length()) {
4127 vector
<snapid_t
> snaps
;
4128 bufferlist c
= e
.snaps
;
4129 bufferlist::iterator p
= c
.begin();
4135 out
<< " snaps " << snaps
;
4140 // -- pg_log_dup_t --
4142 string
pg_log_dup_t::get_key_name() const
4144 return "dup_" + version
.get_key_name();
4147 void pg_log_dup_t::encode(bufferlist
&bl
) const
4149 ENCODE_START(1, 1, bl
);
4150 ::encode(reqid
, bl
);
4151 ::encode(version
, bl
);
4152 ::encode(user_version
, bl
);
4153 ::encode(return_code
, bl
);
4157 void pg_log_dup_t::decode(bufferlist::iterator
&bl
)
4159 DECODE_START(1, bl
);
4160 ::decode(reqid
, bl
);
4161 ::decode(version
, bl
);
4162 ::decode(user_version
, bl
);
4163 ::decode(return_code
, bl
);
4167 void pg_log_dup_t::dump(Formatter
*f
) const
4169 f
->dump_stream("reqid") << reqid
;
4170 f
->dump_stream("version") << version
;
4171 f
->dump_stream("user_version") << user_version
;
4172 f
->dump_stream("return_code") << return_code
;
4175 void pg_log_dup_t::generate_test_instances(list
<pg_log_dup_t
*>& o
)
4177 o
.push_back(new pg_log_dup_t());
4178 o
.push_back(new pg_log_dup_t(eversion_t(1,2),
4180 osd_reqid_t(entity_name_t::CLIENT(777), 8, 999),
4182 o
.push_back(new pg_log_dup_t(eversion_t(1,2),
4184 osd_reqid_t(entity_name_t::CLIENT(777), 8, 999),
4189 std::ostream
& operator<<(std::ostream
& out
, const pg_log_dup_t
& e
) {
4190 return out
<< "log_dup(reqid=" << e
.reqid
<<
4191 " v=" << e
.version
<< " uv=" << e
.user_version
<<
4192 " rc=" << e
.return_code
<< ")";
4198 // out: pg_log_t that only has entries that apply to import_pgid using curmap
4199 // reject: Entries rejected from "in" are in the reject.log. Other fields not set.
4200 void pg_log_t::filter_log(spg_t import_pgid
, const OSDMap
&curmap
,
4201 const string
&hit_set_namespace
, const pg_log_t
&in
,
4202 pg_log_t
&out
, pg_log_t
&reject
)
4208 for (list
<pg_log_entry_t
>::const_iterator i
= in
.log
.begin();
4209 i
!= in
.log
.end(); ++i
) {
4211 // Reject pg log entries for temporary objects
4212 if (i
->soid
.is_temp()) {
4213 reject
.log
.push_back(*i
);
4217 if (i
->soid
.nspace
!= hit_set_namespace
) {
4218 object_t oid
= i
->soid
.oid
;
4219 object_locator_t
loc(i
->soid
);
4220 pg_t raw_pgid
= curmap
.object_locator_to_pg(oid
, loc
);
4221 pg_t pgid
= curmap
.raw_pg_to_pg(raw_pgid
);
4223 if (import_pgid
.pgid
== pgid
) {
4224 out
.log
.push_back(*i
);
4226 reject
.log
.push_back(*i
);
4229 out
.log
.push_back(*i
);
4234 void pg_log_t::encode(bufferlist
& bl
) const
4236 ENCODE_START(7, 3, bl
);
4240 ::encode(can_rollback_to
, bl
);
4241 ::encode(rollback_info_trimmed_to
, bl
);
4246 void pg_log_t::decode(bufferlist::iterator
&bl
, int64_t pool
)
4248 DECODE_START_LEGACY_COMPAT_LEN(7, 3, 3, bl
);
4253 ::decode(backlog
, bl
);
4257 ::decode(can_rollback_to
, bl
);
4260 ::decode(rollback_info_trimmed_to
, bl
);
4262 rollback_info_trimmed_to
= tail
;
4269 // handle hobject_t format change
4271 for (list
<pg_log_entry_t
>::iterator i
= log
.begin();
4274 if (!i
->soid
.is_max() && i
->soid
.pool
== -1)
4275 i
->soid
.pool
= pool
;
4280 void pg_log_t::dump(Formatter
*f
) const
4282 f
->dump_stream("head") << head
;
4283 f
->dump_stream("tail") << tail
;
4284 f
->open_array_section("log");
4285 for (list
<pg_log_entry_t
>::const_iterator p
= log
.begin(); p
!= log
.end(); ++p
) {
4286 f
->open_object_section("entry");
4291 f
->open_array_section("dups");
4292 for (const auto& entry
: dups
) {
4293 f
->open_object_section("entry");
4300 void pg_log_t::generate_test_instances(list
<pg_log_t
*>& o
)
4302 o
.push_back(new pg_log_t
);
4304 // this is nonsensical:
4305 o
.push_back(new pg_log_t
);
4306 o
.back()->head
= eversion_t(1,2);
4307 o
.back()->tail
= eversion_t(3,4);
4308 list
<pg_log_entry_t
*> e
;
4309 pg_log_entry_t::generate_test_instances(e
);
4310 for (list
<pg_log_entry_t
*>::iterator p
= e
.begin(); p
!= e
.end(); ++p
)
4311 o
.back()->log
.push_back(**p
);
4314 void pg_log_t::copy_after(const pg_log_t
&other
, eversion_t v
)
4316 can_rollback_to
= other
.can_rollback_to
;
4319 for (list
<pg_log_entry_t
>::const_reverse_iterator i
= other
.log
.rbegin();
4320 i
!= other
.log
.rend();
4322 assert(i
->version
> other
.tail
);
4323 if (i
->version
<= v
) {
4324 // make tail accurate.
4332 void pg_log_t::copy_range(const pg_log_t
&other
, eversion_t from
, eversion_t to
)
4334 can_rollback_to
= other
.can_rollback_to
;
4335 list
<pg_log_entry_t
>::const_reverse_iterator i
= other
.log
.rbegin();
4336 assert(i
!= other
.log
.rend());
4337 while (i
->version
> to
) {
4339 assert(i
!= other
.log
.rend());
4341 assert(i
->version
== to
);
4343 for ( ; i
!= other
.log
.rend(); ++i
) {
4344 if (i
->version
<= from
) {
4352 void pg_log_t::copy_up_to(const pg_log_t
&other
, int max
)
4354 can_rollback_to
= other
.can_rollback_to
;
4358 for (list
<pg_log_entry_t
>::const_reverse_iterator i
= other
.log
.rbegin();
4359 i
!= other
.log
.rend();
4369 ostream
& pg_log_t::print(ostream
& out
) const
4371 out
<< *this << std::endl
;
4372 for (list
<pg_log_entry_t
>::const_iterator p
= log
.begin();
4375 out
<< *p
<< std::endl
;
4376 for (const auto& entry
: dups
) {
4377 out
<< " dup entry: " << entry
<< std::endl
;
4382 // -- pg_missing_t --
4384 ostream
& operator<<(ostream
& out
, const pg_missing_item
& i
)
4387 if (i
.have
!= eversion_t())
4388 out
<< "(" << i
.have
<< ")";
4389 out
<< " flags = " << i
.flag_str();
4393 // -- object_copy_cursor_t --
4395 void object_copy_cursor_t::encode(bufferlist
& bl
) const
4397 ENCODE_START(1, 1, bl
);
4398 ::encode(attr_complete
, bl
);
4399 ::encode(data_offset
, bl
);
4400 ::encode(data_complete
, bl
);
4401 ::encode(omap_offset
, bl
);
4402 ::encode(omap_complete
, bl
);
4406 void object_copy_cursor_t::decode(bufferlist::iterator
&bl
)
4408 DECODE_START(1, bl
);
4409 ::decode(attr_complete
, bl
);
4410 ::decode(data_offset
, bl
);
4411 ::decode(data_complete
, bl
);
4412 ::decode(omap_offset
, bl
);
4413 ::decode(omap_complete
, bl
);
4417 void object_copy_cursor_t::dump(Formatter
*f
) const
4419 f
->dump_unsigned("attr_complete", (int)attr_complete
);
4420 f
->dump_unsigned("data_offset", data_offset
);
4421 f
->dump_unsigned("data_complete", (int)data_complete
);
4422 f
->dump_string("omap_offset", omap_offset
);
4423 f
->dump_unsigned("omap_complete", (int)omap_complete
);
4426 void object_copy_cursor_t::generate_test_instances(list
<object_copy_cursor_t
*>& o
)
4428 o
.push_back(new object_copy_cursor_t
);
4429 o
.push_back(new object_copy_cursor_t
);
4430 o
.back()->attr_complete
= true;
4431 o
.back()->data_offset
= 123;
4432 o
.push_back(new object_copy_cursor_t
);
4433 o
.back()->attr_complete
= true;
4434 o
.back()->data_complete
= true;
4435 o
.back()->omap_offset
= "foo";
4436 o
.push_back(new object_copy_cursor_t
);
4437 o
.back()->attr_complete
= true;
4438 o
.back()->data_complete
= true;
4439 o
.back()->omap_complete
= true;
4442 // -- object_copy_data_t --
4444 void object_copy_data_t::encode(bufferlist
& bl
, uint64_t features
) const
4446 ENCODE_START(7, 5, bl
);
4448 ::encode(mtime
, bl
);
4449 ::encode(attrs
, bl
);
4451 ::encode(omap_data
, bl
);
4452 ::encode(cursor
, bl
);
4453 ::encode(omap_header
, bl
);
4454 ::encode(snaps
, bl
);
4455 ::encode(snap_seq
, bl
);
4456 ::encode(flags
, bl
);
4457 ::encode(data_digest
, bl
);
4458 ::encode(omap_digest
, bl
);
4459 ::encode(reqids
, bl
);
4460 ::encode(truncate_seq
, bl
);
4461 ::encode(truncate_size
, bl
);
4465 void object_copy_data_t::decode(bufferlist::iterator
& bl
)
4467 DECODE_START(7, bl
);
4471 ::decode(mtime
, bl
);
4474 ::decode(category
, bl
); // no longer used
4476 ::decode(attrs
, bl
);
4479 map
<string
,bufferlist
> omap
;
4483 ::encode(omap
, omap_data
);
4485 ::decode(cursor
, bl
);
4487 ::decode(omap_header
, bl
);
4488 if (struct_v
>= 3) {
4489 ::decode(snaps
, bl
);
4490 ::decode(snap_seq
, bl
);
4495 if (struct_v
>= 4) {
4496 ::decode(flags
, bl
);
4497 ::decode(data_digest
, bl
);
4498 ::decode(omap_digest
, bl
);
4503 ::decode(mtime
, bl
);
4504 ::decode(attrs
, bl
);
4506 ::decode(omap_data
, bl
);
4507 ::decode(cursor
, bl
);
4508 ::decode(omap_header
, bl
);
4509 ::decode(snaps
, bl
);
4510 ::decode(snap_seq
, bl
);
4511 if (struct_v
>= 4) {
4512 ::decode(flags
, bl
);
4513 ::decode(data_digest
, bl
);
4514 ::decode(omap_digest
, bl
);
4516 if (struct_v
>= 6) {
4517 ::decode(reqids
, bl
);
4519 if (struct_v
>= 7) {
4520 ::decode(truncate_seq
, bl
);
4521 ::decode(truncate_size
, bl
);
4527 void object_copy_data_t::generate_test_instances(list
<object_copy_data_t
*>& o
)
4529 o
.push_back(new object_copy_data_t());
4531 list
<object_copy_cursor_t
*> cursors
;
4532 object_copy_cursor_t::generate_test_instances(cursors
);
4533 list
<object_copy_cursor_t
*>::iterator ci
= cursors
.begin();
4534 o
.back()->cursor
= **(ci
++);
4536 o
.push_back(new object_copy_data_t());
4537 o
.back()->cursor
= **(ci
++);
4539 o
.push_back(new object_copy_data_t());
4540 o
.back()->size
= 1234;
4541 o
.back()->mtime
.set_from_double(1234);
4542 bufferptr
bp("there", 5);
4545 o
.back()->attrs
["hello"] = bl
;
4546 bufferptr
bp2("not", 3);
4549 map
<string
,bufferlist
> omap
;
4551 ::encode(omap
, o
.back()->omap_data
);
4552 bufferptr
databp("iamsomedatatocontain", 20);
4553 o
.back()->data
.push_back(databp
);
4554 o
.back()->omap_header
.append("this is an omap header");
4555 o
.back()->snaps
.push_back(123);
4556 o
.back()->reqids
.push_back(make_pair(osd_reqid_t(), version_t()));
4559 void object_copy_data_t::dump(Formatter
*f
) const
4561 f
->open_object_section("cursor");
4563 f
->close_section(); // cursor
4564 f
->dump_int("size", size
);
4565 f
->dump_stream("mtime") << mtime
;
4566 /* we should really print out the attrs here, but bufferlist
4567 const-correctness prevents that */
4568 f
->dump_int("attrs_size", attrs
.size());
4569 f
->dump_int("flags", flags
);
4570 f
->dump_unsigned("data_digest", data_digest
);
4571 f
->dump_unsigned("omap_digest", omap_digest
);
4572 f
->dump_int("omap_data_length", omap_data
.length());
4573 f
->dump_int("omap_header_length", omap_header
.length());
4574 f
->dump_int("data_length", data
.length());
4575 f
->open_array_section("snaps");
4576 for (vector
<snapid_t
>::const_iterator p
= snaps
.begin();
4577 p
!= snaps
.end(); ++p
)
4578 f
->dump_unsigned("snap", *p
);
4580 f
->open_array_section("reqids");
4581 for (auto p
= reqids
.begin();
4584 f
->open_object_section("extra_reqid");
4585 f
->dump_stream("reqid") << p
->first
;
4586 f
->dump_stream("user_version") << p
->second
;
4592 // -- pg_create_t --
4594 void pg_create_t::encode(bufferlist
&bl
) const
4596 ENCODE_START(1, 1, bl
);
4597 ::encode(created
, bl
);
4598 ::encode(parent
, bl
);
4599 ::encode(split_bits
, bl
);
4603 void pg_create_t::decode(bufferlist::iterator
&bl
)
4605 DECODE_START(1, bl
);
4606 ::decode(created
, bl
);
4607 ::decode(parent
, bl
);
4608 ::decode(split_bits
, bl
);
4612 void pg_create_t::dump(Formatter
*f
) const
4614 f
->dump_unsigned("created", created
);
4615 f
->dump_stream("parent") << parent
;
4616 f
->dump_int("split_bits", split_bits
);
4619 void pg_create_t::generate_test_instances(list
<pg_create_t
*>& o
)
4621 o
.push_back(new pg_create_t
);
4622 o
.push_back(new pg_create_t(1, pg_t(3, 4, -1), 2));
4626 // -- pg_hit_set_info_t --
4628 void pg_hit_set_info_t::encode(bufferlist
& bl
) const
4630 ENCODE_START(2, 1, bl
);
4631 ::encode(begin
, bl
);
4633 ::encode(version
, bl
);
4634 ::encode(using_gmt
, bl
);
4638 void pg_hit_set_info_t::decode(bufferlist::iterator
& p
)
4643 ::decode(version
, p
);
4644 if (struct_v
>= 2) {
4645 ::decode(using_gmt
, p
);
4652 void pg_hit_set_info_t::dump(Formatter
*f
) const
4654 f
->dump_stream("begin") << begin
;
4655 f
->dump_stream("end") << end
;
4656 f
->dump_stream("version") << version
;
4657 f
->dump_stream("using_gmt") << using_gmt
;
4660 void pg_hit_set_info_t::generate_test_instances(list
<pg_hit_set_info_t
*>& ls
)
4662 ls
.push_back(new pg_hit_set_info_t
);
4663 ls
.push_back(new pg_hit_set_info_t
);
4664 ls
.back()->begin
= utime_t(1, 2);
4665 ls
.back()->end
= utime_t(3, 4);
4669 // -- pg_hit_set_history_t --
4671 void pg_hit_set_history_t::encode(bufferlist
& bl
) const
4673 ENCODE_START(1, 1, bl
);
4674 ::encode(current_last_update
, bl
);
4676 utime_t dummy_stamp
;
4677 ::encode(dummy_stamp
, bl
);
4680 pg_hit_set_info_t dummy_info
;
4681 ::encode(dummy_info
, bl
);
4683 ::encode(history
, bl
);
4687 void pg_hit_set_history_t::decode(bufferlist::iterator
& p
)
4690 ::decode(current_last_update
, p
);
4692 utime_t dummy_stamp
;
4693 ::decode(dummy_stamp
, p
);
4696 pg_hit_set_info_t dummy_info
;
4697 ::decode(dummy_info
, p
);
4699 ::decode(history
, p
);
4703 void pg_hit_set_history_t::dump(Formatter
*f
) const
4705 f
->dump_stream("current_last_update") << current_last_update
;
4706 f
->open_array_section("history");
4707 for (list
<pg_hit_set_info_t
>::const_iterator p
= history
.begin();
4708 p
!= history
.end(); ++p
) {
4709 f
->open_object_section("info");
4716 void pg_hit_set_history_t::generate_test_instances(list
<pg_hit_set_history_t
*>& ls
)
4718 ls
.push_back(new pg_hit_set_history_t
);
4719 ls
.push_back(new pg_hit_set_history_t
);
4720 ls
.back()->current_last_update
= eversion_t(1, 2);
4721 ls
.back()->history
.push_back(pg_hit_set_info_t());
4724 // -- osd_peer_stat_t --
4726 void osd_peer_stat_t::encode(bufferlist
& bl
) const
4728 ENCODE_START(1, 1, bl
);
4729 ::encode(stamp
, bl
);
4733 void osd_peer_stat_t::decode(bufferlist::iterator
& bl
)
4735 DECODE_START(1, bl
);
4736 ::decode(stamp
, bl
);
4740 void osd_peer_stat_t::dump(Formatter
*f
) const
4742 f
->dump_stream("stamp") << stamp
;
4745 void osd_peer_stat_t::generate_test_instances(list
<osd_peer_stat_t
*>& o
)
4747 o
.push_back(new osd_peer_stat_t
);
4748 o
.push_back(new osd_peer_stat_t
);
4749 o
.back()->stamp
= utime_t(1, 2);
4752 ostream
& operator<<(ostream
& out
, const osd_peer_stat_t
&stat
)
4754 return out
<< "stat(" << stat
.stamp
<< ")";
4758 // -- OSDSuperblock --
4760 void OSDSuperblock::encode(bufferlist
&bl
) const
4762 ENCODE_START(8, 5, bl
);
4763 ::encode(cluster_fsid
, bl
);
4764 ::encode(whoami
, bl
);
4765 ::encode(current_epoch
, bl
);
4766 ::encode(oldest_map
, bl
);
4767 ::encode(newest_map
, bl
);
4768 ::encode(weight
, bl
);
4769 compat_features
.encode(bl
);
4770 ::encode(clean_thru
, bl
);
4771 ::encode(mounted
, bl
);
4772 ::encode(osd_fsid
, bl
);
4773 ::encode((epoch_t
)0, bl
); // epoch_t last_epoch_marked_full
4774 ::encode((uint32_t)0, bl
); // map<int64_t,epoch_t> pool_last_epoch_marked_full
4778 void OSDSuperblock::decode(bufferlist::iterator
&bl
)
4780 DECODE_START_LEGACY_COMPAT_LEN(8, 5, 5, bl
);
4783 ::decode(magic
, bl
);
4785 ::decode(cluster_fsid
, bl
);
4786 ::decode(whoami
, bl
);
4787 ::decode(current_epoch
, bl
);
4788 ::decode(oldest_map
, bl
);
4789 ::decode(newest_map
, bl
);
4790 ::decode(weight
, bl
);
4791 if (struct_v
>= 2) {
4792 compat_features
.decode(bl
);
4793 } else { //upgrade it!
4794 compat_features
.incompat
.insert(CEPH_OSD_FEATURE_INCOMPAT_BASE
);
4796 ::decode(clean_thru
, bl
);
4797 ::decode(mounted
, bl
);
4799 ::decode(osd_fsid
, bl
);
4800 if (struct_v
>= 6) {
4801 epoch_t last_map_marked_full
;
4802 ::decode(last_map_marked_full
, bl
);
4804 if (struct_v
>= 7) {
4805 map
<int64_t,epoch_t
> pool_last_map_marked_full
;
4806 ::decode(pool_last_map_marked_full
, bl
);
4811 void OSDSuperblock::dump(Formatter
*f
) const
4813 f
->dump_stream("cluster_fsid") << cluster_fsid
;
4814 f
->dump_stream("osd_fsid") << osd_fsid
;
4815 f
->dump_int("whoami", whoami
);
4816 f
->dump_int("current_epoch", current_epoch
);
4817 f
->dump_int("oldest_map", oldest_map
);
4818 f
->dump_int("newest_map", newest_map
);
4819 f
->dump_float("weight", weight
);
4820 f
->open_object_section("compat");
4821 compat_features
.dump(f
);
4823 f
->dump_int("clean_thru", clean_thru
);
4824 f
->dump_int("last_epoch_mounted", mounted
);
4827 void OSDSuperblock::generate_test_instances(list
<OSDSuperblock
*>& o
)
4830 o
.push_back(new OSDSuperblock(z
));
4831 memset(&z
.cluster_fsid
, 1, sizeof(z
.cluster_fsid
));
4832 memset(&z
.osd_fsid
, 2, sizeof(z
.osd_fsid
));
4834 z
.current_epoch
= 4;
4839 o
.push_back(new OSDSuperblock(z
));
4840 o
.push_back(new OSDSuperblock(z
));
4845 void SnapSet::encode(bufferlist
& bl
) const
4847 ENCODE_START(3, 2, bl
);
4849 ::encode(head_exists
, bl
);
4850 ::encode(snaps
, bl
);
4851 ::encode(clones
, bl
);
4852 ::encode(clone_overlap
, bl
);
4853 ::encode(clone_size
, bl
);
4854 ::encode(clone_snaps
, bl
);
4858 void SnapSet::decode(bufferlist::iterator
& bl
)
4860 DECODE_START_LEGACY_COMPAT_LEN(3, 2, 2, bl
);
4862 ::decode(head_exists
, bl
);
4863 ::decode(snaps
, bl
);
4864 ::decode(clones
, bl
);
4865 ::decode(clone_overlap
, bl
);
4866 ::decode(clone_size
, bl
);
4867 if (struct_v
>= 3) {
4868 ::decode(clone_snaps
, bl
);
4870 clone_snaps
.clear();
4875 void SnapSet::dump(Formatter
*f
) const
4877 SnapContext
sc(seq
, snaps
);
4878 f
->open_object_section("snap_context");
4881 f
->dump_int("head_exists", head_exists
);
4882 f
->open_array_section("clones");
4883 for (vector
<snapid_t
>::const_iterator p
= clones
.begin(); p
!= clones
.end(); ++p
) {
4884 f
->open_object_section("clone");
4885 f
->dump_unsigned("snap", *p
);
4886 f
->dump_unsigned("size", clone_size
.find(*p
)->second
);
4887 f
->dump_stream("overlap") << clone_overlap
.find(*p
)->second
;
4888 auto q
= clone_snaps
.find(*p
);
4889 if (q
!= clone_snaps
.end()) {
4890 f
->open_array_section("snaps");
4891 for (auto s
: q
->second
) {
4892 f
->dump_unsigned("snap", s
);
4901 void SnapSet::generate_test_instances(list
<SnapSet
*>& o
)
4903 o
.push_back(new SnapSet
);
4904 o
.push_back(new SnapSet
);
4905 o
.back()->head_exists
= true;
4906 o
.back()->seq
= 123;
4907 o
.back()->snaps
.push_back(123);
4908 o
.back()->snaps
.push_back(12);
4909 o
.push_back(new SnapSet
);
4910 o
.back()->head_exists
= true;
4911 o
.back()->seq
= 123;
4912 o
.back()->snaps
.push_back(123);
4913 o
.back()->snaps
.push_back(12);
4914 o
.back()->clones
.push_back(12);
4915 o
.back()->clone_size
[12] = 12345;
4916 o
.back()->clone_overlap
[12];
4917 o
.back()->clone_snaps
[12] = {12, 10, 8};
4920 ostream
& operator<<(ostream
& out
, const SnapSet
& cs
)
4922 if (cs
.is_legacy()) {
4923 out
<< cs
.seq
<< "=" << cs
.snaps
<< ":"
4925 << (cs
.head_exists
? "+head":"");
4926 if (!cs
.clone_snaps
.empty()) {
4927 out
<< "+stray_clone_snaps=" << cs
.clone_snaps
;
4931 return out
<< cs
.seq
<< "=" << cs
.snaps
<< ":"
4936 void SnapSet::from_snap_set(const librados::snap_set_t
& ss
, bool legacy
)
4938 // NOTE: our reconstruction of snaps (and the snapc) is not strictly
4939 // correct: it will not include snaps that still logically exist
4940 // but for which there was no clone that is defined. For all
4941 // practical purposes this doesn't matter, since we only use that
4942 // information to clone on the OSD, and we have already moved
4943 // forward past that part of the object history.
4946 set
<snapid_t
> _snaps
;
4947 set
<snapid_t
> _clones
;
4948 head_exists
= false;
4949 for (vector
<librados::clone_info_t
>::const_iterator p
= ss
.clones
.begin();
4950 p
!= ss
.clones
.end();
4952 if (p
->cloneid
== librados::SNAP_HEAD
) {
4955 _clones
.insert(p
->cloneid
);
4956 _snaps
.insert(p
->snaps
.begin(), p
->snaps
.end());
4957 clone_size
[p
->cloneid
] = p
->size
;
4958 clone_overlap
[p
->cloneid
]; // the entry must exist, even if it's empty.
4959 for (vector
<pair
<uint64_t, uint64_t> >::const_iterator q
=
4960 p
->overlap
.begin(); q
!= p
->overlap
.end(); ++q
)
4961 clone_overlap
[p
->cloneid
].insert(q
->first
, q
->second
);
4963 // p->snaps is ascending; clone_snaps is descending
4964 vector
<snapid_t
>& v
= clone_snaps
[p
->cloneid
];
4965 for (auto q
= p
->snaps
.rbegin(); q
!= p
->snaps
.rend(); ++q
) {
4974 clones
.reserve(_clones
.size());
4975 for (set
<snapid_t
>::iterator p
= _clones
.begin(); p
!= _clones
.end(); ++p
)
4976 clones
.push_back(*p
);
4980 snaps
.reserve(_snaps
.size());
4981 for (set
<snapid_t
>::reverse_iterator p
= _snaps
.rbegin();
4982 p
!= _snaps
.rend(); ++p
)
4983 snaps
.push_back(*p
);
4986 uint64_t SnapSet::get_clone_bytes(snapid_t clone
) const
4988 assert(clone_size
.count(clone
));
4989 uint64_t size
= clone_size
.find(clone
)->second
;
4990 assert(clone_overlap
.count(clone
));
4991 const interval_set
<uint64_t> &overlap
= clone_overlap
.find(clone
)->second
;
4992 for (interval_set
<uint64_t>::const_iterator i
= overlap
.begin();
4995 assert(size
>= i
.get_len());
4996 size
-= i
.get_len();
5001 void SnapSet::filter(const pg_pool_t
&pinfo
)
5003 vector
<snapid_t
> oldsnaps
;
5004 oldsnaps
.swap(snaps
);
5005 for (vector
<snapid_t
>::const_iterator i
= oldsnaps
.begin();
5006 i
!= oldsnaps
.end();
5008 if (!pinfo
.is_removed_snap(*i
))
5009 snaps
.push_back(*i
);
5013 SnapSet
SnapSet::get_filtered(const pg_pool_t
&pinfo
) const
5020 // -- watch_info_t --
5022 void watch_info_t::encode(bufferlist
& bl
, uint64_t features
) const
5024 ENCODE_START(4, 3, bl
);
5025 ::encode(cookie
, bl
);
5026 ::encode(timeout_seconds
, bl
);
5027 ::encode(addr
, bl
, features
);
5031 void watch_info_t::decode(bufferlist::iterator
& bl
)
5033 DECODE_START_LEGACY_COMPAT_LEN(4, 3, 3, bl
);
5034 ::decode(cookie
, bl
);
5039 ::decode(timeout_seconds
, bl
);
5040 if (struct_v
>= 4) {
5046 void watch_info_t::dump(Formatter
*f
) const
5048 f
->dump_unsigned("cookie", cookie
);
5049 f
->dump_unsigned("timeout_seconds", timeout_seconds
);
5050 f
->open_object_section("addr");
5055 void watch_info_t::generate_test_instances(list
<watch_info_t
*>& o
)
5057 o
.push_back(new watch_info_t
);
5058 o
.push_back(new watch_info_t
);
5059 o
.back()->cookie
= 123;
5060 o
.back()->timeout_seconds
= 99;
5062 ea
.set_type(entity_addr_t::TYPE_LEGACY
);
5064 ea
.set_family(AF_INET
);
5065 ea
.set_in4_quad(0, 127);
5066 ea
.set_in4_quad(1, 0);
5067 ea
.set_in4_quad(2, 1);
5068 ea
.set_in4_quad(3, 2);
5070 o
.back()->addr
= ea
;
5073 // -- object_manifest_t --
5075 void object_manifest_t::encode(bufferlist
& bl
) const
5077 ENCODE_START(1, 1, bl
);
5080 case TYPE_NONE
: break;
5082 ::encode(redirect_target
, bl
);
5090 void object_manifest_t::decode(bufferlist::iterator
& bl
)
5092 DECODE_START(1, bl
);
5095 case TYPE_NONE
: break;
5097 ::decode(redirect_target
, bl
);
5105 void object_manifest_t::dump(Formatter
*f
) const
5107 f
->dump_unsigned("type", type
);
5108 f
->open_object_section("redirect_target");
5109 redirect_target
.dump(f
);
5113 void object_manifest_t::generate_test_instances(list
<object_manifest_t
*>& o
)
5115 o
.push_back(new object_manifest_t());
5116 o
.back()->type
= TYPE_REDIRECT
;
5119 ostream
& operator<<(ostream
& out
, const object_manifest_t
& om
)
5121 return out
<< "type:" << om
.type
<< " redirect_target:" << om
.redirect_target
;
5124 // -- object_info_t --
5126 void object_info_t::copy_user_bits(const object_info_t
& other
)
5128 // these bits are copied from head->clone.
5130 mtime
= other
.mtime
;
5131 local_mtime
= other
.local_mtime
;
5132 last_reqid
= other
.last_reqid
;
5133 truncate_seq
= other
.truncate_seq
;
5134 truncate_size
= other
.truncate_size
;
5135 flags
= other
.flags
;
5136 user_version
= other
.user_version
;
5137 data_digest
= other
.data_digest
;
5138 omap_digest
= other
.omap_digest
;
5141 ps_t
object_info_t::legacy_object_locator_to_ps(const object_t
&oid
,
5142 const object_locator_t
&loc
) {
5144 if (loc
.key
.length())
5145 // Hack, we don't have the osd map, so we don't really know the hash...
5146 ps
= ceph_str_hash(CEPH_STR_HASH_RJENKINS
, loc
.key
.c_str(),
5149 ps
= ceph_str_hash(CEPH_STR_HASH_RJENKINS
, oid
.name
.c_str(),
5154 void object_info_t::encode(bufferlist
& bl
, uint64_t features
) const
5156 object_locator_t
myoloc(soid
);
5157 map
<entity_name_t
, watch_info_t
> old_watchers
;
5158 for (map
<pair
<uint64_t, entity_name_t
>, watch_info_t
>::const_iterator i
=
5160 i
!= watchers
.end();
5162 old_watchers
.insert(make_pair(i
->first
.second
, i
->second
));
5164 ENCODE_START(17, 8, bl
);
5166 ::encode(myoloc
, bl
); //Retained for compatibility
5167 ::encode((__u32
)0, bl
); // was category, no longer used
5168 ::encode(version
, bl
);
5169 ::encode(prior_version
, bl
);
5170 ::encode(last_reqid
, bl
);
5172 ::encode(mtime
, bl
);
5173 if (soid
.snap
== CEPH_NOSNAP
)
5174 ::encode(osd_reqid_t(), bl
); // used to be wrlock_by
5176 ::encode(legacy_snaps
, bl
);
5177 ::encode(truncate_seq
, bl
);
5178 ::encode(truncate_size
, bl
);
5179 ::encode(is_lost(), bl
);
5180 ::encode(old_watchers
, bl
, features
);
5181 /* shenanigans to avoid breaking backwards compatibility in the disk format.
5182 * When we can, switch this out for simply putting the version_t on disk. */
5183 eversion_t
user_eversion(0, user_version
);
5184 ::encode(user_eversion
, bl
);
5185 ::encode(test_flag(FLAG_USES_TMAP
), bl
);
5186 ::encode(watchers
, bl
, features
);
5187 __u32 _flags
= flags
;
5188 ::encode(_flags
, bl
);
5189 ::encode(local_mtime
, bl
);
5190 ::encode(data_digest
, bl
);
5191 ::encode(omap_digest
, bl
);
5192 ::encode(expected_object_size
, bl
);
5193 ::encode(expected_write_size
, bl
);
5194 ::encode(alloc_hint_flags
, bl
);
5195 if (has_manifest()) {
5196 ::encode(manifest
, bl
);
5201 void object_info_t::decode(bufferlist::iterator
& bl
)
5203 object_locator_t myoloc
;
5204 DECODE_START_LEGACY_COMPAT_LEN(17, 8, 8, bl
);
5205 map
<entity_name_t
, watch_info_t
> old_watchers
;
5207 ::decode(myoloc
, bl
);
5210 ::decode(category
, bl
); // no longer used
5212 ::decode(version
, bl
);
5213 ::decode(prior_version
, bl
);
5214 ::decode(last_reqid
, bl
);
5216 ::decode(mtime
, bl
);
5217 if (soid
.snap
== CEPH_NOSNAP
) {
5218 osd_reqid_t wrlock_by
;
5219 ::decode(wrlock_by
, bl
);
5221 ::decode(legacy_snaps
, bl
);
5223 ::decode(truncate_seq
, bl
);
5224 ::decode(truncate_size
, bl
);
5226 // if this is struct_v >= 13, we will overwrite this
5227 // below since this field is just here for backwards
5233 ::decode(old_watchers
, bl
);
5234 eversion_t user_eversion
;
5235 ::decode(user_eversion
, bl
);
5236 user_version
= user_eversion
.version
;
5238 if (struct_v
>= 9) {
5239 bool uses_tmap
= false;
5240 ::decode(uses_tmap
, bl
);
5242 set_flag(FLAG_USES_TMAP
);
5244 set_flag(FLAG_USES_TMAP
);
5247 soid
.pool
= myoloc
.pool
;
5248 if (struct_v
>= 11) {
5249 ::decode(watchers
, bl
);
5251 for (map
<entity_name_t
, watch_info_t
>::iterator i
= old_watchers
.begin();
5252 i
!= old_watchers
.end();
5256 make_pair(i
->second
.cookie
, i
->first
), i
->second
));
5259 if (struct_v
>= 13) {
5261 ::decode(_flags
, bl
);
5262 flags
= (flag_t
)_flags
;
5264 if (struct_v
>= 14) {
5265 ::decode(local_mtime
, bl
);
5267 local_mtime
= utime_t();
5269 if (struct_v
>= 15) {
5270 ::decode(data_digest
, bl
);
5271 ::decode(omap_digest
, bl
);
5273 data_digest
= omap_digest
= -1;
5274 clear_flag(FLAG_DATA_DIGEST
);
5275 clear_flag(FLAG_OMAP_DIGEST
);
5277 if (struct_v
>= 16) {
5278 ::decode(expected_object_size
, bl
);
5279 ::decode(expected_write_size
, bl
);
5280 ::decode(alloc_hint_flags
, bl
);
5282 expected_object_size
= 0;
5283 expected_write_size
= 0;
5284 alloc_hint_flags
= 0;
5286 if (struct_v
>= 17) {
5287 if (has_manifest()) {
5288 ::decode(manifest
, bl
);
5294 void object_info_t::dump(Formatter
*f
) const
5296 f
->open_object_section("oid");
5299 f
->dump_stream("version") << version
;
5300 f
->dump_stream("prior_version") << prior_version
;
5301 f
->dump_stream("last_reqid") << last_reqid
;
5302 f
->dump_unsigned("user_version", user_version
);
5303 f
->dump_unsigned("size", size
);
5304 f
->dump_stream("mtime") << mtime
;
5305 f
->dump_stream("local_mtime") << local_mtime
;
5306 f
->dump_unsigned("lost", (int)is_lost());
5307 f
->dump_unsigned("flags", (int)flags
);
5308 f
->open_array_section("legacy_snaps");
5309 for (auto s
: legacy_snaps
) {
5310 f
->dump_unsigned("snap", s
);
5313 f
->dump_unsigned("truncate_seq", truncate_seq
);
5314 f
->dump_unsigned("truncate_size", truncate_size
);
5315 f
->dump_unsigned("data_digest", data_digest
);
5316 f
->dump_unsigned("omap_digest", omap_digest
);
5317 f
->dump_unsigned("expected_object_size", expected_object_size
);
5318 f
->dump_unsigned("expected_write_size", expected_write_size
);
5319 f
->dump_unsigned("alloc_hint_flags", alloc_hint_flags
);
5320 f
->dump_object("manifest", manifest
);
5321 f
->open_object_section("watchers");
5322 for (map
<pair
<uint64_t, entity_name_t
>,watch_info_t
>::const_iterator p
=
5323 watchers
.begin(); p
!= watchers
.end(); ++p
) {
5325 ss
<< p
->first
.second
;
5326 f
->open_object_section(ss
.str().c_str());
5333 void object_info_t::generate_test_instances(list
<object_info_t
*>& o
)
5335 o
.push_back(new object_info_t());
5341 ostream
& operator<<(ostream
& out
, const object_info_t
& oi
)
5343 out
<< oi
.soid
<< "(" << oi
.version
5344 << " " << oi
.last_reqid
;
5345 if (oi
.soid
.snap
!= CEPH_NOSNAP
&& !oi
.legacy_snaps
.empty())
5346 out
<< " " << oi
.legacy_snaps
;
5348 out
<< " " << oi
.get_flag_string();
5349 out
<< " s " << oi
.size
;
5350 out
<< " uv " << oi
.user_version
;
5351 if (oi
.is_data_digest())
5352 out
<< " dd " << std::hex
<< oi
.data_digest
<< std::dec
;
5353 if (oi
.is_omap_digest())
5354 out
<< " od " << std::hex
<< oi
.omap_digest
<< std::dec
;
5355 out
<< " alloc_hint [" << oi
.expected_object_size
5356 << " " << oi
.expected_write_size
5357 << " " << oi
.alloc_hint_flags
<< "]";
5358 if (oi
.has_manifest())
5359 out
<< " " << oi
.manifest
;
5365 // -- ObjectRecovery --
5366 void ObjectRecoveryProgress::encode(bufferlist
&bl
) const
5368 ENCODE_START(1, 1, bl
);
5369 ::encode(first
, bl
);
5370 ::encode(data_complete
, bl
);
5371 ::encode(data_recovered_to
, bl
);
5372 ::encode(omap_recovered_to
, bl
);
5373 ::encode(omap_complete
, bl
);
5377 void ObjectRecoveryProgress::decode(bufferlist::iterator
&bl
)
5379 DECODE_START(1, bl
);
5380 ::decode(first
, bl
);
5381 ::decode(data_complete
, bl
);
5382 ::decode(data_recovered_to
, bl
);
5383 ::decode(omap_recovered_to
, bl
);
5384 ::decode(omap_complete
, bl
);
5388 ostream
&operator<<(ostream
&out
, const ObjectRecoveryProgress
&prog
)
5390 return prog
.print(out
);
5393 void ObjectRecoveryProgress::generate_test_instances(
5394 list
<ObjectRecoveryProgress
*>& o
)
5396 o
.push_back(new ObjectRecoveryProgress
);
5397 o
.back()->first
= false;
5398 o
.back()->data_complete
= true;
5399 o
.back()->omap_complete
= true;
5400 o
.back()->data_recovered_to
= 100;
5402 o
.push_back(new ObjectRecoveryProgress
);
5403 o
.back()->first
= true;
5404 o
.back()->data_complete
= false;
5405 o
.back()->omap_complete
= false;
5406 o
.back()->data_recovered_to
= 0;
5409 ostream
&ObjectRecoveryProgress::print(ostream
&out
) const
5411 return out
<< "ObjectRecoveryProgress("
5412 << ( first
? "" : "!" ) << "first, "
5413 << "data_recovered_to:" << data_recovered_to
5414 << ", data_complete:" << ( data_complete
? "true" : "false" )
5415 << ", omap_recovered_to:" << omap_recovered_to
5416 << ", omap_complete:" << ( omap_complete
? "true" : "false" )
5417 << ", error:" << ( error
? "true" : "false" )
5421 void ObjectRecoveryProgress::dump(Formatter
*f
) const
5423 f
->dump_int("first?", first
);
5424 f
->dump_int("data_complete?", data_complete
);
5425 f
->dump_unsigned("data_recovered_to", data_recovered_to
);
5426 f
->dump_int("omap_complete?", omap_complete
);
5427 f
->dump_string("omap_recovered_to", omap_recovered_to
);
5430 void ObjectRecoveryInfo::encode(bufferlist
&bl
, uint64_t features
) const
5432 ENCODE_START(2, 1, bl
);
5434 ::encode(version
, bl
);
5436 ::encode(oi
, bl
, features
);
5438 ::encode(copy_subset
, bl
);
5439 ::encode(clone_subset
, bl
);
5443 void ObjectRecoveryInfo::decode(bufferlist::iterator
&bl
,
5446 DECODE_START(2, bl
);
5448 ::decode(version
, bl
);
5452 ::decode(copy_subset
, bl
);
5453 ::decode(clone_subset
, bl
);
5457 if (!soid
.is_max() && soid
.pool
== -1)
5459 map
<hobject_t
, interval_set
<uint64_t>> tmp
;
5460 tmp
.swap(clone_subset
);
5461 for (map
<hobject_t
, interval_set
<uint64_t>>::iterator i
= tmp
.begin();
5464 hobject_t
first(i
->first
);
5465 if (!first
.is_max() && first
.pool
== -1)
5467 clone_subset
[first
].swap(i
->second
);
5472 void ObjectRecoveryInfo::generate_test_instances(
5473 list
<ObjectRecoveryInfo
*>& o
)
5475 o
.push_back(new ObjectRecoveryInfo
);
5476 o
.back()->soid
= hobject_t(sobject_t("key", CEPH_NOSNAP
));
5477 o
.back()->version
= eversion_t(0,0);
5478 o
.back()->size
= 100;
5482 void ObjectRecoveryInfo::dump(Formatter
*f
) const
5484 f
->dump_stream("object") << soid
;
5485 f
->dump_stream("at_version") << version
;
5486 f
->dump_stream("size") << size
;
5488 f
->open_object_section("object_info");
5493 f
->open_object_section("snapset");
5497 f
->dump_stream("copy_subset") << copy_subset
;
5498 f
->dump_stream("clone_subset") << clone_subset
;
5501 ostream
& operator<<(ostream
& out
, const ObjectRecoveryInfo
&inf
)
5503 return inf
.print(out
);
5506 ostream
&ObjectRecoveryInfo::print(ostream
&out
) const
5508 return out
<< "ObjectRecoveryInfo("
5509 << soid
<< "@" << version
5510 << ", size: " << size
5511 << ", copy_subset: " << copy_subset
5512 << ", clone_subset: " << clone_subset
5513 << ", snapset: " << ss
5517 // -- PushReplyOp --
5518 void PushReplyOp::generate_test_instances(list
<PushReplyOp
*> &o
)
5520 o
.push_back(new PushReplyOp
);
5521 o
.push_back(new PushReplyOp
);
5522 o
.back()->soid
= hobject_t(sobject_t("asdf", 2));
5523 o
.push_back(new PushReplyOp
);
5524 o
.back()->soid
= hobject_t(sobject_t("asdf", CEPH_NOSNAP
));
5527 void PushReplyOp::encode(bufferlist
&bl
) const
5529 ENCODE_START(1, 1, bl
);
5534 void PushReplyOp::decode(bufferlist::iterator
&bl
)
5536 DECODE_START(1, bl
);
5541 void PushReplyOp::dump(Formatter
*f
) const
5543 f
->dump_stream("soid") << soid
;
5546 ostream
&PushReplyOp::print(ostream
&out
) const
5549 << "PushReplyOp(" << soid
5553 ostream
& operator<<(ostream
& out
, const PushReplyOp
&op
)
5555 return op
.print(out
);
5558 uint64_t PushReplyOp::cost(CephContext
*cct
) const
5561 return cct
->_conf
->osd_push_per_object_cost
+
5562 cct
->_conf
->osd_recovery_max_chunk
;
5566 void PullOp::generate_test_instances(list
<PullOp
*> &o
)
5568 o
.push_back(new PullOp
);
5569 o
.push_back(new PullOp
);
5570 o
.back()->soid
= hobject_t(sobject_t("asdf", 2));
5571 o
.back()->recovery_info
.version
= eversion_t(3, 10);
5572 o
.push_back(new PullOp
);
5573 o
.back()->soid
= hobject_t(sobject_t("asdf", CEPH_NOSNAP
));
5574 o
.back()->recovery_info
.version
= eversion_t(0, 0);
5577 void PullOp::encode(bufferlist
&bl
, uint64_t features
) const
5579 ENCODE_START(1, 1, bl
);
5581 ::encode(recovery_info
, bl
, features
);
5582 ::encode(recovery_progress
, bl
);
5586 void PullOp::decode(bufferlist::iterator
&bl
)
5588 DECODE_START(1, bl
);
5590 ::decode(recovery_info
, bl
);
5591 ::decode(recovery_progress
, bl
);
5595 void PullOp::dump(Formatter
*f
) const
5597 f
->dump_stream("soid") << soid
;
5599 f
->open_object_section("recovery_info");
5600 recovery_info
.dump(f
);
5604 f
->open_object_section("recovery_progress");
5605 recovery_progress
.dump(f
);
5610 ostream
&PullOp::print(ostream
&out
) const
5613 << "PullOp(" << soid
5614 << ", recovery_info: " << recovery_info
5615 << ", recovery_progress: " << recovery_progress
5619 ostream
& operator<<(ostream
& out
, const PullOp
&op
)
5621 return op
.print(out
);
5624 uint64_t PullOp::cost(CephContext
*cct
) const
5626 return cct
->_conf
->osd_push_per_object_cost
+
5627 cct
->_conf
->osd_recovery_max_chunk
;
5631 void PushOp::generate_test_instances(list
<PushOp
*> &o
)
5633 o
.push_back(new PushOp
);
5634 o
.push_back(new PushOp
);
5635 o
.back()->soid
= hobject_t(sobject_t("asdf", 2));
5636 o
.back()->version
= eversion_t(3, 10);
5637 o
.push_back(new PushOp
);
5638 o
.back()->soid
= hobject_t(sobject_t("asdf", CEPH_NOSNAP
));
5639 o
.back()->version
= eversion_t(0, 0);
5642 void PushOp::encode(bufferlist
&bl
, uint64_t features
) const
5644 ENCODE_START(1, 1, bl
);
5646 ::encode(version
, bl
);
5648 ::encode(data_included
, bl
);
5649 ::encode(omap_header
, bl
);
5650 ::encode(omap_entries
, bl
);
5651 ::encode(attrset
, bl
);
5652 ::encode(recovery_info
, bl
, features
);
5653 ::encode(after_progress
, bl
);
5654 ::encode(before_progress
, bl
);
5658 void PushOp::decode(bufferlist::iterator
&bl
)
5660 DECODE_START(1, bl
);
5662 ::decode(version
, bl
);
5664 ::decode(data_included
, bl
);
5665 ::decode(omap_header
, bl
);
5666 ::decode(omap_entries
, bl
);
5667 ::decode(attrset
, bl
);
5668 ::decode(recovery_info
, bl
);
5669 ::decode(after_progress
, bl
);
5670 ::decode(before_progress
, bl
);
5674 void PushOp::dump(Formatter
*f
) const
5676 f
->dump_stream("soid") << soid
;
5677 f
->dump_stream("version") << version
;
5678 f
->dump_int("data_len", data
.length());
5679 f
->dump_stream("data_included") << data_included
;
5680 f
->dump_int("omap_header_len", omap_header
.length());
5681 f
->dump_int("omap_entries_len", omap_entries
.size());
5682 f
->dump_int("attrset_len", attrset
.size());
5684 f
->open_object_section("recovery_info");
5685 recovery_info
.dump(f
);
5689 f
->open_object_section("after_progress");
5690 after_progress
.dump(f
);
5694 f
->open_object_section("before_progress");
5695 before_progress
.dump(f
);
5700 ostream
&PushOp::print(ostream
&out
) const
5703 << "PushOp(" << soid
5704 << ", version: " << version
5705 << ", data_included: " << data_included
5706 << ", data_size: " << data
.length()
5707 << ", omap_header_size: " << omap_header
.length()
5708 << ", omap_entries_size: " << omap_entries
.size()
5709 << ", attrset_size: " << attrset
.size()
5710 << ", recovery_info: " << recovery_info
5711 << ", after_progress: " << after_progress
5712 << ", before_progress: " << before_progress
5716 ostream
& operator<<(ostream
& out
, const PushOp
&op
)
5718 return op
.print(out
);
5721 uint64_t PushOp::cost(CephContext
*cct
) const
5723 uint64_t cost
= data_included
.size();
5724 for (map
<string
, bufferlist
>::const_iterator i
=
5725 omap_entries
.begin();
5726 i
!= omap_entries
.end();
5728 cost
+= i
->second
.length();
5730 cost
+= cct
->_conf
->osd_push_per_object_cost
;
5736 void ScrubMap::merge_incr(const ScrubMap
&l
)
5738 assert(valid_through
== l
.incr_since
);
5739 valid_through
= l
.valid_through
;
5741 for (map
<hobject_t
,object
>::const_iterator p
= l
.objects
.begin();
5742 p
!= l
.objects
.end();
5744 if (p
->second
.negative
) {
5745 map
<hobject_t
,object
>::iterator q
= objects
.find(p
->first
);
5746 if (q
!= objects
.end()) {
5750 objects
[p
->first
] = p
->second
;
5755 void ScrubMap::encode(bufferlist
& bl
) const
5757 ENCODE_START(3, 2, bl
);
5758 ::encode(objects
, bl
);
5759 ::encode((__u32
)0, bl
); // used to be attrs; now deprecated
5760 bufferlist old_logbl
; // not used
5761 ::encode(old_logbl
, bl
);
5762 ::encode(valid_through
, bl
);
5763 ::encode(incr_since
, bl
);
5767 void ScrubMap::decode(bufferlist::iterator
& bl
, int64_t pool
)
5769 DECODE_START_LEGACY_COMPAT_LEN(3, 2, 2, bl
);
5770 ::decode(objects
, bl
);
5772 map
<string
,string
> attrs
; // deprecated
5773 ::decode(attrs
, bl
);
5775 bufferlist old_logbl
; // not used
5776 ::decode(old_logbl
, bl
);
5777 ::decode(valid_through
, bl
);
5778 ::decode(incr_since
, bl
);
5781 // handle hobject_t upgrade
5783 map
<hobject_t
, object
> tmp
;
5785 for (map
<hobject_t
, object
>::iterator i
= tmp
.begin();
5788 hobject_t
first(i
->first
);
5789 if (!first
.is_max() && first
.pool
== -1)
5791 objects
[first
] = i
->second
;
5796 void ScrubMap::dump(Formatter
*f
) const
5798 f
->dump_stream("valid_through") << valid_through
;
5799 f
->dump_stream("incremental_since") << incr_since
;
5800 f
->open_array_section("objects");
5801 for (map
<hobject_t
,object
>::const_iterator p
= objects
.begin(); p
!= objects
.end(); ++p
) {
5802 f
->open_object_section("object");
5803 f
->dump_string("name", p
->first
.oid
.name
);
5804 f
->dump_unsigned("hash", p
->first
.get_hash());
5805 f
->dump_string("key", p
->first
.get_key());
5806 f
->dump_int("snapid", p
->first
.snap
);
5813 void ScrubMap::generate_test_instances(list
<ScrubMap
*>& o
)
5815 o
.push_back(new ScrubMap
);
5816 o
.push_back(new ScrubMap
);
5817 o
.back()->valid_through
= eversion_t(1, 2);
5818 o
.back()->incr_since
= eversion_t(3, 4);
5820 object::generate_test_instances(obj
);
5821 o
.back()->objects
[hobject_t(object_t("foo"), "fookey", 123, 456, 0, "")] = *obj
.back();
5823 o
.back()->objects
[hobject_t(object_t("bar"), string(), 123, 456, 0, "")] = *obj
.back();
5826 // -- ScrubMap::object --
5828 void ScrubMap::object::encode(bufferlist
& bl
) const
5830 bool compat_read_error
= read_error
|| ec_hash_mismatch
|| ec_size_mismatch
;
5831 ENCODE_START(8, 7, bl
);
5833 ::encode(negative
, bl
);
5834 ::encode(attrs
, bl
);
5835 ::encode(digest
, bl
);
5836 ::encode(digest_present
, bl
);
5837 ::encode((uint32_t)0, bl
); // obsolete nlinks
5838 ::encode((uint32_t)0, bl
); // snapcolls
5839 ::encode(omap_digest
, bl
);
5840 ::encode(omap_digest_present
, bl
);
5841 ::encode(compat_read_error
, bl
);
5842 ::encode(stat_error
, bl
);
5843 ::encode(read_error
, bl
);
5844 ::encode(ec_hash_mismatch
, bl
);
5845 ::encode(ec_size_mismatch
, bl
);
5849 void ScrubMap::object::decode(bufferlist::iterator
& bl
)
5851 DECODE_START(8, bl
);
5853 bool tmp
, compat_read_error
= false;
5856 ::decode(attrs
, bl
);
5857 ::decode(digest
, bl
);
5859 digest_present
= tmp
;
5862 ::decode(nlinks
, bl
);
5863 set
<snapid_t
> snapcolls
;
5864 ::decode(snapcolls
, bl
);
5866 ::decode(omap_digest
, bl
);
5868 omap_digest_present
= tmp
;
5869 ::decode(compat_read_error
, bl
);
5872 if (struct_v
>= 8) {
5876 ec_hash_mismatch
= tmp
;
5878 ec_size_mismatch
= tmp
;
5880 // If older encoder found a read_error, set read_error
5881 if (compat_read_error
&& !read_error
&& !ec_hash_mismatch
&& !ec_size_mismatch
)
5886 void ScrubMap::object::dump(Formatter
*f
) const
5888 f
->dump_int("size", size
);
5889 f
->dump_int("negative", negative
);
5890 f
->open_array_section("attrs");
5891 for (map
<string
,bufferptr
>::const_iterator p
= attrs
.begin(); p
!= attrs
.end(); ++p
) {
5892 f
->open_object_section("attr");
5893 f
->dump_string("name", p
->first
);
5894 f
->dump_int("length", p
->second
.length());
5900 void ScrubMap::object::generate_test_instances(list
<object
*>& o
)
5902 o
.push_back(new object
);
5903 o
.push_back(new object
);
5904 o
.back()->negative
= true;
5905 o
.push_back(new object
);
5906 o
.back()->size
= 123;
5907 o
.back()->attrs
["foo"] = buffer::copy("foo", 3);
5908 o
.back()->attrs
["bar"] = buffer::copy("barval", 6);
5913 ostream
& operator<<(ostream
& out
, const OSDOp
& op
)
5915 out
<< ceph_osd_op_name(op
.op
.op
);
5916 if (ceph_osd_op_type_data(op
.op
.op
)) {
5919 case CEPH_OSD_OP_ASSERT_VER
:
5920 out
<< " v" << op
.op
.assert_ver
.ver
;
5922 case CEPH_OSD_OP_TRUNCATE
:
5923 out
<< " " << op
.op
.extent
.offset
;
5925 case CEPH_OSD_OP_MASKTRUNC
:
5926 case CEPH_OSD_OP_TRIMTRUNC
:
5927 out
<< " " << op
.op
.extent
.truncate_seq
<< "@"
5928 << (int64_t)op
.op
.extent
.truncate_size
;
5930 case CEPH_OSD_OP_ROLLBACK
:
5931 out
<< " " << snapid_t(op
.op
.snap
.snapid
);
5933 case CEPH_OSD_OP_WATCH
:
5934 out
<< " " << ceph_osd_watch_op_name(op
.op
.watch
.op
)
5935 << " cookie " << op
.op
.watch
.cookie
;
5936 if (op
.op
.watch
.gen
)
5937 out
<< " gen " << op
.op
.watch
.gen
;
5939 case CEPH_OSD_OP_NOTIFY
:
5940 case CEPH_OSD_OP_NOTIFY_ACK
:
5941 out
<< " cookie " << op
.op
.notify
.cookie
;
5943 case CEPH_OSD_OP_COPY_GET
:
5944 out
<< " max " << op
.op
.copy_get
.max
;
5946 case CEPH_OSD_OP_COPY_FROM
:
5947 out
<< " ver " << op
.op
.copy_from
.src_version
;
5949 case CEPH_OSD_OP_SETALLOCHINT
:
5950 out
<< " object_size " << op
.op
.alloc_hint
.expected_object_size
5951 << " write_size " << op
.op
.alloc_hint
.expected_write_size
;
5953 case CEPH_OSD_OP_READ
:
5954 case CEPH_OSD_OP_SPARSE_READ
:
5955 case CEPH_OSD_OP_SYNC_READ
:
5956 case CEPH_OSD_OP_WRITE
:
5957 case CEPH_OSD_OP_WRITEFULL
:
5958 case CEPH_OSD_OP_ZERO
:
5959 case CEPH_OSD_OP_APPEND
:
5960 case CEPH_OSD_OP_MAPEXT
:
5961 out
<< " " << op
.op
.extent
.offset
<< "~" << op
.op
.extent
.length
;
5962 if (op
.op
.extent
.truncate_seq
)
5963 out
<< " [" << op
.op
.extent
.truncate_seq
<< "@"
5964 << (int64_t)op
.op
.extent
.truncate_size
<< "]";
5966 out
<< " [" << ceph_osd_op_flag_string(op
.op
.flags
) << "]";
5968 // don't show any arg info
5971 } else if (ceph_osd_op_type_attr(op
.op
.op
)) {
5973 if (op
.op
.xattr
.name_len
&& op
.indata
.length()) {
5975 op
.indata
.write(0, op
.op
.xattr
.name_len
, out
);
5977 if (op
.op
.xattr
.value_len
)
5978 out
<< " (" << op
.op
.xattr
.value_len
<< ")";
5979 if (op
.op
.op
== CEPH_OSD_OP_CMPXATTR
)
5980 out
<< " op " << (int)op
.op
.xattr
.cmp_op
5981 << " mode " << (int)op
.op
.xattr
.cmp_mode
;
5982 } else if (ceph_osd_op_type_exec(op
.op
.op
)) {
5984 if (op
.op
.cls
.class_len
&& op
.indata
.length()) {
5986 op
.indata
.write(0, op
.op
.cls
.class_len
, out
);
5988 op
.indata
.write(op
.op
.cls
.class_len
, op
.op
.cls
.method_len
, out
);
5990 } else if (ceph_osd_op_type_pg(op
.op
.op
)) {
5992 case CEPH_OSD_OP_PGLS
:
5993 case CEPH_OSD_OP_PGLS_FILTER
:
5994 case CEPH_OSD_OP_PGNLS
:
5995 case CEPH_OSD_OP_PGNLS_FILTER
:
5996 out
<< " start_epoch " << op
.op
.pgls
.start_epoch
;
5998 case CEPH_OSD_OP_PG_HITSET_LS
:
6000 case CEPH_OSD_OP_PG_HITSET_GET
:
6001 out
<< " " << utime_t(op
.op
.hit_set_get
.stamp
);
6003 case CEPH_OSD_OP_SCRUBLS
:
6011 void OSDOp::split_osd_op_vector_in_data(vector
<OSDOp
>& ops
, bufferlist
& in
)
6013 bufferlist::iterator datap
= in
.begin();
6014 for (unsigned i
= 0; i
< ops
.size(); i
++) {
6015 if (ops
[i
].op
.payload_len
) {
6016 datap
.copy(ops
[i
].op
.payload_len
, ops
[i
].indata
);
6021 void OSDOp::merge_osd_op_vector_in_data(vector
<OSDOp
>& ops
, bufferlist
& out
)
6023 for (unsigned i
= 0; i
< ops
.size(); i
++) {
6024 if (ops
[i
].indata
.length()) {
6025 ops
[i
].op
.payload_len
= ops
[i
].indata
.length();
6026 out
.append(ops
[i
].indata
);
6031 void OSDOp::split_osd_op_vector_out_data(vector
<OSDOp
>& ops
, bufferlist
& in
)
6033 bufferlist::iterator datap
= in
.begin();
6034 for (unsigned i
= 0; i
< ops
.size(); i
++) {
6035 if (ops
[i
].op
.payload_len
) {
6036 datap
.copy(ops
[i
].op
.payload_len
, ops
[i
].outdata
);
6041 void OSDOp::merge_osd_op_vector_out_data(vector
<OSDOp
>& ops
, bufferlist
& out
)
6043 for (unsigned i
= 0; i
< ops
.size(); i
++) {
6044 if (ops
[i
].outdata
.length()) {
6045 ops
[i
].op
.payload_len
= ops
[i
].outdata
.length();
6046 out
.append(ops
[i
].outdata
);
6051 bool store_statfs_t::operator==(const store_statfs_t
& other
) const
6053 return total
== other
.total
6054 && available
== other
.available
6055 && allocated
== other
.allocated
6056 && stored
== other
.stored
6057 && compressed
== other
.compressed
6058 && compressed_allocated
== other
.compressed_allocated
6059 && compressed_original
== other
.compressed_original
;
6062 void store_statfs_t::dump(Formatter
*f
) const
6064 f
->dump_int("total", total
);
6065 f
->dump_int("available", available
);
6066 f
->dump_int("allocated", allocated
);
6067 f
->dump_int("stored", stored
);
6068 f
->dump_int("compressed", compressed
);
6069 f
->dump_int("compressed_allocated", compressed_allocated
);
6070 f
->dump_int("compressed_original", compressed_original
);
6073 ostream
& operator<<(ostream
& out
, const store_statfs_t
&s
)
6076 << "store_statfs(0x" << s
.available
6078 << ", stored 0x" << s
.stored
6079 << "/0x" << s
.allocated
6080 << ", compress 0x" << s
.compressed
6081 << "/0x" << s
.compressed_allocated
6082 << "/0x" << s
.compressed_original
6088 void OSDOp::clear_data(vector
<OSDOp
>& ops
)
6090 for (unsigned i
= 0; i
< ops
.size(); i
++) {
6093 if (ceph_osd_op_type_attr(op
.op
.op
) &&
6094 op
.op
.xattr
.name_len
&&
6095 op
.indata
.length() >= op
.op
.xattr
.name_len
) {
6096 bufferptr
bp(op
.op
.xattr
.name_len
);
6099 bl
.copy_in(0, op
.op
.xattr
.name_len
, op
.indata
);
6100 op
.indata
.claim(bl
);
6101 } else if (ceph_osd_op_type_exec(op
.op
.op
) &&
6102 op
.op
.cls
.class_len
&&
6103 op
.indata
.length() >
6104 (op
.op
.cls
.class_len
+ op
.op
.cls
.method_len
)) {
6105 __u8 len
= op
.op
.cls
.class_len
+ op
.op
.cls
.method_len
;
6109 bl
.copy_in(0, len
, op
.indata
);
6110 op
.indata
.claim(bl
);