// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2014 Red Hat
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */
#ifndef CEPH_OSD_BLUESTORE_BLUESTORE_TYPES_H
#define CEPH_OSD_BLUESTORE_BLUESTORE_TYPES_H

#include "include/types.h"
#include "include/interval_set.h"
#include "include/utime.h"
#include "common/hobject.h"
#include "compressor/Compressor.h"
#include "common/Checksummer.h"
#include "include/mempool.h"

/// label for block device
struct bluestore_bdev_label_t {
  uuid_d osd_uuid;     ///< osd uuid
  uint64_t size;       ///< device size
  utime_t btime;       ///< birth time
  string description;  ///< device description

  void encode(bufferlist& bl) const;
  void decode(bufferlist::iterator& p);
  void dump(Formatter *f) const;
  static void generate_test_instances(list<bluestore_bdev_label_t*>& o);
};
WRITE_CLASS_ENCODER(bluestore_bdev_label_t)

ostream& operator<<(ostream& out, const bluestore_bdev_label_t& l);

/// collection metadata
struct bluestore_cnode_t {
  uint32_t bits;  ///< how many bits of coll pgid are significant

  explicit bluestore_cnode_t(int b=0) : bits(b) {}

  DENC(bluestore_cnode_t, v, p) {
    DENC_START(1, 1, p);
    denc(v.bits, p);
    DENC_FINISH(p);
  }
  void dump(Formatter *f) const;
  static void generate_test_instances(list<bluestore_cnode_t*>& o);
};
WRITE_CLASS_DENC(bluestore_cnode_t)
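
// Illustrative sketch (assumption, not from this header): a collection whose
// pgid covers N significant hash bits stores bits = N; an object falls into
// the collection iff the low N bits of its hash match the pgid.  'obj_hash'
// and 'pgid_seed' below are hypothetical names:
//
//   bluestore_cnode_t cnode(3);
//   uint32_t mask = (1u << cnode.bits) - 1;   // low 3 bits are significant
//   bool in_coll = ((obj_hash & mask) == (pgid_seed & mask));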

class AllocExtent;
typedef mempool::bluestore_alloc::vector<AllocExtent> AllocExtentVector;

class AllocExtent {
public:
  int64_t offset;   // BlockDevice offset
  uint32_t length;  // byte length

  AllocExtent() : offset(0), length(0) { }
  AllocExtent(int64_t off, int32_t len) : offset(off), length(len) { }

  uint64_t end() const {
    return offset + length;
  }

  bool operator==(const AllocExtent& other) const {
    return offset == other.offset && length == other.length;
  }
};

inline static ostream& operator<<(ostream& out, const AllocExtent& e) {
  return out << "0x" << std::hex << e.offset << "~" << e.length << std::dec;
}

class ExtentList {
  AllocExtentVector *m_extents;
  int64_t m_block_size;
  int64_t m_max_blocks;

public:
  void init(AllocExtentVector *extents, int64_t block_size,
            uint64_t max_alloc_size) {
    m_extents = extents;
    m_block_size = block_size;
    m_max_blocks = max_alloc_size / block_size;
    assert(m_extents->empty());
  }

  ExtentList(AllocExtentVector *extents, int64_t block_size) {
    init(extents, block_size, 0);
  }

  ExtentList(AllocExtentVector *extents, int64_t block_size,
             uint64_t max_alloc_size) {
    init(extents, block_size, max_alloc_size);
  }

  void add_extents(int64_t start, int64_t count);

  AllocExtentVector *get_extents() {
    return m_extents;
  }

  std::pair<int64_t, int64_t> get_nth_extent(int index) {
    return std::make_pair
      ((*m_extents)[index].offset / m_block_size,
       (*m_extents)[index].length / m_block_size);
  }

  int64_t get_extent_count() {
    return m_extents->size();
  }
};
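
// Illustrative sketch (assumption: add_extents() takes a range in block
// units and appends/merges byte-granularity AllocExtents):
//
//   AllocExtentVector v;
//   ExtentList el(&v, 0x1000);                    // 4 KiB block size
//   el.add_extents(16, 4);                        // blocks [16, 20)
//   std::pair<int64_t, int64_t> e = el.get_nth_extent(0);
//   // e.first == 16 (block offset), e.second == 4 (block count)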

/// pextent: physical extent
struct bluestore_pextent_t : public AllocExtent {
  const static uint64_t INVALID_OFFSET = ~0ull;

  bluestore_pextent_t() : AllocExtent() {}
  bluestore_pextent_t(uint64_t o, uint64_t l) : AllocExtent(o, l) {}
  bluestore_pextent_t(const AllocExtent &ext) :
    AllocExtent(ext.offset, ext.length) { }

  bluestore_pextent_t& operator=(const AllocExtent &ext) {
    offset = ext.offset;
    length = ext.length;
    return *this;
  }

  bool is_valid() const {
    return offset != INVALID_OFFSET;
  }

  DENC(bluestore_pextent_t, v, p) {
    denc_lba(v.offset, p);
    denc_varint_lowz(v.length, p);
  }

  void dump(Formatter *f) const;
  static void generate_test_instances(list<bluestore_pextent_t*>& ls);
};
WRITE_CLASS_DENC(bluestore_pextent_t)

ostream& operator<<(ostream& out, const bluestore_pextent_t& o);

typedef mempool::bluestore_cache_other::vector<bluestore_pextent_t> PExtentVector;

template<>
struct denc_traits<PExtentVector> {
  static constexpr bool supported = true;
  static constexpr bool bounded = false;
  static constexpr bool featured = false;
  static constexpr bool need_contiguous = true;
  static void bound_encode(const PExtentVector& v, size_t& p) {
    p += sizeof(uint32_t);
    const auto size = v.size();
    if (size) {
      size_t per = 0;
      denc(v.front(), per);
      p += per * size;
    }
  }
  static void encode(const PExtentVector& v,
                     bufferlist::contiguous_appender& p) {
    denc_varint(v.size(), p);
    for (auto& i : v) {
      denc(i, p);
    }
  }
  static void decode(PExtentVector& v, bufferptr::iterator& p) {
    unsigned num;
    denc_varint(num, p);
    v.clear();
    v.resize(num);
    for (unsigned i=0; i<num; ++i) {
      denc(v[i], p);
    }
  }
};
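
// Illustrative sketch (assumption): with these traits a PExtentVector can be
// encoded through the generic denc() helpers like any other denc'able type.
//
//   PExtentVector v = { bluestore_pextent_t(0x10000, 0x1000) };
//   bufferlist bl;
//   size_t bound = 0;
//   denc(v, bound);                                // bound_encode pass
//   {
//     auto a = bl.get_contiguous_appender(bound);  // reserve contiguous space
//     denc(v, a);                                  // encode pass
//   }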

/// extent_map: a map of reference counted extents
struct bluestore_extent_ref_map_t {
  struct record_t {
    uint32_t length;
    uint32_t refs;
    record_t(uint32_t l=0, uint32_t r=0) : length(l), refs(r) {}
    DENC(bluestore_extent_ref_map_t::record_t, v, p) {
      denc_varint_lowz(v.length, p);
      denc_varint(v.refs, p);
    }
  };

  typedef mempool::bluestore_cache_other::map<uint64_t,record_t> map_t;
  map_t ref_map;

  void _maybe_merge_left(map_t::iterator& p);

  bool empty() const {
    return ref_map.empty();
  }

  void get(uint64_t offset, uint32_t len);
  void put(uint64_t offset, uint32_t len, PExtentVector *release,
           bool *maybe_unshared);

  bool contains(uint64_t offset, uint32_t len) const;
  bool intersects(uint64_t offset, uint32_t len) const;

  void bound_encode(size_t& p) const {
    denc_varint((uint32_t)0, p);
    if (!ref_map.empty()) {
      size_t elem_size = 0;
      denc_varint_lowz((uint64_t)0, elem_size);
      ref_map.begin()->second.bound_encode(elem_size);
      p += elem_size * ref_map.size();
    }
  }
  void encode(bufferlist::contiguous_appender& p) const {
    uint32_t n = ref_map.size();
    denc_varint(n, p);
    if (n) {
      auto i = ref_map.begin();
      denc_varint_lowz(i->first, p);
      i->second.encode(p);
      int64_t pos = i->first;
      while (--n) {
        ++i;
        denc_varint_lowz((int64_t)i->first - pos, p);
        i->second.encode(p);
        pos = i->first;
      }
    }
  }
  void decode(bufferptr::iterator& p) {
    uint32_t n;
    denc_varint(n, p);
    if (n) {
      int64_t pos;
      denc_varint_lowz(pos, p);
      ref_map[pos].decode(p);
      while (--n) {
        int64_t delta;
        denc_varint_lowz(delta, p);
        pos += delta;
        ref_map[pos].decode(p);
      }
    }
  }

  void dump(Formatter *f) const;
  static void generate_test_instances(list<bluestore_extent_ref_map_t*>& o);
};
WRITE_CLASS_DENC(bluestore_extent_ref_map_t)
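
// Illustrative sketch (assumption): overlapping get()/put() calls keep
// per-range reference counts, and put() reports ranges that dropped to zero.
//
//   bluestore_extent_ref_map_t m;
//   m.get(0, 0x1000);                             // [0, 0x1000) refs == 1
//   m.get(0, 0x1000);                             // [0, 0x1000) refs == 2
//   PExtentVector release;
//   bool maybe_unshared = false;
//   m.put(0, 0x1000, &release, &maybe_unshared);  // refs == 1, release empty
//   m.put(0, 0x1000, &release, &maybe_unshared);  // refs == 0, range released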

ostream& operator<<(ostream& out, const bluestore_extent_ref_map_t& rm);
static inline bool operator==(const bluestore_extent_ref_map_t::record_t& l,
                              const bluestore_extent_ref_map_t::record_t& r) {
  return l.length == r.length && l.refs == r.refs;
}
static inline bool operator==(const bluestore_extent_ref_map_t& l,
                              const bluestore_extent_ref_map_t& r) {
  return l.ref_map == r.ref_map;
}
static inline bool operator!=(const bluestore_extent_ref_map_t& l,
                              const bluestore_extent_ref_map_t& r) {
  return !(l == r);
}

/// blob_use_tracker: a set of per-alloc unit ref counters to track blob usage
struct bluestore_blob_use_tracker_t {
  // N.B.: there is no need to minimize au_size/num_au as much as possible
  // (e.g. have just a single byte for au_size) since:
  // 1) the struct isn't packed, hence it's padded; and even if it were packed, see 2)
  // 2) the memory manager has its own granularity, most probably >= 8 bytes
  //
  uint32_t au_size;  // allocation (= tracking) unit size,
                     // == 0 if uninitialized
  uint32_t num_au;   // number of allocation units tracked,
                     // == 0 if a single unit or the whole blob is tracked

  union {
    uint32_t* bytes_per_au;
    uint32_t total_bytes;
  };

  bluestore_blob_use_tracker_t()
    : au_size(0), num_au(0), bytes_per_au(nullptr) {
  }
  ~bluestore_blob_use_tracker_t() {
    if (num_au) {
      delete[] bytes_per_au;
    }
  }

  uint32_t get_referenced_bytes() const {
    uint32_t total = total_bytes;
    if (num_au) {
      total = 0;
      for (size_t i = 0; i < num_au; ++i) {
        total += bytes_per_au[i];
      }
    }
    return total;
  }
  bool is_not_empty() const {
    if (!num_au) {
      return total_bytes != 0;
    }
    for (size_t i = 0; i < num_au; ++i) {
      if (bytes_per_au[i]) {
        return true;
      }
    }
    return false;
  }
  bool is_empty() const {
    return !is_not_empty();
  }

  void prune_tail(uint32_t new_len) {
    if (num_au) {
      new_len = ROUND_UP_TO(new_len, au_size);
      uint32_t _num_au = new_len / au_size;
      assert(_num_au <= num_au);
      if (_num_au) {
        num_au = _num_au; // the bytes_per_au array is left unmodified
      }
    }
  }

  void add_tail(uint32_t new_len, uint32_t _au_size) {
    auto full_size = au_size * (num_au ? num_au : 1);
    assert(new_len >= full_size);
    if (new_len == full_size) {
      return;
    }
    if (!num_au) {
      uint32_t old_total = total_bytes;
      init(new_len, _au_size);
      assert(num_au);
      bytes_per_au[0] = old_total;
    } else {
      assert(_au_size == au_size);
      new_len = ROUND_UP_TO(new_len, au_size);
      uint32_t _num_au = new_len / au_size;
      assert(_num_au >= num_au);
      if (_num_au > num_au) {
        auto old_bytes = bytes_per_au;
        auto old_num_au = num_au;
        num_au = _num_au;
        allocate();
        for (size_t i = 0; i < old_num_au; i++) {
          bytes_per_au[i] = old_bytes[i];
        }
        for (size_t i = old_num_au; i < num_au; i++) {
          bytes_per_au[i] = 0;
        }
        delete[] old_bytes;
      }
    }
  }

  void allocate();
  void init(uint32_t full_length, uint32_t _au_size);

  void get(uint32_t offset, uint32_t len);

  /// put: returns true if the blob has no references any more after the call;
  /// release_units is not filled, for the sake of performance.
  /// Returns false if there are still references to the blob;
  /// in this case release_units contains pextents
  /// (identified by their offsets relative to the blob start)
  /// that are no longer used and can be safely deallocated.
  bool put(uint32_t offset, uint32_t len, PExtentVector *release);

  bool can_split() const;
  bool can_split_at(uint32_t blob_offset) const;
  void split(uint32_t blob_offset, bluestore_blob_use_tracker_t* r);

  bool equal(const bluestore_blob_use_tracker_t& other) const;

  void bound_encode(size_t& p) const {
    denc_varint(au_size, p);
    if (au_size) {
      denc_varint(num_au, p);
      if (!num_au) {
        denc_varint(total_bytes, p);
      } else {
        size_t elem_size = 0;
        denc_varint((uint32_t)0, elem_size);
        p += elem_size * num_au;
      }
    }
  }
  void encode(bufferlist::contiguous_appender& p) const {
    denc_varint(au_size, p);
    if (au_size) {
      denc_varint(num_au, p);
      if (!num_au) {
        denc_varint(total_bytes, p);
      } else {
        size_t elem_size = 0;
        denc_varint((uint32_t)0, elem_size);
        for (size_t i = 0; i < num_au; ++i) {
          denc_varint(bytes_per_au[i], p);
        }
      }
    }
  }
  void decode(bufferptr::iterator& p) {
    denc_varint(au_size, p);
    if (au_size) {
      denc_varint(num_au, p);
      if (!num_au) {
        denc_varint(total_bytes, p);
      } else {
        allocate();
        for (size_t i = 0; i < num_au; ++i) {
          denc_varint(bytes_per_au[i], p);
        }
      }
    }
  }

  void dump(Formatter *f) const;
  static void generate_test_instances(list<bluestore_blob_use_tracker_t*>& o);
};
WRITE_CLASS_DENC(bluestore_blob_use_tracker_t)
ostream& operator<<(ostream& out, const bluestore_blob_use_tracker_t& rm);
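
// Illustrative sketch (assumption): a 64 KiB blob tracked in 16 KiB units.
//
//   bluestore_blob_use_tracker_t t;
//   t.init(0x10000, 0x4000);             // num_au == 4
//   t.get(0x0000, 0x2000);               // au 0 now references 0x2000 bytes
//   PExtentVector release;
//   if (t.put(0x0000, 0x2000, &release)) {
//     // no references remain anywhere in the blob
//   }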

/// blob: a piece of data on disk
struct bluestore_blob_t {
private:
  PExtentVector extents;           ///< raw data position on device
  uint32_t logical_length = 0;     ///< original length of data stored in the blob
  uint32_t compressed_length = 0;  ///< compressed length, if any

public:
  enum {
    LEGACY_FLAG_MUTABLE = 1,  ///< [legacy] blob can be overwritten or split
    FLAG_COMPRESSED = 2,      ///< blob is compressed
    FLAG_CSUM = 4,            ///< blob has checksums
    FLAG_HAS_UNUSED = 8,      ///< blob has unused map
    FLAG_SHARED = 16,         ///< blob is shared; see external SharedBlob
  };
  static string get_flags_string(unsigned flags);

  uint32_t flags = 0;  ///< FLAG_*

  typedef uint16_t unused_t;
  unused_t unused = 0;  ///< portions that have never been written to (bitmap)

  uint8_t csum_type = Checksummer::CSUM_NONE;  ///< CSUM_*
  uint8_t csum_chunk_order = 0;  ///< csum block size is 1<<csum_chunk_order bytes

  bufferptr csum_data;  ///< opaque vector of csum data

  bluestore_blob_t(uint32_t f = 0) : flags(f) {}

  const PExtentVector& get_extents() const {
    return extents;
  }

  void bound_encode(size_t& p, uint64_t struct_v) const {
    assert(struct_v == 1 || struct_v == 2);
    denc(extents, p);
    denc_varint(flags, p);
    denc_varint_lowz(logical_length, p);
    denc_varint_lowz(compressed_length, p);
    denc(csum_chunk_order, p);
    denc_varint(csum_data.length(), p);
    p += csum_data.length();
    p += sizeof(unused_t);
  }

  void encode(bufferlist::contiguous_appender& p, uint64_t struct_v) const {
    assert(struct_v == 1 || struct_v == 2);
    denc(extents, p);
    denc_varint(flags, p);
    if (is_compressed()) {
      denc_varint_lowz(logical_length, p);
      denc_varint_lowz(compressed_length, p);
    }
    if (has_csum()) {
      denc(csum_type, p);
      denc(csum_chunk_order, p);
      denc_varint(csum_data.length(), p);
      memcpy(p.get_pos_add(csum_data.length()), csum_data.c_str(),
             csum_data.length());
    }
    if (has_unused()) {
      denc(unused, p);
    }
  }

  void decode(bufferptr::iterator& p, uint64_t struct_v) {
    assert(struct_v == 1 || struct_v == 2);
    denc(extents, p);
    denc_varint(flags, p);
    if (is_compressed()) {
      denc_varint_lowz(logical_length, p);
      denc_varint_lowz(compressed_length, p);
    } else {
      logical_length = get_ondisk_length();
    }
    if (has_csum()) {
      denc(csum_type, p);
      denc(csum_chunk_order, p);
      uint32_t len;
      denc_varint(len, p);
      csum_data = p.get_ptr(len);
    }
    if (has_unused()) {
      denc(unused, p);
    }
  }

  bool can_split() const {
    return
      !has_flag(FLAG_SHARED) &&
      !has_flag(FLAG_COMPRESSED) &&
      !has_flag(FLAG_HAS_UNUSED);  // splitting the unused set is complex
  }
  bool can_split_at(uint32_t blob_offset) const {
    return !has_csum() || blob_offset % get_csum_chunk_size() == 0;
  }

  void dump(Formatter *f) const;
  static void generate_test_instances(list<bluestore_blob_t*>& ls);

  bool has_flag(unsigned f) const {
    return flags & f;
  }
  void set_flag(unsigned f) {
    flags |= f;
  }
  void clear_flag(unsigned f) {
    flags &= ~f;
  }
  string get_flags_string() const {
    return get_flags_string(flags);
  }

  void set_compressed(uint64_t clen_orig, uint64_t clen) {
    set_flag(FLAG_COMPRESSED);
    logical_length = clen_orig;
    compressed_length = clen;
  }
  bool is_mutable() const {
    return !is_compressed() && !is_shared();
  }
  bool is_compressed() const {
    return has_flag(FLAG_COMPRESSED);
  }
  bool has_csum() const {
    return has_flag(FLAG_CSUM);
  }
  bool has_unused() const {
    return has_flag(FLAG_HAS_UNUSED);
  }
  bool is_shared() const {
    return has_flag(FLAG_SHARED);
  }

  /// return chunk (i.e. min readable block) size for the blob
  uint64_t get_chunk_size(uint64_t dev_block_size) const {
    return has_csum() ?
      MAX(dev_block_size, get_csum_chunk_size()) : dev_block_size;
  }
  uint32_t get_csum_chunk_size() const {
    return 1 << csum_chunk_order;
  }
  uint32_t get_compressed_payload_length() const {
    return is_compressed() ? compressed_length : 0;
  }
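
  // Worked example (assumption): with csum_chunk_order == 12 the checksum
  // chunk is 1 << 12 == 4096 bytes, so on a 512-byte device
  // get_chunk_size(512) returns MAX(512, 4096) == 4096; reads must cover a
  // whole csum chunk to be verifiable.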

  /// map a logical (blob-relative) offset to its physical (device) offset
  uint64_t calc_offset(uint64_t x_off, uint64_t *plen) const {
    auto p = extents.begin();
    assert(p != extents.end());
    while (x_off >= p->length) {
      x_off -= p->length;
      ++p;
      assert(p != extents.end());
    }
    if (plen)
      *plen = p->length - x_off;
    return p->offset + x_off;
  }

  // validate whether the status of the pextents within the given range
  // meets the requirement (allocated or unallocated).
  bool _validate_range(uint64_t b_off, uint64_t b_len,
                       bool require_allocated) const {
    auto p = extents.begin();
    assert(p != extents.end());
    while (b_off >= p->length) {
      b_off -= p->length;
      ++p;
      assert(p != extents.end());
    }
    b_len += b_off;
    while (b_len) {
      assert(p != extents.end());
      if (require_allocated != p->is_valid()) {
        return false;
      }
      if (p->length >= b_len) {
        return true;
      }
      b_len -= p->length;
      ++p;
    }
    assert(0 == "we should not get here");
  }

  /// return true if the entire range is allocated
  /// (mapped to extents on disk)
  bool is_allocated(uint64_t b_off, uint64_t b_len) const {
    return _validate_range(b_off, b_len, true);
  }

  /// return true if the entire range is unallocated
  /// (not mapped to extents on disk)
  bool is_unallocated(uint64_t b_off, uint64_t b_len) const {
    return _validate_range(b_off, b_len, false);
  }

  /// return true if the logical range has never been used
  bool is_unused(uint64_t offset, uint64_t length) const {
    if (!has_unused()) {
      return false;
    }
    uint64_t blob_len = get_logical_length();
    assert((blob_len % (sizeof(unused)*8)) == 0);
    assert(offset + length <= blob_len);
    uint64_t chunk_size = blob_len / (sizeof(unused)*8);
    uint64_t start = offset / chunk_size;
    uint64_t end = ROUND_UP_TO(offset + length, chunk_size) / chunk_size;
    auto i = start;
    while (i < end && (unused & (1u << i))) {
      i++;
    }
    return i >= end;
  }

  /// mark a range that has never been used
  void add_unused(uint64_t offset, uint64_t length) {
    uint64_t blob_len = get_logical_length();
    assert((blob_len % (sizeof(unused)*8)) == 0);
    assert(offset + length <= blob_len);
    uint64_t chunk_size = blob_len / (sizeof(unused)*8);
    uint64_t start = ROUND_UP_TO(offset, chunk_size) / chunk_size;
    uint64_t end = (offset + length) / chunk_size;
    for (auto i = start; i < end; ++i) {
      unused |= (1u << i);
    }
    if (start != end) {
      set_flag(FLAG_HAS_UNUSED);
    }
  }

  /// indicate that a range has (now) been used
  void mark_used(uint64_t offset, uint64_t length) {
    if (has_unused()) {
      uint64_t blob_len = get_logical_length();
      assert((blob_len % (sizeof(unused)*8)) == 0);
      assert(offset + length <= blob_len);
      uint64_t chunk_size = blob_len / (sizeof(unused)*8);
      uint64_t start = offset / chunk_size;
      uint64_t end = ROUND_UP_TO(offset + length, chunk_size) / chunk_size;
      for (auto i = start; i < end; ++i) {
        unused &= ~(1u << i);
      }
      if (!unused) {
        clear_flag(FLAG_HAS_UNUSED);
      }
    }
  }
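
  // Worked example (assumption): for a 64 KiB blob the 16-bit 'unused'
  // bitmap covers one 4 KiB chunk per bit (0x10000 / 16 == 0x1000).
  // add_unused() rounds the start up and the end down, so only whole chunks
  // are marked: add_unused(0x800, 0x2000) sets bit 1 only ([0x1000, 0x2000)).
  // is_unused() rounds outward instead, so it returns true only if every
  // chunk touching the queried range is unused.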

  /// apply 'f' to each physical (device) extent backing the logical range
  /// [x_off, x_off + x_len); stop and return early on error
  int map(uint64_t x_off, uint64_t x_len,
          std::function<int(uint64_t,uint64_t)> f) const {
    auto p = extents.begin();
    assert(p != extents.end());
    while (x_off >= p->length) {
      x_off -= p->length;
      ++p;
      assert(p != extents.end());
    }
    while (x_len > 0) {
      assert(p != extents.end());
      uint64_t l = MIN(p->length - x_off, x_len);
      int r = f(p->offset + x_off, l);
      if (r < 0) {
        return r;
      }
      x_off = 0;
      x_len -= l;
      ++p;
    }
    return 0;
  }
  void map_bl(uint64_t x_off,
              bufferlist& bl,
              std::function<void(uint64_t,bufferlist&)> f) const {
    auto p = extents.begin();
    assert(p != extents.end());
    while (x_off >= p->length) {
      x_off -= p->length;
      ++p;
      assert(p != extents.end());
    }
    bufferlist::iterator it = bl.begin();
    uint64_t x_len = bl.length();
    while (x_len > 0) {
      assert(p != extents.end());
      uint64_t l = MIN(p->length - x_off, x_len);
      bufferlist t;
      it.copy(l, t);
      f(p->offset + x_off, t);
      x_off = 0;
      x_len -= l;
      ++p;
    }
  }
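
  // Illustrative sketch (assumption: 'read_at' is a hypothetical device read
  // callback): issue one I/O per backing physical extent.
  //
  //   int r = blob.map(0, 0x2000,
  //     [&](uint64_t p_off, uint64_t p_len) {
  //       return read_at(p_off, p_len);
  //     });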

  uint32_t get_ondisk_length() const {
    uint32_t len = 0;
    for (auto &p : extents) {
      len += p.length;
    }
    return len;
  }

  uint32_t get_logical_length() const {
    return logical_length;
  }
  size_t get_csum_value_size() const;

  size_t get_csum_count() const {
    size_t vs = get_csum_value_size();
    if (!vs)
      return 0;
    return csum_data.length() / vs;
  }

  uint64_t get_csum_item(unsigned i) const {
    size_t cs = get_csum_value_size();
    const char *p = csum_data.c_str();
    switch (cs) {
    case 0:
      assert(0 == "no csum data, bad index");
    case 1:
      return reinterpret_cast<const uint8_t*>(p)[i];
    case 2:
      return reinterpret_cast<const __le16*>(p)[i];
    case 4:
      return reinterpret_cast<const __le32*>(p)[i];
    case 8:
      return reinterpret_cast<const __le64*>(p)[i];
    default:
      assert(0 == "unrecognized csum word size");
    }
  }

  const char *get_csum_item_ptr(unsigned i) const {
    size_t cs = get_csum_value_size();
    return csum_data.c_str() + (cs * i);
  }
  char *get_csum_item_ptr(unsigned i) {
    size_t cs = get_csum_value_size();
    return csum_data.c_str() + (cs * i);
  }

  void init_csum(unsigned type, unsigned order, unsigned len) {
    flags |= FLAG_CSUM;
    csum_type = type;
    csum_chunk_order = order;
    csum_data = buffer::create(get_csum_value_size() * len / get_csum_chunk_size());
    csum_data.zero();
  }
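
  // Worked example (assumption: CRC32C checksum values are 4 bytes wide):
  //
  //   blob.init_csum(Checksummer::CSUM_CRC32C, 12, 0x10000);
  //   // chunk size = 1 << 12 = 4 KiB, so a 64 KiB blob has 16 chunks and
  //   // csum_data holds 16 * 4 = 64 bytes of checksum values.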

  /// calculate csum for the buffer at the given b_off
  void calc_csum(uint64_t b_off, const bufferlist& bl);

  /// verify csum: return -EOPNOTSUPP for an unsupported checksum type;
  /// return -1 and a valid (non-negative) b_bad_off for a checksum error;
  /// return 0 if all is well.
  int verify_csum(uint64_t b_off, const bufferlist& bl, int* b_bad_off,
                  uint64_t *bad_csum) const;

  bool can_prune_tail() const {
    return
      extents.size() > 1 &&  // if it's all invalid it's not pruning.
      !extents.back().is_valid() &&
      !has_unused();
  }
  void prune_tail() {
    const auto &p = extents.back();
    logical_length -= p.length;
    extents.pop_back();
    if (has_csum()) {
      bufferptr t;
      t.swap(csum_data);
      csum_data = bufferptr(t.c_str(),
                            get_logical_length() / get_csum_chunk_size() *
                            get_csum_value_size());
    }
  }
  void add_tail(uint32_t new_len) {
    assert(is_mutable());
    assert(!has_unused());
    assert(new_len > logical_length);
    extents.emplace_back(
      bluestore_pextent_t(
        bluestore_pextent_t::INVALID_OFFSET,
        new_len - logical_length));
    logical_length = new_len;
    if (has_csum()) {
      bufferptr t;
      t.swap(csum_data);
      csum_data = buffer::create(
        get_csum_value_size() * logical_length / get_csum_chunk_size());
      csum_data.copy_in(0, t.length(), t.c_str());
      csum_data.zero(t.length(), csum_data.length() - t.length());
    }
  }

  uint32_t get_release_size(uint32_t min_alloc_size) const {
    if (is_compressed()) {
      return get_logical_length();
    }
    uint32_t res = get_csum_chunk_size();
    if (!has_csum() || res < min_alloc_size) {
      res = min_alloc_size;
    }
    return res;
  }

  void split(uint32_t blob_offset, bluestore_blob_t& rb);
  void allocated(uint32_t b_off, uint32_t length, const AllocExtentVector& allocs);
  void allocated_test(const bluestore_pextent_t& alloc); // intended for UT only

  /// updates the blob's pextent container and returns unused pextents
  /// eligible for release.
  /// all - indicates that the whole blob is to be released.
  /// logical - specifies the set of logical extents within the blob's
  /// pextents to be released.
  /// Returns true if the blob has no more valid pextents.
  bool release_extents(
    bool all,
    const PExtentVector& logical,
    PExtentVector* r);
};
WRITE_CLASS_DENC_FEATURED(bluestore_blob_t)

ostream& operator<<(ostream& out, const bluestore_blob_t& o);
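
// Illustrative sketch (assumption): a compressed blob releases its whole
// on-disk allocation at once, while an uncompressed csum blob releases in
// MAX(csum chunk, min_alloc_size) granularity.
//
//   bluestore_blob_t b;
//   b.set_compressed(0x10000, 0x4000);        // 64 KiB logical, 16 KiB on disk
//   uint32_t g = b.get_release_size(0x1000);  // == 0x10000 (whole blob)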

/// shared blob state
struct bluestore_shared_blob_t {
  uint64_t sbid;                       ///< shared blob id
  bluestore_extent_ref_map_t ref_map;  ///< shared blob extents

  bluestore_shared_blob_t(uint64_t _sbid) : sbid(_sbid) {}

  DENC(bluestore_shared_blob_t, v, p) {
    DENC_START(1, 1, p);
    denc(v.ref_map, p);
    DENC_FINISH(p);
  }

  void dump(Formatter *f) const;
  static void generate_test_instances(list<bluestore_shared_blob_t*>& ls);

  bool empty() const {
    return ref_map.empty();
  }
};
WRITE_CLASS_DENC(bluestore_shared_blob_t)

ostream& operator<<(ostream& out, const bluestore_shared_blob_t& o);

/// onode: per-object metadata
struct bluestore_onode_t {
  uint64_t nid = 0;   ///< numeric id (locally unique)
  uint64_t size = 0;  ///< object size
  map<mempool::bluestore_cache_other::string, bufferptr> attrs;  ///< attrs

  struct shard_info {
    uint32_t offset = 0;  ///< logical offset for start of shard
    uint32_t bytes = 0;   ///< encoded bytes
    DENC(shard_info, v, p) {
      denc_varint(v.offset, p);
      denc_varint(v.bytes, p);
    }
    void dump(Formatter *f) const;
  };
  vector<shard_info> extent_map_shards;  ///< extent map shards (if any)

  uint32_t expected_object_size = 0;
  uint32_t expected_write_size = 0;
  uint32_t alloc_hint_flags = 0;

  uint8_t flags = 0;

  enum {
    FLAG_OMAP = 1,
  };

  string get_flags_string() const {
    string s;
    if (flags & FLAG_OMAP) {
      s = "omap";
    }
    return s;
  }

  bool has_flag(unsigned f) const {
    return flags & f;
  }
  void set_flag(unsigned f) {
    flags |= f;
  }
  void clear_flag(unsigned f) {
    flags &= ~f;
  }
  bool has_omap() const {
    return has_flag(FLAG_OMAP);
  }
  void set_omap_flag() {
    set_flag(FLAG_OMAP);
  }
  void clear_omap_flag() {
    clear_flag(FLAG_OMAP);
  }

  DENC(bluestore_onode_t, v, p) {
    DENC_START(1, 1, p);
    denc_varint(v.nid, p);
    denc_varint(v.size, p);
    denc(v.attrs, p);
    denc(v.flags, p);
    denc(v.extent_map_shards, p);
    denc_varint(v.expected_object_size, p);
    denc_varint(v.expected_write_size, p);
    denc_varint(v.alloc_hint_flags, p);
    DENC_FINISH(p);
  }
  void dump(Formatter *f) const;
  static void generate_test_instances(list<bluestore_onode_t*>& o);
};
WRITE_CLASS_DENC(bluestore_onode_t::shard_info)
WRITE_CLASS_DENC(bluestore_onode_t)

ostream& operator<<(ostream& out, const bluestore_onode_t::shard_info& si);

/// writeahead-logged op
struct bluestore_deferred_op_t {
  typedef enum {
    OP_WRITE = 1,
  } type_t;
  __u8 op = 0;

  PExtentVector extents;
  bufferlist data;

  DENC(bluestore_deferred_op_t, v, p) {
    DENC_START(1, 1, p);
    denc(v.op, p);
    denc(v.extents, p);
    denc(v.data, p);
    DENC_FINISH(p);
  }
  void dump(Formatter *f) const;
  static void generate_test_instances(list<bluestore_deferred_op_t*>& o);
};
WRITE_CLASS_DENC(bluestore_deferred_op_t)

/// writeahead-logged transaction
struct bluestore_deferred_transaction_t {
  uint64_t seq = 0;
  list<bluestore_deferred_op_t> ops;
  interval_set<uint64_t> released;  ///< allocations to release after the tx

  bluestore_deferred_transaction_t() : seq(0) {}

  DENC(bluestore_deferred_transaction_t, v, p) {
    DENC_START(1, 1, p);
    denc(v.seq, p);
    denc(v.ops, p);
    denc(v.released, p);
    DENC_FINISH(p);
  }
  void dump(Formatter *f) const;
  static void generate_test_instances(list<bluestore_deferred_transaction_t*>& o);
};
WRITE_CLASS_DENC(bluestore_deferred_transaction_t)
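
// Illustrative sketch (assumption): a small overwrite queued as a deferred
// write against an already-allocated physical extent; 'payload' is a
// hypothetical bufferlist whose length matches the extents.
//
//   bluestore_deferred_transaction_t txn;
//   bluestore_deferred_op_t op;
//   op.op = bluestore_deferred_op_t::OP_WRITE;
//   op.extents.push_back(bluestore_pextent_t(0x10000, 0x1000));
//   op.data = payload;
//   txn.ops.push_back(op);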

struct bluestore_compression_header_t {
  uint8_t type = Compressor::COMP_ALG_NONE;
  uint32_t length = 0;

  bluestore_compression_header_t() {}
  bluestore_compression_header_t(uint8_t _type)
    : type(_type) {}

  DENC(bluestore_compression_header_t, v, p) {
    DENC_START(1, 1, p);
    denc(v.type, p);
    denc(v.length, p);
    DENC_FINISH(p);
  }
  void dump(Formatter *f) const;
  static void generate_test_instances(list<bluestore_compression_header_t*>& o);
};
WRITE_CLASS_DENC(bluestore_compression_header_t)

#endif