]>
git.proxmox.com Git - ceph.git/blob - ceph/src/crimson/os/seastore/segment_manager.h
1669d124a6b8ac079bb09d6d0b7f8a44707a1539
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
8 #include <boost/intrusive_ptr.hpp>
9 #include <boost/smart_ptr/intrusive_ref_counter.hpp>
10 #include <boost/iterator/counting_iterator.hpp>
11 #include <seastar/core/future.hh>
13 #include "include/buffer_fwd.h"
14 #include "include/ceph_assert.h"
16 #include "crimson/common/config_proxy.h"
17 #include "crimson/os/seastore/seastore_types.h"
18 #include "crimson/osd/exceptions.h"
21 namespace crimson::os::seastore
{
// Per-shard layout info stored in the block segment-manager superblock.
// NOTE(review): this extraction elides lines between members — validate()
// below also reads shard_infos[i].size and shard_infos[i].segments, so this
// struct presumably declares size/segments members not visible here; confirm
// against the full header.
struct block_shard_info_t
{
  // Device byte offset of this shard's segment tracker region
  // (validate() requires it to be > 0 and block-aligned).
  uint64_t tracker_offset;
  // Device byte offset of this shard's first segment
  // (validate() requires it to be > tracker_offset and block-aligned).
  uint64_t first_segment_offset;

  // denc encode/decode hook (v = value, p = encoder/decoder cursor).
  DENC(block_shard_info_t, v, p) {
    denc(v.tracker_offset, p);
    denc(v.first_segment_offset, p);
    // NOTE(review): the end of this DENC body and the struct's closing
    // braces are elided by this extraction — do not assume the encoding
    // stops here.
// Superblock for the block-backed segment manager, persisted on-device;
// validate() below checks its invariants after it is read back.
struct block_sm_superblock_t
{
  // Number of shards the device was formatted for; validate() asserts it
  // equals seastar::smp::count at load time.
  unsigned int shard_num = 0;
  // Segment size in bytes; validate() requires > 0 and a multiple of
  // block_size.
  size_t segment_size = 0;
  // Device block granularity in bytes; validate() requires > 0.
  size_t block_size = 0;

  // Per-shard layout, indexed by shard id.
  std::vector<block_shard_info_t> shard_infos;

  // Device-level configuration (spec, major/secondary device info).
  device_config_t config;

  // denc encode/decode hook.
  // NOTE(review): only segment_size, block_size and shard_infos are denc'd
  // in the visible lines; lines elided by this extraction (including any
  // DENC_START/FINISH and handling of the other members) must be confirmed
  // against the full header.
  DENC(block_sm_superblock_t, v, p) {
    denc(v.segment_size, p);
    denc(v.block_size, p);
    denc(v.shard_infos, p);
  // Sanity-check superblock invariants after reading it from disk; failures
  // abort via ceph_assert (ceph_assert_always also fires in release builds).
  void validate() const {
    // Shard count must match the format-time value.
    ceph_assert(shard_num == seastar::smp::count);
    ceph_assert(block_size > 0);
    // Segments are whole multiples of the device block size.
    ceph_assert(segment_size > 0 &&
                segment_size % block_size == 0);
    ceph_assert_always(segment_size <= SEGMENT_OFF_MAX);
    for (unsigned int i = 0; i < seastar::smp::count; i++) {
      // Each shard holds more than one segment's worth of space,
      // block-aligned.
      ceph_assert(shard_infos[i].size > segment_size &&
                  shard_infos[i].size % block_size == 0);
      ceph_assert_always(shard_infos[i].size <= DEVICE_OFF_MAX);
      ceph_assert(shard_infos[i].segments > 0);
      ceph_assert_always(shard_infos[i].segments <= DEVICE_SEGMENT_ID_MAX);
      // Tracker region sits before the first segment; both block-aligned.
      ceph_assert(shard_infos[i].tracker_offset > 0 &&
                  shard_infos[i].tracker_offset % block_size == 0);
      ceph_assert(shard_infos[i].first_segment_offset > shard_infos[i].tracker_offset &&
                  shard_infos[i].first_segment_offset % block_size == 0);
    // NOTE(review): the for-loop's closing brace is elided by this
    // extraction.
    // Device spec checks.
    ceph_assert(config.spec.magic != 0);
    ceph_assert(get_default_backend_of_device(config.spec.dtype) ==
                backend_type_t::SEGMENTED);
    ceph_assert(config.spec.id <= DEVICE_ID_MAX_VALID);
    // A non-major device must not carry any secondary devices.
    if (!config.major_dev) {
      ceph_assert(config.secondary_devices.size() == 0);
    // NOTE(review): the brace closing this if (and any else) is elided by
    // this extraction.
    for (const auto& [k, v] : config.secondary_devices) {
      // Secondary device ids must differ from the major device's id, be
      // valid, and match the id recorded in their own spec.
      ceph_assert(k != config.spec.id);
      ceph_assert(k <= DEVICE_ID_MAX_VALID);
      ceph_assert(k == v.id);
      ceph_assert(v.magic != 0);
      // dtype must be a real device type (exclusive bounds on the enum
      // sentinels).
      ceph_assert(v.dtype > device_type_t::NONE);
      ceph_assert(v.dtype < device_type_t::NUM_TYPES);
      // NOTE(review): the loop/function/struct closing braces are elided by
      // this extraction.
// Stream-output (logging) operators for the superblock types; definitions
// live in the corresponding .cc file.
std::ostream& operator<<(std::ostream&, const block_shard_info_t&);
std::ostream& operator<<(std::ostream&, const block_sm_superblock_t&);
// Handle to one append-only segment of a SegmentManager device.
// Intrusively ref-counted; thread_unsafe_counter avoids atomic refcount
// overhead — presumably safe because handles stay on one reactor shard,
// confirm against seastore's threading model.
// NOTE(review): this extraction elides lines inside the class (the first
// template argument of intrusive_ref_counter, access specifiers, enum
// values, parts of comment blocks) — confirm against the full header.
class Segment : public boost::intrusive_ref_counter<
  boost::thread_unsafe_counter>{

  // Lifecycle state of a segment.
  // NOTE(review): the enumerator list is elided by this extraction.
  enum class segment_state_t : uint8_t {

  virtual segment_id_t get_segment_id() const = 0;

  // min next write location
  virtual segment_off_t get_write_ptr() const = 0;

  virtual segment_off_t get_write_capacity() const = 0;

  // Closes segment for writes. Won't complete until
  // outstanding writes to this segment are complete.
  using close_ertr = crimson::errorator<
    crimson::ct_error::input_output_error,
    crimson::ct_error::invarg,
    crimson::ct_error::enoent>;
  virtual close_ertr::future<> close() = 0;

  // @param offset offset of write, must be aligned to <> and >= write pointer, advances
  // @param bl buffer to write, will be padded if not aligned
  using write_ertr = crimson::errorator<
    crimson::ct_error::input_output_error, // media error or corruption
    crimson::ct_error::invarg, // if offset is < write pointer or misaligned
    crimson::ct_error::ebadf, // segment closed
    crimson::ct_error::enospc // write exceeds segment size
  // NOTE(review): the errorator list's closing `>;` is elided by this
  // extraction.
  virtual write_ertr::future<> write(
    segment_off_t offset, ceph::bufferlist bl) = 0;

  // advance the segment write pointer,
  // needed when writing at wp is strictly implemented. ex: ZNS backed segments
  // @param offset: advance write pointer till the given offset
  virtual write_ertr::future<> advance_wp(
    segment_off_t offset) = 0;

  virtual ~Segment() {}
  // NOTE(review): the class's closing `};` is elided by this extraction.
// Intrusive ref-counted handle type for Segment.
using SegmentRef = boost::intrusive_ptr<Segment>;

// Stream output for segment states (definition in the .cc file).
std::ostream& operator<<(std::ostream& out, Segment::segment_state_t);
// Size in bytes of an on-disk physical address record.
constexpr size_t PADDR_SIZE = sizeof(paddr_t);

class SegmentManager;

// Owning handle for a SegmentManager.
using SegmentManagerRef = std::unique_ptr<SegmentManager>;
// Device abstraction for segmented storage: hands out Segment handles and
// reports device geometry. Concrete backends are constructed via
// get_segment_manager() below.
// NOTE(review): access specifiers and several closing braces are elided by
// this extraction — confirm against the full header.
class SegmentManager : public Device
{
  // This device always reports the SEGMENTED backend.
  backend_type_t get_backend_type() const final
  {
    return backend_type_t::SEGMENTED;
  // NOTE(review): the brace closing get_backend_type is elided by this
  // extraction.

  // Error set for opening a segment.
  using open_ertr = crimson::errorator<
    crimson::ct_error::input_output_error,
    crimson::ct_error::invarg,
    crimson::ct_error::enoent>;
  virtual open_ertr::future<SegmentRef> open(segment_id_t id) = 0;

  // Error set for releasing (freeing) a segment back to the device.
  using release_ertr = crimson::errorator<
    crimson::ct_error::input_output_error,
    crimson::ct_error::invarg,
    crimson::ct_error::enoent>;
  virtual release_ertr::future<> release(segment_id_t id) = 0;

  /* Methods for discovering device geometry, segmentid set, etc */
  virtual segment_off_t get_segment_size() const = 0;
193 virtual device_segment_id_t
get_num_segments() const {
194 ceph_assert(get_available_size() % get_segment_size() == 0);
195 return ((device_segment_id_t
)(get_available_size() / get_segment_size()));
  virtual ~SegmentManager() {}

  // Factory: construct the concrete SegmentManager implementation for the
  // device at path `device` with device type `dtype`.
  static seastar::future<SegmentManagerRef>
  get_segment_manager(const std::string &device, device_type_t dtype);
// NOTE(review): these two qualified type names appear without an enclosing
// macro call in this extraction — presumably the arguments of
// WRITE_CLASS_DENC(...)-style invocations whose surrounding tokens were
// elided; confirm against the full header.
crimson::os::seastore::block_shard_info_t
crimson::os::seastore::block_sm_superblock_t

// fmt >= 9 no longer formats operator<<-printable types implicitly; route
// both superblock types through ostream_formatter.
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::block_shard_info_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::block_sm_superblock_t> : fmt::ostream_formatter {};
// NOTE(review): the matching #endif is elided by this extraction.