// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#pragma once

#include <iosfwd>

#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <boost/iterator/counting_iterator.hpp>
#include <seastar/core/future.hh>

#include "include/buffer_fwd.h"
#include "include/ceph_assert.h"

#include "crimson/common/config_proxy.h"
#include "crimson/os/seastore/seastore_types.h"
#include "crimson/osd/exceptions.h"
#include "device.h"

namespace crimson::os::seastore {

using std::vector;
struct block_shard_info_t {
  std::size_t size;
  std::size_t segments;
  uint64_t tracker_offset;
  uint64_t first_segment_offset;

  DENC(block_shard_info_t, v, p) {
    DENC_START(1, 1, p);
    denc(v.size, p);
    denc(v.segments, p);
    denc(v.tracker_offset, p);
    denc(v.first_segment_offset, p);
    DENC_FINISH(p);
  }
};
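
/*
 * Illustrative per-shard layout implied by validate() below (the region
 * names and proportions are assumptions, not normative):
 *
 *   0 .. tracker_offset ...... first_segment_offset .............. size
 *        [ segment tracker ]   [ segment 0 | segment 1 | ... ]
 *
 * validate() only requires that both offsets are block-aligned and that
 * first_segment_offset > tracker_offset.
 */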

struct block_sm_superblock_t {
  unsigned int shard_num = 0;
  size_t segment_size = 0;
  size_t block_size = 0;

  std::vector<block_shard_info_t> shard_infos;

  device_config_t config;

  DENC(block_sm_superblock_t, v, p) {
    DENC_START(1, 1, p);
    denc(v.shard_num, p);
    denc(v.segment_size, p);
    denc(v.block_size, p);
    denc(v.shard_infos, p);
    denc(v.config, p);
    DENC_FINISH(p);
  }

  void validate() const {
    ceph_assert(shard_num == seastar::smp::count);
    // guard the per-shard loop below against a short shard_infos vector
    ceph_assert(shard_infos.size() == shard_num);
    ceph_assert(block_size > 0);
    ceph_assert(segment_size > 0 &&
                segment_size % block_size == 0);
    ceph_assert_always(segment_size <= SEGMENT_OFF_MAX);
    for (unsigned int i = 0; i < seastar::smp::count; i++) {
      ceph_assert(shard_infos[i].size > segment_size &&
                  shard_infos[i].size % block_size == 0);
      ceph_assert_always(shard_infos[i].size <= DEVICE_OFF_MAX);
      ceph_assert(shard_infos[i].segments > 0);
      ceph_assert_always(shard_infos[i].segments <= DEVICE_SEGMENT_ID_MAX);
      ceph_assert(shard_infos[i].tracker_offset > 0 &&
                  shard_infos[i].tracker_offset % block_size == 0);
      ceph_assert(shard_infos[i].first_segment_offset > shard_infos[i].tracker_offset &&
                  shard_infos[i].first_segment_offset % block_size == 0);
    }
    ceph_assert(config.spec.magic != 0);
    ceph_assert(get_default_backend_of_device(config.spec.dtype) ==
                backend_type_t::SEGMENTED);
    ceph_assert(config.spec.id <= DEVICE_ID_MAX_VALID);
    if (!config.major_dev) {
      ceph_assert(config.secondary_devices.size() == 0);
    }
    for (const auto& [k, v] : config.secondary_devices) {
      ceph_assert(k != config.spec.id);
      ceph_assert(k <= DEVICE_ID_MAX_VALID);
      ceph_assert(k == v.id);
      ceph_assert(v.magic != 0);
      ceph_assert(v.dtype > device_type_t::NONE);
      ceph_assert(v.dtype < device_type_t::NUM_TYPES);
    }
  }
};

std::ostream& operator<<(std::ostream&, const block_shard_info_t&);
std::ostream& operator<<(std::ostream&, const block_sm_superblock_t&);
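
/*
 * Example (illustrative sketch, not part of the API): a DENC round-trip
 * of the superblock.  encode()/decode() come from the WRITE_CLASS_DENC
 * declarations at the bottom of this file; the field values are made up.
 *
 *   block_sm_superblock_t sb;
 *   sb.shard_num = seastar::smp::count;
 *   sb.segment_size = 64 << 20;       // assumption: 64 MiB segments
 *   sb.block_size = 4096;             // assumption: 4 KiB blocks
 *   sb.shard_infos.resize(sb.shard_num);
 *   // ... populate shard_infos and config, then check sb.validate() ...
 *
 *   ceph::bufferlist bl;
 *   encode(sb, bl);
 *   auto it = bl.cbegin();
 *   block_sm_superblock_t decoded;
 *   decode(decoded, it);
 */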

class Segment : public boost::intrusive_ref_counter<
  Segment,
  boost::thread_unsafe_counter>{
public:

  enum class segment_state_t : uint8_t {
    EMPTY = 0,
    OPEN = 1,
    CLOSED = 2
  };

  /**
   * get_segment_id
   */
  virtual segment_id_t get_segment_id() const = 0;

  /**
   * get_write_ptr
   *
   * Returns the minimum location of the next write, i.e. the current
   * write pointer.
   */
  virtual segment_off_t get_write_ptr() const = 0;

  /**
   * get_write_capacity
   *
   * Returns the maximum write capacity of the segment.
   */
  virtual segment_off_t get_write_capacity() const = 0;

  /**
   * close
   *
   * Closes the segment for writes.  The returned future won't resolve
   * until all outstanding writes to this segment have completed.
   */
  using close_ertr = crimson::errorator<
    crimson::ct_error::input_output_error,
    crimson::ct_error::invarg,
    crimson::ct_error::enoent>;
  virtual close_ertr::future<> close() = 0;

  /**
   * write
   *
   * @param offset offset of the write; must be aligned to <> and >= the
   *        current write pointer.  Advances the write pointer.
   * @param bl buffer to write; will be padded if not aligned
   */
  using write_ertr = crimson::errorator<
    crimson::ct_error::input_output_error, // media error or corruption
    crimson::ct_error::invarg,             // if offset is < write pointer or misaligned
    crimson::ct_error::ebadf,              // segment closed
    crimson::ct_error::enospc              // write exceeds segment size
    >;
  virtual write_ertr::future<> write(
    segment_off_t offset, ceph::bufferlist bl) = 0;

  /**
   * advance_wp
   *
   * Advances the segment write pointer; needed when writing exactly at
   * the write pointer is strictly enforced, e.g. for ZBD-backed segments.
   *
   * @param offset advance the write pointer up to this offset
   */
  virtual write_ertr::future<> advance_wp(
    segment_off_t offset) = 0;

  virtual ~Segment() {}
};
using SegmentRef = boost::intrusive_ptr<Segment>;

std::ostream& operator<<(std::ostream& out, Segment::segment_state_t);
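
/*
 * Example (illustrative sketch): the append discipline a Segment user
 * follows.  `seg` stands for a SegmentRef obtained from a SegmentManager
 * (declared below); error handling is elided.
 *
 *   segment_off_t off = seg->get_write_ptr(); // writes start at the wp
 *   ceph::bufferlist bl;
 *   bl.append("payload");                     // assumption: any data, padded if unaligned
 *   return seg->write(off, std::move(bl)      // advances the wp
 *   ).safe_then([seg] {
 *     return seg->close();                    // waits for in-flight writes
 *   });
 */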

constexpr size_t PADDR_SIZE = sizeof(paddr_t);
class SegmentManager;

using SegmentManagerRef = std::unique_ptr<SegmentManager>;

class SegmentManager : public Device {
public:
  backend_type_t get_backend_type() const final {
    return backend_type_t::SEGMENTED;
  }

  using open_ertr = crimson::errorator<
    crimson::ct_error::input_output_error,
    crimson::ct_error::invarg,
    crimson::ct_error::enoent>;
  virtual open_ertr::future<SegmentRef> open(segment_id_t id) = 0;

  using release_ertr = crimson::errorator<
    crimson::ct_error::input_output_error,
    crimson::ct_error::invarg,
    crimson::ct_error::enoent>;
  virtual release_ertr::future<> release(segment_id_t id) = 0;

  /* Methods for discovering device geometry, the segment id set, etc. */
  virtual segment_off_t get_segment_size() const = 0;
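  // Illustrative arithmetic: with 64 GiB of available space and 64 MiB
  // segments, this returns 64 GiB / 64 MiB = 1024 segments.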
  virtual device_segment_id_t get_num_segments() const {
    ceph_assert(get_available_size() % get_segment_size() == 0);
    return ((device_segment_id_t)(get_available_size() / get_segment_size()));
  }

  virtual ~SegmentManager() {}

  static seastar::future<SegmentManagerRef>
  get_segment_manager(const std::string &device, device_type_t dtype);
};
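
/*
 * Example (illustrative sketch): obtaining a SegmentManager and opening
 * segment 0.  The device path and device_type_t::SSD are assumptions,
 * and error handling is elided.
 *
 *   return SegmentManager::get_segment_manager(
 *     "/path/to/device", device_type_t::SSD
 *   ).then([](SegmentManagerRef sm) {
 *     auto num_segments = sm->get_num_segments();
 *     return sm->open(segment_id_t(sm->get_device_id(), 0)
 *     ).safe_then([sm=std::move(sm)](SegmentRef seg) {
 *       // append via seg->write(seg->get_write_ptr(), bl), then close
 *       return seg->close();
 *     });
 *   });
 */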

}

WRITE_CLASS_DENC(
  crimson::os::seastore::block_shard_info_t
)
WRITE_CLASS_DENC(
  crimson::os::seastore::block_sm_superblock_t
)

#if FMT_VERSION >= 90000
template <> struct fmt::formatter<crimson::os::seastore::block_shard_info_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<crimson::os::seastore::block_sm_superblock_t> : fmt::ostream_formatter {};
#endif