]> git.proxmox.com Git - ceph.git/blob - ceph/src/test/crimson/seastore/test_randomblock_manager.cc
import quincy beta 17.1.0
[ceph.git] / ceph / src / test / crimson / seastore / test_randomblock_manager.cc
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3
4 #include "test/crimson/gtest_seastar.h"
5
6 #include <random>
7
8 #include "crimson/common/log.h"
9 #include "crimson/os/seastore/random_block_manager/nvme_manager.h"
10 #include "crimson/os/seastore/random_block_manager/nvmedevice.h"
11 #include "test/crimson/seastore/transaction_manager_test_state.h"
12
13 using namespace crimson;
14 using namespace crimson::os;
15 using namespace crimson::os::seastore;
16
17 namespace {
18 [[maybe_unused]] seastar::logger& logger() {
19 return crimson::get_logger(ceph_subsys_test);
20 }
21 }
22
// Size of the in-memory test device (1 MiB) and its block size (4 KiB).
constexpr uint64_t DEFAULT_TEST_SIZE = 1 << 20;
constexpr uint64_t DEFAULT_BLOCK_SIZE = 4096;
25
26 struct rbm_test_t :
27 public seastar_test_suite_t, TMTestState {
28 std::unique_ptr<NVMeManager> rbm_manager;
29 std::unique_ptr<nvme_device::NVMeBlockDevice> device;
30
31 struct rbm_transaction {
32 void add_rbm_allocated_blocks(rbm_alloc_delta_t &d) {
33 allocated_blocks.push_back(d);
34 }
35 void clear_rbm_allocated_blocks() {
36 if (!allocated_blocks.empty()) {
37 allocated_blocks.clear();
38 }
39 }
40 const auto &get_rbm_allocated_blocks() {
41 return allocated_blocks;
42 }
43 std::vector<rbm_alloc_delta_t> allocated_blocks;
44 };
45
46 std::default_random_engine generator;
47
48 const uint64_t block_size = DEFAULT_BLOCK_SIZE;
49
50 RandomBlockManager::mkfs_config_t config;
51 paddr_t current;
52
53 rbm_test_t() = default;
54
55 seastar::future<> set_up_fut() final {
56 device.reset(new nvme_device::TestMemory(DEFAULT_TEST_SIZE));
57 rbm_manager.reset(new NVMeManager(device.get(), std::string()));
58 config.start = paddr_t::make_seg_paddr(0, 0, 0);
59 config.end = paddr_t::make_seg_paddr(0, 0, DEFAULT_TEST_SIZE);
60 config.block_size = DEFAULT_BLOCK_SIZE;
61 config.total_size = DEFAULT_TEST_SIZE;
62 return tm_setup();
63 }
64
65 seastar::future<> tear_down_fut() final {
66 rbm_manager.reset();
67 device.reset();
68 return tm_teardown();
69 }
70
71 auto mkfs() {
72 return rbm_manager->mkfs(config).unsafe_get0();
73 }
74
75 auto read_rbm_header() {
76 blk_paddr_t addr = convert_paddr_to_blk_paddr(
77 config.start,
78 config.block_size,
79 config.blocks_per_segment);
80 return rbm_manager->read_rbm_header(addr).unsafe_get0();
81 }
82
83 auto open() {
84 return rbm_manager->open("", config.start).unsafe_get0();
85 }
86
87 auto write(uint64_t addr, bufferptr &ptr) {
88 return rbm_manager->write(addr, ptr).unsafe_get0();
89 }
90
91 auto read(uint64_t addr, bufferptr &ptr) {
92 return rbm_manager->read(addr, ptr).unsafe_get0();
93 }
94
95 auto create_rbm_transaction() {
96 return std::make_unique<rbm_transaction>();
97 }
98
99 auto alloc_extent(rbm_transaction &t, size_t size) {
100 auto tt = create_mutate_transaction(); // dummy transaction
101 auto extent = rbm_manager->find_free_block(*tt, size).unsafe_get0();
102 if (!extent.empty()) {
103 rbm_alloc_delta_t alloc_info;
104 for (auto p : extent) {
105 paddr_t paddr = convert_blk_paddr_to_paddr(
106 p.first * block_size,
107 block_size,
108 config.blocks_per_segment,
109 0);
110 size_t len = p.second * block_size;
111 alloc_info.alloc_blk_ranges.push_back(std::make_pair(paddr, len));
112 alloc_info.op = rbm_alloc_delta_t::op_types_t::SET;
113 }
114 t.add_rbm_allocated_blocks(alloc_info);
115 }
116 }
117
118 void free_extent(rbm_transaction &t, interval_set<blk_id_t> range) {
119 for (auto [off, len] : range) {
120 logger().debug("free_extent: start {} len {}", off * DEFAULT_BLOCK_SIZE,
121 len * DEFAULT_BLOCK_SIZE);
122 rbm_manager->add_free_extent(t.allocated_blocks, off * DEFAULT_BLOCK_SIZE,
123 len * DEFAULT_BLOCK_SIZE);
124 }
125 }
126
127 interval_set<blk_id_t> get_allocated_blk_ids(rbm_transaction &t) {
128 auto allocated_blocks = t.get_rbm_allocated_blocks();
129 interval_set<blk_id_t> alloc_ids;
130 for (auto p : allocated_blocks) {
131 for (auto b : p.alloc_blk_ranges) {
132 blk_paddr_t addr =
133 convert_paddr_to_blk_paddr(
134 b.first,
135 block_size,
136 config.blocks_per_segment);
137 alloc_ids.insert(addr / block_size, b.second / block_size);
138 }
139 }
140 logger().debug(" get allocated blockid {}", alloc_ids);
141 return alloc_ids;
142 }
143
144 bool check_ids_are_allocated(interval_set<blk_id_t> &ids, bool allocated = true) {
145 bool ret = true;
146 for (auto r : ids) {
147 for (blk_id_t id = r.first; id < r.first + r.second; id++) {
148 auto addr = rbm_manager->get_start_block_alloc_area() +
149 (id / rbm_manager->max_block_by_bitmap_block())
150 * DEFAULT_BLOCK_SIZE;
151 logger().debug(" addr {} id {} ", addr, id);
152 auto bp = bufferptr(ceph::buffer::create_page_aligned(DEFAULT_BLOCK_SIZE));
153 rbm_manager->read(addr, bp).unsafe_get0();
154 rbm_bitmap_block_t b_block(DEFAULT_BLOCK_SIZE);
155 bufferlist bl;
156 bl.append(bp);
157 auto b_bl = bl.cbegin();
158 decode(b_block, b_bl);
159 if (!b_block.is_allocated(id % rbm_manager->max_block_by_bitmap_block())) {
160 logger().debug(" block id {} is not allocated", id);
161 if (allocated) {
162 ret = false;
163 return ret;
164 }
165 } else {
166 logger().debug(" block id {} allocated", id);
167 if (!allocated) {
168 ret = false;
169 return ret;
170 }
171 }
172 }
173 }
174 return ret;
175 }
176
177 auto complete_allocation(rbm_transaction &t) {
178 auto alloc_blocks = t.get_rbm_allocated_blocks();
179 return rbm_manager->sync_allocation(alloc_blocks).unsafe_get0();
180 }
181
182 bufferptr generate_extent(size_t blocks) {
183 std::uniform_int_distribution<char> distribution(
184 std::numeric_limits<char>::min(),
185 std::numeric_limits<char>::max()
186 );
187 char contents = distribution(generator);
188 return buffer::ptr(buffer::create(blocks * block_size, contents));
189 }
190
191 };
192
// mkfs() must produce a superblock that reflects the configuration set up
// in set_up_fut().
TEST_F(rbm_test_t, mkfs_test)
{
  run_async([this] {
    mkfs();
    open();
    auto super = read_rbm_header();
    ASSERT_TRUE(
      super.block_size == DEFAULT_BLOCK_SIZE &&
      super.end == DEFAULT_TEST_SIZE &&
      super.start_alloc_area == DEFAULT_BLOCK_SIZE &&
      // two blocks are reserved at mkfs time (presumably the superblock
      // and the allocation bitmap area) — hence the "- 2"
      super.free_block_count == DEFAULT_TEST_SIZE / DEFAULT_BLOCK_SIZE - 2 &&
      super.alloc_area_size == DEFAULT_BLOCK_SIZE
    );

  });
}
209
210 TEST_F(rbm_test_t, open_test)
211 {
212 run_async([this] {
213 mkfs();
214 open();
215 auto content = generate_extent(1);
216 write(
217 DEFAULT_BLOCK_SIZE,
218 content
219 );
220 auto bp = bufferptr(ceph::buffer::create_page_aligned(DEFAULT_BLOCK_SIZE));
221 read(
222 DEFAULT_BLOCK_SIZE,
223 bp
224 );
225 bufferlist bl;
226 bufferlist block;
227 bl.append(bp);
228 block.append(content);
229 ASSERT_EQ(
230 bl.begin().crc32c(bl.length(), 1),
231 block.begin().crc32c(block.length(), 1));
232
233 });
234 }
235
236 TEST_F(rbm_test_t, block_alloc_test)
237 {
238 run_async([this] {
239 mkfs();
240 open();
241 auto t = create_rbm_transaction();
242 alloc_extent(*t, DEFAULT_BLOCK_SIZE);
243 auto alloc_ids = get_allocated_blk_ids(*t);
244 complete_allocation(*t);
245 ASSERT_TRUE(check_ids_are_allocated(alloc_ids));
246
247 auto t2 = create_rbm_transaction();
248 alloc_extent(*t2, DEFAULT_BLOCK_SIZE * 3);
249 alloc_ids = get_allocated_blk_ids(*t2);
250 complete_allocation(*t2);
251 ASSERT_TRUE(check_ids_are_allocated(alloc_ids));
252 });
253 }
254
// Exercises alloc+free both within a single transaction and across two
// transactions; after sync the freed ids must read back as not allocated.
TEST_F(rbm_test_t, block_alloc_free_test)
{
  run_async([this] {
    mkfs();
    open();
    // alloc then free one block inside the same transaction
    auto t = create_rbm_transaction();
    alloc_extent(*t, DEFAULT_BLOCK_SIZE);
    auto alloc_ids = get_allocated_blk_ids(*t);
    free_extent(*t, alloc_ids);
    complete_allocation(*t);
    ASSERT_TRUE(check_ids_are_allocated(alloc_ids, false));

    // same pattern with a 4-block extent
    auto t2 = create_rbm_transaction();
    alloc_extent(*t2, DEFAULT_BLOCK_SIZE * 4);
    alloc_ids = get_allocated_blk_ids(*t2);
    free_extent(*t2, alloc_ids);
    complete_allocation(*t2);
    ASSERT_TRUE(check_ids_are_allocated(alloc_ids, false));

    // alloc 8 blocks and sync: the ids must now be allocated ...
    auto t3 = create_rbm_transaction();
    alloc_extent(*t3, DEFAULT_BLOCK_SIZE * 8);
    alloc_ids = get_allocated_blk_ids(*t3);
    complete_allocation(*t3);
    ASSERT_TRUE(check_ids_are_allocated(alloc_ids));

    // ... and freeing them in a later transaction must clear them again
    auto t4 = create_rbm_transaction();
    free_extent(*t4, alloc_ids);
    complete_allocation(*t4);
    ASSERT_TRUE(check_ids_are_allocated(alloc_ids, false));
  });
}
286
// Stresses bitmap updates across multiple bitmap blocks by setting/clearing
// raw id ranges and checking allocation state at the range boundaries.
TEST_F(rbm_test_t, many_block_alloc)
{
  run_async([this] {
    // Enlarge the device (DEFAULT_TEST_SIZE * 1024) so that block ids span
    // several bitmap blocks; `max` below is the number of ids tracked by a
    // single bitmap block.
    config.start = paddr_t::make_seg_paddr(0, 0, 0);
    config.end = paddr_t::make_seg_paddr(0, 0, DEFAULT_TEST_SIZE * 1024);
    config.block_size = DEFAULT_BLOCK_SIZE;
    config.total_size = DEFAULT_TEST_SIZE * 1024;
    mkfs();
    open();
    auto max = rbm_manager->max_block_by_bitmap_block();
    // set a small range beyond the first bitmap block, then verify both a
    // sub-range and the full range read back as allocated
    rbm_manager->rbm_sync_block_bitmap_by_range(max + 10, max + 14, bitmap_op_types_t::ALL_SET).unsafe_get0();
    interval_set<blk_id_t> alloc_ids;
    alloc_ids.insert(max + 12, 2);
    ASSERT_TRUE(check_ids_are_allocated(alloc_ids));
    alloc_ids.clear();
    alloc_ids.insert(max + 10, 4);
    ASSERT_TRUE(check_ids_are_allocated(alloc_ids));
    // clearing the same range must flip the answer
    rbm_manager->rbm_sync_block_bitmap_by_range(max + 10, max + 14, bitmap_op_types_t::ALL_CLEAR).unsafe_get0();
    ASSERT_TRUE(check_ids_are_allocated(alloc_ids, false));
    // a range that crosses a bitmap-block boundary (max+10 .. 2*max+10)
    rbm_manager->rbm_sync_block_bitmap_by_range(max + 10, max + max + 10, bitmap_op_types_t::ALL_SET).unsafe_get0();
    alloc_ids.clear();
    alloc_ids.insert(max + 10000, 10);
    ASSERT_TRUE(check_ids_are_allocated(alloc_ids));
    alloc_ids.clear();
    alloc_ids.insert(max + max, 10);
    ASSERT_TRUE(check_ids_are_allocated(alloc_ids));
    // a multi-bitmap-block range, checked at both ends and just inside
    rbm_manager->rbm_sync_block_bitmap_by_range(max, max * 3, bitmap_op_types_t::ALL_SET).unsafe_get0();
    alloc_ids.clear();
    alloc_ids.insert(max * 3 - 1, 1);
    ASSERT_TRUE(check_ids_are_allocated(alloc_ids));
    alloc_ids.clear();
    alloc_ids.insert(max * 3, 1);
    ASSERT_TRUE(check_ids_are_allocated(alloc_ids));
    alloc_ids.clear();
    alloc_ids.insert(max, 1);
    ASSERT_TRUE(check_ids_are_allocated(alloc_ids));
    // extend the set range further and spot-check the new tail
    rbm_manager->rbm_sync_block_bitmap_by_range(max, max * 6, bitmap_op_types_t::ALL_SET).unsafe_get0();
    alloc_ids.clear();
    alloc_ids.insert(max * 5, 10);
    ASSERT_TRUE(check_ids_are_allocated(alloc_ids));
    alloc_ids.clear();
    alloc_ids.insert(max * 6, 1);
    ASSERT_TRUE(check_ids_are_allocated(alloc_ids));
    // clear everything and confirm the same ids now read back as free
    rbm_manager->rbm_sync_block_bitmap_by_range(max, max * 6, bitmap_op_types_t::ALL_CLEAR).unsafe_get0();
    alloc_ids.clear();
    alloc_ids.insert(max * 3, 10);
    ASSERT_TRUE(check_ids_are_allocated(alloc_ids, false));
    alloc_ids.clear();
    alloc_ids.insert(max * 5, 10);
    ASSERT_TRUE(check_ids_are_allocated(alloc_ids, false));
    alloc_ids.clear();
    alloc_ids.insert(max * 6, 1);
    ASSERT_TRUE(check_ids_are_allocated(alloc_ids, false));
  });
}
342
// The manager's free-block counter must track both direct bitmap writes
// (after a recount) and alloc/free transactions.
TEST_F(rbm_test_t, check_free_blocks)
{
  run_async([this] {
    mkfs();
    open();
    // mark ids 10..12 allocated directly in the bitmap, then ask the
    // manager to recount free blocks from disk
    rbm_manager->rbm_sync_block_bitmap_by_range(10, 12, bitmap_op_types_t::ALL_SET).unsafe_get0();
    rbm_manager->check_bitmap_blocks().unsafe_get0();
    // "- 5": 2 blocks reserved at mkfs time (see mkfs_test's "- 2") plus
    // the 3 blocks (10, 11, 12) set above
    ASSERT_TRUE(rbm_manager->get_free_blocks() == DEFAULT_TEST_SIZE/DEFAULT_BLOCK_SIZE - 5);
    auto free = rbm_manager->get_free_blocks();
    interval_set<blk_id_t> alloc_ids;
    auto t = create_rbm_transaction();
    alloc_extent(*t, DEFAULT_BLOCK_SIZE * 4);
    alloc_ids = get_allocated_blk_ids(*t);
    complete_allocation(*t);
    // allocating 4 blocks must drop the free count by exactly 4 ...
    ASSERT_TRUE(rbm_manager->get_free_blocks() == free - 4);

    free = rbm_manager->get_free_blocks();
    auto t2 = create_rbm_transaction();
    free_extent(*t2, alloc_ids);
    complete_allocation(*t2);
    // ... and freeing them must restore it
    ASSERT_TRUE(rbm_manager->get_free_blocks() == free + 4);
  });
}