ceph/src/test/objectstore/test_bluestore_types.cc
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3
4 #include "include/types.h"
5 #include "os/bluestore/bluestore_types.h"
6 #include "gtest/gtest.h"
7 #include "include/stringify.h"
8 #include "common/ceph_time.h"
9 #include "os/bluestore/BlueStore.h"
10 #include "os/bluestore/simple_bitmap.h"
11 #include "os/bluestore/AvlAllocator.h"
12 #include "common/ceph_argparse.h"
13 #include "global/global_init.h"
14 #include "global/global_context.h"
15 #include "perfglue/heap_profiler.h"
16
17 #include <sstream>
18
19 #define _STR(x) #x
20 #define STRINGIFY(x) _STR(x)
21
22 using namespace std;
23
24 TEST(bluestore, sizeof) {
25 #define P(t) cout << STRINGIFY(t) << "\t" << sizeof(t) << std::endl
26 P(BlueStore::Onode);
27 P(BlueStore::Extent);
28 P(BlueStore::Blob);
29 P(BlueStore::SharedBlob);
30 P(BlueStore::ExtentMap);
31 P(BlueStore::extent_map_t);
32 P(BlueStore::blob_map_t);
33 P(BlueStore::BufferSpace);
34 P(BlueStore::Buffer);
35 P(bluestore_onode_t);
36 P(bluestore_blob_t);
37 P(PExtentVector);
38 P(ghobject_t);
39 P(bluestore_shared_blob_t);
40 P(bluestore_extent_ref_map_t);
41 P(bluestore_extent_ref_map_t::record_t);
42 P(bluestore_blob_use_tracker_t);
43 P(std::atomic_int);
44 P(BlueStore::SharedBlobRef);
45 P(boost::intrusive::set_base_hook<>);
46 P(boost::intrusive::unordered_set_base_hook<>);
47 P(bufferlist);
48 P(bufferptr);
49 P(range_seg_t);
50 P(sb_info_t);
51 P(SimpleBitmap);
52 cout << "map<uint64_t,uint64_t>\t" << sizeof(map<uint64_t,uint64_t>) << std::endl;
53 cout << "map<char,char>\t" << sizeof(map<char,char>) << std::endl;
54 }
55
56 void dump_mempools()
57 {
58 ostringstream ostr;
59 Formatter* f = Formatter::create("json-pretty", "json-pretty", "json-pretty");
60 ostr << "Mempools: ";
61 f->open_object_section("mempools");
62 mempool::dump(f);
63 f->close_section();
64 f->flush(ostr);
65 delete f;
66 cout << ostr.str() << std::endl;
67 }
68 /*void get_mempool_stats(uint64_t* total_bytes, uint64_t* total_items)
69 {
70 uint64_t meta_allocated = mempool::bluestore_cache_meta::allocated_bytes();
71 uint64_t onode_allocated = mempool::bluestore_cache_onode::allocated_bytes();
72 uint64_t other_allocated = mempool::bluestore_cache_other::allocated_bytes();
73
74 uint64_t meta_items = mempool::bluestore_cache_meta::allocated_items();
75 uint64_t onode_items = mempool::bluestore_cache_onode::allocated_items();
76 uint64_t other_items = mempool::bluestore_cache_other::allocated_items();
77 cout << "meta(" << meta_allocated << "/" << meta_items
78 << ") onode(" << onode_allocated << "/" << onode_items
79 << ") other(" << other_allocated << "/" << other_items
80 << ")" << std::endl;
81 *total_bytes = meta_allocated + onode_allocated + other_allocated;
82 *total_items = onode_items;
83 }*/
84
85 TEST(sb_info_space_efficient_map_t, basic) {
86 sb_info_space_efficient_map_t sb_info;
87 const size_t num_shared = 1000;
88 for (size_t i = 0; i < num_shared; i += 2) {
89 auto& sbi = sb_info.add_maybe_stray(i);
90 sbi.pool_id = i;
91 }
92 ASSERT_TRUE(sb_info.find(0) != sb_info.end());
93 ASSERT_TRUE(sb_info.find(1) == sb_info.end());
94 ASSERT_TRUE(sb_info.find(2) != sb_info.end());
95 ASSERT_TRUE(sb_info.find(4)->pool_id == 4);
96 ASSERT_TRUE(sb_info.find(num_shared) == sb_info.end());
97
98 // ordered insertion
99 sb_info.add_or_adopt(num_shared).pool_id = num_shared;
100 ASSERT_TRUE(sb_info.find(num_shared) != sb_info.end());
101 ASSERT_TRUE(sb_info.find(num_shared)->pool_id == num_shared);
102
103 // out of order insertion
104 sb_info.add_or_adopt(1).pool_id = 1;
105 ASSERT_TRUE(sb_info.find(1) != sb_info.end());
106 ASSERT_TRUE(sb_info.find(1)->pool_id == 1);
107
108 // ordered insertion
109 sb_info.add_maybe_stray(num_shared + 1).pool_id = num_shared + 1;
110 ASSERT_TRUE(sb_info.find(num_shared + 1) != sb_info.end());
111 ASSERT_TRUE(sb_info.find(num_shared + 1)->pool_id == num_shared + 1);
112
113 // out of order insertion
114 sb_info.add_maybe_stray(105).pool_id = 105;
115 ASSERT_TRUE(sb_info.find(105) != sb_info.end());
116 ASSERT_TRUE(sb_info.find(105)->pool_id == 105);
117 }
118
119 TEST(sb_info_space_efficient_map_t, size) {
120 const size_t num_shared = 10000000;
121 sb_info_space_efficient_map_t sb_info;
122
123 BlueStore store(g_ceph_context, "", 4096);
124 BlueStore::OnodeCacheShard* oc = BlueStore::OnodeCacheShard::create(
125 g_ceph_context, "lru", NULL);
126 BlueStore::BufferCacheShard* bc = BlueStore::BufferCacheShard::create(
127 g_ceph_context, "lru", NULL);
128
129 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
130
131 for (size_t i = 0; i < num_shared; i++) {
132 auto& sbi = sb_info.add_or_adopt(i);
133     // primarily to silence the 'unused' warning
134 ceph_assert(sbi.pool_id == sb_info_t::INVALID_POOL_ID);
135 }
136 dump_mempools();
137 }
138
139 TEST(bluestore_extent_ref_map_t, add)
140 {
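  // NB: adjacent ranges acquired with the same reference count are coalesced
  // into a single ref_map entry, which is what the size checks below verify.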
141 bluestore_extent_ref_map_t m;
142 m.get(10, 10);
143 ASSERT_EQ(1u, m.ref_map.size());
144 cout << m << std::endl;
145 m.get(20, 10);
146 cout << m << std::endl;
147 ASSERT_EQ(1u, m.ref_map.size());
148 ASSERT_EQ(20u, m.ref_map[10].length);
149 ASSERT_EQ(1u, m.ref_map[10].refs);
150 m.get(40, 10);
151 cout << m << std::endl;
152 ASSERT_EQ(2u, m.ref_map.size());
153 m.get(30, 10);
154 cout << m << std::endl;
155 ASSERT_EQ(1u, m.ref_map.size());
156 m.get(50, 10);
157 cout << m << std::endl;
158 ASSERT_EQ(1u, m.ref_map.size());
159 m.get(5, 5);
160 cout << m << std::endl;
161 ASSERT_EQ(1u, m.ref_map.size());
162 }
163
164 TEST(bluestore_extent_ref_map_t, get)
165 {
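  // NB: get() on a range that overlaps existing entries splits them and bumps
  // the refcount only on the overlapped portions.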
166 bluestore_extent_ref_map_t m;
167 m.get(00, 30);
168 cout << m << std::endl;
169 m.get(10, 10);
170 cout << m << std::endl;
171 ASSERT_EQ(3u, m.ref_map.size());
172 ASSERT_EQ(10u, m.ref_map[0].length);
173 ASSERT_EQ(1u, m.ref_map[0].refs);
174 ASSERT_EQ(10u, m.ref_map[10].length);
175 ASSERT_EQ(2u, m.ref_map[10].refs);
176 ASSERT_EQ(10u, m.ref_map[20].length);
177 ASSERT_EQ(1u, m.ref_map[20].refs);
178 m.get(20, 5);
179 cout << m << std::endl;
180 ASSERT_EQ(3u, m.ref_map.size());
181 ASSERT_EQ(15u, m.ref_map[10].length);
182 ASSERT_EQ(2u, m.ref_map[10].refs);
183 ASSERT_EQ(5u, m.ref_map[25].length);
184 ASSERT_EQ(1u, m.ref_map[25].refs);
185 m.get(5, 20);
186 cout << m << std::endl;
187 ASSERT_EQ(4u, m.ref_map.size());
188 ASSERT_EQ(5u, m.ref_map[0].length);
189 ASSERT_EQ(1u, m.ref_map[0].refs);
190 ASSERT_EQ(5u, m.ref_map[5].length);
191 ASSERT_EQ(2u, m.ref_map[5].refs);
192 ASSERT_EQ(15u, m.ref_map[10].length);
193 ASSERT_EQ(3u, m.ref_map[10].refs);
194 ASSERT_EQ(5u, m.ref_map[25].length);
195 ASSERT_EQ(1u, m.ref_map[25].refs);
196 m.get(25, 3);
197 cout << m << std::endl;
198 ASSERT_EQ(5u, m.ref_map.size());
199 ASSERT_EQ(5u, m.ref_map[0].length);
200 ASSERT_EQ(1u, m.ref_map[0].refs);
201 ASSERT_EQ(5u, m.ref_map[5].length);
202 ASSERT_EQ(2u, m.ref_map[5].refs);
203 ASSERT_EQ(15u, m.ref_map[10].length);
204 ASSERT_EQ(3u, m.ref_map[10].refs);
205 ASSERT_EQ(3u, m.ref_map[25].length);
206 ASSERT_EQ(2u, m.ref_map[25].refs);
207 ASSERT_EQ(2u, m.ref_map[28].length);
208 ASSERT_EQ(1u, m.ref_map[28].refs);
209 }
210
211 TEST(bluestore_extent_ref_map_t, put)
212 {
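  // NB: put() drops references and reports, via maybe_unshared, whether every
  // remaining range is down to a single reference.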
213 bluestore_extent_ref_map_t m;
214 PExtentVector r;
215 bool maybe_unshared = false;
216 m.get(10, 30);
217 maybe_unshared = true;
218 m.put(10, 30, &r, &maybe_unshared);
219 cout << m << " " << r << " " << (int)maybe_unshared << std::endl;
220 ASSERT_EQ(0u, m.ref_map.size());
221 ASSERT_EQ(1u, r.size());
222 ASSERT_EQ(10u, r[0].offset);
223 ASSERT_EQ(30u, r[0].length);
224 ASSERT_TRUE(maybe_unshared);
225 r.clear();
226 m.get(10, 30);
227 m.get(20, 10);
228 maybe_unshared = true;
229 m.put(10, 30, &r, &maybe_unshared);
230 cout << m << " " << r << " " << (int)maybe_unshared << std::endl;
231 ASSERT_EQ(1u, m.ref_map.size());
232 ASSERT_EQ(10u, m.ref_map[20].length);
233 ASSERT_EQ(1u, m.ref_map[20].refs);
234 ASSERT_EQ(2u, r.size());
235 ASSERT_EQ(10u, r[0].offset);
236 ASSERT_EQ(10u, r[0].length);
237 ASSERT_EQ(30u, r[1].offset);
238 ASSERT_EQ(10u, r[1].length);
239 ASSERT_TRUE(maybe_unshared);
240 r.clear();
241 m.get(30, 10);
242 m.get(30, 10);
243 maybe_unshared = true;
244 m.put(20, 15, &r, &maybe_unshared);
245 cout << m << " " << r << " " << (int)maybe_unshared << std::endl;
246 ASSERT_EQ(2u, m.ref_map.size());
247 ASSERT_EQ(5u, m.ref_map[30].length);
248 ASSERT_EQ(1u, m.ref_map[30].refs);
249 ASSERT_EQ(5u, m.ref_map[35].length);
250 ASSERT_EQ(2u, m.ref_map[35].refs);
251 ASSERT_EQ(1u, r.size());
252 ASSERT_EQ(20u, r[0].offset);
253 ASSERT_EQ(10u, r[0].length);
254 ASSERT_FALSE(maybe_unshared);
255 r.clear();
256 maybe_unshared = true;
257 m.put(33, 5, &r, &maybe_unshared);
258 cout << m << " " << r << " " << (int)maybe_unshared << std::endl;
259 ASSERT_EQ(3u, m.ref_map.size());
260 ASSERT_EQ(3u, m.ref_map[30].length);
261 ASSERT_EQ(1u, m.ref_map[30].refs);
262 ASSERT_EQ(3u, m.ref_map[35].length);
263 ASSERT_EQ(1u, m.ref_map[35].refs);
264 ASSERT_EQ(2u, m.ref_map[38].length);
265 ASSERT_EQ(2u, m.ref_map[38].refs);
266 ASSERT_EQ(1u, r.size());
267 ASSERT_EQ(33u, r[0].offset);
268 ASSERT_EQ(2u, r[0].length);
269 ASSERT_FALSE(maybe_unshared);
270 r.clear();
271 maybe_unshared = true;
272 m.put(38, 2, &r, &maybe_unshared);
273 cout << m << " " << r << " " << (int)maybe_unshared << std::endl;
274 ASSERT_TRUE(maybe_unshared);
275 }
276
277 TEST(bluestore_extent_ref_map_t, contains)
278 {
279 bluestore_extent_ref_map_t m;
280 m.get(10, 30);
281 ASSERT_TRUE(m.contains(10, 30));
282 ASSERT_TRUE(m.contains(10, 10));
283 ASSERT_TRUE(m.contains(30, 10));
284 ASSERT_FALSE(m.contains(0, 10));
285 ASSERT_FALSE(m.contains(0, 20));
286 ASSERT_FALSE(m.contains(0, 100));
287 ASSERT_FALSE(m.contains(40, 10));
288 ASSERT_FALSE(m.contains(30, 11));
289 m.get(40, 10);
290 m.get(40, 10);
291 ASSERT_TRUE(m.contains(30, 11));
292 ASSERT_TRUE(m.contains(30, 20));
293 ASSERT_TRUE(m.contains(10, 40));
294 ASSERT_FALSE(m.contains(0, 50));
295 ASSERT_FALSE(m.contains(40, 20));
296 m.get(60, 100);
297 ASSERT_TRUE(m.contains(60, 10));
298 ASSERT_TRUE(m.contains(40, 10));
299 ASSERT_FALSE(m.contains(40, 11));
300 ASSERT_FALSE(m.contains(40, 20));
301 ASSERT_FALSE(m.contains(40, 30));
302 ASSERT_FALSE(m.contains(40, 3000));
303 ASSERT_FALSE(m.contains(4000, 30));
304 }
305
306 TEST(bluestore_extent_ref_map_t, intersects)
307 {
308 bluestore_extent_ref_map_t m;
309 m.get(10, 30);
310 ASSERT_TRUE(m.intersects(10, 30));
311 ASSERT_TRUE(m.intersects(0, 11));
312 ASSERT_TRUE(m.intersects(10, 40));
313 ASSERT_TRUE(m.intersects(15, 40));
314 ASSERT_FALSE(m.intersects(0, 10));
315 ASSERT_FALSE(m.intersects(0, 5));
316 ASSERT_FALSE(m.intersects(40, 20));
317 ASSERT_FALSE(m.intersects(41, 20));
318 m.get(40, 10);
319 m.get(40, 10);
320 ASSERT_TRUE(m.intersects(0, 100));
321 ASSERT_TRUE(m.intersects(10, 35));
322 ASSERT_TRUE(m.intersects(45, 10));
323 ASSERT_FALSE(m.intersects(50, 5));
324 m.get(60, 100);
325 ASSERT_TRUE(m.intersects(45, 10));
326 ASSERT_TRUE(m.intersects(55, 10));
327 ASSERT_TRUE(m.intersects(50, 11));
328 ASSERT_FALSE(m.intersects(50, 10));
329 ASSERT_FALSE(m.intersects(51, 9));
330 ASSERT_FALSE(m.intersects(55, 1));
331 }
332
333 TEST(bluestore_blob_t, calc_csum)
334 {
335 bufferlist bl;
336 bl.append("asdfghjkqwertyuizxcvbnm,");
337 bufferlist bl2;
338 bl2.append("xxxxXXXXyyyyYYYYzzzzZZZZ");
339 bufferlist f;
340 f.substr_of(bl, 0, 8);
341 bufferlist m;
342 m.substr_of(bl, 8, 8);
343 bufferlist e;
344 e.substr_of(bl, 16, 8);
345 bufferlist n;
346 n.append("12345678");
347
348 for (unsigned csum_type = Checksummer::CSUM_NONE + 1;
349 csum_type < Checksummer::CSUM_MAX;
350 ++csum_type) {
351 cout << "csum_type " << Checksummer::get_csum_type_string(csum_type)
352 << std::endl;
353
354 bluestore_blob_t b;
355 int bad_off;
356 uint64_t bad_csum;
357 ASSERT_EQ(0, b.verify_csum(0, bl, &bad_off, &bad_csum));
358 ASSERT_EQ(-1, bad_off);
359
360 b.init_csum(csum_type, 3, 24);
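    // chunk order 3 -> 8-byte csum blocks; the 24-byte payload spans three
    // blocks, matching the 8-byte slices f, m and e prepared above.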
361 cout << " value size " << b.get_csum_value_size() << std::endl;
362 b.calc_csum(0, bl);
363 ASSERT_EQ(0, b.verify_csum(0, bl, &bad_off, &bad_csum));
364 ASSERT_EQ(-1, bad_off);
365 ASSERT_EQ(-1, b.verify_csum(0, bl2, &bad_off, &bad_csum));
366 ASSERT_EQ(0, bad_off);
367
368 ASSERT_EQ(0, b.verify_csum(0, f, &bad_off, &bad_csum));
369 ASSERT_EQ(-1, bad_off);
370 ASSERT_EQ(-1, b.verify_csum(8, f, &bad_off, &bad_csum));
371 ASSERT_EQ(8, bad_off);
372 ASSERT_EQ(-1, b.verify_csum(16, f, &bad_off, &bad_csum));
373 ASSERT_EQ(16, bad_off);
374
375 ASSERT_EQ(-1, b.verify_csum(0, m, &bad_off, &bad_csum));
376 ASSERT_EQ(0, bad_off);
377 ASSERT_EQ(0, b.verify_csum(8, m, &bad_off, &bad_csum));
378 ASSERT_EQ(-1, bad_off);
379 ASSERT_EQ(-1, b.verify_csum(16, m, &bad_off, &bad_csum));
380 ASSERT_EQ(16, bad_off);
381
382 ASSERT_EQ(-1, b.verify_csum(0, e, &bad_off, &bad_csum));
383 ASSERT_EQ(0, bad_off);
384 ASSERT_EQ(-1, b.verify_csum(8, e, &bad_off, &bad_csum));
385 ASSERT_EQ(8, bad_off);
386 ASSERT_EQ(0, b.verify_csum(16, e, &bad_off, &bad_csum));
387 ASSERT_EQ(-1, bad_off);
388
389 b.calc_csum(8, n);
390 ASSERT_EQ(0, b.verify_csum(0, f, &bad_off, &bad_csum));
391 ASSERT_EQ(-1, bad_off);
392 ASSERT_EQ(0, b.verify_csum(8, n, &bad_off, &bad_csum));
393 ASSERT_EQ(-1, bad_off);
394 ASSERT_EQ(0, b.verify_csum(16, e, &bad_off, &bad_csum));
395 ASSERT_EQ(-1, bad_off);
396 ASSERT_EQ(-1, b.verify_csum(0, bl, &bad_off, &bad_csum));
397 ASSERT_EQ(8, bad_off);
398 }
399 }
400
401 TEST(bluestore_blob_t, csum_bench)
402 {
403 bufferlist bl;
404 bufferptr bp(10485760);
405 for (char *a = bp.c_str(); a < bp.c_str() + bp.length(); ++a)
406 *a = (unsigned long)a & 0xff;
407 bl.append(bp);
408 int count = 256;
409 for (unsigned csum_type = 1;
410 csum_type < Checksummer::CSUM_MAX;
411 ++csum_type) {
412 bluestore_blob_t b;
413 b.init_csum(csum_type, 12, bl.length());
414 ceph::mono_clock::time_point start = ceph::mono_clock::now();
415 for (int i = 0; i<count; ++i) {
416 b.calc_csum(0, bl);
417 }
418 ceph::mono_clock::time_point end = ceph::mono_clock::now();
419 auto dur = std::chrono::duration_cast<ceph::timespan>(end - start);
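    // throughput = bytes processed (count * bl.length()) over 'dur'
    // nanoseconds, scaled to MB/sec below.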
420 double mbsec = (double)count * (double)bl.length() / 1000000.0 / (double)dur.count() * 1000000000.0;
421 cout << "csum_type " << Checksummer::get_csum_type_string(csum_type)
422 << ", " << dur << " seconds, "
423 << mbsec << " MB/sec" << std::endl;
424 }
425 }
426
427 TEST(Blob, put_ref)
428 {
429 {
430 BlueStore store(g_ceph_context, "", 4096);
431 BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
432 g_ceph_context, "lru", NULL);
433 BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
434 g_ceph_context, "lru", NULL);
435
436 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
437 BlueStore::Blob b;
438 b.shared_blob = new BlueStore::SharedBlob(coll.get());
439 b.dirty_blob().allocated_test(bluestore_pextent_t(0x40715000, 0x2000));
440 b.dirty_blob().allocated_test(
441 bluestore_pextent_t(bluestore_pextent_t::INVALID_OFFSET, 0x8000));
442 b.dirty_blob().allocated_test(bluestore_pextent_t(0x4071f000, 0x5000));
443 b.get_ref(coll.get(), 0, 0x1200);
444 b.get_ref(coll.get(), 0xae00, 0x4200);
445 ASSERT_EQ(0x5400u, b.get_referenced_bytes());
446 cout << b << std::endl;
447 PExtentVector r;
448
449 ASSERT_FALSE(b.put_ref(coll.get(), 0, 0x1200, &r));
450 ASSERT_EQ(0x4200u, b.get_referenced_bytes());
451 cout << " r " << r << std::endl;
452 cout << b << std::endl;
453
454 r.clear();
455 ASSERT_TRUE(b.put_ref(coll.get(), 0xae00, 0x4200, &r));
456 ASSERT_EQ(0u, b.get_referenced_bytes());
457 cout << " r " << r << std::endl;
458 cout << b << std::endl;
459 }
460
461 unsigned mas = 4096;
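  // NB: the store below uses min_alloc_size = 0x2000 (8192), twice 'mas';
  // that granularity is what the alignment comment in a later sub-case refers to.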
462 BlueStore store(g_ceph_context, "", 8192);
463 BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
464 g_ceph_context, "lru", NULL);
465 BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
466 g_ceph_context, "lru", NULL);
467 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
468
469 {
470 BlueStore::Blob B;
471 B.shared_blob = new BlueStore::SharedBlob(coll.get());
472 bluestore_blob_t& b = B.dirty_blob();
473 PExtentVector r;
474 b.allocated_test(bluestore_pextent_t(0, mas * 2));
475 B.get_ref(coll.get(), 0, mas*2);
476 ASSERT_EQ(mas * 2, B.get_referenced_bytes());
477 ASSERT_TRUE(b.is_allocated(0, mas*2));
478 ASSERT_TRUE(B.put_ref(coll.get(), 0, mas*2, &r));
479 ASSERT_EQ(0u, B.get_referenced_bytes());
480 cout << "r " << r << " " << b << std::endl;
481 ASSERT_EQ(1u, r.size());
482 ASSERT_EQ(0u, r[0].offset);
483 ASSERT_EQ(mas*2, r[0].length);
484 ASSERT_FALSE(b.is_allocated(0, mas*2));
485 ASSERT_FALSE(b.is_allocated(0, mas));
486 ASSERT_FALSE(b.is_allocated(mas, 0));
487 ASSERT_FALSE(b.get_extents()[0].is_valid());
488 ASSERT_EQ(mas*2, b.get_extents()[0].length);
489 }
490 {
491 BlueStore::Blob B;
492 B.shared_blob = new BlueStore::SharedBlob(coll.get());
493 bluestore_blob_t& b = B.dirty_blob();
494 PExtentVector r;
495 b.allocated_test(bluestore_pextent_t(123, mas * 2));
496 B.get_ref(coll.get(), 0, mas*2);
497 ASSERT_EQ(mas * 2, B.get_referenced_bytes());
498 ASSERT_FALSE(B.put_ref(coll.get(), 0, mas, &r));
499 ASSERT_EQ(mas, B.get_referenced_bytes());
500 cout << "r " << r << " " << b << std::endl;
501 ASSERT_EQ(0u, r.size());
502 ASSERT_TRUE(b.is_allocated(0, mas*2));
503 ASSERT_TRUE(B.put_ref(coll.get(), mas, mas, &r));
504 ASSERT_EQ(0u, B.get_referenced_bytes());
505 ASSERT_EQ(0u, B.get_referenced_bytes());
506 cout << "r " << r << " " << b << std::endl;
507 ASSERT_EQ(1u, r.size());
508 ASSERT_EQ(123u, r[0].offset);
509 ASSERT_EQ(mas*2, r[0].length);
510 ASSERT_FALSE(b.is_allocated(0, mas*2));
511 ASSERT_FALSE(b.get_extents()[0].is_valid());
512 ASSERT_EQ(mas*2, b.get_extents()[0].length);
513 }
514 {
515 BlueStore::Blob B;
516 B.shared_blob = new BlueStore::SharedBlob(coll.get());
517 bluestore_blob_t& b = B.dirty_blob();
518 PExtentVector r;
519 b.allocated_test(bluestore_pextent_t(1, mas));
520 b.allocated_test(bluestore_pextent_t(2, mas));
521 b.allocated_test(bluestore_pextent_t(3, mas));
522 b.allocated_test(bluestore_pextent_t(4, mas));
523 B.get_ref(coll.get(), 0, mas*4);
524 ASSERT_EQ(mas * 4, B.get_referenced_bytes());
525 ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r));
526 ASSERT_EQ(mas * 3, B.get_referenced_bytes());
527 cout << "r " << r << " " << b << std::endl;
528 ASSERT_EQ(0u, r.size());
529 ASSERT_TRUE(b.is_allocated(0, mas*4));
530 ASSERT_TRUE(b.is_allocated(mas, mas));
531 ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas, &r));
532 ASSERT_EQ(mas * 2, B.get_referenced_bytes());
533 cout << "r " << r << " " << b << std::endl;
534 ASSERT_EQ(0u, r.size());
535 ASSERT_TRUE(b.is_allocated(mas*2, mas));
536 ASSERT_TRUE(b.is_allocated(0, mas*4));
537 ASSERT_FALSE(B.put_ref(coll.get(), mas*3, mas, &r));
538 ASSERT_EQ(mas, B.get_referenced_bytes());
539 cout << "r " << r << " " << b << std::endl;
540 ASSERT_EQ(2u, r.size());
541 ASSERT_EQ(3u, r[0].offset);
542 ASSERT_EQ(mas, r[0].length);
543 ASSERT_EQ(4u, r[1].offset);
544 ASSERT_EQ(mas, r[1].length);
545 ASSERT_TRUE(b.is_allocated(0, mas*2));
546 ASSERT_FALSE(b.is_allocated(mas*2, mas*2));
547 ASSERT_TRUE(b.get_extents()[0].is_valid());
548 ASSERT_TRUE(b.get_extents()[1].is_valid());
549 ASSERT_FALSE(b.get_extents()[2].is_valid());
550 ASSERT_EQ(3u, b.get_extents().size());
551 }
552 {
553 BlueStore::Blob B;
554 B.shared_blob = new BlueStore::SharedBlob(coll.get());
555 bluestore_blob_t& b = B.dirty_blob();
556 PExtentVector r;
557 b.allocated_test(bluestore_pextent_t(1, mas));
558 b.allocated_test(bluestore_pextent_t(2, mas));
559 b.allocated_test(bluestore_pextent_t(3, mas));
560 b.allocated_test(bluestore_pextent_t(4, mas));
561 b.allocated_test(bluestore_pextent_t(5, mas));
562 b.allocated_test(bluestore_pextent_t(6, mas));
563 B.get_ref(coll.get(), 0, mas*6);
564 ASSERT_EQ(mas * 6, B.get_referenced_bytes());
565 ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r));
566 ASSERT_EQ(mas * 5, B.get_referenced_bytes());
567 cout << "r " << r << " " << b << std::endl;
568 ASSERT_EQ(0u, r.size());
569 ASSERT_TRUE(b.is_allocated(0, mas*6));
570 ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas, &r));
571 ASSERT_EQ(mas * 4, B.get_referenced_bytes());
572 cout << "r " << r << " " << b << std::endl;
573 ASSERT_EQ(0u, r.size());
574 ASSERT_TRUE(b.is_allocated(0, mas*6));
575 ASSERT_FALSE(B.put_ref(coll.get(), mas*3, mas, &r));
576 ASSERT_EQ(mas * 3, B.get_referenced_bytes());
577 cout << "r " << r << " " << b << std::endl;
578 ASSERT_EQ(2u, r.size());
579 ASSERT_EQ(3u, r[0].offset);
580 ASSERT_EQ(mas, r[0].length);
581 ASSERT_EQ(4u, r[1].offset);
582 ASSERT_EQ(mas, r[1].length);
583 ASSERT_TRUE(b.is_allocated(0, mas*2));
584 ASSERT_FALSE(b.is_allocated(mas*2, mas*2));
585 ASSERT_TRUE(b.is_allocated(mas*4, mas*2));
586 ASSERT_EQ(5u, b.get_extents().size());
587 ASSERT_TRUE(b.get_extents()[0].is_valid());
588 ASSERT_TRUE(b.get_extents()[1].is_valid());
589 ASSERT_FALSE(b.get_extents()[2].is_valid());
590 ASSERT_TRUE(b.get_extents()[3].is_valid());
591 ASSERT_TRUE(b.get_extents()[4].is_valid());
592 }
593 {
594 BlueStore::Blob B;
595 B.shared_blob = new BlueStore::SharedBlob(coll.get());
596 bluestore_blob_t& b = B.dirty_blob();
597 PExtentVector r;
598 b.allocated_test(bluestore_pextent_t(1, mas * 6));
599 B.get_ref(coll.get(), 0, mas*6);
600 ASSERT_EQ(mas * 6, B.get_referenced_bytes());
601 ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r));
602 ASSERT_EQ(mas * 5, B.get_referenced_bytes());
603 cout << "r " << r << " " << b << std::endl;
604 ASSERT_EQ(0u, r.size());
605 ASSERT_TRUE(b.is_allocated(0, mas*6));
606 ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas, &r));
607 ASSERT_EQ(mas * 4, B.get_referenced_bytes());
608 cout << "r " << r << " " << b << std::endl;
609 ASSERT_EQ(0u, r.size());
610 ASSERT_TRUE(b.is_allocated(0, mas*6));
611 ASSERT_FALSE(B.put_ref(coll.get(), mas*3, mas, &r));
612 ASSERT_EQ(mas * 3, B.get_referenced_bytes());
613 cout << "r " << r << " " << b << std::endl;
614 ASSERT_EQ(1u, r.size());
615 ASSERT_EQ(0x2001u, r[0].offset);
616 ASSERT_EQ(mas*2, r[0].length);
617 ASSERT_TRUE(b.is_allocated(0, mas*2));
618 ASSERT_FALSE(b.is_allocated(mas*2, mas*2));
619 ASSERT_TRUE(b.is_allocated(mas*4, mas*2));
620 ASSERT_EQ(3u, b.get_extents().size());
621 ASSERT_TRUE(b.get_extents()[0].is_valid());
622 ASSERT_FALSE(b.get_extents()[1].is_valid());
623 ASSERT_TRUE(b.get_extents()[2].is_valid());
624 }
625 {
626 BlueStore::Blob B;
627 B.shared_blob = new BlueStore::SharedBlob(coll.get());
628 bluestore_blob_t& b = B.dirty_blob();
629 PExtentVector r;
630 b.allocated_test(bluestore_pextent_t(1, mas * 4));
631 b.allocated_test(bluestore_pextent_t(2, mas * 4));
632 b.allocated_test(bluestore_pextent_t(3, mas * 4));
633 B.get_ref(coll.get(), 0, mas*12);
634 ASSERT_EQ(mas * 12, B.get_referenced_bytes());
635 ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r));
636 ASSERT_EQ(mas * 11, B.get_referenced_bytes());
637 cout << "r " << r << " " << b << std::endl;
638 ASSERT_EQ(0u, r.size());
639 ASSERT_TRUE(b.is_allocated(0, mas*12));
640 ASSERT_FALSE(B.put_ref(coll.get(), mas*9, mas, &r));
641 ASSERT_EQ(mas * 10, B.get_referenced_bytes());
642 cout << "r " << r << " " << b << std::endl;
643 ASSERT_EQ(0u, r.size());
644 ASSERT_TRUE(b.is_allocated(0, mas*12));
645 ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas*7, &r));
646 ASSERT_EQ(mas * 3, B.get_referenced_bytes());
647 cout << "r " << r << " " << b << std::endl;
648 ASSERT_EQ(3u, r.size());
649 ASSERT_EQ(0x2001u, r[0].offset);
650 ASSERT_EQ(mas*2, r[0].length);
651 ASSERT_EQ(0x2u, r[1].offset);
652 ASSERT_EQ(mas*4, r[1].length);
653 ASSERT_EQ(0x3u, r[2].offset);
654 ASSERT_EQ(mas*2, r[2].length);
655 ASSERT_TRUE(b.is_allocated(0, mas*2));
656 ASSERT_FALSE(b.is_allocated(mas*2, mas*8));
657 ASSERT_TRUE(b.is_allocated(mas*10, mas*2));
658 ASSERT_EQ(3u, b.get_extents().size());
659 ASSERT_TRUE(b.get_extents()[0].is_valid());
660 ASSERT_FALSE(b.get_extents()[1].is_valid());
661 ASSERT_TRUE(b.get_extents()[2].is_valid());
662 }
663 {
664 BlueStore::Blob B;
665 B.shared_blob = new BlueStore::SharedBlob(coll.get());
666 bluestore_blob_t& b = B.dirty_blob();
667 PExtentVector r;
668 b.allocated_test(bluestore_pextent_t(1, mas * 4));
669 b.allocated_test(bluestore_pextent_t(2, mas * 4));
670 b.allocated_test(bluestore_pextent_t(3, mas * 4));
671 B.get_ref(coll.get(), 0, mas*12);
672 ASSERT_EQ(mas * 12, B.get_referenced_bytes());
673 ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r));
674 ASSERT_EQ(mas * 11, B.get_referenced_bytes());
675 cout << "r " << r << " " << b << std::endl;
676 ASSERT_EQ(0u, r.size());
677 ASSERT_TRUE(b.is_allocated(0, mas*12));
678 ASSERT_FALSE(B.put_ref(coll.get(), mas*9, mas, &r));
679 ASSERT_EQ(mas * 10, B.get_referenced_bytes());
680 cout << "r " << r << " " << b << std::endl;
681 ASSERT_EQ(0u, r.size());
682 ASSERT_TRUE(b.is_allocated(0, mas*12));
683 ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas*7, &r));
684 ASSERT_EQ(mas * 3, B.get_referenced_bytes());
685 cout << "r " << r << " " << b << std::endl;
686 ASSERT_EQ(3u, r.size());
687 ASSERT_EQ(0x2001u, r[0].offset);
688 ASSERT_EQ(mas*2, r[0].length);
689 ASSERT_EQ(0x2u, r[1].offset);
690 ASSERT_EQ(mas*4, r[1].length);
691 ASSERT_EQ(0x3u, r[2].offset);
692 ASSERT_EQ(mas*2, r[2].length);
693 ASSERT_TRUE(b.is_allocated(0, mas*2));
694 ASSERT_FALSE(b.is_allocated(mas*2, mas*8));
695 ASSERT_TRUE(b.is_allocated(mas*10, mas*2));
696 ASSERT_EQ(3u, b.get_extents().size());
697 ASSERT_TRUE(b.get_extents()[0].is_valid());
698 ASSERT_FALSE(b.get_extents()[1].is_valid());
699 ASSERT_TRUE(b.get_extents()[2].is_valid());
700 ASSERT_FALSE(B.put_ref(coll.get(), 0, mas, &r));
701 ASSERT_EQ(mas * 2, B.get_referenced_bytes());
702 cout << "r " << r << " " << b << std::endl;
703 ASSERT_EQ(1u, r.size());
704 ASSERT_EQ(0x1u, r[0].offset);
705 ASSERT_EQ(mas*2, r[0].length);
706 ASSERT_EQ(2u, b.get_extents().size());
707 ASSERT_FALSE(b.get_extents()[0].is_valid());
708 ASSERT_TRUE(b.get_extents()[1].is_valid());
709 ASSERT_TRUE(B.put_ref(coll.get(), mas*10, mas*2, &r));
710 ASSERT_EQ(mas * 0, B.get_referenced_bytes());
711 cout << "r " << r << " " << b << std::endl;
712 ASSERT_EQ(1u, r.size());
713 ASSERT_EQ(0x2003u, r[0].offset);
714 ASSERT_EQ(mas*2, r[0].length);
715 ASSERT_EQ(1u, b.get_extents().size());
716 ASSERT_FALSE(b.get_extents()[0].is_valid());
717 }
718 {
719 BlueStore::Blob B;
720 B.shared_blob = new BlueStore::SharedBlob(coll.get());
721 bluestore_blob_t& b = B.dirty_blob();
722 PExtentVector r;
723 b.allocated_test(bluestore_pextent_t(1, mas * 4));
724 b.allocated_test(bluestore_pextent_t(2, mas * 4));
725 b.allocated_test(bluestore_pextent_t(3, mas * 4));
726 B.get_ref(coll.get(), 0, mas*12);
727 ASSERT_EQ(mas * 12, B.get_referenced_bytes());
728 ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r));
729 ASSERT_EQ(mas * 11, B.get_referenced_bytes());
730 cout << "r " << r << " " << b << std::endl;
731 ASSERT_EQ(0u, r.size());
732 ASSERT_TRUE(b.is_allocated(0, mas*12));
733 ASSERT_FALSE(B.put_ref(coll.get(), mas*9, mas, &r));
734 ASSERT_EQ(mas * 10, B.get_referenced_bytes());
735 cout << "r " << r << " " << b << std::endl;
736 ASSERT_EQ(0u, r.size());
737 ASSERT_TRUE(b.is_allocated(0, mas*12));
738 ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas*7, &r));
739 ASSERT_EQ(mas * 3, B.get_referenced_bytes());
740 cout << "r " << r << " " << b << std::endl;
741 ASSERT_EQ(3u, r.size());
742 ASSERT_EQ(0x2001u, r[0].offset);
743 ASSERT_EQ(mas*2, r[0].length);
744 ASSERT_EQ(0x2u, r[1].offset);
745 ASSERT_EQ(mas*4, r[1].length);
746 ASSERT_EQ(0x3u, r[2].offset);
747 ASSERT_EQ(mas*2, r[2].length);
748 ASSERT_TRUE(b.is_allocated(0, mas*2));
749 ASSERT_FALSE(b.is_allocated(mas*2, mas*8));
750 ASSERT_TRUE(b.is_allocated(mas*10, mas*2));
751 ASSERT_EQ(3u, b.get_extents().size());
752 ASSERT_TRUE(b.get_extents()[0].is_valid());
753 ASSERT_FALSE(b.get_extents()[1].is_valid());
754 ASSERT_TRUE(b.get_extents()[2].is_valid());
755 ASSERT_FALSE(B.put_ref(coll.get(), mas*10, mas*2, &r));
756 ASSERT_EQ(mas * 1, B.get_referenced_bytes());
757 cout << "r " << r << " " << b << std::endl;
758 ASSERT_EQ(1u, r.size());
759 ASSERT_EQ(0x2003u, r[0].offset);
760 ASSERT_EQ(mas*2, r[0].length);
761 ASSERT_EQ(2u, b.get_extents().size());
762 ASSERT_TRUE(b.get_extents()[0].is_valid());
763 ASSERT_FALSE(b.get_extents()[1].is_valid());
764 ASSERT_TRUE(B.put_ref(coll.get(), 0, mas, &r));
765 ASSERT_EQ(mas * 0, B.get_referenced_bytes());
766 cout << "r " << r << " " << b << std::endl;
767 ASSERT_EQ(1u, r.size());
768 ASSERT_EQ(0x1u, r[0].offset);
769 ASSERT_EQ(mas*2, r[0].length);
770 ASSERT_EQ(1u, b.get_extents().size());
771 ASSERT_FALSE(b.get_extents()[0].is_valid());
772 }
773 {
774 BlueStore::Blob B;
775 B.shared_blob = new BlueStore::SharedBlob(coll.get());
776 bluestore_blob_t& b = B.dirty_blob();
777 PExtentVector r;
778 b.allocated_test(bluestore_pextent_t(1, mas * 8));
779 B.get_ref(coll.get(), 0, mas*8);
780 ASSERT_EQ(mas * 8, B.get_referenced_bytes());
781 ASSERT_FALSE(B.put_ref(coll.get(), 0, mas, &r));
782 ASSERT_EQ(mas * 7, B.get_referenced_bytes());
783 cout << "r " << r << " " << b << std::endl;
784 ASSERT_EQ(0u, r.size());
785 ASSERT_TRUE(b.is_allocated(0, mas*8));
786 ASSERT_FALSE(B.put_ref(coll.get(), mas*7, mas, &r));
787 ASSERT_EQ(mas * 6, B.get_referenced_bytes());
788 cout << "r " << r << " " << b << std::endl;
789 ASSERT_EQ(0u, r.size());
790 ASSERT_TRUE(b.is_allocated(0, mas*8));
791 ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas, &r));
792 ASSERT_EQ(mas * 5, B.get_referenced_bytes());
793 cout << "r " << r << " " << b << std::endl;
794 ASSERT_EQ(0u, r.size());
795 ASSERT_TRUE(b.is_allocated(0, 8));
796 ASSERT_FALSE(B.put_ref(coll.get(), mas*3, mas*4, &r));
797 ASSERT_EQ(mas * 1, B.get_referenced_bytes());
798 ASSERT_EQ(1u, r.size());
799 ASSERT_EQ(0x2001u, r[0].offset);
800 ASSERT_EQ(mas*6, r[0].length);
801 ASSERT_TRUE(b.is_allocated(0, mas*2));
802 ASSERT_FALSE(b.is_allocated(mas*2, mas*6));
803 ASSERT_EQ(2u, b.get_extents().size());
804 ASSERT_TRUE(b.get_extents()[0].is_valid());
805 ASSERT_FALSE(b.get_extents()[1].is_valid());
806 ASSERT_TRUE(B.put_ref(coll.get(), mas, mas, &r));
807 ASSERT_EQ(mas * 0, B.get_referenced_bytes());
808 cout << "r " << r << " " << b << std::endl;
809 ASSERT_EQ(1u, r.size());
810 ASSERT_EQ(0x1u, r[0].offset);
811 ASSERT_EQ(mas*2, r[0].length);
812 ASSERT_EQ(1u, b.get_extents().size());
813 ASSERT_FALSE(b.get_extents()[0].is_valid());
814 }
815   // verify csum chunk size is factored in properly
816 {
817 BlueStore::Blob B;
818 B.shared_blob = new BlueStore::SharedBlob(coll.get());
819 bluestore_blob_t& b = B.dirty_blob();
820 PExtentVector r;
821 b.allocated_test(bluestore_pextent_t(0, mas*4));
822 b.init_csum(Checksummer::CSUM_CRC32C, 14, mas * 4);
823 B.get_ref(coll.get(), 0, mas*4);
824 ASSERT_EQ(mas * 4, B.get_referenced_bytes());
825 ASSERT_TRUE(b.is_allocated(0, mas*4));
826 ASSERT_FALSE(B.put_ref(coll.get(), 0, mas*3, &r));
827 ASSERT_EQ(mas * 1, B.get_referenced_bytes());
828 cout << "r " << r << " " << b << std::endl;
829 ASSERT_EQ(0u, r.size());
830 ASSERT_TRUE(b.is_allocated(0, mas*4));
831 ASSERT_TRUE(b.get_extents()[0].is_valid());
832 ASSERT_EQ(mas*4, b.get_extents()[0].length);
833 }
834 {
835 BlueStore::Blob B;
836 B.shared_blob = new BlueStore::SharedBlob(coll.get());
837 bluestore_blob_t& b = B.dirty_blob();
838 b.allocated_test(bluestore_pextent_t(0x40101000, 0x4000));
839 b.allocated_test(bluestore_pextent_t(bluestore_pextent_t::INVALID_OFFSET,
840 0x13000));
841
842 b.allocated_test(bluestore_pextent_t(0x40118000, 0x7000));
843 B.get_ref(coll.get(), 0x0, 0x3800);
844 B.get_ref(coll.get(), 0x17c00, 0x6400);
845 ASSERT_EQ(0x3800u + 0x6400u, B.get_referenced_bytes());
846 b.set_flag(bluestore_blob_t::FLAG_SHARED);
847 b.init_csum(Checksummer::CSUM_CRC32C, 12, 0x1e000);
848
849 cout << "before: " << B << std::endl;
850 PExtentVector r;
851 ASSERT_FALSE(B.put_ref(coll.get(), 0x1800, 0x2000, &r));
852 ASSERT_EQ(0x3800u + 0x6400u - 0x2000u, B.get_referenced_bytes());
853 cout << "after: " << B << std::endl;
854 cout << "r " << r << std::endl;
855 }
856 {
857 BlueStore::Blob B;
858 B.shared_blob = new BlueStore::SharedBlob(coll.get());
859 bluestore_blob_t& b = B.dirty_blob();
860 b.allocated_test(bluestore_pextent_t(1, 0x5000));
861 b.allocated_test(bluestore_pextent_t(2, 0x5000));
862 B.get_ref(coll.get(), 0x0, 0xa000);
863 ASSERT_EQ(0xa000u, B.get_referenced_bytes());
864 cout << "before: " << B << std::endl;
865 PExtentVector r;
866 ASSERT_FALSE(B.put_ref(coll.get(), 0x8000, 0x2000, &r));
867 cout << "after: " << B << std::endl;
868 cout << "r " << r << std::endl;
869 ASSERT_EQ(0x8000u, B.get_referenced_bytes());
870 ASSERT_EQ(1u, r.size());
871 ASSERT_EQ(0x3002u, r[0].offset);
872 ASSERT_EQ(0x2000u, r[0].length);
873 }
874 {
875 BlueStore::Blob B;
876 B.shared_blob = new BlueStore::SharedBlob(coll.get());
877 bluestore_blob_t& b = B.dirty_blob();
878 b.allocated_test(bluestore_pextent_t(1, 0x7000));
879 b.allocated_test(bluestore_pextent_t(2, 0x7000));
880 B.get_ref(coll.get(), 0x0, 0xe000);
881 ASSERT_EQ(0xe000u, B.get_referenced_bytes());
882 cout << "before: " << B << std::endl;
883 PExtentVector r;
884 ASSERT_FALSE(B.put_ref(coll.get(), 0, 0xb000, &r));
885 ASSERT_EQ(0x3000u, B.get_referenced_bytes());
886 cout << "after: " << B << std::endl;
887 cout << "r " << r << std::endl;
888 ASSERT_EQ(0x3000u, B.get_referenced_bytes());
889 ASSERT_EQ(2u, r.size());
890 ASSERT_EQ(1u, r[0].offset);
891 ASSERT_EQ(0x7000u, r[0].length);
892 ASSERT_EQ(2u, r[1].offset);
893 ASSERT_EQ(0x3000u, r[1].length); // we have 0x1000 bytes less due to
894 // alignment caused by min_alloc_size = 0x2000
895 }
896 {
897 BlueStore store(g_ceph_context, "", 0x4000);
898 BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
899 g_ceph_context, "lru", NULL);
900 BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
901 g_ceph_context, "lru", NULL);
902
903 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
904 BlueStore::Blob B;
905 B.shared_blob = new BlueStore::SharedBlob(coll.get());
906 bluestore_blob_t& b = B.dirty_blob();
907 b.allocated_test(bluestore_pextent_t(1, 0x5000));
908 b.allocated_test(bluestore_pextent_t(2, 0x7000));
909 B.get_ref(coll.get(), 0x0, 0xc000);
910 ASSERT_EQ(0xc000u, B.get_referenced_bytes());
911 cout << "before: " << B << std::endl;
912 PExtentVector r;
913 ASSERT_FALSE(B.put_ref(coll.get(), 0x2000, 0xa000, &r));
914 cout << "after: " << B << std::endl;
915 cout << "r " << r << std::endl;
916 ASSERT_EQ(0x2000u, B.get_referenced_bytes());
917 ASSERT_EQ(2u, r.size());
918 ASSERT_EQ(0x4001u, r[0].offset);
919 ASSERT_EQ(0x1000u, r[0].length);
920 ASSERT_EQ(2u, r[1].offset);
921 ASSERT_EQ(0x7000u, r[1].length);
922 ASSERT_EQ(1u, b.get_extents()[0].offset);
923 ASSERT_EQ(0x4000u, b.get_extents()[0].length);
924 }
925 }
926
927 TEST(bluestore_blob_t, can_split)
928 {
929 bluestore_blob_t a;
930 ASSERT_TRUE(a.can_split());
931 a.flags = bluestore_blob_t::FLAG_SHARED;
932 ASSERT_FALSE(a.can_split());
933 a.flags = bluestore_blob_t::FLAG_COMPRESSED;
934 ASSERT_FALSE(a.can_split());
935 a.flags = bluestore_blob_t::FLAG_HAS_UNUSED;
936 ASSERT_FALSE(a.can_split());
937 }
938
939 TEST(bluestore_blob_t, can_split_at)
940 {
941 bluestore_blob_t a;
942 a.allocated_test(bluestore_pextent_t(0x10000, 0x2000));
943 a.allocated_test(bluestore_pextent_t(0x20000, 0x2000));
944 ASSERT_TRUE(a.can_split_at(0x1000));
945 ASSERT_TRUE(a.can_split_at(0x1800));
946 a.init_csum(Checksummer::CSUM_CRC32C, 12, 0x4000);
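  // csum chunk order 12 -> 0x1000-byte blocks; once checksums exist the blob
  // can only split on block-aligned offsets, so 0x2800 is rejected below.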
947 ASSERT_TRUE(a.can_split_at(0x1000));
948 ASSERT_TRUE(a.can_split_at(0x2000));
949 ASSERT_TRUE(a.can_split_at(0x3000));
950 ASSERT_FALSE(a.can_split_at(0x2800));
951 }
952
953 TEST(bluestore_blob_t, prune_tail)
954 {
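  // only a trailing unallocated (INVALID_OFFSET) extent can be pruned; pruning
  // shrinks the logical length and drops the csum entries covering the tail.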
955 bluestore_blob_t a;
956 a.allocated_test(bluestore_pextent_t(0x10000, 0x2000));
957 a.allocated_test(bluestore_pextent_t(0x20000, 0x2000));
958 ASSERT_FALSE(a.can_prune_tail());
959 a.allocated_test(
960 bluestore_pextent_t(bluestore_pextent_t::INVALID_OFFSET, 0x2000));
961 ASSERT_TRUE(a.can_prune_tail());
962 a.prune_tail();
963 ASSERT_FALSE(a.can_prune_tail());
964 ASSERT_EQ(2u, a.get_extents().size());
965 ASSERT_EQ(0x4000u, a.get_logical_length());
966
967 a.allocated_test(
968 bluestore_pextent_t(bluestore_pextent_t::INVALID_OFFSET, 0x2000));
969 a.init_csum(Checksummer::CSUM_CRC32C_8, 12, 0x6000);
970 ASSERT_EQ(6u, a.csum_data.length());
971 ASSERT_TRUE(a.can_prune_tail());
972 a.prune_tail();
973 ASSERT_FALSE(a.can_prune_tail());
974 ASSERT_EQ(2u, a.get_extents().size());
975 ASSERT_EQ(0x4000u, a.get_logical_length());
976 ASSERT_EQ(4u, a.csum_data.length());
977
978 bluestore_blob_t b;
979 b.allocated_test(
980 bluestore_pextent_t(bluestore_pextent_t::INVALID_OFFSET, 0x2000));
981   ASSERT_FALSE(b.can_prune_tail());
982 }
983
984 TEST(Blob, split)
985 {
986 BlueStore store(g_ceph_context, "", 4096);
987 BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
988 g_ceph_context, "lru", NULL);
989 BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
990 g_ceph_context, "lru", NULL);
991 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
992 {
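    // splitting the 0x2000 blob at 0x1000 should leave each half with one
    // 0x1000 extent, half of the crc32c data (4 bytes) and half the refs.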
993 BlueStore::Blob L, R;
994 L.shared_blob = new BlueStore::SharedBlob(coll.get());
995 R.shared_blob = new BlueStore::SharedBlob(coll.get());
996 L.dirty_blob().allocated_test(bluestore_pextent_t(0x2000, 0x2000));
997 L.dirty_blob().init_csum(Checksummer::CSUM_CRC32C, 12, 0x2000);
998 L.get_ref(coll.get(), 0, 0x2000);
999 L.split(coll.get(), 0x1000, &R);
1000 ASSERT_EQ(0x1000u, L.get_blob().get_logical_length());
1001 ASSERT_EQ(4u, L.get_blob().csum_data.length());
1002 ASSERT_EQ(1u, L.get_blob().get_extents().size());
1003 ASSERT_EQ(0x2000u, L.get_blob().get_extents().front().offset);
1004 ASSERT_EQ(0x1000u, L.get_blob().get_extents().front().length);
1005 ASSERT_EQ(0x1000u, L.get_referenced_bytes());
1006 ASSERT_EQ(0x1000u, R.get_blob().get_logical_length());
1007 ASSERT_EQ(4u, R.get_blob().csum_data.length());
1008 ASSERT_EQ(1u, R.get_blob().get_extents().size());
1009 ASSERT_EQ(0x3000u, R.get_blob().get_extents().front().offset);
1010 ASSERT_EQ(0x1000u, R.get_blob().get_extents().front().length);
1011 ASSERT_EQ(0x1000u, R.get_referenced_bytes());
1012 }
1013 {
1014 BlueStore::Blob L, R;
1015 L.shared_blob = new BlueStore::SharedBlob(coll.get());
1016 R.shared_blob = new BlueStore::SharedBlob(coll.get());
1017 L.dirty_blob().allocated_test(bluestore_pextent_t(0x2000, 0x1000));
1018 L.dirty_blob().allocated_test(bluestore_pextent_t(0x12000, 0x1000));
1019 L.dirty_blob().init_csum(Checksummer::CSUM_CRC32C, 12, 0x2000);
1020 L.get_ref(coll.get(), 0, 0x1000);
1021 L.get_ref(coll.get(), 0x1000, 0x1000);
1022 L.split(coll.get(), 0x1000, &R);
1023 ASSERT_EQ(0x1000u, L.get_blob().get_logical_length());
1024 ASSERT_EQ(4u, L.get_blob().csum_data.length());
1025 ASSERT_EQ(1u, L.get_blob().get_extents().size());
1026 ASSERT_EQ(0x2000u, L.get_blob().get_extents().front().offset);
1027 ASSERT_EQ(0x1000u, L.get_blob().get_extents().front().length);
1028 ASSERT_EQ(0x1000u, L.get_referenced_bytes());
1029 ASSERT_EQ(0x1000u, R.get_blob().get_logical_length());
1030 ASSERT_EQ(4u, R.get_blob().csum_data.length());
1031 ASSERT_EQ(1u, R.get_blob().get_extents().size());
1032 ASSERT_EQ(0x12000u, R.get_blob().get_extents().front().offset);
1033 ASSERT_EQ(0x1000u, R.get_blob().get_extents().front().length);
1034 ASSERT_EQ(0x1000u, R.get_referenced_bytes());
1035 }
1036 }
1037
1038 TEST(Blob, legacy_decode)
1039 {
1040 BlueStore store(g_ceph_context, "", 4096);
1041 BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
1042 g_ceph_context, "lru", NULL);
1043 BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
1044 g_ceph_context, "lru", NULL);
1045 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
1046 bufferlist bl, bl2;
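  // encode the same blob as legacy struct_v=1 (blob plus a separately encoded
  // ref map) and as struct_v=2, then check both decode to the same ref counts.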
1047 {
1048 BlueStore::Blob B;
1049
1050 B.shared_blob = new BlueStore::SharedBlob(coll.get());
1051 B.dirty_blob().allocated_test(bluestore_pextent_t(0x1, 0x2000));
1052 B.dirty_blob().init_csum(Checksummer::CSUM_CRC32C, 12, 0x2000);
1053 B.get_ref(coll.get(), 0, 0xff0);
1054 B.get_ref(coll.get(), 0x1fff, 1);
1055
1056 bluestore_extent_ref_map_t fake_ref_map;
1057 fake_ref_map.get(0, 0xff0);
1058 fake_ref_map.get(0x1fff, 1);
1059
1060 size_t bound = 0, bound2 = 0;
1061
1062 B.bound_encode(
1063 bound,
1064 1, /*struct_v*/
1065 0, /*sbid*/
1066 false);
1067 fake_ref_map.bound_encode(bound);
1068
1069 B.bound_encode(
1070 bound2,
1071 2, /*struct_v*/
1072 0, /*sbid*/
1073 true);
1074
1075 {
1076 auto app = bl.get_contiguous_appender(bound);
1077 auto app2 = bl2.get_contiguous_appender(bound2);
1078 B.encode(
1079 app,
1080 1, /*struct_v*/
1081 0, /*sbid*/
1082 false);
1083 fake_ref_map.encode(app);
1084
1085 B.encode(
1086 app2,
1087 2, /*struct_v*/
1088 0, /*sbid*/
1089 true);
1090 }
1091
1092 auto p = bl.front().begin_deep();
1093 auto p2 = bl2.front().begin_deep();
1094 BlueStore::Blob Bres, Bres2;
1095 Bres.shared_blob = new BlueStore::SharedBlob(coll.get());
1096 Bres2.shared_blob = new BlueStore::SharedBlob(coll.get());
1097
1098 uint64_t sbid, sbid2;
1099 Bres.decode(
1100 coll.get(),
1101 p,
1102 1, /*struct_v*/
1103 &sbid,
1104 true);
1105 Bres2.decode(
1106 coll.get(),
1107 p2,
1108 2, /*struct_v*/
1109 &sbid2,
1110 true);
1111
1112 ASSERT_EQ(0xff0u + 1u, Bres.get_blob_use_tracker().get_referenced_bytes());
1113 ASSERT_EQ(0xff0u + 1u, Bres2.get_blob_use_tracker().get_referenced_bytes());
1114 ASSERT_TRUE(Bres.get_blob_use_tracker().equal(Bres2.get_blob_use_tracker()));
1115 }
1116 }
1117
1118 TEST(ExtentMap, seek_lextent)
1119 {
1120 BlueStore store(g_ceph_context, "", 4096);
1121 BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
1122 g_ceph_context, "lru", NULL);
1123 BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
1124 g_ceph_context, "lru", NULL);
1125
1126 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
1127 BlueStore::Onode onode(coll.get(), ghobject_t(), "");
1128 BlueStore::ExtentMap em(&onode);
1129 BlueStore::BlobRef br(new BlueStore::Blob);
1130 br->shared_blob = new BlueStore::SharedBlob(coll.get());
1131
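  // seek_lextent returns the first extent that contains or follows the given
  // logical offset, or end() when nothing lies at or beyond it.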
1132 ASSERT_EQ(em.extent_map.end(), em.seek_lextent(0));
1133 ASSERT_EQ(em.extent_map.end(), em.seek_lextent(100));
1134
1135 em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, br));
1136 auto a = em.find(100);
1137 ASSERT_EQ(a, em.seek_lextent(0));
1138 ASSERT_EQ(a, em.seek_lextent(99));
1139 ASSERT_EQ(a, em.seek_lextent(100));
1140 ASSERT_EQ(a, em.seek_lextent(101));
1141 ASSERT_EQ(a, em.seek_lextent(199));
1142 ASSERT_EQ(em.extent_map.end(), em.seek_lextent(200));
1143
1144 em.extent_map.insert(*new BlueStore::Extent(200, 0, 100, br));
1145 auto b = em.find(200);
1146 ASSERT_EQ(a, em.seek_lextent(0));
1147 ASSERT_EQ(a, em.seek_lextent(99));
1148 ASSERT_EQ(a, em.seek_lextent(100));
1149 ASSERT_EQ(a, em.seek_lextent(101));
1150 ASSERT_EQ(a, em.seek_lextent(199));
1151 ASSERT_EQ(b, em.seek_lextent(200));
1152 ASSERT_EQ(b, em.seek_lextent(299));
1153 ASSERT_EQ(em.extent_map.end(), em.seek_lextent(300));
1154
1155 em.extent_map.insert(*new BlueStore::Extent(400, 0, 100, br));
1156 auto d = em.find(400);
1157 ASSERT_EQ(a, em.seek_lextent(0));
1158 ASSERT_EQ(a, em.seek_lextent(99));
1159 ASSERT_EQ(a, em.seek_lextent(100));
1160 ASSERT_EQ(a, em.seek_lextent(101));
1161 ASSERT_EQ(a, em.seek_lextent(199));
1162 ASSERT_EQ(b, em.seek_lextent(200));
1163 ASSERT_EQ(b, em.seek_lextent(299));
1164 ASSERT_EQ(d, em.seek_lextent(300));
1165 ASSERT_EQ(d, em.seek_lextent(399));
1166 ASSERT_EQ(d, em.seek_lextent(400));
1167 ASSERT_EQ(d, em.seek_lextent(499));
1168 ASSERT_EQ(em.extent_map.end(), em.seek_lextent(500));
1169 }
1170
1171 TEST(ExtentMap, has_any_lextents)
1172 {
1173 BlueStore store(g_ceph_context, "", 4096);
1174 BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
1175 g_ceph_context, "lru", NULL);
1176 BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
1177 g_ceph_context, "lru", NULL);
1178 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
1179 BlueStore::Onode onode(coll.get(), ghobject_t(), "");
1180 BlueStore::ExtentMap em(&onode);
1181 BlueStore::BlobRef b(new BlueStore::Blob);
1182 b->shared_blob = new BlueStore::SharedBlob(coll.get());
1183
1184 ASSERT_FALSE(em.has_any_lextents(0, 0));
1185 ASSERT_FALSE(em.has_any_lextents(0, 1000));
1186 ASSERT_FALSE(em.has_any_lextents(1000, 1000));
1187
1188 em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, b));
1189 ASSERT_FALSE(em.has_any_lextents(0, 50));
1190 ASSERT_FALSE(em.has_any_lextents(0, 100));
1191 ASSERT_FALSE(em.has_any_lextents(50, 50));
1192 ASSERT_TRUE(em.has_any_lextents(50, 51));
1193 ASSERT_TRUE(em.has_any_lextents(50, 100051));
1194 ASSERT_TRUE(em.has_any_lextents(100, 100));
1195 ASSERT_TRUE(em.has_any_lextents(100, 1));
1196 ASSERT_TRUE(em.has_any_lextents(199, 1));
1197 ASSERT_TRUE(em.has_any_lextents(199, 2));
1198 ASSERT_FALSE(em.has_any_lextents(200, 2));
1199
1200 em.extent_map.insert(*new BlueStore::Extent(200, 0, 100, b));
1201 ASSERT_TRUE(em.has_any_lextents(199, 1));
1202 ASSERT_TRUE(em.has_any_lextents(199, 2));
1203 ASSERT_TRUE(em.has_any_lextents(200, 2));
1204 ASSERT_TRUE(em.has_any_lextents(200, 200));
1205 ASSERT_TRUE(em.has_any_lextents(299, 1));
1206 ASSERT_FALSE(em.has_any_lextents(300, 1));
1207
1208 em.extent_map.insert(*new BlueStore::Extent(400, 0, 100, b));
1209 ASSERT_TRUE(em.has_any_lextents(0, 10000));
1210 ASSERT_TRUE(em.has_any_lextents(199, 1));
1211 ASSERT_FALSE(em.has_any_lextents(300, 1));
1212 ASSERT_FALSE(em.has_any_lextents(300, 100));
1213 ASSERT_FALSE(em.has_any_lextents(399, 1));
1214 ASSERT_TRUE(em.has_any_lextents(400, 1));
1215 ASSERT_TRUE(em.has_any_lextents(400, 100));
1216 ASSERT_TRUE(em.has_any_lextents(400, 1000));
1217 ASSERT_TRUE(em.has_any_lextents(499, 1000));
1218 ASSERT_FALSE(em.has_any_lextents(500, 1000));
1219 }
1220
1221 void erase_and_delete(BlueStore::ExtentMap& em, size_t v)
1222 {
1223 auto d = em.find(v);
1224 ASSERT_NE(d, em.extent_map.end());
1225 em.extent_map.erase(d);
1226 delete &*d;
1227 }
1228
1229 TEST(ExtentMap, compress_extent_map)
1230 {
1231 BlueStore store(g_ceph_context, "", 4096);
1232 BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
1233 g_ceph_context, "lru", NULL);
1234 BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
1235 g_ceph_context, "lru", NULL);
1236
1237 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
1238 BlueStore::Onode onode(coll.get(), ghobject_t(), "");
1239 BlueStore::ExtentMap em(&onode);
1240 BlueStore::BlobRef b1(new BlueStore::Blob);
1241 BlueStore::BlobRef b2(new BlueStore::Blob);
1242 BlueStore::BlobRef b3(new BlueStore::Blob);
1243 b1->shared_blob = new BlueStore::SharedBlob(coll.get());
1244 b2->shared_blob = new BlueStore::SharedBlob(coll.get());
1245 b3->shared_blob = new BlueStore::SharedBlob(coll.get());
1246
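  // compress_extent_map merges logically adjacent extents that continue the
  // same blob at contiguous blob offsets, returning how many were removed.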
1247 em.extent_map.insert(*new BlueStore::Extent(0, 0, 100, b1));
1248 em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, b2));
1249 ASSERT_EQ(0, em.compress_extent_map(0, 10000));
1250 ASSERT_EQ(2u, em.extent_map.size());
1251
1252 em.extent_map.insert(*new BlueStore::Extent(200, 100, 100, b2));
1253 em.extent_map.insert(*new BlueStore::Extent(300, 200, 100, b2));
1254 ASSERT_EQ(0, em.compress_extent_map(0, 0));
1255 ASSERT_EQ(0, em.compress_extent_map(100000, 1000));
1256 ASSERT_EQ(2, em.compress_extent_map(0, 100000));
1257 ASSERT_EQ(2u, em.extent_map.size());
1258 erase_and_delete(em, 100);
1259 em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, b2));
1260 em.extent_map.insert(*new BlueStore::Extent(200, 100, 100, b3));
1261 em.extent_map.insert(*new BlueStore::Extent(300, 200, 100, b2));
1262 ASSERT_EQ(0, em.compress_extent_map(0, 1));
1263 ASSERT_EQ(0, em.compress_extent_map(0, 100000));
1264 ASSERT_EQ(4u, em.extent_map.size());
1265
1266 em.extent_map.insert(*new BlueStore::Extent(400, 300, 100, b2));
1267 em.extent_map.insert(*new BlueStore::Extent(500, 500, 100, b2));
1268 em.extent_map.insert(*new BlueStore::Extent(600, 600, 100, b2));
1269 em.extent_map.insert(*new BlueStore::Extent(700, 0, 100, b1));
1270 em.extent_map.insert(*new BlueStore::Extent(800, 0, 100, b3));
1271 ASSERT_EQ(0, em.compress_extent_map(0, 99));
1272 ASSERT_EQ(0, em.compress_extent_map(800, 1000));
1273 ASSERT_EQ(2, em.compress_extent_map(100, 500));
1274 ASSERT_EQ(7u, em.extent_map.size());
1275 erase_and_delete(em, 300);
1276 erase_and_delete(em, 500);
1277 erase_and_delete(em, 700);
1278 em.extent_map.insert(*new BlueStore::Extent(400, 300, 100, b2));
1279 em.extent_map.insert(*new BlueStore::Extent(500, 400, 100, b2));
1280 em.extent_map.insert(*new BlueStore::Extent(700, 500, 100, b2));
1281 ASSERT_EQ(1, em.compress_extent_map(0, 1000));
1282 ASSERT_EQ(6u, em.extent_map.size());
1283 }
1284
1285
1286 void clear_and_dispose(BlueStore::old_extent_map_t& old_em)
1287 {
1288 auto oep = old_em.begin();
1289 while (oep != old_em.end()) {
1290 auto &lo = *oep;
1291 oep = old_em.erase(oep);
1292 delete &lo;
1293 }
1294 }
1295
1296 TEST(GarbageCollector, BasicTest)
1297 {
1298 BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
1299 g_ceph_context, "lru", NULL);
1300 BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
1301 g_ceph_context, "lru", NULL);
1302
1303 BlueStore store(g_ceph_context, "", 4096);
1304 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
1305 BlueStore::Onode onode(coll.get(), ghobject_t(), "");
1306 BlueStore::ExtentMap em(&onode);
1307
1308 BlueStore::old_extent_map_t old_extents;
1309
1310
1311 /*
1312 min_alloc_size = 4096
1313 original disposition
1314 extent1 <loffs = 100, boffs = 100, len = 10>
1315 -> blob1<compressed, len_on_disk=4096, logical_len=8192>
1316 extent2 <loffs = 200, boffs = 200, len = 10>
1317 -> blob2<raw, len_on_disk=4096, llen=4096>
1318 extent3 <loffs = 300, boffs = 300, len = 10>
1319 -> blob1<compressed, len_on_disk=4096, llen=8192>
1320 extent4 <loffs = 4096, boffs = 0, len = 10>
1321 -> blob3<raw, len_on_disk=4096, llen=4096>
1322     a write(300~100) resulted in
1323 extent1 <loffs = 100, boffs = 100, len = 10>
1324 -> blob1<compressed, len_on_disk=4096, logical_len=8192>
1325 extent2 <loffs = 200, boffs = 200, len = 10>
1326 -> blob2<raw, len_on_disk=4096, llen=4096>
1327 extent3 <loffs = 300, boffs = 300, len = 100>
1328 -> blob4<raw, len_on_disk=4096, llen=4096>
1329 extent4 <loffs = 4096, boffs = 0, len = 10>
1330 -> blob3<raw, len_on_disk=4096, llen=4096>
1331 */
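  // Expected (informally): re-writing the 10 bytes at loffs 100 that still
  // reference compressed blob1 would let its on-disk space be reclaimed, so the
  // estimator reports a saving of 1 allocation unit and proposes the {100, 10}
  // interval for collection.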
1332 {
1333 BlueStore::GarbageCollector gc(g_ceph_context);
1334 int64_t saving;
1335 BlueStore::BlobRef b1(new BlueStore::Blob);
1336 BlueStore::BlobRef b2(new BlueStore::Blob);
1337 BlueStore::BlobRef b3(new BlueStore::Blob);
1338 BlueStore::BlobRef b4(new BlueStore::Blob);
1339 b1->shared_blob = new BlueStore::SharedBlob(coll.get());
1340 b2->shared_blob = new BlueStore::SharedBlob(coll.get());
1341 b3->shared_blob = new BlueStore::SharedBlob(coll.get());
1342 b4->shared_blob = new BlueStore::SharedBlob(coll.get());
1343 b1->dirty_blob().set_compressed(0x2000, 0x1000);
1344 b1->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x1000));
1345 b2->dirty_blob().allocated_test(bluestore_pextent_t(1, 0x1000));
1346 b3->dirty_blob().allocated_test(bluestore_pextent_t(2, 0x1000));
1347 b4->dirty_blob().allocated_test(bluestore_pextent_t(3, 0x1000));
1348 em.extent_map.insert(*new BlueStore::Extent(100, 100, 10, b1));
1349 b1->get_ref(coll.get(), 100, 10);
1350 em.extent_map.insert(*new BlueStore::Extent(200, 200, 10, b2));
1351 b2->get_ref(coll.get(), 200, 10);
1352 em.extent_map.insert(*new BlueStore::Extent(300, 300, 100, b4));
1353 b4->get_ref(coll.get(), 300, 100);
1354 em.extent_map.insert(*new BlueStore::Extent(4096, 0, 10, b3));
1355 b3->get_ref(coll.get(), 0, 10);
1356
1357 old_extents.push_back(*new BlueStore::OldExtent(300, 300, 10, b1));
1358
1359 saving = gc.estimate(300, 100, em, old_extents, 4096);
1360 ASSERT_EQ(saving, 1);
1361 auto& to_collect = gc.get_extents_to_collect();
1362 ASSERT_EQ(to_collect.num_intervals(), 1u);
1363 {
1364 auto it = to_collect.begin();
1365 using p = decltype(*it);
1366 auto v = p{100ul, 10ul};
1367 ASSERT_EQ(*it, v);
1368 }
1369 em.clear();
1370 clear_and_dispose(old_extents);
1371 }
1372 /*
1373 original disposition
1374 min_alloc_size = 0x10000
1375 extent1 <loffs = 0, boffs = 0, len = 0x40000>
1376 -> blob1<compressed, len_on_disk=0x20000, logical_len=0x40000>
1377     Write 0x8000~0x37000 resulted in the following extent map prior to GC
1378 for the last write_small(0x30000~0xf000):
1379
1380 extent1 <loffs = 0, boffs = 0, len = 0x8000>
1381 -> blob1<compressed, len_on_disk=0x20000, logical_len=0x40000>
1382 extent2 <loffs = 0x8000, boffs = 0x8000, len = 0x8000>
1383 -> blob2<raw, len_on_disk=0x10000, llen=0x10000>
1384 extent3 <loffs = 0x10000, boffs = 0, len = 0x20000>
1385 -> blob3<raw, len_on_disk=0x20000, llen=0x20000>
1386 extent4 <loffs = 0x30000, boffs = 0, len = 0xf000>
1387 -> blob4<raw, len_on_disk=0x10000, llen=0x10000>
1388 extent5 <loffs = 0x3f000, boffs = 0x3f000, len = 0x1000>
1389 -> blob1<compressed, len_on_disk=0x20000, llen=0x40000>
1390 */
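  // Expected (informally): only 0~0x8000 and 0x3f000~0x1000 still reference
  // compressed blob1, so those two ranges are the collection candidates and the
  // estimator reports a positive saving (2 units, asserted below).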
1391 {
1392 BlueStore store(g_ceph_context, "", 0x10000);
1393 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
1394 BlueStore::Onode onode(coll.get(), ghobject_t(), "");
1395 BlueStore::ExtentMap em(&onode);
1396
1397 BlueStore::old_extent_map_t old_extents;
1398 BlueStore::GarbageCollector gc(g_ceph_context);
1399 int64_t saving;
1400 BlueStore::BlobRef b1(new BlueStore::Blob);
1401 BlueStore::BlobRef b2(new BlueStore::Blob);
1402 BlueStore::BlobRef b3(new BlueStore::Blob);
1403 BlueStore::BlobRef b4(new BlueStore::Blob);
1404 b1->shared_blob = new BlueStore::SharedBlob(coll.get());
1405 b2->shared_blob = new BlueStore::SharedBlob(coll.get());
1406 b3->shared_blob = new BlueStore::SharedBlob(coll.get());
1407 b4->shared_blob = new BlueStore::SharedBlob(coll.get());
1408 b1->dirty_blob().set_compressed(0x40000, 0x20000);
1409 b1->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x20000));
1410 b2->dirty_blob().allocated_test(bluestore_pextent_t(1, 0x10000));
1411 b3->dirty_blob().allocated_test(bluestore_pextent_t(2, 0x20000));
1412 b4->dirty_blob().allocated_test(bluestore_pextent_t(3, 0x10000));
1413
1414 em.extent_map.insert(*new BlueStore::Extent(0, 0, 0x8000, b1));
1415 b1->get_ref(coll.get(), 0, 0x8000);
1416 em.extent_map.insert(
1417 *new BlueStore::Extent(0x8000, 0x8000, 0x8000, b2)); // new extent
1418 b2->get_ref(coll.get(), 0x8000, 0x8000);
1419 em.extent_map.insert(
1420 *new BlueStore::Extent(0x10000, 0, 0x20000, b3)); // new extent
1421 b3->get_ref(coll.get(), 0, 0x20000);
1422 em.extent_map.insert(
1423 *new BlueStore::Extent(0x30000, 0, 0xf000, b4)); // new extent
1424 b4->get_ref(coll.get(), 0, 0xf000);
1425 em.extent_map.insert(*new BlueStore::Extent(0x3f000, 0x3f000, 0x1000, b1));
1426 b1->get_ref(coll.get(), 0x3f000, 0x1000);
1427
1428 old_extents.push_back(*new BlueStore::OldExtent(0x8000, 0x8000, 0x8000, b1));
1429 old_extents.push_back(
1430 *new BlueStore::OldExtent(0x10000, 0x10000, 0x20000, b1));
1431 old_extents.push_back(*new BlueStore::OldExtent(0x30000, 0x30000, 0xf000, b1));
1432
1433 saving = gc.estimate(0x30000, 0xf000, em, old_extents, 0x10000);
1434 ASSERT_EQ(saving, 2);
1435 auto& to_collect = gc.get_extents_to_collect();
1436 ASSERT_EQ(to_collect.num_intervals(), 2u);
1437 {
1438 auto it1 = to_collect.begin();
1439 auto it2 = ++to_collect.begin();
1440 using p = decltype(*it1);
1441 {
1442 auto v1 = p{0x0ul ,0x8000ul};
1443 auto v2 = p{0x0ul, 0x8000ul};
1444 ASSERT_TRUE(*it1 == v1 || *it2 == v2);
1445 }
1446 {
1447 auto v1 = p{0x3f000ul, 0x1000ul};
1448 auto v2 = p{0x3f000ul, 0x1000ul};
1449 ASSERT_TRUE(*it1 == v1 || *it2 == v2);
1450 }
1451 }
1452
1453 em.clear();
1454 clear_and_dispose(old_extents);
1455 }
1456 /*
1457 original disposition
1458 min_alloc_size = 0x1000
1459 extent1 <loffs = 0, boffs = 0, len = 0x4000>
1460 -> blob1<compressed, len_on_disk=0x2000, logical_len=0x4000>
1461     write 0x3000~0x4000 resulted in the following extent map
1462 (future feature - suppose we can compress incoming write prior to
1463 GC invocation)
1464
1465 extent1 <loffs = 0, boffs = 0, len = 0x4000>
1466 -> blob1<compressed, len_on_disk=0x2000, logical_len=0x4000>
1467 extent2 <loffs = 0x3000, boffs = 0, len = 0x4000>
1468 -> blob2<compressed, len_on_disk=0x2000, llen=0x4000>
1469 */
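  // Expected (informally): both blobs are compressed 2:1, so re-writing the
  // 0x3000 bytes still mapped to blob1 brings no net gain; saving is 0 and
  // nothing is scheduled for collection.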
1470 {
1471 BlueStore::GarbageCollector gc(g_ceph_context);
1472 int64_t saving;
1473 BlueStore::BlobRef b1(new BlueStore::Blob);
1474 BlueStore::BlobRef b2(new BlueStore::Blob);
1475 b1->shared_blob = new BlueStore::SharedBlob(coll.get());
1476 b2->shared_blob = new BlueStore::SharedBlob(coll.get());
1477 b1->dirty_blob().set_compressed(0x4000, 0x2000);
1478 b1->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x2000));
1479 b2->dirty_blob().set_compressed(0x4000, 0x2000);
1480 b2->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x2000));
1481
1482 em.extent_map.insert(*new BlueStore::Extent(0, 0, 0x3000, b1));
1483 b1->get_ref(coll.get(), 0, 0x3000);
1484 em.extent_map.insert(
1485 *new BlueStore::Extent(0x3000, 0, 0x4000, b2)); // new extent
1486 b2->get_ref(coll.get(), 0, 0x4000);
1487
1488 old_extents.push_back(*new BlueStore::OldExtent(0x3000, 0x3000, 0x1000, b1));
1489
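// Rewriting what is left of compressed blob1 would not save any space here,
// so the estimate is expected to be 0 and nothing is scheduled for collection.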
1490 saving = gc.estimate(0x3000, 0x4000, em, old_extents, 0x1000);
1491 ASSERT_EQ(saving, 0);
1492 auto& to_collect = gc.get_extents_to_collect();
1493 ASSERT_EQ(to_collect.num_intervals(), 0u);
1494 em.clear();
1495 clear_and_dispose(old_extents);
1496 }
1497 /*
1498 original disposition
1499 min_alloc_size = 0x10000
1500 extent0 <loffs = 0, boffs = 0, len = 0x20000>
1501 -> blob0<compressed, len_on_disk=0x10000, logical_len=0x20000>
1502 extent1 <loffs = 0x20000, boffs = 0, len = 0x20000>
1503 -> blob1<compressed, len_on_disk=0x10000, logical_len=0x20000>
1504 write 0x8000~37000 resulted in the following extent map prior
1505 to GC for the last write_small(0x30000~0xf000)
1506
1507 extent0 <loffs = 0, boffs = 0, len = 0x8000>
1508 -> blob0<compressed, len_on_disk=0x10000, logical_len=0x20000>
1509 extent2 <loffs = 0x8000, boffs = 0x8000, len = 0x8000>
1510 -> blob2<raw, len_on_disk=0x10000, llen=0x10000>
1511 extent3 <loffs = 0x10000, boffs = 0, len = 0x20000>
1512 -> blob3<raw, len_on_disk=0x20000, llen=0x20000>
1513 extent4 <loffs = 0x30000, boffs = 0, len = 0xf000>
1514 -> blob4<raw, len_on_disk=0x1000, llen=0x1000>
1515 extent5 <loffs = 0x3f000, boffs = 0x1f000, len = 0x1000>
1516 -> blob1<compressed, len_on_disk=0x10000, llen=0x20000>
1517 */
1518 {
1519 BlueStore store(g_ceph_context, "", 0x10000);
1520 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
1521 BlueStore::Onode onode(coll.get(), ghobject_t(), "");
1522 BlueStore::ExtentMap em(&onode);
1523
1524 BlueStore::old_extent_map_t old_extents;
1525 BlueStore::GarbageCollector gc(g_ceph_context);
1526 int64_t saving;
1527 BlueStore::BlobRef b0(new BlueStore::Blob);
1528 BlueStore::BlobRef b1(new BlueStore::Blob);
1529 BlueStore::BlobRef b2(new BlueStore::Blob);
1530 BlueStore::BlobRef b3(new BlueStore::Blob);
1531 BlueStore::BlobRef b4(new BlueStore::Blob);
1532 b0->shared_blob = new BlueStore::SharedBlob(coll.get());
1533 b1->shared_blob = new BlueStore::SharedBlob(coll.get());
1534 b2->shared_blob = new BlueStore::SharedBlob(coll.get());
1535 b3->shared_blob = new BlueStore::SharedBlob(coll.get());
1536 b4->shared_blob = new BlueStore::SharedBlob(coll.get());
1537 b0->dirty_blob().set_compressed(0x2000, 0x1000);
1538 b0->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x10000));
1539 b1->dirty_blob().set_compressed(0x20000, 0x10000);
1540 b1->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x10000));
1541 b2->dirty_blob().allocated_test(bluestore_pextent_t(1, 0x10000));
1542 b3->dirty_blob().allocated_test(bluestore_pextent_t(2, 0x20000));
1543 b4->dirty_blob().allocated_test(bluestore_pextent_t(3, 0x1000));
1544
1545 em.extent_map.insert(*new BlueStore::Extent(0, 0, 0x8000, b0));
1546 b0->get_ref(coll.get(), 0, 0x8000);
1547 em.extent_map.insert(
1548 *new BlueStore::Extent(0x8000, 0x8000, 0x8000, b2)); // new extent
1549 b2->get_ref(coll.get(), 0x8000, 0x8000);
1550 em.extent_map.insert(
1551 *new BlueStore::Extent(0x10000, 0, 0x20000, b3)); // new extent
1552 b3->get_ref(coll.get(), 0, 0x20000);
1553 em.extent_map.insert(
1554 *new BlueStore::Extent(0x30000, 0, 0xf000, b4)); // new extent
1555 b4->get_ref(coll.get(), 0, 0xf000);
1556 em.extent_map.insert(*new BlueStore::Extent(0x3f000, 0x1f000, 0x1000, b1));
1557 b1->get_ref(coll.get(), 0x1f000, 0x1000);
1558
1559 old_extents.push_back(*new BlueStore::OldExtent(0x8000, 0x8000, 0x8000, b0));
1560 old_extents.push_back(
1561 *new BlueStore::OldExtent(0x10000, 0x10000, 0x10000, b0));
1562 old_extents.push_back(
1563 *new BlueStore::OldExtent(0x20000, 0x00000, 0x1f000, b1));
1564
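// Same overall layout as the first case, but now the head (blob0) and the
// tail (blob1) belong to different compressed blobs; GC is still expected to
// report a benefit of 2 and to collect the extents at 0x0 and 0x3f000.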
1565 saving = gc.estimate(0x30000, 0xf000, em, old_extents, 0x10000);
1566 ASSERT_EQ(saving, 2);
1567 auto& to_collect = gc.get_extents_to_collect();
1568 ASSERT_EQ(to_collect.num_intervals(), 2u);
1569 {
1570 auto it1 = to_collect.begin();
1571 auto it2 = ++to_collect.begin();
1572 using p = decltype(*it1);
1573 {
1574 auto v1 = p{0x0ul, 0x8000ul};
1575 auto v2 = p{0x0ul, 0x8000ul};
1576 ASSERT_TRUE(*it1 == v1 || *it2 == v2);
1577 }
1578 {
1579 auto v1 = p{0x3f000ul, 0x1000ul};
1580 auto v2 = p{0x3f000ul, 0x1000ul};
1581 ASSERT_TRUE(*it1 == v1 || *it2 == v2);
1582 }
1583 }
1584
1585 em.clear();
1586 clear_and_dispose(old_extents);
1587 }
1588 }
1589
1590 TEST(BlueStoreRepairer, StoreSpaceTracker)
1591 {
1592 BlueStoreRepairer::StoreSpaceTracker bmap0;
1593 bmap0.init((uint64_t)4096 * 1024 * 1024 * 1024, 0x1000);
1594 ASSERT_EQ(bmap0.granularity, 2 * 1024 * 1024U);
1595 ASSERT_EQ(bmap0.collections_bfs.size(), 2048u * 1024u);
1596 ASSERT_EQ(bmap0.objects_bfs.size(), 2048u * 1024u);
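// For a 4 TiB span the requested 0x1000 granularity would be far too fine;
// the tracker apparently scales it up to 2 MiB so that only
// 4 TiB / 2 MiB = 2M bloom filters per category are kept. The smaller tracker
// below stays at 0x1000 since it fits under its explicit 512K-entry limit.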
1597
1598 BlueStoreRepairer::StoreSpaceTracker bmap;
1599 bmap.init(0x2000 * 0x1000 - 1, 0x1000, 512 * 1024);
1600 ASSERT_EQ(bmap.granularity, 0x1000u);
1601 ASSERT_EQ(bmap.collections_bfs.size(), 0x2000u);
1602 ASSERT_EQ(bmap.objects_bfs.size(), 0x2000u);
1603
1604 coll_t cid;
1605 ghobject_t hoid;
1606
1607 ASSERT_FALSE(bmap.is_used(cid, 0));
1608 ASSERT_FALSE(bmap.is_used(hoid, 0));
1609 bmap.set_used(0, 1, cid, hoid);
1610 ASSERT_TRUE(bmap.is_used(cid, 0));
1611 ASSERT_TRUE(bmap.is_used(hoid, 0));
1612
1613 ASSERT_FALSE(bmap.is_used(cid, 0x1023));
1614 ASSERT_FALSE(bmap.is_used(hoid, 0x1023));
1615 ASSERT_FALSE(bmap.is_used(cid, 0x2023));
1616 ASSERT_FALSE(bmap.is_used(hoid, 0x2023));
1617 ASSERT_FALSE(bmap.is_used(cid, 0x3023));
1618 ASSERT_FALSE(bmap.is_used(hoid, 0x3023));
1619 bmap.set_used(0x1023, 0x3000, cid, hoid);
1620 ASSERT_TRUE(bmap.is_used(cid, 0x1023));
1621 ASSERT_TRUE(bmap.is_used(hoid, 0x1023));
1622 ASSERT_TRUE(bmap.is_used(cid, 0x2023));
1623 ASSERT_TRUE(bmap.is_used(hoid, 0x2023));
1624 ASSERT_TRUE(bmap.is_used(cid, 0x3023));
1625 ASSERT_TRUE(bmap.is_used(hoid, 0x3023));
1626
1627 ASSERT_FALSE(bmap.is_used(cid, 0x9001));
1628 ASSERT_FALSE(bmap.is_used(hoid, 0x9001));
1629 ASSERT_FALSE(bmap.is_used(cid, 0xa001));
1630 ASSERT_FALSE(bmap.is_used(hoid, 0xa001));
1631 ASSERT_FALSE(bmap.is_used(cid, 0xb000));
1632 ASSERT_FALSE(bmap.is_used(hoid, 0xb000));
1633 ASSERT_FALSE(bmap.is_used(cid, 0xc000));
1634 ASSERT_FALSE(bmap.is_used(hoid, 0xc000));
1635 bmap.set_used(0x9001, 0x2fff, cid, hoid);
1636 ASSERT_TRUE(bmap.is_used(cid, 0x9001));
1637 ASSERT_TRUE(bmap.is_used(hoid, 0x9001));
1638 ASSERT_TRUE(bmap.is_used(cid, 0xa001));
1639 ASSERT_TRUE(bmap.is_used(hoid, 0xa001));
1640 ASSERT_TRUE(bmap.is_used(cid, 0xb001));
1641 ASSERT_TRUE(bmap.is_used(hoid, 0xb001));
1642 ASSERT_FALSE(bmap.is_used(cid, 0xc000));
1643 ASSERT_FALSE(bmap.is_used(hoid, 0xc000));
1644
1645 bmap.set_used(0xa001, 0x2, cid, hoid);
1646 ASSERT_TRUE(bmap.is_used(cid, 0x9001));
1647 ASSERT_TRUE(bmap.is_used(hoid, 0x9001));
1648 ASSERT_TRUE(bmap.is_used(cid, 0xa001));
1649 ASSERT_TRUE(bmap.is_used(hoid, 0xa001));
1650 ASSERT_TRUE(bmap.is_used(cid, 0xb001));
1651 ASSERT_TRUE(bmap.is_used(hoid, 0xb001));
1652 ASSERT_FALSE(bmap.is_used(cid, 0xc000));
1653 ASSERT_FALSE(bmap.is_used(hoid, 0xc000));
1654
1655 ASSERT_FALSE(bmap.is_used(cid, 0xc0000));
1656 ASSERT_FALSE(bmap.is_used(hoid, 0xc0000));
1657 ASSERT_FALSE(bmap.is_used(cid, 0xc1000));
1658 ASSERT_FALSE(bmap.is_used(hoid, 0xc1000));
1659
1660 bmap.set_used(0xc0000, 0x2000, cid, hoid);
1661 ASSERT_TRUE(bmap.is_used(cid, 0xc0000));
1662 ASSERT_TRUE(bmap.is_used(hoid, 0xc0000));
1663 ASSERT_TRUE(bmap.is_used(cid, 0xc1000));
1664 ASSERT_TRUE(bmap.is_used(hoid, 0xc1000));
1665
1666 interval_set<uint64_t> extents;
1667 extents.insert(0,0x500);
1668 extents.insert(0x800,0x100);
1669 extents.insert(0x1000,0x1000);
1670 extents.insert(0xa001,1);
1671 extents.insert(0xa0000,0xff8);
1672
1673 ASSERT_EQ(3u, bmap.filter_out(extents));
1674 ASSERT_TRUE(bmap.is_used(cid));
1675 ASSERT_TRUE(bmap.is_used(hoid));
1676
1677 BlueStoreRepairer::StoreSpaceTracker bmap2;
1678 bmap2.init((uint64_t)0x3223b1d1000, 0x10000);
1679 ASSERT_EQ(0x1a0000u, bmap2.granularity);
1680 ASSERT_EQ(0x1edae4u, bmap2.collections_bfs.size());
1681 ASSERT_EQ(0x1edae4u, bmap2.objects_bfs.size());
1682 bmap2.set_used(0x3223b190000, 0x10000, cid, hoid);
1683 ASSERT_TRUE(bmap2.is_used(cid, 0x3223b190000));
1684 ASSERT_TRUE(bmap2.is_used(hoid, 0x3223b190000));
1685 ASSERT_TRUE(bmap2.is_used(cid, 0x3223b19f000));
1686 ASSERT_TRUE(bmap2.is_used(hoid, 0x3223b19ffff));
1687 }
1688
1689 TEST(bluestore_blob_t, unused)
1690 {
1691 {
1692 bluestore_blob_t b;
1693 uint64_t min_alloc_size = 64 << 10; // 64 kB
1694
1695 // _do_write_small 0x0~1000
1696 uint64_t offset = 0x0;
1697 uint64_t length = 0x1000; // 4kB
1698 uint64_t suggested_boff = 0;
1699 PExtentVector extents;
1700 extents.emplace_back(0x1a560000, min_alloc_size);
1701 b.allocated(p2align(suggested_boff, min_alloc_size), 0 /*no matter*/, extents);
1702 b.mark_used(offset, length);
1703 ASSERT_FALSE(b.is_unused(offset, length));
1704
1705 // _do_write_small 0x2000~1000
1706 offset = 0x2000;
1707 length = 0x1000;
1708 b.add_unused(0, 0x10000);
1709 ASSERT_TRUE(b.is_unused(offset, length));
1710 b.mark_used(offset, length);
1711 ASSERT_FALSE(b.is_unused(offset, length));
1712
1713 // _do_write_small 0xc000~2000
1714 offset = 0xc000;
1715 length = 0x2000;
1716 ASSERT_TRUE(b.is_unused(offset, length));
1717 b.mark_used(offset, length);
1718 ASSERT_FALSE(b.is_unused(offset, length));
1719 }
1720
1721 {
1722 bluestore_blob_t b;
1723 uint64_t min_alloc_size = 64 << 10; // 64 kB
1724
1725 // _do_write_small 0x11000~1000
1726 uint64_t offset = 0x11000;
1727 uint64_t length = 0x1000; // 4kB
1728 uint64_t suggested_boff = 0x11000;
1729 PExtentVector extents;
1730 extents.emplace_back(0x1a560000, min_alloc_size);
1731 b.allocated(p2align(suggested_boff, min_alloc_size), 0 /*no matter*/, extents);
1732 b.add_unused(0, offset);
1733 b.add_unused(offset + length, min_alloc_size * 2 - offset - length);
1734 b.mark_used(offset, length);
1735 ASSERT_FALSE(b.is_unused(offset, length));
1736
1737 // _do_write_small 0x15000~3000
1738 offset = 0x15000;
1739 length = 0x3000;
1740 ASSERT_TRUE(b.is_unused(offset, length));
1741 b.mark_used(offset, length);
1742 ASSERT_FALSE(b.is_unused(offset, length));
1743 }
1744
1745 {
1746 // reuse blob
1747 bluestore_blob_t b;
1748 uint64_t min_alloc_size = 64 << 10; // 64 kB
1749
1750 // _do_write_small 0x2a000~1000
1751 // and 0x1d000~1000
1752 uint64_t unused_granularity = 0x3000;
1753 // offsets and lengths below are selected to
1754 // be aligned with unused_granularity
1755 uint64_t offset0 = 0x2a000;
1756 uint64_t offset = 0x1d000;
1757 uint64_t length = 0x1000; // 4kB
1758 PExtentVector extents;
1759 extents.emplace_back(0x410000, min_alloc_size);
1760 b.allocated(p2align(offset0, min_alloc_size), min_alloc_size, extents);
1761 b.add_unused(0, min_alloc_size * 3);
1762 b.mark_used(offset0, length);
1763 ASSERT_FALSE(b.is_unused(offset0, length));
1764 ASSERT_TRUE(b.is_unused(offset, length));
1765
1766 extents.clear();
1767 extents.emplace_back(0x430000, min_alloc_size);
1768 b.allocated(p2align(offset, min_alloc_size), min_alloc_size, extents);
1769 b.mark_used(offset, length);
1770 ASSERT_FALSE(b.is_unused(offset0, length));
1771 ASSERT_FALSE(b.is_unused(offset, length));
1772 ASSERT_FALSE(b.is_unused(offset, unused_granularity));
1773
1774 ASSERT_TRUE(b.is_unused(0, offset / unused_granularity * unused_granularity));
1775 ASSERT_TRUE(b.is_unused(offset + length, offset0 - offset - length));
1776 auto end0_aligned = round_up_to(offset0 + length, unused_granularity);
1777 ASSERT_TRUE(b.is_unused(end0_aligned, min_alloc_size * 3 - end0_aligned));
1778 }
1779 }
1780 // This UT is primarily intended to show how the repair procedure
1781 // causes an erroneous write to INVALID_OFFSET, as reported in
1782 // https://tracker.ceph.com/issues/51682
1783 // Basic map_bl functionality is tested as well though.
1784 //
1785 TEST(bluestore_blob_t, wrong_map_bl_in_51682)
1786 {
1787 {
1788 bluestore_blob_t b;
1789 uint64_t min_alloc_size = 4 << 10; // 4 kB
1790
1791 b.allocated_test(bluestore_pextent_t(0x17ba000, 4 * min_alloc_size));
1792 b.allocated_test(bluestore_pextent_t(0x17bf000, 4 * min_alloc_size));
1793 b.allocated_test(
1794 bluestore_pextent_t(
1795 bluestore_pextent_t::INVALID_OFFSET,
1796 1 * min_alloc_size));
1797 b.allocated_test(bluestore_pextent_t(0x153c44d000, 7 * min_alloc_size));
1798
1799 b.mark_used(0, 0x8000);
1800 b.mark_used(0x9000, 0x7000);
1801
1802 string s(0x7000, 'a');
1803 bufferlist bl;
1804 bl.append(s);
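// The 0x7000-byte buffer mapped from offset 0 covers the whole first pextent
// (0x4000) plus 0x3000 of the second; mapped from the bogus offset 0x5000 it
// covers the rest of the second pextent, the 0x1000 INVALID_OFFSET pextent
// and 0x3000 of the last one, which is exactly what the table below encodes.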
1805 const size_t num_expected_entries = 5;
1806 uint64_t expected[num_expected_entries][2] = {
1807 {0x17ba000, 0x4000},
1808 {0x17bf000, 0x3000},
1809 {0x17c0000, 0x3000},
1810 {0xffffffffffffffff, 0x1000},
1811 {0x153c44d000, 0x3000}};
1812 size_t expected_pos = 0;
1813 b.map_bl(0, bl,
1814 [&](uint64_t o, bufferlist& bl) {
1815 ASSERT_EQ(o, expected[expected_pos][0]);
1816 ASSERT_EQ(bl.length(), expected[expected_pos][1]);
1817 ++expected_pos;
1818 });
1819 // 0x5000 is an improper offset presumably provided when doing a repair
1820 b.map_bl(0x5000, bl,
1821 [&](uint64_t o, bufferlist& bl) {
1822 ASSERT_EQ(o, expected[expected_pos][0]);
1823 ASSERT_EQ(bl.length(), expected[expected_pos][1]);
1824 ++expected_pos;
1825 });
1826 ASSERT_EQ(expected_pos, num_expected_entries);
1827 }
1828 }
1829
1830 //---------------------------------------------------------------------------------
1831 static int verify_extent(const extent_t & ext, const extent_t *ext_arr, uint64_t ext_arr_size, uint64_t idx)
1832 {
1833 const extent_t & ext_ref = ext_arr[idx];
1834 if (ext.offset == ext_ref.offset && ext.length == ext_ref.length) {
1835 return 0;
1836 } else {
1837 std::cerr << "mismatch was found at index " << idx << std::endl;
1838 if (ext.length == 0) {
1839 std::cerr << "Null extent was returned at idx = " << idx << std::endl;
1840 }
1841 unsigned start = std::max(((int32_t)(idx)-3), 0);
1842 unsigned end = std::min(idx+3, ext_arr_size);
1843 for (unsigned j = start; j < end; j++) {
1844 const extent_t & ext_ref = ext_arr[j];
1845 std::cerr << j << ") ref_ext = [" << ext_ref.offset << ", " << ext_ref.length << "]" << std::endl;
1846 }
1847 std::cerr << idx << ") ext = [" << ext.offset << ", " << ext.length << "]" << std::endl;
1848 return -1;
1849 }
1850 }
1851
1852 //---------------------------------------------------------------------------------
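// Applies a random sequence of set() or clr() extents, remembering each one
// in ext_arr (merging extents that end up adjacent), then walks the bitmap
// with get_next_set/clr_extent() and verifies every returned extent against
// the recorded ones.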
1853 static int test_extents(uint64_t index, extent_t *ext_arr, uint64_t ext_arr_size, SimpleBitmap& sbmap, bool set)
1854 {
1855 const uint64_t MAX_JUMP_BIG = 1523;
1856 const uint64_t MAX_JUMP_SMALL = 19;
1857 const uint64_t MAX_LEN_BIG = 523;
1858 const uint64_t MAX_LEN_SMALL = 23;
1859
1860 uint64_t n = sbmap.get_size();
1861 uint64_t offset = 0;
1862 unsigned length, jump, i;
1863 for (i = 0; i < ext_arr_size; i++) {
1864 if (i & 3) {
1865 jump = std::rand() % MAX_JUMP_BIG;
1866 } else {
1867 jump = std::rand() % MAX_JUMP_SMALL;
1868 }
1869 offset += jump;
1870 if (i & 1) {
1871 length = std::rand() % MAX_LEN_BIG;
1872 } else {
1873 length = std::rand() % MAX_LEN_SMALL;
1874 }
1875 // make sure a zero length is never used
1876 length++;
1877 if (offset + length >= n) {
1878 break;
1879 }
1880
1881 bool success;
1882 if (set) {
1883 success = sbmap.set(offset, length);
1884 } else {
1885 success = sbmap.clr(offset, length);
1886 }
1887 if (!success) {
1888 std::cerr << "Failed sbmap." << (set ? "set(" : "clr(") << offset << ", " << length << ")"<< std::endl;
1889 return -1;
1890 }
1891
1892 // start a new extent on the first entry or after a jump; otherwise merge with the previous one
1893 if ( (i==0) || (jump > 0) ) {
1894 ext_arr[i] = {offset, length};
1895 } else {
1896 // merge 2 extents
1897 i --;
1898 ext_arr[i].length += length;
1899 }
1900 offset += length;
1901 }
1902 unsigned arr_size = std::min((uint64_t)i, ext_arr_size);
1903 std::cout << std::hex << std::right;
1904 std::cout << "[" << index << "] " << (set ? "Set::" : "Clr::") << " extents count = 0x" << arr_size;
1905 std::cout << std::dec << std::endl;
1906
1907 offset = 0;
1908 extent_t ext;
1909 for(unsigned i = 0; i < arr_size; i++) {
1910 if (set) {
1911 ext = sbmap.get_next_set_extent(offset);
1912 } else {
1913 ext = sbmap.get_next_clr_extent(offset);
1914 }
1915
1916 if (verify_extent(ext, ext_arr, ext_arr_size, i) != 0) {
1917 return -1;
1918 }
1919 offset = ext.offset + ext.length;
1920 }
1921
1922 if (set) {
1923 ext = sbmap.get_next_set_extent(offset);
1924 } else {
1925 ext = sbmap.get_next_clr_extent(offset);
1926 }
1927 if (ext.length == 0) {
1928 return 0;
1929 } else {
1930 std::cerr << "sbmap.get_next_" << (set ? "set" : "clr") << "_extent(" << offset << ") returned length = " << ext.length << std::endl;
1931 return -1;
1932 }
1933 }
1934
1935 //---------------------------------------------------------------------------------
1936 TEST(SimpleBitmap, basic)
1937 {
1938 const uint64_t MAX_EXTENTS_COUNT = 7131177;
1939 std::unique_ptr<extent_t[]> ext_arr = std::make_unique<extent_t[]>(MAX_EXTENTS_COUNT);
1940 ASSERT_TRUE(ext_arr != nullptr);
1941 const uint64_t BIT_COUNT = 4ULL << 30; // 4Gb = 512MB
1942 SimpleBitmap sbmap(g_ceph_context, BIT_COUNT);
1943
1944 // use current time as seed for random generator
1945 std::srand(std::time(nullptr));
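// Three passes: start from an all-clear bitmap and verify the set extents,
// then from an all-set bitmap and verify the clear extents.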
1946 for (unsigned i = 0; i < 3; i++ ) {
1947 memset(ext_arr.get(), 0, sizeof(extent_t)*MAX_EXTENTS_COUNT);
1948 sbmap.clear_all();
1949 ASSERT_TRUE(test_extents(i, ext_arr.get(), MAX_EXTENTS_COUNT, sbmap, true) == 0);
1950
1951 memset(ext_arr.get(), 0, sizeof(extent_t)*MAX_EXTENTS_COUNT);
1952 sbmap.set_all();
1953 ASSERT_TRUE(test_extents(i, ext_arr.get(), MAX_EXTENTS_COUNT, sbmap, false) == 0);
1954 }
1955 }
1956
1957 //---------------------------------------------------------------------------------
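// Mirrors a random sequence of set()/clr() operations into a byte-per-bit
// shadow map, then cross-checks bit_is_set()/bit_is_clr() and both extent
// iterators against that shadow.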
1958 static int test_intersections(unsigned test_idx, SimpleBitmap &sbmap, uint8_t map[], uint64_t map_size)
1959 {
1960 const uint64_t MAX_LEN_BIG = 523;
1961 const uint64_t MAX_LEN_SMALL = 23;
1962
1963 bool success;
1964 uint64_t set_op_count = 0, clr_op_count = 0;
1965 unsigned length, i;
1966 for (i = 0; i < map_size / (MAX_LEN_BIG*2); i++) {
1967 uint64_t offset = (std::rand() % (map_size - 1));
1968 if (i & 1) {
1969 length = std::rand() % MAX_LEN_BIG;
1970 } else {
1971 length = std::rand() % MAX_LEN_SMALL;
1972 }
1973 // make sure a zero length is never used
1974 length++;
1975 if (offset + length >= map_size) {
1976 continue;
1977 }
1978 // 2:1 set/clr
1979 bool set = (std::rand() % 3);
1980 if (set) {
1981 success = sbmap.set(offset, length);
1982 memset(map+offset, 0xFF, length);
1983 set_op_count++;
1984 } else {
1985 success = sbmap.clr(offset, length);
1986 memset(map+offset, 0x0, length);
1987 clr_op_count++;
1988 }
1989 if (!success) {
1990 std::cerr << "Failed sbmap." << (set ? "set(" : "clr(") << offset << ", " << length << ")"<< std::endl;
1991 return -1;
1992 }
1993 }
1994
1995 uint64_t set_bit_count = 0;
1996 uint64_t clr_bit_count = 0;
1997 for(uint64_t idx = 0; idx < map_size; idx++) {
1998 if (map[idx]) {
1999 set_bit_count++;
2000 success = sbmap.bit_is_set(idx);
2001 } else {
2002 clr_bit_count++;
2003 success = sbmap.bit_is_clr(idx);
2004 }
2005 if (!success) {
2006 std::cerr << "expected: sbmap.bit_is_" << (map[idx] ? "set(" : "clr(") << idx << ")"<< std::endl;
2007 return -1;
2008 }
2009
2010 }
2011 std::cout << std::hex << std::right << __func__ ;
2012 std::cout << " [" << test_idx << "] set_bit_count = 0x" << std::setfill('0') << std::setw(8) << set_bit_count
2013 << ", clr_bit_count = 0x" << std::setfill('0') << std::setw(8) << clr_bit_count
2014 << ", sum = 0x" << set_bit_count + clr_bit_count << std::endl;
2015 std::cout << std::dec;
2016 uint64_t offset = 0;
2017 for(uint64_t i = 0; i < (set_op_count + clr_op_count); i++) {
2018 extent_t ext = sbmap.get_next_set_extent(offset);
2019 //std::cout << "set_ext:: " << i << ") [" << ext.offset << ", " << ext.length << "]" << std::endl;
2020 for (uint64_t idx = ext.offset; idx < ext.offset + ext.length; idx++) {
2021 if (map[idx] != 0xFF) {
2022 std::cerr << "map[" << idx << "] is clear, but extent [" << ext.offset << ", " << ext.length << "] is set" << std::endl;
2023 return -1;
2024 }
2025 }
2026 offset = ext.offset + ext.length;
2027 }
2028
2029 offset = 0;
2030 for(uint64_t i = 0; i < (set_op_count + clr_op_count); i++) {
2031 extent_t ext = sbmap.get_next_clr_extent(offset);
2032 //std::cout << "clr_ext:: " << i << ") [" << ext.offset << ", " << ext.length << "]" << std::endl;
2033 for (uint64_t idx = ext.offset; idx < ext.offset + ext.length; idx++) {
2034 if (map[idx] ) {
2035 std::cerr << "map[" << idx << "] is set, but extent [" << ext.offset << ", " << ext.length << "] is free" << std::endl;
2036 return -1;
2037 }
2038 }
2039 offset = ext.offset + ext.length;
2040 }
2041
2042 return 0;
2043 }
2044
2045 //---------------------------------------------------------------------------------
2046 TEST(SimpleBitmap, intersection)
2047 {
2048 const uint64_t MAP_SIZE = 1ULL << 30; // 1G
2049 SimpleBitmap sbmap(g_ceph_context, MAP_SIZE);
2050
2051 // use current time as seed for random generator
2052 std::srand(std::time(nullptr));
2053
2054 std::unique_ptr<uint8_t[]> map = std::make_unique<uint8_t[]> (MAP_SIZE);
2055 ASSERT_TRUE(map != nullptr);
2056
2057 for (unsigned i = 0; i < 1; i++ ) {
2058 sbmap.clear_all();
2059 memset(map.get(), 0, MAP_SIZE);
2060 ASSERT_TRUE(test_intersections(i, sbmap, map.get(), MAP_SIZE) == 0);
2061
2062 sbmap.set_all();
2063 memset(map.get(), 0xFF, MAP_SIZE);
2064 ASSERT_TRUE(test_intersections(i, sbmap, map.get(), MAP_SIZE) == 0);
2065 }
2066 }
2067
2068
2069 //---------------------------------------------------------------------------------
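// Exercises extents whose offsets and lengths deliberately straddle 64-bit
// word boundaries (offsets stepped by 0..63, lengths 1..128) and verifies
// they are read back intact.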
2070 static int test_extents_boundaries(uint64_t index, extent_t *ext_arr, uint64_t ext_arr_size, SimpleBitmap& sbmap, bool set)
2071 {
2072 uint64_t n = sbmap.get_size();
2073 uint64_t offset = 0, k = 0;
2074 for(unsigned i = 0; i < 64; i++) {
2075 offset += i;
2076 if (offset >= n) {
2077 break;
2078 }
2079
2080 for(unsigned length = 1; length <= 128; length++) {
2081 if (offset + length >= n) {
2082 break;
2083 }
2084
2085 if (k >= ext_arr_size) {
2086 break;
2087 }
2088 bool success;
2089 if (set) {
2090 success = sbmap.set(offset, length);
2091 } else {
2092 success = sbmap.clr(offset, length);
2093 }
2094 if (!success) {
2095 std::cerr << "Failed sbmap." << (set ? "set(" : "clr(") << offset << ", " << length << ")"<< std::endl;
2096 return -1;
2097 }
2098 ext_arr[k++] = {offset, length};
2099 if (length < 64) {
2100 offset += 64;
2101 } else {
2102 offset += 128;
2103 }
2104 }
2105 if (k >= ext_arr_size) {
2106 break;
2107 }
2108 }
2109
2110 unsigned arr_size = std::min((uint64_t)k, ext_arr_size);
2111 std::cout << std::hex << std::right << __func__ ;
2112 std::cout << " [" << index << "] " << (set ? "Set::" : "Clr::") << " extents count = 0x" << arr_size;
2113 std::cout << std::dec << std::endl;
2114
2115 offset = 0;
2116 extent_t ext;
2117 for(unsigned i = 0; i < arr_size; i++) {
2118 if (set) {
2119 ext = sbmap.get_next_set_extent(offset);
2120 } else {
2121 ext = sbmap.get_next_clr_extent(offset);
2122 }
2123
2124 if (verify_extent(ext, ext_arr, ext_arr_size, i) != 0) {
2125 return -1;
2126 }
2127 offset = ext.offset + ext.length;
2128 }
2129
2130 if (set) {
2131 ext = sbmap.get_next_set_extent(offset);
2132 } else {
2133 ext = sbmap.get_next_clr_extent(offset);
2134 }
2135 if (ext.length == 0) {
2136 return 0;
2137 } else {
2138 std::cerr << "sbmap.get_next_" << (set ? "set" : "clr") << "_extent(" << offset << ") returned length = " << ext.length << std::endl;
2139 return -1;
2140 }
2141
2142 }
2143
2144 //---------------------------------------------------------------------------------
2145 TEST(SimpleBitmap, boundaries)
2146 {
2147 const uint64_t MAX_EXTENTS_COUNT = 64 << 10;
2148 std::unique_ptr<extent_t[]> ext_arr = std::make_unique<extent_t[]>(MAX_EXTENTS_COUNT);
2149 ASSERT_TRUE(ext_arr != nullptr);
2150
2151 // use current time as seed for random generator
2152 std::srand(std::time(nullptr));
2153
2154 uint64_t bit_count = 32 << 20; // 32Mb = 4MB
2155 unsigned count = 0;
2156 for (unsigned i = 0; i < 64; i++) {
2157 SimpleBitmap sbmap(g_ceph_context, bit_count+i);
2158 memset(ext_arr.get(), 0, sizeof(extent_t)*MAX_EXTENTS_COUNT);
2159 sbmap.clear_all();
2160 ASSERT_TRUE(test_extents_boundaries(count, ext_arr.get(), MAX_EXTENTS_COUNT, sbmap, true) == 0);
2161
2162 memset(ext_arr.get(), 0, sizeof(extent_t)*MAX_EXTENTS_COUNT);
2163 sbmap.set_all();
2164 ASSERT_TRUE(test_extents_boundaries(count++, ext_arr.get(), MAX_EXTENTS_COUNT, sbmap, false) == 0);
2165 }
2166 }
2167
2168 //---------------------------------------------------------------------------------
2169 TEST(SimpleBitmap, boundaries2)
2170 {
2171 const uint64_t bit_count_base = 64 << 10; // 64Kb = 8KB
2172 const extent_t null_extent = {0, 0};
2173
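// For bitmap sizes just past a 64K-bit boundary, whole-range and bit-by-bit
// set/clr must both yield exactly one full-size extent and no extent of the
// opposite kind.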
2174 for (unsigned i = 0; i < 64; i++) {
2175 uint64_t bit_count = bit_count_base + i;
2176 extent_t full_extent = {0, bit_count};
2177 SimpleBitmap sbmap(g_ceph_context, bit_count);
2178
2179 sbmap.set(0, bit_count);
2180 ASSERT_TRUE(sbmap.get_next_set_extent(0) == full_extent);
2181 ASSERT_TRUE(sbmap.get_next_clr_extent(0) == null_extent);
2182
2183 for (uint64_t bit = 0; bit < bit_count; bit++) {
2184 sbmap.clr(bit, 1);
2185 }
2186 ASSERT_TRUE(sbmap.get_next_set_extent(0) == null_extent);
2187 ASSERT_TRUE(sbmap.get_next_clr_extent(0) == full_extent);
2188
2189 for (uint64_t bit = 0; bit < bit_count; bit++) {
2190 sbmap.set(bit, 1);
2191 }
2192 ASSERT_TRUE(sbmap.get_next_set_extent(0) == full_extent);
2193 ASSERT_TRUE(sbmap.get_next_clr_extent(0) == null_extent);
2194
2195 sbmap.clr(0, bit_count);
2196 ASSERT_TRUE(sbmap.get_next_set_extent(0) == null_extent);
2197 ASSERT_TRUE(sbmap.get_next_clr_extent(0) == full_extent);
2198 }
2199 }
2200
2201 TEST(shared_blob_2hash_tracker_t, basic_test)
2202 {
2203 shared_blob_2hash_tracker_t t1(1024 * 1024, 4096);
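// The tracker accumulates per-(shared blob id, offset) reference deltas in
// two hash-based counter arrays; count_non_zero() returns to zero only once
// the deltas balance out, and, as the sequences below show, increments and
// decrements issued for different sbids or offsets do not cancel each other.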
2204
2205 ASSERT_TRUE(t1.count_non_zero() == 0);
2206
2207 t1.inc(0, 0, 1);
2208 ASSERT_TRUE(t1.count_non_zero() != 0);
2209 t1.inc(0, 0, -1);
2210 ASSERT_TRUE(t1.count_non_zero() == 0);
2211
2212 t1.inc(3, 0x1000, 2);
2213 ASSERT_TRUE(t1.count_non_zero() != 0);
2214 t1.inc(3, 0x1000, -1);
2215 ASSERT_TRUE(t1.count_non_zero() != 0);
2216 t1.inc(3, 0x1000, -1);
2217 ASSERT_TRUE(t1.count_non_zero() == 0);
2218
2219 t1.inc(2, 0x2000, 5);
2220 ASSERT_TRUE(t1.count_non_zero() != 0);
2221 t1.inc(18, 0x2000, -5);
2222 ASSERT_TRUE(t1.count_non_zero() != 0);
2223 t1.inc(18, 0x2000, 1);
2224 ASSERT_TRUE(t1.count_non_zero() != 0);
2225 t1.inc(2, 0x2000, -1);
2226 ASSERT_TRUE(t1.count_non_zero() != 0);
2227 t1.inc(18, 0x2000, 4);
2228 ASSERT_TRUE(t1.count_non_zero() != 0);
2229 t1.inc(2, 0x2000, -4);
2230 ASSERT_TRUE(t1.count_non_zero() == 0);
2231
2232 t1.inc(3, 0x3000, 2);
2233 ASSERT_TRUE(t1.count_non_zero() != 0);
2234 t1.inc(4, 0x3000, -1);
2235 ASSERT_TRUE(t1.count_non_zero() != 0);
2236 t1.inc(4, 0x3000, -1);
2237 ASSERT_TRUE(t1.count_non_zero() != 0);
2238 t1.inc(3, 0x3000, -2);
2239 ASSERT_TRUE(t1.count_non_zero() != 0);
2240 t1.inc(4, 0x3000, 1);
2241 ASSERT_TRUE(t1.count_non_zero() != 0);
2242 t1.inc(4, 0x3000, 1);
2243 ASSERT_TRUE(t1.count_non_zero() == 0);
2244
2245 t1.inc(5, 0x1000, 1);
2246 t1.inc(5, 0x2000, 3);
2247 t1.inc(5, 0x3000, 2);
2248 t1.inc(5, 0x8000, 1);
2249
2250 ASSERT_TRUE(t1.count_non_zero() != 0);
2251
2252 ASSERT_TRUE(!t1.test_all_zero(5,0x1000));
2253 ASSERT_TRUE(!t1.test_all_zero(5, 0x2000));
2254 ASSERT_TRUE(!t1.test_all_zero(5, 0x3000));
2255 ASSERT_TRUE(t1.test_all_zero(5, 0x4000));
2256 ASSERT_TRUE(!t1.test_all_zero(5, 0x8000));
2257
2258 ASSERT_TRUE(t1.test_all_zero_range(5, 0, 0x1000));
2259 ASSERT_TRUE(t1.test_all_zero_range(5, 0x500, 0x500));
2260 ASSERT_TRUE(!t1.test_all_zero_range(5, 0x500, 0x1500));
2261 ASSERT_TRUE(!t1.test_all_zero_range(5, 0x1500, 0x3200));
2262 ASSERT_TRUE(t1.test_all_zero_range(5, 0x4500, 0x1500));
2263 ASSERT_TRUE(t1.test_all_zero_range(5, 0x4500, 0x3b00));
2264 ASSERT_TRUE(!t1.test_all_zero_range(5, 0, 0x9000));
2265 }
2266
2267 TEST(bluestore_blob_use_tracker_t, mempool_stats_test)
2268 {
2269 using mempool::bluestore_cache_other::allocated_items;
2270 using mempool::bluestore_cache_other::allocated_bytes;
2271 uint64_t other_items0 = allocated_items();
2272 uint64_t other_bytes0 = allocated_bytes();
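// The use tracker keeps one small (4-byte) counter per allocation unit in the
// bluestore_cache_other mempool, so tracking 1M at 4K granularity should
// account for 256 items / 1024 bytes; the deltas below verify that across
// init, add_tail, prune_tail and split.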
2273 {
2274 bluestore_blob_use_tracker_t* t1 = new bluestore_blob_use_tracker_t;
2275
2276 t1->init(1024 * 1024, 4096);
2277 ASSERT_EQ(256, allocated_items() - other_items0); // = 1M / 4K
2278 ASSERT_EQ(1024, allocated_bytes() - other_bytes0); // = 1M / 4K * 4
2279
2280 delete t1;
2281 ASSERT_EQ(allocated_items(), other_items0);
2282 ASSERT_EQ(allocated_bytes(), other_bytes0);
2283 }
2284 {
2285 bluestore_blob_use_tracker_t* t1 = new bluestore_blob_use_tracker_t;
2286
2287 t1->init(1024 * 1024, 4096);
2288 t1->add_tail(2048 * 1024, 4096);
2289 // proper stats update after tail add
2290 ASSERT_EQ(512, allocated_items() - other_items0); // = 2M / 4K
2291 ASSERT_EQ(2048, allocated_bytes() - other_bytes0); // = 2M / 4K * 4
2292
2293 delete t1;
2294 ASSERT_EQ(allocated_items(), other_items0);
2295 ASSERT_EQ(allocated_bytes(), other_bytes0);
2296 }
2297 {
2298 bluestore_blob_use_tracker_t* t1 = new bluestore_blob_use_tracker_t;
2299
2300 t1->init(1024 * 1024, 4096);
2301 t1->prune_tail(512 * 1024);
2302 // no changes in stats after pruning
2303 ASSERT_EQ(256, allocated_items() - other_items0); // = 1M / 4K
2304 ASSERT_EQ(1024, allocated_bytes() - other_bytes0); // = 1M / 4K * 4
2305
2306 delete t1;
2307 ASSERT_EQ(allocated_items(), other_items0);
2308 ASSERT_EQ(allocated_bytes(), other_bytes0);
2309 }
2310 {
2311 bluestore_blob_use_tracker_t* t1 = new bluestore_blob_use_tracker_t;
2312 bluestore_blob_use_tracker_t* t2 = new bluestore_blob_use_tracker_t;
2313
2314 t1->init(1024 * 1024, 4096);
2315
2316 // t1 keeps the same number of entries and t2 gets half as many
2317 t1->split(512 * 1024, t2);
2318 ASSERT_EQ(256 + 128, allocated_items() - other_items0); //= 1M / 4K*1.5
2319 ASSERT_EQ(1024 + 512, allocated_bytes() - other_bytes0); //= 1M / 4K*4*1.5
2320
2321 // t1 & t2 release everything, then t2 gets one less entry than it had
2322 // before
2323 t1->split(4096, t2);
2324 ASSERT_EQ(127, allocated_items() - other_items0); // = 512K / 4K - 1
2325 ASSERT_EQ(127 * 4, allocated_bytes() - other_bytes0); // = 512K / 4K * 4 - 4
2326 delete t1;
2327 delete t2;
2328 ASSERT_EQ(allocated_items(), other_items0);
2329 ASSERT_EQ(allocated_bytes(), other_bytes0);
2330 }
2331 }
2332
2333 int main(int argc, char **argv) {
2334 auto args = argv_to_vec(argc, argv);
2335 auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
2336 CODE_ENVIRONMENT_UTILITY,
2337 CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
2338 common_init_finish(g_ceph_context);
2339 ::testing::InitGoogleTest(&argc, argv);
2340 return RUN_ALL_TESTS();
2341 }