1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3
4 #include "include/types.h"
5 #include "os/bluestore/bluestore_types.h"
6 #include "gtest/gtest.h"
7 #include "include/stringify.h"
8 #include "common/ceph_time.h"
9 #include "os/bluestore/BlueStore.h"
10 #include "os/bluestore/simple_bitmap.h"
11 #include "os/bluestore/AvlAllocator.h"
12 #include "common/ceph_argparse.h"
13 #include "global/global_init.h"
14 #include "global/global_context.h"
15 #include "perfglue/heap_profiler.h"
16
17 #include <sstream>
18
19 #define _STR(x) #x
20 #define STRINGIFY(x) _STR(x)
21
22 using namespace std;
23
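// Print sizeof() for the core BlueStore in-memory types so changes in their
// memory footprint show up in the test output.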
24 TEST(bluestore, sizeof) {
25 #define P(t) cout << STRINGIFY(t) << "\t" << sizeof(t) << std::endl
26 P(BlueStore::Onode);
27 P(BlueStore::Extent);
28 P(BlueStore::Blob);
29 P(BlueStore::SharedBlob);
30 P(BlueStore::ExtentMap);
31 P(BlueStore::extent_map_t);
32 P(BlueStore::blob_map_t);
33 P(BlueStore::BufferSpace);
34 P(BlueStore::Buffer);
35 P(bluestore_onode_t);
36 P(bluestore_blob_t);
37 P(PExtentVector);
38 P(ghobject_t);
39 P(bluestore_shared_blob_t);
40 P(bluestore_extent_ref_map_t);
41 P(bluestore_extent_ref_map_t::record_t);
42 P(bluestore_blob_use_tracker_t);
43 P(std::atomic_int);
44 P(BlueStore::SharedBlobRef);
45 P(boost::intrusive::set_base_hook<>);
46 P(boost::intrusive::unordered_set_base_hook<>);
47 P(bufferlist);
48 P(bufferptr);
49 P(range_seg_t);
50 P(sb_info_t);
51 P(SimpleBitmap);
52 cout << "map<uint64_t,uint64_t>\t" << sizeof(map<uint64_t,uint64_t>) << std::endl;
53 cout << "map<char,char>\t" << sizeof(map<char,char>) << std::endl;
54 }
55
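// Helper: dump current mempool statistics to stdout as pretty-printed JSON.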
56 void dump_mempools()
57 {
58 ostringstream ostr;
59 Formatter* f = Formatter::create("json-pretty", "json-pretty", "json-pretty");
60 ostr << "Mempools: ";
61 f->open_object_section("mempools");
62 mempool::dump(f);
63 f->close_section();
64 f->flush(ostr);
65 delete f;
66 cout << ostr.str() << std::endl;
67 }
68
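// Exercise lookups plus ordered and out-of-order insertions on the
// space-efficient shared-blob info map.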
69 TEST(sb_info_space_efficient_map_t, basic) {
70 sb_info_space_efficient_map_t sb_info;
71 const size_t num_shared = 1000;
72 for (size_t i = 0; i < num_shared; i += 2) {
73 auto& sbi = sb_info.add_maybe_stray(i);
74 sbi.pool_id = i;
75 }
76 ASSERT_TRUE(sb_info.find(0) != sb_info.end());
77 ASSERT_TRUE(sb_info.find(1) == sb_info.end());
78 ASSERT_TRUE(sb_info.find(2) != sb_info.end());
79 ASSERT_TRUE(sb_info.find(4)->pool_id == 4);
80 ASSERT_TRUE(sb_info.find(num_shared) == sb_info.end());
81
82 // ordered insertion
83 sb_info.add_or_adopt(num_shared).pool_id = num_shared;
84 ASSERT_TRUE(sb_info.find(num_shared) != sb_info.end());
85 ASSERT_TRUE(sb_info.find(num_shared)->pool_id == num_shared);
86
87 // out of order insertion
88 sb_info.add_or_adopt(1).pool_id = 1;
89 ASSERT_TRUE(sb_info.find(1) != sb_info.end());
90 ASSERT_TRUE(sb_info.find(1)->pool_id == 1);
91
92 // ordered insertion
93 sb_info.add_maybe_stray(num_shared + 1).pool_id = num_shared + 1;
94 ASSERT_TRUE(sb_info.find(num_shared + 1) != sb_info.end());
95 ASSERT_TRUE(sb_info.find(num_shared + 1)->pool_id == num_shared + 1);
96
97 // out of order insertion
98 sb_info.add_maybe_stray(105).pool_id = 105;
99 ASSERT_TRUE(sb_info.find(105) != sb_info.end());
100 ASSERT_TRUE(sb_info.find(105)->pool_id == 105);
101 }
102
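// Populate the map with 10 million entries and dump mempool stats so the
// per-entry memory overhead can be inspected.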
103 TEST(sb_info_space_efficient_map_t, size) {
104 const size_t num_shared = 10000000;
105 sb_info_space_efficient_map_t sb_info;
106
107 BlueStore store(g_ceph_context, "", 4096);
108 BlueStore::OnodeCacheShard* oc = BlueStore::OnodeCacheShard::create(
109 g_ceph_context, "lru", NULL);
110 BlueStore::BufferCacheShard* bc = BlueStore::BufferCacheShard::create(
111 g_ceph_context, "lru", NULL);
112
113 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
114
115 for (size_t i = 0; i < num_shared; i++) {
116 auto& sbi = sb_info.add_or_adopt(i);
117 // primarily to silence the 'unused' warning
118 ceph_assert(sbi.pool_id == sb_info_t::INVALID_POOL_ID);
119 }
120 dump_mempools();
121 }
122
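// Taking refs on adjacent ranges should coalesce them into a single
// ref_map record.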
123 TEST(bluestore_extent_ref_map_t, add)
124 {
125 bluestore_extent_ref_map_t m;
126 m.get(10, 10);
127 ASSERT_EQ(1u, m.ref_map.size());
128 cout << m << std::endl;
129 m.get(20, 10);
130 cout << m << std::endl;
131 ASSERT_EQ(1u, m.ref_map.size());
132 ASSERT_EQ(20u, m.ref_map[10].length);
133 ASSERT_EQ(1u, m.ref_map[10].refs);
134 m.get(40, 10);
135 cout << m << std::endl;
136 ASSERT_EQ(2u, m.ref_map.size());
137 m.get(30, 10);
138 cout << m << std::endl;
139 ASSERT_EQ(1u, m.ref_map.size());
140 m.get(50, 10);
141 cout << m << std::endl;
142 ASSERT_EQ(1u, m.ref_map.size());
143 m.get(5, 5);
144 cout << m << std::endl;
145 ASSERT_EQ(1u, m.ref_map.size());
146 }
147
148 TEST(bluestore_extent_ref_map_t, get)
149 {
150 bluestore_extent_ref_map_t m;
151 m.get(00, 30);
152 cout << m << std::endl;
153 m.get(10, 10);
154 cout << m << std::endl;
155 ASSERT_EQ(3u, m.ref_map.size());
156 ASSERT_EQ(10u, m.ref_map[0].length);
157 ASSERT_EQ(1u, m.ref_map[0].refs);
158 ASSERT_EQ(10u, m.ref_map[10].length);
159 ASSERT_EQ(2u, m.ref_map[10].refs);
160 ASSERT_EQ(10u, m.ref_map[20].length);
161 ASSERT_EQ(1u, m.ref_map[20].refs);
162 m.get(20, 5);
163 cout << m << std::endl;
164 ASSERT_EQ(3u, m.ref_map.size());
165 ASSERT_EQ(15u, m.ref_map[10].length);
166 ASSERT_EQ(2u, m.ref_map[10].refs);
167 ASSERT_EQ(5u, m.ref_map[25].length);
168 ASSERT_EQ(1u, m.ref_map[25].refs);
169 m.get(5, 20);
170 cout << m << std::endl;
171 ASSERT_EQ(4u, m.ref_map.size());
172 ASSERT_EQ(5u, m.ref_map[0].length);
173 ASSERT_EQ(1u, m.ref_map[0].refs);
174 ASSERT_EQ(5u, m.ref_map[5].length);
175 ASSERT_EQ(2u, m.ref_map[5].refs);
176 ASSERT_EQ(15u, m.ref_map[10].length);
177 ASSERT_EQ(3u, m.ref_map[10].refs);
178 ASSERT_EQ(5u, m.ref_map[25].length);
179 ASSERT_EQ(1u, m.ref_map[25].refs);
180 m.get(25, 3);
181 cout << m << std::endl;
182 ASSERT_EQ(5u, m.ref_map.size());
183 ASSERT_EQ(5u, m.ref_map[0].length);
184 ASSERT_EQ(1u, m.ref_map[0].refs);
185 ASSERT_EQ(5u, m.ref_map[5].length);
186 ASSERT_EQ(2u, m.ref_map[5].refs);
187 ASSERT_EQ(15u, m.ref_map[10].length);
188 ASSERT_EQ(3u, m.ref_map[10].refs);
189 ASSERT_EQ(3u, m.ref_map[25].length);
190 ASSERT_EQ(2u, m.ref_map[25].refs);
191 ASSERT_EQ(2u, m.ref_map[28].length);
192 ASSERT_EQ(1u, m.ref_map[28].refs);
193 }
194
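// Dropping refs should return the newly unreferenced ranges in 'r' and set
// 'maybe_unshared' when the remaining ranges appear to hold only single
// references.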
195 TEST(bluestore_extent_ref_map_t, put)
196 {
197 bluestore_extent_ref_map_t m;
198 PExtentVector r;
199 bool maybe_unshared = false;
200 m.get(10, 30);
201 maybe_unshared = true;
202 m.put(10, 30, &r, &maybe_unshared);
203 cout << m << " " << r << " " << (int)maybe_unshared << std::endl;
204 ASSERT_EQ(0u, m.ref_map.size());
205 ASSERT_EQ(1u, r.size());
206 ASSERT_EQ(10u, r[0].offset);
207 ASSERT_EQ(30u, r[0].length);
208 ASSERT_TRUE(maybe_unshared);
209 r.clear();
210 m.get(10, 30);
211 m.get(20, 10);
212 maybe_unshared = true;
213 m.put(10, 30, &r, &maybe_unshared);
214 cout << m << " " << r << " " << (int)maybe_unshared << std::endl;
215 ASSERT_EQ(1u, m.ref_map.size());
216 ASSERT_EQ(10u, m.ref_map[20].length);
217 ASSERT_EQ(1u, m.ref_map[20].refs);
218 ASSERT_EQ(2u, r.size());
219 ASSERT_EQ(10u, r[0].offset);
220 ASSERT_EQ(10u, r[0].length);
221 ASSERT_EQ(30u, r[1].offset);
222 ASSERT_EQ(10u, r[1].length);
223 ASSERT_TRUE(maybe_unshared);
224 r.clear();
225 m.get(30, 10);
226 m.get(30, 10);
227 maybe_unshared = true;
228 m.put(20, 15, &r, &maybe_unshared);
229 cout << m << " " << r << " " << (int)maybe_unshared << std::endl;
230 ASSERT_EQ(2u, m.ref_map.size());
231 ASSERT_EQ(5u, m.ref_map[30].length);
232 ASSERT_EQ(1u, m.ref_map[30].refs);
233 ASSERT_EQ(5u, m.ref_map[35].length);
234 ASSERT_EQ(2u, m.ref_map[35].refs);
235 ASSERT_EQ(1u, r.size());
236 ASSERT_EQ(20u, r[0].offset);
237 ASSERT_EQ(10u, r[0].length);
238 ASSERT_FALSE(maybe_unshared);
239 r.clear();
240 maybe_unshared = true;
241 m.put(33, 5, &r, &maybe_unshared);
242 cout << m << " " << r << " " << (int)maybe_unshared << std::endl;
243 ASSERT_EQ(3u, m.ref_map.size());
244 ASSERT_EQ(3u, m.ref_map[30].length);
245 ASSERT_EQ(1u, m.ref_map[30].refs);
246 ASSERT_EQ(3u, m.ref_map[35].length);
247 ASSERT_EQ(1u, m.ref_map[35].refs);
248 ASSERT_EQ(2u, m.ref_map[38].length);
249 ASSERT_EQ(2u, m.ref_map[38].refs);
250 ASSERT_EQ(1u, r.size());
251 ASSERT_EQ(33u, r[0].offset);
252 ASSERT_EQ(2u, r[0].length);
253 ASSERT_FALSE(maybe_unshared);
254 r.clear();
255 maybe_unshared = true;
256 m.put(38, 2, &r, &maybe_unshared);
257 cout << m << " " << r << " " << (int)maybe_unshared << std::endl;
258 ASSERT_TRUE(maybe_unshared);
259 }
260
261 TEST(bluestore_extent_ref_map_t, contains)
262 {
263 bluestore_extent_ref_map_t m;
264 m.get(10, 30);
265 ASSERT_TRUE(m.contains(10, 30));
266 ASSERT_TRUE(m.contains(10, 10));
267 ASSERT_TRUE(m.contains(30, 10));
268 ASSERT_FALSE(m.contains(0, 10));
269 ASSERT_FALSE(m.contains(0, 20));
270 ASSERT_FALSE(m.contains(0, 100));
271 ASSERT_FALSE(m.contains(40, 10));
272 ASSERT_FALSE(m.contains(30, 11));
273 m.get(40, 10);
274 m.get(40, 10);
275 ASSERT_TRUE(m.contains(30, 11));
276 ASSERT_TRUE(m.contains(30, 20));
277 ASSERT_TRUE(m.contains(10, 40));
278 ASSERT_FALSE(m.contains(0, 50));
279 ASSERT_FALSE(m.contains(40, 20));
280 m.get(60, 100);
281 ASSERT_TRUE(m.contains(60, 10));
282 ASSERT_TRUE(m.contains(40, 10));
283 ASSERT_FALSE(m.contains(40, 11));
284 ASSERT_FALSE(m.contains(40, 20));
285 ASSERT_FALSE(m.contains(40, 30));
286 ASSERT_FALSE(m.contains(40, 3000));
287 ASSERT_FALSE(m.contains(4000, 30));
288 }
289
290 TEST(bluestore_extent_ref_map_t, intersects)
291 {
292 bluestore_extent_ref_map_t m;
293 m.get(10, 30);
294 ASSERT_TRUE(m.intersects(10, 30));
295 ASSERT_TRUE(m.intersects(0, 11));
296 ASSERT_TRUE(m.intersects(10, 40));
297 ASSERT_TRUE(m.intersects(15, 40));
298 ASSERT_FALSE(m.intersects(0, 10));
299 ASSERT_FALSE(m.intersects(0, 5));
300 ASSERT_FALSE(m.intersects(40, 20));
301 ASSERT_FALSE(m.intersects(41, 20));
302 m.get(40, 10);
303 m.get(40, 10);
304 ASSERT_TRUE(m.intersects(0, 100));
305 ASSERT_TRUE(m.intersects(10, 35));
306 ASSERT_TRUE(m.intersects(45, 10));
307 ASSERT_FALSE(m.intersects(50, 5));
308 m.get(60, 100);
309 ASSERT_TRUE(m.intersects(45, 10));
310 ASSERT_TRUE(m.intersects(55, 10));
311 ASSERT_TRUE(m.intersects(50, 11));
312 ASSERT_FALSE(m.intersects(50, 10));
313 ASSERT_FALSE(m.intersects(51, 9));
314 ASSERT_FALSE(m.intersects(55, 1));
315 }
316
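// For every supported checksum algorithm: checksum a buffer, then verify that
// matching data passes and mismatched data reports the offset of the first
// bad chunk.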
317 TEST(bluestore_blob_t, calc_csum)
318 {
319 bufferlist bl;
320 bl.append("asdfghjkqwertyuizxcvbnm,");
321 bufferlist bl2;
322 bl2.append("xxxxXXXXyyyyYYYYzzzzZZZZ");
323 bufferlist f;
324 f.substr_of(bl, 0, 8);
325 bufferlist m;
326 m.substr_of(bl, 8, 8);
327 bufferlist e;
328 e.substr_of(bl, 16, 8);
329 bufferlist n;
330 n.append("12345678");
331
332 for (unsigned csum_type = Checksummer::CSUM_NONE + 1;
333 csum_type < Checksummer::CSUM_MAX;
334 ++csum_type) {
335 cout << "csum_type " << Checksummer::get_csum_type_string(csum_type)
336 << std::endl;
337
338 bluestore_blob_t b;
339 int bad_off;
340 uint64_t bad_csum;
341 ASSERT_EQ(0, b.verify_csum(0, bl, &bad_off, &bad_csum));
342 ASSERT_EQ(-1, bad_off);
343
344 b.init_csum(csum_type, 3, 24);
345 cout << " value size " << b.get_csum_value_size() << std::endl;
346 b.calc_csum(0, bl);
347 ASSERT_EQ(0, b.verify_csum(0, bl, &bad_off, &bad_csum));
348 ASSERT_EQ(-1, bad_off);
349 ASSERT_EQ(-1, b.verify_csum(0, bl2, &bad_off, &bad_csum));
350 ASSERT_EQ(0, bad_off);
351
352 ASSERT_EQ(0, b.verify_csum(0, f, &bad_off, &bad_csum));
353 ASSERT_EQ(-1, bad_off);
354 ASSERT_EQ(-1, b.verify_csum(8, f, &bad_off, &bad_csum));
355 ASSERT_EQ(8, bad_off);
356 ASSERT_EQ(-1, b.verify_csum(16, f, &bad_off, &bad_csum));
357 ASSERT_EQ(16, bad_off);
358
359 ASSERT_EQ(-1, b.verify_csum(0, m, &bad_off, &bad_csum));
360 ASSERT_EQ(0, bad_off);
361 ASSERT_EQ(0, b.verify_csum(8, m, &bad_off, &bad_csum));
362 ASSERT_EQ(-1, bad_off);
363 ASSERT_EQ(-1, b.verify_csum(16, m, &bad_off, &bad_csum));
364 ASSERT_EQ(16, bad_off);
365
366 ASSERT_EQ(-1, b.verify_csum(0, e, &bad_off, &bad_csum));
367 ASSERT_EQ(0, bad_off);
368 ASSERT_EQ(-1, b.verify_csum(8, e, &bad_off, &bad_csum));
369 ASSERT_EQ(8, bad_off);
370 ASSERT_EQ(0, b.verify_csum(16, e, &bad_off, &bad_csum));
371 ASSERT_EQ(-1, bad_off);
372
373 b.calc_csum(8, n);
374 ASSERT_EQ(0, b.verify_csum(0, f, &bad_off, &bad_csum));
375 ASSERT_EQ(-1, bad_off);
376 ASSERT_EQ(0, b.verify_csum(8, n, &bad_off, &bad_csum));
377 ASSERT_EQ(-1, bad_off);
378 ASSERT_EQ(0, b.verify_csum(16, e, &bad_off, &bad_csum));
379 ASSERT_EQ(-1, bad_off);
380 ASSERT_EQ(-1, b.verify_csum(0, bl, &bad_off, &bad_csum));
381 ASSERT_EQ(8, bad_off);
382 }
383 }
384
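// Rough throughput benchmark: checksum a 10 MB buffer 256 times with each
// algorithm and print MB/sec.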
385 TEST(bluestore_blob_t, csum_bench)
386 {
387 bufferlist bl;
388 bufferptr bp(10485760);
389 for (char *a = bp.c_str(); a < bp.c_str() + bp.length(); ++a)
390 *a = (unsigned long)a & 0xff;
391 bl.append(bp);
392 int count = 256;
393 for (unsigned csum_type = 1;
394 csum_type < Checksummer::CSUM_MAX;
395 ++csum_type) {
396 bluestore_blob_t b;
397 b.init_csum(csum_type, 12, bl.length());
398 ceph::mono_clock::time_point start = ceph::mono_clock::now();
399 for (int i = 0; i<count; ++i) {
400 b.calc_csum(0, bl);
401 }
402 ceph::mono_clock::time_point end = ceph::mono_clock::now();
403 auto dur = std::chrono::duration_cast<ceph::timespan>(end - start);
404 double mbsec = (double)count * (double)bl.length() / 1000000.0 / (double)dur.count() * 1000000000.0;
405 cout << "csum_type " << Checksummer::get_csum_type_string(csum_type)
406 << ", " << dur << " seconds, "
407 << mbsec << " MB/sec" << std::endl;
408 }
409 }
410
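// Releasing blob references should free physical extents, but only once whole
// min_alloc_size-aligned chunks become unreferenced.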
411 TEST(Blob, put_ref)
412 {
413 {
414 BlueStore store(g_ceph_context, "", 4096);
415 BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
416 g_ceph_context, "lru", NULL);
417 BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
418 g_ceph_context, "lru", NULL);
419
420 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
421 BlueStore::Blob b;
422 b.shared_blob = new BlueStore::SharedBlob(coll.get());
423 b.dirty_blob().allocated_test(bluestore_pextent_t(0x40715000, 0x2000));
424 b.dirty_blob().allocated_test(
425 bluestore_pextent_t(bluestore_pextent_t::INVALID_OFFSET, 0x8000));
426 b.dirty_blob().allocated_test(bluestore_pextent_t(0x4071f000, 0x5000));
427 b.get_ref(coll.get(), 0, 0x1200);
428 b.get_ref(coll.get(), 0xae00, 0x4200);
429 ASSERT_EQ(0x5400u, b.get_referenced_bytes());
430 cout << b << std::endl;
431 PExtentVector r;
432
433 ASSERT_FALSE(b.put_ref(coll.get(), 0, 0x1200, &r));
434 ASSERT_EQ(0x4200u, b.get_referenced_bytes());
435 cout << " r " << r << std::endl;
436 cout << b << std::endl;
437
438 r.clear();
439 ASSERT_TRUE(b.put_ref(coll.get(), 0xae00, 0x4200, &r));
440 ASSERT_EQ(0u, b.get_referenced_bytes());
441 cout << " r " << r << std::endl;
442 cout << b << std::endl;
443 }
444
445 unsigned mas = 4096;
446 BlueStore store(g_ceph_context, "", 8192);
447 BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
448 g_ceph_context, "lru", NULL);
449 BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
450 g_ceph_context, "lru", NULL);
451 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
452
453 {
454 BlueStore::Blob B;
455 B.shared_blob = new BlueStore::SharedBlob(coll.get());
456 bluestore_blob_t& b = B.dirty_blob();
457 PExtentVector r;
458 b.allocated_test(bluestore_pextent_t(0, mas * 2));
459 B.get_ref(coll.get(), 0, mas*2);
460 ASSERT_EQ(mas * 2, B.get_referenced_bytes());
461 ASSERT_TRUE(b.is_allocated(0, mas*2));
462 ASSERT_TRUE(B.put_ref(coll.get(), 0, mas*2, &r));
463 ASSERT_EQ(0u, B.get_referenced_bytes());
464 cout << "r " << r << " " << b << std::endl;
465 ASSERT_EQ(1u, r.size());
466 ASSERT_EQ(0u, r[0].offset);
467 ASSERT_EQ(mas*2, r[0].length);
468 ASSERT_FALSE(b.is_allocated(0, mas*2));
469 ASSERT_FALSE(b.is_allocated(0, mas));
470 ASSERT_FALSE(b.is_allocated(mas, 0));
471 ASSERT_FALSE(b.get_extents()[0].is_valid());
472 ASSERT_EQ(mas*2, b.get_extents()[0].length);
473 }
474 {
475 BlueStore::Blob B;
476 B.shared_blob = new BlueStore::SharedBlob(coll.get());
477 bluestore_blob_t& b = B.dirty_blob();
478 PExtentVector r;
479 b.allocated_test(bluestore_pextent_t(123, mas * 2));
480 B.get_ref(coll.get(), 0, mas*2);
481 ASSERT_EQ(mas * 2, B.get_referenced_bytes());
482 ASSERT_FALSE(B.put_ref(coll.get(), 0, mas, &r));
483 ASSERT_EQ(mas, B.get_referenced_bytes());
484 cout << "r " << r << " " << b << std::endl;
485 ASSERT_EQ(0u, r.size());
486 ASSERT_TRUE(b.is_allocated(0, mas*2));
487 ASSERT_TRUE(B.put_ref(coll.get(), mas, mas, &r));
488 ASSERT_EQ(0u, B.get_referenced_bytes());
490 cout << "r " << r << " " << b << std::endl;
491 ASSERT_EQ(1u, r.size());
492 ASSERT_EQ(123u, r[0].offset);
493 ASSERT_EQ(mas*2, r[0].length);
494 ASSERT_FALSE(b.is_allocated(0, mas*2));
495 ASSERT_FALSE(b.get_extents()[0].is_valid());
496 ASSERT_EQ(mas*2, b.get_extents()[0].length);
497 }
498 {
499 BlueStore::Blob B;
500 B.shared_blob = new BlueStore::SharedBlob(coll.get());
501 bluestore_blob_t& b = B.dirty_blob();
502 PExtentVector r;
503 b.allocated_test(bluestore_pextent_t(1, mas));
504 b.allocated_test(bluestore_pextent_t(2, mas));
505 b.allocated_test(bluestore_pextent_t(3, mas));
506 b.allocated_test(bluestore_pextent_t(4, mas));
507 B.get_ref(coll.get(), 0, mas*4);
508 ASSERT_EQ(mas * 4, B.get_referenced_bytes());
509 ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r));
510 ASSERT_EQ(mas * 3, B.get_referenced_bytes());
511 cout << "r " << r << " " << b << std::endl;
512 ASSERT_EQ(0u, r.size());
513 ASSERT_TRUE(b.is_allocated(0, mas*4));
514 ASSERT_TRUE(b.is_allocated(mas, mas));
515 ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas, &r));
516 ASSERT_EQ(mas * 2, B.get_referenced_bytes());
517 cout << "r " << r << " " << b << std::endl;
518 ASSERT_EQ(0u, r.size());
519 ASSERT_TRUE(b.is_allocated(mas*2, mas));
520 ASSERT_TRUE(b.is_allocated(0, mas*4));
521 ASSERT_FALSE(B.put_ref(coll.get(), mas*3, mas, &r));
522 ASSERT_EQ(mas, B.get_referenced_bytes());
523 cout << "r " << r << " " << b << std::endl;
524 ASSERT_EQ(2u, r.size());
525 ASSERT_EQ(3u, r[0].offset);
526 ASSERT_EQ(mas, r[0].length);
527 ASSERT_EQ(4u, r[1].offset);
528 ASSERT_EQ(mas, r[1].length);
529 ASSERT_TRUE(b.is_allocated(0, mas*2));
530 ASSERT_FALSE(b.is_allocated(mas*2, mas*2));
531 ASSERT_TRUE(b.get_extents()[0].is_valid());
532 ASSERT_TRUE(b.get_extents()[1].is_valid());
533 ASSERT_FALSE(b.get_extents()[2].is_valid());
534 ASSERT_EQ(3u, b.get_extents().size());
535 }
536 {
537 BlueStore::Blob B;
538 B.shared_blob = new BlueStore::SharedBlob(coll.get());
539 bluestore_blob_t& b = B.dirty_blob();
540 PExtentVector r;
541 b.allocated_test(bluestore_pextent_t(1, mas));
542 b.allocated_test(bluestore_pextent_t(2, mas));
543 b.allocated_test(bluestore_pextent_t(3, mas));
544 b.allocated_test(bluestore_pextent_t(4, mas));
545 b.allocated_test(bluestore_pextent_t(5, mas));
546 b.allocated_test(bluestore_pextent_t(6, mas));
547 B.get_ref(coll.get(), 0, mas*6);
548 ASSERT_EQ(mas * 6, B.get_referenced_bytes());
549 ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r));
550 ASSERT_EQ(mas * 5, B.get_referenced_bytes());
551 cout << "r " << r << " " << b << std::endl;
552 ASSERT_EQ(0u, r.size());
553 ASSERT_TRUE(b.is_allocated(0, mas*6));
554 ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas, &r));
555 ASSERT_EQ(mas * 4, B.get_referenced_bytes());
556 cout << "r " << r << " " << b << std::endl;
557 ASSERT_EQ(0u, r.size());
558 ASSERT_TRUE(b.is_allocated(0, mas*6));
559 ASSERT_FALSE(B.put_ref(coll.get(), mas*3, mas, &r));
560 ASSERT_EQ(mas * 3, B.get_referenced_bytes());
561 cout << "r " << r << " " << b << std::endl;
562 ASSERT_EQ(2u, r.size());
563 ASSERT_EQ(3u, r[0].offset);
564 ASSERT_EQ(mas, r[0].length);
565 ASSERT_EQ(4u, r[1].offset);
566 ASSERT_EQ(mas, r[1].length);
567 ASSERT_TRUE(b.is_allocated(0, mas*2));
568 ASSERT_FALSE(b.is_allocated(mas*2, mas*2));
569 ASSERT_TRUE(b.is_allocated(mas*4, mas*2));
570 ASSERT_EQ(5u, b.get_extents().size());
571 ASSERT_TRUE(b.get_extents()[0].is_valid());
572 ASSERT_TRUE(b.get_extents()[1].is_valid());
573 ASSERT_FALSE(b.get_extents()[2].is_valid());
574 ASSERT_TRUE(b.get_extents()[3].is_valid());
575 ASSERT_TRUE(b.get_extents()[4].is_valid());
576 }
577 {
578 BlueStore::Blob B;
579 B.shared_blob = new BlueStore::SharedBlob(coll.get());
580 bluestore_blob_t& b = B.dirty_blob();
581 PExtentVector r;
582 b.allocated_test(bluestore_pextent_t(1, mas * 6));
583 B.get_ref(coll.get(), 0, mas*6);
584 ASSERT_EQ(mas * 6, B.get_referenced_bytes());
585 ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r));
586 ASSERT_EQ(mas * 5, B.get_referenced_bytes());
587 cout << "r " << r << " " << b << std::endl;
588 ASSERT_EQ(0u, r.size());
589 ASSERT_TRUE(b.is_allocated(0, mas*6));
590 ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas, &r));
591 ASSERT_EQ(mas * 4, B.get_referenced_bytes());
592 cout << "r " << r << " " << b << std::endl;
593 ASSERT_EQ(0u, r.size());
594 ASSERT_TRUE(b.is_allocated(0, mas*6));
595 ASSERT_FALSE(B.put_ref(coll.get(), mas*3, mas, &r));
596 ASSERT_EQ(mas * 3, B.get_referenced_bytes());
597 cout << "r " << r << " " << b << std::endl;
598 ASSERT_EQ(1u, r.size());
599 ASSERT_EQ(0x2001u, r[0].offset);
600 ASSERT_EQ(mas*2, r[0].length);
601 ASSERT_TRUE(b.is_allocated(0, mas*2));
602 ASSERT_FALSE(b.is_allocated(mas*2, mas*2));
603 ASSERT_TRUE(b.is_allocated(mas*4, mas*2));
604 ASSERT_EQ(3u, b.get_extents().size());
605 ASSERT_TRUE(b.get_extents()[0].is_valid());
606 ASSERT_FALSE(b.get_extents()[1].is_valid());
607 ASSERT_TRUE(b.get_extents()[2].is_valid());
608 }
609 {
610 BlueStore::Blob B;
611 B.shared_blob = new BlueStore::SharedBlob(coll.get());
612 bluestore_blob_t& b = B.dirty_blob();
613 PExtentVector r;
614 b.allocated_test(bluestore_pextent_t(1, mas * 4));
615 b.allocated_test(bluestore_pextent_t(2, mas * 4));
616 b.allocated_test(bluestore_pextent_t(3, mas * 4));
617 B.get_ref(coll.get(), 0, mas*12);
618 ASSERT_EQ(mas * 12, B.get_referenced_bytes());
619 ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r));
620 ASSERT_EQ(mas * 11, B.get_referenced_bytes());
621 cout << "r " << r << " " << b << std::endl;
622 ASSERT_EQ(0u, r.size());
623 ASSERT_TRUE(b.is_allocated(0, mas*12));
624 ASSERT_FALSE(B.put_ref(coll.get(), mas*9, mas, &r));
625 ASSERT_EQ(mas * 10, B.get_referenced_bytes());
626 cout << "r " << r << " " << b << std::endl;
627 ASSERT_EQ(0u, r.size());
628 ASSERT_TRUE(b.is_allocated(0, mas*12));
629 ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas*7, &r));
630 ASSERT_EQ(mas * 3, B.get_referenced_bytes());
631 cout << "r " << r << " " << b << std::endl;
632 ASSERT_EQ(3u, r.size());
633 ASSERT_EQ(0x2001u, r[0].offset);
634 ASSERT_EQ(mas*2, r[0].length);
635 ASSERT_EQ(0x2u, r[1].offset);
636 ASSERT_EQ(mas*4, r[1].length);
637 ASSERT_EQ(0x3u, r[2].offset);
638 ASSERT_EQ(mas*2, r[2].length);
639 ASSERT_TRUE(b.is_allocated(0, mas*2));
640 ASSERT_FALSE(b.is_allocated(mas*2, mas*8));
641 ASSERT_TRUE(b.is_allocated(mas*10, mas*2));
642 ASSERT_EQ(3u, b.get_extents().size());
643 ASSERT_TRUE(b.get_extents()[0].is_valid());
644 ASSERT_FALSE(b.get_extents()[1].is_valid());
645 ASSERT_TRUE(b.get_extents()[2].is_valid());
646 }
647 {
648 BlueStore::Blob B;
649 B.shared_blob = new BlueStore::SharedBlob(coll.get());
650 bluestore_blob_t& b = B.dirty_blob();
651 PExtentVector r;
652 b.allocated_test(bluestore_pextent_t(1, mas * 4));
653 b.allocated_test(bluestore_pextent_t(2, mas * 4));
654 b.allocated_test(bluestore_pextent_t(3, mas * 4));
655 B.get_ref(coll.get(), 0, mas*12);
656 ASSERT_EQ(mas * 12, B.get_referenced_bytes());
657 ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r));
658 ASSERT_EQ(mas * 11, B.get_referenced_bytes());
659 cout << "r " << r << " " << b << std::endl;
660 ASSERT_EQ(0u, r.size());
661 ASSERT_TRUE(b.is_allocated(0, mas*12));
662 ASSERT_FALSE(B.put_ref(coll.get(), mas*9, mas, &r));
663 ASSERT_EQ(mas * 10, B.get_referenced_bytes());
664 cout << "r " << r << " " << b << std::endl;
665 ASSERT_EQ(0u, r.size());
666 ASSERT_TRUE(b.is_allocated(0, mas*12));
667 ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas*7, &r));
668 ASSERT_EQ(mas * 3, B.get_referenced_bytes());
669 cout << "r " << r << " " << b << std::endl;
670 ASSERT_EQ(3u, r.size());
671 ASSERT_EQ(0x2001u, r[0].offset);
672 ASSERT_EQ(mas*2, r[0].length);
673 ASSERT_EQ(0x2u, r[1].offset);
674 ASSERT_EQ(mas*4, r[1].length);
675 ASSERT_EQ(0x3u, r[2].offset);
676 ASSERT_EQ(mas*2, r[2].length);
677 ASSERT_TRUE(b.is_allocated(0, mas*2));
678 ASSERT_FALSE(b.is_allocated(mas*2, mas*8));
679 ASSERT_TRUE(b.is_allocated(mas*10, mas*2));
680 ASSERT_EQ(3u, b.get_extents().size());
681 ASSERT_TRUE(b.get_extents()[0].is_valid());
682 ASSERT_FALSE(b.get_extents()[1].is_valid());
683 ASSERT_TRUE(b.get_extents()[2].is_valid());
684 ASSERT_FALSE(B.put_ref(coll.get(), 0, mas, &r));
685 ASSERT_EQ(mas * 2, B.get_referenced_bytes());
686 cout << "r " << r << " " << b << std::endl;
687 ASSERT_EQ(1u, r.size());
688 ASSERT_EQ(0x1u, r[0].offset);
689 ASSERT_EQ(mas*2, r[0].length);
690 ASSERT_EQ(2u, b.get_extents().size());
691 ASSERT_FALSE(b.get_extents()[0].is_valid());
692 ASSERT_TRUE(b.get_extents()[1].is_valid());
693 ASSERT_TRUE(B.put_ref(coll.get(), mas*10, mas*2, &r));
694 ASSERT_EQ(mas * 0, B.get_referenced_bytes());
695 cout << "r " << r << " " << b << std::endl;
696 ASSERT_EQ(1u, r.size());
697 ASSERT_EQ(0x2003u, r[0].offset);
698 ASSERT_EQ(mas*2, r[0].length);
699 ASSERT_EQ(1u, b.get_extents().size());
700 ASSERT_FALSE(b.get_extents()[0].is_valid());
701 }
702 {
703 BlueStore::Blob B;
704 B.shared_blob = new BlueStore::SharedBlob(coll.get());
705 bluestore_blob_t& b = B.dirty_blob();
706 PExtentVector r;
707 b.allocated_test(bluestore_pextent_t(1, mas * 4));
708 b.allocated_test(bluestore_pextent_t(2, mas * 4));
709 b.allocated_test(bluestore_pextent_t(3, mas * 4));
710 B.get_ref(coll.get(), 0, mas*12);
711 ASSERT_EQ(mas * 12, B.get_referenced_bytes());
712 ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r));
713 ASSERT_EQ(mas * 11, B.get_referenced_bytes());
714 cout << "r " << r << " " << b << std::endl;
715 ASSERT_EQ(0u, r.size());
716 ASSERT_TRUE(b.is_allocated(0, mas*12));
717 ASSERT_FALSE(B.put_ref(coll.get(), mas*9, mas, &r));
718 ASSERT_EQ(mas * 10, B.get_referenced_bytes());
719 cout << "r " << r << " " << b << std::endl;
720 ASSERT_EQ(0u, r.size());
721 ASSERT_TRUE(b.is_allocated(0, mas*12));
722 ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas*7, &r));
723 ASSERT_EQ(mas * 3, B.get_referenced_bytes());
724 cout << "r " << r << " " << b << std::endl;
725 ASSERT_EQ(3u, r.size());
726 ASSERT_EQ(0x2001u, r[0].offset);
727 ASSERT_EQ(mas*2, r[0].length);
728 ASSERT_EQ(0x2u, r[1].offset);
729 ASSERT_EQ(mas*4, r[1].length);
730 ASSERT_EQ(0x3u, r[2].offset);
731 ASSERT_EQ(mas*2, r[2].length);
732 ASSERT_TRUE(b.is_allocated(0, mas*2));
733 ASSERT_FALSE(b.is_allocated(mas*2, mas*8));
734 ASSERT_TRUE(b.is_allocated(mas*10, mas*2));
735 ASSERT_EQ(3u, b.get_extents().size());
736 ASSERT_TRUE(b.get_extents()[0].is_valid());
737 ASSERT_FALSE(b.get_extents()[1].is_valid());
738 ASSERT_TRUE(b.get_extents()[2].is_valid());
739 ASSERT_FALSE(B.put_ref(coll.get(), mas*10, mas*2, &r));
740 ASSERT_EQ(mas * 1, B.get_referenced_bytes());
741 cout << "r " << r << " " << b << std::endl;
742 ASSERT_EQ(1u, r.size());
743 ASSERT_EQ(0x2003u, r[0].offset);
744 ASSERT_EQ(mas*2, r[0].length);
745 ASSERT_EQ(2u, b.get_extents().size());
746 ASSERT_TRUE(b.get_extents()[0].is_valid());
747 ASSERT_FALSE(b.get_extents()[1].is_valid());
748 ASSERT_TRUE(B.put_ref(coll.get(), 0, mas, &r));
749 ASSERT_EQ(mas * 0, B.get_referenced_bytes());
750 cout << "r " << r << " " << b << std::endl;
751 ASSERT_EQ(1u, r.size());
752 ASSERT_EQ(0x1u, r[0].offset);
753 ASSERT_EQ(mas*2, r[0].length);
754 ASSERT_EQ(1u, b.get_extents().size());
755 ASSERT_FALSE(b.get_extents()[0].is_valid());
756 }
757 {
758 BlueStore::Blob B;
759 B.shared_blob = new BlueStore::SharedBlob(coll.get());
760 bluestore_blob_t& b = B.dirty_blob();
761 PExtentVector r;
762 b.allocated_test(bluestore_pextent_t(1, mas * 8));
763 B.get_ref(coll.get(), 0, mas*8);
764 ASSERT_EQ(mas * 8, B.get_referenced_bytes());
765 ASSERT_FALSE(B.put_ref(coll.get(), 0, mas, &r));
766 ASSERT_EQ(mas * 7, B.get_referenced_bytes());
767 cout << "r " << r << " " << b << std::endl;
768 ASSERT_EQ(0u, r.size());
769 ASSERT_TRUE(b.is_allocated(0, mas*8));
770 ASSERT_FALSE(B.put_ref(coll.get(), mas*7, mas, &r));
771 ASSERT_EQ(mas * 6, B.get_referenced_bytes());
772 cout << "r " << r << " " << b << std::endl;
773 ASSERT_EQ(0u, r.size());
774 ASSERT_TRUE(b.is_allocated(0, mas*8));
775 ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas, &r));
776 ASSERT_EQ(mas * 5, B.get_referenced_bytes());
777 cout << "r " << r << " " << b << std::endl;
778 ASSERT_EQ(0u, r.size());
779 ASSERT_TRUE(b.is_allocated(0, 8));
780 ASSERT_FALSE(B.put_ref(coll.get(), mas*3, mas*4, &r));
781 ASSERT_EQ(mas * 1, B.get_referenced_bytes());
782 ASSERT_EQ(1u, r.size());
783 ASSERT_EQ(0x2001u, r[0].offset);
784 ASSERT_EQ(mas*6, r[0].length);
785 ASSERT_TRUE(b.is_allocated(0, mas*2));
786 ASSERT_FALSE(b.is_allocated(mas*2, mas*6));
787 ASSERT_EQ(2u, b.get_extents().size());
788 ASSERT_TRUE(b.get_extents()[0].is_valid());
789 ASSERT_FALSE(b.get_extents()[1].is_valid());
790 ASSERT_TRUE(B.put_ref(coll.get(), mas, mas, &r));
791 ASSERT_EQ(mas * 0, B.get_referenced_bytes());
792 cout << "r " << r << " " << b << std::endl;
793 ASSERT_EQ(1u, r.size());
794 ASSERT_EQ(0x1u, r[0].offset);
795 ASSERT_EQ(mas*2, r[0].length);
796 ASSERT_EQ(1u, b.get_extents().size());
797 ASSERT_FALSE(b.get_extents()[0].is_valid());
798 }
799 // verify csum chunk size is factored in properly
800 {
801 BlueStore::Blob B;
802 B.shared_blob = new BlueStore::SharedBlob(coll.get());
803 bluestore_blob_t& b = B.dirty_blob();
804 PExtentVector r;
805 b.allocated_test(bluestore_pextent_t(0, mas*4));
806 b.init_csum(Checksummer::CSUM_CRC32C, 14, mas * 4);
807 B.get_ref(coll.get(), 0, mas*4);
808 ASSERT_EQ(mas * 4, B.get_referenced_bytes());
809 ASSERT_TRUE(b.is_allocated(0, mas*4));
810 ASSERT_FALSE(B.put_ref(coll.get(), 0, mas*3, &r));
811 ASSERT_EQ(mas * 1, B.get_referenced_bytes());
812 cout << "r " << r << " " << b << std::endl;
813 ASSERT_EQ(0u, r.size());
814 ASSERT_TRUE(b.is_allocated(0, mas*4));
815 ASSERT_TRUE(b.get_extents()[0].is_valid());
816 ASSERT_EQ(mas*4, b.get_extents()[0].length);
817 }
818 {
819 BlueStore::Blob B;
820 B.shared_blob = new BlueStore::SharedBlob(coll.get());
821 bluestore_blob_t& b = B.dirty_blob();
822 b.allocated_test(bluestore_pextent_t(0x40101000, 0x4000));
823 b.allocated_test(bluestore_pextent_t(bluestore_pextent_t::INVALID_OFFSET,
824 0x13000));
825
826 b.allocated_test(bluestore_pextent_t(0x40118000, 0x7000));
827 B.get_ref(coll.get(), 0x0, 0x3800);
828 B.get_ref(coll.get(), 0x17c00, 0x6400);
829 ASSERT_EQ(0x3800u + 0x6400u, B.get_referenced_bytes());
830 b.set_flag(bluestore_blob_t::FLAG_SHARED);
831 b.init_csum(Checksummer::CSUM_CRC32C, 12, 0x1e000);
832
833 cout << "before: " << B << std::endl;
834 PExtentVector r;
835 ASSERT_FALSE(B.put_ref(coll.get(), 0x1800, 0x2000, &r));
836 ASSERT_EQ(0x3800u + 0x6400u - 0x2000u, B.get_referenced_bytes());
837 cout << "after: " << B << std::endl;
838 cout << "r " << r << std::endl;
839 }
840 {
841 BlueStore::Blob B;
842 B.shared_blob = new BlueStore::SharedBlob(coll.get());
843 bluestore_blob_t& b = B.dirty_blob();
844 b.allocated_test(bluestore_pextent_t(1, 0x5000));
845 b.allocated_test(bluestore_pextent_t(2, 0x5000));
846 B.get_ref(coll.get(), 0x0, 0xa000);
847 ASSERT_EQ(0xa000u, B.get_referenced_bytes());
848 cout << "before: " << B << std::endl;
849 PExtentVector r;
850 ASSERT_FALSE(B.put_ref(coll.get(), 0x8000, 0x2000, &r));
851 cout << "after: " << B << std::endl;
852 cout << "r " << r << std::endl;
853 ASSERT_EQ(0x8000u, B.get_referenced_bytes());
854 ASSERT_EQ(1u, r.size());
855 ASSERT_EQ(0x3002u, r[0].offset);
856 ASSERT_EQ(0x2000u, r[0].length);
857 }
858 {
859 BlueStore::Blob B;
860 B.shared_blob = new BlueStore::SharedBlob(coll.get());
861 bluestore_blob_t& b = B.dirty_blob();
862 b.allocated_test(bluestore_pextent_t(1, 0x7000));
863 b.allocated_test(bluestore_pextent_t(2, 0x7000));
864 B.get_ref(coll.get(), 0x0, 0xe000);
865 ASSERT_EQ(0xe000u, B.get_referenced_bytes());
866 cout << "before: " << B << std::endl;
867 PExtentVector r;
868 ASSERT_FALSE(B.put_ref(coll.get(), 0, 0xb000, &r));
869 ASSERT_EQ(0x3000u, B.get_referenced_bytes());
870 cout << "after: " << B << std::endl;
871 cout << "r " << r << std::endl;
872 ASSERT_EQ(0x3000u, B.get_referenced_bytes());
873 ASSERT_EQ(2u, r.size());
874 ASSERT_EQ(1u, r[0].offset);
875 ASSERT_EQ(0x7000u, r[0].length);
876 ASSERT_EQ(2u, r[1].offset);
877 ASSERT_EQ(0x3000u, r[1].length); // we have 0x1000 bytes less due to
878 // alignment caused by min_alloc_size = 0x2000
879 }
880 {
881 BlueStore store(g_ceph_context, "", 0x4000);
882 BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
883 g_ceph_context, "lru", NULL);
884 BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
885 g_ceph_context, "lru", NULL);
886
887 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
888 BlueStore::Blob B;
889 B.shared_blob = new BlueStore::SharedBlob(coll.get());
890 bluestore_blob_t& b = B.dirty_blob();
891 b.allocated_test(bluestore_pextent_t(1, 0x5000));
892 b.allocated_test(bluestore_pextent_t(2, 0x7000));
893 B.get_ref(coll.get(), 0x0, 0xc000);
894 ASSERT_EQ(0xc000u, B.get_referenced_bytes());
895 cout << "before: " << B << std::endl;
896 PExtentVector r;
897 ASSERT_FALSE(B.put_ref(coll.get(), 0x2000, 0xa000, &r));
898 cout << "after: " << B << std::endl;
899 cout << "r " << r << std::endl;
900 ASSERT_EQ(0x2000u, B.get_referenced_bytes());
901 ASSERT_EQ(2u, r.size());
902 ASSERT_EQ(0x4001u, r[0].offset);
903 ASSERT_EQ(0x1000u, r[0].length);
904 ASSERT_EQ(2u, r[1].offset);
905 ASSERT_EQ(0x7000u, r[1].length);
906 ASSERT_EQ(1u, b.get_extents()[0].offset);
907 ASSERT_EQ(0x4000u, b.get_extents()[0].length);
908 }
909 }
910
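// A blob can be split only when it is not shared, compressed, or tracking
// unused space.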
911 TEST(bluestore_blob_t, can_split)
912 {
913 bluestore_blob_t a;
914 ASSERT_TRUE(a.can_split());
915 a.flags = bluestore_blob_t::FLAG_SHARED;
916 ASSERT_FALSE(a.can_split());
917 a.flags = bluestore_blob_t::FLAG_COMPRESSED;
918 ASSERT_FALSE(a.can_split());
919 a.flags = bluestore_blob_t::FLAG_HAS_UNUSED;
920 ASSERT_FALSE(a.can_split());
921 }
922
923 TEST(bluestore_blob_t, can_split_at)
924 {
925 bluestore_blob_t a;
926 a.allocated_test(bluestore_pextent_t(0x10000, 0x2000));
927 a.allocated_test(bluestore_pextent_t(0x20000, 0x2000));
928 ASSERT_TRUE(a.can_split_at(0x1000));
929 ASSERT_TRUE(a.can_split_at(0x1800));
930 a.init_csum(Checksummer::CSUM_CRC32C, 12, 0x4000);
931 ASSERT_TRUE(a.can_split_at(0x1000));
932 ASSERT_TRUE(a.can_split_at(0x2000));
933 ASSERT_TRUE(a.can_split_at(0x3000));
934 ASSERT_FALSE(a.can_split_at(0x2800));
935 }
936
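// A trailing unallocated extent can be pruned, shrinking both the logical
// length and the checksum array.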
937 TEST(bluestore_blob_t, prune_tail)
938 {
939 bluestore_blob_t a;
940 a.allocated_test(bluestore_pextent_t(0x10000, 0x2000));
941 a.allocated_test(bluestore_pextent_t(0x20000, 0x2000));
942 ASSERT_FALSE(a.can_prune_tail());
943 a.allocated_test(
944 bluestore_pextent_t(bluestore_pextent_t::INVALID_OFFSET, 0x2000));
945 ASSERT_TRUE(a.can_prune_tail());
946 a.prune_tail();
947 ASSERT_FALSE(a.can_prune_tail());
948 ASSERT_EQ(2u, a.get_extents().size());
949 ASSERT_EQ(0x4000u, a.get_logical_length());
950
951 a.allocated_test(
952 bluestore_pextent_t(bluestore_pextent_t::INVALID_OFFSET, 0x2000));
953 a.init_csum(Checksummer::CSUM_CRC32C_8, 12, 0x6000);
954 ASSERT_EQ(6u, a.csum_data.length());
955 ASSERT_TRUE(a.can_prune_tail());
956 a.prune_tail();
957 ASSERT_FALSE(a.can_prune_tail());
958 ASSERT_EQ(2u, a.get_extents().size());
959 ASSERT_EQ(0x4000u, a.get_logical_length());
960 ASSERT_EQ(4u, a.csum_data.length());
961
962 bluestore_blob_t b;
963 b.allocated_test(
964 bluestore_pextent_t(bluestore_pextent_t::INVALID_OFFSET, 0x2000));
965 ASSERT_FALSE(a.can_prune_tail());
966 }
967
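// Splitting a blob should divide its extents, checksum data, and reference
// counts between the left and right halves.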
968 TEST(Blob, split)
969 {
970 BlueStore store(g_ceph_context, "", 4096);
971 BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
972 g_ceph_context, "lru", NULL);
973 BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
974 g_ceph_context, "lru", NULL);
975 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
976 {
977 BlueStore::Blob L, R;
978 L.shared_blob = new BlueStore::SharedBlob(coll.get());
979 R.shared_blob = new BlueStore::SharedBlob(coll.get());
980 L.dirty_blob().allocated_test(bluestore_pextent_t(0x2000, 0x2000));
981 L.dirty_blob().init_csum(Checksummer::CSUM_CRC32C, 12, 0x2000);
982 L.get_ref(coll.get(), 0, 0x2000);
983 L.split(coll.get(), 0x1000, &R);
984 ASSERT_EQ(0x1000u, L.get_blob().get_logical_length());
985 ASSERT_EQ(4u, L.get_blob().csum_data.length());
986 ASSERT_EQ(1u, L.get_blob().get_extents().size());
987 ASSERT_EQ(0x2000u, L.get_blob().get_extents().front().offset);
988 ASSERT_EQ(0x1000u, L.get_blob().get_extents().front().length);
989 ASSERT_EQ(0x1000u, L.get_referenced_bytes());
990 ASSERT_EQ(0x1000u, R.get_blob().get_logical_length());
991 ASSERT_EQ(4u, R.get_blob().csum_data.length());
992 ASSERT_EQ(1u, R.get_blob().get_extents().size());
993 ASSERT_EQ(0x3000u, R.get_blob().get_extents().front().offset);
994 ASSERT_EQ(0x1000u, R.get_blob().get_extents().front().length);
995 ASSERT_EQ(0x1000u, R.get_referenced_bytes());
996 }
997 {
998 BlueStore::Blob L, R;
999 L.shared_blob = new BlueStore::SharedBlob(coll.get());
1000 R.shared_blob = new BlueStore::SharedBlob(coll.get());
1001 L.dirty_blob().allocated_test(bluestore_pextent_t(0x2000, 0x1000));
1002 L.dirty_blob().allocated_test(bluestore_pextent_t(0x12000, 0x1000));
1003 L.dirty_blob().init_csum(Checksummer::CSUM_CRC32C, 12, 0x2000);
1004 L.get_ref(coll.get(), 0, 0x1000);
1005 L.get_ref(coll.get(), 0x1000, 0x1000);
1006 L.split(coll.get(), 0x1000, &R);
1007 ASSERT_EQ(0x1000u, L.get_blob().get_logical_length());
1008 ASSERT_EQ(4u, L.get_blob().csum_data.length());
1009 ASSERT_EQ(1u, L.get_blob().get_extents().size());
1010 ASSERT_EQ(0x2000u, L.get_blob().get_extents().front().offset);
1011 ASSERT_EQ(0x1000u, L.get_blob().get_extents().front().length);
1012 ASSERT_EQ(0x1000u, L.get_referenced_bytes());
1013 ASSERT_EQ(0x1000u, R.get_blob().get_logical_length());
1014 ASSERT_EQ(4u, R.get_blob().csum_data.length());
1015 ASSERT_EQ(1u, R.get_blob().get_extents().size());
1016 ASSERT_EQ(0x12000u, R.get_blob().get_extents().front().offset);
1017 ASSERT_EQ(0x1000u, R.get_blob().get_extents().front().length);
1018 ASSERT_EQ(0x1000u, R.get_referenced_bytes());
1019 }
1020 }
1021
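// Encode a blob in both the legacy (v1, separate ref_map) and current (v2)
// formats and check that decoding yields the same use-tracker state.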
1022 TEST(Blob, legacy_decode)
1023 {
1024 BlueStore store(g_ceph_context, "", 4096);
1025 BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
1026 g_ceph_context, "lru", NULL);
1027 BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
1028 g_ceph_context, "lru", NULL);
1029 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
1030 bufferlist bl, bl2;
1031 {
1032 BlueStore::Blob B;
1033
1034 B.shared_blob = new BlueStore::SharedBlob(coll.get());
1035 B.dirty_blob().allocated_test(bluestore_pextent_t(0x1, 0x2000));
1036 B.dirty_blob().init_csum(Checksummer::CSUM_CRC32C, 12, 0x2000);
1037 B.get_ref(coll.get(), 0, 0xff0);
1038 B.get_ref(coll.get(), 0x1fff, 1);
1039
1040 bluestore_extent_ref_map_t fake_ref_map;
1041 fake_ref_map.get(0, 0xff0);
1042 fake_ref_map.get(0x1fff, 1);
1043
1044 size_t bound = 0, bound2 = 0;
1045
1046 B.bound_encode(
1047 bound,
1048 1, /*struct_v*/
1049 0, /*sbid*/
1050 false);
1051 fake_ref_map.bound_encode(bound);
1052
1053 B.bound_encode(
1054 bound2,
1055 2, /*struct_v*/
1056 0, /*sbid*/
1057 true);
1058
1059 {
1060 auto app = bl.get_contiguous_appender(bound);
1061 auto app2 = bl2.get_contiguous_appender(bound2);
1062 B.encode(
1063 app,
1064 1, /*struct_v*/
1065 0, /*sbid*/
1066 false);
1067 fake_ref_map.encode(app);
1068
1069 B.encode(
1070 app2,
1071 2, /*struct_v*/
1072 0, /*sbid*/
1073 true);
1074 }
1075
1076 auto p = bl.front().begin_deep();
1077 auto p2 = bl2.front().begin_deep();
1078 BlueStore::Blob Bres, Bres2;
1079 Bres.shared_blob = new BlueStore::SharedBlob(coll.get());
1080 Bres2.shared_blob = new BlueStore::SharedBlob(coll.get());
1081
1082 uint64_t sbid, sbid2;
1083 Bres.decode(
1084 coll.get(),
1085 p,
1086 1, /*struct_v*/
1087 &sbid,
1088 true);
1089 Bres2.decode(
1090 coll.get(),
1091 p2,
1092 2, /*struct_v*/
1093 &sbid2,
1094 true);
1095
1096 ASSERT_EQ(0xff0u + 1u, Bres.get_blob_use_tracker().get_referenced_bytes());
1097 ASSERT_EQ(0xff0u + 1u, Bres2.get_blob_use_tracker().get_referenced_bytes());
1098 ASSERT_TRUE(Bres.get_blob_use_tracker().equal(Bres2.get_blob_use_tracker()));
1099 }
1100 }
1101
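// seek_lextent() should return the first extent whose logical end lies past
// the given offset, or end() if there is none.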
1102 TEST(ExtentMap, seek_lextent)
1103 {
1104 BlueStore store(g_ceph_context, "", 4096);
1105 BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
1106 g_ceph_context, "lru", NULL);
1107 BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
1108 g_ceph_context, "lru", NULL);
1109
1110 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
1111 BlueStore::Onode onode(coll.get(), ghobject_t(), "");
1112 BlueStore::ExtentMap em(&onode);
1113 BlueStore::BlobRef br(new BlueStore::Blob);
1114 br->shared_blob = new BlueStore::SharedBlob(coll.get());
1115
1116 ASSERT_EQ(em.extent_map.end(), em.seek_lextent(0));
1117 ASSERT_EQ(em.extent_map.end(), em.seek_lextent(100));
1118
1119 em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, br));
1120 auto a = em.find(100);
1121 ASSERT_EQ(a, em.seek_lextent(0));
1122 ASSERT_EQ(a, em.seek_lextent(99));
1123 ASSERT_EQ(a, em.seek_lextent(100));
1124 ASSERT_EQ(a, em.seek_lextent(101));
1125 ASSERT_EQ(a, em.seek_lextent(199));
1126 ASSERT_EQ(em.extent_map.end(), em.seek_lextent(200));
1127
1128 em.extent_map.insert(*new BlueStore::Extent(200, 0, 100, br));
1129 auto b = em.find(200);
1130 ASSERT_EQ(a, em.seek_lextent(0));
1131 ASSERT_EQ(a, em.seek_lextent(99));
1132 ASSERT_EQ(a, em.seek_lextent(100));
1133 ASSERT_EQ(a, em.seek_lextent(101));
1134 ASSERT_EQ(a, em.seek_lextent(199));
1135 ASSERT_EQ(b, em.seek_lextent(200));
1136 ASSERT_EQ(b, em.seek_lextent(299));
1137 ASSERT_EQ(em.extent_map.end(), em.seek_lextent(300));
1138
1139 em.extent_map.insert(*new BlueStore::Extent(400, 0, 100, br));
1140 auto d = em.find(400);
1141 ASSERT_EQ(a, em.seek_lextent(0));
1142 ASSERT_EQ(a, em.seek_lextent(99));
1143 ASSERT_EQ(a, em.seek_lextent(100));
1144 ASSERT_EQ(a, em.seek_lextent(101));
1145 ASSERT_EQ(a, em.seek_lextent(199));
1146 ASSERT_EQ(b, em.seek_lextent(200));
1147 ASSERT_EQ(b, em.seek_lextent(299));
1148 ASSERT_EQ(d, em.seek_lextent(300));
1149 ASSERT_EQ(d, em.seek_lextent(399));
1150 ASSERT_EQ(d, em.seek_lextent(400));
1151 ASSERT_EQ(d, em.seek_lextent(499));
1152 ASSERT_EQ(em.extent_map.end(), em.seek_lextent(500));
1153 }
1154
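// has_any_lextents() should report whether any logical extent overlaps the
// queried range.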
1155 TEST(ExtentMap, has_any_lextents)
1156 {
1157 BlueStore store(g_ceph_context, "", 4096);
1158 BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
1159 g_ceph_context, "lru", NULL);
1160 BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
1161 g_ceph_context, "lru", NULL);
1162 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
1163 BlueStore::Onode onode(coll.get(), ghobject_t(), "");
1164 BlueStore::ExtentMap em(&onode);
1165 BlueStore::BlobRef b(new BlueStore::Blob);
1166 b->shared_blob = new BlueStore::SharedBlob(coll.get());
1167
1168 ASSERT_FALSE(em.has_any_lextents(0, 0));
1169 ASSERT_FALSE(em.has_any_lextents(0, 1000));
1170 ASSERT_FALSE(em.has_any_lextents(1000, 1000));
1171
1172 em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, b));
1173 ASSERT_FALSE(em.has_any_lextents(0, 50));
1174 ASSERT_FALSE(em.has_any_lextents(0, 100));
1175 ASSERT_FALSE(em.has_any_lextents(50, 50));
1176 ASSERT_TRUE(em.has_any_lextents(50, 51));
1177 ASSERT_TRUE(em.has_any_lextents(50, 100051));
1178 ASSERT_TRUE(em.has_any_lextents(100, 100));
1179 ASSERT_TRUE(em.has_any_lextents(100, 1));
1180 ASSERT_TRUE(em.has_any_lextents(199, 1));
1181 ASSERT_TRUE(em.has_any_lextents(199, 2));
1182 ASSERT_FALSE(em.has_any_lextents(200, 2));
1183
1184 em.extent_map.insert(*new BlueStore::Extent(200, 0, 100, b));
1185 ASSERT_TRUE(em.has_any_lextents(199, 1));
1186 ASSERT_TRUE(em.has_any_lextents(199, 2));
1187 ASSERT_TRUE(em.has_any_lextents(200, 2));
1188 ASSERT_TRUE(em.has_any_lextents(200, 200));
1189 ASSERT_TRUE(em.has_any_lextents(299, 1));
1190 ASSERT_FALSE(em.has_any_lextents(300, 1));
1191
1192 em.extent_map.insert(*new BlueStore::Extent(400, 0, 100, b));
1193 ASSERT_TRUE(em.has_any_lextents(0, 10000));
1194 ASSERT_TRUE(em.has_any_lextents(199, 1));
1195 ASSERT_FALSE(em.has_any_lextents(300, 1));
1196 ASSERT_FALSE(em.has_any_lextents(300, 100));
1197 ASSERT_FALSE(em.has_any_lextents(399, 1));
1198 ASSERT_TRUE(em.has_any_lextents(400, 1));
1199 ASSERT_TRUE(em.has_any_lextents(400, 100));
1200 ASSERT_TRUE(em.has_any_lextents(400, 1000));
1201 ASSERT_TRUE(em.has_any_lextents(499, 1000));
1202 ASSERT_FALSE(em.has_any_lextents(500, 1000));
1203 }
1204
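// Helper: remove the extent starting at logical offset 'v' from the map and
// free it.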
1205 void erase_and_delete(BlueStore::ExtentMap& em, size_t v)
1206 {
1207 auto d = em.find(v);
1208 ASSERT_NE(d, em.extent_map.end());
1209 em.extent_map.erase(d);
1210 delete &*d;
1211 }
1212
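// compress_extent_map() should merge adjacent extents that map contiguous
// ranges of the same blob and return the number of extents removed.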
1213 TEST(ExtentMap, compress_extent_map)
1214 {
1215 BlueStore store(g_ceph_context, "", 4096);
1216 BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
1217 g_ceph_context, "lru", NULL);
1218 BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
1219 g_ceph_context, "lru", NULL);
1220
1221 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
1222 BlueStore::Onode onode(coll.get(), ghobject_t(), "");
1223 BlueStore::ExtentMap em(&onode);
1224 BlueStore::BlobRef b1(new BlueStore::Blob);
1225 BlueStore::BlobRef b2(new BlueStore::Blob);
1226 BlueStore::BlobRef b3(new BlueStore::Blob);
1227 b1->shared_blob = new BlueStore::SharedBlob(coll.get());
1228 b2->shared_blob = new BlueStore::SharedBlob(coll.get());
1229 b3->shared_blob = new BlueStore::SharedBlob(coll.get());
1230
1231 em.extent_map.insert(*new BlueStore::Extent(0, 0, 100, b1));
1232 em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, b2));
1233 ASSERT_EQ(0, em.compress_extent_map(0, 10000));
1234 ASSERT_EQ(2u, em.extent_map.size());
1235
1236 em.extent_map.insert(*new BlueStore::Extent(200, 100, 100, b2));
1237 em.extent_map.insert(*new BlueStore::Extent(300, 200, 100, b2));
1238 ASSERT_EQ(0, em.compress_extent_map(0, 0));
1239 ASSERT_EQ(0, em.compress_extent_map(100000, 1000));
1240 ASSERT_EQ(2, em.compress_extent_map(0, 100000));
1241 ASSERT_EQ(2u, em.extent_map.size());
1242 erase_and_delete(em, 100);
1243 em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, b2));
1244 em.extent_map.insert(*new BlueStore::Extent(200, 100, 100, b3));
1245 em.extent_map.insert(*new BlueStore::Extent(300, 200, 100, b2));
1246 ASSERT_EQ(0, em.compress_extent_map(0, 1));
1247 ASSERT_EQ(0, em.compress_extent_map(0, 100000));
1248 ASSERT_EQ(4u, em.extent_map.size());
1249
1250 em.extent_map.insert(*new BlueStore::Extent(400, 300, 100, b2));
1251 em.extent_map.insert(*new BlueStore::Extent(500, 500, 100, b2));
1252 em.extent_map.insert(*new BlueStore::Extent(600, 600, 100, b2));
1253 em.extent_map.insert(*new BlueStore::Extent(700, 0, 100, b1));
1254 em.extent_map.insert(*new BlueStore::Extent(800, 0, 100, b3));
1255 ASSERT_EQ(0, em.compress_extent_map(0, 99));
1256 ASSERT_EQ(0, em.compress_extent_map(800, 1000));
1257 ASSERT_EQ(2, em.compress_extent_map(100, 500));
1258 ASSERT_EQ(7u, em.extent_map.size());
1259 erase_and_delete(em, 300);
1260 erase_and_delete(em, 500);
1261 erase_and_delete(em, 700);
1262 em.extent_map.insert(*new BlueStore::Extent(400, 300, 100, b2));
1263 em.extent_map.insert(*new BlueStore::Extent(500, 400, 100, b2));
1264 em.extent_map.insert(*new BlueStore::Extent(700, 500, 100, b2));
1265 ASSERT_EQ(1, em.compress_extent_map(0, 1000));
1266 ASSERT_EQ(6u, em.extent_map.size());
1267 }
1268
1269
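// Helper: erase and delete every entry of an old-extent list.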
1270 void clear_and_dispose(BlueStore::old_extent_map_t& old_em)
1271 {
1272 auto oep = old_em.begin();
1273 while (oep != old_em.end()) {
1274 auto &lo = *oep;
1275 oep = old_em.erase(oep);
1276 delete &lo;
1277 }
1278 }
1279
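// Feed the garbage collector synthetic extent maps (layouts described in the
// comments below) and check both the estimated savings and the extents it
// selects for collection.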
1280 TEST(GarbageCollector, BasicTest)
1281 {
1282 BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
1283 g_ceph_context, "lru", NULL);
1284 BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
1285 g_ceph_context, "lru", NULL);
1286
1287 BlueStore store(g_ceph_context, "", 4096);
1288 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
1289 BlueStore::Onode onode(coll.get(), ghobject_t(), "");
1290 BlueStore::ExtentMap em(&onode);
1291
1292 BlueStore::old_extent_map_t old_extents;
1293
1294
1295 /*
1296 min_alloc_size = 4096
1297 original disposition
1298 extent1 <loffs = 100, boffs = 100, len = 10>
1299 -> blob1<compressed, len_on_disk=4096, logical_len=8192>
1300 extent2 <loffs = 200, boffs = 200, len = 10>
1301 -> blob2<raw, len_on_disk=4096, llen=4096>
1302 extent3 <loffs = 300, boffs = 300, len = 10>
1303 -> blob1<compressed, len_on_disk=4096, llen=8192>
1304 extent4 <loffs = 4096, boffs = 0, len = 10>
1305 -> blob3<raw, len_on_disk=4096, llen=4096>
1306 after write(300~100) this becomes
1307 extent1 <loffs = 100, boffs = 100, len = 10>
1308 -> blob1<compressed, len_on_disk=4096, logical_len=8192>
1309 extent2 <loffs = 200, boffs = 200, len = 10>
1310 -> blob2<raw, len_on_disk=4096, llen=4096>
1311 extent3 <loffs = 300, boffs = 300, len = 100>
1312 -> blob4<raw, len_on_disk=4096, llen=4096>
1313 extent4 <loffs = 4096, boffs = 0, len = 10>
1314 -> blob3<raw, len_on_disk=4096, llen=4096>
1315 */
1316 {
1317 BlueStore::GarbageCollector gc(g_ceph_context);
1318 int64_t saving;
1319 BlueStore::BlobRef b1(new BlueStore::Blob);
1320 BlueStore::BlobRef b2(new BlueStore::Blob);
1321 BlueStore::BlobRef b3(new BlueStore::Blob);
1322 BlueStore::BlobRef b4(new BlueStore::Blob);
1323 b1->shared_blob = new BlueStore::SharedBlob(coll.get());
1324 b2->shared_blob = new BlueStore::SharedBlob(coll.get());
1325 b3->shared_blob = new BlueStore::SharedBlob(coll.get());
1326 b4->shared_blob = new BlueStore::SharedBlob(coll.get());
1327 b1->dirty_blob().set_compressed(0x2000, 0x1000);
1328 b1->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x1000));
1329 b2->dirty_blob().allocated_test(bluestore_pextent_t(1, 0x1000));
1330 b3->dirty_blob().allocated_test(bluestore_pextent_t(2, 0x1000));
1331 b4->dirty_blob().allocated_test(bluestore_pextent_t(3, 0x1000));
1332 em.extent_map.insert(*new BlueStore::Extent(100, 100, 10, b1));
1333 b1->get_ref(coll.get(), 100, 10);
1334 em.extent_map.insert(*new BlueStore::Extent(200, 200, 10, b2));
1335 b2->get_ref(coll.get(), 200, 10);
1336 em.extent_map.insert(*new BlueStore::Extent(300, 300, 100, b4));
1337 b4->get_ref(coll.get(), 300, 100);
1338 em.extent_map.insert(*new BlueStore::Extent(4096, 0, 10, b3));
1339 b3->get_ref(coll.get(), 0, 10);
1340
1341 old_extents.push_back(*new BlueStore::OldExtent(300, 300, 10, b1));
1342
1343 saving = gc.estimate(300, 100, em, old_extents, 4096);
1344 ASSERT_EQ(saving, 1);
1345 auto& to_collect = gc.get_extents_to_collect();
1346 ASSERT_EQ(to_collect.num_intervals(), 1u);
1347 {
1348 auto it = to_collect.begin();
1349 using p = decltype(*it);
1350 auto v = p{100ul, 10ul};
1351 ASSERT_EQ(*it, v);
1352 }
1353 em.clear();
1354 clear_and_dispose(old_extents);
1355 }
1356 /*
1357 original disposition
1358 min_alloc_size = 0x10000
1359 extent1 <loffs = 0, boffs = 0, len = 0x40000>
1360 -> blob1<compressed, len_on_disk=0x20000, logical_len=0x40000>
1361 Write 0x8000~37000 resulted in the following extent map prior to GC
1362 for the last write_small(0x30000~0xf000):
1363
1364 extent1 <loffs = 0, boffs = 0, len = 0x8000>
1365 -> blob1<compressed, len_on_disk=0x20000, logical_len=0x40000>
1366 extent2 <loffs = 0x8000, boffs = 0x8000, len = 0x8000>
1367 -> blob2<raw, len_on_disk=0x10000, llen=0x10000>
1368 extent3 <loffs = 0x10000, boffs = 0, len = 0x20000>
1369 -> blob3<raw, len_on_disk=0x20000, llen=0x20000>
1370 extent4 <loffs = 0x30000, boffs = 0, len = 0xf000>
1371 -> blob4<raw, len_on_disk=0x10000, llen=0x10000>
1372 extent5 <loffs = 0x3f000, boffs = 0x3f000, len = 0x1000>
1373 -> blob1<compressed, len_on_disk=0x20000, llen=0x40000>
1374 */
1375 {
1376 BlueStore store(g_ceph_context, "", 0x10000);
1377 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
1378 BlueStore::Onode onode(coll.get(), ghobject_t(), "");
1379 BlueStore::ExtentMap em(&onode);
1380
1381 BlueStore::old_extent_map_t old_extents;
1382 BlueStore::GarbageCollector gc(g_ceph_context);
1383 int64_t saving;
1384 BlueStore::BlobRef b1(new BlueStore::Blob);
1385 BlueStore::BlobRef b2(new BlueStore::Blob);
1386 BlueStore::BlobRef b3(new BlueStore::Blob);
1387 BlueStore::BlobRef b4(new BlueStore::Blob);
1388 b1->shared_blob = new BlueStore::SharedBlob(coll.get());
1389 b2->shared_blob = new BlueStore::SharedBlob(coll.get());
1390 b3->shared_blob = new BlueStore::SharedBlob(coll.get());
1391 b4->shared_blob = new BlueStore::SharedBlob(coll.get());
1392 b1->dirty_blob().set_compressed(0x40000, 0x20000);
1393 b1->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x20000));
1394 b2->dirty_blob().allocated_test(bluestore_pextent_t(1, 0x10000));
1395 b3->dirty_blob().allocated_test(bluestore_pextent_t(2, 0x20000));
1396 b4->dirty_blob().allocated_test(bluestore_pextent_t(3, 0x10000));
1397
1398 em.extent_map.insert(*new BlueStore::Extent(0, 0, 0x8000, b1));
1399 b1->get_ref(coll.get(), 0, 0x8000);
1400 em.extent_map.insert(
1401 *new BlueStore::Extent(0x8000, 0x8000, 0x8000, b2)); // new extent
1402 b2->get_ref(coll.get(), 0x8000, 0x8000);
1403 em.extent_map.insert(
1404 *new BlueStore::Extent(0x10000, 0, 0x20000, b3)); // new extent
1405 b3->get_ref(coll.get(), 0, 0x20000);
1406 em.extent_map.insert(
1407 *new BlueStore::Extent(0x30000, 0, 0xf000, b4)); // new extent
1408 b4->get_ref(coll.get(), 0, 0xf000);
1409 em.extent_map.insert(*new BlueStore::Extent(0x3f000, 0x3f000, 0x1000, b1));
1410 b1->get_ref(coll.get(), 0x3f000, 0x1000);
1411
1412 old_extents.push_back(*new BlueStore::OldExtent(0x8000, 0x8000, 0x8000, b1));
1413 old_extents.push_back(
1414 *new BlueStore::OldExtent(0x10000, 0x10000, 0x20000, b1));
1415 old_extents.push_back(*new BlueStore::OldExtent(0x30000, 0x30000, 0xf000, b1));
1416
1417 saving = gc.estimate(0x30000, 0xf000, em, old_extents, 0x10000);
1418 ASSERT_EQ(saving, 2);
1419 auto& to_collect = gc.get_extents_to_collect();
1420 ASSERT_EQ(to_collect.num_intervals(), 2u);
1421 {
1422 auto it1 = to_collect.begin();
1423 auto it2 = ++to_collect.begin();
1424 using p = decltype(*it1);
1425 {
1426 auto v1 = p{0x0ul, 0x8000ul};
1427 auto v2 = p{0x0ul, 0x8000ul};
1428 ASSERT_TRUE(*it1 == v1 || *it2 == v2);
1429 }
1430 {
1431 auto v1 = p{0x3f000ul, 0x1000ul};
1432 auto v2 = p{0x3f000ul, 0x1000ul};
1433 ASSERT_TRUE(*it1 == v1 || *it2 == v2);
1434 }
1435 }
1436
1437 em.clear();
1438 clear_and_dispose(old_extents);
1439 }
1440 /*
1441 original disposition
1442 min_alloc_size = 0x1000
1443 extent1 <loffs = 0, boffs = 0, len = 0x4000>
1444 -> blob1<compressed, len_on_disk=0x2000, logical_len=0x4000>
1445 write 0x3000~4000 resulted in the following extent map
1446 (future feature - suppose we can compress incoming write prior to
1447 GC invocation)
1448
1449 extent1 <loffs = 0, boffs = 0, len = 0x4000>
1450 -> blob1<compressed, len_on_disk=0x2000, logical_len=0x4000>
1451 extent2 <loffs = 0x3000, boffs = 0, len = 0x4000>
1452 -> blob2<compressed, len_on_disk=0x2000, llen=0x4000>
1453 */
1454 {
1455 BlueStore::GarbageCollector gc(g_ceph_context);
1456 int64_t saving;
1457 BlueStore::BlobRef b1(new BlueStore::Blob);
1458 BlueStore::BlobRef b2(new BlueStore::Blob);
1459 b1->shared_blob = new BlueStore::SharedBlob(coll.get());
1460 b2->shared_blob = new BlueStore::SharedBlob(coll.get());
1461 b1->dirty_blob().set_compressed(0x4000, 0x2000);
1462 b1->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x2000));
1463 b2->dirty_blob().set_compressed(0x4000, 0x2000);
1464 b2->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x2000));
1465
1466 em.extent_map.insert(*new BlueStore::Extent(0, 0, 0x3000, b1));
1467 b1->get_ref(coll.get(), 0, 0x3000);
1468 em.extent_map.insert(
1469 *new BlueStore::Extent(0x3000, 0, 0x4000, b2)); // new extent
1470 b2->get_ref(coll.get(), 0, 0x4000);
1471
1472 old_extents.push_back(*new BlueStore::OldExtent(0x3000, 0x3000, 0x1000, b1));
1473
1474 saving = gc.estimate(0x3000, 0x4000, em, old_extents, 0x1000);
1475 ASSERT_EQ(saving, 0);
1476 auto& to_collect = gc.get_extents_to_collect();
1477 ASSERT_EQ(to_collect.num_intervals(), 0u);
1478 em.clear();
1479 clear_and_dispose(old_extents);
1480 }
1481 /*
1482 original disposition
1483 min_alloc_size = 0x10000
1484 extent0 <loffs = 0, boffs = 0, len = 0x20000>
1485 -> blob0<compressed, len_on_disk=0x10000, logical_len=0x20000>
1486 extent1 <loffs = 0x20000, boffs = 0, len = 0x20000>
1487 -> blob1<compressed, len_on_disk=0x10000, logical_len=0x20000>
1488 write 0x8000~0x37000 resulted in the following extent map prior
1489 to GC for the last write_small(0x30000~0xf000)
1490
1491 extent0 <loffs = 0, boffs = 0, len = 0x8000>
1492 -> blob0<compressed, len_on_disk=0x10000, logical_len=0x20000>
1493 extent2 <loffs = 0x8000, boffs = 0x8000, len = 0x8000>
1494 -> blob2<raw, len_on_disk=0x10000, llen=0x10000>
1495 extent3 <loffs = 0x10000, boffs = 0, len = 0x20000>
1496 -> blob3<raw, len_on_disk=0x20000, llen=0x20000>
1497 extent4 <loffs = 0x30000, boffs = 0, len = 0xf000>
1498 -> blob4<raw, len_on_disk=0x1000, llen=0x1000>
1499 extent5 <loffs = 0x3f000, boffs = 0x1f000, len = 0x1000>
1500 -> blob1<compressed, len_on_disk=0x10000, llen=0x20000>
1501 */
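// Expected outcome (informal reading of the assertions below): only the small
// head (0x0~0x8000 of blob0) and tail (0x3f000~0x1000 of blob1) still pin the
// two compressed blobs, so collecting those two ranges is presumed to free
// roughly two allocation units' worth of space - matching the expected saving
// of 2 and the two intervals asserted below.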
1502 {
1503 BlueStore store(g_ceph_context, "", 0x10000);
1504 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
1505 BlueStore::Onode onode(coll.get(), ghobject_t(), "");
1506 BlueStore::ExtentMap em(&onode);
1507
1508 BlueStore::old_extent_map_t old_extents;
1509 BlueStore::GarbageCollector gc(g_ceph_context);
1510 int64_t saving;
1511 BlueStore::BlobRef b0(new BlueStore::Blob);
1512 BlueStore::BlobRef b1(new BlueStore::Blob);
1513 BlueStore::BlobRef b2(new BlueStore::Blob);
1514 BlueStore::BlobRef b3(new BlueStore::Blob);
1515 BlueStore::BlobRef b4(new BlueStore::Blob);
1516 b0->shared_blob = new BlueStore::SharedBlob(coll.get());
1517 b1->shared_blob = new BlueStore::SharedBlob(coll.get());
1518 b2->shared_blob = new BlueStore::SharedBlob(coll.get());
1519 b3->shared_blob = new BlueStore::SharedBlob(coll.get());
1520 b4->shared_blob = new BlueStore::SharedBlob(coll.get());
1521 b0->dirty_blob().set_compressed(0x2000, 0x1000);
1522 b0->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x10000));
1523 b1->dirty_blob().set_compressed(0x20000, 0x10000);
1524 b1->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x10000));
1525 b2->dirty_blob().allocated_test(bluestore_pextent_t(1, 0x10000));
1526 b3->dirty_blob().allocated_test(bluestore_pextent_t(2, 0x20000));
1527 b4->dirty_blob().allocated_test(bluestore_pextent_t(3, 0x1000));
1528
1529 em.extent_map.insert(*new BlueStore::Extent(0, 0, 0x8000, b0));
1530 b0->get_ref(coll.get(), 0, 0x8000);
1531 em.extent_map.insert(
1532 *new BlueStore::Extent(0x8000, 0x8000, 0x8000, b2)); // new extent
1533 b2->get_ref(coll.get(), 0x8000, 0x8000);
1534 em.extent_map.insert(
1535 *new BlueStore::Extent(0x10000, 0, 0x20000, b3)); // new extent
1536 b3->get_ref(coll.get(), 0, 0x20000);
1537 em.extent_map.insert(
1538 *new BlueStore::Extent(0x30000, 0, 0xf000, b4)); // new extent
1539 b4->get_ref(coll.get(), 0, 0xf000);
1540 em.extent_map.insert(*new BlueStore::Extent(0x3f000, 0x1f000, 0x1000, b1));
1541 b1->get_ref(coll.get(), 0x1f000, 0x1000);
1542
1543 old_extents.push_back(*new BlueStore::OldExtent(0x8000, 0x8000, 0x8000, b0));
1544 old_extents.push_back(
1545 *new BlueStore::OldExtent(0x10000, 0x10000, 0x10000, b0));
1546 old_extents.push_back(
1547 *new BlueStore::OldExtent(0x20000, 0x00000, 0x1f000, b1));
1548
1549 saving = gc.estimate(0x30000, 0xf000, em, old_extents, 0x10000);
1550 ASSERT_EQ(saving, 2);
1551 auto& to_collect = gc.get_extents_to_collect();
1552 ASSERT_EQ(to_collect.num_intervals(), 2u);
1553 {
1554 auto it1 = to_collect.begin();
1555 auto it2 = ++to_collect.begin();
1556 using p = decltype(*it1);
1557 {
1558 auto v1 = p{0x0ul, 0x8000ul};
1559 auto v2 = p{0x0ul, 0x8000ul};
1560 ASSERT_TRUE(*it1 == v1 || *it2 == v2);
1561 }
1562 {
1563 auto v1 = p{0x3f000ul, 0x1000ul};
1564 auto v2 = p{0x3f000ul, 0x1000ul};
1565 ASSERT_TRUE(*it1 == v1 || *it2 == v2);
1566 }
1567 }
1568
1569 em.clear();
1570 clear_and_dispose(old_extents);
1571 }
1572 }
1573
1574 TEST(BlueStoreRepairer, StoreSpaceTracker)
1575 {
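// StoreSpaceTracker (as used below) keeps per-collection and per-object
// filters (collections_bfs / objects_bfs) over fixed-size chunks of the
// device. For the 4 TiB device with no explicit entry cap, init() is expected
// to coarsen the granularity (to 2 MiB here), presumably to bound memory; the
// second tracker passes a 512K entry cap and a small device, so the
// granularity stays at min_alloc_size.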
1576 BlueStoreRepairer::StoreSpaceTracker bmap0;
1577 bmap0.init((uint64_t)4096 * 1024 * 1024 * 1024, 0x1000);
1578 ASSERT_EQ(bmap0.granularity, 2 * 1024 * 1024U);
1579 ASSERT_EQ(bmap0.collections_bfs.size(), 2048u * 1024u);
1580 ASSERT_EQ(bmap0.objects_bfs.size(), 2048u * 1024u);
1581
1582 BlueStoreRepairer::StoreSpaceTracker bmap;
1583 bmap.init(0x2000 * 0x1000 - 1, 0x1000, 512 * 1024);
1584 ASSERT_EQ(bmap.granularity, 0x1000u);
1585 ASSERT_EQ(bmap.collections_bfs.size(), 0x2000u);
1586 ASSERT_EQ(bmap.objects_bfs.size(), 0x2000u);
1587
1588 coll_t cid;
1589 ghobject_t hoid;
1590
1591 ASSERT_FALSE(bmap.is_used(cid, 0));
1592 ASSERT_FALSE(bmap.is_used(hoid, 0));
1593 bmap.set_used(0, 1, cid, hoid);
1594 ASSERT_TRUE(bmap.is_used(cid, 0));
1595 ASSERT_TRUE(bmap.is_used(hoid, 0));
1596
1597 ASSERT_FALSE(bmap.is_used(cid, 0x1023));
1598 ASSERT_FALSE(bmap.is_used(hoid, 0x1023));
1599 ASSERT_FALSE(bmap.is_used(cid, 0x2023));
1600 ASSERT_FALSE(bmap.is_used(hoid, 0x2023));
1601 ASSERT_FALSE(bmap.is_used(cid, 0x3023));
1602 ASSERT_FALSE(bmap.is_used(hoid, 0x3023));
1603 bmap.set_used(0x1023, 0x3000, cid, hoid);
1604 ASSERT_TRUE(bmap.is_used(cid, 0x1023));
1605 ASSERT_TRUE(bmap.is_used(hoid, 0x1023));
1606 ASSERT_TRUE(bmap.is_used(cid, 0x2023));
1607 ASSERT_TRUE(bmap.is_used(hoid, 0x2023));
1608 ASSERT_TRUE(bmap.is_used(cid, 0x3023));
1609 ASSERT_TRUE(bmap.is_used(hoid, 0x3023));
1610
1611 ASSERT_FALSE(bmap.is_used(cid, 0x9001));
1612 ASSERT_FALSE(bmap.is_used(hoid, 0x9001));
1613 ASSERT_FALSE(bmap.is_used(cid, 0xa001));
1614 ASSERT_FALSE(bmap.is_used(hoid, 0xa001));
1615 ASSERT_FALSE(bmap.is_used(cid, 0xb000));
1616 ASSERT_FALSE(bmap.is_used(hoid, 0xb000));
1617 ASSERT_FALSE(bmap.is_used(cid, 0xc000));
1618 ASSERT_FALSE(bmap.is_used(hoid, 0xc000));
1619 bmap.set_used(0x9001, 0x2fff, cid, hoid);
1620 ASSERT_TRUE(bmap.is_used(cid, 0x9001));
1621 ASSERT_TRUE(bmap.is_used(hoid, 0x9001));
1622 ASSERT_TRUE(bmap.is_used(cid, 0xa001));
1623 ASSERT_TRUE(bmap.is_used(hoid, 0xa001));
1624 ASSERT_TRUE(bmap.is_used(cid, 0xb001));
1625 ASSERT_TRUE(bmap.is_used(hoid, 0xb001));
1626 ASSERT_FALSE(bmap.is_used(cid, 0xc000));
1627 ASSERT_FALSE(bmap.is_used(hoid, 0xc000));
1628
1629 bmap.set_used(0xa001, 0x2, cid, hoid);
1630 ASSERT_TRUE(bmap.is_used(cid, 0x9001));
1631 ASSERT_TRUE(bmap.is_used(hoid, 0x9001));
1632 ASSERT_TRUE(bmap.is_used(cid, 0xa001));
1633 ASSERT_TRUE(bmap.is_used(hoid, 0xa001));
1634 ASSERT_TRUE(bmap.is_used(cid, 0xb001));
1635 ASSERT_TRUE(bmap.is_used(hoid, 0xb001));
1636 ASSERT_FALSE(bmap.is_used(cid, 0xc000));
1637 ASSERT_FALSE(bmap.is_used(hoid, 0xc000));
1638
1639 ASSERT_FALSE(bmap.is_used(cid, 0xc0000));
1640 ASSERT_FALSE(bmap.is_used(hoid, 0xc0000));
1641 ASSERT_FALSE(bmap.is_used(cid, 0xc1000));
1642 ASSERT_FALSE(bmap.is_used(hoid, 0xc1000));
1643
1644 bmap.set_used(0xc0000, 0x2000, cid, hoid);
1645 ASSERT_TRUE(bmap.is_used(cid, 0xc0000));
1646 ASSERT_TRUE(bmap.is_used(hoid, 0xc0000));
1647 ASSERT_TRUE(bmap.is_used(cid, 0xc1000));
1648 ASSERT_TRUE(bmap.is_used(hoid, 0xc1000));
1649
1650 interval_set<uint64_t> extents;
1651 extents.insert(0,0x500);
1652 extents.insert(0x800,0x100);
1653 extents.insert(0x1000,0x1000);
1654 extents.insert(0xa001,1);
1655 extents.insert(0xa0000,0xff8);
1656
1657 ASSERT_EQ(3u, bmap.filter_out(extents));
1658 ASSERT_TRUE(bmap.is_used(cid));
1659 ASSERT_TRUE(bmap.is_used(hoid));
1660
1661 BlueStoreRepairer::StoreSpaceTracker bmap2;
1662 bmap2.init((uint64_t)0x3223b1d1000, 0x10000);
1663 ASSERT_EQ(0x1a0000u, bmap2.granularity);
1664 ASSERT_EQ(0x1edae4u, bmap2.collections_bfs.size());
1665 ASSERT_EQ(0x1edae4u, bmap2.objects_bfs.size());
1666 bmap2.set_used(0x3223b190000, 0x10000, cid, hoid);
1667 ASSERT_TRUE(bmap2.is_used(cid, 0x3223b190000));
1668 ASSERT_TRUE(bmap2.is_used(hoid, 0x3223b190000));
1669 ASSERT_TRUE(bmap2.is_used(cid, 0x3223b19f000));
1670 ASSERT_TRUE(bmap2.is_used(hoid, 0x3223b19ffff));
1671 }
1672
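// bluestore_blob_t records not-yet-written regions of a blob in a small
// 'unused' bitmask; add_unused()/mark_used() update it and is_unused() should
// return true only while the whole queried range is still untouched. The
// third case below assumes a tracking granularity of blob_length/16
// (unused_granularity = 0x3000 for a 0x30000 blob).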
1673 TEST(bluestore_blob_t, unused)
1674 {
1675 {
1676 bluestore_blob_t b;
1677 uint64_t min_alloc_size = 64 << 10; // 64 kB
1678
1679 // _do_write_small 0x0~1000
1680 uint64_t offset = 0x0;
1681 uint64_t length = 0x1000; // 4kB
1682 uint64_t suggested_boff = 0;
1683 PExtentVector extents;
1684 extents.emplace_back(0x1a560000, min_alloc_size);
1685 b.allocated(p2align(suggested_boff, min_alloc_size), 0 /*no matter*/, extents);
1686 b.mark_used(offset, length);
1687 ASSERT_FALSE(b.is_unused(offset, length));
1688
1689 // _do_write_small 0x2000~1000
1690 offset = 0x2000;
1691 length = 0x1000;
1692 b.add_unused(0, 0x10000);
1693 ASSERT_TRUE(b.is_unused(offset, length));
1694 b.mark_used(offset, length);
1695 ASSERT_FALSE(b.is_unused(offset, length));
1696
1697 // _do_write_small 0xc000~2000
1698 offset = 0xc000;
1699 length = 0x2000;
1700 ASSERT_TRUE(b.is_unused(offset, length));
1701 b.mark_used(offset, length);
1702 ASSERT_FALSE(b.is_unused(offset, length));
1703 }
1704
1705 {
1706 bluestore_blob_t b;
1707 uint64_t min_alloc_size = 64 << 10; // 64 kB
1708
1709 // _do_write_small 0x11000~1000
1710 uint64_t offset = 0x11000;
1711 uint64_t length = 0x1000; // 4kB
1712 uint64_t suggested_boff = 0x11000;
1713 PExtentVector extents;
1714 extents.emplace_back(0x1a560000, min_alloc_size);
1715 b.allocated(p2align(suggested_boff, min_alloc_size), 0 /*no matter*/, extents);
1716 b.add_unused(0, offset);
1717 b.add_unused(offset + length, min_alloc_size * 2 - offset - length);
1718 b.mark_used(offset, length);
1719 ASSERT_FALSE(b.is_unused(offset, length));
1720
1721 // _do_write_small 0x15000~3000
1722 offset = 0x15000;
1723 length = 0x3000;
1724 ASSERT_TRUE(b.is_unused(offset, length));
1725 b.mark_used(offset, length);
1726 ASSERT_FALSE(b.is_unused(offset, length));
1727 }
1728
1729 {
1730 // reuse blob
1731 bluestore_blob_t b;
1732 uint64_t min_alloc_size = 64 << 10; // 64 kB
1733
1734 // _do_write_small 0x2a000~1000
1735 // and 0x1d000~1000
1736 uint64_t unused_granularity = 0x3000;
1737 // offsets and lengths below are selected to
1738 // be aligned with unused_granularity
1739 uint64_t offset0 = 0x2a000;
1740 uint64_t offset = 0x1d000;
1741 uint64_t length = 0x1000; // 4kB
1742 PExtentVector extents;
1743 extents.emplace_back(0x410000, min_alloc_size);
1744 b.allocated(p2align(offset0, min_alloc_size), min_alloc_size, extents);
1745 b.add_unused(0, min_alloc_size * 3);
1746 b.mark_used(offset0, length);
1747 ASSERT_FALSE(b.is_unused(offset0, length));
1748 ASSERT_TRUE(b.is_unused(offset, length));
1749
1750 extents.clear();
1751 extents.emplace_back(0x430000, min_alloc_size);
1752 b.allocated(p2align(offset, min_alloc_size), min_alloc_size, extents);
1753 b.mark_used(offset, length);
1754 ASSERT_FALSE(b.is_unused(offset0, length));
1755 ASSERT_FALSE(b.is_unused(offset, length));
1756 ASSERT_FALSE(b.is_unused(offset, unused_granularity));
1757
1758 ASSERT_TRUE(b.is_unused(0, offset / unused_granularity * unused_granularity));
1759 ASSERT_TRUE(b.is_unused(offset + length, offset0 - offset - length));
1760 auto end0_aligned = round_up_to(offset0 + length, unused_granularity);
1761 ASSERT_TRUE(b.is_unused(end0_aligned, min_alloc_size * 3 - end0_aligned));
1762 }
1763 }
1764 // This UT is primarily intended to show how the repair procedure
1765 // causes an erroneous write to INVALID_OFFSET, which is reported in
1766 // https://tracker.ceph.com/issues/51682
1767 // Basic map_bl functionality is tested as well, though.
1768 //
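// map_bl() is expected to split the supplied bufferlist across the blob's
// physical extents, invoking the callback once per fragment; the expected[]
// table below lists, in order, the (physical offset, fragment length) pairs
// anticipated first for the call at offset 0 and then for the call at the
// improper offset 0x5000.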
1769 TEST(bluestore_blob_t, wrong_map_bl_in_51682)
1770 {
1771 {
1772 bluestore_blob_t b;
1773 uint64_t min_alloc_size = 4 << 10; // 4 kB
1774
1775 b.allocated_test(bluestore_pextent_t(0x17ba000, 4 * min_alloc_size));
1776 b.allocated_test(bluestore_pextent_t(0x17bf000, 4 * min_alloc_size));
1777 b.allocated_test(
1778 bluestore_pextent_t(
1779 bluestore_pextent_t::INVALID_OFFSET,
1780 1 * min_alloc_size));
1781 b.allocated_test(bluestore_pextent_t(0x153c44d000, 7 * min_alloc_size));
1782
1783 b.mark_used(0, 0x8000);
1784 b.mark_used(0x9000, 0x7000);
1785
1786 string s(0x7000, 'a');
1787 bufferlist bl;
1788 bl.append(s);
1789 const size_t num_expected_entries = 5;
1790 uint64_t expected[num_expected_entries][2] = {
1791 {0x17ba000, 0x4000},
1792 {0x17bf000, 0x3000},
1793 {0x17c0000, 0x3000},
1794 {0xffffffffffffffff, 0x1000},
1795 {0x153c44d000, 0x3000}};
1796 size_t expected_pos = 0;
1797 b.map_bl(0, bl,
1798 [&](uint64_t o, bufferlist& bl) {
1799 ASSERT_EQ(o, expected[expected_pos][0]);
1800 ASSERT_EQ(bl.length(), expected[expected_pos][1]);
1801 ++expected_pos;
1802 });
1803 // 0x5000 is an improper offset presumably provided when doing a repair
1804 b.map_bl(0x5000, bl,
1805 [&](uint64_t o, bufferlist& bl) {
1806 ASSERT_EQ(o, expected[expected_pos][0]);
1807 ASSERT_EQ(bl.length(), expected[expected_pos][1]);
1808 ++expected_pos;
1809 });
1810 ASSERT_EQ(expected_pos, num_expected_entries);
1811 }
1812 }
1813
1814 //---------------------------------------------------------------------------------
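// Compares the extent returned by the bitmap against the reference entry
// ext_arr[idx]; on mismatch it prints the offending extent together with a
// few neighbouring reference entries to ease debugging and returns -1.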
1815 static int verify_extent(const extent_t & ext, const extent_t *ext_arr, uint64_t ext_arr_size, uint64_t idx)
1816 {
1817 const extent_t & ext_ref = ext_arr[idx];
1818 if (ext.offset == ext_ref.offset && ext.length == ext_ref.length) {
1819 return 0;
1820 } else {
1821 std::cerr << "mismatch was found at index " << idx << std::endl;
1822 if (ext.length == 0) {
1823 std::cerr << "Null extent was returned at idx = " << idx << std::endl;
1824 }
1825 unsigned start = std::max(((int32_t)(idx)-3), 0);
1826 unsigned end = std::min(idx+3, ext_arr_size);
1827 for (unsigned j = start; j < end; j++) {
1828 const extent_t & ext_ref = ext_arr[j];
1829 std::cerr << j << ") ref_ext = [" << ext_ref.offset << ", " << ext_ref.length << "]" << std::endl;
1830 }
1831 std::cerr << idx << ") ext = [" << ext.offset << ", " << ext.length << "]" << std::endl;
1832 return -1;
1833 }
1834 }
1835
1836 //---------------------------------------------------------------------------------
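// Applies a pseudo-random sequence of set()/clr() ranges to sbmap, recording
// every range in ext_arr (back-to-back ranges are merged), then walks the
// bitmap via get_next_set_extent()/get_next_clr_extent() and checks that the
// returned extents match the recorded ones. Returns 0 on success, -1 on the
// first mismatch.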
1837 static int test_extents(uint64_t index, extent_t *ext_arr, uint64_t ext_arr_size, SimpleBitmap& sbmap, bool set)
1838 {
1839 const uint64_t MAX_JUMP_BIG = 1523;
1840 const uint64_t MAX_JUMP_SMALL = 19;
1841 const uint64_t MAX_LEN_BIG = 523;
1842 const uint64_t MAX_LEN_SMALL = 23;
1843
1844 uint64_t n = sbmap.get_size();
1845 uint64_t offset = 0;
1846 unsigned length, jump, i;
1847 for (i = 0; i < ext_arr_size; i++) {
1848 if (i & 3) {
1849 jump = std::rand() % MAX_JUMP_BIG;
1850 } else {
1851 jump = std::rand() % MAX_JUMP_SMALL;
1852 }
1853 offset += jump;
1854 if (i & 1) {
1855 length = std::rand() % MAX_LEN_BIG;
1856 } else {
1857 length = std::rand() % MAX_LEN_SMALL;
1858 }
1859 // make sure no zero length will be used
1860 length++;
1861 if (offset + length >= n) {
1862 break;
1863 }
1864
1865 bool success;
1866 if (set) {
1867 success = sbmap.set(offset, length);
1868 } else {
1869 success = sbmap.clr(offset, length);
1870 }
1871 if (!success) {
1872 std::cerr << "Failed sbmap." << (set ? "set(" : "clr(") << offset << ", " << length << ")"<< std::endl;
1873 return -1;
1874 }
1875
1876 // if this is not the first entry and there was no jump, merge with the previous extent; otherwise start a new one
1877 if ( (i==0) || (jump > 0) ) {
1878 ext_arr[i] = {offset, length};
1879 } else {
1880 // merge 2 extents
1881 i --;
1882 ext_arr[i].length += length;
1883 }
1884 offset += length;
1885 }
1886 unsigned arr_size = std::min((uint64_t)i, ext_arr_size);
1887 std::cout << std::hex << std::right;
1888 std::cout << "[" << index << "] " << (set ? "Set::" : "Clr::") << " extents count = 0x" << arr_size;
1889 std::cout << std::dec << std::endl;
1890
1891 offset = 0;
1892 extent_t ext;
1893 for(unsigned i = 0; i < arr_size; i++) {
1894 if (set) {
1895 ext = sbmap.get_next_set_extent(offset);
1896 } else {
1897 ext = sbmap.get_next_clr_extent(offset);
1898 }
1899
1900 if (verify_extent(ext, ext_arr, ext_arr_size, i) != 0) {
1901 return -1;
1902 }
1903 offset = ext.offset + ext.length;
1904 }
1905
1906 if (set) {
1907 ext = sbmap.get_next_set_extent(offset);
1908 } else {
1909 ext = sbmap.get_next_clr_extent(offset);
1910 }
1911 if (ext.length == 0) {
1912 return 0;
1913 } else {
1914 std::cerr << "sbmap.get_next_" << (set ? "set" : "clr") << "_extent(" << offset << ") return length = " << ext.length << std::endl;
1915 return -1;
1916 }
1917 }
1918
1919 //---------------------------------------------------------------------------------
1920 TEST(SimpleBitmap, basic)
1921 {
1922 const uint64_t MAX_EXTENTS_COUNT = 7131177;
1923 std::unique_ptr<extent_t[]> ext_arr = std::make_unique<extent_t[]>(MAX_EXTENTS_COUNT);
1924 ASSERT_TRUE(ext_arr != nullptr);
1925 const uint64_t BIT_COUNT = 4ULL << 30; // 4G bits -> 512 MB of bitmap
1926 SimpleBitmap sbmap(g_ceph_context, BIT_COUNT);
1927
1928 // use current time as seed for random generator
1929 std::srand(std::time(nullptr));
1930 for (unsigned i = 0; i < 3; i++ ) {
1931 memset(ext_arr.get(), 0, sizeof(extent_t)*MAX_EXTENTS_COUNT);
1932 sbmap.clear_all();
1933 ASSERT_TRUE(test_extents(i, ext_arr.get(), MAX_EXTENTS_COUNT, sbmap, true) == 0);
1934
1935 memset(ext_arr.get(), 0, sizeof(extent_t)*MAX_EXTENTS_COUNT);
1936 sbmap.set_all();
1937 ASSERT_TRUE(test_extents(i, ext_arr.get(), MAX_EXTENTS_COUNT, sbmap, false) == 0);
1938 }
1939 }
1940
1941 //---------------------------------------------------------------------------------
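// Mirrors a pseudo-random mix of set()/clr() ranges into both sbmap and a
// plain byte-per-bit shadow map, then cross-checks every bit via
// bit_is_set()/bit_is_clr() and verifies that the extents returned by
// get_next_set_extent()/get_next_clr_extent() agree with the shadow map.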
1942 static int test_intersections(unsigned test_idx, SimpleBitmap &sbmap, uint8_t map[], uint64_t map_size)
1943 {
1944 const uint64_t MAX_LEN_BIG = 523;
1945 const uint64_t MAX_LEN_SMALL = 23;
1946
1947 bool success;
1948 uint64_t set_op_count = 0, clr_op_count = 0;
1949 unsigned length, i;
1950 for (i = 0; i < map_size / (MAX_LEN_BIG*2); i++) {
1951 uint64_t offset = (std::rand() % (map_size - 1));
1952 if (i & 1) {
1953 length = std::rand() % MAX_LEN_BIG;
1954 } else {
1955 length = std::rand() % MAX_LEN_SMALL;
1956 }
1957 // make sure no zero length will be used
1958 length++;
1959 if (offset + length >= map_size) {
1960 continue;
1961 }
1962 // 2:1 set/clr
1963 bool set = (std::rand() % 3);
1964 if (set) {
1965 success = sbmap.set(offset, length);
1966 memset(map+offset, 0xFF, length);
1967 set_op_count++;
1968 } else {
1969 success = sbmap.clr(offset, length);
1970 memset(map+offset, 0x0, length);
1971 clr_op_count++;
1972 }
1973 if (!success) {
1974 std::cerr << "Failed sbmap." << (set ? "set(" : "clr(") << offset << ", " << length << ")"<< std::endl;
1975 return -1;
1976 }
1977 }
1978
1979 uint64_t set_bit_count = 0;
1980 uint64_t clr_bit_count = 0;
1981 for(uint64_t idx = 0; idx < map_size; idx++) {
1982 if (map[idx]) {
1983 set_bit_count++;
1984 success = sbmap.bit_is_set(idx);
1985 } else {
1986 clr_bit_count++;
1987 success = sbmap.bit_is_clr(idx);
1988 }
1989 if (!success) {
1990 std::cerr << "expected: sbmap.bit_is_" << (map[idx] ? "set(" : "clr(") << idx << ")"<< std::endl;
1991 return -1;
1992 }
1993
1994 }
1995 std::cout << std::hex << std::right << __func__ ;
1996 std::cout << " [" << test_idx << "] set_bit_count = 0x" << std::setfill('0') << std::setw(8) << set_bit_count
1997 << ", clr_bit_count = 0x" << std::setfill('0') << std::setw(8) << clr_bit_count
1998 << ", sum = 0x" << set_bit_count + clr_bit_count << std::endl;
1999 std::cout << std::dec;
2000 uint64_t offset = 0;
2001 for(uint64_t i = 0; i < (set_op_count + clr_op_count); i++) {
2002 extent_t ext = sbmap.get_next_set_extent(offset);
2003 //std::cout << "set_ext:: " << i << ") [" << ext.offset << ", " << ext.length << "]" << std::endl;
2004 for (uint64_t idx = ext.offset; idx < ext.offset + ext.length; idx++) {
2005 if (map[idx] != 0xFF) {
2006 std::cerr << "map[" << idx << "] is clear, but extent [" << ext.offset << ", " << ext.length << "] is set" << std::endl;
2007 return -1;
2008 }
2009 }
2010 offset = ext.offset + ext.length;
2011 }
2012
2013 offset = 0;
2014 for(uint64_t i = 0; i < (set_op_count + clr_op_count); i++) {
2015 extent_t ext = sbmap.get_next_clr_extent(offset);
2016 //std::cout << "clr_ext:: " << i << ") [" << ext.offset << ", " << ext.length << "]" << std::endl;
2017 for (uint64_t idx = ext.offset; idx < ext.offset + ext.length; idx++) {
2018 if (map[idx] ) {
2019 std::cerr << "map[" << idx << "] is set, but extent [" << ext.offset << ", " << ext.length << "] is free" << std::endl;
2020 return -1;
2021 }
2022 }
2023 offset = ext.offset + ext.length;
2024 }
2025
2026 return 0;
2027 }
2028
2029 //---------------------------------------------------------------------------------
2030 TEST(SimpleBitmap, intersection)
2031 {
2032 const uint64_t MAP_SIZE = 1ULL << 30; // 1G
2033 SimpleBitmap sbmap(g_ceph_context, MAP_SIZE);
2034
2035 // use current time as seed for random generator
2036 std::srand(std::time(nullptr));
2037
2038 std::unique_ptr<uint8_t[]> map = std::make_unique<uint8_t[]> (MAP_SIZE);
2039 ASSERT_TRUE(map != nullptr);
2040
2041 for (unsigned i = 0; i < 1; i++ ) {
2042 sbmap.clear_all();
2043 memset(map.get(), 0, MAP_SIZE);
2044 ASSERT_TRUE(test_intersections(i, sbmap, map.get(), MAP_SIZE) == 0);
2045
2046 sbmap.set_all();
2047 memset(map.get(), 0xFF, MAP_SIZE);
2048 ASSERT_TRUE(test_intersections(i, sbmap, map.get(), MAP_SIZE) == 0);
2049 }
2050 }
2051
2052
2053 //---------------------------------------------------------------------------------
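// Exercises set()/clr() ranges deliberately arranged around 64-bit word
// boundaries (small offset shifts of 0..63 combined with lengths of 1..128),
// then verifies that extent iteration reproduces the recorded ranges, in the
// same way as test_extents() above.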
2054 static int test_extents_boundaries(uint64_t index, extent_t *ext_arr, uint64_t ext_arr_size, SimpleBitmap& sbmap, bool set)
2055 {
2056 uint64_t n = sbmap.get_size();
2057 uint64_t offset = 0, k = 0;
2058 for(unsigned i = 0; i < 64; i++) {
2059 offset += i;
2060 if (offset >= n) {
2061 break;
2062 }
2063
2064 for(unsigned length = 1; length <= 128; length++) {
2065 if (offset + length >= n) {
2066 break;
2067 }
2068
2069 if (k >= ext_arr_size) {
2070 break;
2071 }
2072 bool success;
2073 if (set) {
2074 success = sbmap.set(offset, length);
2075 } else {
2076 success = sbmap.clr(offset, length);
2077 }
2078 if (!success) {
2079 std::cerr << "Failed sbmap." << (set ? "set(" : "clr(") << offset << ", " << length << ")"<< std::endl;
2080 return -1;
2081 }
2082 ext_arr[k++] = {offset, length};
2083 if (length < 64) {
2084 offset += 64;
2085 } else {
2086 offset += 128;
2087 }
2088 }
2089 if (k >= ext_arr_size) {
2090 break;
2091 }
2092 }
2093
2094 unsigned arr_size = std::min((uint64_t)k, ext_arr_size);
2095 std::cout << std::hex << std::right << __func__ ;
2096 std::cout << " [" << index << "] " << (set ? "Set::" : "Clr::") << " extents count = 0x" << arr_size;
2097 std::cout << std::dec << std::endl;
2098
2099 offset = 0;
2100 extent_t ext;
2101 for(unsigned i = 0; i < arr_size; i++) {
2102 if (set) {
2103 ext = sbmap.get_next_set_extent(offset);
2104 } else {
2105 ext = sbmap.get_next_clr_extent(offset);
2106 }
2107
2108 if (verify_extent(ext, ext_arr, ext_arr_size, i) != 0) {
2109 return -1;
2110 }
2111 offset = ext.offset + ext.length;
2112 }
2113
2114 if (set) {
2115 ext = sbmap.get_next_set_extent(offset);
2116 } else {
2117 ext = sbmap.get_next_clr_extent(offset);
2118 }
2119 if (ext.length == 0) {
2120 return 0;
2121 } else {
2122 std::cerr << "sbmap.get_next_" << (set ? "set" : "clr") << "_extent(" << offset << ") return length = " << ext.length << std::endl;
2123 return -1;
2124 }
2125
2126 }
2127
2128 //---------------------------------------------------------------------------------
2129 TEST(SimpleBitmap, boundaries)
2130 {
2131 const uint64_t MAX_EXTENTS_COUNT = 64 << 10;
2132 std::unique_ptr<extent_t[]> ext_arr = std::make_unique<extent_t[]>(MAX_EXTENTS_COUNT);
2133 ASSERT_TRUE(ext_arr != nullptr);
2134
2135 // use current time as seed for random generator
2136 std::srand(std::time(nullptr));
2137
2138 uint64_t bit_count = 32 << 20; // 32M bits -> 4 MB of bitmap
2139 unsigned count = 0;
2140 for (unsigned i = 0; i < 64; i++) {
2141 SimpleBitmap sbmap(g_ceph_context, bit_count+i);
2142 memset(ext_arr.get(), 0, sizeof(extent_t)*MAX_EXTENTS_COUNT);
2143 sbmap.clear_all();
2144 ASSERT_TRUE(test_extents_boundaries(count, ext_arr.get(), MAX_EXTENTS_COUNT, sbmap, true) == 0);
2145
2146 memset(ext_arr.get(), 0, sizeof(extent_t)*MAX_EXTENTS_COUNT);
2147 sbmap.set_all();
2148 ASSERT_TRUE(test_extents_boundaries(count++, ext_arr.get(), MAX_EXTENTS_COUNT, sbmap, false) == 0);
2149 }
2150 }
2151
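// shared_blob_2hash_tracker_t (as exercised below) accumulates signed
// refcount deltas keyed by (sbid, offset) into two independently hashed
// tables; count_non_zero() is expected to reach 0 only when every bucket
// cancels out, so an inc() on one sbid paired with a dec() on another
// (e.g. 2 vs 18 at offset 0x2000) keeps showing up as an inconsistency
// until it is compensated.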
2152 TEST(shared_blob_2hash_tracker_t, basic_test)
2153 {
2154 shared_blob_2hash_tracker_t t1(1024 * 1024, 4096);
2155
2156 ASSERT_TRUE(t1.count_non_zero() == 0);
2157
2158 t1.inc(0, 0, 1);
2159 ASSERT_TRUE(t1.count_non_zero() != 0);
2160 t1.inc(0, 0, -1);
2161 ASSERT_TRUE(t1.count_non_zero() == 0);
2162
2163 t1.inc(3, 0x1000, 2);
2164 ASSERT_TRUE(t1.count_non_zero() != 0);
2165 t1.inc(3, 0x1000, -1);
2166 ASSERT_TRUE(t1.count_non_zero() != 0);
2167 t1.inc(3, 0x1000, -1);
2168 ASSERT_TRUE(t1.count_non_zero() == 0);
2169
2170 t1.inc(2, 0x2000, 5);
2171 ASSERT_TRUE(t1.count_non_zero() != 0);
2172 t1.inc(18, 0x2000, -5);
2173 ASSERT_TRUE(t1.count_non_zero() != 0);
2174 t1.inc(18, 0x2000, 1);
2175 ASSERT_TRUE(t1.count_non_zero() != 0);
2176 t1.inc(2, 0x2000, -1);
2177 ASSERT_TRUE(t1.count_non_zero() != 0);
2178 t1.inc(18, 0x2000, 4);
2179 ASSERT_TRUE(t1.count_non_zero() != 0);
2180 t1.inc(2, 0x2000, -4);
2181 ASSERT_TRUE(t1.count_non_zero() == 0);
2182
2183 t1.inc(3, 0x3000, 2);
2184 ASSERT_TRUE(t1.count_non_zero() != 0);
2185 t1.inc(4, 0x3000, -1);
2186 ASSERT_TRUE(t1.count_non_zero() != 0);
2187 t1.inc(4, 0x3000, -1);
2188 ASSERT_TRUE(t1.count_non_zero() != 0);
2189 t1.inc(3, 0x3000, -2);
2190 ASSERT_TRUE(t1.count_non_zero() != 0);
2191 t1.inc(4, 0x3000, 1);
2192 ASSERT_TRUE(t1.count_non_zero() != 0);
2193 t1.inc(4, 0x3000, 1);
2194 ASSERT_TRUE(t1.count_non_zero() == 0);
2195
2196 t1.inc(5, 0x1000, 1);
2197 t1.inc(5, 0x2000, 3);
2198 t1.inc(5, 0x3000, 2);
2199 t1.inc(5, 0x8000, 1);
2200
2201 ASSERT_TRUE(t1.count_non_zero() != 0);
2202
2203 ASSERT_TRUE(!t1.test_all_zero(5,0x1000));
2204 ASSERT_TRUE(!t1.test_all_zero(5, 0x2000));
2205 ASSERT_TRUE(!t1.test_all_zero(5, 0x3000));
2206 ASSERT_TRUE(t1.test_all_zero(5, 0x4000));
2207 ASSERT_TRUE(!t1.test_all_zero(5, 0x8000));
2208
2209 ASSERT_TRUE(t1.test_all_zero_range(5, 0, 0x1000));
2210 ASSERT_TRUE(t1.test_all_zero_range(5, 0x500, 0x500));
2211 ASSERT_TRUE(!t1.test_all_zero_range(5, 0x500, 0x1500));
2212 ASSERT_TRUE(!t1.test_all_zero_range(5, 0x1500, 0x3200));
2213 ASSERT_TRUE(t1.test_all_zero_range(5, 0x4500, 0x1500));
2214 ASSERT_TRUE(t1.test_all_zero_range(5, 0x4500, 0x3b00));
2215 ASSERT_TRUE(!t1.test_all_zero_range(5, 0, 0x9000));
2216 }
2217 int main(int argc, char **argv) {
2218 auto args = argv_to_vec(argc, argv);
2219 auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
2220 CODE_ENVIRONMENT_UTILITY,
2221 CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
2222 common_init_finish(g_ceph_context);
2223 ::testing::InitGoogleTest(&argc, argv);
2224 return RUN_ALL_TESTS();
2225 }