1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3
4 #include "include/types.h"
5 #include "os/bluestore/bluestore_types.h"
6 #include "gtest/gtest.h"
7 #include "include/stringify.h"
8 #include "common/ceph_time.h"
9 #include "os/bluestore/BlueStore.h"
10 #include "os/bluestore/simple_bitmap.h"
11 #include "os/bluestore/AvlAllocator.h"
12 #include "common/ceph_argparse.h"
13 #include "global/global_init.h"
14 #include "global/global_context.h"
15 #include "perfglue/heap_profiler.h"
16
17 #include <sstream>
18
19 #define _STR(x) #x
20 #define STRINGIFY(x) _STR(x)
21
22 using namespace std;
23
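// Informational: prints sizeof() for the main BlueStore in-memory types and a
// couple of std::map instantiations; nothing is asserted.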
24 TEST(bluestore, sizeof) {
25 #define P(t) cout << STRINGIFY(t) << "\t" << sizeof(t) << std::endl
26 P(BlueStore::Onode);
27 P(BlueStore::Extent);
28 P(BlueStore::Blob);
29 P(BlueStore::SharedBlob);
30 P(BlueStore::ExtentMap);
31 P(BlueStore::extent_map_t);
32 P(BlueStore::blob_map_t);
33 P(BlueStore::BufferSpace);
34 P(BlueStore::Buffer);
35 P(bluestore_onode_t);
36 P(bluestore_blob_t);
37 P(PExtentVector);
38 P(ghobject_t);
39 P(bluestore_shared_blob_t);
40 P(bluestore_extent_ref_map_t);
41 P(bluestore_extent_ref_map_t::record_t);
42 P(bluestore_blob_use_tracker_t);
43 P(std::atomic_int);
44 P(BlueStore::SharedBlobRef);
45 P(boost::intrusive::set_base_hook<>);
46 P(boost::intrusive::unordered_set_base_hook<>);
47 P(bufferlist);
48 P(bufferptr);
49 P(range_seg_t);
50 P(sb_info_t);
51 P(SimpleBitmap);
52 cout << "map<uint64_t,uint64_t>\t" << sizeof(map<uint64_t,uint64_t>) << std::endl;
53 cout << "map<char,char>\t" << sizeof(map<char,char>) << std::endl;
54 }
55
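// Helper: dump the current mempool accounting as pretty-printed JSON; used by
// the sb_info_space_efficient_map_t.size test below to show memory consumption.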
56 void dump_mempools()
57 {
58 ostringstream ostr;
59 auto f = Formatter::create_unique("json-pretty", "json-pretty", "json-pretty");
60 ostr << "Mempools: ";
61 f->open_object_section("mempools");
62 mempool::dump(f.get());
63 f->close_section();
64 f->flush(ostr);
65 cout << ostr.str() << std::endl;
66 }
67 /*void get_mempool_stats(uint64_t* total_bytes, uint64_t* total_items)
68 {
69 uint64_t meta_allocated = mempool::bluestore_cache_meta::allocated_bytes();
70 uint64_t onode_allocated = mempool::bluestore_cache_onode::allocated_bytes();
71 uint64_t other_allocated = mempool::bluestore_cache_other::allocated_bytes();
72
73 uint64_t meta_items = mempool::bluestore_cache_meta::allocated_items();
74 uint64_t onode_items = mempool::bluestore_cache_onode::allocated_items();
75 uint64_t other_items = mempool::bluestore_cache_other::allocated_items();
76 cout << "meta(" << meta_allocated << "/" << meta_items
77 << ") onode(" << onode_allocated << "/" << onode_items
78 << ") other(" << other_allocated << "/" << other_items
79 << ")" << std::endl;
80 *total_bytes = meta_allocated + onode_allocated + other_allocated;
81 *total_items = onode_items;
82 }*/
83
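// Exercises both insertion paths of sb_info_space_efficient_map_t --
// add_maybe_stray() and add_or_adopt() -- with in-order and out-of-order SBIDs,
// and checks lookups via find().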
84 TEST(sb_info_space_efficient_map_t, basic) {
85 sb_info_space_efficient_map_t sb_info;
86 const size_t num_shared = 1000;
87 for (size_t i = 0; i < num_shared; i += 2) {
88 auto& sbi = sb_info.add_maybe_stray(i);
89 sbi.pool_id = i;
90 }
91 ASSERT_TRUE(sb_info.find(0) != sb_info.end());
92 ASSERT_TRUE(sb_info.find(1) == sb_info.end());
93 ASSERT_TRUE(sb_info.find(2) != sb_info.end());
94 ASSERT_TRUE(sb_info.find(4)->pool_id == 4);
95 ASSERT_TRUE(sb_info.find(num_shared) == sb_info.end());
96
97 // ordered insertion
98 sb_info.add_or_adopt(num_shared).pool_id = num_shared;
99 ASSERT_TRUE(sb_info.find(num_shared) != sb_info.end());
100 ASSERT_TRUE(sb_info.find(num_shared)->pool_id == num_shared);
101
102 // out of order insertion
103 sb_info.add_or_adopt(1).pool_id = 1;
104 ASSERT_TRUE(sb_info.find(1) != sb_info.end());
105 ASSERT_TRUE(sb_info.find(1)->pool_id == 1);
106
107 // ordered insertion
108 sb_info.add_maybe_stray(num_shared + 1).pool_id = num_shared + 1;
109 ASSERT_TRUE(sb_info.find(num_shared + 1) != sb_info.end());
110 ASSERT_TRUE(sb_info.find(num_shared + 1)->pool_id == num_shared + 1);
111
112 // out of order insertion
113 sb_info.add_maybe_stray(105).pool_id = 105;
114 ASSERT_TRUE(sb_info.find(105) != sb_info.end());
115 ASSERT_TRUE(sb_info.find(105)->pool_id == 105);
116 }
117
118 TEST(sb_info_space_efficient_map_t, size) {
119 const size_t num_shared = 10000000;
120 sb_info_space_efficient_map_t sb_info;
121
122 BlueStore store(g_ceph_context, "", 4096);
123 BlueStore::OnodeCacheShard* oc = BlueStore::OnodeCacheShard::create(
124 g_ceph_context, "lru", NULL);
125 BlueStore::BufferCacheShard* bc = BlueStore::BufferCacheShard::create(
126 g_ceph_context, "lru", NULL);
127
128 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
129
130 for (size_t i = 0; i < num_shared; i++) {
131 auto& sbi = sb_info.add_or_adopt(i);
132 // primarily to silence the 'unused' warning
133 ceph_assert(sbi.pool_id == sb_info_t::INVALID_POOL_ID);
134 }
135 dump_mempools();
136 }
137
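// get(offset, length) coalesces adjacent ranges that end up with the same
// reference count, which is what the size assertions below verify. Roughly
// (state written as start: length/refs):
//   m.get(10, 10);   // {10: 10/1}
//   m.get(20, 10);   // merged with its neighbour -> {10: 20/1}
//   m.get(40, 10);   // disjoint -> {10: 20/1}, {40: 10/1}
//   m.get(30, 10);   // gap filled, equal refs collapse -> {10: 40/1}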
138 TEST(bluestore_extent_ref_map_t, add)
139 {
140 bluestore_extent_ref_map_t m;
141 m.get(10, 10);
142 ASSERT_EQ(1u, m.ref_map.size());
143 cout << m << std::endl;
144 m.get(20, 10);
145 cout << m << std::endl;
146 ASSERT_EQ(1u, m.ref_map.size());
147 ASSERT_EQ(20u, m.ref_map[10].length);
148 ASSERT_EQ(1u, m.ref_map[10].refs);
149 m.get(40, 10);
150 cout << m << std::endl;
151 ASSERT_EQ(2u, m.ref_map.size());
152 m.get(30, 10);
153 cout << m << std::endl;
154 ASSERT_EQ(1u, m.ref_map.size());
155 m.get(50, 10);
156 cout << m << std::endl;
157 ASSERT_EQ(1u, m.ref_map.size());
158 m.get(5, 5);
159 cout << m << std::endl;
160 ASSERT_EQ(1u, m.ref_map.size());
161 }
162
163 TEST(bluestore_extent_ref_map_t, get)
164 {
165 bluestore_extent_ref_map_t m;
166 m.get(00, 30);
167 cout << m << std::endl;
168 m.get(10, 10);
169 cout << m << std::endl;
170 ASSERT_EQ(3u, m.ref_map.size());
171 ASSERT_EQ(10u, m.ref_map[0].length);
172 ASSERT_EQ(1u, m.ref_map[0].refs);
173 ASSERT_EQ(10u, m.ref_map[10].length);
174 ASSERT_EQ(2u, m.ref_map[10].refs);
175 ASSERT_EQ(10u, m.ref_map[20].length);
176 ASSERT_EQ(1u, m.ref_map[20].refs);
177 m.get(20, 5);
178 cout << m << std::endl;
179 ASSERT_EQ(3u, m.ref_map.size());
180 ASSERT_EQ(15u, m.ref_map[10].length);
181 ASSERT_EQ(2u, m.ref_map[10].refs);
182 ASSERT_EQ(5u, m.ref_map[25].length);
183 ASSERT_EQ(1u, m.ref_map[25].refs);
184 m.get(5, 20);
185 cout << m << std::endl;
186 ASSERT_EQ(4u, m.ref_map.size());
187 ASSERT_EQ(5u, m.ref_map[0].length);
188 ASSERT_EQ(1u, m.ref_map[0].refs);
189 ASSERT_EQ(5u, m.ref_map[5].length);
190 ASSERT_EQ(2u, m.ref_map[5].refs);
191 ASSERT_EQ(15u, m.ref_map[10].length);
192 ASSERT_EQ(3u, m.ref_map[10].refs);
193 ASSERT_EQ(5u, m.ref_map[25].length);
194 ASSERT_EQ(1u, m.ref_map[25].refs);
195 m.get(25, 3);
196 cout << m << std::endl;
197 ASSERT_EQ(5u, m.ref_map.size());
198 ASSERT_EQ(5u, m.ref_map[0].length);
199 ASSERT_EQ(1u, m.ref_map[0].refs);
200 ASSERT_EQ(5u, m.ref_map[5].length);
201 ASSERT_EQ(2u, m.ref_map[5].refs);
202 ASSERT_EQ(15u, m.ref_map[10].length);
203 ASSERT_EQ(3u, m.ref_map[10].refs);
204 ASSERT_EQ(3u, m.ref_map[25].length);
205 ASSERT_EQ(2u, m.ref_map[25].refs);
206 ASSERT_EQ(2u, m.ref_map[28].length);
207 ASSERT_EQ(1u, m.ref_map[28].refs);
208 }
209
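// put(offset, length, &released, &maybe_unshared) drops references: ranges
// whose count reaches zero are appended to `released`, and *maybe_unshared is
// left true only when no surviving range is referenced more than once, as the
// assertions below demonstrate.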
210 TEST(bluestore_extent_ref_map_t, put)
211 {
212 bluestore_extent_ref_map_t m;
213 PExtentVector r;
214 bool maybe_unshared = false;
215 m.get(10, 30);
216 maybe_unshared = true;
217 m.put(10, 30, &r, &maybe_unshared);
218 cout << m << " " << r << " " << (int)maybe_unshared << std::endl;
219 ASSERT_EQ(0u, m.ref_map.size());
220 ASSERT_EQ(1u, r.size());
221 ASSERT_EQ(10u, r[0].offset);
222 ASSERT_EQ(30u, r[0].length);
223 ASSERT_TRUE(maybe_unshared);
224 r.clear();
225 m.get(10, 30);
226 m.get(20, 10);
227 maybe_unshared = true;
228 m.put(10, 30, &r, &maybe_unshared);
229 cout << m << " " << r << " " << (int)maybe_unshared << std::endl;
230 ASSERT_EQ(1u, m.ref_map.size());
231 ASSERT_EQ(10u, m.ref_map[20].length);
232 ASSERT_EQ(1u, m.ref_map[20].refs);
233 ASSERT_EQ(2u, r.size());
234 ASSERT_EQ(10u, r[0].offset);
235 ASSERT_EQ(10u, r[0].length);
236 ASSERT_EQ(30u, r[1].offset);
237 ASSERT_EQ(10u, r[1].length);
238 ASSERT_TRUE(maybe_unshared);
239 r.clear();
240 m.get(30, 10);
241 m.get(30, 10);
242 maybe_unshared = true;
243 m.put(20, 15, &r, &maybe_unshared);
244 cout << m << " " << r << " " << (int)maybe_unshared << std::endl;
245 ASSERT_EQ(2u, m.ref_map.size());
246 ASSERT_EQ(5u, m.ref_map[30].length);
247 ASSERT_EQ(1u, m.ref_map[30].refs);
248 ASSERT_EQ(5u, m.ref_map[35].length);
249 ASSERT_EQ(2u, m.ref_map[35].refs);
250 ASSERT_EQ(1u, r.size());
251 ASSERT_EQ(20u, r[0].offset);
252 ASSERT_EQ(10u, r[0].length);
253 ASSERT_FALSE(maybe_unshared);
254 r.clear();
255 maybe_unshared = true;
256 m.put(33, 5, &r, &maybe_unshared);
257 cout << m << " " << r << " " << (int)maybe_unshared << std::endl;
258 ASSERT_EQ(3u, m.ref_map.size());
259 ASSERT_EQ(3u, m.ref_map[30].length);
260 ASSERT_EQ(1u, m.ref_map[30].refs);
261 ASSERT_EQ(3u, m.ref_map[35].length);
262 ASSERT_EQ(1u, m.ref_map[35].refs);
263 ASSERT_EQ(2u, m.ref_map[38].length);
264 ASSERT_EQ(2u, m.ref_map[38].refs);
265 ASSERT_EQ(1u, r.size());
266 ASSERT_EQ(33u, r[0].offset);
267 ASSERT_EQ(2u, r[0].length);
268 ASSERT_FALSE(maybe_unshared);
269 r.clear();
270 maybe_unshared = true;
271 m.put(38, 2, &r, &maybe_unshared);
272 cout << m << " " << r << " " << (int)maybe_unshared << std::endl;
273 ASSERT_TRUE(maybe_unshared);
274 }
275
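// contains(offset, length) requires the whole range to be referenced, while
// intersects(offset, length) is satisfied by any overlap with a referenced range.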
276 TEST(bluestore_extent_ref_map_t, contains)
277 {
278 bluestore_extent_ref_map_t m;
279 m.get(10, 30);
280 ASSERT_TRUE(m.contains(10, 30));
281 ASSERT_TRUE(m.contains(10, 10));
282 ASSERT_TRUE(m.contains(30, 10));
283 ASSERT_FALSE(m.contains(0, 10));
284 ASSERT_FALSE(m.contains(0, 20));
285 ASSERT_FALSE(m.contains(0, 100));
286 ASSERT_FALSE(m.contains(40, 10));
287 ASSERT_FALSE(m.contains(30, 11));
288 m.get(40, 10);
289 m.get(40, 10);
290 ASSERT_TRUE(m.contains(30, 11));
291 ASSERT_TRUE(m.contains(30, 20));
292 ASSERT_TRUE(m.contains(10, 40));
293 ASSERT_FALSE(m.contains(0, 50));
294 ASSERT_FALSE(m.contains(40, 20));
295 m.get(60, 100);
296 ASSERT_TRUE(m.contains(60, 10));
297 ASSERT_TRUE(m.contains(40, 10));
298 ASSERT_FALSE(m.contains(40, 11));
299 ASSERT_FALSE(m.contains(40, 20));
300 ASSERT_FALSE(m.contains(40, 30));
301 ASSERT_FALSE(m.contains(40, 3000));
302 ASSERT_FALSE(m.contains(4000, 30));
303 }
304
305 TEST(bluestore_extent_ref_map_t, intersects)
306 {
307 bluestore_extent_ref_map_t m;
308 m.get(10, 30);
309 ASSERT_TRUE(m.intersects(10, 30));
310 ASSERT_TRUE(m.intersects(0, 11));
311 ASSERT_TRUE(m.intersects(10, 40));
312 ASSERT_TRUE(m.intersects(15, 40));
313 ASSERT_FALSE(m.intersects(0, 10));
314 ASSERT_FALSE(m.intersects(0, 5));
315 ASSERT_FALSE(m.intersects(40, 20));
316 ASSERT_FALSE(m.intersects(41, 20));
317 m.get(40, 10);
318 m.get(40, 10);
319 ASSERT_TRUE(m.intersects(0, 100));
320 ASSERT_TRUE(m.intersects(10, 35));
321 ASSERT_TRUE(m.intersects(45, 10));
322 ASSERT_FALSE(m.intersects(50, 5));
323 m.get(60, 100);
324 ASSERT_TRUE(m.intersects(45, 10));
325 ASSERT_TRUE(m.intersects(55, 10));
326 ASSERT_TRUE(m.intersects(50, 11));
327 ASSERT_FALSE(m.intersects(50, 10));
328 ASSERT_FALSE(m.intersects(51, 9));
329 ASSERT_FALSE(m.intersects(55, 1));
330 }
331
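// init_csum(type, order, length) arranges per-chunk checksums with a chunk size
// of 1 << order (order 3 here, i.e. the 8-byte pieces f/m/e cut out of bl).
// verify_csum() returns 0 and sets *bad_off = -1 when every covered chunk
// matches, otherwise -1 with *bad_off pointing at the first mismatching chunk.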
332 TEST(bluestore_blob_t, calc_csum)
333 {
334 bufferlist bl;
335 bl.append("asdfghjkqwertyuizxcvbnm,");
336 bufferlist bl2;
337 bl2.append("xxxxXXXXyyyyYYYYzzzzZZZZ");
338 bufferlist f;
339 f.substr_of(bl, 0, 8);
340 bufferlist m;
341 m.substr_of(bl, 8, 8);
342 bufferlist e;
343 e.substr_of(bl, 16, 8);
344 bufferlist n;
345 n.append("12345678");
346
347 for (unsigned csum_type = Checksummer::CSUM_NONE + 1;
348 csum_type < Checksummer::CSUM_MAX;
349 ++csum_type) {
350 cout << "csum_type " << Checksummer::get_csum_type_string(csum_type)
351 << std::endl;
352
353 bluestore_blob_t b;
354 int bad_off;
355 uint64_t bad_csum;
356 ASSERT_EQ(0, b.verify_csum(0, bl, &bad_off, &bad_csum));
357 ASSERT_EQ(-1, bad_off);
358
359 b.init_csum(csum_type, 3, 24);
360 cout << " value size " << b.get_csum_value_size() << std::endl;
361 b.calc_csum(0, bl);
362 ASSERT_EQ(0, b.verify_csum(0, bl, &bad_off, &bad_csum));
363 ASSERT_EQ(-1, bad_off);
364 ASSERT_EQ(-1, b.verify_csum(0, bl2, &bad_off, &bad_csum));
365 ASSERT_EQ(0, bad_off);
366
367 ASSERT_EQ(0, b.verify_csum(0, f, &bad_off, &bad_csum));
368 ASSERT_EQ(-1, bad_off);
369 ASSERT_EQ(-1, b.verify_csum(8, f, &bad_off, &bad_csum));
370 ASSERT_EQ(8, bad_off);
371 ASSERT_EQ(-1, b.verify_csum(16, f, &bad_off, &bad_csum));
372 ASSERT_EQ(16, bad_off);
373
374 ASSERT_EQ(-1, b.verify_csum(0, m, &bad_off, &bad_csum));
375 ASSERT_EQ(0, bad_off);
376 ASSERT_EQ(0, b.verify_csum(8, m, &bad_off, &bad_csum));
377 ASSERT_EQ(-1, bad_off);
378 ASSERT_EQ(-1, b.verify_csum(16, m, &bad_off, &bad_csum));
379 ASSERT_EQ(16, bad_off);
380
381 ASSERT_EQ(-1, b.verify_csum(0, e, &bad_off, &bad_csum));
382 ASSERT_EQ(0, bad_off);
383 ASSERT_EQ(-1, b.verify_csum(8, e, &bad_off, &bad_csum));
384 ASSERT_EQ(8, bad_off);
385 ASSERT_EQ(0, b.verify_csum(16, e, &bad_off, &bad_csum));
386 ASSERT_EQ(-1, bad_off);
387
388 b.calc_csum(8, n);
389 ASSERT_EQ(0, b.verify_csum(0, f, &bad_off, &bad_csum));
390 ASSERT_EQ(-1, bad_off);
391 ASSERT_EQ(0, b.verify_csum(8, n, &bad_off, &bad_csum));
392 ASSERT_EQ(-1, bad_off);
393 ASSERT_EQ(0, b.verify_csum(16, e, &bad_off, &bad_csum));
394 ASSERT_EQ(-1, bad_off);
395 ASSERT_EQ(-1, b.verify_csum(0, bl, &bad_off, &bad_csum));
396 ASSERT_EQ(8, bad_off);
397 }
398 }
399
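// Rough throughput check: checksum a 10 MiB buffer 256 times for each checksum
// algorithm using 4 KiB (order 12) csum chunks and report MB/sec.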
400 TEST(bluestore_blob_t, csum_bench)
401 {
402 bufferlist bl;
403 bufferptr bp(10485760);
404 for (char *a = bp.c_str(); a < bp.c_str() + bp.length(); ++a)
405 *a = (unsigned long)a & 0xff;
406 bl.append(bp);
407 int count = 256;
408 for (unsigned csum_type = 1;
409 csum_type < Checksummer::CSUM_MAX;
410 ++csum_type) {
411 bluestore_blob_t b;
412 b.init_csum(csum_type, 12, bl.length());
413 ceph::mono_clock::time_point start = ceph::mono_clock::now();
414 for (int i = 0; i<count; ++i) {
415 b.calc_csum(0, bl);
416 }
417 ceph::mono_clock::time_point end = ceph::mono_clock::now();
418 auto dur = std::chrono::duration_cast<ceph::timespan>(end - start);
419 double mbsec = (double)count * (double)bl.length() / 1000000.0 / (double)dur.count() * 1000000000.0;
420 cout << "csum_type " << Checksummer::get_csum_type_string(csum_type)
421 << ", " << dur << " seconds, "
422 << mbsec << " MB/sec" << std::endl;
423 }
424 }
425
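// Each block below injects a physical layout with allocated_test(), takes
// logical references with get_ref() and drops them with put_ref().  put_ref()
// returns true only when the last reference on the blob goes away and appends
// any physical ranges that became fully unreferenced and releasable to the
// supplied PExtentVector; what is releasable is rounded by the store's
// min_alloc_size and, where set, the csum chunk size (see the comments further
// down).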
426 TEST(Blob, put_ref)
427 {
428 {
429 BlueStore store(g_ceph_context, "", 4096);
430 BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
431 g_ceph_context, "lru", NULL);
432 BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
433 g_ceph_context, "lru", NULL);
434
435 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
436 BlueStore::Blob b;
437 b.shared_blob = new BlueStore::SharedBlob(coll.get());
438 b.dirty_blob().allocated_test(bluestore_pextent_t(0x40715000, 0x2000));
439 b.dirty_blob().allocated_test(
440 bluestore_pextent_t(bluestore_pextent_t::INVALID_OFFSET, 0x8000));
441 b.dirty_blob().allocated_test(bluestore_pextent_t(0x4071f000, 0x5000));
442 b.get_ref(coll.get(), 0, 0x1200);
443 b.get_ref(coll.get(), 0xae00, 0x4200);
444 ASSERT_EQ(0x5400u, b.get_referenced_bytes());
445 cout << b << std::endl;
446 PExtentVector r;
447
448 ASSERT_FALSE(b.put_ref(coll.get(), 0, 0x1200, &r));
449 ASSERT_EQ(0x4200u, b.get_referenced_bytes());
450 cout << " r " << r << std::endl;
451 cout << b << std::endl;
452
453 r.clear();
454 ASSERT_TRUE(b.put_ref(coll.get(), 0xae00, 0x4200, &r));
455 ASSERT_EQ(0u, b.get_referenced_bytes());
456 cout << " r " << r << std::endl;
457 cout << b << std::endl;
458 }
459
460 unsigned mas = 4096;
461 BlueStore store(g_ceph_context, "", 8192);
462 BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
463 g_ceph_context, "lru", NULL);
464 BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
465 g_ceph_context, "lru", NULL);
466 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
467
468 {
469 BlueStore::Blob B;
470 B.shared_blob = new BlueStore::SharedBlob(coll.get());
471 bluestore_blob_t& b = B.dirty_blob();
472 PExtentVector r;
473 b.allocated_test(bluestore_pextent_t(0, mas * 2));
474 B.get_ref(coll.get(), 0, mas*2);
475 ASSERT_EQ(mas * 2, B.get_referenced_bytes());
476 ASSERT_TRUE(b.is_allocated(0, mas*2));
477 ASSERT_TRUE(B.put_ref(coll.get(), 0, mas*2, &r));
478 ASSERT_EQ(0u, B.get_referenced_bytes());
479 cout << "r " << r << " " << b << std::endl;
480 ASSERT_EQ(1u, r.size());
481 ASSERT_EQ(0u, r[0].offset);
482 ASSERT_EQ(mas*2, r[0].length);
483 ASSERT_FALSE(b.is_allocated(0, mas*2));
484 ASSERT_FALSE(b.is_allocated(0, mas));
485 ASSERT_FALSE(b.is_allocated(mas, 0));
486 ASSERT_FALSE(b.get_extents()[0].is_valid());
487 ASSERT_EQ(mas*2, b.get_extents()[0].length);
488 }
489 {
490 BlueStore::Blob B;
491 B.shared_blob = new BlueStore::SharedBlob(coll.get());
492 bluestore_blob_t& b = B.dirty_blob();
493 PExtentVector r;
494 b.allocated_test(bluestore_pextent_t(123, mas * 2));
495 B.get_ref(coll.get(), 0, mas*2);
496 ASSERT_EQ(mas * 2, B.get_referenced_bytes());
497 ASSERT_FALSE(B.put_ref(coll.get(), 0, mas, &r));
498 ASSERT_EQ(mas, B.get_referenced_bytes());
499 cout << "r " << r << " " << b << std::endl;
500 ASSERT_EQ(0u, r.size());
501 ASSERT_TRUE(b.is_allocated(0, mas*2));
502 ASSERT_TRUE(B.put_ref(coll.get(), mas, mas, &r));
503 ASSERT_EQ(0u, B.get_referenced_bytes());
504 ASSERT_EQ(0u, B.get_referenced_bytes());
505 cout << "r " << r << " " << b << std::endl;
506 ASSERT_EQ(1u, r.size());
507 ASSERT_EQ(123u, r[0].offset);
508 ASSERT_EQ(mas*2, r[0].length);
509 ASSERT_FALSE(b.is_allocated(0, mas*2));
510 ASSERT_FALSE(b.get_extents()[0].is_valid());
511 ASSERT_EQ(mas*2, b.get_extents()[0].length);
512 }
513 {
514 BlueStore::Blob B;
515 B.shared_blob = new BlueStore::SharedBlob(coll.get());
516 bluestore_blob_t& b = B.dirty_blob();
517 PExtentVector r;
518 b.allocated_test(bluestore_pextent_t(1, mas));
519 b.allocated_test(bluestore_pextent_t(2, mas));
520 b.allocated_test(bluestore_pextent_t(3, mas));
521 b.allocated_test(bluestore_pextent_t(4, mas));
522 B.get_ref(coll.get(), 0, mas*4);
523 ASSERT_EQ(mas * 4, B.get_referenced_bytes());
524 ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r));
525 ASSERT_EQ(mas * 3, B.get_referenced_bytes());
526 cout << "r " << r << " " << b << std::endl;
527 ASSERT_EQ(0u, r.size());
528 ASSERT_TRUE(b.is_allocated(0, mas*4));
529 ASSERT_TRUE(b.is_allocated(mas, mas));
530 ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas, &r));
531 ASSERT_EQ(mas * 2, B.get_referenced_bytes());
532 cout << "r " << r << " " << b << std::endl;
533 ASSERT_EQ(0u, r.size());
534 ASSERT_TRUE(b.is_allocated(mas*2, mas));
535 ASSERT_TRUE(b.is_allocated(0, mas*4));
536 ASSERT_FALSE(B.put_ref(coll.get(), mas*3, mas, &r));
537 ASSERT_EQ(mas, B.get_referenced_bytes());
538 cout << "r " << r << " " << b << std::endl;
539 ASSERT_EQ(2u, r.size());
540 ASSERT_EQ(3u, r[0].offset);
541 ASSERT_EQ(mas, r[0].length);
542 ASSERT_EQ(4u, r[1].offset);
543 ASSERT_EQ(mas, r[1].length);
544 ASSERT_TRUE(b.is_allocated(0, mas*2));
545 ASSERT_FALSE(b.is_allocated(mas*2, mas*2));
546 ASSERT_TRUE(b.get_extents()[0].is_valid());
547 ASSERT_TRUE(b.get_extents()[1].is_valid());
548 ASSERT_FALSE(b.get_extents()[2].is_valid());
549 ASSERT_EQ(3u, b.get_extents().size());
550 }
551 {
552 BlueStore::Blob B;
553 B.shared_blob = new BlueStore::SharedBlob(coll.get());
554 bluestore_blob_t& b = B.dirty_blob();
555 PExtentVector r;
556 b.allocated_test(bluestore_pextent_t(1, mas));
557 b.allocated_test(bluestore_pextent_t(2, mas));
558 b.allocated_test(bluestore_pextent_t(3, mas));
559 b.allocated_test(bluestore_pextent_t(4, mas));
560 b.allocated_test(bluestore_pextent_t(5, mas));
561 b.allocated_test(bluestore_pextent_t(6, mas));
562 B.get_ref(coll.get(), 0, mas*6);
563 ASSERT_EQ(mas * 6, B.get_referenced_bytes());
564 ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r));
565 ASSERT_EQ(mas * 5, B.get_referenced_bytes());
566 cout << "r " << r << " " << b << std::endl;
567 ASSERT_EQ(0u, r.size());
568 ASSERT_TRUE(b.is_allocated(0, mas*6));
569 ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas, &r));
570 ASSERT_EQ(mas * 4, B.get_referenced_bytes());
571 cout << "r " << r << " " << b << std::endl;
572 ASSERT_EQ(0u, r.size());
573 ASSERT_TRUE(b.is_allocated(0, mas*6));
574 ASSERT_FALSE(B.put_ref(coll.get(), mas*3, mas, &r));
575 ASSERT_EQ(mas * 3, B.get_referenced_bytes());
576 cout << "r " << r << " " << b << std::endl;
577 ASSERT_EQ(2u, r.size());
578 ASSERT_EQ(3u, r[0].offset);
579 ASSERT_EQ(mas, r[0].length);
580 ASSERT_EQ(4u, r[1].offset);
581 ASSERT_EQ(mas, r[1].length);
582 ASSERT_TRUE(b.is_allocated(0, mas*2));
583 ASSERT_FALSE(b.is_allocated(mas*2, mas*2));
584 ASSERT_TRUE(b.is_allocated(mas*4, mas*2));
585 ASSERT_EQ(5u, b.get_extents().size());
586 ASSERT_TRUE(b.get_extents()[0].is_valid());
587 ASSERT_TRUE(b.get_extents()[1].is_valid());
588 ASSERT_FALSE(b.get_extents()[2].is_valid());
589 ASSERT_TRUE(b.get_extents()[3].is_valid());
590 ASSERT_TRUE(b.get_extents()[4].is_valid());
591 }
592 {
593 BlueStore::Blob B;
594 B.shared_blob = new BlueStore::SharedBlob(coll.get());
595 bluestore_blob_t& b = B.dirty_blob();
596 PExtentVector r;
597 b.allocated_test(bluestore_pextent_t(1, mas * 6));
598 B.get_ref(coll.get(), 0, mas*6);
599 ASSERT_EQ(mas * 6, B.get_referenced_bytes());
600 ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r));
601 ASSERT_EQ(mas * 5, B.get_referenced_bytes());
602 cout << "r " << r << " " << b << std::endl;
603 ASSERT_EQ(0u, r.size());
604 ASSERT_TRUE(b.is_allocated(0, mas*6));
605 ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas, &r));
606 ASSERT_EQ(mas * 4, B.get_referenced_bytes());
607 cout << "r " << r << " " << b << std::endl;
608 ASSERT_EQ(0u, r.size());
609 ASSERT_TRUE(b.is_allocated(0, mas*6));
610 ASSERT_FALSE(B.put_ref(coll.get(), mas*3, mas, &r));
611 ASSERT_EQ(mas * 3, B.get_referenced_bytes());
612 cout << "r " << r << " " << b << std::endl;
613 ASSERT_EQ(1u, r.size());
614 ASSERT_EQ(0x2001u, r[0].offset);
615 ASSERT_EQ(mas*2, r[0].length);
616 ASSERT_TRUE(b.is_allocated(0, mas*2));
617 ASSERT_FALSE(b.is_allocated(mas*2, mas*2));
618 ASSERT_TRUE(b.is_allocated(mas*4, mas*2));
619 ASSERT_EQ(3u, b.get_extents().size());
620 ASSERT_TRUE(b.get_extents()[0].is_valid());
621 ASSERT_FALSE(b.get_extents()[1].is_valid());
622 ASSERT_TRUE(b.get_extents()[2].is_valid());
623 }
624 {
625 BlueStore::Blob B;
626 B.shared_blob = new BlueStore::SharedBlob(coll.get());
627 bluestore_blob_t& b = B.dirty_blob();
628 PExtentVector r;
629 b.allocated_test(bluestore_pextent_t(1, mas * 4));
630 b.allocated_test(bluestore_pextent_t(2, mas * 4));
631 b.allocated_test(bluestore_pextent_t(3, mas * 4));
632 B.get_ref(coll.get(), 0, mas*12);
633 ASSERT_EQ(mas * 12, B.get_referenced_bytes());
634 ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r));
635 ASSERT_EQ(mas * 11, B.get_referenced_bytes());
636 cout << "r " << r << " " << b << std::endl;
637 ASSERT_EQ(0u, r.size());
638 ASSERT_TRUE(b.is_allocated(0, mas*12));
639 ASSERT_FALSE(B.put_ref(coll.get(), mas*9, mas, &r));
640 ASSERT_EQ(mas * 10, B.get_referenced_bytes());
641 cout << "r " << r << " " << b << std::endl;
642 ASSERT_EQ(0u, r.size());
643 ASSERT_TRUE(b.is_allocated(0, mas*12));
644 ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas*7, &r));
645 ASSERT_EQ(mas * 3, B.get_referenced_bytes());
646 cout << "r " << r << " " << b << std::endl;
647 ASSERT_EQ(3u, r.size());
648 ASSERT_EQ(0x2001u, r[0].offset);
649 ASSERT_EQ(mas*2, r[0].length);
650 ASSERT_EQ(0x2u, r[1].offset);
651 ASSERT_EQ(mas*4, r[1].length);
652 ASSERT_EQ(0x3u, r[2].offset);
653 ASSERT_EQ(mas*2, r[2].length);
654 ASSERT_TRUE(b.is_allocated(0, mas*2));
655 ASSERT_FALSE(b.is_allocated(mas*2, mas*8));
656 ASSERT_TRUE(b.is_allocated(mas*10, mas*2));
657 ASSERT_EQ(3u, b.get_extents().size());
658 ASSERT_TRUE(b.get_extents()[0].is_valid());
659 ASSERT_FALSE(b.get_extents()[1].is_valid());
660 ASSERT_TRUE(b.get_extents()[2].is_valid());
661 }
662 {
663 BlueStore::Blob B;
664 B.shared_blob = new BlueStore::SharedBlob(coll.get());
665 bluestore_blob_t& b = B.dirty_blob();
666 PExtentVector r;
667 b.allocated_test(bluestore_pextent_t(1, mas * 4));
668 b.allocated_test(bluestore_pextent_t(2, mas * 4));
669 b.allocated_test(bluestore_pextent_t(3, mas * 4));
670 B.get_ref(coll.get(), 0, mas*12);
671 ASSERT_EQ(mas * 12, B.get_referenced_bytes());
672 ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r));
673 ASSERT_EQ(mas * 11, B.get_referenced_bytes());
674 cout << "r " << r << " " << b << std::endl;
675 ASSERT_EQ(0u, r.size());
676 ASSERT_TRUE(b.is_allocated(0, mas*12));
677 ASSERT_FALSE(B.put_ref(coll.get(), mas*9, mas, &r));
678 ASSERT_EQ(mas * 10, B.get_referenced_bytes());
679 cout << "r " << r << " " << b << std::endl;
680 ASSERT_EQ(0u, r.size());
681 ASSERT_TRUE(b.is_allocated(0, mas*12));
682 ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas*7, &r));
683 ASSERT_EQ(mas * 3, B.get_referenced_bytes());
684 cout << "r " << r << " " << b << std::endl;
685 ASSERT_EQ(3u, r.size());
686 ASSERT_EQ(0x2001u, r[0].offset);
687 ASSERT_EQ(mas*2, r[0].length);
688 ASSERT_EQ(0x2u, r[1].offset);
689 ASSERT_EQ(mas*4, r[1].length);
690 ASSERT_EQ(0x3u, r[2].offset);
691 ASSERT_EQ(mas*2, r[2].length);
692 ASSERT_TRUE(b.is_allocated(0, mas*2));
693 ASSERT_FALSE(b.is_allocated(mas*2, mas*8));
694 ASSERT_TRUE(b.is_allocated(mas*10, mas*2));
695 ASSERT_EQ(3u, b.get_extents().size());
696 ASSERT_TRUE(b.get_extents()[0].is_valid());
697 ASSERT_FALSE(b.get_extents()[1].is_valid());
698 ASSERT_TRUE(b.get_extents()[2].is_valid());
699 ASSERT_FALSE(B.put_ref(coll.get(), 0, mas, &r));
700 ASSERT_EQ(mas * 2, B.get_referenced_bytes());
701 cout << "r " << r << " " << b << std::endl;
702 ASSERT_EQ(1u, r.size());
703 ASSERT_EQ(0x1u, r[0].offset);
704 ASSERT_EQ(mas*2, r[0].length);
705 ASSERT_EQ(2u, b.get_extents().size());
706 ASSERT_FALSE(b.get_extents()[0].is_valid());
707 ASSERT_TRUE(b.get_extents()[1].is_valid());
708 ASSERT_TRUE(B.put_ref(coll.get(), mas*10, mas*2, &r));
709 ASSERT_EQ(mas * 0, B.get_referenced_bytes());
710 cout << "r " << r << " " << b << std::endl;
711 ASSERT_EQ(1u, r.size());
712 ASSERT_EQ(0x2003u, r[0].offset);
713 ASSERT_EQ(mas*2, r[0].length);
714 ASSERT_EQ(1u, b.get_extents().size());
715 ASSERT_FALSE(b.get_extents()[0].is_valid());
716 }
717 {
718 BlueStore::Blob B;
719 B.shared_blob = new BlueStore::SharedBlob(coll.get());
720 bluestore_blob_t& b = B.dirty_blob();
721 PExtentVector r;
722 b.allocated_test(bluestore_pextent_t(1, mas * 4));
723 b.allocated_test(bluestore_pextent_t(2, mas * 4));
724 b.allocated_test(bluestore_pextent_t(3, mas * 4));
725 B.get_ref(coll.get(), 0, mas*12);
726 ASSERT_EQ(mas * 12, B.get_referenced_bytes());
727 ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r));
728 ASSERT_EQ(mas * 11, B.get_referenced_bytes());
729 cout << "r " << r << " " << b << std::endl;
730 ASSERT_EQ(0u, r.size());
731 ASSERT_TRUE(b.is_allocated(0, mas*12));
732 ASSERT_FALSE(B.put_ref(coll.get(), mas*9, mas, &r));
733 ASSERT_EQ(mas * 10, B.get_referenced_bytes());
734 cout << "r " << r << " " << b << std::endl;
735 ASSERT_EQ(0u, r.size());
736 ASSERT_TRUE(b.is_allocated(0, mas*12));
737 ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas*7, &r));
738 ASSERT_EQ(mas * 3, B.get_referenced_bytes());
739 cout << "r " << r << " " << b << std::endl;
740 ASSERT_EQ(3u, r.size());
741 ASSERT_EQ(0x2001u, r[0].offset);
742 ASSERT_EQ(mas*2, r[0].length);
743 ASSERT_EQ(0x2u, r[1].offset);
744 ASSERT_EQ(mas*4, r[1].length);
745 ASSERT_EQ(0x3u, r[2].offset);
746 ASSERT_EQ(mas*2, r[2].length);
747 ASSERT_TRUE(b.is_allocated(0, mas*2));
748 ASSERT_FALSE(b.is_allocated(mas*2, mas*8));
749 ASSERT_TRUE(b.is_allocated(mas*10, mas*2));
750 ASSERT_EQ(3u, b.get_extents().size());
751 ASSERT_TRUE(b.get_extents()[0].is_valid());
752 ASSERT_FALSE(b.get_extents()[1].is_valid());
753 ASSERT_TRUE(b.get_extents()[2].is_valid());
754 ASSERT_FALSE(B.put_ref(coll.get(), mas*10, mas*2, &r));
755 ASSERT_EQ(mas * 1, B.get_referenced_bytes());
756 cout << "r " << r << " " << b << std::endl;
757 ASSERT_EQ(1u, r.size());
758 ASSERT_EQ(0x2003u, r[0].offset);
759 ASSERT_EQ(mas*2, r[0].length);
760 ASSERT_EQ(2u, b.get_extents().size());
761 ASSERT_TRUE(b.get_extents()[0].is_valid());
762 ASSERT_FALSE(b.get_extents()[1].is_valid());
763 ASSERT_TRUE(B.put_ref(coll.get(), 0, mas, &r));
764 ASSERT_EQ(mas * 0, B.get_referenced_bytes());
765 cout << "r " << r << " " << b << std::endl;
766 ASSERT_EQ(1u, r.size());
767 ASSERT_EQ(0x1u, r[0].offset);
768 ASSERT_EQ(mas*2, r[0].length);
769 ASSERT_EQ(1u, b.get_extents().size());
770 ASSERT_FALSE(b.get_extents()[0].is_valid());
771 }
772 {
773 BlueStore::Blob B;
774 B.shared_blob = new BlueStore::SharedBlob(coll.get());
775 bluestore_blob_t& b = B.dirty_blob();
776 PExtentVector r;
777 b.allocated_test(bluestore_pextent_t(1, mas * 8));
778 B.get_ref(coll.get(), 0, mas*8);
779 ASSERT_EQ(mas * 8, B.get_referenced_bytes());
780 ASSERT_FALSE(B.put_ref(coll.get(), 0, mas, &r));
781 ASSERT_EQ(mas * 7, B.get_referenced_bytes());
782 cout << "r " << r << " " << b << std::endl;
783 ASSERT_EQ(0u, r.size());
784 ASSERT_TRUE(b.is_allocated(0, mas*8));
785 ASSERT_FALSE(B.put_ref(coll.get(), mas*7, mas, &r));
786 ASSERT_EQ(mas * 6, B.get_referenced_bytes());
787 cout << "r " << r << " " << b << std::endl;
788 ASSERT_EQ(0u, r.size());
789 ASSERT_TRUE(b.is_allocated(0, mas*8));
790 ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas, &r));
791 ASSERT_EQ(mas * 5, B.get_referenced_bytes());
792 cout << "r " << r << " " << b << std::endl;
793 ASSERT_EQ(0u, r.size());
794 ASSERT_TRUE(b.is_allocated(0, 8));
795 ASSERT_FALSE(B.put_ref(coll.get(), mas*3, mas*4, &r));
796 ASSERT_EQ(mas * 1, B.get_referenced_bytes());
797 ASSERT_EQ(1u, r.size());
798 ASSERT_EQ(0x2001u, r[0].offset);
799 ASSERT_EQ(mas*6, r[0].length);
800 ASSERT_TRUE(b.is_allocated(0, mas*2));
801 ASSERT_FALSE(b.is_allocated(mas*2, mas*6));
802 ASSERT_EQ(2u, b.get_extents().size());
803 ASSERT_TRUE(b.get_extents()[0].is_valid());
804 ASSERT_FALSE(b.get_extents()[1].is_valid());
805 ASSERT_TRUE(B.put_ref(coll.get(), mas, mas, &r));
806 ASSERT_EQ(mas * 0, B.get_referenced_bytes());
807 cout << "r " << r << " " << b << std::endl;
808 ASSERT_EQ(1u, r.size());
809 ASSERT_EQ(0x1u, r[0].offset);
810 ASSERT_EQ(mas*2, r[0].length);
811 ASSERT_EQ(1u, b.get_extents().size());
812 ASSERT_FALSE(b.get_extents()[0].is_valid());
813 }
814 // verify csum chunk size is factored in properly
815 {
816 BlueStore::Blob B;
817 B.shared_blob = new BlueStore::SharedBlob(coll.get());
818 bluestore_blob_t& b = B.dirty_blob();
819 PExtentVector r;
820 b.allocated_test(bluestore_pextent_t(0, mas*4));
821 b.init_csum(Checksummer::CSUM_CRC32C, 14, mas * 4);
822 B.get_ref(coll.get(), 0, mas*4);
823 ASSERT_EQ(mas * 4, B.get_referenced_bytes());
824 ASSERT_TRUE(b.is_allocated(0, mas*4));
825 ASSERT_FALSE(B.put_ref(coll.get(), 0, mas*3, &r));
826 ASSERT_EQ(mas * 1, B.get_referenced_bytes());
827 cout << "r " << r << " " << b << std::endl;
828 ASSERT_EQ(0u, r.size());
829 ASSERT_TRUE(b.is_allocated(0, mas*4));
830 ASSERT_TRUE(b.get_extents()[0].is_valid());
831 ASSERT_EQ(mas*4, b.get_extents()[0].length);
832 }
833 {
834 BlueStore::Blob B;
835 B.shared_blob = new BlueStore::SharedBlob(coll.get());
836 bluestore_blob_t& b = B.dirty_blob();
837 b.allocated_test(bluestore_pextent_t(0x40101000, 0x4000));
838 b.allocated_test(bluestore_pextent_t(bluestore_pextent_t::INVALID_OFFSET,
839 0x13000));
840
841 b.allocated_test(bluestore_pextent_t(0x40118000, 0x7000));
842 B.get_ref(coll.get(), 0x0, 0x3800);
843 B.get_ref(coll.get(), 0x17c00, 0x6400);
844 ASSERT_EQ(0x3800u + 0x6400u, B.get_referenced_bytes());
845 b.set_flag(bluestore_blob_t::FLAG_SHARED);
846 b.init_csum(Checksummer::CSUM_CRC32C, 12, 0x1e000);
847
848 cout << "before: " << B << std::endl;
849 PExtentVector r;
850 ASSERT_FALSE(B.put_ref(coll.get(), 0x1800, 0x2000, &r));
851 ASSERT_EQ(0x3800u + 0x6400u - 0x2000u, B.get_referenced_bytes());
852 cout << "after: " << B << std::endl;
853 cout << "r " << r << std::endl;
854 }
855 {
856 BlueStore::Blob B;
857 B.shared_blob = new BlueStore::SharedBlob(coll.get());
858 bluestore_blob_t& b = B.dirty_blob();
859 b.allocated_test(bluestore_pextent_t(1, 0x5000));
860 b.allocated_test(bluestore_pextent_t(2, 0x5000));
861 B.get_ref(coll.get(), 0x0, 0xa000);
862 ASSERT_EQ(0xa000u, B.get_referenced_bytes());
863 cout << "before: " << B << std::endl;
864 PExtentVector r;
865 ASSERT_FALSE(B.put_ref(coll.get(), 0x8000, 0x2000, &r));
866 cout << "after: " << B << std::endl;
867 cout << "r " << r << std::endl;
868 ASSERT_EQ(0x8000u, B.get_referenced_bytes());
869 ASSERT_EQ(1u, r.size());
870 ASSERT_EQ(0x3002u, r[0].offset);
871 ASSERT_EQ(0x2000u, r[0].length);
872 }
873 {
874 BlueStore::Blob B;
875 B.shared_blob = new BlueStore::SharedBlob(coll.get());
876 bluestore_blob_t& b = B.dirty_blob();
877 b.allocated_test(bluestore_pextent_t(1, 0x7000));
878 b.allocated_test(bluestore_pextent_t(2, 0x7000));
879 B.get_ref(coll.get(), 0x0, 0xe000);
880 ASSERT_EQ(0xe000u, B.get_referenced_bytes());
881 cout << "before: " << B << std::endl;
882 PExtentVector r;
883 ASSERT_FALSE(B.put_ref(coll.get(), 0, 0xb000, &r));
884 ASSERT_EQ(0x3000u, B.get_referenced_bytes());
885 cout << "after: " << B << std::endl;
886 cout << "r " << r << std::endl;
887 ASSERT_EQ(0x3000u, B.get_referenced_bytes());
888 ASSERT_EQ(2u, r.size());
889 ASSERT_EQ(1u, r[0].offset);
890 ASSERT_EQ(0x7000u, r[0].length);
891 ASSERT_EQ(2u, r[1].offset);
892 ASSERT_EQ(0x3000u, r[1].length); // we have 0x1000 bytes less due to
893 // alignment caused by min_alloc_size = 0x2000
894 }
895 {
896 BlueStore store(g_ceph_context, "", 0x4000);
897 BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
898 g_ceph_context, "lru", NULL);
899 BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
900 g_ceph_context, "lru", NULL);
901
902 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
903 BlueStore::Blob B;
904 B.shared_blob = new BlueStore::SharedBlob(coll.get());
905 bluestore_blob_t& b = B.dirty_blob();
906 b.allocated_test(bluestore_pextent_t(1, 0x5000));
907 b.allocated_test(bluestore_pextent_t(2, 0x7000));
908 B.get_ref(coll.get(), 0x0, 0xc000);
909 ASSERT_EQ(0xc000u, B.get_referenced_bytes());
910 cout << "before: " << B << std::endl;
911 PExtentVector r;
912 ASSERT_FALSE(B.put_ref(coll.get(), 0x2000, 0xa000, &r));
913 cout << "after: " << B << std::endl;
914 cout << "r " << r << std::endl;
915 ASSERT_EQ(0x2000u, B.get_referenced_bytes());
916 ASSERT_EQ(2u, r.size());
917 ASSERT_EQ(0x4001u, r[0].offset);
918 ASSERT_EQ(0x1000u, r[0].length);
919 ASSERT_EQ(2u, r[1].offset);
920 ASSERT_EQ(0x7000u, r[1].length);
921 ASSERT_EQ(1u, b.get_extents()[0].offset);
922 ASSERT_EQ(0x4000u, b.get_extents()[0].length);
923 }
924 }
925
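// A blob can be split only if it is neither shared nor compressed and has no
// unused-space tracking; once checksums are enabled the split offset must also
// fall on a csum chunk boundary (order 12 -> 4 KiB below, so 0x2800 is rejected).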
926 TEST(bluestore_blob_t, can_split)
927 {
928 bluestore_blob_t a;
929 ASSERT_TRUE(a.can_split());
930 a.flags = bluestore_blob_t::FLAG_SHARED;
931 ASSERT_FALSE(a.can_split());
932 a.flags = bluestore_blob_t::FLAG_COMPRESSED;
933 ASSERT_FALSE(a.can_split());
934 a.flags = bluestore_blob_t::FLAG_HAS_UNUSED;
935 ASSERT_FALSE(a.can_split());
936 }
937
938 TEST(bluestore_blob_t, can_split_at)
939 {
940 bluestore_blob_t a;
941 a.allocated_test(bluestore_pextent_t(0x10000, 0x2000));
942 a.allocated_test(bluestore_pextent_t(0x20000, 0x2000));
943 ASSERT_TRUE(a.can_split_at(0x1000));
944 ASSERT_TRUE(a.can_split_at(0x1800));
945 a.init_csum(Checksummer::CSUM_CRC32C, 12, 0x4000);
946 ASSERT_TRUE(a.can_split_at(0x1000));
947 ASSERT_TRUE(a.can_split_at(0x2000));
948 ASSERT_TRUE(a.can_split_at(0x3000));
949 ASSERT_FALSE(a.can_split_at(0x2800));
950 }
951
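// can_prune_tail()/prune_tail() drop a trailing unallocated (INVALID_OFFSET)
// extent, shrinking the logical length and, when checksums are configured, the
// csum array to match (6 -> 4 bytes of CRC32C_8 data below).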
952 TEST(bluestore_blob_t, prune_tail)
953 {
954 bluestore_blob_t a;
955 a.allocated_test(bluestore_pextent_t(0x10000, 0x2000));
956 a.allocated_test(bluestore_pextent_t(0x20000, 0x2000));
957 ASSERT_FALSE(a.can_prune_tail());
958 a.allocated_test(
959 bluestore_pextent_t(bluestore_pextent_t::INVALID_OFFSET, 0x2000));
960 ASSERT_TRUE(a.can_prune_tail());
961 a.prune_tail();
962 ASSERT_FALSE(a.can_prune_tail());
963 ASSERT_EQ(2u, a.get_extents().size());
964 ASSERT_EQ(0x4000u, a.get_logical_length());
965
966 a.allocated_test(
967 bluestore_pextent_t(bluestore_pextent_t::INVALID_OFFSET, 0x2000));
968 a.init_csum(Checksummer::CSUM_CRC32C_8, 12, 0x6000);
969 ASSERT_EQ(6u, a.csum_data.length());
970 ASSERT_TRUE(a.can_prune_tail());
971 a.prune_tail();
972 ASSERT_FALSE(a.can_prune_tail());
973 ASSERT_EQ(2u, a.get_extents().size());
974 ASSERT_EQ(0x4000u, a.get_logical_length());
975 ASSERT_EQ(4u, a.csum_data.length());
976
977 bluestore_blob_t b;
978 b.allocated_test(
979 bluestore_pextent_t(bluestore_pextent_t::INVALID_OFFSET, 0x2000));
980 ASSERT_FALSE(b.can_prune_tail());
981 }
982
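// split(coll, blob_offset, &R) moves everything at and beyond blob_offset into
// the right-hand blob: the physical extents, csum data and use tracker are all
// divided, so each half below keeps 0x1000 logical bytes and one 4-byte crc32c.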
983 TEST(Blob, split)
984 {
985 BlueStore store(g_ceph_context, "", 4096);
986 BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
987 g_ceph_context, "lru", NULL);
988 BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
989 g_ceph_context, "lru", NULL);
990 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
991 {
992 BlueStore::Blob L, R;
993 L.shared_blob = new BlueStore::SharedBlob(coll.get());
994 R.shared_blob = new BlueStore::SharedBlob(coll.get());
995 L.dirty_blob().allocated_test(bluestore_pextent_t(0x2000, 0x2000));
996 L.dirty_blob().init_csum(Checksummer::CSUM_CRC32C, 12, 0x2000);
997 L.get_ref(coll.get(), 0, 0x2000);
998 L.split(coll.get(), 0x1000, &R);
999 ASSERT_EQ(0x1000u, L.get_blob().get_logical_length());
1000 ASSERT_EQ(4u, L.get_blob().csum_data.length());
1001 ASSERT_EQ(1u, L.get_blob().get_extents().size());
1002 ASSERT_EQ(0x2000u, L.get_blob().get_extents().front().offset);
1003 ASSERT_EQ(0x1000u, L.get_blob().get_extents().front().length);
1004 ASSERT_EQ(0x1000u, L.get_referenced_bytes());
1005 ASSERT_EQ(0x1000u, R.get_blob().get_logical_length());
1006 ASSERT_EQ(4u, R.get_blob().csum_data.length());
1007 ASSERT_EQ(1u, R.get_blob().get_extents().size());
1008 ASSERT_EQ(0x3000u, R.get_blob().get_extents().front().offset);
1009 ASSERT_EQ(0x1000u, R.get_blob().get_extents().front().length);
1010 ASSERT_EQ(0x1000u, R.get_referenced_bytes());
1011 }
1012 {
1013 BlueStore::Blob L, R;
1014 L.shared_blob = new BlueStore::SharedBlob(coll.get());
1015 R.shared_blob = new BlueStore::SharedBlob(coll.get());
1016 L.dirty_blob().allocated_test(bluestore_pextent_t(0x2000, 0x1000));
1017 L.dirty_blob().allocated_test(bluestore_pextent_t(0x12000, 0x1000));
1018 L.dirty_blob().init_csum(Checksummer::CSUM_CRC32C, 12, 0x2000);
1019 L.get_ref(coll.get(), 0, 0x1000);
1020 L.get_ref(coll.get(), 0x1000, 0x1000);
1021 L.split(coll.get(), 0x1000, &R);
1022 ASSERT_EQ(0x1000u, L.get_blob().get_logical_length());
1023 ASSERT_EQ(4u, L.get_blob().csum_data.length());
1024 ASSERT_EQ(1u, L.get_blob().get_extents().size());
1025 ASSERT_EQ(0x2000u, L.get_blob().get_extents().front().offset);
1026 ASSERT_EQ(0x1000u, L.get_blob().get_extents().front().length);
1027 ASSERT_EQ(0x1000u, L.get_referenced_bytes());
1028 ASSERT_EQ(0x1000u, R.get_blob().get_logical_length());
1029 ASSERT_EQ(4u, R.get_blob().csum_data.length());
1030 ASSERT_EQ(1u, R.get_blob().get_extents().size());
1031 ASSERT_EQ(0x12000u, R.get_blob().get_extents().front().offset);
1032 ASSERT_EQ(0x1000u, R.get_blob().get_extents().front().length);
1033 ASSERT_EQ(0x1000u, R.get_referenced_bytes());
1034 }
1035 }
1036
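// Encodes the same blob in the legacy v1 layout (reference counts carried in a
// separate bluestore_extent_ref_map_t) and the current v2 layout (use tracker
// embedded), then checks that both decode paths rebuild identical accounting.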
1037 TEST(Blob, legacy_decode)
1038 {
1039 BlueStore store(g_ceph_context, "", 4096);
1040 BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
1041 g_ceph_context, "lru", NULL);
1042 BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
1043 g_ceph_context, "lru", NULL);
1044 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
1045 bufferlist bl, bl2;
1046 {
1047 BlueStore::Blob B;
1048
1049 B.shared_blob = new BlueStore::SharedBlob(coll.get());
1050 B.dirty_blob().allocated_test(bluestore_pextent_t(0x1, 0x2000));
1051 B.dirty_blob().init_csum(Checksummer::CSUM_CRC32C, 12, 0x2000);
1052 B.get_ref(coll.get(), 0, 0xff0);
1053 B.get_ref(coll.get(), 0x1fff, 1);
1054
1055 bluestore_extent_ref_map_t fake_ref_map;
1056 fake_ref_map.get(0, 0xff0);
1057 fake_ref_map.get(0x1fff, 1);
1058
1059 size_t bound = 0, bound2 = 0;
1060
1061 B.bound_encode(
1062 bound,
1063 1, /*struct_v*/
1064 0, /*sbid*/
1065 false);
1066 fake_ref_map.bound_encode(bound);
1067
1068 B.bound_encode(
1069 bound2,
1070 2, /*struct_v*/
1071 0, /*sbid*/
1072 true);
1073
1074 {
1075 auto app = bl.get_contiguous_appender(bound);
1076 auto app2 = bl2.get_contiguous_appender(bound2);
1077 B.encode(
1078 app,
1079 1, /*struct_v*/
1080 0, /*sbid*/
1081 false);
1082 fake_ref_map.encode(app);
1083
1084 B.encode(
1085 app2,
1086 2, /*struct_v*/
1087 0, /*sbid*/
1088 true);
1089 }
1090
1091 auto p = bl.front().begin_deep();
1092 auto p2 = bl2.front().begin_deep();
1093 BlueStore::Blob Bres, Bres2;
1094 Bres.shared_blob = new BlueStore::SharedBlob(coll.get());
1095 Bres2.shared_blob = new BlueStore::SharedBlob(coll.get());
1096
1097 uint64_t sbid, sbid2;
1098 Bres.decode(
1099 p,
1100 1, /*struct_v*/
1101 &sbid,
1102 true,
1103 coll.get());
1104 Bres2.decode(
1105 p2,
1106 2, /*struct_v*/
1107 &sbid2,
1108 true,
1109 coll.get());
1110
1111 ASSERT_EQ(0xff0u + 1u, Bres.get_blob_use_tracker().get_referenced_bytes());
1112 ASSERT_EQ(0xff0u + 1u, Bres2.get_blob_use_tracker().get_referenced_bytes());
1113 ASSERT_TRUE(Bres.get_blob_use_tracker().equal(Bres2.get_blob_use_tracker()));
1114 }
1115 }
1116
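// seek_lextent(offset) returns the first logical extent that ends beyond the
// given offset: the extent containing it if one exists, otherwise the next
// extent, or extent_map.end() when nothing follows.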
1117 TEST(ExtentMap, seek_lextent)
1118 {
1119 BlueStore store(g_ceph_context, "", 4096);
1120 BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
1121 g_ceph_context, "lru", NULL);
1122 BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
1123 g_ceph_context, "lru", NULL);
1124
1125 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
1126 BlueStore::Onode onode(coll.get(), ghobject_t(), "");
1127 BlueStore::ExtentMap em(&onode,
1128 g_ceph_context->_conf->bluestore_extent_map_inline_shard_prealloc_size);
1129 BlueStore::BlobRef br(new BlueStore::Blob);
1130 br->shared_blob = new BlueStore::SharedBlob(coll.get());
1131
1132 ASSERT_EQ(em.extent_map.end(), em.seek_lextent(0));
1133 ASSERT_EQ(em.extent_map.end(), em.seek_lextent(100));
1134
1135 em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, br));
1136 auto a = em.find(100);
1137 ASSERT_EQ(a, em.seek_lextent(0));
1138 ASSERT_EQ(a, em.seek_lextent(99));
1139 ASSERT_EQ(a, em.seek_lextent(100));
1140 ASSERT_EQ(a, em.seek_lextent(101));
1141 ASSERT_EQ(a, em.seek_lextent(199));
1142 ASSERT_EQ(em.extent_map.end(), em.seek_lextent(200));
1143
1144 em.extent_map.insert(*new BlueStore::Extent(200, 0, 100, br));
1145 auto b = em.find(200);
1146 ASSERT_EQ(a, em.seek_lextent(0));
1147 ASSERT_EQ(a, em.seek_lextent(99));
1148 ASSERT_EQ(a, em.seek_lextent(100));
1149 ASSERT_EQ(a, em.seek_lextent(101));
1150 ASSERT_EQ(a, em.seek_lextent(199));
1151 ASSERT_EQ(b, em.seek_lextent(200));
1152 ASSERT_EQ(b, em.seek_lextent(299));
1153 ASSERT_EQ(em.extent_map.end(), em.seek_lextent(300));
1154
1155 em.extent_map.insert(*new BlueStore::Extent(400, 0, 100, br));
1156 auto d = em.find(400);
1157 ASSERT_EQ(a, em.seek_lextent(0));
1158 ASSERT_EQ(a, em.seek_lextent(99));
1159 ASSERT_EQ(a, em.seek_lextent(100));
1160 ASSERT_EQ(a, em.seek_lextent(101));
1161 ASSERT_EQ(a, em.seek_lextent(199));
1162 ASSERT_EQ(b, em.seek_lextent(200));
1163 ASSERT_EQ(b, em.seek_lextent(299));
1164 ASSERT_EQ(d, em.seek_lextent(300));
1165 ASSERT_EQ(d, em.seek_lextent(399));
1166 ASSERT_EQ(d, em.seek_lextent(400));
1167 ASSERT_EQ(d, em.seek_lextent(499));
1168 ASSERT_EQ(em.extent_map.end(), em.seek_lextent(500));
1169 }
1170
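// has_any_lextents(offset, length) reports whether [offset, offset+length)
// overlaps any logical extent; a zero-length range, or one that only touches an
// extent boundary (e.g. (0, 100) against the extent starting at 100), does not.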
1171 TEST(ExtentMap, has_any_lextents)
1172 {
1173 BlueStore store(g_ceph_context, "", 4096);
1174 BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
1175 g_ceph_context, "lru", NULL);
1176 BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
1177 g_ceph_context, "lru", NULL);
1178 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
1179 BlueStore::Onode onode(coll.get(), ghobject_t(), "");
1180 BlueStore::ExtentMap em(&onode,
1181 g_ceph_context->_conf->bluestore_extent_map_inline_shard_prealloc_size);
1182 BlueStore::BlobRef b(new BlueStore::Blob);
1183 b->shared_blob = new BlueStore::SharedBlob(coll.get());
1184
1185 ASSERT_FALSE(em.has_any_lextents(0, 0));
1186 ASSERT_FALSE(em.has_any_lextents(0, 1000));
1187 ASSERT_FALSE(em.has_any_lextents(1000, 1000));
1188
1189 em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, b));
1190 ASSERT_FALSE(em.has_any_lextents(0, 50));
1191 ASSERT_FALSE(em.has_any_lextents(0, 100));
1192 ASSERT_FALSE(em.has_any_lextents(50, 50));
1193 ASSERT_TRUE(em.has_any_lextents(50, 51));
1194 ASSERT_TRUE(em.has_any_lextents(50, 100051));
1195 ASSERT_TRUE(em.has_any_lextents(100, 100));
1196 ASSERT_TRUE(em.has_any_lextents(100, 1));
1197 ASSERT_TRUE(em.has_any_lextents(199, 1));
1198 ASSERT_TRUE(em.has_any_lextents(199, 2));
1199 ASSERT_FALSE(em.has_any_lextents(200, 2));
1200
1201 em.extent_map.insert(*new BlueStore::Extent(200, 0, 100, b));
1202 ASSERT_TRUE(em.has_any_lextents(199, 1));
1203 ASSERT_TRUE(em.has_any_lextents(199, 2));
1204 ASSERT_TRUE(em.has_any_lextents(200, 2));
1205 ASSERT_TRUE(em.has_any_lextents(200, 200));
1206 ASSERT_TRUE(em.has_any_lextents(299, 1));
1207 ASSERT_FALSE(em.has_any_lextents(300, 1));
1208
1209 em.extent_map.insert(*new BlueStore::Extent(400, 0, 100, b));
1210 ASSERT_TRUE(em.has_any_lextents(0, 10000));
1211 ASSERT_TRUE(em.has_any_lextents(199, 1));
1212 ASSERT_FALSE(em.has_any_lextents(300, 1));
1213 ASSERT_FALSE(em.has_any_lextents(300, 100));
1214 ASSERT_FALSE(em.has_any_lextents(399, 1));
1215 ASSERT_TRUE(em.has_any_lextents(400, 1));
1216 ASSERT_TRUE(em.has_any_lextents(400, 100));
1217 ASSERT_TRUE(em.has_any_lextents(400, 1000));
1218 ASSERT_TRUE(em.has_any_lextents(499, 1000));
1219 ASSERT_FALSE(em.has_any_lextents(500, 1000));
1220 }
1221
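// Helper: unlink the extent at logical offset v from the intrusive extent_map
// and free it.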
1222 void erase_and_delete(BlueStore::ExtentMap& em, size_t v)
1223 {
1224 auto d = em.find(v);
1225 ASSERT_NE(d, em.extent_map.end());
1226 em.extent_map.erase(d);
1227 delete &*d;
1228 }
1229
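// compress_extent_map(offset, length) merges neighbouring extents in the
// touched range that reference the same blob and are contiguous in both logical
// and blob offsets, returning the number of extents removed, as the counts
// asserted below illustrate.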
1230 TEST(ExtentMap, compress_extent_map)
1231 {
1232 BlueStore store(g_ceph_context, "", 4096);
1233 BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
1234 g_ceph_context, "lru", NULL);
1235 BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
1236 g_ceph_context, "lru", NULL);
1237
1238 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
1239 BlueStore::Onode onode(coll.get(), ghobject_t(), "");
1240 BlueStore::ExtentMap em(&onode,
1241 g_ceph_context->_conf->bluestore_extent_map_inline_shard_prealloc_size);
1242 BlueStore::BlobRef b1(new BlueStore::Blob);
1243 BlueStore::BlobRef b2(new BlueStore::Blob);
1244 BlueStore::BlobRef b3(new BlueStore::Blob);
1245 b1->shared_blob = new BlueStore::SharedBlob(coll.get());
1246 b2->shared_blob = new BlueStore::SharedBlob(coll.get());
1247 b3->shared_blob = new BlueStore::SharedBlob(coll.get());
1248
1249 em.extent_map.insert(*new BlueStore::Extent(0, 0, 100, b1));
1250 em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, b2));
1251 ASSERT_EQ(0, em.compress_extent_map(0, 10000));
1252 ASSERT_EQ(2u, em.extent_map.size());
1253
1254 em.extent_map.insert(*new BlueStore::Extent(200, 100, 100, b2));
1255 em.extent_map.insert(*new BlueStore::Extent(300, 200, 100, b2));
1256 ASSERT_EQ(0, em.compress_extent_map(0, 0));
1257 ASSERT_EQ(0, em.compress_extent_map(100000, 1000));
1258 ASSERT_EQ(2, em.compress_extent_map(0, 100000));
1259 ASSERT_EQ(2u, em.extent_map.size());
1260 erase_and_delete(em, 100);
1261 em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, b2));
1262 em.extent_map.insert(*new BlueStore::Extent(200, 100, 100, b3));
1263 em.extent_map.insert(*new BlueStore::Extent(300, 200, 100, b2));
1264 ASSERT_EQ(0, em.compress_extent_map(0, 1));
1265 ASSERT_EQ(0, em.compress_extent_map(0, 100000));
1266 ASSERT_EQ(4u, em.extent_map.size());
1267
1268 em.extent_map.insert(*new BlueStore::Extent(400, 300, 100, b2));
1269 em.extent_map.insert(*new BlueStore::Extent(500, 500, 100, b2));
1270 em.extent_map.insert(*new BlueStore::Extent(600, 600, 100, b2));
1271 em.extent_map.insert(*new BlueStore::Extent(700, 0, 100, b1));
1272 em.extent_map.insert(*new BlueStore::Extent(800, 0, 100, b3));
1273 ASSERT_EQ(0, em.compress_extent_map(0, 99));
1274 ASSERT_EQ(0, em.compress_extent_map(800, 1000));
1275 ASSERT_EQ(2, em.compress_extent_map(100, 500));
1276 ASSERT_EQ(7u, em.extent_map.size());
1277 erase_and_delete(em, 300);
1278 erase_and_delete(em, 500);
1279 erase_and_delete(em, 700);
1280 em.extent_map.insert(*new BlueStore::Extent(400, 300, 100, b2));
1281 em.extent_map.insert(*new BlueStore::Extent(500, 400, 100, b2));
1282 em.extent_map.insert(*new BlueStore::Extent(700, 500, 100, b2));
1283 ASSERT_EQ(1, em.compress_extent_map(0, 1000));
1284 ASSERT_EQ(6u, em.extent_map.size());
1285 }
1286
1287
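// Helper: unlink and delete every OldExtent queued in an old_extent_map_t
// between the GC test cases below.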
1288 void clear_and_dispose(BlueStore::old_extent_map_t& old_em)
1289 {
1290 auto oep = old_em.begin();
1291 while (oep != old_em.end()) {
1292 auto &lo = *oep;
1293 oep = old_em.erase(oep);
1294 delete &lo;
1295 }
1296 }
1297
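// GarbageCollector::estimate(write_offset, write_length, extent_map,
// overwritten_old_extents, min_alloc_size) returns the expected benefit of
// collecting (asserted below as 1, 2 and 0 respectively) and exposes, via
// get_extents_to_collect(), the logical ranges whose still-live data would have
// to be rewritten so the partially overwritten blobs can be released.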
1298 TEST(GarbageCollector, BasicTest)
1299 {
1300 BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
1301 g_ceph_context, "lru", NULL);
1302 BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
1303 g_ceph_context, "lru", NULL);
1304
1305 BlueStore store(g_ceph_context, "", 4096);
1306 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
1307 BlueStore::Onode onode(coll.get(), ghobject_t(), "");
1308 BlueStore::ExtentMap em(&onode,
1309 g_ceph_context->_conf->bluestore_extent_map_inline_shard_prealloc_size);
1310
1311 BlueStore::old_extent_map_t old_extents;
1312
1313
1314 /*
1315 min_alloc_size = 4096
1316 original disposition
1317 extent1 <loffs = 100, boffs = 100, len = 10>
1318 -> blob1<compressed, len_on_disk=4096, logical_len=8192>
1319 extent2 <loffs = 200, boffs = 200, len = 10>
1320 -> blob2<raw, len_on_disk=4096, llen=4096>
1321 extent3 <loffs = 300, boffs = 300, len = 10>
1322 -> blob1<compressed, len_on_disk=4096, llen=8192>
1323 extent4 <loffs = 4096, boffs = 0, len = 10>
1324 -> blob3<raw, len_on_disk=4096, llen=4096>
325 a write(300~100) resulted in
1326 extent1 <loffs = 100, boffs = 100, len = 10>
1327 -> blob1<compressed, len_on_disk=4096, logical_len=8192>
1328 extent2 <loffs = 200, boffs = 200, len = 10>
1329 -> blob2<raw, len_on_disk=4096, llen=4096>
1330 extent3 <loffs = 300, boffs = 300, len = 100>
1331 -> blob4<raw, len_on_disk=4096, llen=4096>
1332 extent4 <loffs = 4096, boffs = 0, len = 10>
1333 -> blob3<raw, len_on_disk=4096, llen=4096>
1334 */
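// The expectation: rewriting the 10 still-referenced bytes at loffs 100 lets
// compressed blob1 (one 4096-byte allocation unit) be freed, hence a saving of
// 1 and a single {100, 10} interval to collect below.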
1335 {
1336 BlueStore::GarbageCollector gc(g_ceph_context);
1337 int64_t saving;
1338 BlueStore::BlobRef b1(new BlueStore::Blob);
1339 BlueStore::BlobRef b2(new BlueStore::Blob);
1340 BlueStore::BlobRef b3(new BlueStore::Blob);
1341 BlueStore::BlobRef b4(new BlueStore::Blob);
1342 b1->shared_blob = new BlueStore::SharedBlob(coll.get());
1343 b2->shared_blob = new BlueStore::SharedBlob(coll.get());
1344 b3->shared_blob = new BlueStore::SharedBlob(coll.get());
1345 b4->shared_blob = new BlueStore::SharedBlob(coll.get());
1346 b1->dirty_blob().set_compressed(0x2000, 0x1000);
1347 b1->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x1000));
1348 b2->dirty_blob().allocated_test(bluestore_pextent_t(1, 0x1000));
1349 b3->dirty_blob().allocated_test(bluestore_pextent_t(2, 0x1000));
1350 b4->dirty_blob().allocated_test(bluestore_pextent_t(3, 0x1000));
1351 em.extent_map.insert(*new BlueStore::Extent(100, 100, 10, b1));
1352 b1->get_ref(coll.get(), 100, 10);
1353 em.extent_map.insert(*new BlueStore::Extent(200, 200, 10, b2));
1354 b2->get_ref(coll.get(), 200, 10);
1355 em.extent_map.insert(*new BlueStore::Extent(300, 300, 100, b4));
1356 b4->get_ref(coll.get(), 300, 100);
1357 em.extent_map.insert(*new BlueStore::Extent(4096, 0, 10, b3));
1358 b3->get_ref(coll.get(), 0, 10);
1359
1360 old_extents.push_back(*new BlueStore::OldExtent(300, 300, 10, b1));
1361
1362 saving = gc.estimate(300, 100, em, old_extents, 4096);
1363 ASSERT_EQ(saving, 1);
1364 auto& to_collect = gc.get_extents_to_collect();
1365 ASSERT_EQ(to_collect.num_intervals(), 1u);
1366 {
1367 auto it = to_collect.begin();
1368 using p = decltype(*it);
1369 auto v = p{100ul, 10ul};
1370 ASSERT_EQ(*it, v);
1371 }
1372 em.clear();
1373 clear_and_dispose(old_extents);
1374 }
1375 /*
1376 original disposition
1377 min_alloc_size = 0x10000
1378 extent1 <loffs = 0, boffs = 0, len = 0x40000>
1379 -> blob1<compressed, len_on_disk=0x20000, logical_len=0x40000>
1380 Write 0x8000~37000 resulted in the following extent map prior to GC
1381 for the last write_small(0x30000~0xf000):
1382
1383 extent1 <loffs = 0, boffs = 0, len = 0x8000>
1384 -> blob1<compressed, len_on_disk=0x20000, logical_len=0x40000>
1385 extent2 <loffs = 0x8000, boffs = 0x8000, len = 0x8000>
1386 -> blob2<raw, len_on_disk=0x10000, llen=0x10000>
1387 extent3 <loffs = 0x10000, boffs = 0, len = 0x20000>
1388 -> blob3<raw, len_on_disk=0x20000, llen=0x20000>
1389 extent4 <loffs = 0x30000, boffs = 0, len = 0xf000>
1390 -> blob4<raw, len_on_disk=0x10000, llen=0x10000>
1391 extent5 <loffs = 0x3f000, boffs = 0x3f000, len = 0x1000>
1392 -> blob1<compressed, len_on_disk=0x20000, llen=0x40000>
1393 */
1394 {
1395 BlueStore store(g_ceph_context, "", 0x10000);
1396 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
1397 BlueStore::Onode onode(coll.get(), ghobject_t(), "");
1398 BlueStore::ExtentMap em(&onode,
1399 g_ceph_context->_conf->bluestore_extent_map_inline_shard_prealloc_size);
1400
1401 BlueStore::old_extent_map_t old_extents;
1402 BlueStore::GarbageCollector gc(g_ceph_context);
1403 int64_t saving;
1404 BlueStore::BlobRef b1(new BlueStore::Blob);
1405 BlueStore::BlobRef b2(new BlueStore::Blob);
1406 BlueStore::BlobRef b3(new BlueStore::Blob);
1407 BlueStore::BlobRef b4(new BlueStore::Blob);
1408 b1->shared_blob = new BlueStore::SharedBlob(coll.get());
1409 b2->shared_blob = new BlueStore::SharedBlob(coll.get());
1410 b3->shared_blob = new BlueStore::SharedBlob(coll.get());
1411 b4->shared_blob = new BlueStore::SharedBlob(coll.get());
1412 b1->dirty_blob().set_compressed(0x40000, 0x20000);
1413 b1->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x20000));
1414 b2->dirty_blob().allocated_test(bluestore_pextent_t(1, 0x10000));
1415 b3->dirty_blob().allocated_test(bluestore_pextent_t(2, 0x20000));
1416 b4->dirty_blob().allocated_test(bluestore_pextent_t(3, 0x10000));
1417
1418 em.extent_map.insert(*new BlueStore::Extent(0, 0, 0x8000, b1));
1419 b1->get_ref(coll.get(), 0, 0x8000);
1420 em.extent_map.insert(
1421 *new BlueStore::Extent(0x8000, 0x8000, 0x8000, b2)); // new extent
1422 b2->get_ref(coll.get(), 0x8000, 0x8000);
1423 em.extent_map.insert(
1424 *new BlueStore::Extent(0x10000, 0, 0x20000, b3)); // new extent
1425 b3->get_ref(coll.get(), 0, 0x20000);
1426 em.extent_map.insert(
1427 *new BlueStore::Extent(0x30000, 0, 0xf000, b4)); // new extent
1428 b4->get_ref(coll.get(), 0, 0xf000);
1429 em.extent_map.insert(*new BlueStore::Extent(0x3f000, 0x3f000, 0x1000, b1));
1430 b1->get_ref(coll.get(), 0x3f000, 0x1000);
1431
1432 old_extents.push_back(*new BlueStore::OldExtent(0x8000, 0x8000, 0x8000, b1));
1433 old_extents.push_back(
1434 *new BlueStore::OldExtent(0x10000, 0x10000, 0x20000, b1));
1435 old_extents.push_back(*new BlueStore::OldExtent(0x30000, 0x30000, 0xf000, b1));
1436
1437 saving = gc.estimate(0x30000, 0xf000, em, old_extents, 0x10000);
1438 ASSERT_EQ(saving, 2);
1439 auto& to_collect = gc.get_extents_to_collect();
1440 ASSERT_EQ(to_collect.num_intervals(), 2u);
1441 {
1442 auto it1 = to_collect.begin();
1443 auto it2 = ++to_collect.begin();
1444 using p = decltype(*it1);
1445 {
1446 auto v1 = p{0x0ul, 0x8000ul};
1447 auto v2 = p{0x0ul, 0x8000ul};
1448 ASSERT_TRUE(*it1 == v1 || *it2 == v2);
1449 }
1450 {
1451 auto v1 = p{0x3f000ul, 0x1000ul};
1452 auto v2 = p{0x3f000ul, 0x1000ul};
1453 ASSERT_TRUE(*it1 == v1 || *it2 == v2);
1454 }
1455 }
1456
1457 em.clear();
1458 clear_and_dispose(old_extents);
1459 }
1460 /*
1461 original disposition
1462 min_alloc_size = 0x1000
1463 extent1 <loffs = 0, boffs = 0, len = 0x4000>
1464 -> blob1<compressed, len_on_disk=0x2000, logical_len=0x4000>
1465 write 0x3000~4000 resulted in the following extent map
1466 (future feature: assume the incoming write can be compressed before
1467 GC is invoked)
1468
1469 extent1 <loffs = 0, boffs = 0, len = 0x4000>
1470 -> blob1<compressed, len_on_disk=0x2000, logical_len=0x4000>
1471 extent2 <loffs = 0x3000, boffs = 0, len = 0x4000>
1472 -> blob2<compressed, len_on_disk=0x2000, llen=0x4000>
1473 */
1474 {
1475 BlueStore::GarbageCollector gc(g_ceph_context);
1476 int64_t saving;
1477 BlueStore::BlobRef b1(new BlueStore::Blob);
1478 BlueStore::BlobRef b2(new BlueStore::Blob);
1479 b1->shared_blob = new BlueStore::SharedBlob(coll.get());
1480 b2->shared_blob = new BlueStore::SharedBlob(coll.get());
1481 b1->dirty_blob().set_compressed(0x4000, 0x2000);
1482 b1->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x2000));
1483 b2->dirty_blob().set_compressed(0x4000, 0x2000);
1484 b2->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x2000));
1485
1486 em.extent_map.insert(*new BlueStore::Extent(0, 0, 0x3000, b1));
1487 b1->get_ref(coll.get(), 0, 0x3000);
1488 em.extent_map.insert(
1489 *new BlueStore::Extent(0x3000, 0, 0x4000, b2)); // new extent
1490 b2->get_ref(coll.get(), 0, 0x4000);
1491
1492 old_extents.push_back(*new BlueStore::OldExtent(0x3000, 0x3000, 0x1000, b1));
1493
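// Both blobs are already compressed and only blob1's tail was overwritten, so
// no net gain is expected here: the estimate should be 0 and nothing should be
// scheduled for collection.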
1494 saving = gc.estimate(0x3000, 0x4000, em, old_extents, 0x1000);
1495 ASSERT_EQ(saving, 0);
1496 auto& to_collect = gc.get_extents_to_collect();
1497 ASSERT_EQ(to_collect.num_intervals(), 0u);
1498 em.clear();
1499 clear_and_dispose(old_extents);
1500 }
1501 /*
1502 original disposition
1503 min_alloc_size = 0x10000
1504 extent0 <loffs = 0, boffs = 0, len = 0x20000>
1505 -> blob0<compressed, len_on_disk=0x10000, logical_len=0x20000>
1506 extent1 <loffs = 0x20000, boffs = 0, len = 0x20000>
1507 -> blob1<compressed, len_on_disk=0x10000, logical_len=0x20000>
1508 write 0x8000~37000 resulted in the following extent map prior
1509 to GC for the last write_small(0x30000~0xf000)
1510
1511 extent0 <loffs = 0, boffs = 0, len = 0x8000>
1512 -> blob0<compressed, len_on_disk=0x10000, logical_len=0x20000>
1513 extent2 <loffs = 0x8000, boffs = 0x8000, len = 0x8000>
1514 -> blob2<raw, len_on_disk=0x10000, llen=0x10000>
1515 extent3 <loffs = 0x10000, boffs = 0, len = 0x20000>
1516 -> blob3<raw, len_on_disk=0x20000, llen=0x20000>
1517 extent4 <loffs = 0x30000, boffs = 0, len = 0xf000>
1518 -> blob4<raw, len_on_disk=0x1000, llen=0x1000>
1519 extent5 <loffs = 0x3f000, boffs = 0x1f000, len = 0x1000>
1520 -> blob1<compressed, len_on_disk=0x10000, llen=0x20000>
1521 */
1522 {
1523 BlueStore store(g_ceph_context, "", 0x10000);
1524 auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
1525 BlueStore::Onode onode(coll.get(), ghobject_t(), "");
1526 BlueStore::ExtentMap em(&onode,
1527 g_ceph_context->_conf->bluestore_extent_map_inline_shard_prealloc_size);
1528
1529 BlueStore::old_extent_map_t old_extents;
1530 BlueStore::GarbageCollector gc(g_ceph_context);
1531 int64_t saving;
1532 BlueStore::BlobRef b0(new BlueStore::Blob);
1533 BlueStore::BlobRef b1(new BlueStore::Blob);
1534 BlueStore::BlobRef b2(new BlueStore::Blob);
1535 BlueStore::BlobRef b3(new BlueStore::Blob);
1536 BlueStore::BlobRef b4(new BlueStore::Blob);
1537 b0->shared_blob = new BlueStore::SharedBlob(coll.get());
1538 b1->shared_blob = new BlueStore::SharedBlob(coll.get());
1539 b2->shared_blob = new BlueStore::SharedBlob(coll.get());
1540 b3->shared_blob = new BlueStore::SharedBlob(coll.get());
1541 b4->shared_blob = new BlueStore::SharedBlob(coll.get());
1542 b0->dirty_blob().set_compressed(0x2000, 0x1000);
1543 b0->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x10000));
1544 b1->dirty_blob().set_compressed(0x20000, 0x10000);
1545 b1->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x10000));
1546 b2->dirty_blob().allocated_test(bluestore_pextent_t(1, 0x10000));
1547 b3->dirty_blob().allocated_test(bluestore_pextent_t(2, 0x20000));
1548 b4->dirty_blob().allocated_test(bluestore_pextent_t(3, 0x1000));
1549
1550 em.extent_map.insert(*new BlueStore::Extent(0, 0, 0x8000, b0));
1551 b0->get_ref(coll.get(), 0, 0x8000);
1552 em.extent_map.insert(
1553 *new BlueStore::Extent(0x8000, 0x8000, 0x8000, b2)); // new extent
1554 b2->get_ref(coll.get(), 0x8000, 0x8000);
1555 em.extent_map.insert(
1556 *new BlueStore::Extent(0x10000, 0, 0x20000, b3)); // new extent
1557 b3->get_ref(coll.get(), 0, 0x20000);
1558 em.extent_map.insert(
1559 *new BlueStore::Extent(0x30000, 0, 0xf000, b4)); // new extent
1560 b4->get_ref(coll.get(), 0, 0xf000);
1561 em.extent_map.insert(*new BlueStore::Extent(0x3f000, 0x1f000, 0x1000, b1));
1562 b1->get_ref(coll.get(), 0x1f000, 0x1000);
1563
1564 old_extents.push_back(*new BlueStore::OldExtent(0x8000, 0x8000, 0x8000, b0));
1565 old_extents.push_back(
1566 *new BlueStore::OldExtent(0x10000, 0x10000, 0x10000, b0));
1567 old_extents.push_back(
1568 *new BlueStore::OldExtent(0x20000, 0x00000, 0x1f000, b1));
1569
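// Same write pattern as the first compressed-blob case above, but the
// overwritten data originally lived in two separate compressed blobs (b0/b1)
// rather than one; the expected benefit and the ranges to collect are the same.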
1570 saving = gc.estimate(0x30000, 0xf000, em, old_extents, 0x10000);
1571 ASSERT_EQ(saving, 2);
1572 auto& to_collect = gc.get_extents_to_collect();
1573 ASSERT_EQ(to_collect.num_intervals(), 2u);
1574 {
1575 auto it1 = to_collect.begin();
1576 auto it2 = ++to_collect.begin();
1577 using p = decltype(*it1);
1578 {
1579 auto v1 = p{0x0ul, 0x8000ul};
1580 auto v2 = p{0x0ul, 0x8000ul};
1581 ASSERT_TRUE(*it1 == v1 || *it2 == v2);
1582 }
1583 {
1584 auto v1 = p{0x3f000ul, 0x1000ul};
1585 auto v2 = p{0x3f000ul, 0x1000ul};
1586 ASSERT_TRUE(*it1 == v1 || *it2 == v2);
1587 }
1588 }
1589
1590 em.clear();
1591 clear_and_dispose(old_extents);
1592 }
1593 }
1594
1595 TEST(BlueStoreRepairer, StoreSpaceTracker)
1596 {
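// StoreSpaceTracker keeps per-chunk filters for collections and objects
// (collections_bfs / objects_bfs, presumably bloom filters given the suffix).
// Hedged reading of the two init() calls below: without an explicit memory cap
// the chunk granularity is coarsened (2 MiB for a 4096 GiB store, i.e.
// 2048 * 1024 chunks), while the 512 KiB cap on the small store keeps the
// granularity at min_alloc_size (0x1000).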
1597 BlueStoreRepairer::StoreSpaceTracker bmap0;
1598 bmap0.init((uint64_t)4096 * 1024 * 1024 * 1024, 0x1000);
1599 ASSERT_EQ(bmap0.granularity, 2 * 1024 * 1024U);
1600 ASSERT_EQ(bmap0.collections_bfs.size(), 2048u * 1024u);
1601 ASSERT_EQ(bmap0.objects_bfs.size(), 2048u * 1024u);
1602
1603 BlueStoreRepairer::StoreSpaceTracker bmap;
1604 bmap.init(0x2000 * 0x1000 - 1, 0x1000, 512 * 1024);
1605 ASSERT_EQ(bmap.granularity, 0x1000u);
1606 ASSERT_EQ(bmap.collections_bfs.size(), 0x2000u);
1607 ASSERT_EQ(bmap.objects_bfs.size(), 0x2000u);
1608
1609 coll_t cid;
1610 ghobject_t hoid;
1611
1612 ASSERT_FALSE(bmap.is_used(cid, 0));
1613 ASSERT_FALSE(bmap.is_used(hoid, 0));
1614 bmap.set_used(0, 1, cid, hoid);
1615 ASSERT_TRUE(bmap.is_used(cid, 0));
1616 ASSERT_TRUE(bmap.is_used(hoid, 0));
1617
1618 ASSERT_FALSE(bmap.is_used(cid, 0x1023));
1619 ASSERT_FALSE(bmap.is_used(hoid, 0x1023));
1620 ASSERT_FALSE(bmap.is_used(cid, 0x2023));
1621 ASSERT_FALSE(bmap.is_used(hoid, 0x2023));
1622 ASSERT_FALSE(bmap.is_used(cid, 0x3023));
1623 ASSERT_FALSE(bmap.is_used(hoid, 0x3023));
1624 bmap.set_used(0x1023, 0x3000, cid, hoid);
1625 ASSERT_TRUE(bmap.is_used(cid, 0x1023));
1626 ASSERT_TRUE(bmap.is_used(hoid, 0x1023));
1627 ASSERT_TRUE(bmap.is_used(cid, 0x2023));
1628 ASSERT_TRUE(bmap.is_used(hoid, 0x2023));
1629 ASSERT_TRUE(bmap.is_used(cid, 0x3023));
1630 ASSERT_TRUE(bmap.is_used(hoid, 0x3023));
1631
1632 ASSERT_FALSE(bmap.is_used(cid, 0x9001));
1633 ASSERT_FALSE(bmap.is_used(hoid, 0x9001));
1634 ASSERT_FALSE(bmap.is_used(cid, 0xa001));
1635 ASSERT_FALSE(bmap.is_used(hoid, 0xa001));
1636 ASSERT_FALSE(bmap.is_used(cid, 0xb000));
1637 ASSERT_FALSE(bmap.is_used(hoid, 0xb000));
1638 ASSERT_FALSE(bmap.is_used(cid, 0xc000));
1639 ASSERT_FALSE(bmap.is_used(hoid, 0xc000));
1640 bmap.set_used(0x9001, 0x2fff, cid, hoid);
1641 ASSERT_TRUE(bmap.is_used(cid, 0x9001));
1642 ASSERT_TRUE(bmap.is_used(hoid, 0x9001));
1643 ASSERT_TRUE(bmap.is_used(cid, 0xa001));
1644 ASSERT_TRUE(bmap.is_used(hoid, 0xa001));
1645 ASSERT_TRUE(bmap.is_used(cid, 0xb001));
1646 ASSERT_TRUE(bmap.is_used(hoid, 0xb001));
1647 ASSERT_FALSE(bmap.is_used(cid, 0xc000));
1648 ASSERT_FALSE(bmap.is_used(hoid, 0xc000));
1649
1650 bmap.set_used(0xa001, 0x2, cid, hoid);
1651 ASSERT_TRUE(bmap.is_used(cid, 0x9001));
1652 ASSERT_TRUE(bmap.is_used(hoid, 0x9001));
1653 ASSERT_TRUE(bmap.is_used(cid, 0xa001));
1654 ASSERT_TRUE(bmap.is_used(hoid, 0xa001));
1655 ASSERT_TRUE(bmap.is_used(cid, 0xb001));
1656 ASSERT_TRUE(bmap.is_used(hoid, 0xb001));
1657 ASSERT_FALSE(bmap.is_used(cid, 0xc000));
1658 ASSERT_FALSE(bmap.is_used(hoid, 0xc000));
1659
1660 ASSERT_FALSE(bmap.is_used(cid, 0xc0000));
1661 ASSERT_FALSE(bmap.is_used(hoid, 0xc0000));
1662 ASSERT_FALSE(bmap.is_used(cid, 0xc1000));
1663 ASSERT_FALSE(bmap.is_used(hoid, 0xc1000));
1664
1665 bmap.set_used(0xc0000, 0x2000, cid, hoid);
1666 ASSERT_TRUE(bmap.is_used(cid, 0xc0000));
1667 ASSERT_TRUE(bmap.is_used(hoid, 0xc0000));
1668 ASSERT_TRUE(bmap.is_used(cid, 0xc1000));
1669 ASSERT_TRUE(bmap.is_used(hoid, 0xc1000));
1670
1671 interval_set<uint64_t> extents;
1672 extents.insert(0,0x500);
1673 extents.insert(0x800,0x100);
1674 extents.insert(0x1000,0x1000);
1675 extents.insert(0xa001,1);
1676 extents.insert(0xa0000,0xff8);
1677
1678 ASSERT_EQ(3u, bmap.filter_out(extents));
1679 ASSERT_TRUE(bmap.is_used(cid));
1680 ASSERT_TRUE(bmap.is_used(hoid));
1681
1682 BlueStoreRepairer::StoreSpaceTracker bmap2;
1683 bmap2.init((uint64_t)0x3223b1d1000, 0x10000);
1684 ASSERT_EQ(0x1a0000u, bmap2.granularity);
1685 ASSERT_EQ(0x1edae4u, bmap2.collections_bfs.size());
1686 ASSERT_EQ(0x1edae4u, bmap2.objects_bfs.size());
1687 bmap2.set_used(0x3223b190000, 0x10000, cid, hoid);
1688 ASSERT_TRUE(bmap2.is_used(cid, 0x3223b190000));
1689 ASSERT_TRUE(bmap2.is_used(hoid, 0x3223b190000));
1690 ASSERT_TRUE(bmap2.is_used(cid, 0x3223b19f000));
1691 ASSERT_TRUE(bmap2.is_used(hoid, 0x3223b19ffff));
1692 }
1693
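// bluestore_blob_t keeps a coarse "unused" map over its logical space:
// add_unused() marks ranges that have never been written, mark_used() clears
// that state and is_unused() queries it. The cases below mimic the bookkeeping
// _do_write_small performs for freshly allocated and reused blobs.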
1694 TEST(bluestore_blob_t, unused)
1695 {
1696 {
1697 bluestore_blob_t b;
1698 uint64_t min_alloc_size = 64 << 10; // 64 kB
1699
1700 // _do_write_small 0x0~1000
1701 uint64_t offset = 0x0;
1702 uint64_t length = 0x1000; // 4kB
1703 uint64_t suggested_boff = 0;
1704 PExtentVector extents;
1705 extents.emplace_back(0x1a560000, min_alloc_size);
1706 b.allocated(p2align(suggested_boff, min_alloc_size), 0 /*no matter*/, extents);
1707 b.mark_used(offset, length);
1708 ASSERT_FALSE(b.is_unused(offset, length));
1709
1710 // _do_write_small 0x2000~1000
1711 offset = 0x2000;
1712 length = 0x1000;
1713 b.add_unused(0, 0x10000);
1714 ASSERT_TRUE(b.is_unused(offset, length));
1715 b.mark_used(offset, length);
1716 ASSERT_FALSE(b.is_unused(offset, length));
1717
1718 // _do_write_small 0xc000~2000
1719 offset = 0xc000;
1720 length = 0x2000;
1721 ASSERT_TRUE(b.is_unused(offset, length));
1722 b.mark_used(offset, length);
1723 ASSERT_FALSE(b.is_unused(offset, length));
1724 }
1725
1726 {
1727 bluestore_blob_t b;
1728 uint64_t min_alloc_size = 64 << 10; // 64 kB
1729
1730 // _do_write_small 0x11000~1000
1731 uint64_t offset = 0x11000;
1732 uint64_t length = 0x1000; // 4kB
1733 uint64_t suggested_boff = 0x11000;
1734 PExtentVector extents;
1735 extents.emplace_back(0x1a560000, min_alloc_size);
1736 b.allocated(p2align(suggested_boff, min_alloc_size), 0 /*no matter*/, extents);
1737 b.add_unused(0, offset);
1738 b.add_unused(offset + length, min_alloc_size * 2 - offset - length);
1739 b.mark_used(offset, length);
1740 ASSERT_FALSE(b.is_unused(offset, length));
1741
1742 // _do_write_small 0x15000~3000
1743 offset = 0x15000;
1744 length = 0x3000;
1745 ASSERT_TRUE(b.is_unused(offset, length));
1746 b.mark_used(offset, length);
1747 ASSERT_FALSE(b.is_unused(offset, length));
1748 }
1749
1750 {
1751 // reuse blob
1752 bluestore_blob_t b;
1753 uint64_t min_alloc_size = 64 << 10; // 64 kB
1754
1755 // _do_write_small 0x2a000~1000
1756 // and 0x1d000~1000
1757 uint64_t unused_granularity = 0x3000;
1758 // offsets and lengths below are selected to
1759 // be aligned with unused_granularity
1760 uint64_t offset0 = 0x2a000;
1761 uint64_t offset = 0x1d000;
1762 uint64_t length = 0x1000; // 4kB
1763 PExtentVector extents;
1764 extents.emplace_back(0x410000, min_alloc_size);
1765 b.allocated(p2align(offset0, min_alloc_size), min_alloc_size, extents);
1766 b.add_unused(0, min_alloc_size * 3);
1767 b.mark_used(offset0, length);
1768 ASSERT_FALSE(b.is_unused(offset0, length));
1769 ASSERT_TRUE(b.is_unused(offset, length));
1770
1771 extents.clear();
1772 extents.emplace_back(0x430000, min_alloc_size);
1773 b.allocated(p2align(offset, min_alloc_size), min_alloc_size, extents);
1774 b.mark_used(offset, length);
1775 ASSERT_FALSE(b.is_unused(offset0, length));
1776 ASSERT_FALSE(b.is_unused(offset, length));
1777 ASSERT_FALSE(b.is_unused(offset, unused_granularity));
1778
1779 ASSERT_TRUE(b.is_unused(0, offset / unused_granularity * unused_granularity));
1780 ASSERT_TRUE(b.is_unused(offset + length, offset0 - offset - length));
1781 auto end0_aligned = round_up_to(offset0 + length, unused_granularity);
1782 ASSERT_TRUE(b.is_unused(end0_aligned, min_alloc_size * 3 - end0_aligned));
1783 }
1784 }
1785 // This UT is primarily intended to show how the repair procedure
1786 // causes an erroneous write to INVALID_OFFSET, as reported in
1787 // https://tracker.ceph.com/issues/51682
1788 // Basic map_any functionality is tested as well though.
1789 //
1790 TEST(bluestore_blob_t, wrong_map_bl_in_51682)
1791 {
1792 {
1793 bluestore_blob_t b;
1794 uint64_t min_alloc_size = 4 << 10; // 4 kB
1795
1796 b.allocated_test(bluestore_pextent_t(0x17ba000, 4 * min_alloc_size));
1797 b.allocated_test(bluestore_pextent_t(0x17bf000, 4 * min_alloc_size));
1798 b.allocated_test(
1799 bluestore_pextent_t(
1800 bluestore_pextent_t::INVALID_OFFSET,
1801 1 * min_alloc_size));
1802 b.allocated_test(bluestore_pextent_t(0x153c44d000, 7 * min_alloc_size));
1803
1804 b.mark_used(0, 0x8000);
1805 b.mark_used(0x9000, 0x7000);
1806
1807 string s(0x7000, 'a');
1808 bufferlist bl;
1809 bl.append(s);
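// map_bl() maps the bufferlist onto the blob's physical extents and invokes
// the callback once per fragment. The first call (offset 0) splits the 0x7000
// buffer across the first two pextents; the second call starts at the improper
// offset 0x5000, so it continues through the tail of the second pextent, the
// INVALID_OFFSET pextent and into the last one - reproducing the bogus write
// described in the tracker issue above.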
1810 const size_t num_expected_entries = 5;
1811 uint64_t expected[num_expected_entries][2] = {
1812 {0x17ba000, 0x4000},
1813 {0x17bf000, 0x3000},
1814 {0x17c0000, 0x3000},
1815 {0xffffffffffffffff, 0x1000},
1816 {0x153c44d000, 0x3000}};
1817 size_t expected_pos = 0;
1818 b.map_bl(0, bl,
1819 [&](uint64_t o, bufferlist& bl) {
1820 ASSERT_EQ(o, expected[expected_pos][0]);
1821 ASSERT_EQ(bl.length(), expected[expected_pos][1]);
1822 ++expected_pos;
1823 });
1824 // 0x5000 is an improper offset presumably provided when doing a repair
1825 b.map_bl(0x5000, bl,
1826 [&](uint64_t o, bufferlist& bl) {
1827 ASSERT_EQ(o, expected[expected_pos][0]);
1828 ASSERT_EQ(bl.length(), expected[expected_pos][1]);
1829 ++expected_pos;
1830 });
1831 ASSERT_EQ(expected_pos, num_expected_entries);
1832 }
1833 }
1834
1835 //---------------------------------------------------------------------------------
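// Compares the extent returned by the bitmap against the expected entry at
// ext_arr[idx]; on mismatch it prints the neighbouring reference extents to
// ease debugging and returns -1.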
1836 static int verify_extent(const extent_t & ext, const extent_t *ext_arr, uint64_t ext_arr_size, uint64_t idx)
1837 {
1838 const extent_t & ext_ref = ext_arr[idx];
1839 if (ext.offset == ext_ref.offset && ext.length == ext_ref.length) {
1840 return 0;
1841 } else {
1842 std::cerr << "mismatch was found at index " << idx << std::endl;
1843 if (ext.length == 0) {
1844 std::cerr << "Null extent was returned at idx = " << idx << std::endl;
1845 }
1846 unsigned start = std::max(((int32_t)(idx)-3), 0);
1847 unsigned end = std::min(idx+3, ext_arr_size);
1848 for (unsigned j = start; j < end; j++) {
1849 const extent_t & ext_ref = ext_arr[j];
1850 std::cerr << j << ") ref_ext = [" << ext_ref.offset << ", " << ext_ref.length << "]" << std::endl;
1851 }
1852 std::cerr << idx << ") ext = [" << ext.offset << ", " << ext.length << "]" << std::endl;
1853 return -1;
1854 }
1855 }
1856
1857 //---------------------------------------------------------------------------------
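// Applies a pseudo-random sequence of set()/clr() calls to the bitmap while
// recording the expected extents in ext_arr (ranges with no jump in between
// are merged), then replays get_next_set_extent()/get_next_clr_extent() and
// verifies both views agree.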
1858 static int test_extents(uint64_t index, extent_t *ext_arr, uint64_t ext_arr_size, SimpleBitmap& sbmap, bool set)
1859 {
1860 const uint64_t MAX_JUMP_BIG = 1523;
1861 const uint64_t MAX_JUMP_SMALL = 19;
1862 const uint64_t MAX_LEN_BIG = 523;
1863 const uint64_t MAX_LEN_SMALL = 23;
1864
1865 uint64_t n = sbmap.get_size();
1866 uint64_t offset = 0;
1867 unsigned length, jump, i;
1868 for (i = 0; i < ext_arr_size; i++) {
1869 if (i & 3) {
1870 jump = std::rand() % MAX_JUMP_BIG;
1871 } else {
1872 jump = std::rand() % MAX_JUMP_SMALL;
1873 }
1874 offset += jump;
1875 if (i & 1) {
1876 length = std::rand() % MAX_LEN_BIG;
1877 } else {
1878 length = std::rand() % MAX_LEN_SMALL;
1879 }
1880 // make sure a zero length is never used
1881 length++;
1882 if (offset + length >= n) {
1883 break;
1884 }
1885
1886 bool success;
1887 if (set) {
1888 success = sbmap.set(offset, length);
1889 } else {
1890 success = sbmap.clr(offset, length);
1891 }
1892 if (!success) {
1893 std::cerr << "Failed sbmap." << (set ? "set(" : "clr(") << offset << ", " << length << ")"<< std::endl;
1894 return -1;
1895 }
1896
1897 // if this is not the first entry and there was no jump -> merge with the previous extent
1898 if ( (i==0) || (jump > 0) ) {
1899 ext_arr[i] = {offset, length};
1900 } else {
1901 // merge 2 extents
1902 i --;
1903 ext_arr[i].length += length;
1904 }
1905 offset += length;
1906 }
1907 unsigned arr_size = std::min((uint64_t)i, ext_arr_size);
1908 std::cout << std::hex << std::right;
1909 std::cout << "[" << index << "] " << (set ? "Set::" : "Clr::") << " extents count = 0x" << arr_size;
1910 std::cout << std::dec << std::endl;
1911
1912 offset = 0;
1913 extent_t ext;
1914 for(unsigned i = 0; i < arr_size; i++) {
1915 if (set) {
1916 ext = sbmap.get_next_set_extent(offset);
1917 } else {
1918 ext = sbmap.get_next_clr_extent(offset);
1919 }
1920
1921 if (verify_extent(ext, ext_arr, ext_arr_size, i) != 0) {
1922 return -1;
1923 }
1924 offset = ext.offset + ext.length;
1925 }
1926
1927 if (set) {
1928 ext = sbmap.get_next_set_extent(offset);
1929 } else {
1930 ext = sbmap.get_next_clr_extent(offset);
1931 }
1932 if (ext.length == 0) {
1933 return 0;
1934 } else {
1935 std::cerr << "sbmap.get_next_" << (set ? "set" : "clr") << "_extent(" << offset << ") return length = " << ext.length << std::endl;
1936 return -1;
1937 }
1938 }
1939
1940 //---------------------------------------------------------------------------------
1941 TEST(SimpleBitmap, basic)
1942 {
1943 const uint64_t MAX_EXTENTS_COUNT = 7131177;
1944 std::unique_ptr<extent_t[]> ext_arr = std::make_unique<extent_t[]>(MAX_EXTENTS_COUNT);
1945 ASSERT_TRUE(ext_arr != nullptr);
1946 const uint64_t BIT_COUNT = 4ULL << 30; // 4Gb = 512MB
1947 SimpleBitmap sbmap(g_ceph_context, BIT_COUNT);
1948
1949 // use current time as seed for random generator
1950 std::srand(std::time(nullptr));
1951 for (unsigned i = 0; i < 3; i++ ) {
1952 memset(ext_arr.get(), 0, sizeof(extent_t)*MAX_EXTENTS_COUNT);
1953 sbmap.clear_all();
1954 ASSERT_TRUE(test_extents(i, ext_arr.get(), MAX_EXTENTS_COUNT, sbmap, true) == 0);
1955
1956 memset(ext_arr.get(), 0, sizeof(extent_t)*MAX_EXTENTS_COUNT);
1957 sbmap.set_all();
1958 ASSERT_TRUE(test_extents(i, ext_arr.get(), MAX_EXTENTS_COUNT, sbmap, false) == 0);
1959 }
1960 }
1961
1962 //---------------------------------------------------------------------------------
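// Mirrors random set()/clr() operations into a plain byte-per-bit shadow map,
// then cross-checks individual bits via bit_is_set()/bit_is_clr() and walks
// the set/clear extents to make sure they never contradict the shadow map.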
1963 static int test_intersections(unsigned test_idx, SimpleBitmap &sbmap, uint8_t map[], uint64_t map_size)
1964 {
1965 const uint64_t MAX_LEN_BIG = 523;
1966 const uint64_t MAX_LEN_SMALL = 23;
1967
1968 bool success;
1969 uint64_t set_op_count = 0, clr_op_count = 0;
1970 unsigned length, i;
1971 for (i = 0; i < map_size / (MAX_LEN_BIG*2); i++) {
1972 uint64_t offset = (std::rand() % (map_size - 1));
1973 if (i & 1) {
1974 length = std::rand() % MAX_LEN_BIG;
1975 } else {
1976 length = std::rand() % MAX_LEN_SMALL;
1977 }
1978 // make sure a zero length is never used
1979 length++;
1980 if (offset + length >= map_size) {
1981 continue;
1982 }
1983 // 2:1 set/clr
1984 bool set = (std::rand() % 3);
1985 if (set) {
1986 success = sbmap.set(offset, length);
1987 memset(map+offset, 0xFF, length);
1988 set_op_count++;
1989 } else {
1990 success = sbmap.clr(offset, length);
1991 memset(map+offset, 0x0, length);
1992 clr_op_count++;
1993 }
1994 if (!success) {
1995 std::cerr << "Failed sbmap." << (set ? "set(" : "clr(") << offset << ", " << length << ")"<< std::endl;
1996 return -1;
1997 }
1998 }
1999
2000 uint64_t set_bit_count = 0;
2001 uint64_t clr_bit_count = 0;
2002 for(uint64_t idx = 0; idx < map_size; idx++) {
2003 if (map[idx]) {
2004 set_bit_count++;
2005 success = sbmap.bit_is_set(idx);
2006 } else {
2007 clr_bit_count++;
2008 success = sbmap.bit_is_clr(idx);
2009 }
2010 if (!success) {
2011 std::cerr << "expected: sbmap.bit_is_" << (map[idx] ? "set(" : "clr(") << idx << ")"<< std::endl;
2012 return -1;
2013 }
2014
2015 }
2016 std::cout << std::hex << std::right << __func__ ;
2017 std::cout << " [" << test_idx << "] set_bit_count = 0x" << std::setfill('0') << std::setw(8) << set_bit_count
2018 << ", clr_bit_count = 0x" << std::setfill('0') << std::setw(8) << clr_bit_count
2019 << ", sum = 0x" << set_bit_count + clr_bit_count << std::endl;
2020 std::cout << std::dec;
2021 uint64_t offset = 0;
2022 for(uint64_t i = 0; i < (set_op_count + clr_op_count); i++) {
2023 extent_t ext = sbmap.get_next_set_extent(offset);
2024 //std::cout << "set_ext:: " << i << ") [" << ext.offset << ", " << ext.length << "]" << std::endl;
2025 for (uint64_t idx = ext.offset; idx < ext.offset + ext.length; idx++) {
2026 if (map[idx] != 0xFF) {
2027 std::cerr << "map[" << idx << "] is clear, but extent [" << ext.offset << ", " << ext.length << "] is set" << std::endl;
2028 return -1;
2029 }
2030 }
2031 offset = ext.offset + ext.length;
2032 }
2033
2034 offset = 0;
2035 for(uint64_t i = 0; i < (set_op_count + clr_op_count); i++) {
2036 extent_t ext = sbmap.get_next_clr_extent(offset);
2037 //std::cout << "clr_ext:: " << i << ") [" << ext.offset << ", " << ext.length << "]" << std::endl;
2038 for (uint64_t idx = ext.offset; idx < ext.offset + ext.length; idx++) {
2039 if (map[idx] ) {
2040 std::cerr << "map[" << idx << "] is set, but extent [" << ext.offset << ", " << ext.length << "] is free" << std::endl;
2041 return -1;
2042 }
2043 }
2044 offset = ext.offset + ext.length;
2045 }
2046
2047 return 0;
2048 }
2049
2050 //---------------------------------------------------------------------------------
2051 TEST(SimpleBitmap, intersection)
2052 {
2053 const uint64_t MAP_SIZE = 1ULL << 30; // 1G
2054 SimpleBitmap sbmap(g_ceph_context, MAP_SIZE);
2055
2056 // use current time as seed for random generator
2057 std::srand(std::time(nullptr));
2058
2059 std::unique_ptr<uint8_t[]> map = std::make_unique<uint8_t[]> (MAP_SIZE);
2060 ASSERT_TRUE(map != nullptr);
2061
2062 for (unsigned i = 0; i < 1; i++ ) {
2063 sbmap.clear_all();
2064 memset(map.get(), 0, MAP_SIZE);
2065 ASSERT_TRUE(test_intersections(i, sbmap, map.get(), MAP_SIZE) == 0);
2066
2067 sbmap.set_all();
2068 memset(map.get(), 0xFF, MAP_SIZE);
2069 ASSERT_TRUE(test_intersections(i, sbmap, map.get(), MAP_SIZE) == 0);
2070 }
2071 }
2072
2073
2074 //---------------------------------------------------------------------------------
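// Exercises boundary handling: offsets advance in small steps while lengths
// sweep 1..128 bits, so extents start and end at every alignment around the
// bitmap's internal words (presumably 64-bit); verification matches
// test_extents() above.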
2075 static int test_extents_boundaries(uint64_t index, extent_t *ext_arr, uint64_t ext_arr_size, SimpleBitmap& sbmap, bool set)
2076 {
2077 uint64_t n = sbmap.get_size();
2078 uint64_t offset = 0, k = 0;
2079 for(unsigned i = 0; i < 64; i++) {
2080 offset += i;
2081 if (offset >= n) {
2082 break;
2083 }
2084
2085 for(unsigned length = 1; length <= 128; length++) {
2086 if (offset + length >= n) {
2087 break;
2088 }
2089
2090 if (k >= ext_arr_size) {
2091 break;
2092 }
2093 bool success;
2094 if (set) {
2095 success = sbmap.set(offset, length);
2096 } else {
2097 success = sbmap.clr(offset, length);
2098 }
2099 if (!success) {
2100 std::cerr << "Failed sbmap." << (set ? "set(" : "clr(") << offset << ", " << length << ")"<< std::endl;
2101 return -1;
2102 }
2103 ext_arr[k++] = {offset, length};
2104 if (length < 64) {
2105 offset += 64;
2106 } else {
2107 offset += 128;
2108 }
2109 }
2110 if (k >= ext_arr_size) {
2111 break;
2112 }
2113 }
2114
2115 unsigned arr_size = std::min((uint64_t)k, ext_arr_size);
2116 std::cout << std::hex << std::right << __func__ ;
2117 std::cout << " [" << index << "] " << (set ? "Set::" : "Clr::") << " extents count = 0x" << arr_size;
2118 std::cout << std::dec << std::endl;
2119
2120 offset = 0;
2121 extent_t ext;
2122 for(unsigned i = 0; i < arr_size; i++) {
2123 if (set) {
2124 ext = sbmap.get_next_set_extent(offset);
2125 } else {
2126 ext = sbmap.get_next_clr_extent(offset);
2127 }
2128
2129 if (verify_extent(ext, ext_arr, ext_arr_size, i) != 0) {
2130 return -1;
2131 }
2132 offset = ext.offset + ext.length;
2133 }
2134
2135 if (set) {
2136 ext = sbmap.get_next_set_extent(offset);
2137 } else {
2138 ext = sbmap.get_next_clr_extent(offset);
2139 }
2140 if (ext.length == 0) {
2141 return 0;
2142 } else {
2143 std::cerr << "sbmap.get_next_" << (set ? "set" : "clr") << "_extent(" << offset << ") return length = " << ext.length << std::endl;
2144 return -1;
2145 }
2146
2147 }
2148
2149 //---------------------------------------------------------------------------------
2150 TEST(SimpleBitmap, boundaries)
2151 {
2152 const uint64_t MAX_EXTENTS_COUNT = 64 << 10;
2153 std::unique_ptr<extent_t[]> ext_arr = std::make_unique<extent_t[]>(MAX_EXTENTS_COUNT);
2154 ASSERT_TRUE(ext_arr != nullptr);
2155
2156 // use current time as seed for random generator
2157 std::srand(std::time(nullptr));
2158
2159 uint64_t bit_count = 32 << 20; // 32Mb = 4MB
2160 unsigned count = 0;
2161 for (unsigned i = 0; i < 64; i++) {
2162 SimpleBitmap sbmap(g_ceph_context, bit_count+i);
2163 memset(ext_arr.get(), 0, sizeof(extent_t)*MAX_EXTENTS_COUNT);
2164 sbmap.clear_all();
2165 ASSERT_TRUE(test_extents_boundaries(count, ext_arr.get(), MAX_EXTENTS_COUNT, sbmap, true) == 0);
2166
2167 memset(ext_arr.get(), 0, sizeof(extent_t)*MAX_EXTENTS_COUNT);
2168 sbmap.set_all();
2169 ASSERT_TRUE(test_extents_boundaries(count++, ext_arr.get(), MAX_EXTENTS_COUNT, sbmap, false) == 0);
2170 }
2171 }
2172
2173 //---------------------------------------------------------------------------------
2174 TEST(SimpleBitmap, boundaries2)
2175 {
2176 const uint64_t bit_count_base = 64 << 10; // 64Kb = 8KB
2177 const extent_t null_extent = {0, 0};
2178
2179 for (unsigned i = 0; i < 64; i++) {
2180 uint64_t bit_count = bit_count_base + i;
2181 extent_t full_extent = {0, bit_count};
2182 SimpleBitmap sbmap(g_ceph_context, bit_count);
2183
2184 sbmap.set(0, bit_count);
2185 ASSERT_TRUE(sbmap.get_next_set_extent(0) == full_extent);
2186 ASSERT_TRUE(sbmap.get_next_clr_extent(0) == null_extent);
2187
2188 for (uint64_t bit = 0; bit < bit_count; bit++) {
2189 sbmap.clr(bit, 1);
2190 }
2191 ASSERT_TRUE(sbmap.get_next_set_extent(0) == null_extent);
2192 ASSERT_TRUE(sbmap.get_next_clr_extent(0) == full_extent);
2193
2194 for (uint64_t bit = 0; bit < bit_count; bit++) {
2195 sbmap.set(bit, 1);
2196 }
2197 ASSERT_TRUE(sbmap.get_next_set_extent(0) == full_extent);
2198 ASSERT_TRUE(sbmap.get_next_clr_extent(0) == null_extent);
2199
2200 sbmap.clr(0, bit_count);
2201 ASSERT_TRUE(sbmap.get_next_set_extent(0) == null_extent);
2202 ASSERT_TRUE(sbmap.get_next_clr_extent(0) == full_extent);
2203 }
2204 }
2205
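// shared_blob_2hash_tracker_t accumulates signed reference-count deltas keyed
// by (shared blob id, offset); count_non_zero() reports whether anything is
// left unbalanced. The sequences below check that the counter only returns to
// zero once every id/offset pair is balanced (the tracker is hash based, so
// this is what is expected for the distinct keys used here), and that
// test_all_zero()/test_all_zero_range() see exactly the offsets still carrying
// deltas.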
2206 TEST(shared_blob_2hash_tracker_t, basic_test)
2207 {
2208 shared_blob_2hash_tracker_t t1(1024 * 1024, 4096);
2209
2210 ASSERT_TRUE(t1.count_non_zero() == 0);
2211
2212 t1.inc(0, 0, 1);
2213 ASSERT_TRUE(t1.count_non_zero() != 0);
2214 t1.inc(0, 0, -1);
2215 ASSERT_TRUE(t1.count_non_zero() == 0);
2216
2217 t1.inc(3, 0x1000, 2);
2218 ASSERT_TRUE(t1.count_non_zero() != 0);
2219 t1.inc(3, 0x1000, -1);
2220 ASSERT_TRUE(t1.count_non_zero() != 0);
2221 t1.inc(3, 0x1000, -1);
2222 ASSERT_TRUE(t1.count_non_zero() == 0);
2223
2224 t1.inc(2, 0x2000, 5);
2225 ASSERT_TRUE(t1.count_non_zero() != 0);
2226 t1.inc(18, 0x2000, -5);
2227 ASSERT_TRUE(t1.count_non_zero() != 0);
2228 t1.inc(18, 0x2000, 1);
2229 ASSERT_TRUE(t1.count_non_zero() != 0);
2230 t1.inc(2, 0x2000, -1);
2231 ASSERT_TRUE(t1.count_non_zero() != 0);
2232 t1.inc(18, 0x2000, 4);
2233 ASSERT_TRUE(t1.count_non_zero() != 0);
2234 t1.inc(2, 0x2000, -4);
2235 ASSERT_TRUE(t1.count_non_zero() == 0);
2236
2237 t1.inc(3, 0x3000, 2);
2238 ASSERT_TRUE(t1.count_non_zero() != 0);
2239 t1.inc(4, 0x3000, -1);
2240 ASSERT_TRUE(t1.count_non_zero() != 0);
2241 t1.inc(4, 0x3000, -1);
2242 ASSERT_TRUE(t1.count_non_zero() != 0);
2243 t1.inc(3, 0x3000, -2);
2244 ASSERT_TRUE(t1.count_non_zero() != 0);
2245 t1.inc(4, 0x3000, 1);
2246 ASSERT_TRUE(t1.count_non_zero() != 0);
2247 t1.inc(4, 0x3000, 1);
2248 ASSERT_TRUE(t1.count_non_zero() == 0);
2249
2250 t1.inc(5, 0x1000, 1);
2251 t1.inc(5, 0x2000, 3);
2252 t1.inc(5, 0x3000, 2);
2253 t1.inc(5, 0x8000, 1);
2254
2255 ASSERT_TRUE(t1.count_non_zero() != 0);
2256
2257 ASSERT_TRUE(!t1.test_all_zero(5, 0x1000));
2258 ASSERT_TRUE(!t1.test_all_zero(5, 0x2000));
2259 ASSERT_TRUE(!t1.test_all_zero(5, 0x3000));
2260 ASSERT_TRUE(t1.test_all_zero(5, 0x4000));
2261 ASSERT_TRUE(!t1.test_all_zero(5, 0x8000));
2262
2263 ASSERT_TRUE(t1.test_all_zero_range(5, 0, 0x1000));
2264 ASSERT_TRUE(t1.test_all_zero_range(5, 0x500, 0x500));
2265 ASSERT_TRUE(!t1.test_all_zero_range(5, 0x500, 0x1500));
2266 ASSERT_TRUE(!t1.test_all_zero_range(5, 0x1500, 0x3200));
2267 ASSERT_TRUE(t1.test_all_zero_range(5, 0x4500, 0x1500));
2268 ASSERT_TRUE(t1.test_all_zero_range(5, 0x4500, 0x3b00));
2269 ASSERT_TRUE(!t1.test_all_zero_range(5, 0, 0x9000));
2270 }
2271
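// bluestore_blob_use_tracker_t keeps one reference counter per allocation
// unit, so init(1M, 4K) allocates 256 counters (4 bytes each, per the byte
// deltas asserted below) from the bluestore_cache_other mempool. The blocks
// below verify that add_tail(), prune_tail() and split() keep those mempool
// stats consistent and that destruction releases everything.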
2272 TEST(bluestore_blob_use_tracker_t, mempool_stats_test)
2273 {
2274 using mempool::bluestore_cache_other::allocated_items;
2275 using mempool::bluestore_cache_other::allocated_bytes;
2276 uint64_t other_items0 = allocated_items();
2277 uint64_t other_bytes0 = allocated_bytes();
2278 {
2279 bluestore_blob_use_tracker_t* t1 = new bluestore_blob_use_tracker_t;
2280
2281 t1->init(1024 * 1024, 4096);
2282 ASSERT_EQ(256, allocated_items() - other_items0); // = 1M / 4K
2283 ASSERT_EQ(1024, allocated_bytes() - other_bytes0); // = 1M / 4K * 4
2284
2285 delete t1;
2286 ASSERT_EQ(allocated_items(), other_items0);
2287 ASSERT_EQ(allocated_bytes(), other_bytes0);
2288 }
2289 {
2290 bluestore_blob_use_tracker_t* t1 = new bluestore_blob_use_tracker_t;
2291
2292 t1->init(1024 * 1024, 4096);
2293 t1->add_tail(2048 * 1024, 4096);
2294 // proper stats update after tail add
2295 ASSERT_EQ(512, allocated_items() - other_items0); // = 2M / 4K
2296 ASSERT_EQ(2048, allocated_bytes() - other_bytes0); // = 2M / 4K * 4
2297
2298 delete t1;
2299 ASSERT_EQ(allocated_items(), other_items0);
2300 ASSERT_EQ(allocated_bytes(), other_bytes0);
2301 }
2302 {
2303 bluestore_blob_use_tracker_t* t1 = new bluestore_blob_use_tracker_t;
2304
2305 t1->init(1024 * 1024, 4096);
2306 t1->prune_tail(512 * 1024);
2307 // no changes in stats after pruning
2308 ASSERT_EQ(256, allocated_items() - other_items0); // = 1M / 4K
2309 ASSERT_EQ(1024, allocated_bytes() - other_bytes0); // = 1M / 4K * 4
2310
2311 delete t1;
2312 ASSERT_EQ(allocated_items(), other_items0);
2313 ASSERT_EQ(allocated_bytes(), other_bytes0);
2314 }
2315 {
2316 bluestore_blob_use_tracker_t* t1 = new bluestore_blob_use_tracker_t;
2317 bluestore_blob_use_tracker_t* t2 = new bluestore_blob_use_tracker_t;
2318
2319 t1->init(1024 * 1024, 4096);
2320
2321 // t1 keeps the same number of entries and t2 gets half as many
2322 t1->split(512 * 1024, t2);
2323 ASSERT_EQ(256 + 128, allocated_items() - other_items0); //= 1M / 4K*1.5
2324 ASSERT_EQ(1024 + 512, allocated_bytes() - other_bytes0); //= 1M / 4K*4*1.5
2325
2326 // t1 & t2 release everything, then t2 gets one entry fewer than it had
2327 // before
2328 t1->split(4096, t2);
2329 ASSERT_EQ(127, allocated_items() - other_items0); // = 512K / 4K - 1
2330 ASSERT_EQ(127 * 4, allocated_bytes() - other_bytes0); // = 512K / 4K * 4 - 4
2331 delete t1;
2332 delete t2;
2333 ASSERT_EQ(allocated_items(), other_items0);
2334 ASSERT_EQ(allocated_bytes(), other_bytes0);
2335 }
2336 }
2337
2338 int main(int argc, char **argv) {
2339 auto args = argv_to_vec(argc, argv);
2340 auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
2341 CODE_ENVIRONMENT_UTILITY,
2342 CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
2343 common_init_finish(g_ceph_context);
2344 ::testing::InitGoogleTest(&argc, argv);
2345 return RUN_ALL_TESTS();
2346 }