+// Verify bluestore_blob_t unused-space tracking: the interplay of
+// add_unused(), mark_used() and is_unused() for freshly allocated and
+// reused blobs (offsets mimic _do_write_small call sites in BlueStore).
+TEST(bluestore_blob_t, unused)
+{
+ {
+ bluestore_blob_t b;
+ uint64_t min_alloc_size = 64 << 10; // 64 kB
+
+ // _do_write_small 0x0~1000
+ uint64_t offset = 0x0;
+ uint64_t length = 0x1000; // 4kB
+ uint64_t suggested_boff = 0;
+ PExtentVector extents;
+ extents.emplace_back(0x1a560000, min_alloc_size);
+ b.allocated(p2align(suggested_boff, min_alloc_size), 0 /*no matter*/, extents);
+ // without any add_unused() call the blob reports nothing as unused
+ b.mark_used(offset, length);
+ ASSERT_FALSE(b.is_unused(offset, length));
+
+ // _do_write_small 0x2000~1000
+ offset = 0x2000;
+ length = 0x1000;
+ // NOTE(review): this marks the whole first 64K chunk unused, including
+ // the 0x0~0x1000 range marked used above
+ b.add_unused(0, 0x10000);
+ ASSERT_TRUE(b.is_unused(offset, length));
+ b.mark_used(offset, length);
+ ASSERT_FALSE(b.is_unused(offset, length));
+
+ // _do_write_small 0xc000~2000
+ offset = 0xc000;
+ length = 0x2000;
+ ASSERT_TRUE(b.is_unused(offset, length));
+ b.mark_used(offset, length);
+ ASSERT_FALSE(b.is_unused(offset, length));
+ }
+
+ {
+ bluestore_blob_t b;
+ uint64_t min_alloc_size = 64 << 10; // 64 kB
+
+ // _do_write_small 0x11000~1000
+ uint64_t offset = 0x11000;
+ uint64_t length = 0x1000; // 4kB
+ uint64_t suggested_boff = 0x11000;
+ PExtentVector extents;
+ extents.emplace_back(0x1a560000, min_alloc_size);
+ b.allocated(p2align(suggested_boff, min_alloc_size), 0 /*no matter*/, extents);
+ // mark everything except [offset, offset + length) as unused
+ b.add_unused(0, offset);
+ b.add_unused(offset + length, min_alloc_size * 2 - offset - length);
+ b.mark_used(offset, length);
+ ASSERT_FALSE(b.is_unused(offset, length));
+
+ // _do_write_small 0x15000~3000
+ offset = 0x15000;
+ length = 0x3000;
+ ASSERT_TRUE(b.is_unused(offset, length));
+ b.mark_used(offset, length);
+ ASSERT_FALSE(b.is_unused(offset, length));
+ }
+
+ {
+ // reuse blob
+ bluestore_blob_t b;
+ uint64_t min_alloc_size = 64 << 10; // 64 kB
+
+ // _do_write_small 0x2a000~1000
+ // and 0x1d000~1000
+ uint64_t unused_granularity = 0x3000;
+ // offsets and length below are selected to
+ // be aligned with unused_granularity
+ uint64_t offset0 = 0x2a000;
+ uint64_t offset = 0x1d000;
+ uint64_t length = 0x1000; // 4kB
+ PExtentVector extents;
+ extents.emplace_back(0x410000, min_alloc_size);
+ b.allocated(p2align(offset0, min_alloc_size), min_alloc_size, extents);
+ b.add_unused(0, min_alloc_size * 3);
+ b.mark_used(offset0, length);
+ ASSERT_FALSE(b.is_unused(offset0, length));
+ ASSERT_TRUE(b.is_unused(offset, length));
+
+ // allocate a second chunk for the lower write and mark it used too
+ extents.clear();
+ extents.emplace_back(0x430000, min_alloc_size);
+ b.allocated(p2align(offset, min_alloc_size), min_alloc_size, extents);
+ b.mark_used(offset, length);
+ ASSERT_FALSE(b.is_unused(offset0, length));
+ ASSERT_FALSE(b.is_unused(offset, length));
+ ASSERT_FALSE(b.is_unused(offset, unused_granularity));
+
+ // the ranges around the two used writes must still read as unused
+ ASSERT_TRUE(b.is_unused(0, offset / unused_granularity * unused_granularity));
+ ASSERT_TRUE(b.is_unused(offset + length, offset0 - offset - length));
+ auto end0_aligned = round_up_to(offset0 + length, unused_granularity);
+ ASSERT_TRUE(b.is_unused(end0_aligned, min_alloc_size * 3 - end0_aligned));
+ }
+}
+// This UT is primarily intended to show how repair procedure
+// causes erroneous write to INVALID_OFFSET which is reported in
+// https://tracker.ceph.com/issues/51682
+// Basic map_any functionality is tested as well though.
+//
+TEST(bluestore_blob_t, wrong_map_bl_in_51682)
+{
+ {
+ bluestore_blob_t b;
+ uint64_t min_alloc_size = 4 << 10; // 4 kB
+
+ // build a blob with a hole: two physical extents, then an unallocated
+ // (INVALID_OFFSET) chunk, then a distant extent
+ b.allocated_test(bluestore_pextent_t(0x17ba000, 4 * min_alloc_size));
+ b.allocated_test(bluestore_pextent_t(0x17bf000, 4 * min_alloc_size));
+ b.allocated_test(
+ bluestore_pextent_t(
+ bluestore_pextent_t::INVALID_OFFSET,
+ 1 * min_alloc_size));
+ b.allocated_test(bluestore_pextent_t(0x153c44d000, 7 * min_alloc_size));
+
+ b.mark_used(0, 0x8000);
+ b.mark_used(0x9000, 0x7000);
+
+ string s(0x7000, 'a');
+ bufferlist bl;
+ bl.append(s);
+ const size_t num_expected_entries = 5;
+ // expected (physical offset, length) pairs produced by the two map_bl
+ // calls below; 0xffffffffffffffff is INVALID_OFFSET
+ uint64_t expected[num_expected_entries][2] = {
+ {0x17ba000, 0x4000},
+ {0x17bf000, 0x3000},
+ {0x17c0000, 0x3000},
+ {0xffffffffffffffff, 0x1000},
+ {0x153c44d000, 0x3000}};
+ size_t expected_pos = 0;
+ b.map_bl(0, bl,
+ [&](uint64_t o, bufferlist& bl) {
+ ASSERT_EQ(o, expected[expected_pos][0]);
+ ASSERT_EQ(bl.length(), expected[expected_pos][1]);
+ ++expected_pos;
+ });
+ // 0x5000 is an improper offset presumably provided when doing a repair
+ b.map_bl(0x5000, bl,
+ [&](uint64_t o, bufferlist& bl) {
+ ASSERT_EQ(o, expected[expected_pos][0]);
+ ASSERT_EQ(bl.length(), expected[expected_pos][1]);
+ ++expected_pos;
+ });
+ ASSERT_EQ(expected_pos, num_expected_entries);
+ }
+}
+
+//---------------------------------------------------------------------------------
+// Compare extent @ext against the reference entry ext_arr[idx].
+// Returns 0 on a match; on mismatch dumps the neighborhood of idx to stderr
+// and returns -1.
+static int verify_extent(const extent_t & ext, const extent_t *ext_arr, uint64_t ext_arr_size, uint64_t idx)
+{
+  const extent_t &expected = ext_arr[idx];
+  if (ext.offset != expected.offset || ext.length != expected.length) {
+    std::cerr << "mismatch was found at index " << idx << std::endl;
+    if (ext.length == 0) {
+      std::cerr << "Null extent was returned at idx = " << idx << std::endl;
+    }
+    // print up to three reference entries on each side of the failure
+    unsigned first = std::max(((int32_t)(idx)-3), 0);
+    unsigned last  = std::min(idx+3, ext_arr_size);
+    for (unsigned j = first; j < last; j++) {
+      const extent_t &ref = ext_arr[j];
+      std::cerr << j << ") ref_ext = [" << ref.offset << ", " << ref.length << "]" << std::endl;
+    }
+    std::cerr << idx << ") ext = [" << ext.offset << ", " << ext.length << "]" << std::endl;
+    return -1;
+  }
+  return 0;
+}
+
+//---------------------------------------------------------------------------------
+// Apply up to @ext_arr_size random set (or clr) extents to @sbmap, recording the
+// expected resulting extents in @ext_arr, then walk the bitmap with
+// get_next_set_extent()/get_next_clr_extent() and verify each returned extent
+// against the recorded reference.
+// Returns 0 on success, -1 on any mismatch or failed bitmap op.
+static int test_extents(uint64_t index, extent_t *ext_arr, uint64_t ext_arr_size, SimpleBitmap& sbmap, bool set)
+{
+ const uint64_t MAX_JUMP_BIG = 1523;
+ const uint64_t MAX_JUMP_SMALL = 19;
+ const uint64_t MAX_LEN_BIG = 523;
+ const uint64_t MAX_LEN_SMALL = 23;
+
+ uint64_t n = sbmap.get_size();
+ uint64_t offset = 0;
+ unsigned length, jump, i;
+ for (i = 0; i < ext_arr_size; i++) {
+ // 3 of 4 iterations take a big jump, 1 of 2 a big length
+ if (i & 3) {
+ jump = std::rand() % MAX_JUMP_BIG;
+ } else {
+ jump = std::rand() % MAX_JUMP_SMALL;
+ }
+ offset += jump;
+ if (i & 1) {
+ length = std::rand() % MAX_LEN_BIG;
+ } else {
+ length = std::rand() % MAX_LEN_SMALL;
+ }
+ // make sure no zero length will be used
+ length++;
+ if (offset + length >= n) {
+ break;
+ }
+
+ bool success;
+ if (set) {
+ success = sbmap.set(offset, length);
+ } else {
+ success = sbmap.clr(offset, length);
+ }
+ if (!success) {
+ std::cerr << "Failed sbmap." << (set ? "set(" : "clr(") << offset << ", " << length << ")"<< std::endl;
+ return -1;
+ }
+
+ // record a new reference extent for the first entry or after a non-zero
+ // jump; with no jump this range is adjacent to the previous one, so the
+ // bitmap will report them as a single extent -> merge the references
+ if ( (i==0) || (jump > 0) ) {
+ ext_arr[i] = {offset, length};
+ } else {
+ // merge 2 extents
+ i --;
+ ext_arr[i].length += length;
+ }
+ offset += length;
+ }
+ // i now counts the reference extents actually produced
+ unsigned arr_size = std::min((uint64_t)i, ext_arr_size);
+ std::cout << std::hex << std::right;
+ std::cout << "[" << index << "] " << (set ? "Set::" : "Clr::") << " extents count = 0x" << arr_size;
+ std::cout << std::dec << std::endl;
+
+ // walk the bitmap and compare every extent with its recorded reference
+ offset = 0;
+ extent_t ext;
+ for(unsigned i = 0; i < arr_size; i++) {
+ if (set) {
+ ext = sbmap.get_next_set_extent(offset);
+ } else {
+ ext = sbmap.get_next_clr_extent(offset);
+ }
+
+ if (verify_extent(ext, ext_arr, ext_arr_size, i) != 0) {
+ return -1;
+ }
+ offset = ext.offset + ext.length;
+ }
+
+ // past the last reference extent the bitmap must report nothing more
+ if (set) {
+ ext = sbmap.get_next_set_extent(offset);
+ } else {
+ ext = sbmap.get_next_clr_extent(offset);
+ }
+ if (ext.length == 0) {
+ return 0;
+ } else {
+ std::cerr << "sbmap.get_next_" << (set ? "set" : "clr") << "_extent(" << offset << ") return length = " << ext.length << std::endl;
+ return -1;
+ }
+}
+
+//---------------------------------------------------------------------------------
+// Randomized set/clr extent-iteration round-trips over a 4G-bit SimpleBitmap.
+TEST(SimpleBitmap, basic)
+{
+  const uint64_t MAX_EXTENTS_COUNT = 7131177;
+  auto ext_arr = std::make_unique<extent_t[]>(MAX_EXTENTS_COUNT);
+  ASSERT_TRUE(ext_arr != nullptr);
+  const uint64_t BIT_COUNT = 4ULL << 30; // 4G bits = 512MB
+  SimpleBitmap sbmap(g_ceph_context, BIT_COUNT);
+
+  // seed the PRNG with the current time
+  std::srand(std::time(nullptr));
+  for (unsigned round = 0; round < 3; round++) {
+    // start from an all-clear bitmap and verify random set() extents...
+    memset(ext_arr.get(), 0, sizeof(extent_t) * MAX_EXTENTS_COUNT);
+    sbmap.clear_all();
+    ASSERT_TRUE(test_extents(round, ext_arr.get(), MAX_EXTENTS_COUNT, sbmap, true) == 0);
+
+    // ...then from an all-set bitmap and verify random clr() extents
+    memset(ext_arr.get(), 0, sizeof(extent_t) * MAX_EXTENTS_COUNT);
+    sbmap.set_all();
+    ASSERT_TRUE(test_extents(round, ext_arr.get(), MAX_EXTENTS_COUNT, sbmap, false) == 0);
+  }
+}
+
+//---------------------------------------------------------------------------------
+// Apply random set/clr ops to @sbmap while mirroring each op in a plain
+// byte-per-bit shadow @map, then verify single-bit queries and extent
+// iteration against the shadow.
+// Returns 0 on success, -1 on failure.
+static int test_intersections(unsigned test_idx, SimpleBitmap &sbmap, uint8_t map[], uint64_t map_size)
+{
+ const uint64_t MAX_LEN_BIG = 523;
+ const uint64_t MAX_LEN_SMALL = 23;
+
+ bool success;
+ uint64_t set_op_count = 0, clr_op_count = 0;
+ unsigned length, i;
+ for (i = 0; i < map_size / (MAX_LEN_BIG*2); i++) {
+ uint64_t offset = (std::rand() % (map_size - 1));
+ if (i & 1) {
+ length = std::rand() % MAX_LEN_BIG;
+ } else {
+ length = std::rand() % MAX_LEN_SMALL;
+ }
+ // make sure no zero length will be used
+ length++;
+ if (offset + length >= map_size) {
+ continue;
+ }
+ // 2:1 set/clr
+ bool set = (std::rand() % 3);
+ if (set) {
+ success = sbmap.set(offset, length);
+ memset(map+offset, 0xFF, length);
+ set_op_count++;
+ } else {
+ success = sbmap.clr(offset, length);
+ memset(map+offset, 0x0, length);
+ clr_op_count++;
+ }
+ if (!success) {
+ std::cerr << "Failed sbmap." << (set ? "set(" : "clr(") << offset << ", " << length << ")"<< std::endl;
+ return -1;
+ }
+ }
+
+ // verify every single bit against the shadow map
+ uint64_t set_bit_count = 0;
+ uint64_t clr_bit_count = 0;
+ for(uint64_t idx = 0; idx < map_size; idx++) {
+ if (map[idx]) {
+ set_bit_count++;
+ success = sbmap.bit_is_set(idx);
+ } else {
+ clr_bit_count++;
+ success = sbmap.bit_is_clr(idx);
+ }
+ if (!success) {
+ std::cerr << "expected: sbmap.bit_is_" << (map[idx] ? "set(" : "clr(") << idx << ")"<< std::endl;
+ return -1;
+ }
+
+ }
+ std::cout << std::hex << std::right << __func__ ;
+ std::cout << " [" << test_idx << "] set_bit_count = 0x" << std::setfill('0') << std::setw(8) << set_bit_count
+ << ", clr_bit_count = 0x" << std::setfill('0') << std::setw(8) << clr_bit_count
+ << ", sum = 0x" << set_bit_count + clr_bit_count << std::endl;
+ std::cout << std::dec;
+ // every set-extent reported by the bitmap must be fully set in the shadow.
+ // NOTE(review): the op-count loop bound is only an upper bound on the
+ // number of extents -- surplus iterations presumably restart from a null
+ // extent and re-verify from the beginning, redundant but harmless; confirm
+ uint64_t offset = 0;
+ for(uint64_t i = 0; i < (set_op_count + clr_op_count); i++) {
+ extent_t ext = sbmap.get_next_set_extent(offset);
+ //std::cout << "set_ext:: " << i << ") [" << ext.offset << ", " << ext.length << "]" << std::endl;
+ for (uint64_t idx = ext.offset; idx < ext.offset + ext.length; idx++) {
+ if (map[idx] != 0xFF) {
+ std::cerr << "map[" << idx << "] is clear, but extent [" << ext.offset << ", " << ext.length << "] is set" << std::endl;
+ return -1;
+ }
+ }
+ offset = ext.offset + ext.length;
+ }
+
+ // likewise, every clr-extent must be fully clear in the shadow
+ offset = 0;
+ for(uint64_t i = 0; i < (set_op_count + clr_op_count); i++) {
+ extent_t ext = sbmap.get_next_clr_extent(offset);
+ //std::cout << "clr_ext:: " << i << ") [" << ext.offset << ", " << ext.length << "]" << std::endl;
+ for (uint64_t idx = ext.offset; idx < ext.offset + ext.length; idx++) {
+ if (map[idx] ) {
+ std::cerr << "map[" << idx << "] is set, but extent [" << ext.offset << ", " << ext.length << "] is free" << std::endl;
+ return -1;
+ }
+ }
+ offset = ext.offset + ext.length;
+ }
+
+ return 0;
+}
+
+//---------------------------------------------------------------------------------
+// Cross-check SimpleBitmap against a plain byte-per-bit shadow map under
+// random set/clr traffic.
+TEST(SimpleBitmap, intersection)
+{
+  const uint64_t MAP_SIZE = 1ULL << 30; // 1G bits
+  SimpleBitmap sbmap(g_ceph_context, MAP_SIZE);
+
+  // seed the PRNG with the current time
+  std::srand(std::time(nullptr));
+
+  auto shadow = std::make_unique<uint8_t[]>(MAP_SIZE);
+  ASSERT_TRUE(shadow != nullptr);
+
+  for (unsigned round = 0; round < 1; round++) {
+    // run once starting from an all-clear state...
+    sbmap.clear_all();
+    memset(shadow.get(), 0, MAP_SIZE);
+    ASSERT_TRUE(test_intersections(round, sbmap, shadow.get(), MAP_SIZE) == 0);
+
+    // ...and once starting from an all-set state
+    sbmap.set_all();
+    memset(shadow.get(), 0xFF, MAP_SIZE);
+    ASSERT_TRUE(test_intersections(round, sbmap, shadow.get(), MAP_SIZE) == 0);
+  }
+}
+
+
+//---------------------------------------------------------------------------------
+// Deterministic variant of test_extents(): applies systematically growing
+// offsets and lengths (1..128) to hit word-boundary edge cases, recording each
+// applied extent and then verifying bitmap iteration against the record.
+// Returns 0 on success, -1 on failure.
+static int test_extents_boundaries(uint64_t index, extent_t *ext_arr, uint64_t ext_arr_size, SimpleBitmap& sbmap, bool set)
+{
+ uint64_t n = sbmap.get_size();
+ uint64_t offset = 0, k = 0;
+ for(unsigned i = 0; i < 64; i++) {
+ offset += i;
+ if (offset >= n) {
+ break;
+ }
+
+ for(unsigned length = 1; length <= 128; length++) {
+ if (offset + length >= n) {
+ break;
+ }
+
+ if (k >= ext_arr_size) {
+ break;
+ }
+ bool success;
+ if (set) {
+ success = sbmap.set(offset, length);
+ } else {
+ success = sbmap.clr(offset, length);
+ }
+ if (!success) {
+ std::cerr << "Failed sbmap." << (set ? "set(" : "clr(") << offset << ", " << length << ")"<< std::endl;
+ return -1;
+ }
+ ext_arr[k++] = {offset, length};
+ // skip ahead so consecutive recorded extents stay separated and are not
+ // merged by the bitmap's extent iteration
+ if (length < 64) {
+ offset += 64;
+ } else {
+ offset += 128;
+ }
+ }
+ if (k >= ext_arr_size) {
+ break;
+ }
+ }
+
+ // k counts the extents actually recorded
+ unsigned arr_size = std::min((uint64_t)k, ext_arr_size);
+ std::cout << std::hex << std::right << __func__ ;
+ std::cout << " [" << index << "] " << (set ? "Set::" : "Clr::") << " extents count = 0x" << arr_size;
+ std::cout << std::dec << std::endl;
+
+ // walk the bitmap and compare every extent with its recorded reference
+ offset = 0;
+ extent_t ext;
+ for(unsigned i = 0; i < arr_size; i++) {
+ if (set) {
+ ext = sbmap.get_next_set_extent(offset);
+ } else {
+ ext = sbmap.get_next_clr_extent(offset);
+ }
+
+ if (verify_extent(ext, ext_arr, ext_arr_size, i) != 0) {
+ return -1;
+ }
+ offset = ext.offset + ext.length;
+ }
+
+ // nothing must remain past the last recorded extent
+ if (set) {
+ ext = sbmap.get_next_set_extent(offset);
+ } else {
+ ext = sbmap.get_next_clr_extent(offset);
+ }
+ if (ext.length == 0) {
+ return 0;
+ } else {
+ std::cerr << "sbmap.get_next_" << (set ? "set" : "clr") << "_extent(" << offset << ") return length = " << ext.length << std::endl;
+ return -1;
+ }
+
+}
+
+//---------------------------------------------------------------------------------
+// Exercise extent iteration on bitmaps whose size is deliberately not
+// word-aligned (bit_count + 0..63) to cover word-boundary edge cases.
+TEST(SimpleBitmap, boundaries)
+{
+  const uint64_t MAX_EXTENTS_COUNT = 64 << 10;
+  auto ext_arr = std::make_unique<extent_t[]>(MAX_EXTENTS_COUNT);
+  ASSERT_TRUE(ext_arr != nullptr);
+
+  // seed the PRNG with the current time
+  std::srand(std::time(nullptr));
+
+  uint64_t bit_count = 32 << 20; // 32M bits = 4MB
+  unsigned test_idx = 0;
+  for (unsigned extra = 0; extra < 64; extra++) {
+    SimpleBitmap sbmap(g_ceph_context, bit_count + extra);
+    // set() extents on an all-clear bitmap...
+    memset(ext_arr.get(), 0, sizeof(extent_t) * MAX_EXTENTS_COUNT);
+    sbmap.clear_all();
+    ASSERT_TRUE(test_extents_boundaries(test_idx, ext_arr.get(), MAX_EXTENTS_COUNT, sbmap, true) == 0);
+
+    // ...then clr() extents on an all-set bitmap
+    memset(ext_arr.get(), 0, sizeof(extent_t) * MAX_EXTENTS_COUNT);
+    sbmap.set_all();
+    ASSERT_TRUE(test_extents_boundaries(test_idx++, ext_arr.get(), MAX_EXTENTS_COUNT, sbmap, false) == 0);
+  }
+}
+
+//---------------------------------------------------------------------------------
+// Full-range set/clr transitions, both bulk and bit-by-bit, on bitmaps whose
+// size is not word-aligned.
+TEST(SimpleBitmap, boundaries2)
+{
+  const uint64_t bit_count_base = 64 << 10; // 64K bits
+  const extent_t null_extent = {0, 0};
+
+  for (unsigned extra = 0; extra < 64; extra++) {
+    uint64_t bit_count = bit_count_base + extra;
+    extent_t full_extent = {0, bit_count};
+    SimpleBitmap sbmap(g_ceph_context, bit_count);
+
+    // a fully-set bitmap yields one full set-extent and no clr-extent
+    auto expect_all_set = [&]() {
+      ASSERT_TRUE(sbmap.get_next_set_extent(0) == full_extent);
+      ASSERT_TRUE(sbmap.get_next_clr_extent(0) == null_extent);
+    };
+    // and vice versa for a fully-clear bitmap
+    auto expect_all_clr = [&]() {
+      ASSERT_TRUE(sbmap.get_next_set_extent(0) == null_extent);
+      ASSERT_TRUE(sbmap.get_next_clr_extent(0) == full_extent);
+    };
+
+    // bulk set
+    sbmap.set(0, bit_count);
+    expect_all_set();
+
+    // clear one bit at a time
+    for (uint64_t bit = 0; bit < bit_count; bit++) {
+      sbmap.clr(bit, 1);
+    }
+    expect_all_clr();
+
+    // set one bit at a time
+    for (uint64_t bit = 0; bit < bit_count; bit++) {
+      sbmap.set(bit, 1);
+    }
+    expect_all_set();
+
+    // bulk clear
+    sbmap.clr(0, bit_count);
+    expect_all_clr();
+  }
+}
+
+// shared_blob_2hash_tracker_t accumulates signed ref-count deltas keyed by
+// (shared blob position, offset). Each sequence below eventually brings the
+// net delta of every key back to zero and checks that count_non_zero()
+// reaches 0 exactly then -- and stays non-zero while any key has a residue.
+TEST(shared_blob_2hash_tracker_t, basic_test)
+{
+ shared_blob_2hash_tracker_t t1(1024 * 1024, 4096);
+
+ ASSERT_TRUE(t1.count_non_zero() == 0);
+
+ // +1 then -1 on the same key cancels out
+ t1.inc(0, 0, 1);
+ ASSERT_TRUE(t1.count_non_zero() != 0);
+ t1.inc(0, 0, -1);
+ ASSERT_TRUE(t1.count_non_zero() == 0);
+
+ // +2 then two -1 on the same key cancels out
+ t1.inc(3, 0x1000, 2);
+ ASSERT_TRUE(t1.count_non_zero() != 0);
+ t1.inc(3, 0x1000, -1);
+ ASSERT_TRUE(t1.count_non_zero() != 0);
+ t1.inc(3, 0x1000, -1);
+ ASSERT_TRUE(t1.count_non_zero() == 0);
+
+ // interleaved deltas on two distinct keys (positions 2 and 18) must not
+ // cancel each other; zero is reached only when each key nets out
+ t1.inc(2, 0x2000, 5);
+ ASSERT_TRUE(t1.count_non_zero() != 0);
+ t1.inc(18, 0x2000, -5);
+ ASSERT_TRUE(t1.count_non_zero() != 0);
+ t1.inc(18, 0x2000, 1);
+ ASSERT_TRUE(t1.count_non_zero() != 0);
+ t1.inc(2, 0x2000, -1);
+ ASSERT_TRUE(t1.count_non_zero() != 0);
+ t1.inc(18, 0x2000, 4);
+ ASSERT_TRUE(t1.count_non_zero() != 0);
+ t1.inc(2, 0x2000, -4);
+ ASSERT_TRUE(t1.count_non_zero() == 0);
+
+ // same idea with adjacent positions 3 and 4
+ t1.inc(3, 0x3000, 2);
+ ASSERT_TRUE(t1.count_non_zero() != 0);
+ t1.inc(4, 0x3000, -1);
+ ASSERT_TRUE(t1.count_non_zero() != 0);
+ t1.inc(4, 0x3000, -1);
+ ASSERT_TRUE(t1.count_non_zero() != 0);
+ t1.inc(3, 0x3000, -2);
+ ASSERT_TRUE(t1.count_non_zero() != 0);
+ t1.inc(4, 0x3000, 1);
+ ASSERT_TRUE(t1.count_non_zero() != 0);
+ t1.inc(4, 0x3000, 1);
+ ASSERT_TRUE(t1.count_non_zero() == 0);
+
+ // leave residues at several offsets of position 5 and probe them
+ t1.inc(5, 0x1000, 1);
+ t1.inc(5, 0x2000, 3);
+ t1.inc(5, 0x3000, 2);
+ t1.inc(5, 0x8000, 1);
+
+ ASSERT_TRUE(t1.count_non_zero() != 0);
+
+ ASSERT_TRUE(!t1.test_all_zero(5,0x1000));
+ ASSERT_TRUE(!t1.test_all_zero(5, 0x2000));
+ ASSERT_TRUE(!t1.test_all_zero(5, 0x3000));
+ ASSERT_TRUE(t1.test_all_zero(5, 0x4000));
+ ASSERT_TRUE(!t1.test_all_zero(5, 0x8000));
+
+ // range probes: a range is "all zero" only if it overlaps no residue
+ ASSERT_TRUE(t1.test_all_zero_range(5, 0, 0x1000));
+ ASSERT_TRUE(t1.test_all_zero_range(5, 0x500, 0x500));
+ ASSERT_TRUE(!t1.test_all_zero_range(5, 0x500, 0x1500));
+ ASSERT_TRUE(!t1.test_all_zero_range(5, 0x1500, 0x3200));
+ ASSERT_TRUE(t1.test_all_zero_range(5, 0x4500, 0x1500));
+ ASSERT_TRUE(t1.test_all_zero_range(5, 0x4500, 0x3b00));
+ ASSERT_TRUE(!t1.test_all_zero_range(5, 0, 0x9000));
+}
+
+// Verify that bluestore_blob_use_tracker_t keeps the bluestore_cache_other
+// mempool counters consistent across init/add_tail/prune_tail/split and
+// destruction.
+TEST(bluestore_blob_use_tracker_t, mempool_stats_test)
+{
+ using mempool::bluestore_cache_other::allocated_items;
+ using mempool::bluestore_cache_other::allocated_bytes;
+ // baseline: other users of the pool may already hold allocations
+ uint64_t other_items0 = allocated_items();
+ uint64_t other_bytes0 = allocated_bytes();
+ {
+ bluestore_blob_use_tracker_t* t1 = new bluestore_blob_use_tracker_t;
+
+ t1->init(1024 * 1024, 4096);
+ ASSERT_EQ(256, allocated_items() - other_items0); // = 1M / 4K
+ ASSERT_EQ(1024, allocated_bytes() - other_bytes0); // = 1M / 4K * 4
+
+ // destruction returns the counters to the baseline
+ delete t1;
+ ASSERT_EQ(allocated_items(), other_items0);
+ ASSERT_EQ(allocated_bytes(), other_bytes0);
+ }
+ {
+ bluestore_blob_use_tracker_t* t1 = new bluestore_blob_use_tracker_t;
+
+ t1->init(1024 * 1024, 4096);
+ t1->add_tail(2048 * 1024, 4096);
+ // proper stats update after tail add
+ ASSERT_EQ(512, allocated_items() - other_items0); // = 2M / 4K
+ ASSERT_EQ(2048, allocated_bytes() - other_bytes0); // = 2M / 4K * 4
+
+ delete t1;
+ ASSERT_EQ(allocated_items(), other_items0);
+ ASSERT_EQ(allocated_bytes(), other_bytes0);
+ }
+ {
+ bluestore_blob_use_tracker_t* t1 = new bluestore_blob_use_tracker_t;
+
+ t1->init(1024 * 1024, 4096);
+ t1->prune_tail(512 * 1024);
+ // no changes in stats after pruning
+ ASSERT_EQ(256, allocated_items() - other_items0); // = 1M / 4K
+ ASSERT_EQ(1024, allocated_bytes() - other_bytes0); // = 1M / 4K * 4
+
+ delete t1;
+ ASSERT_EQ(allocated_items(), other_items0);
+ ASSERT_EQ(allocated_bytes(), other_bytes0);
+ }
+ {
+ bluestore_blob_use_tracker_t* t1 = new bluestore_blob_use_tracker_t;
+ bluestore_blob_use_tracker_t* t2 = new bluestore_blob_use_tracker_t;
+
+ t1->init(1024 * 1024, 4096);
+
+ // t1 keeps the same amount of entries + t2 has got half of them
+ t1->split(512 * 1024, t2);
+ ASSERT_EQ(256 + 128, allocated_items() - other_items0); //= 1M / 4K*1.5
+ ASSERT_EQ(1024 + 512, allocated_bytes() - other_bytes0); //= 1M / 4K*4*1.5
+
+ // t1 & t2 release everything, then t2 gets one less entry than it
+ // had had before
+ t1->split(4096, t2);
+ ASSERT_EQ(127, allocated_items() - other_items0); // = 512K / 4K - 1
+ ASSERT_EQ(127 * 4, allocated_bytes() - other_bytes0); // = 512K / 4K * 4 - 4
+ delete t1;
+ delete t2;
+ ASSERT_EQ(allocated_items(), other_items0);
+ ASSERT_EQ(allocated_bytes(), other_bytes0);
+ }
+}
+