]> git.proxmox.com Git - ceph.git/blame - ceph/src/test/objectstore/hybrid_allocator_test.cc
import quincy beta 17.1.0
[ceph.git] / ceph / src / test / objectstore / hybrid_allocator_test.cc
CommitLineData
e306af50
TL
1// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2// vim: ts=8 sw=2 smarttab
3
4#include <iostream>
5#include <gtest/gtest.h>
6
7#include "os/bluestore/HybridAllocator.h"
8
9class TestHybridAllocator : public HybridAllocator {
10public:
11 TestHybridAllocator(CephContext* cct,
12 int64_t device_size,
13 int64_t _block_size,
14 uint64_t max_entries,
15 const std::string& name) :
16 HybridAllocator(cct, device_size, _block_size,
17 max_entries,
18 name) {
19 }
20
21 uint64_t get_bmap_free() {
22 return get_bmap() ? get_bmap()->get_free() : 0;
23 }
24 uint64_t get_avl_free() {
25 return AvlAllocator::get_free();
26 }
27};
28
// Common size units used by the tests below.
// constexpr (rather than const) guarantees compile-time evaluation and
// allows use in constant expressions.
constexpr uint64_t _1m = 1024 * 1024;
constexpr uint64_t _4m = 4 * 1024 * 1024;
31
// Exercises the interplay between the AVL part and the bitmap fallback:
// once the AVL part hits its entry budget (4 range entries here, see the
// ctor's max_entries argument), further frees spill into the bitmap.
// State after every mutation is asserted exactly, so the statement order
// below is significant.
TEST(HybridAllocator, basic)
{
  {
    uint64_t block_size = 0x1000;
    uint64_t capacity = 0x10000 * _1m; // = 64GB
    // Budget for only 4 AVL range entries — presumably max_entries is a
    // byte budget divided by sizeof(range_seg_t); the spill behavior
    // asserted below confirms 4 entries fit.
    TestHybridAllocator ha(g_ceph_context, capacity, block_size,
      4 * sizeof(range_seg_t), "test_hybrid_allocator");

    // Freshly constructed allocator tracks nothing.
    ASSERT_EQ(0, ha.get_free());
    ASSERT_EQ(0, ha.get_avl_free());
    ASSERT_EQ(0, ha.get_bmap_free());

    // First free chunk goes to the AVL part.
    ha.init_add_free(0, _4m);
    ASSERT_EQ(_4m, ha.get_free());
    ASSERT_EQ(_4m, ha.get_avl_free());
    ASSERT_EQ(0, ha.get_bmap_free());

    ha.init_add_free(2 * _4m, _4m);
    ASSERT_EQ(_4m * 2, ha.get_free());
    ASSERT_EQ(_4m * 2, ha.get_avl_free());
    ASSERT_EQ(0, ha.get_bmap_free());

    ha.init_add_free(100 * _4m, _4m);
    ha.init_add_free(102 * _4m, _4m);

    // All 4 AVL entries are now occupied.
    ASSERT_EQ(_4m * 4, ha.get_free());
    ASSERT_EQ(_4m * 4, ha.get_avl_free());
    ASSERT_EQ(0, ha.get_bmap_free());

    // next allocs will go to bitmap
    ha.init_add_free(4 * _4m, _4m);
    ASSERT_EQ(_4m * 5, ha.get_free());
    ASSERT_EQ(_4m * 4, ha.get_avl_free());
    ASSERT_EQ(_4m * 1, ha.get_bmap_free());

    ha.init_add_free(6 * _4m, _4m);
    ASSERT_EQ(_4m * 6, ha.get_free());
    ASSERT_EQ(_4m * 4, ha.get_avl_free());
    ASSERT_EQ(_4m * 2, ha.get_bmap_free());

    // so we have 6x4M chunks, 4 chunks at AVL and 2 at bitmap

    // Splitting 0~4M at 1M~1M produces two pieces; only one fits the
    // full AVL, so the smaller piece migrates to the bitmap —
    // hence bmap grows from 8M to 9M while avl drops from 16M to 14M.
    ha.init_rm_free(_1m, _1m); // take 1M from AVL
    ASSERT_EQ(_1m * 23, ha.get_free());
    ASSERT_EQ(_1m * 14, ha.get_avl_free());
    ASSERT_EQ(_1m * 9, ha.get_bmap_free());

    ha.init_rm_free(6 * _4m + _1m, _1m); // take 1M from bmap
    ASSERT_EQ(_1m * 22, ha.get_free());
    ASSERT_EQ(_1m * 14, ha.get_avl_free());
    ASSERT_EQ(_1m * 8, ha.get_bmap_free());

    // so we have at avl: 2M~2M, 8M~4M, 400M~4M, 408M~4M
    // and at bmap: 0~1M, 16M~1M, 18M~2M, 24M~4M

    PExtentVector extents;
    // allocate 4K, to be served from bitmap
    EXPECT_EQ(block_size, ha.allocate(block_size, block_size,
      0, (int64_t)0, &extents));
    ASSERT_EQ(1, extents.size());
    ASSERT_EQ(0, extents[0].offset);

    ASSERT_EQ(_1m * 14, ha.get_avl_free());
    ASSERT_EQ(_1m * 8 - block_size, ha.get_bmap_free());

    interval_set<uint64_t> release_set;
    // release 4K, to be returned to bitmap
    release_set.insert(extents[0].offset, extents[0].length);
    ha.release(release_set);

    ASSERT_EQ(_1m * 14, ha.get_avl_free());
    ASSERT_EQ(_1m * 8, ha.get_bmap_free());
    extents.clear();
    release_set.clear();

    // again we have at avl: 2M~2M, 8M~4M, 400M~4M, 408M~4M
    // and at bmap: 0~1M, 16M~1M, 18M~2M, 24M~4M

    // add 12M~3M which will go to avl (merges with the existing 8M~4M slot)
    ha.init_add_free(3 * _4m, 3 * _1m);
    ASSERT_EQ(_1m * 17, ha.get_avl_free());
    ASSERT_EQ(_1m * 8, ha.get_bmap_free());


    // add 15M~4K which will be appended to existing slot
    ha.init_add_free(15 * _1m, 0x1000);
    ASSERT_EQ(_1m * 17 + 0x1000, ha.get_avl_free());
    ASSERT_EQ(_1m * 8, ha.get_bmap_free());


    // again we have at avl: 2M~2M, 8M~(7M+4K), 400M~4M, 408M~4M
    // and at bmap: 0~1M, 16M~1M, 18M~2M, 24M~4M

    // some removals from bmap
    // cut the trailing 4K off the 24M~4M span
    ha.init_rm_free(28 * _1m - 0x1000, 0x1000);
    ASSERT_EQ(_1m * 17 + 0x1000, ha.get_avl_free());
    ASSERT_EQ(_1m * 8 - 0x1000, ha.get_bmap_free());

    // punch a 4K hole just past the start of the same span
    ha.init_rm_free(24 * _1m + 0x1000, 0x1000);
    ASSERT_EQ(_1m * 17 + 0x1000, ha.get_avl_free());
    ASSERT_EQ(_1m * 8 - 0x2000, ha.get_bmap_free());

    // remove everything remaining from that span
    ha.init_rm_free(24 * _1m + 0x1000, _4m - 0x2000);
    ASSERT_EQ(_1m * 17 + 0x1000, ha.get_avl_free());
    ASSERT_EQ(_1m * 4, ha.get_bmap_free());

    // 4K removal from avl
    ha.init_rm_free(15 * _1m, 0x1000);
    ASSERT_EQ(_1m * 17, ha.get_avl_free());
    ASSERT_EQ(_1m * 4, ha.get_bmap_free());

    // remove highest 4Ms from avl
    ha.init_rm_free(_1m * 400, _4m);
    ha.init_rm_free(_1m * 408, _4m);
    ASSERT_EQ(_1m * 9, ha.get_avl_free());
    ASSERT_EQ(_1m * 4, ha.get_bmap_free());

    // we have at avl: 2M~2M, 8M~7M
    // and at bmap: 0~1M, 16M~1M, 18M~2M

    // this will be merged with neighbors from bmap and go to avl:
    // 16M~1M + 17M~1M + 18M~2M coalesce into 16M~4M
    ha.init_add_free(17 * _1m, _1m);
    ASSERT_EQ(_1m * 1, ha.get_bmap_free());
    ASSERT_EQ(_1m * 13, ha.get_avl_free());

    // we have at avl: 2M~2M, 8M~7M, 16M~4M
    // and at bmap: 0~1M

    // and now do some cutoffs from 0~1M span

    // cut off 4K from bmap
    ha.init_rm_free(0 * _1m, 0x1000);
    ASSERT_EQ(_1m * 13, ha.get_avl_free());
    ASSERT_EQ(_1m * 1 - 0x1000, ha.get_bmap_free());

    // cut off 1M-4K from bmap, emptying it completely
    ha.init_rm_free(0 * _1m + 0x1000, _1m - 0x1000);
    ASSERT_EQ(_1m * 13, ha.get_avl_free());
    ASSERT_EQ(0, ha.get_bmap_free());

    // cut off 512K avl
    ha.init_rm_free(17 * _1m + 0x1000, _1m / 2);
    ASSERT_EQ(_1m * 13 - _1m / 2, ha.get_avl_free());
    ASSERT_EQ(0, ha.get_bmap_free());

    // cut off the rest from avl
    ha.init_rm_free(17 * _1m + 0x1000 + _1m / 2, _1m / 2);
    ASSERT_EQ(_1m * 12, ha.get_avl_free());
    ASSERT_EQ(0, ha.get_bmap_free());
  }

  {
    uint64_t block_size = 0x1000;
    uint64_t capacity = 0x10000 * _1m; // = 64GB
    TestHybridAllocator ha(g_ceph_context, capacity, block_size,
      4 * sizeof(range_seg_t), "test_hybrid_allocator");

    // Fill all 4 AVL entries, the last one being a small 4K chunk.
    ha.init_add_free(_1m, _1m);
    ha.init_add_free(_1m * 3, _1m);
    ha.init_add_free(_1m * 5, _1m);
    ha.init_add_free(0x4000, 0x1000);

    ASSERT_EQ(_1m * 3 + 0x1000, ha.get_free());
    ASSERT_EQ(_1m * 3 + 0x1000, ha.get_avl_free());
    ASSERT_EQ(0, ha.get_bmap_free());

    // This will substitute chunk 0x4000~1000.
    // Since new chunk insertion into AvlAllocator::range_tree
    // happens immediately before the 0x4000~1000 chunk, care should be
    // taken to order operations properly and not use an already-disposed
    // iterator (regression test for that bug).
    ha.init_add_free(0, 0x2000);

    ASSERT_EQ(_1m * 3 + 0x3000, ha.get_free());
    ASSERT_EQ(_1m * 3 + 0x2000, ha.get_avl_free());
    ASSERT_EQ(0x1000, ha.get_bmap_free());
  }
}
209
// Verifies that get_fragmentation() blends the AVL and bitmap scores.
TEST(HybridAllocator, fragmentation)
{
  {
    uint64_t block_size = 0x1000;
    uint64_t capacity = 0x1000 * 0x1000; // = 16M
    TestHybridAllocator ha(g_ceph_context, capacity, block_size,
      4 * sizeof(range_seg_t), "test_hybrid_allocator");

    // Four AVL extents: three of 2 blocks and one of 1 block
    // (7 free blocks total).
    ha.init_add_free(0, 0x2000);
    ha.init_add_free(0x4000, 0x2000);
    ha.init_add_free(0x8000, 0x2000);
    ha.init_add_free(0xc000, 0x1000);

    ASSERT_EQ(0.5, ha.get_fragmentation());

    // this will go to bmap with fragmentation = 1
    ha.init_add_free(0x10000, 0x1000);

    // which results in the following total fragmentation:
    // a weighted average over free blocks — 7 of 8 blocks score 0.5 (AVL),
    // 1 of 8 scores 1.0 (bitmap)
    ASSERT_EQ(0.5 * 7 / 8 + 1.0 / 8, ha.get_fragmentation());
  }
}