// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "HybridAllocator.h"

#include <limits>

#include "common/config_proxy.h"
#include "common/debug.h"

#define dout_context cct
#define dout_subsys ceph_subsys_bluestore
#undef dout_prefix
#define dout_prefix *_dout << "HybridAllocator "


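// Allocate 'want' bytes in 'unit'-aligned chunks, never emitting a single
// extent longer than 'max_alloc_size'. Requests smaller than the shortest
// range held by the AVL allocator are tried against the bitmap fallback
// first; everything else goes to the AVL allocator first. If the preferred
// allocator fails outright, whatever it appended is rolled back and released
// and the other allocator is tried for the full amount; a partial success is
// topped up from the other allocator. Returns the number of bytes allocated,
// a negative error if the fallback attempt failed (the caller releases any
// extents already appended), or -ENOSPC if nothing was allocated.
//
// A hypothetical call site ('alloc' is a HybridAllocator*; the values are
// chosen for illustration only):
//
//   PExtentVector extents;
//   int64_t got = alloc->allocate(0x100000 /* want: 1 MiB */,
//                                 0x1000   /* unit: 4 KiB */,
//                                 0        /* max_alloc_size: no cap */,
//                                 0        /* hint */,
//                                 &extents);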
int64_t HybridAllocator::allocate(
  uint64_t want,
  uint64_t unit,
  uint64_t max_alloc_size,
  int64_t hint,
  PExtentVector* extents)
{
  ldout(cct, 10) << __func__ << std::hex
                 << " want 0x" << want
                 << " unit 0x" << unit
                 << " max_alloc_size 0x" << max_alloc_size
                 << " hint 0x" << hint
                 << std::dec << dendl;
  ceph_assert(isp2(unit));
  ceph_assert(want % unit == 0);

  if (max_alloc_size == 0) {
    max_alloc_size = want;
  }
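  // clamp max_alloc_size so that a single allocation's length always fits
  // the width of bluestore_pextent_t::length, keeping the cap aligned to
  // the block size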
  if (constexpr auto cap = std::numeric_limits<decltype(bluestore_pextent_t::length)>::max();
      max_alloc_size >= cap) {
    max_alloc_size = p2align(uint64_t(cap), (uint64_t)get_block_size());
  }

  std::lock_guard l(lock);

  int64_t res;
  PExtentVector local_extents;

  // preserve original 'extents' vector state
  auto orig_size = extents->size();
  auto orig_pos = extents->end();
  if (orig_size) {
    --orig_pos;
  }

  // try the bitmap first to avoid a needless split of a contiguous extent
  // when the desired amount is smaller than the shortest range in the AVL tree
  if (bmap_alloc && bmap_alloc->get_free() &&
      want < _lowest_size_available()) {
    res = bmap_alloc->allocate(want, unit, max_alloc_size, hint, extents);
    if (res < 0) {
      // allocation failed: roll back and release whatever was just
      // allocated, then start over from the AVL allocator
      if (orig_size) {
        local_extents.insert(
          local_extents.end(), ++orig_pos, extents->end());
        extents->resize(orig_size);
      } else {
        extents->swap(local_extents);
      }
      bmap_alloc->release(local_extents);
      res = 0;
    }
    if ((uint64_t)res < want) {
      auto res2 = _allocate(want - res, unit, max_alloc_size, hint, extents);
      if (res2 < 0) {
        res = res2; // caller to do the release
      } else {
        res += res2;
      }
    }
  } else {
    res = _allocate(want, unit, max_alloc_size, hint, extents);
    if (res < 0) {
      // allocation failed: roll back and release whatever was just
      // allocated, then start over from the bitmap allocator
      if (orig_size) {
        local_extents.insert(
          local_extents.end(), ++orig_pos, extents->end());
        extents->resize(orig_size);
      } else {
        extents->swap(local_extents);
      }
      _release(local_extents);
      res = 0;
    }
    if ((uint64_t)res < want) {
      auto res2 = bmap_alloc ?
        bmap_alloc->allocate(want - res, unit, max_alloc_size, hint, extents) :
        0;
      if (res2 < 0) {
        res = res2; // caller to do the release
      } else {
        res += res2;
      }
    }
  }
  return res ? res : -ENOSPC;
}

void HybridAllocator::release(const interval_set<uint64_t>& release_set) {
  std::lock_guard l(lock);
  // this will attempt to put free ranges back into the AvlAllocator first
  // and fall back to the bitmap allocator via the _try_insert_range call
  _release(release_set);
}

uint64_t HybridAllocator::get_free()
{
  std::lock_guard l(lock);
  return (bmap_alloc ? bmap_alloc->get_free() : 0) + _get_free();
}

double HybridAllocator::get_fragmentation()
{
  std::lock_guard l(lock);
  auto f = AvlAllocator::_get_fragmentation();
  auto bmap_free = bmap_alloc ? bmap_alloc->get_free() : 0;
  if (bmap_free) {
    auto _free = _get_free() + bmap_free;
    auto bf = bmap_alloc->get_fragmentation();

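    // blend the two fragmentation scores, weighting each allocator by its
    // share of the total free space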
    f = f * _get_free() / _free + bf * bmap_free / _free;
  }
  return f;
}

void HybridAllocator::dump()
{
  std::lock_guard l(lock);
  AvlAllocator::_dump();
  if (bmap_alloc) {
    bmap_alloc->dump();
  }
  ldout(cct, 0) << __func__
                << " avl_free: " << _get_free()
                << " bmap_free: " << (bmap_alloc ? bmap_alloc->get_free() : 0)
                << dendl;
}

void HybridAllocator::dump(std::function<void(uint64_t offset, uint64_t length)> notify)
{
  AvlAllocator::dump(notify);
  if (bmap_alloc) {
    bmap_alloc->dump(notify);
  }
}

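// Remove [offset, offset + length) from the free set during initialization.
// A sub-range not found in the AVL tree must have spilled over to the bitmap
// allocator; a range that neither allocator tracks indicates an inconsistency
// and is fatal.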
void HybridAllocator::init_rm_free(uint64_t offset, uint64_t length)
{
  if (!length)
    return;
  std::lock_guard l(lock);
  ldout(cct, 10) << __func__ << std::hex
                 << " offset 0x" << offset
                 << " length 0x" << length
                 << std::dec << dendl;
  _try_remove_from_tree(offset, length,
    [&](uint64_t o, uint64_t l, bool found) {
      if (!found) {
        if (bmap_alloc) {
          bmap_alloc->init_rm_free(o, l);
        } else {
          lderr(cct) << "init_rm_free lambda" << std::hex
                     << " unexpected extent: 0x" << o << "~" << l
                     << std::dec << dendl;
          ceph_assert(false);
        }
      }
    });
}

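// Shut down both allocators; the bitmap fallback is owned by this object
// and is deleted here.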
void HybridAllocator::shutdown()
{
  std::lock_guard l(lock);
  _shutdown();
  if (bmap_alloc) {
    bmap_alloc->shutdown();
    delete bmap_alloc;
    bmap_alloc = nullptr;
  }
}

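// Called when the AVL allocator spills a free range out of its tree (to
// bound its memory footprint); the range is handed to the bitmap allocator,
// which is constructed lazily on first spillover.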
void HybridAllocator::_spillover_range(uint64_t start, uint64_t end)
{
  auto size = end - start;
  dout(20) << __func__
           << std::hex << " "
           << start << "~" << size
           << std::dec
           << dendl;
  ceph_assert(size);
  if (!bmap_alloc) {
    dout(1) << __func__
            << std::hex
            << " constructing fallback allocator"
            << dendl;
    bmap_alloc = new BitmapAllocator(cct,
      get_capacity(),
      get_block_size(),
      get_name() + ".fallback");
  }
  bmap_alloc->init_add_free(start, size);
}

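// When returning a free range to the AVL tree, first claim any free space
// the bitmap allocator holds immediately to its left and right, so that
// contiguous free space merges back into a single AVL extent instead of
// staying split across the two allocators.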
void HybridAllocator::_add_to_tree(uint64_t start, uint64_t size)
{
  if (bmap_alloc) {
    uint64_t head = bmap_alloc->claim_free_to_left(start);
    uint64_t tail = bmap_alloc->claim_free_to_right(start + size);
    ceph_assert(head <= start);
    start -= head;
    size += head + tail;
  }
  AvlAllocator::_add_to_tree(start, size);
}