// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
//
#include "avlallocator.h"
#include "crimson/os/seastore/logging.h"

SET_SUBSYS(seastore_device);

namespace crimson::os::seastore {

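// Mark [addr, addr + size) as used by removing it from the free-extent
// trees; unlike alloc_extent(), this does not touch reserved_extent_tracker.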
void AvlAllocator::mark_extent_used(rbm_abs_addr addr, size_t size)
{
  LOG_PREFIX(AvlAllocator::mark_extent_used);
  DEBUG("addr: {}, size: {}, avail: {}", addr, size, available_size);
  _remove_from_tree(addr, size);
}

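// Initialize the allocator with a single free extent covering
// [addr, addr + size) and record the device geometry.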
void AvlAllocator::init(rbm_abs_addr addr, size_t size, size_t b_size)
{
  LOG_PREFIX(AvlAllocator::init);
  DEBUG("addr: {}, size: {}", addr, size);
  auto r = new extent_range_t{ addr, addr + size };
  extent_tree.insert(*r);
  extent_size_tree.insert(*r);
  available_size = size;
  block_size = b_size;
  total_size = size;
  base_addr = addr;
}

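// Carve [start, start + size) out of the free space. The containing free
// extent is located in the address-ordered tree and is either shrunk on one
// side, split in two, or erased entirely; the size-ordered tree is kept in
// sync via the _extent_size_tree_* helpers.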
void AvlAllocator::_remove_from_tree(rbm_abs_addr start, rbm_abs_addr size)
{
  LOG_PREFIX(AvlAllocator::_remove_from_tree);
  rbm_abs_addr end = start + size;

  ceph_assert(size != 0);
  ceph_assert(size <= available_size);

  auto rs = extent_tree.find(extent_range_t{start, end}, extent_tree.key_comp());
  ceph_assert(rs != extent_tree.end());
  DEBUG("rs start: {}, rs end: {}", rs->start, rs->end);
  ceph_assert(rs->start <= start);
  ceph_assert(rs->end >= end);

  bool left_over = (rs->start != start);
  bool right_over = (rs->end != end);

  _extent_size_tree_rm(*rs);

  if (left_over && right_over) {
    // The removed range splits the containing extent in two: shrink the
    // existing node to the left remainder and insert a new node for the
    // right remainder.
    auto old_right_end = rs->end;
    auto insert_pos = rs;
    ceph_assert(insert_pos != extent_tree.end());
    ++insert_pos;
    rs->end = start;

    auto r = new extent_range_t{end, old_right_end};
    extent_tree.insert_before(insert_pos, *r);
    extent_size_tree.insert(*r);
    available_size += r->length();
    _extent_size_tree_try_insert(*rs);
  } else if (left_over) {
    assert(is_aligned(start, block_size));
    rs->end = start;
    _extent_size_tree_try_insert(*rs);
  } else if (right_over) {
    assert(is_aligned(end, block_size));
    rs->start = end;
    _extent_size_tree_try_insert(*rs);
  } else {
    // The free extent is consumed exactly; drop it from both trees.
    extent_tree.erase_and_dispose(rs, dispose_rs{});
  }
}

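// Search the size-ordered tree for the first free extent that can hold
// `size` bytes and return its start address, or the end-of-device sentinel
// (base_addr + total_size) if none can.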
rbm_abs_addr AvlAllocator::find_block(size_t size)
{
  const auto comp = extent_size_tree.key_comp();
  auto iter = extent_size_tree.lower_bound(
    extent_range_t{base_addr, base_addr + size}, comp);
  for (; iter != extent_size_tree.end(); ++iter) {
    assert(is_aligned(iter->start, block_size));
    rbm_abs_addr off = iter->start;
    if (off + size <= iter->end) {
      return off;
    }
  }
  // No extent is large enough; return the sentinel that alloc_extent()
  // compares against (base_addr + total_size, i.e. end of device).
  return base_addr + total_size;
}

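// Return [start, start + size) to the free space, coalescing with the
// immediately preceding and/or following free extent when contiguous so the
// trees never hold adjacent free ranges.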
void AvlAllocator::_add_to_tree(rbm_abs_addr start, rbm_abs_addr size)
{
  LOG_PREFIX(AvlAllocator::_add_to_tree);
  ceph_assert(size != 0);
  DEBUG("addr: {}, size: {}", start, size);

  rbm_abs_addr end = start + size;

  auto rs_after = extent_tree.upper_bound(extent_range_t{start, end},
                                          extent_tree.key_comp());

  auto rs_before = extent_tree.end();
  if (rs_after != extent_tree.begin()) {
    rs_before = std::prev(rs_after);
  }

  bool merge_before = (rs_before != extent_tree.end() && rs_before->end == start);
  bool merge_after = (rs_after != extent_tree.end() && rs_after->start == end);

  if (merge_before && merge_after) {
    // The freed range bridges two existing extents: fold both into one node.
    _extent_size_tree_rm(*rs_before);
    _extent_size_tree_rm(*rs_after);
    rs_after->start = rs_before->start;
    extent_tree.erase_and_dispose(rs_before, dispose_rs{});
    _extent_size_tree_try_insert(*rs_after);
  } else if (merge_before) {
    _extent_size_tree_rm(*rs_before);
    rs_before->end = end;
    _extent_size_tree_try_insert(*rs_before);
  } else if (merge_after) {
    _extent_size_tree_rm(*rs_after);
    rs_after->start = start;
    _extent_size_tree_try_insert(*rs_after);
  } else {
    // No adjacent free extent: insert a standalone node into both trees.
    auto r = new extent_range_t{start, end};
    extent_tree.insert(*r);
    extent_size_tree.insert(*r);
    available_size += r->length();
  }
}

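// Allocate `size` bytes (block-aligned) and return the allocated range as an
// interval_set, or std::nullopt if the request cannot be satisfied. Only one
// attempt of std::min(max_alloc_size, size) bytes is made, so the result
// holds exactly one interval.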
std::optional<interval_set<rbm_abs_addr>> AvlAllocator::alloc_extent(
  size_t size)
{
  LOG_PREFIX(AvlAllocator::alloc_extent);
  if (available_size < size) {
    return std::nullopt;
  }
  if (extent_size_tree.empty()) {
    return std::nullopt;
  }
  ceph_assert(size > 0);
  ceph_assert(is_aligned(size, block_size));

  interval_set<rbm_abs_addr> result;

  auto try_to_alloc_block = [this, &result, FNAME] (uint64_t alloc_size) -> uint64_t
  {
    rbm_abs_addr start = find_block(alloc_size);
    if (start != base_addr + total_size) {
      _remove_from_tree(start, alloc_size);
      DEBUG("allocate addr: {}, allocate size: {}, available size: {}",
            start, alloc_size, available_size);
      result.insert(start, alloc_size);
      return alloc_size;
    }
    return 0;
  };

  auto alloc = std::min(max_alloc_size, size);
  rbm_abs_addr ret = try_to_alloc_block(alloc);
  if (ret == 0) {
    return std::nullopt;
  }

  assert(!result.empty());
  assert(result.num_intervals() == 1);
  for (auto p : result) {
    INFO("result start: {}, end: {}", p.first, p.first + p.second);
    if (detailed) {
      // In detailed mode, record every allocated range so that double
      // allocations and invalid frees can be detected.
      assert(!reserved_extent_tracker.contains(p.first, p.second));
      reserved_extent_tracker.insert(p.first, p.second);
    }
  }
  return result;
}

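// Return a previously allocated range to the free space and, in detailed
// mode, drop it from the reserved-extent tracker.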
void AvlAllocator::free_extent(rbm_abs_addr addr, size_t size)
{
  assert(total_size);
  assert(total_size > available_size);
  _add_to_tree(addr, size);
  if (detailed && reserved_extent_tracker.contains(addr, size)) {
    reserved_extent_tracker.erase(addr, size);
  }
}

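// Check whether [start, start + size) lies within the device bounds and is
// entirely covered by a single free extent.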
bool AvlAllocator::is_free_extent(rbm_abs_addr start, size_t size)
{
  rbm_abs_addr end = start + size;
  ceph_assert(size != 0);
  if (start < base_addr || base_addr + total_size < end) {
    return false;
  }

  auto rs = extent_tree.find(extent_range_t{start, end}, extent_tree.key_comp());
  if (rs != extent_tree.end() && rs->start <= start && rs->end >= end) {
    return true;
  }
  return false;
}
}  // namespace crimson::os::seastore