// ceph/src/crimson/os/seastore/lba_manager/btree/btree_range_pin.h
// (extraction artifacts removed: stray "]>" and gitweb URL line)
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
6 #include <boost/intrusive/set.hpp>
8 #include "crimson/os/seastore/cached_extent.h"
9 #include "crimson/os/seastore/seastore_types.h"
namespace crimson::os::seastore::lba_manager::btree
{

// Handle type for a cached LBA btree node (TCachedExtentRef declared in
// cached_extent.h; LBANode is presumably forward-declared elsewhere).
using LBANodeRef = TCachedExtentRef<LBANode>;
16 struct lba_node_meta_t
{
21 bool is_parent_of(const lba_node_meta_t
&other
) const {
22 return (depth
== other
.depth
+ 1) &&
23 (begin
<= other
.begin
) &&
27 std::pair
<lba_node_meta_t
, lba_node_meta_t
> split_into(laddr_t pivot
) const {
28 return std::make_pair(
29 lba_node_meta_t
{begin
, pivot
, depth
},
30 lba_node_meta_t
{pivot
, end
, depth
});
33 static lba_node_meta_t
merge_from(
34 const lba_node_meta_t
&lhs
, const lba_node_meta_t
&rhs
) {
35 ceph_assert(lhs
.depth
== rhs
.depth
);
36 return lba_node_meta_t
{lhs
.begin
, rhs
.end
, lhs
.depth
};
39 static std::pair
<lba_node_meta_t
, lba_node_meta_t
>
40 rebalance(const lba_node_meta_t
&lhs
, const lba_node_meta_t
&rhs
, laddr_t pivot
) {
41 ceph_assert(lhs
.depth
== rhs
.depth
);
42 return std::make_pair(
43 lba_node_meta_t
{lhs
.begin
, pivot
, lhs
.depth
},
44 lba_node_meta_t
{pivot
, rhs
.end
, lhs
.depth
});
47 bool is_root() const {
48 return begin
== 0 && end
== L_ADDR_MAX
;
52 inline std::ostream
&operator<<(
54 const lba_node_meta_t
&rhs
)
56 return lhs
<< "btree_node_meta_t("
57 << "begin=" << rhs
.begin
58 << ", end=" << rhs
.end
59 << ", depth=" << rhs
.depth
/**
 * btree_range_pin_t
 *
 * Element tracked by btree_pin_set_t below.  Encapsulates the intrusive_set
 * hook, the lba_node_meta_t representing the lba range covered by a node,
 * and extent and ref members intended to hold a reference when the extent
 * needs to remain pinned in cache (tail of the original comment was lost
 * in extraction).
 */
class btree_pin_set_t;

class btree_range_pin_t : public boost::intrusive::set_base_hook<> {
  friend class btree_pin_set_t;

  /// lba range covered by the pinned node
  lba_node_meta_t range;

  /// back-pointer to the containing set; nullptr while unlinked (presumably
  /// assigned when the pin is added -- TODO confirm, body not visible here)
  btree_pin_set_t *pins = nullptr;

  // We need to be able to remember extent without holding a reference,
  // but we can do it more compactly -- TODO
  CachedExtent *extent = nullptr;

  /// intrusive index type used by btree_pin_set_t to order pins
  using index_t = boost::intrusive::set<btree_range_pin_t>;
85 static auto get_tuple(const lba_node_meta_t
&meta
) {
86 return std::make_tuple(-meta
.depth
, meta
.begin
);
  // NOTE(review): orphan fragment -- takes a strong reference to the tracked
  // extent (the enclosing method header and the `ref` member declaration
  // were lost in extraction).  TODO confirm against upstream.
  ref = CachedExtentRef(extent);
  btree_range_pin_t() = default;

  // NOTE(review): the member-init body of this ctor was lost in extraction --
  // presumably ": extent(extent) {}".  TODO confirm against upstream.
  btree_range_pin_t(CachedExtent *extent)

  /// Copies rhs's covered range while binding to the given extent; used when
  /// a pin migrates to a new version of an extent (see take_pin callers).
  btree_range_pin_t(const btree_range_pin_t &rhs, CachedExtent *extent)
    : range(rhs.range), extent(extent) {}
  // NOTE(review): body lost in extraction -- presumably reports whether the
  // (not-visible-here) ref member currently holds the extent.  TODO confirm.
  bool has_ref() const {

  /// True iff the covered range spans the entire laddr space (root node).
  bool is_root() const {
    return range.is_root();

  // NOTE(review): body lost in extraction -- presumably "range = nrange;".
  void set_range(const lba_node_meta_t &nrange) {

  // A pin may only be bound to an extent once (asserted); remainder of the
  // body was lost in extraction.
  void set_extent(CachedExtent *nextent) {
    ceph_assert(!extent);

  // NOTE(review): body lost in extraction -- presumably returns *extent.
  CachedExtent &get_extent() {

  /// Takes over other's membership in the pin set; defined out of line.
  void take_pin(btree_range_pin_t &other);
131 friend bool operator<(
132 const btree_range_pin_t
&lhs
, const btree_range_pin_t
&rhs
) {
133 return get_tuple(lhs
.range
) < get_tuple(rhs
.range
);
135 friend bool operator>(
136 const btree_range_pin_t
&lhs
, const btree_range_pin_t
&rhs
) {
137 return get_tuple(lhs
.range
) > get_tuple(rhs
.range
);
139 friend bool operator==(
140 const btree_range_pin_t
&lhs
, const btree_range_pin_t
&rhs
) {
141 return get_tuple(lhs
.range
) == rhs
.get_tuple(rhs
.range
);
  // NOTE(review): the two fragments below are transparent-comparator
  // operator() overloads (their "bool operator()(" openings and the
  // enclosing comparator struct header were lost in extraction); they
  // enable intrusive-set lookups keyed by a bare lba_node_meta_t.
  // TODO confirm against upstream.
    const btree_range_pin_t &lhs, const lba_node_meta_t &rhs) const {
    return get_tuple(lhs.range) < get_tuple(rhs);

    const lba_node_meta_t &lhs, const btree_range_pin_t &rhs) const {
    return get_tuple(lhs) < get_tuple(rhs.range);
155 friend std::ostream
&operator<<(
157 const btree_range_pin_t
&rhs
) {
158 return lhs
<< "btree_range_pin_t("
159 << "begin=" << rhs
.range
.begin
160 << ", end=" << rhs
.range
.end
161 << ", depth=" << rhs
.range
.depth
162 << ", extent=" << rhs
.extent
  friend class BtreeLBAPin;

  /// Non-trivial destructor, defined out of line.
  ~btree_range_pin_t();
/**
 * btree_pin_set_t
 *
 * Ensures that for every cached node, all parent LBANodes required
 * to map it are present in cache.  Relocating these nodes can
 * therefore be done without further reads or cache space.
 *
 * Contains a btree_range_pin_t for every clean or dirty LBANode
 * or LogicalCachedExtent instance in cache at any point in time.
 * For any LBANode, the contained btree_range_pin_t will hold
 * a reference to that node pinning it in cache as long as that
 * node has children in the set.  This invariant can be violated
 * only by calling retire_extent and is repaired by calling
 * check_parent synchronously after adding any new extents.
 */
class btree_pin_set_t {
  friend class btree_range_pin_t;
  using pins_t = btree_range_pin_t::index_t;
  // NOTE(review): the "pins_t pins;" member declaration appears to have been
  // lost in extraction -- it is referenced by the fragments below.

  /// Removes pin from set optionally checking whether parent has other children
  void remove_pin(btree_range_pin_t &pin, bool check_parent);

  // Replaces from with to in the set; semantics presumed from the name --
  // bodies are defined out of line.  TODO confirm.
  void replace_pin(btree_range_pin_t &to, btree_range_pin_t &from);

  /// Returns parent pin if exists
  btree_range_pin_t *maybe_get_parent(const lba_node_meta_t &pin);

  /// Returns earliest child pin if exist
  const btree_range_pin_t *maybe_get_first_child(
    const lba_node_meta_t &pin) const;

  /// Releases pin if it has no children
  void release_if_no_children(btree_range_pin_t &pin);

  /// Adds pin to set, assumes set is consistent
  void add_pin(btree_range_pin_t &pin);

  /*
   * retire/check_parent
   *
   * See BtreeLBAManager::complete_transaction.
   * retire removes the specified pin from the set, but does not
   * check parents.  After any new extents are added to the set,
   * the caller is required to call check_parent to restore the
   * invariant.
   */
  void retire(btree_range_pin_t &pin);
  void check_parent(btree_range_pin_t &pin);

  template <typename F>
  // NOTE(review): method header lost in extraction -- this fragment iterates
  // every pin in the set, presumably applying F.  TODO confirm upstream.
  for (auto &i : pins) {

  // NOTE(review): fragment, likely from the destructor -- asserts the set
  // has been fully drained before destruction.  TODO confirm upstream.
  ceph_assert(pins.empty());
class BtreeLBAPin : public LBAPin {
  friend class BtreeLBAManager;
  friend class LBABtree;

  /* NOTE(review): surviving comment fragment (head lost in extraction):
   * populated until link_extent is called to ensure cache residence
   * until add_pin is called.
   */
  CachedExtentRef parent;

  /// pin tracking the mapped range; registered via the enclosing manager
  btree_range_pin_t pin;

  BtreeLBAPin() = default;

  // NOTE(review): the ctor opening ("BtreeLBAPin(") and a "paddr_t paddr,"
  // parameter appear to have been lost in extraction -- paddr is referenced
  // in the init list below.  TODO confirm against upstream.
    CachedExtentRef parent,
    lba_node_meta_t &&meta)
    : parent(parent), paddr(paddr) {
    pin.set_range(std::move(meta));

  // NOTE(review): body lost in extraction -- presumably binds the pin to
  // ref via pin.set_extent(ref).  TODO confirm upstream.
  void link_extent(LogicalCachedExtent *ref) final {
262 extent_len_t
get_length() const final
{
263 ceph_assert(pin
.range
.end
> pin
.range
.begin
);
264 return pin
.range
.end
- pin
.range
.begin
;
267 paddr_t
get_paddr() const final
{
271 laddr_t
get_laddr() const final
{
272 return pin
.range
.begin
;
275 LBAPinRef
duplicate() const final
{
276 auto ret
= std::unique_ptr
<BtreeLBAPin
>(new BtreeLBAPin
);
277 ret
->pin
.set_range(pin
.range
);
279 ret
->parent
= parent
;
283 void take_pin(LBAPin
&opin
) final
{
284 pin
.take_pin(static_cast<BtreeLBAPin
&>(opin
).pin
);
287 bool has_been_invalidated() const final
{
288 return parent
->has_been_invalidated();