// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
// vim: ts=8 sw=2 smarttab
#include <cassert>
#include <tuple>
#include <type_traits>

#include "crimson/os/seastore/onode_manager/staged-fltree/node_types.h"
#include "key_layout.h"
#include "stage_types.h"
10 namespace crimson::os::seastore::onode
{
12 class NodeExtentMutable
;
/**
 * node_extent_t
 *
 * The top indexing stage implementation for node N0/N1/N2/N3, implements
 * staged contract as an indexable container, and provides access to node
 * header.
 *
 * The specific field layout are defined by FieldType which are
 * node_fields_0_t, node_fields_1_t, node_fields_2_t, internal_fields_3_t and
 * leaf_fields_3_t. Diagrams see node_stage_layout.h.
 */
25 template <typename FieldType
, node_type_t _NODE_TYPE
>
28 using value_t
= value_type_t
<_NODE_TYPE
>;
29 using num_keys_t
= typename
FieldType::num_keys_t
;
30 static constexpr node_type_t NODE_TYPE
= _NODE_TYPE
;
31 static constexpr field_type_t FIELD_TYPE
= FieldType::FIELD_TYPE
;
32 static constexpr node_offset_t EXTENT_SIZE
=
33 (FieldType::SIZE
+ DISK_BLOCK_SIZE
- 1u) / DISK_BLOCK_SIZE
* DISK_BLOCK_SIZE
;
36 node_extent_t() = default;
38 node_extent_t(const FieldType
* p_fields
) : p_fields
{p_fields
} {
42 const char* p_start() const { return fields_start(*p_fields
); }
44 const char* off_to_ptr(node_offset_t off
) const {
45 assert(off
<= FieldType::SIZE
);
46 return p_start() + off
;
49 node_offset_t
ptr_to_off(const void* ptr
) const {
50 auto _ptr
= static_cast<const char*>(ptr
);
51 assert(_ptr
>= p_start());
52 auto off
= _ptr
- p_start();
53 assert(off
<= FieldType::SIZE
);
57 bool is_level_tail() const { return p_fields
->is_level_tail(); }
58 level_t
level() const { return p_fields
->header
.level
; }
59 node_offset_t
free_size() const {
60 return p_fields
->template free_size_before
<NODE_TYPE
>(keys());
62 node_offset_t
total_size() const { return p_fields
->total_size(); }
63 const char* p_left_bound() const;
64 template <node_type_t T
= NODE_TYPE
>
65 std::enable_if_t
<T
== node_type_t::INTERNAL
, const laddr_packed_t
*>
66 get_end_p_laddr() const {
67 assert(is_level_tail());
68 if constexpr (FIELD_TYPE
== field_type_t::N3
) {
69 return &p_fields
->child_addrs
[keys()];
71 auto offset_start
= p_fields
->get_item_end_offset(keys());
72 assert(offset_start
<= FieldType::SIZE
);
73 offset_start
-= sizeof(laddr_packed_t
);
74 auto p_addr
= p_start() + offset_start
;
75 return reinterpret_cast<const laddr_packed_t
*>(p_addr
);
79 // container type system
80 using key_get_type
= typename
FieldType::key_get_type
;
81 static constexpr auto CONTAINER_TYPE
= ContainerType::INDEXABLE
;
82 index_t
keys() const { return p_fields
->num_keys
; }
83 key_get_type
operator[] (index_t index
) const { return p_fields
->get_key(index
); }
84 node_offset_t
size_before(index_t index
) const {
85 auto free_size
= p_fields
->template free_size_before
<NODE_TYPE
>(index
);
86 assert(total_size() >= free_size
);
87 return total_size() - free_size
;
89 node_offset_t
size_to_nxt_at(index_t index
) const;
90 node_offset_t
size_overhead_at(index_t index
) const {
91 return FieldType::ITEM_OVERHEAD
; }
92 memory_range_t
get_nxt_container(index_t index
) const;
94 template <typename T
= FieldType
>
95 std::enable_if_t
<T::FIELD_TYPE
== field_type_t::N3
, const value_t
*>
96 get_p_value(index_t index
) const {
97 assert(index
< keys());
98 if constexpr (NODE_TYPE
== node_type_t::INTERNAL
) {
99 return &p_fields
->child_addrs
[index
];
101 auto range
= get_nxt_container(index
);
102 auto ret
= reinterpret_cast<const onode_t
*>(range
.p_start
);
103 assert(range
.p_start
+ ret
->size
== range
.p_end
);
108 void encode(const char* p_node_start
, ceph::bufferlist
& encoded
) const {
109 assert(p_node_start
== p_start());
110 // nothing to encode as the container range is the entire extent
113 static node_extent_t
decode(const char* p_node_start
,
114 ceph::bufferlist::const_iterator
& delta
) {
116 return node_extent_t(reinterpret_cast<const FieldType
*>(p_node_start
));
119 static void validate(const FieldType
& fields
) {
121 assert(fields
.header
.get_node_type() == NODE_TYPE
);
122 assert(fields
.header
.get_field_type() == FieldType::FIELD_TYPE
);
123 if constexpr (NODE_TYPE
== node_type_t::INTERNAL
) {
124 assert(fields
.header
.level
> 0u);
126 assert(fields
.header
.level
== 0u);
131 static void bootstrap_extent(
132 NodeExtentMutable
&, field_type_t
, node_type_t
, bool, level_t
);
134 static void update_is_level_tail(NodeExtentMutable
&, const node_extent_t
&, bool);
136 static node_offset_t
header_size() { return FieldType::HEADER_SIZE
; }
139 static node_offset_t
estimate_insert(
140 const full_key_t
<KT
>& key
, const value_t
& value
) {
141 auto size
= FieldType::estimate_insert_one();
142 if constexpr (FIELD_TYPE
== field_type_t::N2
) {
143 size
+= ns_oid_view_t::estimate_size
<KT
>(key
);
144 } else if constexpr (FIELD_TYPE
== field_type_t::N3
&&
145 NODE_TYPE
== node_type_t::LEAF
) {
152 static const value_t
* insert_at(
153 NodeExtentMutable
& mut
, const node_extent_t
&,
154 const full_key_t
<KT
>& key
, const value_t
& value
,
155 index_t index
, node_offset_t size
, const char* p_left_bound
) {
156 if constexpr (FIELD_TYPE
== field_type_t::N3
) {
157 ceph_abort("not implemented");
159 ceph_abort("impossible");
164 static memory_range_t
insert_prefix_at(
165 NodeExtentMutable
&, const node_extent_t
&,
166 const full_key_t
<KT
>& key
,
167 index_t index
, node_offset_t size
, const char* p_left_bound
);
169 static void update_size_at(
170 NodeExtentMutable
&, const node_extent_t
&, index_t index
, int change
);
172 static node_offset_t
trim_until(
173 NodeExtentMutable
&, const node_extent_t
&, index_t index
);
174 static node_offset_t
trim_at(NodeExtentMutable
&, const node_extent_t
&,
175 index_t index
, node_offset_t trimmed
);
181 const FieldType
& fields() const { return *p_fields
; }
182 const FieldType
* p_fields
;
185 template <typename FieldType
, node_type_t NODE_TYPE
>
187 class node_extent_t
<FieldType
, NODE_TYPE
>::Appender
{
189 Appender(NodeExtentMutable
* p_mut
, char* p_append
)
190 : p_mut
{p_mut
}, p_start
{p_append
} {
192 auto p_fields
= reinterpret_cast<const FieldType
*>(p_append
);
193 assert(*(p_fields
->header
.get_field_type()) == FIELD_TYPE
);
194 assert(p_fields
->header
.get_node_type() == NODE_TYPE
);
195 assert(p_fields
->num_keys
== 0);
197 p_append_left
= p_start
+ FieldType::HEADER_SIZE
;
198 p_append_right
= p_start
+ FieldType::SIZE
;
200 void append(const node_extent_t
& src
, index_t from
, index_t items
);
201 void append(const full_key_t
<KT
>&, const value_t
&, const value_t
*&);
203 std::tuple
<NodeExtentMutable
*, char*> open_nxt(const key_get_type
&);
204 std::tuple
<NodeExtentMutable
*, char*> open_nxt(const full_key_t
<KT
>&);
205 void wrap_nxt(char* p_append
) {
206 if constexpr (FIELD_TYPE
!= field_type_t::N3
) {
207 assert(p_append
< p_append_right
);
208 assert(p_append_left
< p_append
);
209 p_append_right
= p_append
;
210 FieldType::append_offset(*p_mut
, p_append
- p_start
, p_append_left
);
213 ceph_abort("not implemented");
218 const node_extent_t
* p_src
= nullptr;
219 NodeExtentMutable
* p_mut
;
222 char* p_append_right
;
223 num_keys_t num_keys
= 0;