]> git.proxmox.com Git - ceph.git/blob - ceph/src/crimson/os/seastore/onode_manager/staged-fltree/stages/node_stage.h
cf0ca463cbf4b596ca892a047bfa4165d1619b3c
[ceph.git] / ceph / src / crimson / os / seastore / onode_manager / staged-fltree / stages / node_stage.h
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*-
2 // vim: ts=8 sw=2 smarttab
3
4 #pragma once
5
6 #include "crimson/os/seastore/onode_manager/staged-fltree/node_types.h"
7 #include "key_layout.h"
8 #include "stage_types.h"
9
10 namespace crimson::os::seastore::onode {
11
12 class NodeExtentMutable;
13
14 /**
15 * node_extent_t
16 *
17 * The top indexing stage implementation for node N0/N1/N2/N3, implements
18 * staged contract as an indexable container, and provides access to node
19 * header.
20 *
 * The specific field layouts are defined by FieldType, which is one of
 * node_fields_0_t, node_fields_1_t, node_fields_2_t, internal_fields_3_t or
 * leaf_fields_3_t. See node_stage_layout.h for layout diagrams.
24 */
template <typename FieldType, node_type_t _NODE_TYPE>
class node_extent_t {
 public:
  using value_t = value_type_t<_NODE_TYPE>;
  using num_keys_t = typename FieldType::num_keys_t;
  static constexpr node_type_t NODE_TYPE = _NODE_TYPE;
  static constexpr field_type_t FIELD_TYPE = FieldType::FIELD_TYPE;
  // Extent size is FieldType::SIZE rounded up to a whole number of disk
  // blocks.
  static constexpr node_offset_t EXTENT_SIZE =
    (FieldType::SIZE + DISK_BLOCK_SIZE - 1u) / DISK_BLOCK_SIZE * DISK_BLOCK_SIZE;

  // TODO: remove
  node_extent_t() = default;

  // Wrap a read-only view over the node fields; debug-validates that the
  // on-disk header matches this instantiation's node/field type.
  node_extent_t(const FieldType* p_fields) : p_fields{p_fields} {
    validate(*p_fields);
  }

  // Raw pointer to the start of the node extent.
  const char* p_start() const { return fields_start(*p_fields); }

  // Translate a node-local offset into an absolute pointer inside the extent.
  const char* off_to_ptr(node_offset_t off) const {
    assert(off <= FieldType::SIZE);
    return p_start() + off;
  }

  // Translate an absolute pointer inside the extent back into a node-local
  // offset.
  node_offset_t ptr_to_off(const void* ptr) const {
    auto _ptr = static_cast<const char*>(ptr);
    assert(_ptr >= p_start());
    auto off = _ptr - p_start();
    assert(off <= FieldType::SIZE);
    return off;
  }

  bool is_level_tail() const { return p_fields->is_level_tail(); }
  level_t level() const { return p_fields->header.level; }
  // Free bytes remaining after all current keys (delegated to FieldType).
  node_offset_t free_size() const {
    return p_fields->template free_size_before<NODE_TYPE>(keys());
  }
  node_offset_t total_size() const { return p_fields->total_size(); }
  const char* p_left_bound() const;

  // INTERNAL nodes only: pointer to the packed laddr of the tail child.
  // Only callable when this node is the tail of its level (asserted).
  template <node_type_t T = NODE_TYPE>
  std::enable_if_t<T == node_type_t::INTERNAL, const laddr_packed_t*>
  get_end_p_laddr() const {
    assert(is_level_tail());
    if constexpr (FIELD_TYPE == field_type_t::N3) {
      // N3 keeps child addresses in a dense array; the tail child sits one
      // slot past the last key.
      return &p_fields->child_addrs[keys()];
    } else {
      // Other layouts place the tail laddr immediately before the end
      // offset of the last item.
      auto offset_start = p_fields->get_item_end_offset(keys());
      assert(offset_start <= FieldType::SIZE);
      offset_start -= sizeof(laddr_packed_t);
      auto p_addr = p_start() + offset_start;
      return reinterpret_cast<const laddr_packed_t*>(p_addr);
    }
  }

  // container type system (staged contract: an INDEXABLE container)
  using key_get_type = typename FieldType::key_get_type;
  static constexpr auto CONTAINER_TYPE = ContainerType::INDEXABLE;
  index_t keys() const { return p_fields->num_keys; }
  key_get_type operator[] (index_t index) const { return p_fields->get_key(index); }
  // Bytes consumed by the first `index` entries: total size minus the free
  // size remaining before that index.
  node_offset_t size_before(index_t index) const {
    auto free_size = p_fields->template free_size_before<NODE_TYPE>(index);
    assert(total_size() >= free_size);
    return total_size() - free_size;
  }
  node_offset_t size_to_nxt_at(index_t index) const;
  // Per-entry metadata overhead; a layout constant, independent of `index`.
  node_offset_t size_overhead_at(index_t index) const {
    return FieldType::ITEM_OVERHEAD; }
  memory_range_t get_nxt_container(index_t index) const;

  // N3 only: direct access to the value at `index` -- a child address for
  // internal nodes, or the inline onode payload for leaf nodes.
  template <typename T = FieldType>
  std::enable_if_t<T::FIELD_TYPE == field_type_t::N3, const value_t*>
  get_p_value(index_t index) const {
    assert(index < keys());
    if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
      return &p_fields->child_addrs[index];
    } else {
      auto range = get_nxt_container(index);
      auto ret = reinterpret_cast<const onode_t*>(range.p_start);
      // the onode's self-described size must exactly fill its range
      assert(range.p_start + ret->size == range.p_end);
      return ret;
    }
  }

  void encode(const char* p_node_start, ceph::bufferlist& encoded) const {
    assert(p_node_start == p_start());
    // nothing to encode as the container range is the entire extent
  }

  static node_extent_t decode(const char* p_node_start,
                              ceph::bufferlist::const_iterator& delta) {
    // nothing to decode
    return node_extent_t(reinterpret_cast<const FieldType*>(p_node_start));
  }

  // Debug-only consistency checks between the on-disk header and this
  // template instantiation; compiles to nothing in release builds.
  static void validate(const FieldType& fields) {
#ifndef NDEBUG
    assert(fields.header.get_node_type() == NODE_TYPE);
    assert(fields.header.get_field_type() == FieldType::FIELD_TYPE);
    if constexpr (NODE_TYPE == node_type_t::INTERNAL) {
      // internal nodes live above the leaves
      assert(fields.header.level > 0u);
    } else {
      assert(fields.header.level == 0u);
    }
#endif
  }

  // Initialize a freshly allocated mutable extent with the given header
  // parameters (field type, node type, is-level-tail, level).
  static void bootstrap_extent(
      NodeExtentMutable&, field_type_t, node_type_t, bool, level_t);

  static void update_is_level_tail(NodeExtentMutable&, const node_extent_t&, bool);

  static node_offset_t header_size() { return FieldType::HEADER_SIZE; }

  // Estimate the node space required to insert one `key`/`value` pair at
  // this stage, including layout-specific inline storage.
  template <KeyT KT>
  static node_offset_t estimate_insert(
      const full_key_t<KT>& key, const value_t& value) {
    auto size = FieldType::estimate_insert_one();
    if constexpr (FIELD_TYPE == field_type_t::N2) {
      // N2 stores the ns/oid view of the key inline with the entry
      size += ns_oid_view_t::estimate_size<KT>(key);
    } else if constexpr (FIELD_TYPE == field_type_t::N3 &&
                         NODE_TYPE == node_type_t::LEAF) {
      // N3 leaf stores the full value payload inline
      size += value.size;
    }
    return size;
  }

  // Insert a full key/value at `index`. Only meaningful for N3 (which is
  // not implemented yet); other layouts insert via insert_prefix_at() plus
  // the next stage, so reaching here is a logic error.
  template <KeyT KT>
  static const value_t* insert_at(
      NodeExtentMutable& mut, const node_extent_t&,
      const full_key_t<KT>& key, const value_t& value,
      index_t index, node_offset_t size, const char* p_left_bound) {
    if constexpr (FIELD_TYPE == field_type_t::N3) {
      ceph_abort("not implemented");
    } else {
      ceph_abort("impossible");
    }
  }

  // Insert only this stage's prefix of `key` at `index`, returning the
  // memory range reserved for the next-stage container.
  template <KeyT KT>
  static memory_range_t insert_prefix_at(
      NodeExtentMutable&, const node_extent_t&,
      const full_key_t<KT>& key,
      index_t index, node_offset_t size, const char* p_left_bound);

  // Grow/shrink the recorded size of the entry at `index` by `change` bytes.
  static void update_size_at(
      NodeExtentMutable&, const node_extent_t&, index_t index, int change);

  // Truncate the node to the first `index` entries; returns freed bytes.
  static node_offset_t trim_until(
      NodeExtentMutable&, const node_extent_t&, index_t index);
  // Partially trim the entry at `index` by `trimmed` bytes.
  static node_offset_t trim_at(NodeExtentMutable&, const node_extent_t&,
                               index_t index, node_offset_t trimmed);

  template <KeyT KT>
  class Appender;

 private:
  const FieldType& fields() const { return *p_fields; }
  const FieldType* p_fields;
};
184
/**
 * Appender
 *
 * Builds up the entries of an (initially empty) node extent during merge,
 * split or rebuild. Writing is two-ended: fixed-size slots (offsets/keys)
 * grow rightwards after the header via p_append_left, while variable-size
 * item payloads grow leftwards from the end of the extent via
 * p_append_right.
 */
template <typename FieldType, node_type_t NODE_TYPE>
template <KeyT KT>
class node_extent_t<FieldType, NODE_TYPE>::Appender {
 public:
  // Start appending into the empty node at `p_append`, mutable through
  // `p_mut`. Debug-checks the pre-written header and that no keys exist yet.
  Appender(NodeExtentMutable* p_mut, char* p_append)
      : p_mut{p_mut}, p_start{p_append} {
#ifndef NDEBUG
    auto p_fields = reinterpret_cast<const FieldType*>(p_append);
    assert(*(p_fields->header.get_field_type()) == FIELD_TYPE);
    assert(p_fields->header.get_node_type() == NODE_TYPE);
    assert(p_fields->num_keys == 0);
#endif
    p_append_left = p_start + FieldType::HEADER_SIZE;
    p_append_right = p_start + FieldType::SIZE;
  }
  // Copy `items` existing entries starting at `from` out of `src`.
  void append(const node_extent_t& src, index_t from, index_t items);
  // Append one new key/value; the inserted value pointer is returned
  // through the out-parameter.
  void append(const full_key_t<KT>&, const value_t&, const value_t*&);
  // Finish appending; returns the final left-bound of the appended data.
  char* wrap();
  // Open the next-stage container for the given key; returns the mutable
  // extent and the write position for the nested appender.
  std::tuple<NodeExtentMutable*, char*> open_nxt(const key_get_type&);
  std::tuple<NodeExtentMutable*, char*> open_nxt(const full_key_t<KT>&);
  // Seal the entry whose next-stage content ends (leftwards) at `p_append`:
  // record its start offset in the left region and bump the key count.
  void wrap_nxt(char* p_append) {
    if constexpr (FIELD_TYPE != field_type_t::N3) {
      // the two append frontiers must not have crossed
      assert(p_append < p_append_right);
      assert(p_append_left < p_append);
      p_append_right = p_append;
      FieldType::append_offset(*p_mut, p_append - p_start, p_append_left);
      ++num_keys;
    } else {
      ceph_abort("not implemented");
    }
  }

 private:
  const node_extent_t* p_src = nullptr;
  NodeExtentMutable* p_mut;
  char* p_start;          // start of the extent being built
  char* p_append_left;    // next write position in the left (slot) region
  char* p_append_right;   // next write position in the right (item) region
  num_keys_t num_keys = 0;
};
225
226 }