// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#pragma once

#include <map>
#include <optional>
#include <utility>
#include <boost/intrusive/list.hpp>
#include <boost/intrusive_ptr.hpp>
#include <boost/smart_ptr/intrusive_ref_counter.hpp>
#include <seastar/core/shared_future.hh>
#include <seastar/core/shared_ptr.hh>

#include "common/intrusive_lru.h"
#include "osd/object_state.h"
#include "crimson/common/exception.h"
#include "crimson/common/tri_mutex.h"
#include "crimson/osd/osd_operation.h"

namespace ceph {
  class Formatter;
}

namespace crimson::common {
  class ConfigProxy;
}

namespace crimson::osd {

class Watch;
struct SnapSetContext;
using SnapSetContextRef = boost::intrusive_ptr<SnapSetContext>;

template <typename OBC>
struct obc_to_hoid {
  using type = hobject_t;
  const type &operator()(const OBC &obc) {
    return obc.obs.oi.soid;
  }
};

struct SnapSetContext :
  public boost::intrusive_ref_counter<SnapSetContext,
                                      boost::thread_unsafe_counter>
{
  hobject_t oid;
  SnapSet snapset;
  /**
   * exists
   *
   * Because ObjectContexts are cached, we need to be able to express the
   * case where the object to which a cached ObjectContext refers does not
   * exist.  ObjectContexts for yet-to-be-created objects are initialized
   * with exists=false.  The ObjectContext for a deleted object will have
   * exists set to false until it falls out of cache (or another write
   * recreates the object).
   */
  bool exists = false;

  explicit SnapSetContext(const hobject_t& o) :
    oid(o), exists(false) {}
};

class ObjectContext : public ceph::common::intrusive_lru_base<
  ceph::common::intrusive_lru_config<
    hobject_t, ObjectContext, obc_to_hoid<ObjectContext>>>
{
public:
  ObjectState obs;
  SnapSetContextRef ssc;
  // The watch/notify machinery stays off the hot, frequently exercised
  // paths, so std::map is used here mostly for developer convenience.
  using watch_key_t = std::pair<uint64_t, entity_name_t>;
  std::map<watch_key_t, seastar::shared_ptr<crimson::osd::Watch>> watchers;

  ObjectContext(hobject_t hoid) : obs(std::move(hoid)) {}

  const hobject_t &get_oid() const {
    return obs.oi.soid;
  }

  bool is_head() const {
    return get_oid().is_head();
  }

  hobject_t get_head_oid() const {
    return get_oid().get_head();
  }

  const SnapSet &get_head_ss() const {
    ceph_assert(is_head());
    ceph_assert(ssc);
    return ssc->snapset;
  }

  void set_head_state(ObjectState &&_obs, SnapSetContextRef &&_ssc) {
    ceph_assert(is_head());
    obs = std::move(_obs);
    ssc = std::move(_ssc);
  }

  void set_clone_state(ObjectState &&_obs) {
    ceph_assert(!is_head());
    obs = std::move(_obs);
  }

  /// pass the provided exception to any waiting consumers of this ObjectContext
  template<typename Exception>
  void interrupt(Exception ex) {
    lock.abort(std::move(ex));
    if (recovery_read_marker) {
      drop_recovery_read();
    }
  }
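
  /*
   * Usage sketch (hypothetical, for illustration only): interrupt() lets
   * the owner abort every future currently waiting on this obc's lock,
   * e.g. when the PG's interval changes and in-flight operations must be
   * restarted.  The exception type below is made up for the example:
   *
   *   struct pg_interval_changed : std::exception {};
   *
   *   void on_interval_change(ObjectContextRef obc) {
   *     obc->interrupt(pg_interval_changed{});
   *   }
   */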

private:
  tri_mutex lock;
  bool recovery_read_marker = false;

  template <typename Lock, typename Func>
  auto _with_lock(Lock&& lock, Func&& func) {
    // hold a ref to this obc so it stays alive until the lock is released
    Ref obc = this;
    return lock.lock().then([&lock, func = std::forward<Func>(func), obc]() mutable {
      return seastar::futurize_invoke(func).finally([&lock, obc] {
        lock.unlock();
      });
    });
  }

  boost::intrusive::list_member_hook<> list_hook;
  // number of outstanding append_to() calls; the single hook is linked on
  // the first append and unlinked when the count returns to zero
  uint64_t list_link_cnt = 0;

public:

  template <typename ListType>
  void append_to(ListType& list) {
    if (list_link_cnt++ == 0) {
      list.push_back(*this);
    }
  }

  template <typename ListType>
  void remove_from(ListType&& list) {
    assert(list_link_cnt > 0);
    if (--list_link_cnt == 0) {
      list.erase(std::decay_t<ListType>::s_iterator_to(*this));
    }
  }

  using obc_accessing_option_t = boost::intrusive::member_hook<
    ObjectContext,
    boost::intrusive::list_member_hook<>,
    &ObjectContext::list_hook>;

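  /*
   * Usage sketch (the list type is assumed here, for illustration): the
   * hook and counter above let an owner keep an intrusive list of obcs
   * currently being accessed without double-linking an obc that is
   * appended more than once:
   *
   *   using obc_accessing_list_t = boost::intrusive::list<
   *     ObjectContext, ObjectContext::obc_accessing_option_t>;
   *
   *   obc_accessing_list_t accessing;
   *   obc->append_to(accessing);    // links only on the first append
   *   obc->remove_from(accessing);  // unlinks once the count reaches 0
   */
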
  template<RWState::State Type, typename InterruptCond = void, typename Func>
  auto with_lock(Func&& func) {
    if constexpr (!std::is_void_v<InterruptCond>) {
      auto wrapper =
        ::crimson::interruptible::interruptor<InterruptCond>::wrap_function(
          std::forward<Func>(func));
      switch (Type) {
      case RWState::RWWRITE:
        return _with_lock(lock.for_write(), std::move(wrapper));
      case RWState::RWREAD:
        return _with_lock(lock.for_read(), std::move(wrapper));
      case RWState::RWEXCL:
        return _with_lock(lock.for_excl(), std::move(wrapper));
      case RWState::RWNONE:
        return seastar::futurize_invoke(std::move(wrapper));
      default:
        assert(0 == "noop");
      }
    } else {
      switch (Type) {
      case RWState::RWWRITE:
        return _with_lock(lock.for_write(), std::forward<Func>(func));
      case RWState::RWREAD:
        return _with_lock(lock.for_read(), std::forward<Func>(func));
      case RWState::RWEXCL:
        return _with_lock(lock.for_excl(), std::forward<Func>(func));
      case RWState::RWNONE:
        return seastar::futurize_invoke(std::forward<Func>(func));
      default:
        assert(0 == "noop");
      }
    }
  }
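
  /*
   * Usage sketch (names are illustrative): run a function while holding
   * the lock in the requested mode; the lock is dropped in the finally
   * block once the returned future resolves:
   *
   *   seastar::future<> inspect(ObjectContextRef obc) {
   *     return obc->with_lock<RWState::RWREAD>([obc] {
   *       // obc->obs may be read while the read lock is held
   *       return seastar::now();
   *     });
   *   }
   */
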
  template<RWState::State Type, typename InterruptCond = void, typename Func>
  auto with_promoted_lock(Func&& func) {
    if constexpr (!std::is_void_v<InterruptCond>) {
      auto wrapper =
        ::crimson::interruptible::interruptor<InterruptCond>::wrap_function(
          std::forward<Func>(func));
      switch (Type) {
      case RWState::RWWRITE:
        return _with_lock(lock.excl_from_write(), std::move(wrapper));
      case RWState::RWREAD:
        return _with_lock(lock.excl_from_read(), std::move(wrapper));
      case RWState::RWEXCL:
        return _with_lock(lock.excl_from_excl(), std::move(wrapper));
      case RWState::RWNONE:
        return _with_lock(lock.for_excl(), std::move(wrapper));
      default:
        assert(0 == "noop");
      }
    } else {
      switch (Type) {
      case RWState::RWWRITE:
        return _with_lock(lock.excl_from_write(), std::forward<Func>(func));
      case RWState::RWREAD:
        return _with_lock(lock.excl_from_read(), std::forward<Func>(func));
      case RWState::RWEXCL:
        return _with_lock(lock.excl_from_excl(), std::forward<Func>(func));
      case RWState::RWNONE:
        return _with_lock(lock.for_excl(), std::forward<Func>(func));
      default:
        assert(0 == "noop");
      }
    }
  }
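
  /*
   * Promotion sketch (illustrative; assumes the caller already holds the
   * lock in `Type` mode): escalate to exclusive access for the duration
   * of the function, with the tri_mutex expected to return to its prior
   * mode on release:
   *
   *   obc->with_promoted_lock<RWState::RWREAD>([obc] {
   *     return seastar::now();  // exclusive access in here
   *   });
   */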

  bool empty() const {
    return !lock.is_acquired();
  }
  bool is_request_pending() const {
    return lock.is_acquired();
  }

  bool get_recovery_read() {
    if (lock.try_lock_for_read()) {
      recovery_read_marker = true;
      return true;
    } else {
      return false;
    }
  }
  void wait_recovery_read() {
    assert(lock.get_readers() > 0);
    recovery_read_marker = true;
  }
  void drop_recovery_read() {
    assert(recovery_read_marker);
    recovery_read_marker = false;
  }
  bool maybe_get_excl() {
    return lock.try_lock_for_excl();
  }
};
using ObjectContextRef = ObjectContext::Ref;

class ObjectContextRegistry : public md_config_obs_t {
  ObjectContext::lru_t obc_lru;

public:
  ObjectContextRegistry(crimson::common::ConfigProxy &conf);
  ~ObjectContextRegistry();

  std::pair<ObjectContextRef, bool> get_cached_obc(const hobject_t &hoid) {
    return obc_lru.get_or_create(hoid);
  }
  ObjectContextRef maybe_get_cached_obc(const hobject_t &hoid) {
    return obc_lru.get(hoid);
  }
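
  /*
   * Usage sketch (illustrative; the bool is assumed here to mean "an
   * entry already existed"): look an obc up, creating it on a miss, so a
   * freshly created entry can be populated before use:
   *
   *   auto [obc, existed] = registry.get_cached_obc(hoid);
   *   if (!existed) {
   *     // new entry: load obc->obs / obc->ssc from the store first
   *   }
   */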

  void clear_range(const hobject_t &from,
                   const hobject_t &to) {
    obc_lru.clear_range(from, to);
  }

  template <class F>
  void for_each(F&& f) {
    obc_lru.for_each(std::forward<F>(f));
  }

  const char** get_tracked_conf_keys() const final;
  void handle_conf_change(const crimson::common::ConfigProxy& conf,
                          const std::set<std::string> &changed) final;
};

std::optional<hobject_t> resolve_oid(const SnapSet &ss,
                                     const hobject_t &oid);
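
/*
 * Usage sketch (assumed semantics): resolve_oid maps an oid carrying a
 * snapid through the head object's SnapSet to the clone that contains
 * that snap, yielding std::nullopt when the object does not logically
 * exist at that snapshot:
 *
 *   if (auto resolved = resolve_oid(obc->get_head_ss(), oid); resolved) {
 *     // read from *resolved
 *   }
 */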

} // namespace crimson::osd