// Source: ceph/src/crimson/osd/object_context.h (extracted via git.proxmox.com gitweb;
// extraction is lossy -- embedded line numbers and dropped lines are artifacts).
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
9 #include <seastar/core/shared_future.hh>
10 #include <seastar/core/shared_ptr.hh>
12 #include "common/intrusive_lru.h"
13 #include "osd/object_state.h"
14 #include "crimson/common/exception.h"
15 #include "crimson/common/tri_mutex.h"
16 #include "crimson/osd/osd_operation.h"
22 namespace crimson::common
{
26 namespace crimson::osd
{
// Forward declaration; the full definition follows below.
29 struct SnapSetContext
;
// Intrusive ref-counted handle to a SnapSetContext (SnapSetContext derives
// from boost::intrusive_ref_counter below, which provides the count).
30 using SnapSetContextRef
= boost::intrusive_ptr
<SnapSetContext
>;
32 template <typename OBC
>
34 using type
= hobject_t
;
35 const type
&operator()(const OBC
&obc
) {
36 return obc
.obs
.oi
.soid
;
// SnapSetContext: ref-counted holder of an object's snapset state, shared
// via SnapSetContextRef. boost::thread_unsafe_counter means the refcount is
// not atomic -- presumably single-reactor (seastar shard-local) use; confirm.
// NOTE(review): member declarations that should appear here (at least `oid`
// and `exists`, both initialized by the constructor below) are missing from
// this extraction -- consult the pristine object_context.h.
40 struct SnapSetContext
:
41 public boost::intrusive_ref_counter
<SnapSetContext
,
42 boost::thread_unsafe_counter
>
50 * Because ObjectContext's are cached, we need to be able to express the case
51 * where the object to which a cached ObjectContext refers does not exist.
52 * ObjectContext's for yet-to-be-created objects are initialized with exists=false.
53 * The ObjectContext for a deleted object will have exists set to false until it falls
54 * out of cache (or another write recreates the object).
// Construct for object `o`; starts with exists=false per the cache
// semantics described in the comment above.
56 explicit SnapSetContext(const hobject_t
& o
) :
57 oid(o
), exists(false) {}
// ObjectContext: LRU-cached per-object state for the crimson OSD. Couples
// the object's ObjectState (see the constructor's `obs(...)` init) with its
// SnapSetContextRef and a multi-mode lock driven through with_lock() /
// with_promoted_lock(). Cached in an intrusive LRU keyed by hobject_t via
// obc_to_hoid.
// NOTE(review): this text is a lossy extraction -- several declarations
// (e.g. the `obs` and `lock` members referenced below, the `switch (Type)`
// headers, some function signatures and closing braces) are missing here;
// consult the pristine object_context.h before acting on this block.
60 class ObjectContext
: public ceph::common::intrusive_lru_base
<
61 ceph::common::intrusive_lru_config
<
62 hobject_t
, ObjectContext
, obc_to_hoid
<ObjectContext
>>>
// Snapset context; assigned for head objects via set_head_state() below.
66 SnapSetContextRef ssc
;
67 // the watch / notify machinery rather stays away from the hot and
68 // frequented paths. std::map is used mostly because of developer's
70 using watch_key_t
= std::pair
<uint64_t, entity_name_t
>;
// Registered watchers keyed by (cookie, watcher entity).
71 std::map
<watch_key_t
, seastar::shared_ptr
<crimson::osd::Watch
>> watchers
;
// Takes ownership of hoid and seeds the ObjectState member from it
// (the `obs` declaration itself is not visible in this extraction).
73 ObjectContext(hobject_t hoid
) : obs(std::move(hoid
)) {}
75 const hobject_t
&get_oid() const {
79 bool is_head() const {
80 return get_oid().is_head();
83 hobject_t
get_head_oid() const {
84 return get_oid().get_head();
// Head-only accessor (asserts is_head()); the return statement is lost in
// this extraction -- presumably reads through `ssc`; confirm upstream.
87 const SnapSet
&get_head_ss() const {
88 ceph_assert(is_head());
// Install state for a head object: both obs and ssc are moved in.
93 void set_head_state(ObjectState
&&_obs
, SnapSetContextRef
&&_ssc
) {
94 ceph_assert(is_head());
95 obs
= std::move(_obs
);
96 ssc
= std::move(_ssc
);
// Install state for a clone object: only obs; ssc stays untouched.
99 void set_clone_state(ObjectState
&&_obs
) {
100 ceph_assert(!is_head());
101 obs
= std::move(_obs
);
104 /// pass the provided exception to any waiting consumers of this ObjectContext
105 template<typename Exception
>
106 void interrupt(Exception ex
) {
107 lock
.abort(std::move(ex
));
108 if (recovery_read_marker
) {
109 drop_recovery_read();
// True while recovery holds a read lock on this obc (see
// get_recovery_read()/wait_recovery_read()/drop_recovery_read()).
115 bool recovery_read_marker
= false;
// Acquire `lock`, run func, and release on completion; the captured `obc`
// keeps this context alive for the duration.
// NOTE(review): the line defining the captured `obc` (and the unlock in the
// finally body) is missing from this extraction -- confirm upstream.
117 template <typename Lock
, typename Func
>
118 auto _with_lock(Lock
&& lock
, Func
&& func
) {
120 return lock
.lock().then([&lock
, func
= std::forward
<Func
>(func
), obc
]() mutable {
121 return seastar::futurize_invoke(func
).finally([&lock
, obc
] {
// Intrusive hook + membership count: the hook physically joins only the
// first list (count 0 -> 1); list_link_cnt tracks logical memberships so
// the hook is erased only when the last membership is dropped.
127 boost::intrusive::list_member_hook
<> list_hook
;
128 uint64_t list_link_cnt
= 0;
132 template <typename ListType
>
133 void append_to(ListType
& list
) {
134 if (list_link_cnt
++ == 0) {
135 list
.push_back(*this);
139 template <typename ListType
>
140 void remove_from(ListType
&& list
) {
141 assert(list_link_cnt
> 0);
142 if (--list_link_cnt
== 0) {
143 list
.erase(std::decay_t
<ListType
>::s_iterator_to(*this));
// Boost.Intrusive option wiring list_hook into containers of obcs.
147 using obc_accessing_option_t
= boost::intrusive::member_hook
<
149 boost::intrusive::list_member_hook
<>,
150 &ObjectContext::list_hook
>;
// Run func under the lock mode selected at compile time by Type
// (RWNONE runs without taking the lock); when InterruptCond is non-void,
// func is wrapped for interruption first.
// NOTE(review): the `switch (Type)` header lines are lost in this
// extraction; only the case labels remain visible below.
152 template<RWState::State Type
, typename InterruptCond
= void, typename Func
>
153 auto with_lock(Func
&& func
) {
154 if constexpr (!std::is_void_v
<InterruptCond
>) {
155 auto wrapper
= ::crimson::interruptible::interruptor
<InterruptCond
>::wrap_function(std::forward
<Func
>(func
));
157 case RWState::RWWRITE
:
158 return _with_lock(lock
.for_write(), std::move(wrapper
));
159 case RWState::RWREAD
:
160 return _with_lock(lock
.for_read(), std::move(wrapper
));
161 case RWState::RWEXCL
:
162 return _with_lock(lock
.for_excl(), std::move(wrapper
));
163 case RWState::RWNONE
:
164 return seastar::futurize_invoke(std::move(wrapper
));
170 case RWState::RWWRITE
:
171 return _with_lock(lock
.for_write(), std::forward
<Func
>(func
));
172 case RWState::RWREAD
:
173 return _with_lock(lock
.for_read(), std::forward
<Func
>(func
));
174 case RWState::RWEXCL
:
175 return _with_lock(lock
.for_excl(), std::forward
<Func
>(func
));
176 case RWState::RWNONE
:
177 return seastar::futurize_invoke(std::forward
<Func
>(func
));
// Like with_lock, but promotes an already-held lock of mode Type to
// exclusive (excl_from_*); RWNONE acquires exclusive outright.
183 template<RWState::State Type
, typename InterruptCond
= void, typename Func
>
184 auto with_promoted_lock(Func
&& func
) {
185 if constexpr (!std::is_void_v
<InterruptCond
>) {
186 auto wrapper
= ::crimson::interruptible::interruptor
<InterruptCond
>::wrap_function(std::forward
<Func
>(func
));
188 case RWState::RWWRITE
:
189 return _with_lock(lock
.excl_from_write(), std::move(wrapper
));
190 case RWState::RWREAD
:
191 return _with_lock(lock
.excl_from_read(), std::move(wrapper
));
192 case RWState::RWEXCL
:
193 return _with_lock(lock
.excl_from_excl(), std::move(wrapper
));
194 case RWState::RWNONE
:
195 return _with_lock(lock
.for_excl(), std::move(wrapper
));
201 case RWState::RWWRITE
:
202 return _with_lock(lock
.excl_from_write(), std::forward
<Func
>(func
));
203 case RWState::RWREAD
:
204 return _with_lock(lock
.excl_from_read(), std::forward
<Func
>(func
));
205 case RWState::RWEXCL
:
206 return _with_lock(lock
.excl_from_excl(), std::forward
<Func
>(func
));
207 case RWState::RWNONE
:
208 return _with_lock(lock
.for_excl(), std::forward
<Func
>(func
));
// NOTE(review): the header of the function returning !lock.is_acquired()
// is lost in this extraction (presumably an "empty/idle" predicate --
// confirm its name upstream).
216 return !lock
.is_acquired();
218 bool is_request_pending() const {
219 return lock
.is_acquired();
// Try to take a read lock on behalf of recovery; on success, record it in
// recovery_read_marker (the success-return line is lost in extraction).
222 bool get_recovery_read() {
223 if (lock
.try_lock_for_read()) {
224 recovery_read_marker
= true;
// Caller already holds a read lock (asserted); just set the marker.
230 void wait_recovery_read() {
231 assert(lock
.get_readers() > 0);
232 recovery_read_marker
= true;
234 void drop_recovery_read() {
235 assert(recovery_read_marker
);
236 recovery_read_marker
= false;
// Non-blocking attempt at the exclusive lock.
238 bool maybe_get_excl() {
239 return lock
.try_lock_for_excl();
// Canonical handle type for cached ObjectContexts -- the Ref type provided
// by the intrusive-LRU base ObjectContext derives from.
242 using ObjectContextRef
= ObjectContext::Ref
;
244 class ObjectContextRegistry
: public md_config_obs_t
{
245 ObjectContext::lru_t obc_lru
;
248 ObjectContextRegistry(crimson::common::ConfigProxy
&conf
);
249 ~ObjectContextRegistry();
251 std::pair
<ObjectContextRef
, bool> get_cached_obc(const hobject_t
&hoid
) {
252 return obc_lru
.get_or_create(hoid
);
254 ObjectContextRef
maybe_get_cached_obc(const hobject_t
&hoid
) {
255 return obc_lru
.get(hoid
);
258 void clear_range(const hobject_t
&from
,
259 const hobject_t
&to
) {
260 obc_lru
.clear_range(from
, to
);
264 void for_each(F
&& f
) {
265 obc_lru
.for_each(std::forward
<F
>(f
));
268 const char** get_tracked_conf_keys() const final
;
269 void handle_conf_change(const crimson::common::ConfigProxy
& conf
,
270 const std::set
<std::string
> &changed
) final
;
// Resolve `oid` against snapset `ss` -- presumably mapping a snapid-bearing
// oid to the clone object that actually contains that snap, with nullopt
// when no clone covers it. NOTE(review): the exact semantics live in the
// out-of-line definition -- confirm in the corresponding .cc.
273 std::optional
<hobject_t
> resolve_oid(const SnapSet
&ss
,
274 const hobject_t
&oid
);
276 } // namespace crimson::osd