]>
git.proxmox.com Git - ceph.git/blob - ceph/src/osd/osd_internal_types.h
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
4 #ifndef CEPH_OSD_INTERNAL_TYPES_H
5 #define CEPH_OSD_INTERNAL_TYPES_H
9 #include "object_state.h"
/*
 * keep tabs on object modifications that are in flight.
 * we need to know the projected existence, size, snapset,
 * etc., because we don't send writes down to disk until after
 * [remainder of comment lost in extraction]
 */
18 struct SnapSetContext
{
25 explicit SnapSetContext(const hobject_t
& o
) :
26 oid(o
), ref(0), registered(false), exists(true) { }
29 typedef std::shared_ptr
<ObjectContext
> ObjectContextRef
;
31 struct ObjectContext
{
34 SnapSetContext
*ssc
; // may be null
36 Context
*destructor_callback
;
40 // any entity in obs.oi.watchers MUST be in either watchers or unconnected_watchers.
41 std::map
<std::pair
<uint64_t, entity_name_t
>, WatchRef
> watchers
;
44 std::map
<std::string
, ceph::buffer::list
, std::less
<>> attr_cache
;
47 std::list
<OpRequestRef
> waiters
; ///< ops waiting on state change
48 bool get_read(OpRequestRef
& op
) {
49 if (rwstate
.get_read_lock()) {
52 // Now we really need to bump up the ref-counter.
53 waiters
.emplace_back(op
);
54 rwstate
.inc_waiters();
57 bool get_write(OpRequestRef
& op
, bool greedy
=false) {
58 if (rwstate
.get_write_lock(greedy
)) {
62 waiters
.emplace_back(op
);
63 rwstate
.inc_waiters();
67 bool get_excl(OpRequestRef
& op
) {
68 if (rwstate
.get_excl_lock()) {
72 waiters
.emplace_back(op
);
73 rwstate
.inc_waiters();
77 void wake(std::list
<OpRequestRef
> *requeue
) {
78 rwstate
.release_waiters();
79 requeue
->splice(requeue
->end(), waiters
);
81 void put_read(std::list
<OpRequestRef
> *requeue
) {
82 if (rwstate
.put_read()) {
86 void put_write(std::list
<OpRequestRef
> *requeue
) {
87 if (rwstate
.put_write()) {
91 void put_excl(std::list
<OpRequestRef
> *requeue
) {
92 if (rwstate
.put_excl()) {
96 bool empty() const { return rwstate
.empty(); }
98 bool get_lock_type(OpRequestRef
& op
, RWState::State type
) {
100 case RWState::RWWRITE
:
101 return get_write(op
);
102 case RWState::RWREAD
:
104 case RWState::RWEXCL
:
107 ceph_abort_msg("invalid lock type");
111 bool get_write_greedy(OpRequestRef
& op
) {
112 return get_write(op
, true);
114 bool get_snaptrimmer_write(bool mark_if_unsuccessful
) {
115 return rwstate
.get_snaptrimmer_write(mark_if_unsuccessful
);
117 bool get_recovery_read() {
118 return rwstate
.get_recovery_read();
120 bool try_get_read_lock() {
121 return rwstate
.get_read_lock();
123 void drop_recovery_read(std::list
<OpRequestRef
> *ls
) {
124 ceph_assert(rwstate
.recovery_read_marker
);
126 rwstate
.recovery_read_marker
= false;
130 std::list
<OpRequestRef
> *to_wake
,
131 bool *requeue_recovery
,
132 bool *requeue_snaptrimmer
) {
134 case RWState::RWWRITE
:
137 case RWState::RWREAD
:
140 case RWState::RWEXCL
:
144 ceph_abort_msg("invalid lock type");
146 if (rwstate
.empty() && rwstate
.recovery_read_marker
) {
147 rwstate
.recovery_read_marker
= false;
148 *requeue_recovery
= true;
150 if (rwstate
.empty() && rwstate
.snaptrimmer_write_marker
) {
151 rwstate
.snaptrimmer_write_marker
= false;
152 *requeue_snaptrimmer
= true;
155 bool is_request_pending() {
156 return !rwstate
.empty();
161 destructor_callback(0),
162 blocked(false), requeue_scrub_on_unblock(false) {}
165 ceph_assert(rwstate
.empty());
166 if (destructor_callback
)
167 destructor_callback
->complete(0);
171 ceph_assert(!blocked
);
175 ceph_assert(blocked
);
178 bool is_blocked() const {
182 /// in-progress copyfrom ops for this object
184 bool requeue_scrub_on_unblock
:1; // true if we need to requeue scrub on unblock
188 inline std::ostream
& operator<<(std::ostream
& out
, const ObjectState
& obs
)
196 inline std::ostream
& operator<<(std::ostream
& out
, const ObjectContext
& obc
)
198 return out
<< "obc(" << obc
.obs
<< " " << obc
.rwstate
<< ")";
201 class ObcLockManager
{
202 struct ObjectLockState
{
203 ObjectContextRef obc
;
206 ObjectContextRef obc
,
208 : obc(std::move(obc
)), type(type
) {}
210 std::map
<hobject_t
, ObjectLockState
> locks
;
212 ObcLockManager() = default;
213 ObcLockManager(ObcLockManager
&&) = default;
214 ObcLockManager(const ObcLockManager
&) = delete;
215 ObcLockManager
&operator=(ObcLockManager
&&) = default;
217 return locks
.empty();
221 const hobject_t
&hoid
,
222 ObjectContextRef
& obc
,
224 ceph_assert(locks
.find(hoid
) == locks
.end());
225 if (obc
->get_lock_type(op
, type
)) {
226 locks
.insert(std::make_pair(hoid
, ObjectLockState(obc
, type
)));
232 /// Get write lock, ignore starvation
233 bool take_write_lock(
234 const hobject_t
&hoid
,
235 ObjectContextRef obc
) {
236 ceph_assert(locks
.find(hoid
) == locks
.end());
237 if (obc
->rwstate
.take_write_lock()) {
240 hoid
, ObjectLockState(obc
, RWState::RWWRITE
)));
246 /// Get write lock for snap trim
247 bool get_snaptrimmer_write(
248 const hobject_t
&hoid
,
249 ObjectContextRef obc
,
250 bool mark_if_unsuccessful
) {
251 ceph_assert(locks
.find(hoid
) == locks
.end());
252 if (obc
->get_snaptrimmer_write(mark_if_unsuccessful
)) {
255 hoid
, ObjectLockState(obc
, RWState::RWWRITE
)));
261 /// Get write lock greedy
262 bool get_write_greedy(
263 const hobject_t
&hoid
,
264 ObjectContextRef obc
,
266 ceph_assert(locks
.find(hoid
) == locks
.end());
267 if (obc
->get_write_greedy(op
)) {
270 hoid
, ObjectLockState(obc
, RWState::RWWRITE
)));
277 /// try get read lock
278 bool try_get_read_lock(
279 const hobject_t
&hoid
,
280 ObjectContextRef obc
) {
281 ceph_assert(locks
.find(hoid
) == locks
.end());
282 if (obc
->try_get_read_lock()) {
286 ObjectLockState(obc
, RWState::RWREAD
)));
294 std::list
<std::pair
<ObjectContextRef
, std::list
<OpRequestRef
> > > *to_requeue
,
295 bool *requeue_recovery
,
296 bool *requeue_snaptrimmer
) {
297 for (auto& p
: locks
) {
298 std::list
<OpRequestRef
> _to_requeue
;
299 p
.second
.obc
->put_lock_type(
303 requeue_snaptrimmer
);
305 // We can safely std::move here as the whole `locks` is going
306 // to die just after the loop.
307 to_requeue
->emplace_back(std::move(p
.second
.obc
),
308 std::move(_to_requeue
));
314 ceph_assert(locks
.empty());