// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageWatcher.h"
#include "librbd/ImageState.h"
#include "librbd/exclusive_lock/PreAcquireRequest.h"
#include "librbd/exclusive_lock/PostAcquireRequest.h"
#include "librbd/exclusive_lock/PreReleaseRequest.h"
#include "librbd/io/ImageRequestWQ.h"
#include "librbd/Utils.h"
#include "common/Mutex.h"
#include "common/dout.h"

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::ExclusiveLock: " << this << " " \
                           << __func__

namespace librbd {

using namespace exclusive_lock;

template <typename I>
using ML = ManagedLock<I>;

template <typename I>
ExclusiveLock<I>::ExclusiveLock(I &image_ctx)
  : ML<I>(image_ctx.md_ctx, image_ctx.op_work_queue, image_ctx.header_oid,
          image_ctx.image_watcher, managed_lock::EXCLUSIVE,
          image_ctx.blacklist_on_break_lock,
          image_ctx.blacklist_expire_seconds),
    m_image_ctx(image_ctx) {
  Mutex::Locker locker(ML<I>::m_lock);
  ML<I>::set_state_uninitialized();
}

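// peer-proxied requests (e.g. proxied maintenance ops) can only be accepted
// when the lock is held, not shutting down, and not explicitly blocked; on
// rejection *ret_val carries the blocking error code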
template <typename I>
bool ExclusiveLock<I>::accept_requests(int *ret_val) const {
  Mutex::Locker locker(ML<I>::m_lock);

  bool accept_requests = (!ML<I>::is_state_shutdown() &&
                          ML<I>::is_state_locked() &&
                          m_request_blocked_count == 0);
  if (ret_val != nullptr) {
    *ret_val = m_request_blocked_ret_val;
  }

  ldout(m_image_ctx.cct, 20) << "=" << accept_requests << dendl;
  return accept_requests;
}

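// true if new image ops may start: the lock must be held or in the
// post-acquiring phase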
template <typename I>
bool ExclusiveLock<I>::accept_ops() const {
  Mutex::Locker locker(ML<I>::m_lock);
  bool accept = accept_ops(ML<I>::m_lock);
  ldout(m_image_ctx.cct, 20) << "=" << accept << dendl;
  return accept;
}

template <typename I>
bool ExclusiveLock<I>::accept_ops(const Mutex &lock) const {
  return (!ML<I>::is_state_shutdown() &&
          (ML<I>::is_state_locked() || ML<I>::is_state_post_acquiring()));
}

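// temporarily reject peer requests with the supplied error; calls nest,
// and unblock_requests() clears the error once the count drops to zero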
template <typename I>
void ExclusiveLock<I>::block_requests(int r) {
  Mutex::Locker locker(ML<I>::m_lock);

  m_request_blocked_count++;
  if (m_request_blocked_ret_val == 0) {
    m_request_blocked_ret_val = r;
  }

  ldout(m_image_ctx.cct, 20) << dendl;
}

template <typename I>
void ExclusiveLock<I>::unblock_requests() {
  Mutex::Locker locker(ML<I>::m_lock);

  assert(m_request_blocked_count > 0);
  m_request_blocked_count--;
  if (m_request_blocked_count == 0) {
    m_request_blocked_ret_val = 0;
  }

  ldout(m_image_ctx.cct, 20) << dendl;
}

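// error reported for ops attempted while the lock is not held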
template <typename I>
int ExclusiveLock<I>::get_unlocked_op_error() const {
  if (m_image_ctx.image_watcher->is_blacklisted()) {
    return -EBLACKLISTED;
  }
  return -EROFS;
}

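// called during image open: block writes until the initial lock
// requirements have been applied (see handle_init_complete)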
template <typename I>
void ExclusiveLock<I>::init(uint64_t features, Context *on_init) {
  assert(m_image_ctx.owner_lock.is_locked());
  ldout(m_image_ctx.cct, 10) << dendl;

  {
    Mutex::Locker locker(ML<I>::m_lock);
    ML<I>::set_state_initializing();
  }

  m_image_ctx.io_work_queue->block_writes(new C_InitComplete(this, features,
                                                             on_init));
}

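// release the lock (if held) and transition to the shutdown state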
template <typename I>
void ExclusiveLock<I>::shut_down(Context *on_shut_down) {
  ldout(m_image_ctx.cct, 10) << dendl;

  ML<I>::shut_down(on_shut_down);

  // if stalled in request state machine -- abort
  handle_peer_notification(0);
}

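// resume a pending lock acquisition once a peer responds to our lock
// request (or shut_down aborts the wait)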
template <typename I>
void ExclusiveLock<I>::handle_peer_notification(int r) {
  Mutex::Locker locker(ML<I>::m_lock);
  if (!ML<I>::is_state_waiting_for_lock()) {
    return;
  }

  ldout(m_image_ctx.cct, 10) << dendl;
  assert(ML<I>::is_action_acquire_lock());

  m_acquire_lock_peer_ret_val = r;
  ML<I>::execute_next_action();
}

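// track an in-flight op; returns nullptr and sets *ret_val if ops are
// not currently accepted, otherwise a context to complete when done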
template <typename I>
Context *ExclusiveLock<I>::start_op(int* ret_val) {
  assert(m_image_ctx.owner_lock.is_locked());
  Mutex::Locker locker(ML<I>::m_lock);

  if (!accept_ops(ML<I>::m_lock)) {
    *ret_val = get_unlocked_op_error();
    return nullptr;
  }

  m_async_op_tracker.start_op();
  return new FunctionContext([this](int r) {
      m_async_op_tracker.finish_op();
    });
}

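// writes were blocked by init(): require the lock for both I/O directions
// when copy-on-read or journaling is enabled, otherwise only for writes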
template <typename I>
void ExclusiveLock<I>::handle_init_complete(uint64_t features) {
  ldout(m_image_ctx.cct, 10) << ": features=" << features << dendl;

  {
    RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
    if (m_image_ctx.clone_copy_on_read ||
        (features & RBD_FEATURE_JOURNALING) != 0) {
      m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, true);
    } else {
      m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_WRITE, true);
    }
  }

  Mutex::Locker locker(ML<I>::m_lock);
  ML<I>::set_state_unlocked();
}

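// ManagedLock shutdown callback: detach from the image, resume blocked
// writes, and flush watcher notifications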
template <typename I>
void ExclusiveLock<I>::shutdown_handler(int r, Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;

  {
    RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
    m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, false);
    m_image_ctx.exclusive_lock = nullptr;
  }

  m_image_ctx.io_work_queue->unblock_writes();
  m_image_ctx.image_watcher->flush(on_finish);
}

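// runs before attempting to acquire the on-disk lock; short-circuits if
// a peer already nacked our lock request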
template <typename I>
void ExclusiveLock<I>::pre_acquire_lock_handler(Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;

  int acquire_lock_peer_ret_val = 0;
  {
    Mutex::Locker locker(ML<I>::m_lock);
    std::swap(acquire_lock_peer_ret_val, m_acquire_lock_peer_ret_val);
  }

  if (acquire_lock_peer_ret_val == -EROFS) {
    ldout(m_image_ctx.cct, 10) << ": peer nacked lock request" << dendl;
    on_finish->complete(acquire_lock_peer_ret_val);
    return;
  }

  PreAcquireRequest<I> *req = PreAcquireRequest<I>::create(m_image_ctx,
                                                           on_finish);
  m_image_ctx.op_work_queue->queue(new FunctionContext([req](int r) {
    req->send();
  }));
}

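// runs after the on-disk lock attempt: on contention (-EBUSY / -EAGAIN)
// ask the current owner to release the lock and wait for its reply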
template <typename I>
void ExclusiveLock<I>::post_acquire_lock_handler(int r, Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << ": r=" << r << dendl;

  if (r == -EROFS) {
    // peer refused to release the exclusive lock
    on_finish->complete(r);
    return;
  } else if (r < 0) {
    ML<I>::m_lock.Lock();
    assert(ML<I>::is_state_acquiring());

    // PostAcquire state machine will not run, so we need complete prepare
    m_image_ctx.state->handle_prepare_lock_complete();

    // if lock is in-use by another client, request the lock
    if (ML<I>::is_action_acquire_lock() && (r == -EBUSY || r == -EAGAIN)) {
      ML<I>::set_state_waiting_for_lock();
      ML<I>::m_lock.Unlock();

      // request the lock from a peer
      m_image_ctx.image_watcher->notify_request_lock();

      // inform manage lock that we have interrupted the state machine
      r = -ECANCELED;
    } else {
      ML<I>::m_lock.Unlock();

      // clear error if peer owns lock
      if (r == -EAGAIN) {
        r = 0;
      }
    }

    on_finish->complete(r);
    return;
  }

  Mutex::Locker locker(ML<I>::m_lock);
  m_pre_post_callback = on_finish;
  using EL = ExclusiveLock<I>;
  PostAcquireRequest<I> *req = PostAcquireRequest<I>::create(m_image_ctx,
      util::create_context_callback<EL, &EL::handle_post_acquiring_lock>(this),
      util::create_context_callback<EL, &EL::handle_post_acquired_lock>(this));

  m_image_ctx.op_work_queue->queue(new FunctionContext([req](int r) {
    req->send();
  }));
}

template <typename I>
void ExclusiveLock<I>::handle_post_acquiring_lock(int r) {
  ldout(m_image_ctx.cct, 10) << dendl;

  Mutex::Locker locker(ML<I>::m_lock);

  assert(r == 0);

  // lock is owned at this point
  ML<I>::set_state_post_acquiring();
}

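// final acquire step: on success announce ownership, drop the I/O lock
// requirement, and resume any queued writes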
template <typename I>
void ExclusiveLock<I>::handle_post_acquired_lock(int r) {
  ldout(m_image_ctx.cct, 10) << ": r=" << r << dendl;

  Context *on_finish = nullptr;
  {
    Mutex::Locker locker(ML<I>::m_lock);
    assert(ML<I>::is_state_acquiring() || ML<I>::is_state_post_acquiring());

    assert(m_pre_post_callback != nullptr);
    std::swap(m_pre_post_callback, on_finish);
  }

  if (r >= 0) {
    m_image_ctx.perfcounter->tset(l_librbd_lock_acquired_time,
                                  ceph_clock_now());
    m_image_ctx.image_watcher->notify_acquired_lock();
    m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, false);
    m_image_ctx.io_work_queue->unblock_writes();
  }

  on_finish->complete(r);
}

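// runs before the on-disk lock is released to quiesce in-flight ops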
template <typename I>
void ExclusiveLock<I>::pre_release_lock_handler(bool shutting_down,
                                                Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;
  Mutex::Locker locker(ML<I>::m_lock);

  PreReleaseRequest<I> *req = PreReleaseRequest<I>::create(
    m_image_ctx, shutting_down, m_async_op_tracker, on_finish);
  m_image_ctx.op_work_queue->queue(new FunctionContext([req](int r) {
    req->send();
  }));
}

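// notify peers that the lock was released; on shutdown also detach from
// the image and resume queued writes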
template <typename I>
void ExclusiveLock<I>::post_release_lock_handler(bool shutting_down, int r,
                                                 Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << ": r=" << r << " shutting_down="
                             << shutting_down << dendl;
  if (!shutting_down) {
    {
      Mutex::Locker locker(ML<I>::m_lock);
      assert(ML<I>::is_state_pre_releasing() || ML<I>::is_state_releasing());
    }

    if (r >= 0) {
      m_image_ctx.image_watcher->notify_released_lock();
    }
  } else {
    {
      RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
      m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, false);
      m_image_ctx.exclusive_lock = nullptr;
    }

    if (r >= 0) {
      m_image_ctx.io_work_queue->unblock_writes();
    }

    m_image_ctx.image_watcher->notify_released_lock();
  }

  on_finish->complete(r);
}

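// invoked after the lock was re-acquired under a new watch cookie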
template <typename I>
void ExclusiveLock<I>::post_reacquire_lock_handler(int r, Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;
  if (r >= 0) {
    m_image_ctx.image_watcher->notify_acquired_lock();
  }

  on_finish->complete(r);
}

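// completion for init(): invoked once the I/O work queue has blocked writes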
template <typename I>
struct ExclusiveLock<I>::C_InitComplete : public Context {
  ExclusiveLock *exclusive_lock;
  uint64_t features;
  Context *on_init;

  C_InitComplete(ExclusiveLock *exclusive_lock, uint64_t features,
                 Context *on_init)
    : exclusive_lock(exclusive_lock), features(features), on_init(on_init) {
  }
  void finish(int r) override {
    if (r == 0) {
      exclusive_lock->handle_init_complete(features);
    }
    on_init->complete(r);
  }
};

} // namespace librbd

template class librbd::ExclusiveLock<librbd::ImageCtx>;