]> git.proxmox.com Git - ceph.git/blob - ceph/src/librbd/ManagedLock.cc
import ceph 15.2.10
[ceph.git] / ceph / src / librbd / ManagedLock.cc
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3
4 #include "librbd/ManagedLock.h"
5 #include "librbd/managed_lock/AcquireRequest.h"
6 #include "librbd/managed_lock/BreakRequest.h"
7 #include "librbd/managed_lock/GetLockerRequest.h"
8 #include "librbd/managed_lock/ReleaseRequest.h"
9 #include "librbd/managed_lock/ReacquireRequest.h"
10 #include "librbd/managed_lock/Types.h"
11 #include "librbd/managed_lock/Utils.h"
12 #include "librbd/Watcher.h"
13 #include "librbd/ImageCtx.h"
14 #include "cls/lock/cls_lock_client.h"
15 #include "common/dout.h"
16 #include "common/errno.h"
17 #include "common/Cond.h"
18 #include "common/WorkQueue.h"
19 #include "librbd/Utils.h"
20
21 #define dout_subsys ceph_subsys_rbd
22 #undef dout_prefix
23 #define dout_prefix *_dout << "librbd::ManagedLock: " << this << " " \
24 << __func__ << ": "
25
26 namespace librbd {
27
28 using std::string;
29 using namespace managed_lock;
30
31 namespace {
32
33 template <typename R>
34 struct C_SendLockRequest : public Context {
35 R* request;
36 explicit C_SendLockRequest(R* request) : request(request) {
37 }
38 void finish(int r) override {
39 request->send();
40 }
41 };
42
// RAII wrapper that registers a pending operation with an
// AsyncOpTracker for the lifetime of the wrapped context: start_op()
// on construction, finish_op() when this wrapper is destroyed (i.e.
// after complete() has run).  Allows shut down to wait for in-flight
// get_locker()/break_lock() requests.
struct C_Tracked : public Context {
  AsyncOpTracker &tracker;
  Context *ctx;
  C_Tracked(AsyncOpTracker &tracker, Context *ctx)
    : tracker(tracker), ctx(ctx) {
    tracker.start_op();
  }
  ~C_Tracked() override {
    tracker.finish_op();
  }
  void finish(int r) override {
    // forward the result to the wrapped context
    ctx->complete(r);
  }
};
57
58 } // anonymous namespace
59
60 using librbd::util::create_context_callback;
61 using librbd::util::unique_lock_name;
62 using managed_lock::util::decode_lock_cookie;
63 using managed_lock::util::encode_lock_cookie;
64
// Construct a ManagedLock in the STATE_UNLOCKED state.
//
// @param ioctx rados context used for all lock operations on the object
// @param work_queue queue used to defer request execution and callbacks
// @param oid name of the rados object protected by the lock
// @param watcher watcher whose handle is encoded into the lock cookie
// @param mode EXCLUSIVE or shared lock mode
// @param blacklist_on_break_lock whether to blacklist the previous
//        owner when breaking a stale lock
// @param blacklist_expire_seconds requested blacklist duration
template <typename I>
ManagedLock<I>::ManagedLock(librados::IoCtx &ioctx, ContextWQ *work_queue,
                            const string& oid, Watcher *watcher, Mode mode,
                            bool blacklist_on_break_lock,
                            uint32_t blacklist_expire_seconds)
  : m_lock(ceph::make_mutex(unique_lock_name("librbd::ManagedLock<I>::m_lock", this))),
    m_ioctx(ioctx), m_cct(reinterpret_cast<CephContext *>(ioctx.cct())),
    m_work_queue(work_queue),
    m_oid(oid),
    m_watcher(watcher),
    m_mode(mode),
    m_blacklist_on_break_lock(blacklist_on_break_lock),
    m_blacklist_expire_seconds(blacklist_expire_seconds),
    m_state(STATE_UNLOCKED) {
}
80
// Destruction is only legal from a quiescent state; a lock that was
// ever acquired must have been shut down first.
template <typename I>
ManagedLock<I>::~ManagedLock() {
  std::lock_guard locker{m_lock};
  ceph_assert(m_state == STATE_SHUTDOWN || m_state == STATE_UNLOCKED ||
              m_state == STATE_UNINITIALIZED);
  if (m_state == STATE_UNINITIALIZED) {
    // never initialized -- ensure any in-flight ops are complete
    // since we wouldn't expect shut_down to be invoked
    C_SaferCond ctx;
    m_async_op_tracker.wait_for_ops(&ctx);
    ctx.wait();
  }
  ceph_assert(m_async_op_tracker.empty());
}
95
96 template <typename I>
97 bool ManagedLock<I>::is_lock_owner() const {
98 std::lock_guard locker{m_lock};
99
100 return is_lock_owner(m_lock);
101 }
102
103 template <typename I>
104 bool ManagedLock<I>::is_lock_owner(ceph::mutex &lock) const {
105
106 ceph_assert(ceph_mutex_is_locked(m_lock));
107
108 bool lock_owner;
109
110 switch (m_state) {
111 case STATE_LOCKED:
112 case STATE_REACQUIRING:
113 case STATE_PRE_SHUTTING_DOWN:
114 case STATE_POST_ACQUIRING:
115 case STATE_PRE_RELEASING:
116 lock_owner = true;
117 break;
118 default:
119 lock_owner = false;
120 break;
121 }
122
123 ldout(m_cct, 20) << "=" << lock_owner << dendl;
124 return lock_owner;
125 }
126
// Queue a shut down of the lock state machine.  If an acquire is
// stalled waiting for watch registration it is aborted first so the
// shut down action cannot be blocked behind it indefinitely.
template <typename I>
void ManagedLock<I>::shut_down(Context *on_shut_down) {
  ldout(m_cct, 10) << dendl;

  std::lock_guard locker{m_lock};
  ceph_assert(!is_state_shutdown());

  if (m_state == STATE_WAITING_FOR_REGISTER) {
    // abort stalled acquire lock state
    ldout(m_cct, 10) << "woke up waiting (re)acquire" << dendl;
    Action active_action = get_active_action();
    ceph_assert(active_action == ACTION_TRY_LOCK ||
                active_action == ACTION_ACQUIRE_LOCK);
    complete_active_action(STATE_UNLOCKED, -ESHUTDOWN);
  }

  execute_action(ACTION_SHUT_DOWN, on_shut_down);
}
145
146 template <typename I>
147 void ManagedLock<I>::acquire_lock(Context *on_acquired) {
148 int r = 0;
149 {
150 std::lock_guard locker{m_lock};
151 if (is_state_shutdown()) {
152 r = -ESHUTDOWN;
153 } else if (m_state != STATE_LOCKED || !m_actions_contexts.empty()) {
154 ldout(m_cct, 10) << dendl;
155 execute_action(ACTION_ACQUIRE_LOCK, on_acquired);
156 return;
157 }
158 }
159
160 if (on_acquired != nullptr) {
161 on_acquired->complete(r);
162 }
163 }
164
165 template <typename I>
166 void ManagedLock<I>::try_acquire_lock(Context *on_acquired) {
167 int r = 0;
168 {
169 std::lock_guard locker{m_lock};
170 if (is_state_shutdown()) {
171 r = -ESHUTDOWN;
172 } else if (m_state != STATE_LOCKED || !m_actions_contexts.empty()) {
173 ldout(m_cct, 10) << dendl;
174 execute_action(ACTION_TRY_LOCK, on_acquired);
175 return;
176 }
177 }
178
179 if (on_acquired != nullptr) {
180 on_acquired->complete(r);
181 }
182 }
183
184 template <typename I>
185 void ManagedLock<I>::release_lock(Context *on_released) {
186 int r = 0;
187 {
188 std::lock_guard locker{m_lock};
189 if (is_state_shutdown()) {
190 r = -ESHUTDOWN;
191 } else if (m_state != STATE_UNLOCKED || !m_actions_contexts.empty()) {
192 ldout(m_cct, 10) << dendl;
193 execute_action(ACTION_RELEASE_LOCK, on_released);
194 return;
195 }
196 }
197
198 if (on_released != nullptr) {
199 on_released->complete(r);
200 }
201 }
202
// Re-establish ownership after the watch has been re-registered.  The
// lock cookie encodes the watch handle (see send_acquire_lock), so a
// new watch requires a new cookie.  If no lock-related action is
// active or pending the request is a no-op and on_reacquired completes
// immediately with 0.
template <typename I>
void ManagedLock<I>::reacquire_lock(Context *on_reacquired) {
  {
    std::lock_guard locker{m_lock};

    if (m_state == STATE_WAITING_FOR_REGISTER) {
      // restart the acquire lock process now that watch is valid
      ldout(m_cct, 10) << "woke up waiting (re)acquire" << dendl;
      Action active_action = get_active_action();
      ceph_assert(active_action == ACTION_TRY_LOCK ||
                  active_action == ACTION_ACQUIRE_LOCK);
      execute_next_action();
    } else if (!is_state_shutdown() &&
               (m_state == STATE_LOCKED ||
                m_state == STATE_ACQUIRING ||
                m_state == STATE_POST_ACQUIRING ||
                m_state == STATE_WAITING_FOR_LOCK)) {
      // interlock the lock operation with other state ops
      ldout(m_cct, 10) << dendl;
      execute_action(ACTION_REACQUIRE_LOCK, on_reacquired);
      return;
    }
  }

  // ignore request if shutdown or not in a locked-related state
  if (on_reacquired != nullptr) {
    on_reacquired->complete(0);
  }
}
232
// Asynchronously fetch the current lock owner (if any) of the object.
// The request is registered with the async op tracker so destruction /
// shut down waits for it to complete.
template <typename I>
void ManagedLock<I>::get_locker(managed_lock::Locker *locker,
                                Context *on_finish) {
  ldout(m_cct, 10) << dendl;

  int r;
  {
    std::lock_guard l{m_lock};
    if (is_state_shutdown()) {
      r = -ESHUTDOWN;
    } else {
      // C_Tracked holds an op slot in m_async_op_tracker until completion
      on_finish = new C_Tracked(m_async_op_tracker, on_finish);
      auto req = managed_lock::GetLockerRequest<I>::create(
        m_ioctx, m_oid, m_mode == EXCLUSIVE, locker, on_finish);
      req->send();
      return;
    }
  }

  on_finish->complete(r);
}
254
// Asynchronously break another client's lock on the object.  Refused
// with -EBUSY if this instance currently owns the lock; tracked so
// shut down waits for the request to finish.
template <typename I>
void ManagedLock<I>::break_lock(const managed_lock::Locker &locker,
                                bool force_break_lock, Context *on_finish) {
  ldout(m_cct, 10) << dendl;

  int r;
  {
    std::lock_guard l{m_lock};
    if (is_state_shutdown()) {
      r = -ESHUTDOWN;
    } else if (is_lock_owner(m_lock)) {
      // cannot break our own lock
      r = -EBUSY;
    } else {
      // C_Tracked holds an op slot in m_async_op_tracker until completion
      on_finish = new C_Tracked(m_async_op_tracker, on_finish);
      auto req = managed_lock::BreakRequest<I>::create(
        m_ioctx, m_work_queue, m_oid, locker, m_mode == EXCLUSIVE,
        m_blacklist_on_break_lock, m_blacklist_expire_seconds, force_break_lock,
        on_finish);
      req->send();
      return;
    }
  }

  on_finish->complete(r);
}
280
281 template <typename I>
282 int ManagedLock<I>::assert_header_locked() {
283 ldout(m_cct, 10) << dendl;
284
285 librados::ObjectReadOperation op;
286 {
287 std::lock_guard locker{m_lock};
288 rados::cls::lock::assert_locked(&op, RBD_LOCK_NAME,
289 (m_mode == EXCLUSIVE ? LOCK_EXCLUSIVE :
290 LOCK_SHARED),
291 m_cookie,
292 managed_lock::util::get_watcher_lock_tag());
293 }
294
295 int r = m_ioctx.operate(m_oid, &op, nullptr);
296 if (r < 0) {
297 if (r == -EBLACKLISTED) {
298 ldout(m_cct, 5) << "client is not lock owner -- client blacklisted"
299 << dendl;
300 } else if (r == -ENOENT) {
301 ldout(m_cct, 5) << "client is not lock owner -- no lock detected"
302 << dendl;
303 } else if (r == -EBUSY) {
304 ldout(m_cct, 5) << "client is not lock owner -- owned by different client"
305 << dendl;
306 } else {
307 lderr(m_cct) << "failed to verify lock ownership: " << cpp_strerror(r)
308 << dendl;
309 }
310
311 return r;
312 }
313
314 return 0;
315 }
316
// Default subclass hook: invoked when shut down begins from the
// unlocked state; simply propagates the result.
template <typename I>
void ManagedLock<I>::shutdown_handler(int r, Context *on_finish) {
  on_finish->complete(r);
}

// Default subclass hook: invoked before an acquire request is sent.
template <typename I>
void ManagedLock<I>::pre_acquire_lock_handler(Context *on_finish) {
  on_finish->complete(0);
}

// Default subclass hook: invoked after an acquire attempt completes.
template <typename I>
void ManagedLock<I>::post_acquire_lock_handler(int r, Context *on_finish) {
  on_finish->complete(r);
}

// Default subclass hook: invoked before a release request is sent
// (shutting_down distinguishes the shut down release path).
template <typename I>
void ManagedLock<I>::pre_release_lock_handler(bool shutting_down,
                                              Context *on_finish) {
  on_finish->complete(0);
}

// Default subclass hook: invoked after a release attempt completes.
template <typename I>
void ManagedLock<I>::post_release_lock_handler(bool shutting_down, int r,
                                               Context *on_finish) {
  on_finish->complete(r);
}

// Default subclass hook: invoked after a reacquire attempt completes.
// NOTE: default implementation completes on_finish synchronously; the
// no-op reacquire path relies on this (see handle_no_op_reacquire_lock).
template <typename I>
void ManagedLock<I>::post_reacquire_lock_handler(int r, Context *on_finish) {
  on_finish->complete(r);
}
348
349 template <typename I>
350 bool ManagedLock<I>::is_transition_state() const {
351 switch (m_state) {
352 case STATE_ACQUIRING:
353 case STATE_WAITING_FOR_REGISTER:
354 case STATE_REACQUIRING:
355 case STATE_RELEASING:
356 case STATE_PRE_SHUTTING_DOWN:
357 case STATE_SHUTTING_DOWN:
358 case STATE_INITIALIZING:
359 case STATE_WAITING_FOR_LOCK:
360 case STATE_POST_ACQUIRING:
361 case STATE_PRE_RELEASING:
362 return true;
363 case STATE_UNLOCKED:
364 case STATE_LOCKED:
365 case STATE_SHUTDOWN:
366 case STATE_UNINITIALIZED:
367 break;
368 }
369 return false;
370 }
371
372 template <typename I>
373 void ManagedLock<I>::append_context(Action action, Context *ctx) {
374 ceph_assert(ceph_mutex_is_locked(m_lock));
375
376 for (auto &action_ctxs : m_actions_contexts) {
377 if (action == action_ctxs.first) {
378 if (ctx != nullptr) {
379 action_ctxs.second.push_back(ctx);
380 }
381 return;
382 }
383 }
384
385 Contexts contexts;
386 if (ctx != nullptr) {
387 contexts.push_back(ctx);
388 }
389 m_actions_contexts.push_back({action, std::move(contexts)});
390 }
391
// Queue an action (with optional completion) and start it immediately
// if no state transition is currently in progress; otherwise it runs
// when the active transition finishes (see complete_active_action).
template <typename I>
void ManagedLock<I>::execute_action(Action action, Context *ctx) {
  ceph_assert(ceph_mutex_is_locked(m_lock));

  append_context(action, ctx);
  if (!is_transition_state()) {
    execute_next_action();
  }
}
401
// Dispatch the action at the head of the queue to its request sender.
template <typename I>
void ManagedLock<I>::execute_next_action() {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  ceph_assert(!m_actions_contexts.empty());
  switch (get_active_action()) {
  case ACTION_ACQUIRE_LOCK:
  case ACTION_TRY_LOCK:
    send_acquire_lock();
    break;
  case ACTION_REACQUIRE_LOCK:
    send_reacquire_lock();
    break;
  case ACTION_RELEASE_LOCK:
    send_release_lock();
    break;
  case ACTION_SHUT_DOWN:
    send_shutdown();
    break;
  default:
    // unknown action is a programming error
    ceph_abort();
    break;
  }
}
425
// Return the action currently at the head of the queue (the one being
// executed, or about to be).
template <typename I>
typename ManagedLock<I>::Action ManagedLock<I>::get_active_action() const {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  ceph_assert(!m_actions_contexts.empty());
  return m_actions_contexts.front().first;
}
432
// Finish the head action: transition to next_state, fire the action's
// queued completions with result r, then start the next queued action
// (if any).  Must be called with m_lock held.
template <typename I>
void ManagedLock<I>::complete_active_action(State next_state, int r) {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  ceph_assert(!m_actions_contexts.empty());

  // dequeue before firing so callbacks observe the new state
  ActionContexts action_contexts(std::move(m_actions_contexts.front()));
  m_actions_contexts.pop_front();
  m_state = next_state;

  // drop the lock while firing callbacks -- presumably so callbacks
  // may re-enter this lock's API without deadlocking (confirm)
  m_lock.unlock();
  for (auto ctx : action_contexts.second) {
    ctx->complete(r);
  }
  m_lock.lock();

  // callbacks may have queued more work while the lock was dropped
  if (!is_transition_state() && !m_actions_contexts.empty()) {
    execute_next_action();
  }
}
452
453 template <typename I>
454 bool ManagedLock<I>::is_state_shutdown() const {
455 ceph_assert(ceph_mutex_is_locked(m_lock));
456
457 switch (m_state) {
458 case STATE_PRE_SHUTTING_DOWN:
459 case STATE_SHUTTING_DOWN:
460 case STATE_SHUTDOWN:
461 return true;
462 default:
463 break;
464 }
465
466 return (!m_actions_contexts.empty() &&
467 m_actions_contexts.back().first == ACTION_SHUT_DOWN);
468 }
469
// Begin the acquire sequence for the head ACQUIRE/TRY action: build a
// cookie from the watch handle, then run the pre-acquire hook off the
// work queue.
template <typename I>
void ManagedLock<I>::send_acquire_lock() {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  if (m_state == STATE_LOCKED) {
    // already own the lock -- nothing to do
    complete_active_action(STATE_LOCKED, 0);
    return;
  }

  ldout(m_cct, 10) << dendl;

  uint64_t watch_handle = m_watcher->get_watch_handle();
  if (watch_handle == 0) {
    // cannot build a cookie without a registered watch; park the action
    // until reacquire_lock() restarts it
    lderr(m_cct) << "watcher not registered - delaying request" << dendl;
    m_state = STATE_WAITING_FOR_REGISTER;

    // shut down might race w/ release/re-acquire of the lock
    if (is_state_shutdown()) {
      complete_active_action(STATE_UNLOCKED, -ESHUTDOWN);
    }
    return;
  }

  m_state = STATE_ACQUIRING;
  m_cookie = encode_lock_cookie(watch_handle);

  // run the (potentially subclass-overridden) pre-acquire hook
  // asynchronously on the work queue
  m_work_queue->queue(new LambdaContext([this](int r) {
    pre_acquire_lock_handler(create_context_callback<
        ManagedLock<I>, &ManagedLock<I>::handle_pre_acquire_lock>(this));
  }));
}
500
// Completion of the pre-acquire hook: on failure short-circuit to the
// acquire handler, otherwise send the actual AcquireRequest.
template <typename I>
void ManagedLock<I>::handle_pre_acquire_lock(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  if (r < 0) {
    // skip the request; handle_acquire_lock reports the failure
    handle_acquire_lock(r);
    return;
  }

  using managed_lock::AcquireRequest;
  AcquireRequest<I>* req = AcquireRequest<I>::create(
    m_ioctx, m_watcher, m_work_queue, m_oid, m_cookie, m_mode == EXCLUSIVE,
    m_blacklist_on_break_lock, m_blacklist_expire_seconds,
    create_context_callback<
        ManagedLock<I>, &ManagedLock<I>::handle_acquire_lock>(this));
  m_work_queue->queue(new C_SendLockRequest<AcquireRequest<I>>(req), 0);
}
518
// Completion of the acquire request: record the target state, then run
// the post-acquire hook off the work queue.
template <typename I>
void ManagedLock<I>::handle_acquire_lock(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  if (r == -EBUSY || r == -EAGAIN) {
    // expected contention (another owner) -- not an error
    ldout(m_cct, 5) << "unable to acquire exclusive lock" << dendl;
  } else if (r < 0) {
    lderr(m_cct) << "failed to acquire exclusive lock:" << cpp_strerror(r)
                 << dendl;
  } else {
    ldout(m_cct, 5) << "successfully acquired exclusive lock" << dendl;
  }

  // state to apply once the post-acquire hook completes
  m_post_next_state = (r < 0 ? STATE_UNLOCKED : STATE_LOCKED);

  m_work_queue->queue(new LambdaContext([this, r](int ret) {
    post_acquire_lock_handler(r, create_context_callback<
        ManagedLock<I>, &ManagedLock<I>::handle_post_acquire_lock>(this));
  }));
}
539
// Completion of the post-acquire hook.  A hook failure after a
// successful acquire forces a silent release; -ECANCELED leaves the
// action pending.
template <typename I>
void ManagedLock<I>::handle_post_acquire_lock(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  std::lock_guard locker{m_lock};

  if (r < 0 && m_post_next_state == STATE_LOCKED) {
    // release_lock without calling pre and post handlers
    revert_to_unlock_state(r);
  } else if (r != -ECANCELED) {
    // fail the lock request
    complete_active_action(m_post_next_state, r);
  }
  // NOTE(review): on -ECANCELED the active action is intentionally left
  // queued -- presumably the canceling party restarts it; confirm with
  // subclasses that return -ECANCELED from the post-acquire hook
}
554
// Undo a just-acquired lock after a post-acquire hook failure: release
// it again without invoking the pre/post release hooks, then fail the
// active action with the original error r.
template <typename I>
void ManagedLock<I>::revert_to_unlock_state(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  using managed_lock::ReleaseRequest;
  ReleaseRequest<I>* req = ReleaseRequest<I>::create(m_ioctx, m_watcher,
      m_work_queue, m_oid, m_cookie,
      new LambdaContext([this, r](int ret) {
          std::lock_guard locker{m_lock};
          // the revert release itself is expected to always succeed
          ceph_assert(ret == 0);
          complete_active_action(STATE_UNLOCKED, r);
        }));
  m_work_queue->queue(new C_SendLockRequest<ReleaseRequest<I>>(req));
}
569
// Update the lock cookie to match the current watch handle after a
// watch re-registration.  Falls back to a full release/acquire cycle
// if the watch is invalid; short-circuits if the cookie is unchanged.
template <typename I>
void ManagedLock<I>::send_reacquire_lock() {
  ceph_assert(ceph_mutex_is_locked(m_lock));

  if (m_state != STATE_LOCKED) {
    // lock not currently held -- nothing to update
    complete_active_action(m_state, 0);
    return;
  }

  ldout(m_cct, 10) << dendl;
  m_state = STATE_REACQUIRING;

  uint64_t watch_handle = m_watcher->get_watch_handle();
  if (watch_handle == 0) {
    // watch (re)failed while recovering
    lderr(m_cct) << "aborting reacquire due to invalid watch handle"
                 << dendl;

    // treat double-watch failure as a lost lock and invoke the
    // release/acquire handlers
    release_acquire_lock();
    complete_active_action(STATE_LOCKED, 0);
    return;
  }

  m_new_cookie = encode_lock_cookie(watch_handle);
  if (m_cookie == m_new_cookie && m_blacklist_on_break_lock) {
    // NOTE(review): an unchanged cookie is only trusted when
    // blacklisting is enabled -- presumably because otherwise a stale
    // peer might still hold the lock; confirm
    ldout(m_cct, 10) << "skipping reacquire since cookie still valid"
                     << dendl;
    auto ctx = create_context_callback<
      ManagedLock, &ManagedLock<I>::handle_no_op_reacquire_lock>(this);
    post_reacquire_lock_handler(0, ctx);
    return;
  }

  auto ctx = create_context_callback<
    ManagedLock, &ManagedLock<I>::handle_reacquire_lock>(this);
  ctx = new LambdaContext([this, ctx](int r) {
      post_reacquire_lock_handler(r, ctx);
    });

  using managed_lock::ReacquireRequest;
  ReacquireRequest<I>* req = ReacquireRequest<I>::create(m_ioctx, m_oid,
      m_cookie, m_new_cookie, m_mode == EXCLUSIVE, ctx);
  m_work_queue->queue(new C_SendLockRequest<ReacquireRequest<I>>(req));
}
616
// Completion of the cookie-update request.  On failure fall back to a
// full release/acquire cycle; either way the reacquire action itself
// completes successfully in STATE_LOCKED.
template <typename I>
void ManagedLock<I>::handle_reacquire_lock(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  std::lock_guard locker{m_lock};
  ceph_assert(m_state == STATE_REACQUIRING);

  if (r < 0) {
    if (r == -EOPNOTSUPP) {
      // older OSDs cannot update a lock cookie in place
      ldout(m_cct, 10) << "updating lock is not supported" << dendl;
    } else {
      lderr(m_cct) << "failed to update lock cookie: " << cpp_strerror(r)
                   << dendl;
    }

    // recover via release + re-acquire
    release_acquire_lock();
  } else {
    m_cookie = m_new_cookie;
  }

  complete_active_action(STATE_LOCKED, 0);
}
639
// Completion for the "cookie unchanged" reacquire fast path.  Invoked
// synchronously from send_reacquire_lock() via
// post_reacquire_lock_handler(), so m_lock is still held here -- do
// NOT add a lock guard (ceph::mutex is not recursive).
// NOTE(review): this assumes subclass overrides of
// post_reacquire_lock_handler() also complete the context
// synchronously; confirm
template <typename I>
void ManagedLock<I>::handle_no_op_reacquire_lock(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;
  ceph_assert(m_state == STATE_REACQUIRING);
  ceph_assert(r >= 0);
  complete_active_action(STATE_LOCKED, 0);
}
647
648 template <typename I>
649 void ManagedLock<I>::release_acquire_lock() {
650 assert(ceph_mutex_is_locked(m_lock));
651
652 if (!is_state_shutdown()) {
653 // queue a release and re-acquire of the lock since cookie cannot
654 // be updated on older OSDs
655 execute_action(ACTION_RELEASE_LOCK, nullptr);
656
657 ceph_assert(!m_actions_contexts.empty());
658 ActionContexts &action_contexts(m_actions_contexts.front());
659
660 // reacquire completes when the request lock completes
661 Contexts contexts;
662 std::swap(contexts, action_contexts.second);
663 if (contexts.empty()) {
664 execute_action(ACTION_ACQUIRE_LOCK, nullptr);
665 } else {
666 for (auto ctx : contexts) {
667 execute_action(ACTION_ACQUIRE_LOCK, ctx);
668 }
669 }
670 }
671 }
672
// Begin the release sequence for the head RELEASE action: run the
// pre-release hook off the work queue.
template <typename I>
void ManagedLock<I>::send_release_lock() {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  if (m_state == STATE_UNLOCKED) {
    // nothing to release
    complete_active_action(STATE_UNLOCKED, 0);
    return;
  }

  ldout(m_cct, 10) << dendl;
  m_state = STATE_PRE_RELEASING;

  m_work_queue->queue(new LambdaContext([this](int r) {
    pre_release_lock_handler(false, create_context_callback<
        ManagedLock<I>, &ManagedLock<I>::handle_pre_release_lock>(this));
  }));
}
689
// Completion of the pre-release hook: on failure short-circuit to the
// release handler, otherwise send the actual ReleaseRequest.
template <typename I>
void ManagedLock<I>::handle_pre_release_lock(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  {
    std::lock_guard locker{m_lock};
    ceph_assert(m_state == STATE_PRE_RELEASING);
    m_state = STATE_RELEASING;
  }

  if (r < 0) {
    // skip the request; handle_release_lock reports the failure
    handle_release_lock(r);
    return;
  }

  using managed_lock::ReleaseRequest;
  ReleaseRequest<I>* req = ReleaseRequest<I>::create(m_ioctx, m_watcher,
      m_work_queue, m_oid, m_cookie,
      create_context_callback<
          ManagedLock<I>, &ManagedLock<I>::handle_release_lock>(this));
  m_work_queue->queue(new C_SendLockRequest<ReleaseRequest<I>>(req), 0);
}
712
// Completion of the release request: decide the post-release state,
// then run the post-release hook off the work queue.
template <typename I>
void ManagedLock<I>::handle_release_lock(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  std::lock_guard locker{m_lock};
  ceph_assert(m_state == STATE_RELEASING);

  if (r >= 0 || r == -EBLACKLISTED || r == -ENOENT) {
    // blacklisted / lock-already-gone also mean we no longer own it
    m_cookie = "";
    m_post_next_state = STATE_UNLOCKED;
  } else {
    // release failed -- we still own the lock
    m_post_next_state = STATE_LOCKED;
  }

  m_work_queue->queue(new LambdaContext([this, r](int ret) {
    post_release_lock_handler(false, r, create_context_callback<
        ManagedLock<I>, &ManagedLock<I>::handle_post_release_lock>(this));
  }));
}
732
// Completion of the post-release hook: finish the release action with
// the state chosen in handle_release_lock().
template <typename I>
void ManagedLock<I>::handle_post_release_lock(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  std::lock_guard locker{m_lock};
  complete_active_action(m_post_next_state, r);
}
740
// Begin the shut down sequence: an unlocked instance shuts down
// directly; a locked instance first releases the lock via
// C_ShutDownRelease -> send_shutdown_release().
template <typename I>
void ManagedLock<I>::send_shutdown() {
  ldout(m_cct, 10) << dendl;
  ceph_assert(ceph_mutex_is_locked(m_lock));
  if (m_state == STATE_UNLOCKED) {
    m_state = STATE_SHUTTING_DOWN;
    m_work_queue->queue(new LambdaContext([this](int r) {
      shutdown_handler(r, create_context_callback<
          ManagedLock<I>, &ManagedLock<I>::handle_shutdown>(this));
    }));
    return;
  }

  ceph_assert(m_state == STATE_LOCKED);
  m_state = STATE_PRE_SHUTTING_DOWN;

  // NOTE(review): the lock is dropped around this queue call --
  // presumably to respect a lock-ordering constraint with the work
  // queue; confirm before changing
  m_lock.unlock();
  m_work_queue->queue(new C_ShutDownRelease(this), 0);
  m_lock.lock();
}
761
// Completion of the unlocked-path shut down hook: drain tracked async
// ops before finishing.
template <typename I>
void ManagedLock<I>::handle_shutdown(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  wait_for_tracked_ops(r);
}
768
// Locked-path shut down: run the pre-release hook (shutting_down=true)
// off the work queue before releasing the lock.
template <typename I>
void ManagedLock<I>::send_shutdown_release() {
  ldout(m_cct, 10) << dendl;

  std::lock_guard locker{m_lock};

  m_work_queue->queue(new LambdaContext([this](int r) {
    pre_release_lock_handler(true, create_context_callback<
        ManagedLock<I>, &ManagedLock<I>::handle_shutdown_pre_release>(this));
  }));
}
780
// Release the lock as part of shut down.  The cookie is snapshotted
// under m_lock; a pre-release failure (r < 0) takes precedence over
// the release result when reported to the post-release hook.
template <typename I>
void ManagedLock<I>::handle_shutdown_pre_release(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  std::string cookie;
  {
    std::lock_guard locker{m_lock};
    cookie = m_cookie;

    ceph_assert(m_state == STATE_PRE_SHUTTING_DOWN);
    m_state = STATE_SHUTTING_DOWN;
  }

  using managed_lock::ReleaseRequest;
  ReleaseRequest<I>* req = ReleaseRequest<I>::create(m_ioctx, m_watcher,
      m_work_queue, m_oid, cookie,
      new LambdaContext([this, r](int l) {
          // prefer the pre-release error over the release result
          int rst = r < 0 ? r : l;
          post_release_lock_handler(true, rst, create_context_callback<
              ManagedLock<I>, &ManagedLock<I>::handle_shutdown_post_release>(this));
        }));
  req->send();

}
805
// Completion of the locked-path shut down release: drain tracked async
// ops before finishing.
template <typename I>
void ManagedLock<I>::handle_shutdown_post_release(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  wait_for_tracked_ops(r);
}
812
813 template <typename I>
814 void ManagedLock<I>::wait_for_tracked_ops(int r) {
815 ldout(m_cct, 10) << "r=" << r << dendl;
816
817 Context *ctx = new LambdaContext([this, r](int ret) {
818 complete_shutdown(r);
819 });
820
821 m_async_op_tracker.wait_for_ops(ctx);
822 }
823
824 template <typename I>
825 void ManagedLock<I>::complete_shutdown(int r) {
826 ldout(m_cct, 10) << "r=" << r << dendl;
827
828 if (r < 0) {
829 lderr(m_cct) << "failed to shut down lock: " << cpp_strerror(r)
830 << dendl;
831 }
832
833 ActionContexts action_contexts;
834 {
835 std::lock_guard locker{m_lock};
836 ceph_assert(ceph_mutex_is_locked(m_lock));
837 ceph_assert(m_actions_contexts.size() == 1);
838
839 action_contexts = std::move(m_actions_contexts.front());
840 m_actions_contexts.pop_front();
841 m_state = STATE_SHUTDOWN;
842 }
843
844 // expect to be destroyed after firing callback
845 for (auto ctx : action_contexts.second) {
846 ctx->complete(r);
847 }
848 }
849
850 } // namespace librbd
851
852 template class librbd::ManagedLock<librbd::ImageCtx>;