// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "librbd/ManagedLock.h"
#include "librbd/AsioEngine.h"
#include "librbd/ImageCtx.h"
#include "librbd/Watcher.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/managed_lock/AcquireRequest.h"
#include "librbd/managed_lock/BreakRequest.h"
#include "librbd/managed_lock/GetLockerRequest.h"
#include "librbd/managed_lock/ReleaseRequest.h"
#include "librbd/managed_lock/ReacquireRequest.h"
#include "librbd/managed_lock/Types.h"
#include "librbd/managed_lock/Utils.h"
#include "cls/lock/cls_lock_client.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/Cond.h"
#include "librbd/Utils.h"

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::ManagedLock: " << this << " " \
                           << __func__ << ": "

namespace librbd {

using std::string;
using namespace managed_lock;

namespace {

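// Context adapter: when invoked from the work queue it kicks off the wrapped
// lock request's send().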
template <typename R>
struct C_SendLockRequest : public Context {
  R* request;
  explicit C_SendLockRequest(R* request) : request(request) {
  }
  void finish(int r) override {
    request->send();
  }
};

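// Wraps a context with AsyncOpTracker start/finish bookkeeping so in-flight
// operations (e.g. get_locker/break_lock) are accounted for until completion.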
struct C_Tracked : public Context {
  AsyncOpTracker &tracker;
  Context *ctx;
  C_Tracked(AsyncOpTracker &tracker, Context *ctx)
    : tracker(tracker), ctx(ctx) {
    tracker.start_op();
  }
  ~C_Tracked() override {
    tracker.finish_op();
  }
  void finish(int r) override {
    ctx->complete(r);
  }
};

} // anonymous namespace

using librbd::util::create_context_callback;
using librbd::util::unique_lock_name;
using managed_lock::util::decode_lock_cookie;
using managed_lock::util::encode_lock_cookie;

template <typename I>
ManagedLock<I>::ManagedLock(librados::IoCtx &ioctx, AsioEngine& asio_engine,
                            const string& oid, Watcher *watcher, Mode mode,
                            bool blocklist_on_break_lock,
                            uint32_t blocklist_expire_seconds)
  : m_lock(ceph::make_mutex(unique_lock_name("librbd::ManagedLock<I>::m_lock", this))),
    m_ioctx(ioctx), m_cct(reinterpret_cast<CephContext *>(ioctx.cct())),
    m_asio_engine(asio_engine),
    m_work_queue(asio_engine.get_work_queue()),
    m_oid(oid),
    m_watcher(watcher),
    m_mode(mode),
    m_blocklist_on_break_lock(blocklist_on_break_lock),
    m_blocklist_expire_seconds(blocklist_expire_seconds),
    m_state(STATE_UNLOCKED) {
}

template <typename I>
ManagedLock<I>::~ManagedLock() {
  std::lock_guard locker{m_lock};
  ceph_assert(m_state == STATE_SHUTDOWN || m_state == STATE_UNLOCKED ||
              m_state == STATE_UNINITIALIZED);
  if (m_state == STATE_UNINITIALIZED) {
    // never initialized -- ensure any in-flight ops are complete
    // since we wouldn't expect shut_down to be invoked
    C_SaferCond ctx;
    m_async_op_tracker.wait_for_ops(&ctx);
    ctx.wait();
  }
  ceph_assert(m_async_op_tracker.empty());
}

template <typename I>
bool ManagedLock<I>::is_lock_owner() const {
  std::lock_guard locker{m_lock};

  return is_lock_owner(m_lock);
}

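// Overload that expects m_lock to already be held; the mutex parameter serves
// only to make that requirement explicit at call sites.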
template <typename I>
bool ManagedLock<I>::is_lock_owner(ceph::mutex &lock) const {

  ceph_assert(ceph_mutex_is_locked(m_lock));

  bool lock_owner;

  switch (m_state) {
  case STATE_LOCKED:
  case STATE_REACQUIRING:
  case STATE_PRE_SHUTTING_DOWN:
  case STATE_POST_ACQUIRING:
  case STATE_PRE_RELEASING:
    lock_owner = true;
    break;
  default:
    lock_owner = false;
    break;
  }

  ldout(m_cct, 20) << "=" << lock_owner << dendl;
  return lock_owner;
}

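// Queues a shut-down action; an acquire stalled waiting for watch
// registration is aborted with -ESHUTDOWN first.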
template <typename I>
void ManagedLock<I>::shut_down(Context *on_shut_down) {
  ldout(m_cct, 10) << dendl;

  std::lock_guard locker{m_lock};
  ceph_assert(!is_state_shutdown());

  if (m_state == STATE_WAITING_FOR_REGISTER) {
    // abort stalled acquire lock state
    ldout(m_cct, 10) << "woke up waiting (re)acquire" << dendl;
    Action active_action = get_active_action();
    ceph_assert(active_action == ACTION_TRY_LOCK ||
                active_action == ACTION_ACQUIRE_LOCK);
    complete_active_action(STATE_UNLOCKED, -ESHUTDOWN);
  }

  execute_action(ACTION_SHUT_DOWN, on_shut_down);
}

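// acquire_lock/try_acquire_lock/release_lock queue an action only when the
// state machine is not already in the requested quiescent state or still has
// pending actions; otherwise the callback is completed immediately.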
template <typename I>
void ManagedLock<I>::acquire_lock(Context *on_acquired) {
  int r = 0;
  {
    std::lock_guard locker{m_lock};
    if (is_state_shutdown()) {
      r = -ESHUTDOWN;
    } else if (m_state != STATE_LOCKED || !m_actions_contexts.empty()) {
      ldout(m_cct, 10) << dendl;
      execute_action(ACTION_ACQUIRE_LOCK, on_acquired);
      return;
    }
  }

  if (on_acquired != nullptr) {
    on_acquired->complete(r);
  }
}

template <typename I>
void ManagedLock<I>::try_acquire_lock(Context *on_acquired) {
  int r = 0;
  {
    std::lock_guard locker{m_lock};
    if (is_state_shutdown()) {
      r = -ESHUTDOWN;
    } else if (m_state != STATE_LOCKED || !m_actions_contexts.empty()) {
      ldout(m_cct, 10) << dendl;
      execute_action(ACTION_TRY_LOCK, on_acquired);
      return;
    }
  }

  if (on_acquired != nullptr) {
    on_acquired->complete(r);
  }
}

template <typename I>
void ManagedLock<I>::release_lock(Context *on_released) {
  int r = 0;
  {
    std::lock_guard locker{m_lock};
    if (is_state_shutdown()) {
      r = -ESHUTDOWN;
    } else if (m_state != STATE_UNLOCKED || !m_actions_contexts.empty()) {
      ldout(m_cct, 10) << dendl;
      execute_action(ACTION_RELEASE_LOCK, on_released);
      return;
    }
  }

  if (on_released != nullptr) {
    on_released->complete(r);
  }
}

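// Called when the watch handle may have changed: either wakes up a stalled
// acquire or queues a reacquire to refresh the lock cookie.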
template <typename I>
void ManagedLock<I>::reacquire_lock(Context *on_reacquired) {
  {
    std::lock_guard locker{m_lock};

    if (m_state == STATE_WAITING_FOR_REGISTER) {
      // restart the acquire lock process now that watch is valid
      ldout(m_cct, 10) << "woke up waiting (re)acquire" << dendl;
      Action active_action = get_active_action();
      ceph_assert(active_action == ACTION_TRY_LOCK ||
                  active_action == ACTION_ACQUIRE_LOCK);
      execute_next_action();
    } else if (!is_state_shutdown() &&
               (m_state == STATE_LOCKED ||
                m_state == STATE_ACQUIRING ||
                m_state == STATE_POST_ACQUIRING ||
                m_state == STATE_WAITING_FOR_LOCK)) {
      // interlock the lock operation with other state ops
      ldout(m_cct, 10) << dendl;
      execute_action(ACTION_REACQUIRE_LOCK, on_reacquired);
      return;
    }
  }

  // ignore request if shut down or not in a lock-related state
  if (on_reacquired != nullptr) {
    on_reacquired->complete(0);
  }
}

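// Retrieves the current lock holder (if any) from the OSD without changing
// local state.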
template <typename I>
void ManagedLock<I>::get_locker(managed_lock::Locker *locker,
                                Context *on_finish) {
  ldout(m_cct, 10) << dendl;

  int r;
  {
    std::lock_guard l{m_lock};
    if (is_state_shutdown()) {
      r = -ESHUTDOWN;
    } else {
      on_finish = new C_Tracked(m_async_op_tracker, on_finish);
      auto req = managed_lock::GetLockerRequest<I>::create(
        m_ioctx, m_oid, m_mode == EXCLUSIVE, locker, on_finish);
      req->send();
      return;
    }
  }

  on_finish->complete(r);
}

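// Breaks another client's lock (optionally blocklisting the owner, per
// m_blocklist_on_break_lock); refused with -EBUSY if this instance currently
// owns the lock.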
template <typename I>
void ManagedLock<I>::break_lock(const managed_lock::Locker &locker,
                                bool force_break_lock, Context *on_finish) {
  ldout(m_cct, 10) << dendl;

  int r;
  {
    std::lock_guard l{m_lock};
    if (is_state_shutdown()) {
      r = -ESHUTDOWN;
    } else if (is_lock_owner(m_lock)) {
      r = -EBUSY;
    } else {
      on_finish = new C_Tracked(m_async_op_tracker, on_finish);
      auto req = managed_lock::BreakRequest<I>::create(
        m_ioctx, m_asio_engine, m_oid, locker, m_mode == EXCLUSIVE,
        m_blocklist_on_break_lock, m_blocklist_expire_seconds, force_break_lock,
        on_finish);
      req->send();
      return;
    }
  }

  on_finish->complete(r);
}

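// Synchronously asserts on the OSD that this client still holds the lock on
// m_oid, translating the common failure cases into log messages.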
template <typename I>
int ManagedLock<I>::assert_header_locked() {
  ldout(m_cct, 10) << dendl;

  librados::ObjectReadOperation op;
  {
    std::lock_guard locker{m_lock};
    rados::cls::lock::assert_locked(&op, RBD_LOCK_NAME,
                                    (m_mode == EXCLUSIVE ? ClsLockType::EXCLUSIVE :
                                                           ClsLockType::SHARED),
                                    m_cookie,
                                    managed_lock::util::get_watcher_lock_tag());
  }

  int r = m_ioctx.operate(m_oid, &op, nullptr);
  if (r < 0) {
    if (r == -EBLOCKLISTED) {
      ldout(m_cct, 5) << "client is not lock owner -- client blocklisted"
                      << dendl;
    } else if (r == -ENOENT) {
      ldout(m_cct, 5) << "client is not lock owner -- no lock detected"
                      << dendl;
    } else if (r == -EBUSY) {
      ldout(m_cct, 5) << "client is not lock owner -- owned by different client"
                      << dendl;
    } else {
      lderr(m_cct) << "failed to verify lock ownership: " << cpp_strerror(r)
                   << dendl;
    }

    return r;
  }

  return 0;
}

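// Default pre/post handler hooks are simple pass-throughs; subclasses such as
// ExclusiveLock override them to layer additional behaviour around the state
// machine transitions.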
template <typename I>
void ManagedLock<I>::shutdown_handler(int r, Context *on_finish) {
  on_finish->complete(r);
}

template <typename I>
void ManagedLock<I>::pre_acquire_lock_handler(Context *on_finish) {
  on_finish->complete(0);
}

template <typename I>
void ManagedLock<I>::post_acquire_lock_handler(int r, Context *on_finish) {
  on_finish->complete(r);
}

template <typename I>
void ManagedLock<I>::pre_release_lock_handler(bool shutting_down,
                                              Context *on_finish) {
  on_finish->complete(0);
}

template <typename I>
void ManagedLock<I>::post_release_lock_handler(bool shutting_down, int r,
                                               Context *on_finish) {
  on_finish->complete(r);
}

template <typename I>
void ManagedLock<I>::post_reacquire_lock_handler(int r, Context *on_finish) {
  on_finish->complete(r);
}

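// Transition states have a request in flight; new actions are queued behind
// the active one instead of executing immediately.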
template <typename I>
bool ManagedLock<I>::is_transition_state() const {
  switch (m_state) {
  case STATE_ACQUIRING:
  case STATE_WAITING_FOR_REGISTER:
  case STATE_REACQUIRING:
  case STATE_RELEASING:
  case STATE_PRE_SHUTTING_DOWN:
  case STATE_SHUTTING_DOWN:
  case STATE_INITIALIZING:
  case STATE_WAITING_FOR_LOCK:
  case STATE_POST_ACQUIRING:
  case STATE_PRE_RELEASING:
    return true;
  case STATE_UNLOCKED:
  case STATE_LOCKED:
  case STATE_SHUTDOWN:
  case STATE_UNINITIALIZED:
    break;
  }
  return false;
}

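// Coalesces duplicate actions: a context for an already-queued action is
// appended to that action's context list rather than creating a new entry.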
template <typename I>
void ManagedLock<I>::append_context(Action action, Context *ctx) {
  ceph_assert(ceph_mutex_is_locked(m_lock));

  for (auto &action_ctxs : m_actions_contexts) {
    if (action == action_ctxs.first) {
      if (ctx != nullptr) {
        action_ctxs.second.push_back(ctx);
      }
      return;
    }
  }

  Contexts contexts;
  if (ctx != nullptr) {
    contexts.push_back(ctx);
  }
  m_actions_contexts.push_back({action, std::move(contexts)});
}

template <typename I>
void ManagedLock<I>::execute_action(Action action, Context *ctx) {
  ceph_assert(ceph_mutex_is_locked(m_lock));

  append_context(action, ctx);
  if (!is_transition_state()) {
    execute_next_action();
  }
}

template <typename I>
void ManagedLock<I>::execute_next_action() {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  ceph_assert(!m_actions_contexts.empty());
  switch (get_active_action()) {
  case ACTION_ACQUIRE_LOCK:
  case ACTION_TRY_LOCK:
    send_acquire_lock();
    break;
  case ACTION_REACQUIRE_LOCK:
    send_reacquire_lock();
    break;
  case ACTION_RELEASE_LOCK:
    send_release_lock();
    break;
  case ACTION_SHUT_DOWN:
    send_shutdown();
    break;
  default:
    ceph_abort();
    break;
  }
}

template <typename I>
typename ManagedLock<I>::Action ManagedLock<I>::get_active_action() const {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  ceph_assert(!m_actions_contexts.empty());
  return m_actions_contexts.front().first;
}

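// Pops the active action, moves to next_state, completes all queued contexts
// with m_lock temporarily dropped, then starts the next pending action.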
template <typename I>
void ManagedLock<I>::complete_active_action(State next_state, int r) {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  ceph_assert(!m_actions_contexts.empty());

  ActionContexts action_contexts(std::move(m_actions_contexts.front()));
  m_actions_contexts.pop_front();
  m_state = next_state;

  m_lock.unlock();
  for (auto ctx : action_contexts.second) {
    ctx->complete(r);
  }
  m_lock.lock();

  if (!is_transition_state() && !m_actions_contexts.empty()) {
    execute_next_action();
  }
}

template <typename I>
bool ManagedLock<I>::is_state_shutdown() const {
  ceph_assert(ceph_mutex_is_locked(m_lock));

  switch (m_state) {
  case STATE_PRE_SHUTTING_DOWN:
  case STATE_SHUTTING_DOWN:
  case STATE_SHUTDOWN:
    return true;
  default:
    break;
  }

  return (!m_actions_contexts.empty() &&
          m_actions_contexts.back().first == ACTION_SHUT_DOWN);
}

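// Acquire path: wait for the watch to register if needed, derive the lock
// cookie from the watch handle, then run the pre-acquire hook followed by the
// AcquireRequest state machine and the post-acquire hook.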
template <typename I>
void ManagedLock<I>::send_acquire_lock() {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  if (m_state == STATE_LOCKED) {
    complete_active_action(STATE_LOCKED, 0);
    return;
  }

  ldout(m_cct, 10) << dendl;

  uint64_t watch_handle = m_watcher->get_watch_handle();
  if (watch_handle == 0) {
    lderr(m_cct) << "watcher not registered - delaying request" << dendl;
    m_state = STATE_WAITING_FOR_REGISTER;

    // shut down might race w/ release/re-acquire of the lock
    if (is_state_shutdown()) {
      complete_active_action(STATE_UNLOCKED, -ESHUTDOWN);
    }
    return;
  }

  m_state = STATE_ACQUIRING;
  m_cookie = encode_lock_cookie(watch_handle);

  m_work_queue->queue(new LambdaContext([this](int r) {
    pre_acquire_lock_handler(create_context_callback<
        ManagedLock<I>, &ManagedLock<I>::handle_pre_acquire_lock>(this));
  }));
}

template <typename I>
void ManagedLock<I>::handle_pre_acquire_lock(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  if (r < 0) {
    handle_acquire_lock(r);
    return;
  }

  using managed_lock::AcquireRequest;
  AcquireRequest<I>* req = AcquireRequest<I>::create(
    m_ioctx, m_watcher, m_asio_engine, m_oid, m_cookie, m_mode == EXCLUSIVE,
    m_blocklist_on_break_lock, m_blocklist_expire_seconds,
    create_context_callback<
      ManagedLock<I>, &ManagedLock<I>::handle_acquire_lock>(this));
  m_work_queue->queue(new C_SendLockRequest<AcquireRequest<I>>(req), 0);
}

template <typename I>
void ManagedLock<I>::handle_acquire_lock(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  if (r == -EBUSY || r == -EAGAIN || r == -EROFS) {
    ldout(m_cct, 5) << "unable to acquire exclusive lock" << dendl;
  } else if (r < 0) {
    lderr(m_cct) << "failed to acquire exclusive lock: " << cpp_strerror(r)
                 << dendl;
  } else {
    ldout(m_cct, 5) << "successfully acquired exclusive lock" << dendl;
  }

  m_post_next_state = (r < 0 ? STATE_UNLOCKED : STATE_LOCKED);

  m_work_queue->queue(new LambdaContext([this, r](int ret) {
    post_acquire_lock_handler(r, create_context_callback<
        ManagedLock<I>, &ManagedLock<I>::handle_post_acquire_lock>(this));
  }));
}

template <typename I>
void ManagedLock<I>::handle_post_acquire_lock(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  std::lock_guard locker{m_lock};

  if (r < 0 && m_post_next_state == STATE_LOCKED) {
    // release_lock without calling pre and post handlers
    revert_to_unlock_state(r);
  } else if (r != -ECANCELED) {
    // fail the lock request
    complete_active_action(m_post_next_state, r);
  }
}

template <typename I>
void ManagedLock<I>::revert_to_unlock_state(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  using managed_lock::ReleaseRequest;
  ReleaseRequest<I>* req = ReleaseRequest<I>::create(
    m_ioctx, m_watcher, m_work_queue, m_oid, m_cookie,
    new LambdaContext([this, r](int ret) {
      std::lock_guard locker{m_lock};
      ceph_assert(ret == 0);
      complete_active_action(STATE_UNLOCKED, r);
    }));
  m_work_queue->queue(new C_SendLockRequest<ReleaseRequest<I>>(req));
}

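// Reacquire path: refresh the lock cookie after the watch handle changed; if
// the cookie cannot be updated in place on the OSD, fall back to a full
// release/acquire cycle.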
template <typename I>
void ManagedLock<I>::send_reacquire_lock() {
  ceph_assert(ceph_mutex_is_locked(m_lock));

  if (m_state != STATE_LOCKED) {
    complete_active_action(m_state, 0);
    return;
  }

  ldout(m_cct, 10) << dendl;
  m_state = STATE_REACQUIRING;

  uint64_t watch_handle = m_watcher->get_watch_handle();
  if (watch_handle == 0) {
    // watch (re)failed while recovering
    lderr(m_cct) << "aborting reacquire due to invalid watch handle"
                 << dendl;

    // treat double-watch failure as a lost lock and invoke the
    // release/acquire handlers
    release_acquire_lock();
    complete_active_action(STATE_LOCKED, 0);
    return;
  }

  m_new_cookie = encode_lock_cookie(watch_handle);
  if (m_cookie == m_new_cookie && m_blocklist_on_break_lock) {
    ldout(m_cct, 10) << "skipping reacquire since cookie still valid"
                     << dendl;
    auto ctx = create_context_callback<
      ManagedLock, &ManagedLock<I>::handle_no_op_reacquire_lock>(this);
    post_reacquire_lock_handler(0, ctx);
    return;
  }

  auto ctx = create_context_callback<
    ManagedLock, &ManagedLock<I>::handle_reacquire_lock>(this);
  ctx = new LambdaContext([this, ctx](int r) {
    post_reacquire_lock_handler(r, ctx);
  });

  using managed_lock::ReacquireRequest;
  ReacquireRequest<I>* req = ReacquireRequest<I>::create(
    m_ioctx, m_oid, m_cookie, m_new_cookie, m_mode == EXCLUSIVE, ctx);
  m_work_queue->queue(new C_SendLockRequest<ReacquireRequest<I>>(req));
}

template <typename I>
void ManagedLock<I>::handle_reacquire_lock(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  std::lock_guard locker{m_lock};
  ceph_assert(m_state == STATE_REACQUIRING);

  if (r < 0) {
    if (r == -EOPNOTSUPP) {
      ldout(m_cct, 10) << "updating lock is not supported" << dendl;
    } else {
      lderr(m_cct) << "failed to update lock cookie: " << cpp_strerror(r)
                   << dendl;
    }

    release_acquire_lock();
  } else {
    m_cookie = m_new_cookie;
  }

  complete_active_action(STATE_LOCKED, 0);
}

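// No-op reacquire: the cookie is unchanged so nothing was sent to the OSD.
// Note that m_lock is not re-taken here; this path appears to rely on the
// no-op callback being completed synchronously while send_reacquire_lock()
// still holds the lock.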
template <typename I>
void ManagedLock<I>::handle_no_op_reacquire_lock(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;
  ceph_assert(m_state == STATE_REACQUIRING);
  ceph_assert(r >= 0);
  complete_active_action(STATE_LOCKED, 0);
}

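// Queues a release followed by a re-acquire, moving the contexts waiting on
// the active reacquire action onto the queued acquire action.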
template <typename I>
void ManagedLock<I>::release_acquire_lock() {
  ceph_assert(ceph_mutex_is_locked(m_lock));

  if (!is_state_shutdown()) {
    // queue a release and re-acquire of the lock since cookie cannot
    // be updated on older OSDs
    execute_action(ACTION_RELEASE_LOCK, nullptr);

    ceph_assert(!m_actions_contexts.empty());
    ActionContexts &action_contexts(m_actions_contexts.front());

    // reacquire completes when the request lock completes
    Contexts contexts;
    std::swap(contexts, action_contexts.second);
    if (contexts.empty()) {
      execute_action(ACTION_ACQUIRE_LOCK, nullptr);
    } else {
      for (auto ctx : contexts) {
        execute_action(ACTION_ACQUIRE_LOCK, ctx);
      }
    }
  }
}

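// Release path: run the pre-release hook, then the ReleaseRequest state
// machine, then the post-release hook.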
template <typename I>
void ManagedLock<I>::send_release_lock() {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  if (m_state == STATE_UNLOCKED) {
    complete_active_action(STATE_UNLOCKED, 0);
    return;
  }

  ldout(m_cct, 10) << dendl;
  m_state = STATE_PRE_RELEASING;

  m_work_queue->queue(new LambdaContext([this](int r) {
    pre_release_lock_handler(false, create_context_callback<
        ManagedLock<I>, &ManagedLock<I>::handle_pre_release_lock>(this));
  }));
}

template <typename I>
void ManagedLock<I>::handle_pre_release_lock(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  {
    std::lock_guard locker{m_lock};
    ceph_assert(m_state == STATE_PRE_RELEASING);
    m_state = STATE_RELEASING;
  }

  if (r < 0) {
    handle_release_lock(r);
    return;
  }

  using managed_lock::ReleaseRequest;
  ReleaseRequest<I>* req = ReleaseRequest<I>::create(
    m_ioctx, m_watcher, m_work_queue, m_oid, m_cookie,
    create_context_callback<
      ManagedLock<I>, &ManagedLock<I>::handle_release_lock>(this));
  m_work_queue->queue(new C_SendLockRequest<ReleaseRequest<I>>(req), 0);
}

template <typename I>
void ManagedLock<I>::handle_release_lock(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  std::lock_guard locker{m_lock};
  ceph_assert(m_state == STATE_RELEASING);

  if (r >= 0 || r == -EBLOCKLISTED || r == -ENOENT) {
    m_cookie = "";
    m_post_next_state = STATE_UNLOCKED;
  } else {
    m_post_next_state = STATE_LOCKED;
  }

  m_work_queue->queue(new LambdaContext([this, r](int ret) {
    post_release_lock_handler(false, r, create_context_callback<
        ManagedLock<I>, &ManagedLock<I>::handle_post_release_lock>(this));
  }));
}

template <typename I>
void ManagedLock<I>::handle_post_release_lock(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  std::lock_guard locker{m_lock};
  complete_active_action(m_post_next_state, r);
}

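// Shut-down path: if unlocked, invoke the shutdown hook directly; otherwise
// release the lock via the pre/post release hooks first. Both paths then wait
// for tracked operations to drain before completing the shut-down action.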
template <typename I>
void ManagedLock<I>::send_shutdown() {
  ldout(m_cct, 10) << dendl;
  ceph_assert(ceph_mutex_is_locked(m_lock));
  if (m_state == STATE_UNLOCKED) {
    m_state = STATE_SHUTTING_DOWN;
    m_work_queue->queue(new LambdaContext([this](int r) {
      shutdown_handler(r, create_context_callback<
          ManagedLock<I>, &ManagedLock<I>::handle_shutdown>(this));
    }));
    return;
  }

  ceph_assert(m_state == STATE_LOCKED);
  m_state = STATE_PRE_SHUTTING_DOWN;

  m_lock.unlock();
  m_work_queue->queue(new C_ShutDownRelease(this), 0);
  m_lock.lock();
}

template <typename I>
void ManagedLock<I>::handle_shutdown(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  wait_for_tracked_ops(r);
}

template <typename I>
void ManagedLock<I>::send_shutdown_release() {
  ldout(m_cct, 10) << dendl;

  std::lock_guard locker{m_lock};

  m_work_queue->queue(new LambdaContext([this](int r) {
    pre_release_lock_handler(true, create_context_callback<
        ManagedLock<I>, &ManagedLock<I>::handle_shutdown_pre_release>(this));
  }));
}

template <typename I>
void ManagedLock<I>::handle_shutdown_pre_release(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  std::string cookie;
  {
    std::lock_guard locker{m_lock};
    cookie = m_cookie;

    ceph_assert(m_state == STATE_PRE_SHUTTING_DOWN);
    m_state = STATE_SHUTTING_DOWN;
  }

  using managed_lock::ReleaseRequest;
  ReleaseRequest<I>* req = ReleaseRequest<I>::create(
    m_ioctx, m_watcher, m_work_queue, m_oid, cookie,
    new LambdaContext([this, r](int l) {
      int rst = r < 0 ? r : l;
      post_release_lock_handler(true, rst, create_context_callback<
          ManagedLock<I>, &ManagedLock<I>::handle_shutdown_post_release>(this));
    }));
  req->send();
}

template <typename I>
void ManagedLock<I>::handle_shutdown_post_release(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  wait_for_tracked_ops(r);
}

template <typename I>
void ManagedLock<I>::wait_for_tracked_ops(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  Context *ctx = new LambdaContext([this, r](int ret) {
    complete_shutdown(r);
  });

  m_async_op_tracker.wait_for_ops(ctx);
}

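// Completes the queued shut-down action and moves to STATE_SHUTDOWN; the
// owner is expected to destroy this instance from the fired callbacks.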
template <typename I>
void ManagedLock<I>::complete_shutdown(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  if (r < 0) {
    lderr(m_cct) << "failed to shut down lock: " << cpp_strerror(r)
                 << dendl;
  }

  ActionContexts action_contexts;
  {
    std::lock_guard locker{m_lock};
    ceph_assert(ceph_mutex_is_locked(m_lock));
    ceph_assert(m_actions_contexts.size() == 1);

    action_contexts = std::move(m_actions_contexts.front());
    m_actions_contexts.pop_front();
    m_state = STATE_SHUTDOWN;
  }

  // expect to be destroyed after firing callback
  for (auto ctx : action_contexts.second) {
    ctx->complete(r);
  }
}

} // namespace librbd

template class librbd::ManagedLock<librbd::ImageCtx>;