// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "librbd/ManagedLock.h"
#include "librbd/AsioEngine.h"
#include "librbd/ImageCtx.h"
#include "librbd/Watcher.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/managed_lock/AcquireRequest.h"
#include "librbd/managed_lock/BreakRequest.h"
#include "librbd/managed_lock/GetLockerRequest.h"
#include "librbd/managed_lock/ReleaseRequest.h"
#include "librbd/managed_lock/ReacquireRequest.h"
#include "librbd/managed_lock/Types.h"
#include "librbd/managed_lock/Utils.h"
#include "cls/lock/cls_lock_client.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/Cond.h"
#include "librbd/Utils.h"

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::ManagedLock: " << this << " " \
                           << __func__ << ": "

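// ManagedLock drives the cls_lock-based exclusive/shared lock on a RADOS
// object (e.g. an RBD image header). External requests -- acquire,
// try-acquire, release, reacquire and shut down -- are queued as Actions
// and processed one state transition at a time under m_lock.
//
// Illustrative caller-side sketch (assumed wiring, not part of this file):
//
//   lock->acquire_lock(on_acquired);    // queues ACTION_ACQUIRE_LOCK
//   ...                                 // perform I/O while is_lock_owner()
//   lock->release_lock(on_released);    // queues ACTION_RELEASE_LOCK
//   lock->shut_down(on_shut_down);      // final state: STATE_SHUTDOWN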
namespace librbd {

using std::string;
using namespace managed_lock;

namespace {

template <typename R>
struct C_SendLockRequest : public Context {
  R* request;
  explicit C_SendLockRequest(R* request) : request(request) {
  }
  void finish(int r) override {
    request->send();
  }
};

struct C_Tracked : public Context {
  AsyncOpTracker &tracker;
  Context *ctx;
  C_Tracked(AsyncOpTracker &tracker, Context *ctx)
    : tracker(tracker), ctx(ctx) {
    tracker.start_op();
  }
  ~C_Tracked() override {
    tracker.finish_op();
  }
  void finish(int r) override {
    ctx->complete(r);
  }
};

} // anonymous namespace

using librbd::util::create_context_callback;
using librbd::util::unique_lock_name;
using managed_lock::util::decode_lock_cookie;
using managed_lock::util::encode_lock_cookie;

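// The constructor only captures configuration; no RADOS calls are issued
// and the lock starts in STATE_UNLOCKED.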
template <typename I>
ManagedLock<I>::ManagedLock(librados::IoCtx &ioctx, AsioEngine& asio_engine,
                            const string& oid, Watcher *watcher, Mode mode,
                            bool blocklist_on_break_lock,
                            uint32_t blocklist_expire_seconds)
  : m_lock(ceph::make_mutex(unique_lock_name("librbd::ManagedLock<I>::m_lock", this))),
    m_ioctx(ioctx), m_cct(reinterpret_cast<CephContext *>(ioctx.cct())),
    m_asio_engine(asio_engine),
    m_work_queue(asio_engine.get_work_queue()),
    m_oid(oid),
    m_watcher(watcher),
    m_mode(mode),
    m_blocklist_on_break_lock(blocklist_on_break_lock),
    m_blocklist_expire_seconds(blocklist_expire_seconds),
    m_state(STATE_UNLOCKED) {
}

template <typename I>
ManagedLock<I>::~ManagedLock() {
  std::lock_guard locker{m_lock};
  ceph_assert(m_state == STATE_SHUTDOWN || m_state == STATE_UNLOCKED ||
              m_state == STATE_UNINITIALIZED);
  if (m_state == STATE_UNINITIALIZED) {
    // never initialized -- ensure any in-flight ops are complete
    // since we wouldn't expect shut_down to be invoked
    C_SaferCond ctx;
    m_async_op_tracker.wait_for_ops(&ctx);
    ctx.wait();
  }
  ceph_assert(m_async_op_tracker.empty());
}

template <typename I>
bool ManagedLock<I>::is_lock_owner() const {
  std::lock_guard locker{m_lock};

  return is_lock_owner(m_lock);
}

template <typename I>
bool ManagedLock<I>::is_lock_owner(ceph::mutex &lock) const {

  ceph_assert(ceph_mutex_is_locked(m_lock));

  bool lock_owner;

  switch (m_state) {
  case STATE_LOCKED:
  case STATE_REACQUIRING:
  case STATE_PRE_SHUTTING_DOWN:
  case STATE_POST_ACQUIRING:
  case STATE_PRE_RELEASING:
    lock_owner = true;
    break;
  default:
    lock_owner = false;
    break;
  }

  ldout(m_cct, 20) << lock_owner << dendl;
  return lock_owner;
}

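// Begin an orderly shutdown: abort an acquire stalled on watcher
// registration, then queue ACTION_SHUT_DOWN behind any pending actions.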
template <typename I>
void ManagedLock<I>::shut_down(Context *on_shut_down) {
  ldout(m_cct, 10) << dendl;

  std::lock_guard locker{m_lock};
  ceph_assert(!is_state_shutdown());

  if (m_state == STATE_WAITING_FOR_REGISTER) {
    // abort stalled acquire lock state
    ldout(m_cct, 10) << "woke up waiting (re)acquire" << dendl;
    Action active_action = get_active_action();
    ceph_assert(active_action == ACTION_TRY_LOCK ||
                active_action == ACTION_ACQUIRE_LOCK);
    complete_active_action(STATE_UNLOCKED, -ERESTART);
  }

  execute_action(ACTION_SHUT_DOWN, on_shut_down);
}

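// acquire_lock(), try_acquire_lock() and release_lock() share one pattern:
// if the lock is already in the requested steady state and nothing is
// queued, the callback completes immediately; otherwise the request is
// queued as an Action. A lock that is shutting down fails with -ERESTART.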
template <typename I>
void ManagedLock<I>::acquire_lock(Context *on_acquired) {
  int r = 0;
  {
    std::lock_guard locker{m_lock};
    if (is_state_shutdown()) {
      r = -ERESTART;
    } else if (m_state != STATE_LOCKED || !m_actions_contexts.empty()) {
      ldout(m_cct, 10) << dendl;
      execute_action(ACTION_ACQUIRE_LOCK, on_acquired);
      return;
    }
  }

  if (on_acquired != nullptr) {
    on_acquired->complete(r);
  }
}

template <typename I>
void ManagedLock<I>::try_acquire_lock(Context *on_acquired) {
  int r = 0;
  {
    std::lock_guard locker{m_lock};
    if (is_state_shutdown()) {
      r = -ERESTART;
    } else if (m_state != STATE_LOCKED || !m_actions_contexts.empty()) {
      ldout(m_cct, 10) << dendl;
      execute_action(ACTION_TRY_LOCK, on_acquired);
      return;
    }
  }

  if (on_acquired != nullptr) {
    on_acquired->complete(r);
  }
}

template <typename I>
void ManagedLock<I>::release_lock(Context *on_released) {
  int r = 0;
  {
    std::lock_guard locker{m_lock};
    if (is_state_shutdown()) {
      r = -ERESTART;
    } else if (m_state != STATE_UNLOCKED || !m_actions_contexts.empty()) {
      ldout(m_cct, 10) << dendl;
      execute_action(ACTION_RELEASE_LOCK, on_released);
      return;
    }
  }

  if (on_released != nullptr) {
    on_released->complete(r);
  }
}

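// Invoked after the watch has been re-established: either wake up an
// acquire that was waiting for the watcher, or queue ACTION_REACQUIRE_LOCK
// so the lock cookie can be updated to match the new watch handle.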
template <typename I>
void ManagedLock<I>::reacquire_lock(Context *on_reacquired) {
  {
    std::lock_guard locker{m_lock};

    if (m_state == STATE_WAITING_FOR_REGISTER ||
        m_state == STATE_WAITING_FOR_LOCK) {
      // restart the acquire lock process now that watch is valid
      ldout(m_cct, 10) << "woke up waiting (re)acquire" << dendl;
      Action active_action = get_active_action();
      ceph_assert(active_action == ACTION_TRY_LOCK ||
                  active_action == ACTION_ACQUIRE_LOCK);
      execute_next_action();
    } else if (!is_state_shutdown() &&
               (m_state == STATE_LOCKED ||
                m_state == STATE_ACQUIRING ||
                m_state == STATE_POST_ACQUIRING)) {
      // interlock the lock operation with other state ops
      ldout(m_cct, 10) << dendl;
      execute_action(ACTION_REACQUIRE_LOCK, on_reacquired);
      return;
    }
  }

  // ignore request if shutdown or not in a locked-related state
  if (on_reacquired != nullptr) {
    on_reacquired->complete(0);
  }
}

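// Look up the current lock holder via GetLockerRequest; the callback is
// tracked so shutdown waits for the in-flight request.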
template <typename I>
void ManagedLock<I>::get_locker(managed_lock::Locker *locker,
                                Context *on_finish) {
  ldout(m_cct, 10) << dendl;

  int r;
  {
    std::lock_guard l{m_lock};
    if (is_state_shutdown()) {
      r = -ERESTART;
    } else {
      on_finish = new C_Tracked(m_async_op_tracker, on_finish);
      auto req = managed_lock::GetLockerRequest<I>::create(
        m_ioctx, m_oid, m_mode == EXCLUSIVE, locker, on_finish);
      req->send();
      return;
    }
  }

  on_finish->complete(r);
}

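// Break another client's lock (optionally blocklisting it); refused with
// -EBUSY if this instance currently owns the lock.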
template <typename I>
void ManagedLock<I>::break_lock(const managed_lock::Locker &locker,
                                bool force_break_lock, Context *on_finish) {
  ldout(m_cct, 10) << dendl;

  int r;
  {
    std::lock_guard l{m_lock};
    if (is_state_shutdown()) {
      r = -ERESTART;
    } else if (is_lock_owner(m_lock)) {
      r = -EBUSY;
    } else {
      on_finish = new C_Tracked(m_async_op_tracker, on_finish);
      auto req = managed_lock::BreakRequest<I>::create(
        m_ioctx, m_asio_engine, m_oid, locker, m_mode == EXCLUSIVE,
        m_blocklist_on_break_lock, m_blocklist_expire_seconds, force_break_lock,
        on_finish);
      req->send();
      return;
    }
  }

  on_finish->complete(r);
}

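// Synchronously confirm with the OSD that this client still holds the
// header lock (cls_lock assert_locked); returns 0 on success or a negative
// errno explaining why ownership could not be verified.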
template <typename I>
int ManagedLock<I>::assert_header_locked() {
  ldout(m_cct, 10) << dendl;

  librados::ObjectReadOperation op;
  {
    std::lock_guard locker{m_lock};
    rados::cls::lock::assert_locked(&op, RBD_LOCK_NAME,
                                    (m_mode == EXCLUSIVE ? ClsLockType::EXCLUSIVE :
                                                           ClsLockType::SHARED),
                                    m_cookie,
                                    managed_lock::util::get_watcher_lock_tag());
  }

  int r = m_ioctx.operate(m_oid, &op, nullptr);
  if (r < 0) {
    if (r == -EBLOCKLISTED) {
      ldout(m_cct, 5) << "client is not lock owner -- client blocklisted"
                      << dendl;
    } else if (r == -ENOENT) {
      ldout(m_cct, 5) << "client is not lock owner -- no lock detected"
                      << dendl;
    } else if (r == -EBUSY) {
      ldout(m_cct, 5) << "client is not lock owner -- owned by different client"
                      << dendl;
    } else {
      lderr(m_cct) << "failed to verify lock ownership: " << cpp_strerror(r)
                   << dendl;
    }

    return r;
  }

  return 0;
}

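// The handlers below are pass-through defaults; subclasses (e.g.
// ExclusiveLock) override them to run extra work around acquire, release,
// reacquire and shutdown transitions.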
template <typename I>
void ManagedLock<I>::shutdown_handler(int r, Context *on_finish) {
  on_finish->complete(r);
}

template <typename I>
void ManagedLock<I>::pre_acquire_lock_handler(Context *on_finish) {
  on_finish->complete(0);
}

template <typename I>
void ManagedLock<I>::post_acquire_lock_handler(int r, Context *on_finish) {
  on_finish->complete(r);
}

template <typename I>
void ManagedLock<I>::pre_release_lock_handler(bool shutting_down,
                                              Context *on_finish) {
  on_finish->complete(0);
}

template <typename I>
void ManagedLock<I>::post_release_lock_handler(bool shutting_down, int r,
                                               Context *on_finish) {
  on_finish->complete(r);
}

template <typename I>
void ManagedLock<I>::post_reacquire_lock_handler(int r, Context *on_finish) {
  on_finish->complete(r);
}

template <typename I>
bool ManagedLock<I>::is_transition_state() const {
  switch (m_state) {
  case STATE_ACQUIRING:
  case STATE_WAITING_FOR_REGISTER:
  case STATE_REACQUIRING:
  case STATE_RELEASING:
  case STATE_PRE_SHUTTING_DOWN:
  case STATE_SHUTTING_DOWN:
  case STATE_INITIALIZING:
  case STATE_WAITING_FOR_LOCK:
  case STATE_POST_ACQUIRING:
  case STATE_PRE_RELEASING:
    return true;
  case STATE_UNLOCKED:
  case STATE_LOCKED:
  case STATE_SHUTDOWN:
  case STATE_UNINITIALIZED:
    break;
  }
  return false;
}

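// Actions and their completion contexts are queued in m_actions_contexts.
// Requesting an action that is already queued merely appends the context
// to the existing entry; only one action runs at a time and the next one
// starts once the current state transition finishes.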
template <typename I>
void ManagedLock<I>::append_context(Action action, Context *ctx) {
  ceph_assert(ceph_mutex_is_locked(m_lock));

  for (auto &action_ctxs : m_actions_contexts) {
    if (action == action_ctxs.first) {
      if (ctx != nullptr) {
        action_ctxs.second.push_back(ctx);
      }
      return;
    }
  }

  Contexts contexts;
  if (ctx != nullptr) {
    contexts.push_back(ctx);
  }
  m_actions_contexts.push_back({action, std::move(contexts)});
}

template <typename I>
void ManagedLock<I>::execute_action(Action action, Context *ctx) {
  ceph_assert(ceph_mutex_is_locked(m_lock));

  append_context(action, ctx);
  if (!is_transition_state()) {
    execute_next_action();
  }
}

template <typename I>
void ManagedLock<I>::execute_next_action() {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  ceph_assert(!m_actions_contexts.empty());
  switch (get_active_action()) {
  case ACTION_ACQUIRE_LOCK:
  case ACTION_TRY_LOCK:
    send_acquire_lock();
    break;
  case ACTION_REACQUIRE_LOCK:
    send_reacquire_lock();
    break;
  case ACTION_RELEASE_LOCK:
    send_release_lock();
    break;
  case ACTION_SHUT_DOWN:
    send_shutdown();
    break;
  default:
    ceph_abort();
    break;
  }
}

template <typename I>
typename ManagedLock<I>::Action ManagedLock<I>::get_active_action() const {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  ceph_assert(!m_actions_contexts.empty());
  return m_actions_contexts.front().first;
}

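// Finish the active action: record the next state, complete all queued
// contexts with m_lock dropped (callbacks may re-enter the lock), then
// kick off the next queued action if one is waiting.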
template <typename I>
void ManagedLock<I>::complete_active_action(State next_state, int r) {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  ceph_assert(!m_actions_contexts.empty());

  ActionContexts action_contexts(std::move(m_actions_contexts.front()));
  m_actions_contexts.pop_front();
  m_state = next_state;

  m_lock.unlock();
  for (auto ctx : action_contexts.second) {
    ctx->complete(r);
  }
  m_lock.lock();

  if (!is_transition_state() && !m_actions_contexts.empty()) {
    execute_next_action();
  }
}

template <typename I>
bool ManagedLock<I>::is_state_shutdown() const {
  ceph_assert(ceph_mutex_is_locked(m_lock));

  switch (m_state) {
  case STATE_PRE_SHUTTING_DOWN:
  case STATE_SHUTTING_DOWN:
  case STATE_SHUTDOWN:
    return true;
  default:
    break;
  }

  return (!m_actions_contexts.empty() &&
          m_actions_contexts.back().first == ACTION_SHUT_DOWN);
}

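// Acquire sequence: wait for a registered watch (its handle is encoded
// into the lock cookie), run the pre-acquire handler, then issue
// AcquireRequest; the post-acquire handler runs before the action is
// completed.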
template <typename I>
void ManagedLock<I>::send_acquire_lock() {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  if (m_state == STATE_LOCKED) {
    complete_active_action(STATE_LOCKED, 0);
    return;
  }

  ldout(m_cct, 10) << dendl;

  uint64_t watch_handle = m_watcher->get_watch_handle();
  if (watch_handle == 0) {
    if (m_watcher->is_blocklisted()) {
      lderr(m_cct) << "watcher not registered - client blocklisted" << dendl;
      complete_active_action(STATE_UNLOCKED, -EBLOCKLISTED);
    } else {
      lderr(m_cct) << "watcher not registered - delaying request" << dendl;
      m_state = STATE_WAITING_FOR_REGISTER;

      // shut down might race w/ release/re-acquire of the lock
      if (is_state_shutdown()) {
        complete_active_action(STATE_UNLOCKED, -ERESTART);
      }
    }
    return;
  }

  m_state = STATE_ACQUIRING;
  m_cookie = encode_lock_cookie(watch_handle);

  m_work_queue->queue(new LambdaContext([this](int r) {
    pre_acquire_lock_handler(create_context_callback<
        ManagedLock<I>, &ManagedLock<I>::handle_pre_acquire_lock>(this));
  }));
}

template <typename I>
void ManagedLock<I>::handle_pre_acquire_lock(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  if (r < 0) {
    handle_acquire_lock(r);
    return;
  }

  using managed_lock::AcquireRequest;
  AcquireRequest<I>* req = AcquireRequest<I>::create(
    m_ioctx, m_watcher, m_asio_engine, m_oid, m_cookie, m_mode == EXCLUSIVE,
    m_blocklist_on_break_lock, m_blocklist_expire_seconds,
    create_context_callback<
        ManagedLock<I>, &ManagedLock<I>::handle_acquire_lock>(this));
  m_work_queue->queue(new C_SendLockRequest<AcquireRequest<I>>(req), 0);
}

template <typename I>
void ManagedLock<I>::handle_acquire_lock(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  if (r == -EBUSY || r == -EAGAIN || r == -EROFS) {
    ldout(m_cct, 5) << "unable to acquire exclusive lock" << dendl;
  } else if (r < 0) {
    lderr(m_cct) << "failed to acquire exclusive lock: " << cpp_strerror(r)
                 << dendl;
  } else {
    ldout(m_cct, 5) << "successfully acquired exclusive lock" << dendl;
  }

  m_post_next_state = (r < 0 ? STATE_UNLOCKED : STATE_LOCKED);

  m_work_queue->queue(new LambdaContext([this, r](int ret) {
    post_acquire_lock_handler(r, create_context_callback<
        ManagedLock<I>, &ManagedLock<I>::handle_post_acquire_lock>(this));
  }));
}

template <typename I>
void ManagedLock<I>::handle_post_acquire_lock(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  std::lock_guard locker{m_lock};

  if (r < 0 && m_post_next_state == STATE_LOCKED) {
    // release_lock without calling pre and post handlers
    revert_to_unlock_state(r);
  } else if (r != -ECANCELED) {
    // fail the lock request
    complete_active_action(m_post_next_state, r);
  }
}

template <typename I>
void ManagedLock<I>::revert_to_unlock_state(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  using managed_lock::ReleaseRequest;
  ReleaseRequest<I>* req = ReleaseRequest<I>::create(m_ioctx, m_watcher,
      m_work_queue, m_oid, m_cookie,
      new LambdaContext([this, r](int ret) {
        std::lock_guard locker{m_lock};
        ceph_assert(ret == 0);
        complete_active_action(STATE_UNLOCKED, r);
      }));
  m_work_queue->queue(new C_SendLockRequest<ReleaseRequest<I>>(req));
}

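// Reacquire sequence: update the lock cookie in place via ReacquireRequest
// when the watch handle has changed; if the watch failed again or the OSD
// cannot update cookies, fall back to a full release + acquire.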
template <typename I>
void ManagedLock<I>::send_reacquire_lock() {
  ceph_assert(ceph_mutex_is_locked(m_lock));

  if (m_state != STATE_LOCKED) {
    complete_active_action(m_state, 0);
    return;
  }

  ldout(m_cct, 10) << dendl;
  m_state = STATE_REACQUIRING;

  uint64_t watch_handle = m_watcher->get_watch_handle();
  if (watch_handle == 0) {
    // watch (re)failed while recovering
    lderr(m_cct) << "aborting reacquire due to invalid watch handle"
                 << dendl;

    // treat double-watch failure as a lost lock and invoke the
    // release/acquire handlers
    release_acquire_lock();
    complete_active_action(STATE_LOCKED, 0);
    return;
  }

  m_new_cookie = encode_lock_cookie(watch_handle);
  if (m_cookie == m_new_cookie && m_blocklist_on_break_lock) {
    ldout(m_cct, 10) << "skipping reacquire since cookie still valid"
                     << dendl;
    auto ctx = create_context_callback<
      ManagedLock, &ManagedLock<I>::handle_no_op_reacquire_lock>(this);
    post_reacquire_lock_handler(0, ctx);
    return;
  }

  auto ctx = create_context_callback<
    ManagedLock, &ManagedLock<I>::handle_reacquire_lock>(this);
  ctx = new LambdaContext([this, ctx](int r) {
      post_reacquire_lock_handler(r, ctx);
    });

  using managed_lock::ReacquireRequest;
  ReacquireRequest<I>* req = ReacquireRequest<I>::create(m_ioctx, m_oid,
      m_cookie, m_new_cookie, m_mode == EXCLUSIVE, ctx);
  m_work_queue->queue(new C_SendLockRequest<ReacquireRequest<I>>(req));
}

template <typename I>
void ManagedLock<I>::handle_reacquire_lock(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  std::lock_guard locker{m_lock};
  ceph_assert(m_state == STATE_REACQUIRING);

  if (r < 0) {
    if (r == -EOPNOTSUPP) {
      ldout(m_cct, 10) << "updating lock is not supported" << dendl;
    } else {
      lderr(m_cct) << "failed to update lock cookie: " << cpp_strerror(r)
                   << dendl;
    }

    release_acquire_lock();
  } else {
    m_cookie = m_new_cookie;
  }

  complete_active_action(STATE_LOCKED, 0);
}

template <typename I>
void ManagedLock<I>::handle_no_op_reacquire_lock(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;
  ceph_assert(m_state == STATE_REACQUIRING);
  ceph_assert(r >= 0);
  complete_active_action(STATE_LOCKED, 0);
}

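// Fallback when the cookie cannot be updated in place: queue a release
// followed by a re-acquire, transferring any contexts waiting on the
// reacquire to the new acquire action.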
template <typename I>
void ManagedLock<I>::release_acquire_lock() {
  ceph_assert(ceph_mutex_is_locked(m_lock));

  if (!is_state_shutdown()) {
    // queue a release and re-acquire of the lock since cookie cannot
    // be updated on older OSDs
    execute_action(ACTION_RELEASE_LOCK, nullptr);

    ceph_assert(!m_actions_contexts.empty());
    ActionContexts &action_contexts(m_actions_contexts.front());

    // reacquire completes when the request lock completes
    Contexts contexts;
    std::swap(contexts, action_contexts.second);
    if (contexts.empty()) {
      execute_action(ACTION_ACQUIRE_LOCK, nullptr);
    } else {
      for (auto ctx : contexts) {
        execute_action(ACTION_ACQUIRE_LOCK, ctx);
      }
    }
  }
}

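// Release sequence mirrors acquire: pre-release handler, ReleaseRequest,
// post-release handler; -EBLOCKLISTED and -ENOENT are treated as an
// effective release.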
template <typename I>
void ManagedLock<I>::send_release_lock() {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  if (m_state == STATE_UNLOCKED) {
    complete_active_action(STATE_UNLOCKED, 0);
    return;
  }

  ldout(m_cct, 10) << dendl;
  m_state = STATE_PRE_RELEASING;

  m_work_queue->queue(new LambdaContext([this](int r) {
    pre_release_lock_handler(false, create_context_callback<
        ManagedLock<I>, &ManagedLock<I>::handle_pre_release_lock>(this));
  }));
}

template <typename I>
void ManagedLock<I>::handle_pre_release_lock(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  {
    std::lock_guard locker{m_lock};
    ceph_assert(m_state == STATE_PRE_RELEASING);
    m_state = STATE_RELEASING;
  }

  if (r < 0) {
    handle_release_lock(r);
    return;
  }

  using managed_lock::ReleaseRequest;
  ReleaseRequest<I>* req = ReleaseRequest<I>::create(m_ioctx, m_watcher,
      m_work_queue, m_oid, m_cookie,
      create_context_callback<
          ManagedLock<I>, &ManagedLock<I>::handle_release_lock>(this));
  m_work_queue->queue(new C_SendLockRequest<ReleaseRequest<I>>(req), 0);
}

template <typename I>
void ManagedLock<I>::handle_release_lock(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  std::lock_guard locker{m_lock};
  ceph_assert(m_state == STATE_RELEASING);

  if (r >= 0 || r == -EBLOCKLISTED || r == -ENOENT) {
    m_cookie = "";
    m_post_next_state = STATE_UNLOCKED;
  } else {
    m_post_next_state = STATE_LOCKED;
  }

  m_work_queue->queue(new LambdaContext([this, r](int ret) {
    post_release_lock_handler(false, r, create_context_callback<
        ManagedLock<I>, &ManagedLock<I>::handle_post_release_lock>(this));
  }));
}

template <typename I>
void ManagedLock<I>::handle_post_release_lock(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  std::lock_guard locker{m_lock};
  complete_active_action(m_post_next_state, r);
}

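// Shutdown: if the lock is held, release it first via the shutdown
// variants of the pre/post release handlers, wait for tracked async ops to
// drain, then complete the queued shut-down contexts in STATE_SHUTDOWN.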
template <typename I>
void ManagedLock<I>::send_shutdown() {
  ldout(m_cct, 10) << dendl;
  ceph_assert(ceph_mutex_is_locked(m_lock));
  if (m_state == STATE_UNLOCKED) {
    m_state = STATE_SHUTTING_DOWN;
    m_work_queue->queue(new LambdaContext([this](int r) {
      shutdown_handler(r, create_context_callback<
          ManagedLock<I>, &ManagedLock<I>::handle_shutdown>(this));
    }));
    return;
  }

  ceph_assert(m_state == STATE_LOCKED);
  m_state = STATE_PRE_SHUTTING_DOWN;

  m_lock.unlock();
  m_work_queue->queue(new C_ShutDownRelease(this), 0);
  m_lock.lock();
}

template <typename I>
void ManagedLock<I>::handle_shutdown(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  wait_for_tracked_ops(r);
}

template <typename I>
void ManagedLock<I>::send_shutdown_release() {
  ldout(m_cct, 10) << dendl;

  std::lock_guard locker{m_lock};

  m_work_queue->queue(new LambdaContext([this](int r) {
    pre_release_lock_handler(true, create_context_callback<
        ManagedLock<I>, &ManagedLock<I>::handle_shutdown_pre_release>(this));
  }));
}

template <typename I>
void ManagedLock<I>::handle_shutdown_pre_release(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  std::string cookie;
  {
    std::lock_guard locker{m_lock};
    cookie = m_cookie;

    ceph_assert(m_state == STATE_PRE_SHUTTING_DOWN);
    m_state = STATE_SHUTTING_DOWN;
  }

  using managed_lock::ReleaseRequest;
  ReleaseRequest<I>* req = ReleaseRequest<I>::create(m_ioctx, m_watcher,
      m_work_queue, m_oid, cookie,
      new LambdaContext([this, r](int l) {
        int rst = r < 0 ? r : l;
        post_release_lock_handler(true, rst, create_context_callback<
            ManagedLock<I>, &ManagedLock<I>::handle_shutdown_post_release>(this));
      }));
  req->send();
}

template <typename I>
void ManagedLock<I>::handle_shutdown_post_release(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  wait_for_tracked_ops(r);
}

template <typename I>
void ManagedLock<I>::wait_for_tracked_ops(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  Context *ctx = new LambdaContext([this, r](int ret) {
      complete_shutdown(r);
    });

  m_async_op_tracker.wait_for_ops(ctx);
}

template <typename I>
void ManagedLock<I>::complete_shutdown(int r) {
  ldout(m_cct, 10) << "r=" << r << dendl;

  if (r < 0) {
    lderr(m_cct) << "failed to shut down lock: " << cpp_strerror(r)
                 << dendl;
  }

  ActionContexts action_contexts;
  {
    std::lock_guard locker{m_lock};
    ceph_assert(ceph_mutex_is_locked(m_lock));
    ceph_assert(m_actions_contexts.size() == 1);

    action_contexts = std::move(m_actions_contexts.front());
    m_actions_contexts.pop_front();
    m_state = STATE_SHUTDOWN;
  }

  // expect to be destroyed after firing callback
  for (auto ctx : action_contexts.second) {
    ctx->complete(r);
  }
}

} // namespace librbd

template class librbd::ManagedLock<librbd::ImageCtx>;