// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "librbd/cache/Utils.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageWatcher.h"
#include "librbd/ImageState.h"
#include "librbd/exclusive_lock/ImageDispatch.h"
#include "librbd/exclusive_lock/PreAcquireRequest.h"
#include "librbd/exclusive_lock/PostAcquireRequest.h"
#include "librbd/exclusive_lock/PreReleaseRequest.h"
#include "librbd/io/ImageDispatcherInterface.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "common/ceph_mutex.h"
#include "common/dout.h"

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::ExclusiveLock: " << this << " " \
                           << __func__

namespace librbd {

using namespace exclusive_lock;
using librbd::util::create_context_callback;

template <typename I>
using ML = ManagedLock<I>;

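// Construct an exclusive lock for the image in the uninitialized state.
// The underlying managed lock is configured from the image's blocklist
// settings so a non-cooperative peer can be blocklisted when its lock is
// broken.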
template <typename I>
ExclusiveLock<I>::ExclusiveLock(I &image_ctx)
  : RefCountedObject(image_ctx.cct),
    ML<I>(image_ctx.md_ctx, *image_ctx.asio_engine, image_ctx.header_oid,
          image_ctx.image_watcher, managed_lock::EXCLUSIVE,
          image_ctx.config.template get_val<bool>("rbd_blocklist_on_break_lock"),
          image_ctx.config.template get_val<uint64_t>("rbd_blocklist_expire_seconds")),
    m_image_ctx(image_ctx) {
  std::lock_guard locker{ML<I>::m_lock};
  ML<I>::set_state_uninitialized();
}

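// Returns true if a maintenance operation request may proceed: the lock
// must be held and requests must not be blocked, unless the policy
// explicitly accepts this type of blocked request.  *ret_val is set to 0
// on acceptance and to the recorded blocked error otherwise.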
template <typename I>
bool ExclusiveLock<I>::accept_request(OperationRequestType request_type,
                                      int *ret_val) const {
  std::lock_guard locker{ML<I>::m_lock};

  bool accept_request =
    (!ML<I>::is_state_shutdown() && ML<I>::is_state_locked() &&
     (m_request_blocked_count == 0 ||
      m_image_ctx.get_exclusive_lock_policy()->accept_blocked_request(
        request_type)));
  if (ret_val != nullptr) {
    *ret_val = accept_request ? 0 : m_request_blocked_ret_val;
  }

  ldout(m_image_ctx.cct, 20) << "=" << accept_request << " (request_type="
                             << request_type << ")" << dendl;
  return accept_request;
}

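// Returns true if I/O and operations may proceed: the lock must not be
// shut down and must be locked (or running its post-acquire state
// machine).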
template <typename I>
bool ExclusiveLock<I>::accept_ops() const {
  std::lock_guard locker{ML<I>::m_lock};
  bool accept = accept_ops(ML<I>::m_lock);
  ldout(m_image_ctx.cct, 20) << "=" << accept << dendl;
  return accept;
}

template <typename I>
bool ExclusiveLock<I>::accept_ops(const ceph::mutex &lock) const {
  return (!ML<I>::is_state_shutdown() &&
          (ML<I>::is_state_locked() || ML<I>::is_state_post_acquiring()));
}

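// Delegate to the exclusive lock I/O dispatch layer: block I/O in the
// given direction until the lock is acquired.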
template <typename I>
void ExclusiveLock<I>::set_require_lock(bool init_shutdown,
                                        io::Direction direction,
                                        Context* on_finish) {
  m_image_dispatch->set_require_lock(init_shutdown, direction, on_finish);
}

template <typename I>
void ExclusiveLock<I>::unset_require_lock(io::Direction direction) {
  m_image_dispatch->unset_require_lock(direction);
}

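// Block maintenance operation requests, recording the error returned to
// rejected requests.  Calls nest; only the first caller's error value is
// retained.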
template <typename I>
void ExclusiveLock<I>::block_requests(int r) {
  std::lock_guard locker{ML<I>::m_lock};

  m_request_blocked_count++;
  if (m_request_blocked_ret_val == 0) {
    m_request_blocked_ret_val = r;
  }

  ldout(m_image_ctx.cct, 20) << ": r=" << r << dendl;
}

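// Undo one block_requests() call; the recorded error is cleared when the
// last blocker is removed.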
template <typename I>
void ExclusiveLock<I>::unblock_requests() {
  std::lock_guard locker{ML<I>::m_lock};

  ceph_assert(m_request_blocked_count > 0);
  m_request_blocked_count--;
  if (m_request_blocked_count == 0) {
    m_request_blocked_ret_val = 0;
  }

  ldout(m_image_ctx.cct, 20) << dendl;
}

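// Error reported for operations attempted without the lock:
// -EBLOCKLISTED if this client has been blocklisted, -EROFS otherwise.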
template <typename I>
int ExclusiveLock<I>::get_unlocked_op_error() const {
  if (m_image_ctx.image_watcher->is_blocklisted()) {
    return -EBLOCKLISTED;
  }
  return -EROFS;
}

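// Transition from uninitialized to unlocked: register the exclusive lock
// I/O dispatch layer and require the lock for writes -- or for all I/O
// when copy-on-read, journaling, or the persistent write-back cache is in
// play.  on_init completes once the dispatch layer has the requirement in
// place.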
template <typename I>
void ExclusiveLock<I>::init(uint64_t features, Context *on_init) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));

  on_init = create_context_callback<Context>(on_init, this);

  ldout(m_image_ctx.cct, 10) << ": features=" << features << dendl;

  {
    std::lock_guard locker{ML<I>::m_lock};
    ML<I>::set_state_initializing();
  }

  m_image_dispatch = exclusive_lock::ImageDispatch<I>::create(&m_image_ctx);
  m_image_ctx.io_image_dispatcher->register_dispatch(m_image_dispatch);

  on_init = new LambdaContext([this, on_init](int r) {
    {
      std::lock_guard locker{ML<I>::m_lock};
      ML<I>::set_state_unlocked();
    }

    on_init->complete(r);
  });

  bool pwl_enabled = cache::util::is_pwl_enabled(m_image_ctx);
  if (m_image_ctx.clone_copy_on_read ||
      (features & RBD_FEATURE_JOURNALING) != 0 ||
      pwl_enabled) {
    m_image_dispatch->set_require_lock(true, io::DIRECTION_BOTH, on_init);
  } else {
    m_image_dispatch->set_require_lock(true, io::DIRECTION_WRITE, on_init);
  }
}

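// Shut down the managed lock; an injected peer notification aborts any
// acquire action still stalled waiting for a peer to release the lock.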
template <typename I>
void ExclusiveLock<I>::shut_down(Context *on_shut_down) {
  ldout(m_image_ctx.cct, 10) << dendl;

  auto ref = ceph::ref_t<ExclusiveLock<I>>(this);
  on_shut_down = create_context_callback<Context>(on_shut_down, this);

  ML<I>::shut_down(on_shut_down);

  // if stalled in request state machine -- abort
  handle_peer_notification(0);
}

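// Called when a peer responds to our lock request; resumes a pending
// acquire action, recording the peer's result.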
template <typename I>
void ExclusiveLock<I>::handle_peer_notification(int r) {
  std::lock_guard locker{ML<I>::m_lock};
  if (!ML<I>::is_state_waiting_for_lock()) {
    return;
  }

  ldout(m_image_ctx.cct, 10) << dendl;
  ceph_assert(ML<I>::is_action_acquire_lock());

  m_acquire_lock_peer_ret_val = r;
  ML<I>::execute_next_action();
}

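// Begin tracking an in-flight operation.  Returns a completion that must
// be invoked when the operation finishes, or nullptr (with *ret_val set)
// if operations are not currently accepted.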
template <typename I>
Context *ExclusiveLock<I>::start_op(int* ret_val) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  std::lock_guard locker{ML<I>::m_lock};

  if (!accept_ops(ML<I>::m_lock)) {
    *ret_val = get_unlocked_op_error();
    return nullptr;
  }

  m_async_op_tracker.start_op();
  return new LambdaContext([this](int r) {
    m_async_op_tracker.finish_op();
  });
}

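// ManagedLock shutdown hook: detach this lock from the image context,
// tear down the exclusive lock I/O dispatch layer, and flush the watcher.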
template <typename I>
void ExclusiveLock<I>::shutdown_handler(int r, Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;

  {
    std::unique_lock owner_locker{m_image_ctx.owner_lock};
    m_image_ctx.exclusive_lock = nullptr;
  }

  on_finish = new LambdaContext([this, on_finish](int r) {
    m_image_dispatch = nullptr;
    m_image_ctx.image_watcher->flush(on_finish);
  });
  m_image_ctx.io_image_dispatcher->shut_down_dispatch(
    m_image_dispatch->get_dispatch_layer(), on_finish);
}

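// ManagedLock pre-acquire hook: fail fast if a peer has already refused
// to release the lock; otherwise run the PreAcquireRequest state machine.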
template <typename I>
void ExclusiveLock<I>::pre_acquire_lock_handler(Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;

  int acquire_lock_peer_ret_val = 0;
  {
    std::lock_guard locker{ML<I>::m_lock};
    std::swap(acquire_lock_peer_ret_val, m_acquire_lock_peer_ret_val);
  }

  if (acquire_lock_peer_ret_val == -EROFS) {
    ldout(m_image_ctx.cct, 10) << ": peer nacked lock request" << dendl;
    on_finish->complete(acquire_lock_peer_ret_val);
    return;
  }

  PreAcquireRequest<I> *req = PreAcquireRequest<I>::create(m_image_ctx,
                                                           on_finish);
  m_image_ctx.op_work_queue->queue(new LambdaContext([req](int r) {
    req->send();
  }));
}

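// ManagedLock post-acquire hook.  On -EBUSY/-EAGAIN the lock is requested
// from the current owner and the acquire action is cancelled; on success
// the PostAcquireRequest state machine is queued.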
template <typename I>
void ExclusiveLock<I>::post_acquire_lock_handler(int r, Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << ": r=" << r << dendl;

  if (r == -EROFS) {
    // peer refused to release the exclusive lock
    on_finish->complete(r);
    return;
  } else if (r < 0) {
    ML<I>::m_lock.lock();
    ceph_assert(ML<I>::is_state_acquiring());

    // PostAcquire state machine will not run, so we need to complete
    // the prepare step ourselves
    m_image_ctx.state->handle_prepare_lock_complete();

    // if lock is in-use by another client, request the lock
    if (ML<I>::is_action_acquire_lock() && (r == -EBUSY || r == -EAGAIN)) {
      ML<I>::set_state_waiting_for_lock();
      ML<I>::m_lock.unlock();

      // request the lock from a peer
      m_image_ctx.image_watcher->notify_request_lock();

      // inform the managed lock that we have interrupted the state machine
      r = -ECANCELED;
    } else {
      ML<I>::m_lock.unlock();

      // clear error if peer owns lock
      if (r == -EAGAIN) {
        r = 0;
      }
    }

    on_finish->complete(r);
    return;
  }

  std::lock_guard locker{ML<I>::m_lock};
  m_pre_post_callback = on_finish;
  using EL = ExclusiveLock<I>;
  PostAcquireRequest<I> *req = PostAcquireRequest<I>::create(m_image_ctx,
      util::create_context_callback<EL, &EL::handle_post_acquiring_lock>(this),
      util::create_context_callback<EL, &EL::handle_post_acquired_lock>(this));

  m_image_ctx.op_work_queue->queue(new LambdaContext([req](int r) {
    req->send();
  }));
}

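// The PostAcquireRequest state machine now owns the lock; record the
// post-acquiring state so accept_ops() starts admitting I/O.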
template <typename I>
void ExclusiveLock<I>::handle_post_acquiring_lock(int r) {
  ldout(m_image_ctx.cct, 10) << dendl;

  std::lock_guard locker{ML<I>::m_lock};

  ceph_assert(r == 0);

  // lock is owned at this point
  ML<I>::set_state_post_acquiring();
}

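// Completion of the PostAcquireRequest state machine: on success, record
// the acquisition time, notify peers, and lift the I/O lock requirement.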
template <typename I>
void ExclusiveLock<I>::handle_post_acquired_lock(int r) {
  ldout(m_image_ctx.cct, 10) << ": r=" << r << dendl;

  Context *on_finish = nullptr;
  {
    std::lock_guard locker{ML<I>::m_lock};
    ceph_assert(ML<I>::is_state_acquiring() ||
                ML<I>::is_state_post_acquiring());

    ceph_assert(m_pre_post_callback != nullptr);
    std::swap(m_pre_post_callback, on_finish);
  }

  if (r < 0) {
    on_finish->complete(r);
    return;
  }

  m_image_ctx.perfcounter->tset(l_librbd_lock_acquired_time,
                                ceph_clock_now());
  m_image_ctx.image_watcher->notify_acquired_lock();
  m_image_dispatch->unset_require_lock(io::DIRECTION_BOTH);

  on_finish->complete(0);
}

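// ManagedLock pre-release hook: queue the PreReleaseRequest state
// machine, handing it the dispatch layer and the in-flight operation
// tracker.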
template <typename I>
void ExclusiveLock<I>::pre_release_lock_handler(bool shutting_down,
                                                Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;
  std::lock_guard locker{ML<I>::m_lock};

  auto req = PreReleaseRequest<I>::create(
    m_image_ctx, m_image_dispatch, shutting_down, m_async_op_tracker,
    on_finish);
  m_image_ctx.op_work_queue->queue(new LambdaContext([req](int r) {
    req->send();
  }));
}

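// ManagedLock post-release hook.  For a plain release, notify peers that
// the lock is free; on shutdown, additionally detach from the image
// context and tear down the exclusive lock dispatch layer first.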
template <typename I>
void ExclusiveLock<I>::post_release_lock_handler(bool shutting_down, int r,
                                                 Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << ": r=" << r << " shutting_down="
                             << shutting_down << dendl;
  if (!shutting_down) {
    {
      std::lock_guard locker{ML<I>::m_lock};
      ceph_assert(ML<I>::is_state_pre_releasing() ||
                  ML<I>::is_state_releasing());
    }

    if (r >= 0) {
      m_image_ctx.image_watcher->notify_released_lock();
    }

    on_finish->complete(r);
  } else {
    {
      std::unique_lock owner_locker{m_image_ctx.owner_lock};
      m_image_ctx.exclusive_lock = nullptr;
    }

    on_finish = new LambdaContext([this, r, on_finish](int) {
      m_image_dispatch = nullptr;
      m_image_ctx.image_watcher->notify_released_lock();
      on_finish->complete(r);
    });
    m_image_ctx.io_image_dispatcher->shut_down_dispatch(
      m_image_dispatch->get_dispatch_layer(), on_finish);
  }
}

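// ManagedLock post-reacquire hook: re-advertise lock ownership to peers
// after the lock has been reacquired in place.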
template <typename I>
void ExclusiveLock<I>::post_reacquire_lock_handler(int r, Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;
  if (r >= 0) {
    m_image_ctx.image_watcher->notify_acquired_lock();
  }

  on_finish->complete(r);
}

} // namespace librbd

template class librbd::ExclusiveLock<librbd::ImageCtx>;