// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageWatcher.h"
#include "librbd/ImageState.h"
#include "librbd/exclusive_lock/PreAcquireRequest.h"
#include "librbd/exclusive_lock/PostAcquireRequest.h"
#include "librbd/exclusive_lock/PreReleaseRequest.h"
#include "librbd/io/ImageRequestWQ.h"
#include "librbd/Utils.h"
#include "common/ceph_mutex.h"
#include "common/dout.h"

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::ExclusiveLock: " << this << " " \
                           << __func__

namespace librbd {

using namespace exclusive_lock;
using librbd::util::create_context_callback;

template <typename I>
using ML = ManagedLock<I>;

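// ExclusiveLock is layered on top of ManagedLock<I>, locking the image
// header object.  The blacklist-on-break-lock behaviour is taken from the
// image configuration, and the lock starts out UNINITIALIZED until init()
// is invoked.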
template <typename I>
ExclusiveLock<I>::ExclusiveLock(I &image_ctx)
  : RefCountedObject(image_ctx.cct),
    ML<I>(image_ctx.md_ctx, image_ctx.op_work_queue, image_ctx.header_oid,
          image_ctx.image_watcher, managed_lock::EXCLUSIVE,
          image_ctx.config.template get_val<bool>("rbd_blacklist_on_break_lock"),
          image_ctx.config.template get_val<uint64_t>("rbd_blacklist_expire_seconds")),
    m_image_ctx(image_ctx) {
  std::lock_guard locker{ML<I>::m_lock};
  ML<I>::set_state_uninitialized();
}

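// Returns true if a maintenance operation request may proceed: the lock
// must be held and not shutting down, and requests must not be blocked
// (unless the exclusive lock policy accepts this blocked request type).
// On rejection, *ret_val receives the error recorded by block_requests().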
template <typename I>
bool ExclusiveLock<I>::accept_request(OperationRequestType request_type,
                                      int *ret_val) const {
  std::lock_guard locker{ML<I>::m_lock};

  bool accept_request =
    (!ML<I>::is_state_shutdown() && ML<I>::is_state_locked() &&
     (m_request_blocked_count == 0 ||
      m_image_ctx.get_exclusive_lock_policy()->accept_blocked_request(
        request_type)));
  if (ret_val != nullptr) {
    *ret_val = accept_request ? 0 : m_request_blocked_ret_val;
  }

  ldout(m_image_ctx.cct, 20) << "=" << accept_request << " (request_type="
                             << request_type << ")" << dendl;
  return accept_request;
}

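// Returns true if I/O and other ops may proceed, i.e. the lock is either
// fully held or in the post-acquiring phase.  The private overload takes
// the already-held ManagedLock mutex as a parameter purely to document
// its locking requirement.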
template <typename I>
bool ExclusiveLock<I>::accept_ops() const {
  std::lock_guard locker{ML<I>::m_lock};
  bool accept = accept_ops(ML<I>::m_lock);
  ldout(m_image_ctx.cct, 20) << "=" << accept << dendl;
  return accept;
}

template <typename I>
bool ExclusiveLock<I>::accept_ops(const ceph::mutex &lock) const {
  return (!ML<I>::is_state_shutdown() &&
          (ML<I>::is_state_locked() || ML<I>::is_state_post_acquiring()));
}

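// block_requests()/unblock_requests() maintain a nested block count; the
// first blocker's error code is what rejected accept_request() callers
// see, and it is cleared once the count drops back to zero.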
template <typename I>
void ExclusiveLock<I>::block_requests(int r) {
  std::lock_guard locker{ML<I>::m_lock};

  m_request_blocked_count++;
  if (m_request_blocked_ret_val == 0) {
    m_request_blocked_ret_val = r;
  }

  ldout(m_image_ctx.cct, 20) << "r=" << r << dendl;
}

template <typename I>
void ExclusiveLock<I>::unblock_requests() {
  std::lock_guard locker{ML<I>::m_lock};

  ceph_assert(m_request_blocked_count > 0);
  m_request_blocked_count--;
  if (m_request_blocked_count == 0) {
    m_request_blocked_ret_val = 0;
  }

  ldout(m_image_ctx.cct, 20) << dendl;
}

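// Error reported for an op attempted while the lock is not held:
// -EBLACKLISTED if this client has been blacklisted, otherwise -EROFS
// (the image is effectively read-only to us).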
template <typename I>
int ExclusiveLock<I>::get_unlocked_op_error() const {
  if (m_image_ctx.image_watcher->is_blacklisted()) {
    return -EBLACKLISTED;
  }
  return -EROFS;
}

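// Transitions the lock from UNINITIALIZED to UNLOCKED.  Writes are
// blocked up front; C_InitComplete (defined at the bottom of this file)
// then examines the image features to decide which I/O directions must
// require the lock.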
template <typename I>
void ExclusiveLock<I>::init(uint64_t features, Context *on_init) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));

  on_init = create_context_callback<Context>(on_init, this);

  ldout(m_image_ctx.cct, 10) << dendl;

  {
    std::lock_guard locker{ML<I>::m_lock};
    ML<I>::set_state_initializing();
  }

  m_image_ctx.io_work_queue->block_writes(new C_InitComplete(this, features,
                                                             on_init));
}

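// Shuts down the underlying ManagedLock, then aborts any lock request
// still waiting on a peer so the state machine cannot stall during
// teardown.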
template <typename I>
void ExclusiveLock<I>::shut_down(Context *on_shut_down) {
  ldout(m_image_ctx.cct, 10) << dendl;

  on_shut_down = create_context_callback<Context>(on_shut_down, this);

  ML<I>::shut_down(on_shut_down);

  // if stalled in request state machine -- abort
  handle_peer_notification(0);
}

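// Invoked when a peer responds to our lock request (or with r == 0 from
// shut_down() to abort a stalled request).  Only meaningful while in
// WAITING_FOR_LOCK; the result is consumed by the next pre-acquire pass.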
template <typename I>
void ExclusiveLock<I>::handle_peer_notification(int r) {
  std::lock_guard locker{ML<I>::m_lock};
  if (!ML<I>::is_state_waiting_for_lock()) {
    return;
  }

  ldout(m_image_ctx.cct, 10) << dendl;
  ceph_assert(ML<I>::is_action_acquire_lock());

  m_acquire_lock_peer_ret_val = r;
  ML<I>::execute_next_action();
}

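// Begins tracking an async operation against the lock: returns a context
// the caller must complete once the op finishes (allowing lock release to
// drain in-flight ops), or nullptr with *ret_val set if ops are not
// currently accepted.  Illustrative caller pattern (not from this file):
//
//   int ret_val = 0;
//   Context *op_ctx = image_ctx.exclusive_lock->start_op(&ret_val);
//   if (op_ctx == nullptr) {
//     return ret_val;  // lock lost or shutting down
//   }
//   // ... perform the operation, then:
//   op_ctx->complete(0);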
template <typename I>
Context *ExclusiveLock<I>::start_op(int* ret_val) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  std::lock_guard locker{ML<I>::m_lock};

  if (!accept_ops(ML<I>::m_lock)) {
    *ret_val = get_unlocked_op_error();
    return nullptr;
  }

  m_async_op_tracker.start_op();
  return new LambdaContext([this](int r) {
      m_async_op_tracker.finish_op();
    });
}

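// Runs once init()'s write block is in place: journaling and clone
// copy-on-read both require the lock for reads as well as writes;
// otherwise only writes need it.  The lock then settles into the
// UNLOCKED state.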
template <typename I>
void ExclusiveLock<I>::handle_init_complete(uint64_t features) {
  ldout(m_image_ctx.cct, 10) << ": features=" << features << dendl;

  {
    std::shared_lock owner_locker{m_image_ctx.owner_lock};
    if (m_image_ctx.clone_copy_on_read ||
        (features & RBD_FEATURE_JOURNALING) != 0) {
      m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, true);
    } else {
      m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_WRITE, true);
    }
  }

  std::lock_guard locker{ML<I>::m_lock};
  ML<I>::set_state_unlocked();
}

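// ManagedLock shutdown callback: detach this instance from the image
// context, drop the I/O lock requirement, unblock writes and flush the
// image watcher before completing.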
template <typename I>
void ExclusiveLock<I>::shutdown_handler(int r, Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;

  {
    std::unique_lock owner_locker{m_image_ctx.owner_lock};
    m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, false);
    m_image_ctx.exclusive_lock = nullptr;
  }

  m_image_ctx.io_work_queue->unblock_writes();
  m_image_ctx.image_watcher->flush(on_finish);
}

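// First phase of lock acquisition.  A pending -EROFS recorded by
// handle_peer_notification() means the peer nacked us and short-circuits
// the attempt; otherwise the PreAcquireRequest state machine is queued.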
template <typename I>
void ExclusiveLock<I>::pre_acquire_lock_handler(Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;

  int acquire_lock_peer_ret_val = 0;
  {
    std::lock_guard locker{ML<I>::m_lock};
    std::swap(acquire_lock_peer_ret_val, m_acquire_lock_peer_ret_val);
  }

  if (acquire_lock_peer_ret_val == -EROFS) {
    ldout(m_image_ctx.cct, 10) << ": peer nacked lock request" << dendl;
    on_finish->complete(acquire_lock_peer_ret_val);
    return;
  }

  PreAcquireRequest<I> *req = PreAcquireRequest<I>::create(m_image_ctx,
                                                           on_finish);
  m_image_ctx.op_work_queue->queue(new LambdaContext([req](int r) {
    req->send();
  }));
}

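// Second phase of lock acquisition.  -EROFS (peer refused to release) is
// propagated as-is.  -EBUSY/-EAGAIN with an acquire action pending means
// a peer owns the lock: switch to WAITING_FOR_LOCK, ask the peer for the
// lock via the image watcher, and report -ECANCELED so ManagedLock knows
// its state machine was interrupted.  On success the PostAcquireRequest
// state machine runs, reporting through the two handlers below.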
template <typename I>
void ExclusiveLock<I>::post_acquire_lock_handler(int r, Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << ": r=" << r << dendl;

  if (r == -EROFS) {
    // peer refused to release the exclusive lock
    on_finish->complete(r);
    return;
  } else if (r < 0) {
    ML<I>::m_lock.lock();
    ceph_assert(ML<I>::is_state_acquiring());

    // PostAcquire state machine will not run, so we need to complete the
    // prepare step ourselves
    m_image_ctx.state->handle_prepare_lock_complete();

    // if lock is in-use by another client, request the lock
    if (ML<I>::is_action_acquire_lock() && (r == -EBUSY || r == -EAGAIN)) {
      ML<I>::set_state_waiting_for_lock();
      ML<I>::m_lock.unlock();

      // request the lock from a peer
      m_image_ctx.image_watcher->notify_request_lock();

      // inform ManagedLock that we have interrupted the state machine
      r = -ECANCELED;
    } else {
      ML<I>::m_lock.unlock();

      // clear error if peer owns lock
      if (r == -EAGAIN) {
        r = 0;
      }
    }

    on_finish->complete(r);
    return;
  }

  std::lock_guard locker{ML<I>::m_lock};
  m_pre_post_callback = on_finish;
  using EL = ExclusiveLock<I>;
  PostAcquireRequest<I> *req = PostAcquireRequest<I>::create(m_image_ctx,
    util::create_context_callback<EL, &EL::handle_post_acquiring_lock>(this),
    util::create_context_callback<EL, &EL::handle_post_acquired_lock>(this));

  m_image_ctx.op_work_queue->queue(new LambdaContext([req](int r) {
    req->send();
  }));
}

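// PostAcquireRequest callbacks: the first marks the lock POST_ACQUIRING
// once ownership is confirmed; the second notifies peers, drops the I/O
// lock requirement and unblocks writes on success, then completes the
// saved pre/post callback.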
template <typename I>
void ExclusiveLock<I>::handle_post_acquiring_lock(int r) {
  ldout(m_image_ctx.cct, 10) << dendl;

  std::lock_guard locker{ML<I>::m_lock};

  ceph_assert(r == 0);

  // lock is owned at this point
  ML<I>::set_state_post_acquiring();
}

template <typename I>
void ExclusiveLock<I>::handle_post_acquired_lock(int r) {
  ldout(m_image_ctx.cct, 10) << ": r=" << r << dendl;

  Context *on_finish = nullptr;
  {
    std::lock_guard locker{ML<I>::m_lock};
    ceph_assert(ML<I>::is_state_acquiring() ||
                ML<I>::is_state_post_acquiring());

    ceph_assert(m_pre_post_callback != nullptr);
    std::swap(m_pre_post_callback, on_finish);
  }

  if (r >= 0) {
    m_image_ctx.perfcounter->tset(l_librbd_lock_acquired_time,
                                  ceph_clock_now());
    m_image_ctx.image_watcher->notify_acquired_lock();
    m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, false);
    m_image_ctx.io_work_queue->unblock_writes();
  }

  on_finish->complete(r);
}

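// First phase of lock release: queue the PreReleaseRequest state machine,
// handing it m_async_op_tracker so ops started via start_op() can be
// drained before the lock is released.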
template <typename I>
void ExclusiveLock<I>::pre_release_lock_handler(bool shutting_down,
                                                Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;
  std::lock_guard locker{ML<I>::m_lock};

  PreReleaseRequest<I> *req = PreReleaseRequest<I>::create(
    m_image_ctx, shutting_down, m_async_op_tracker, on_finish);
  m_image_ctx.op_work_queue->queue(new LambdaContext([req](int r) {
    req->send();
  }));
}

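// Second phase of lock release.  On a plain release, peers are notified
// only on success; on shutdown the lock is detached from the image
// context, writes are unblocked on success, and peers are always
// notified.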
template <typename I>
void ExclusiveLock<I>::post_release_lock_handler(bool shutting_down, int r,
                                                 Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << ": r=" << r << " shutting_down="
                             << shutting_down << dendl;
  if (!shutting_down) {
    {
      std::lock_guard locker{ML<I>::m_lock};
      ceph_assert(ML<I>::is_state_pre_releasing() ||
                  ML<I>::is_state_releasing());
    }

    if (r >= 0) {
      m_image_ctx.image_watcher->notify_released_lock();
    }
  } else {
    {
      std::unique_lock owner_locker{m_image_ctx.owner_lock};
      m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, false);
      m_image_ctx.exclusive_lock = nullptr;
    }

    if (r >= 0) {
      m_image_ctx.io_work_queue->unblock_writes();
    }

    m_image_ctx.image_watcher->notify_released_lock();
  }

  on_finish->complete(r);
}

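// Called after ManagedLock re-acquires the lock (e.g. to refresh the lock
// cookie after a watch re-registration); on success peers are re-notified
// that we still own the lock.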
template <typename I>
void ExclusiveLock<I>::post_reacquire_lock_handler(int r, Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;
  if (r >= 0) {
    m_image_ctx.image_watcher->notify_acquired_lock();
  }

  on_finish->complete(r);
}

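// Completion for the block_writes() call issued by init(): on success it
// lets handle_init_complete() configure the I/O lock requirements before
// chaining to the caller's on_init context.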
template <typename I>
struct ExclusiveLock<I>::C_InitComplete : public Context {
  ExclusiveLock *exclusive_lock;
  uint64_t features;
  Context *on_init;

  C_InitComplete(ExclusiveLock *exclusive_lock, uint64_t features,
                 Context *on_init)
    : exclusive_lock(exclusive_lock), features(features), on_init(on_init) {
  }
  void finish(int r) override {
    if (r == 0) {
      exclusive_lock->handle_init_complete(features);
    }
    on_init->complete(r);
  }
};

} // namespace librbd

template class librbd::ExclusiveLock<librbd::ImageCtx>;