// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageWatcher.h"
#include "librbd/ImageState.h"
#include "librbd/exclusive_lock/PreAcquireRequest.h"
#include "librbd/exclusive_lock/PostAcquireRequest.h"
#include "librbd/exclusive_lock/PreReleaseRequest.h"
#include "librbd/io/ImageRequestWQ.h"
#include "librbd/Utils.h"
#include "common/Mutex.h"
#include "common/dout.h"

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::ExclusiveLock: " << this << " " \
                           << __func__

namespace librbd {

using namespace exclusive_lock;

template <typename I>
using ML = ManagedLock<I>;

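// The constructor defers all locking logic to the ManagedLock base class
// (aliased as ML above) and starts in the uninitialized state; init() must
// run before the lock can be acquired.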
template <typename I>
ExclusiveLock<I>::ExclusiveLock(I &image_ctx)
  : ML<I>(image_ctx.md_ctx, image_ctx.op_work_queue, image_ctx.header_oid,
          image_ctx.image_watcher, managed_lock::EXCLUSIVE,
          image_ctx.blacklist_on_break_lock,
          image_ctx.blacklist_expire_seconds),
    m_image_ctx(image_ctx) {
  Mutex::Locker locker(ML<I>::m_lock);
  ML<I>::set_state_uninitialized();
}

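// Returns true only when the lock is held and no caller has blocked
// requests via block_requests(); the stashed blocker error code is
// reported through *ret_val when a non-null pointer is supplied.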
template <typename I>
bool ExclusiveLock<I>::accept_requests(int *ret_val) const {
  Mutex::Locker locker(ML<I>::m_lock);

  bool accept_requests = (!ML<I>::is_state_shutdown() &&
                          ML<I>::is_state_locked() &&
                          m_request_blocked_count == 0);
  if (ret_val != nullptr) {
    *ret_val = m_request_blocked_ret_val;
  }

  ldout(m_image_ctx.cct, 20) << "=" << accept_requests << dendl;
  return accept_requests;
}

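// I/O operations are accepted while the lock is held or while the
// post-acquire state machine is still running; the private overload
// exists so callers that already hold m_lock can reuse the check.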
template <typename I>
bool ExclusiveLock<I>::accept_ops() const {
  Mutex::Locker locker(ML<I>::m_lock);
  bool accept = accept_ops(ML<I>::m_lock);
  ldout(m_image_ctx.cct, 20) << "=" << accept << dendl;
  return accept;
}

template <typename I>
bool ExclusiveLock<I>::accept_ops(const Mutex &lock) const {
  return (!ML<I>::is_state_shutdown() &&
          (ML<I>::is_state_locked() || ML<I>::is_state_post_acquiring()));
}

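// block_requests()/unblock_requests() maintain a nested block count; only
// the first blocker's error code is retained, and it is cleared once the
// count drops back to zero. For example:
//
//   lock->block_requests(-EBUSY);   // count=1, ret_val=-EBUSY
//   lock->block_requests(-EROFS);   // count=2, ret_val stays -EBUSY
//   lock->unblock_requests();       // count=1
//   lock->unblock_requests();       // count=0, ret_val reset to 0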
template <typename I>
void ExclusiveLock<I>::block_requests(int r) {
  Mutex::Locker locker(ML<I>::m_lock);

  m_request_blocked_count++;
  if (m_request_blocked_ret_val == 0) {
    m_request_blocked_ret_val = r;
  }

  ldout(m_image_ctx.cct, 20) << dendl;
}

template <typename I>
void ExclusiveLock<I>::unblock_requests() {
  Mutex::Locker locker(ML<I>::m_lock);

  assert(m_request_blocked_count > 0);
  m_request_blocked_count--;
  if (m_request_blocked_count == 0) {
    m_request_blocked_ret_val = 0;
  }

  ldout(m_image_ctx.cct, 20) << dendl;
}

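// Maps "lock not held" to an error code: -EBLACKLISTED if this client has
// been blacklisted by the cluster, otherwise -EROFS (image is effectively
// read-only without the lock).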
template <typename I>
int ExclusiveLock<I>::get_unlocked_op_error() const {
  if (m_image_ctx.image_watcher->is_blacklisted()) {
    return -EBLACKLISTED;
  }
  return -EROFS;
}

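// init() transitions the lock to the initializing state and blocks writes
// on the image; the C_InitComplete context (defined at the bottom of this
// file) finishes initialization once writes have been blocked.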
template <typename I>
void ExclusiveLock<I>::init(uint64_t features, Context *on_init) {
  assert(m_image_ctx.owner_lock.is_locked());
  ldout(m_image_ctx.cct, 10) << dendl;

  {
    Mutex::Locker locker(ML<I>::m_lock);
    ML<I>::set_state_initializing();
  }

  m_image_ctx.io_work_queue->block_writes(new C_InitComplete(this, features,
                                                             on_init));
}

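// Shutting down also fires handle_peer_notification(0) so that a request
// state machine stalled waiting on a peer is unblocked and can abort.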
template <typename I>
void ExclusiveLock<I>::shut_down(Context *on_shut_down) {
  ldout(m_image_ctx.cct, 10) << dendl;

  ML<I>::shut_down(on_shut_down);

  // if stalled in request state machine -- abort
  handle_peer_notification(0);
}

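// Invoked when the current lock owner responds to our lock request (or
// when shut_down() aborts the wait): the peer's return value is stashed
// and the pending acquire action is re-executed.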
template <typename I>
void ExclusiveLock<I>::handle_peer_notification(int r) {
  Mutex::Locker locker(ML<I>::m_lock);
  if (!ML<I>::is_state_waiting_for_lock()) {
    return;
  }

  ldout(m_image_ctx.cct, 10) << dendl;
  assert(ML<I>::is_action_acquire_lock());

  m_acquire_lock_peer_ret_val = r;
  ML<I>::execute_next_action();
}

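// start_op() registers an in-flight operation with m_async_op_tracker and
// returns a completion context that deregisters it when finished, or
// nullptr (with *ret_val set) if the lock is not currently usable.
// A typical caller (with m_image_ctx.owner_lock already held) would do:
//
//   int r;
//   Context *ctx = exclusive_lock->start_op(&r);
//   if (ctx == nullptr) {
//     return r;  // lock not usable: -EROFS or -EBLACKLISTED
//   }
//   // ... perform the operation ...
//   ctx->complete(0);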
template <typename I>
Context *ExclusiveLock<I>::start_op(int* ret_val) {
  assert(m_image_ctx.owner_lock.is_locked());
  Mutex::Locker locker(ML<I>::m_lock);

  if (!accept_ops(ML<I>::m_lock)) {
    *ret_val = get_unlocked_op_error();
    return nullptr;
  }

  m_async_op_tracker.start_op();
  return new FunctionContext([this](int r) {
    m_async_op_tracker.finish_op();
  });
}

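// Once writes are blocked, decide which I/O directions must wait for the
// lock: both reads and writes when copy-on-read or journaling is enabled
// (under copy-on-read even a read can trigger a write to the clone),
// otherwise writes only.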
template <typename I>
void ExclusiveLock<I>::handle_init_complete(uint64_t features) {
  ldout(m_image_ctx.cct, 10) << ": features=" << features << dendl;

  {
    RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
    if (m_image_ctx.clone_copy_on_read ||
        (features & RBD_FEATURE_JOURNALING) != 0) {
      m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, true);
    } else {
      m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_WRITE, true);
    }
  }

  Mutex::Locker locker(ML<I>::m_lock);
  ML<I>::set_state_unlocked();
}

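// Final stage of shut_down(): drop the lock requirement on the I/O queue,
// detach this object from the ImageCtx, resume writes, and flush the
// image watcher before completing.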
template <typename I>
void ExclusiveLock<I>::shutdown_handler(int r, Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;

  {
    RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
    m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, false);
    m_image_ctx.exclusive_lock = nullptr;
  }

  m_image_ctx.io_work_queue->unblock_writes();
  m_image_ctx.image_watcher->flush(on_finish);
}

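// A stashed -EROFS from a peer notification means the current owner
// refused to release the lock, so the acquire attempt is failed early
// instead of running the PreAcquireRequest state machine.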
template <typename I>
void ExclusiveLock<I>::pre_acquire_lock_handler(Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;

  int acquire_lock_peer_ret_val = 0;
  {
    Mutex::Locker locker(ML<I>::m_lock);
    std::swap(acquire_lock_peer_ret_val, m_acquire_lock_peer_ret_val);
  }

  if (acquire_lock_peer_ret_val == -EROFS) {
    ldout(m_image_ctx.cct, 10) << ": peer nacked lock request" << dendl;
    on_finish->complete(acquire_lock_peer_ret_val);
    return;
  }

  PreAcquireRequest<I> *req = PreAcquireRequest<I>::create(m_image_ctx,
                                                           on_finish);
  m_image_ctx.op_work_queue->queue(new FunctionContext([req](int r) {
    req->send();
  }));
}

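// Handles the result of the on-disk lock acquisition. -EBUSY/-EAGAIN mean
// another client holds the lock: the state machine parks in the
// waiting-for-lock state, asks the owner to release it via
// notify_request_lock(), and reports -ECANCELED. On success the
// PostAcquireRequest state machine is kicked off to finish initializing
// lock-protected state.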
template <typename I>
void ExclusiveLock<I>::post_acquire_lock_handler(int r, Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << ": r=" << r << dendl;

  if (r == -EROFS) {
    // peer refused to release the exclusive lock
    on_finish->complete(r);
    return;
  } else if (r < 0) {
    ML<I>::m_lock.Lock();
    assert(ML<I>::is_state_acquiring());

    // the PostAcquire state machine will not run, so complete the
    // prepare step here
    m_image_ctx.state->handle_prepare_lock_complete();

    // if the lock is in use by another client, request the lock
    if (ML<I>::is_action_acquire_lock() && (r == -EBUSY || r == -EAGAIN)) {
      ML<I>::set_state_waiting_for_lock();
      ML<I>::m_lock.Unlock();

      // request the lock from a peer
      m_image_ctx.image_watcher->notify_request_lock();

      // inform the managed lock that we have interrupted the state machine
      r = -ECANCELED;
    } else {
      ML<I>::m_lock.Unlock();

      // clear the error if a peer owns the lock
      if (r == -EAGAIN) {
        r = 0;
      }
    }

    on_finish->complete(r);
    return;
  }

  Mutex::Locker locker(ML<I>::m_lock);
  m_pre_post_callback = on_finish;
  using EL = ExclusiveLock<I>;
  PostAcquireRequest<I> *req = PostAcquireRequest<I>::create(m_image_ctx,
    util::create_context_callback<EL, &EL::handle_post_acquiring_lock>(this),
    util::create_context_callback<EL, &EL::handle_post_acquired_lock>(this));

  m_image_ctx.op_work_queue->queue(new FunctionContext([req](int r) {
    req->send();
  }));
}

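// Called by PostAcquireRequest once the on-disk lock is owned but before
// the post-acquire steps finish; the post-acquiring state lets
// accept_ops() admit I/O during that window.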
template <typename I>
void ExclusiveLock<I>::handle_post_acquiring_lock(int r) {
  ldout(m_image_ctx.cct, 10) << dendl;

  Mutex::Locker locker(ML<I>::m_lock);

  assert(r == 0);

  // lock is owned at this point
  ML<I>::set_state_post_acquiring();
}

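// On success, record the acquisition time in the perf counters, notify
// watchers, and let queued I/O proceed; the stashed pre/post callback is
// then completed with the result.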
template <typename I>
void ExclusiveLock<I>::handle_post_acquired_lock(int r) {
  ldout(m_image_ctx.cct, 10) << ": r=" << r << dendl;

  Context *on_finish = nullptr;
  {
    Mutex::Locker locker(ML<I>::m_lock);
    assert(ML<I>::is_state_acquiring() || ML<I>::is_state_post_acquiring());

    assert(m_pre_post_callback != nullptr);
    std::swap(m_pre_post_callback, on_finish);
  }

  if (r >= 0) {
    m_image_ctx.perfcounter->tset(l_librbd_lock_acquired_time,
                                  ceph_clock_now());
    m_image_ctx.image_watcher->notify_acquired_lock();
    m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, false);
    m_image_ctx.io_work_queue->unblock_writes();
  }

  on_finish->complete(r);
}

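// PreReleaseRequest quiesces the image before the lock is released; the
// async op tracker is passed in so the release can wait for operations
// started via start_op() to drain.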
template <typename I>
void ExclusiveLock<I>::pre_release_lock_handler(bool shutting_down,
                                                Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;
  Mutex::Locker locker(ML<I>::m_lock);

  PreReleaseRequest<I> *req = PreReleaseRequest<I>::create(
    m_image_ctx, shutting_down, m_async_op_tracker, on_finish);
  m_image_ctx.op_work_queue->queue(new FunctionContext([req](int r) {
    req->send();
  }));
}

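// After a release: on a normal release only notify peers (one of them may
// be waiting to acquire); on shutdown additionally detach from the
// ImageCtx and resume writes, mirroring shutdown_handler().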
template <typename I>
void ExclusiveLock<I>::post_release_lock_handler(bool shutting_down, int r,
                                                 Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << ": r=" << r << " shutting_down="
                             << shutting_down << dendl;
  if (!shutting_down) {
    {
      Mutex::Locker locker(ML<I>::m_lock);
      assert(ML<I>::is_state_pre_releasing() || ML<I>::is_state_releasing());
    }

    if (r >= 0) {
      m_image_ctx.image_watcher->notify_released_lock();
    }
  } else {
    {
      RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
      m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, false);
      m_image_ctx.exclusive_lock = nullptr;
    }

    if (r >= 0) {
      m_image_ctx.io_work_queue->unblock_writes();
    }

    m_image_ctx.image_watcher->notify_released_lock();
  }

  on_finish->complete(r);
}

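// After the managed lock is transparently reacquired (e.g. following a
// watch reconnect), peers are re-notified that this client owns the lock.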
template <typename I>
void ExclusiveLock<I>::post_reacquire_lock_handler(int r, Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;
  if (r >= 0) {
    m_image_ctx.image_watcher->notify_acquired_lock();
  }

  on_finish->complete(r);
}

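// Completion used by init(): once writes have been blocked it finishes
// the initialization started in init() and then completes the caller's
// context.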
template <typename I>
struct ExclusiveLock<I>::C_InitComplete : public Context {
  ExclusiveLock *exclusive_lock;
  uint64_t features;
  Context *on_init;

  C_InitComplete(ExclusiveLock *exclusive_lock, uint64_t features,
                 Context *on_init)
    : exclusive_lock(exclusive_lock), features(features), on_init(on_init) {
  }
  void finish(int r) override {
    if (r == 0) {
      exclusive_lock->handle_init_complete(features);
    }
    on_init->complete(r);
  }
};

} // namespace librbd

template class librbd::ExclusiveLock<librbd::ImageCtx>;