// ceph/src/librbd/ExclusiveLock.cc
// (commit a3d69fce89fbed9b7b48a1f5f910f5d6bf4c5c31)
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3
4 #include "librbd/ExclusiveLock.h"
5 #include "librbd/ImageCtx.h"
6 #include "librbd/ImageWatcher.h"
7 #include "librbd/ImageState.h"
8 #include "librbd/exclusive_lock/PreAcquireRequest.h"
9 #include "librbd/exclusive_lock/PostAcquireRequest.h"
10 #include "librbd/exclusive_lock/PreReleaseRequest.h"
11 #include "librbd/io/ImageRequestWQ.h"
12 #include "librbd/Utils.h"
13 #include "common/Mutex.h"
14 #include "common/dout.h"
15
16 #define dout_subsys ceph_subsys_rbd
17 #undef dout_prefix
18 #define dout_prefix *_dout << "librbd::ExclusiveLock: " << this << " " \
19 << __func__
20
21 namespace librbd {
22
23 using namespace exclusive_lock;
24
25 template <typename I>
26 using ML = ManagedLock<I>;
27
// Construct the exclusive lock for an image.  The underlying ManagedLock
// is parameterized from the image context (metadata ioctx, work queue,
// header object, watcher, blacklist settings) and the lock starts in the
// UNINITIALIZED state until init() is invoked.
template <typename I>
ExclusiveLock<I>::ExclusiveLock(I &image_ctx)
  : ML<I>(image_ctx.md_ctx, image_ctx.op_work_queue, image_ctx.header_oid,
          image_ctx.image_watcher, managed_lock::EXCLUSIVE,
          image_ctx.blacklist_on_break_lock,
          image_ctx.blacklist_expire_seconds),
    m_image_ctx(image_ctx) {
  Mutex::Locker locker(ML<I>::m_lock);
  ML<I>::set_state_uninitialized();
}
38
39 template <typename I>
40 bool ExclusiveLock<I>::accept_requests(int *ret_val) const {
41 Mutex::Locker locker(ML<I>::m_lock);
42
43 bool accept_requests = (!ML<I>::is_state_shutdown() &&
44 ML<I>::is_state_locked() &&
45 m_request_blocked_count == 0);
46 if (ret_val != nullptr) {
47 *ret_val = m_request_blocked_ret_val;
48 }
49
50 ldout(m_image_ctx.cct, 20) << "=" << accept_requests << dendl;
51 return accept_requests;
52 }
53
54 template <typename I>
55 bool ExclusiveLock<I>::accept_ops() const {
56 Mutex::Locker locker(ML<I>::m_lock);
57 bool accept = accept_ops(ML<I>::m_lock);
58 ldout(m_image_ctx.cct, 20) << "=" << accept << dendl;
59 return accept;
60 }
61
// Lock-held overload of accept_ops().  The Mutex parameter is unused at
// runtime; it exists to document that the caller must already hold
// ML<I>::m_lock when reading the state.  Ops are accepted while the lock
// is owned (LOCKED or POST_ACQUIRING) and not shutting down.
template <typename I>
bool ExclusiveLock<I>::accept_ops(const Mutex &lock) const {
  return (!ML<I>::is_state_shutdown() &&
          (ML<I>::is_state_locked() || ML<I>::is_state_post_acquiring()));
}
67
68 template <typename I>
69 void ExclusiveLock<I>::block_requests(int r) {
70 Mutex::Locker locker(ML<I>::m_lock);
71
72 m_request_blocked_count++;
73 if (m_request_blocked_ret_val == 0) {
74 m_request_blocked_ret_val = r;
75 }
76
77 ldout(m_image_ctx.cct, 20) << dendl;
78 }
79
80 template <typename I>
81 void ExclusiveLock<I>::unblock_requests() {
82 Mutex::Locker locker(ML<I>::m_lock);
83
84 assert(m_request_blocked_count > 0);
85 m_request_blocked_count--;
86 if (m_request_blocked_count == 0) {
87 m_request_blocked_ret_val = 0;
88 }
89
90 ldout(m_image_ctx.cct, 20) << dendl;
91 }
92
// Initialize the lock: transition to INITIALIZING, then block image
// writes.  Once writes are blocked, C_InitComplete invokes
// handle_init_complete(features) and afterwards completes on_init.
// Precondition: the caller holds the image owner_lock.
template <typename I>
void ExclusiveLock<I>::init(uint64_t features, Context *on_init) {
  assert(m_image_ctx.owner_lock.is_locked());
  ldout(m_image_ctx.cct, 10) << dendl;

  {
    Mutex::Locker locker(ML<I>::m_lock);
    ML<I>::set_state_initializing();
  }

  m_image_ctx.io_work_queue->block_writes(new C_InitComplete(this, features,
                                                             on_init));
}
106
// Shut down the lock via the ManagedLock state machine, then fire a
// synthetic peer notification so that an acquire stalled in
// WAITING_FOR_LOCK is aborted rather than stalling the shutdown.
template <typename I>
void ExclusiveLock<I>::shut_down(Context *on_shut_down) {
  ldout(m_image_ctx.cct, 10) << dendl;

  ML<I>::shut_down(on_shut_down);

  // if stalled in request state machine -- abort
  handle_peer_notification(0);
}
116
// Resume a stalled lock acquisition after a peer notification.  Only
// meaningful while in WAITING_FOR_LOCK (otherwise a no-op).  The peer's
// return code is stashed in m_acquire_lock_peer_ret_val -- consumed
// later by pre_acquire_lock_handler() -- before re-driving the
// ManagedLock action queue.
template <typename I>
void ExclusiveLock<I>::handle_peer_notification(int r) {
  Mutex::Locker locker(ML<I>::m_lock);
  if (!ML<I>::is_state_waiting_for_lock()) {
    return;
  }

  ldout(m_image_ctx.cct, 10) << dendl;
  assert(ML<I>::is_action_acquire_lock());

  m_acquire_lock_peer_ret_val = r;
  ML<I>::execute_next_action();
}
130
// Begin tracking an in-flight op.  Returns nullptr when ops are not
// currently accepted (see accept_ops()); otherwise registers the op
// with m_async_op_tracker and returns a context the caller must
// complete to mark the op finished.
// Precondition: the caller holds the image owner_lock.
template <typename I>
Context *ExclusiveLock<I>::start_op() {
  assert(m_image_ctx.owner_lock.is_locked());
  Mutex::Locker locker(ML<I>::m_lock);

  if (!accept_ops(ML<I>::m_lock)) {
    return nullptr;
  }

  m_async_op_tracker.start_op();
  return new FunctionContext([this](int r) {
    m_async_op_tracker.finish_op();
  });
}
145
// Completion of init() once image writes are blocked.  Journaling
// additionally requires lock ownership for reads; finally the lock
// transitions to UNLOCKED (initialized but not owned).
template <typename I>
void ExclusiveLock<I>::handle_init_complete(uint64_t features) {
  ldout(m_image_ctx.cct, 10) << ": features=" << features << dendl;

  if ((features & RBD_FEATURE_JOURNALING) != 0) {
    m_image_ctx.io_work_queue->set_require_lock_on_read();
  }

  Mutex::Locker locker(ML<I>::m_lock);
  ML<I>::set_state_unlocked();
}
157
// ManagedLock shutdown hook: detach this lock from the image (clear the
// read-lock requirement and null out image_ctx.exclusive_lock while
// holding the owner lock for writing), re-enable writes, then flush the
// image watcher before completing on_finish.
template <typename I>
void ExclusiveLock<I>::shutdown_handler(int r, Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;

  {
    RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
    m_image_ctx.io_work_queue->clear_require_lock_on_read();
    m_image_ctx.exclusive_lock = nullptr;
  }

  m_image_ctx.io_work_queue->unblock_writes();
  m_image_ctx.image_watcher->flush(on_finish);
}
171
// ManagedLock pre-acquire hook.  First consumes (and resets) any peer
// return code recorded by handle_peer_notification(); -EROFS means the
// peer refused to release the lock, so the acquire fails immediately.
// Otherwise a PreAcquireRequest is queued on the op work queue.
template <typename I>
void ExclusiveLock<I>::pre_acquire_lock_handler(Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;

  int acquire_lock_peer_ret_val = 0;
  {
    Mutex::Locker locker(ML<I>::m_lock);
    // take ownership of the pending peer response, zeroing the member
    std::swap(acquire_lock_peer_ret_val, m_acquire_lock_peer_ret_val);
  }

  if (acquire_lock_peer_ret_val == -EROFS) {
    ldout(m_image_ctx.cct, 10) << ": peer nacked lock request" << dendl;
    on_finish->complete(acquire_lock_peer_ret_val);
    return;
  }

  PreAcquireRequest<I> *req = PreAcquireRequest<I>::create(m_image_ctx,
                                                           on_finish);
  m_image_ctx.op_work_queue->queue(new FunctionContext([req](int r) {
    req->send();
  }));
}
194
// ManagedLock post-acquire hook, invoked with the result of the on-disk
// lock acquisition attempt:
//  * r == -EROFS: the peer refused to release the lock -- fail upward.
//  * r < 0 (other): the PostAcquire state machine will not run, so the
//    image-state "prepare lock" phase is completed here.  If another
//    client holds the lock (-EBUSY/-EAGAIN) while an acquire action is
//    queued, switch to WAITING_FOR_LOCK, ask the peer to release it via
//    watch/notify, and report -ECANCELED so the ManagedLock state
//    machine knows it was interrupted; otherwise -EAGAIN alone is
//    downgraded to success.
//  * r == 0: save on_finish and run the PostAcquireRequest state
//    machine; completion flows through handle_post_acquiring_lock() /
//    handle_post_acquired_lock().
template <typename I>
void ExclusiveLock<I>::post_acquire_lock_handler(int r, Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << ": r=" << r << dendl;

  if (r == -EROFS) {
    // peer refused to release the exclusive lock
    on_finish->complete(r);
    return;
  } else if (r < 0) {
    ML<I>::m_lock.Lock();
    assert(ML<I>::is_state_acquiring());

    // PostAcquire state machine will not run, so we need complete prepare
    m_image_ctx.state->handle_prepare_lock_complete();

    // if lock is in-use by another client, request the lock
    if (ML<I>::is_action_acquire_lock() && (r == -EBUSY || r == -EAGAIN)) {
      ML<I>::set_state_waiting_for_lock();
      ML<I>::m_lock.Unlock();

      // request the lock from a peer
      m_image_ctx.image_watcher->notify_request_lock();

      // inform manage lock that we have interrupted the state machine
      r = -ECANCELED;
    } else {
      ML<I>::m_lock.Unlock();

      // clear error if peer owns lock
      if (r == -EAGAIN) {
        r = 0;
      }
    }

    on_finish->complete(r);
    return;
  }

  Mutex::Locker locker(ML<I>::m_lock);
  m_pre_post_callback = on_finish;
  using EL = ExclusiveLock<I>;
  PostAcquireRequest<I> *req = PostAcquireRequest<I>::create(m_image_ctx,
      util::create_context_callback<EL, &EL::handle_post_acquiring_lock>(this),
      util::create_context_callback<EL, &EL::handle_post_acquired_lock>(this));

  m_image_ctx.op_work_queue->queue(new FunctionContext([req](int r) {
    req->send();
  }));
}
244
// PostAcquireRequest progress callback: the on-disk lock is now owned
// (always invoked with r == 0), so advance to POST_ACQUIRING while the
// remaining post-acquire steps continue.
template <typename I>
void ExclusiveLock<I>::handle_post_acquiring_lock(int r) {
  ldout(m_image_ctx.cct, 10) << dendl;

  Mutex::Locker locker(ML<I>::m_lock);

  assert(r == 0);

  // lock is owned at this point
  ML<I>::set_state_post_acquiring();
}
256
// PostAcquireRequest completion callback.  Claims the callback saved by
// post_acquire_lock_handler() and, on success, notifies peers of the
// new ownership and re-enables image I/O before completing it with r.
template <typename I>
void ExclusiveLock<I>::handle_post_acquired_lock(int r) {
  ldout(m_image_ctx.cct, 10) << ": r=" << r << dendl;

  Context *on_finish = nullptr;
  {
    Mutex::Locker locker(ML<I>::m_lock);
    assert(ML<I>::is_state_acquiring() || ML<I>::is_state_post_acquiring());

    assert (m_pre_post_callback != nullptr);
    // take ownership of the saved callback, clearing the member
    std::swap(m_pre_post_callback, on_finish);
  }

  if (r >= 0) {
    // lock ownership established: broadcast it and resume image I/O
    m_image_ctx.image_watcher->notify_acquired_lock();
    m_image_ctx.io_work_queue->clear_require_lock_on_read();
    m_image_ctx.io_work_queue->unblock_writes();
  }

  on_finish->complete(r);
}
278
// ManagedLock pre-release hook: queue a PreReleaseRequest on the op
// work queue.  The request is handed m_async_op_tracker -- presumably
// so it can wait for in-flight ops started via start_op() to drain;
// confirm against PreReleaseRequest.
template <typename I>
void ExclusiveLock<I>::pre_release_lock_handler(bool shutting_down,
                                                Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;
  Mutex::Locker locker(ML<I>::m_lock);

  PreReleaseRequest<I> *req = PreReleaseRequest<I>::create(
    m_image_ctx, shutting_down, m_async_op_tracker, on_finish);
  m_image_ctx.op_work_queue->queue(new FunctionContext([req](int r) {
    req->send();
  }));
}
291
// ManagedLock post-release hook.
//  * Normal release (!shutting_down): on success notify peers the lock
//    was released; if queued IO still requires the lock, immediately
//    re-request it.
//  * Shutting down: detach from the image (clear the read-lock
//    requirement and null image_ctx.exclusive_lock under the owner
//    lock), unblock writes on success, then notify peers.
template <typename I>
void ExclusiveLock<I>::post_release_lock_handler(bool shutting_down, int r,
                                                 Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << ": r=" << r << " shutting_down="
                             << shutting_down << dendl;
  if (!shutting_down) {
    {
      Mutex::Locker locker(ML<I>::m_lock);
      assert(ML<I>::is_state_pre_releasing() || ML<I>::is_state_releasing());
    }

    if (r >= 0) {
      m_image_ctx.image_watcher->notify_released_lock();
      if (m_image_ctx.io_work_queue->is_lock_request_needed()) {
        // if we have blocked IO -- re-request the lock
        RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
        ML<I>::acquire_lock(nullptr);
      }
    }
  } else {
    {
      RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
      m_image_ctx.io_work_queue->clear_require_lock_on_read();
      m_image_ctx.exclusive_lock = nullptr;
    }

    if (r >= 0) {
      m_image_ctx.io_work_queue->unblock_writes();
    }

    m_image_ctx.image_watcher->notify_released_lock();
  }

  on_finish->complete(r);
}
327
328 template <typename I>
329 void ExclusiveLock<I>::post_reacquire_lock_handler(int r, Context *on_finish) {
330 ldout(m_image_ctx.cct, 10) << dendl;
331 if (r >= 0) {
332 m_image_ctx.image_watcher->notify_acquired_lock();
333 }
334
335 on_finish->complete(r);
336 }
337
// Completion context for init(): fired once image writes have been
// blocked.  On success it finishes lock initialization via
// handle_init_complete() before chaining to the caller's context.
template <typename I>
struct ExclusiveLock<I>::C_InitComplete : public Context {
  ExclusiveLock *exclusive_lock;  // lock being initialized
  uint64_t features;              // image features forwarded to the handler
  Context *on_init;               // caller-supplied init completion

  C_InitComplete(ExclusiveLock *exclusive_lock, uint64_t features,
                 Context *on_init)
    : exclusive_lock(exclusive_lock), features(features), on_init(on_init) {
  }
  void finish(int r) override {
    if (r == 0) {
      exclusive_lock->handle_init_complete(features);
    }
    on_init->complete(r);
  }
};
355
356 } // namespace librbd
357
358 template class librbd::ExclusiveLock<librbd::ImageCtx>;