ceph/src/librbd/ExclusiveLock.cc (ceph.git, 15.2.0 Octopus import)
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageWatcher.h"
#include "librbd/ImageState.h"
#include "librbd/exclusive_lock/PreAcquireRequest.h"
#include "librbd/exclusive_lock/PostAcquireRequest.h"
#include "librbd/exclusive_lock/PreReleaseRequest.h"
#include "librbd/io/ImageRequestWQ.h"
#include "librbd/Utils.h"
#include "common/ceph_mutex.h"
#include "common/dout.h"

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::ExclusiveLock: " << this << " " \
                           << __func__

namespace librbd {

using namespace exclusive_lock;
using librbd::util::create_context_callback;

template <typename I>
using ML = ManagedLock<I>;

template <typename I>
ExclusiveLock<I>::ExclusiveLock(I &image_ctx)
  : RefCountedObject(image_ctx.cct),
    ML<I>(image_ctx.md_ctx, image_ctx.op_work_queue, image_ctx.header_oid,
          image_ctx.image_watcher, managed_lock::EXCLUSIVE,
          image_ctx.config.template get_val<bool>("rbd_blacklist_on_break_lock"),
          image_ctx.config.template get_val<uint64_t>("rbd_blacklist_expire_seconds")),
    m_image_ctx(image_ctx) {
  std::lock_guard locker{ML<I>::m_lock};
  ML<I>::set_state_uninitialized();
}

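// Returns whether a maintenance operation request may proceed: the lock must
// be held and not shutting down, and requests must not be blocked (unless the
// exclusive-lock policy explicitly accepts blocked requests).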
template <typename I>
bool ExclusiveLock<I>::accept_request(OperationRequestType request_type,
                                      int *ret_val) const {
  std::lock_guard locker{ML<I>::m_lock};

  bool accept_request =
    (!ML<I>::is_state_shutdown() && ML<I>::is_state_locked() &&
     (m_request_blocked_count == 0 ||
      m_image_ctx.get_exclusive_lock_policy()->accept_blocked_request(
        request_type)));
  if (ret_val != nullptr) {
    *ret_val = accept_request ? 0 : m_request_blocked_ret_val;
  }

  ldout(m_image_ctx.cct, 20) << "=" << accept_request << " (request_type="
                             << request_type << ")" << dendl;
  return accept_request;
}

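// Returns whether image I/O may currently be issued: true while the lock is
// held (or the post-acquire step is in progress) and the lock is not shutting
// down. The second overload expects ML<I>::m_lock to already be held.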
template <typename I>
bool ExclusiveLock<I>::accept_ops() const {
  std::lock_guard locker{ML<I>::m_lock};
  bool accept = accept_ops(ML<I>::m_lock);
  ldout(m_image_ctx.cct, 20) << "=" << accept << dendl;
  return accept;
}

template <typename I>
bool ExclusiveLock<I>::accept_ops(const ceph::mutex &lock) const {
  return (!ML<I>::is_state_shutdown() &&
          (ML<I>::is_state_locked() || ML<I>::is_state_post_acquiring()));
}

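// block_requests/unblock_requests maintain a nested block count; the first
// non-zero error code passed to block_requests is reported for rejected
// requests until the count drops back to zero.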
template <typename I>
void ExclusiveLock<I>::block_requests(int r) {
  std::lock_guard locker{ML<I>::m_lock};

  m_request_blocked_count++;
  if (m_request_blocked_ret_val == 0) {
    m_request_blocked_ret_val = r;
  }

  ldout(m_image_ctx.cct, 20) << "r=" << r << dendl;
}

template <typename I>
void ExclusiveLock<I>::unblock_requests() {
  std::lock_guard locker{ML<I>::m_lock};

  ceph_assert(m_request_blocked_count > 0);
  m_request_blocked_count--;
  if (m_request_blocked_count == 0) {
    m_request_blocked_ret_val = 0;
  }

  ldout(m_image_ctx.cct, 20) << dendl;
}

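// Error returned for operations attempted while the lock is not held:
// -EBLACKLISTED if this client has been blacklisted, otherwise -EROFS.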
template <typename I>
int ExclusiveLock<I>::get_unlocked_op_error() const {
  if (m_image_ctx.image_watcher->is_blacklisted()) {
    return -EBLACKLISTED;
  }
  return -EROFS;
}

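// Initializes the exclusive lock: transitions to the INITIALIZING state and
// blocks image writes, completing initialization via C_InitComplete once the
// writes are blocked. The caller must hold the image owner_lock.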
template <typename I>
void ExclusiveLock<I>::init(uint64_t features, Context *on_init) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));

  on_init = create_context_callback<Context>(on_init, this);

  ldout(m_image_ctx.cct, 10) << dendl;

  {
    std::lock_guard locker{ML<I>::m_lock};
    ML<I>::set_state_initializing();
  }

  m_image_ctx.io_work_queue->block_writes(new C_InitComplete(this, features,
                                                             on_init));
}

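// Shuts down the managed lock and aborts any pending peer lock request so the
// state machine does not stall waiting on a notification that will never come.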
template <typename I>
void ExclusiveLock<I>::shut_down(Context *on_shut_down) {
  ldout(m_image_ctx.cct, 10) << dendl;

  on_shut_down = create_context_callback<Context>(on_shut_down, this);

  ML<I>::shut_down(on_shut_down);

  // if stalled in the lock request state machine -- abort
  handle_peer_notification(0);
}

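// Handles the lock owner's response to our lock request (or a shutdown-time
// cancellation with r == 0): if we are still waiting for the lock, record the
// peer's result and resume the acquire action.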
template <typename I>
void ExclusiveLock<I>::handle_peer_notification(int r) {
  std::lock_guard locker{ML<I>::m_lock};
  if (!ML<I>::is_state_waiting_for_lock()) {
    return;
  }

  ldout(m_image_ctx.cct, 10) << dendl;
  ceph_assert(ML<I>::is_action_acquire_lock());

  m_acquire_lock_peer_ret_val = r;
  ML<I>::execute_next_action();
}

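// Begins tracking an asynchronous operation. Returns a context that the caller
// must complete when the operation finishes, or nullptr (with *ret_val set to
// the unlocked-op error) if operations cannot currently be accepted.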
template <typename I>
Context *ExclusiveLock<I>::start_op(int* ret_val) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  std::lock_guard locker{ML<I>::m_lock};

  if (!accept_ops(ML<I>::m_lock)) {
    *ret_val = get_unlocked_op_error();
    return nullptr;
  }

  m_async_op_tracker.start_op();
  return new LambdaContext([this](int r) {
      m_async_op_tracker.finish_op();
    });
}

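// Runs once writes have been blocked during init(): enables the "require lock"
// policy on the I/O work queue (both directions when journaling or clone
// copy-on-read is enabled, otherwise writes only) and marks the lock unlocked.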
template <typename I>
void ExclusiveLock<I>::handle_init_complete(uint64_t features) {
  ldout(m_image_ctx.cct, 10) << ": features=" << features << dendl;

  {
    std::shared_lock owner_locker{m_image_ctx.owner_lock};
    if (m_image_ctx.clone_copy_on_read ||
        (features & RBD_FEATURE_JOURNALING) != 0) {
      m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, true);
    } else {
      m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_WRITE, true);
    }
  }

  std::lock_guard locker{ML<I>::m_lock};
  ML<I>::set_state_unlocked();
}

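// ManagedLock shutdown hook: drops the require-lock restriction, detaches the
// exclusive lock from the ImageCtx, unblocks writes, and flushes the image
// watcher before completing.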
template <typename I>
void ExclusiveLock<I>::shutdown_handler(int r, Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;

  {
    std::unique_lock owner_locker{m_image_ctx.owner_lock};
    m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, false);
    m_image_ctx.exclusive_lock = nullptr;
  }

  m_image_ctx.io_work_queue->unblock_writes();
  m_image_ctx.image_watcher->flush(on_finish);
}

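// ManagedLock pre-acquire hook: if a peer has already refused to release the
// lock (-EROFS recorded by handle_peer_notification), fail immediately;
// otherwise queue a PreAcquireRequest state machine.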
template <typename I>
void ExclusiveLock<I>::pre_acquire_lock_handler(Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;

  int acquire_lock_peer_ret_val = 0;
  {
    std::lock_guard locker{ML<I>::m_lock};
    std::swap(acquire_lock_peer_ret_val, m_acquire_lock_peer_ret_val);
  }

  if (acquire_lock_peer_ret_val == -EROFS) {
    ldout(m_image_ctx.cct, 10) << ": peer nacked lock request" << dendl;
    on_finish->complete(acquire_lock_peer_ret_val);
    return;
  }

  PreAcquireRequest<I> *req = PreAcquireRequest<I>::create(m_image_ctx,
                                                           on_finish);
  m_image_ctx.op_work_queue->queue(new LambdaContext([req](int r) {
    req->send();
  }));
}

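// ManagedLock post-acquire hook: on success queues the PostAcquireRequest
// state machine; on failure it completes the image-state prepare step itself
// and, for -EBUSY/-EAGAIN, asks the current owner to release the lock while
// the state machine waits.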
template <typename I>
void ExclusiveLock<I>::post_acquire_lock_handler(int r, Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << ": r=" << r << dendl;

  if (r == -EROFS) {
    // peer refused to release the exclusive lock
    on_finish->complete(r);
    return;
  } else if (r < 0) {
    ML<I>::m_lock.lock();
    ceph_assert(ML<I>::is_state_acquiring());

    // the PostAcquire state machine will not run, so we need to complete
    // the prepare step ourselves
    m_image_ctx.state->handle_prepare_lock_complete();

    // if the lock is in use by another client, request the lock
    if (ML<I>::is_action_acquire_lock() && (r == -EBUSY || r == -EAGAIN)) {
      ML<I>::set_state_waiting_for_lock();
      ML<I>::m_lock.unlock();

      // request the lock from a peer
      m_image_ctx.image_watcher->notify_request_lock();

      // inform the managed lock that we have interrupted the state machine
      r = -ECANCELED;
    } else {
      ML<I>::m_lock.unlock();

      // clear error if peer owns lock
      if (r == -EAGAIN) {
        r = 0;
      }
    }

    on_finish->complete(r);
    return;
  }

  std::lock_guard locker{ML<I>::m_lock};
  m_pre_post_callback = on_finish;
  using EL = ExclusiveLock<I>;
  PostAcquireRequest<I> *req = PostAcquireRequest<I>::create(m_image_ctx,
      util::create_context_callback<EL, &EL::handle_post_acquiring_lock>(this),
      util::create_context_callback<EL, &EL::handle_post_acquired_lock>(this));

  m_image_ctx.op_work_queue->queue(new LambdaContext([req](int r) {
    req->send();
  }));
}

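// Invoked by the PostAcquireRequest state machine once the lock is owned:
// switch to the POST_ACQUIRING state so accept_ops() admits I/O while the
// remaining post-acquire steps run.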
template <typename I>
void ExclusiveLock<I>::handle_post_acquiring_lock(int r) {
  ldout(m_image_ctx.cct, 10) << dendl;

  std::lock_guard locker{ML<I>::m_lock};

  ceph_assert(r == 0);

  // lock is owned at this point
  ML<I>::set_state_post_acquiring();
}

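// Final post-acquire callback: on success records the lock acquisition time,
// notifies watchers, drops the require-lock restriction and unblocks writes
// before completing the saved pre/post callback.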
template <typename I>
void ExclusiveLock<I>::handle_post_acquired_lock(int r) {
  ldout(m_image_ctx.cct, 10) << ": r=" << r << dendl;

  Context *on_finish = nullptr;
  {
    std::lock_guard locker{ML<I>::m_lock};
    ceph_assert(ML<I>::is_state_acquiring() ||
                ML<I>::is_state_post_acquiring());

    ceph_assert(m_pre_post_callback != nullptr);
    std::swap(m_pre_post_callback, on_finish);
  }

  if (r >= 0) {
    m_image_ctx.perfcounter->tset(l_librbd_lock_acquired_time,
                                  ceph_clock_now());
    m_image_ctx.image_watcher->notify_acquired_lock();
    m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, false);
    m_image_ctx.io_work_queue->unblock_writes();
  }

  on_finish->complete(r);
}

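// ManagedLock pre-release hook: queues a PreReleaseRequest state machine
// (passing the async-op tracker) to prepare the image before the lock is
// released.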
template <typename I>
void ExclusiveLock<I>::pre_release_lock_handler(bool shutting_down,
                                                Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;
  std::lock_guard locker{ML<I>::m_lock};

  PreReleaseRequest<I> *req = PreReleaseRequest<I>::create(
    m_image_ctx, shutting_down, m_async_op_tracker, on_finish);
  m_image_ctx.op_work_queue->queue(new LambdaContext([req](int r) {
    req->send();
  }));
}

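// ManagedLock post-release hook: notifies peers that the lock was released;
// when shutting down it also detaches the exclusive lock from the image and
// re-enables writes.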
template <typename I>
void ExclusiveLock<I>::post_release_lock_handler(bool shutting_down, int r,
                                                 Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << ": r=" << r << " shutting_down="
                             << shutting_down << dendl;
  if (!shutting_down) {
    {
      std::lock_guard locker{ML<I>::m_lock};
      ceph_assert(ML<I>::is_state_pre_releasing() ||
                  ML<I>::is_state_releasing());
    }

    if (r >= 0) {
      m_image_ctx.image_watcher->notify_released_lock();
    }
  } else {
    {
      std::unique_lock owner_locker{m_image_ctx.owner_lock};
      m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_BOTH, false);
      m_image_ctx.exclusive_lock = nullptr;
    }

    if (r >= 0) {
      m_image_ctx.io_work_queue->unblock_writes();
    }

    m_image_ctx.image_watcher->notify_released_lock();
  }

  on_finish->complete(r);
}

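// ManagedLock post-reacquire hook: re-advertise lock ownership if the
// reacquire succeeded.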
template <typename I>
void ExclusiveLock<I>::post_reacquire_lock_handler(int r, Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;
  if (r >= 0) {
    m_image_ctx.image_watcher->notify_acquired_lock();
  }

  on_finish->complete(r);
}

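// Completion used by init(): once writes have been blocked it finishes the
// initialization and then completes the user-supplied callback.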
template <typename I>
struct ExclusiveLock<I>::C_InitComplete : public Context {
  ExclusiveLock *exclusive_lock;
  uint64_t features;
  Context *on_init;

  C_InitComplete(ExclusiveLock *exclusive_lock, uint64_t features,
                 Context *on_init)
    : exclusive_lock(exclusive_lock), features(features), on_init(on_init) {
  }
  void finish(int r) override {
    if (r == 0) {
      exclusive_lock->handle_init_complete(features);
    }
    on_init->complete(r);
  }
};

} // namespace librbd

template class librbd::ExclusiveLock<librbd::ImageCtx>;