// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"  // required for the explicit instantiation below
#include "librbd/ImageWatcher.h"
#include "librbd/ImageState.h"
#include "librbd/exclusive_lock/PreAcquireRequest.h"
#include "librbd/exclusive_lock/PostAcquireRequest.h"
#include "librbd/exclusive_lock/PreReleaseRequest.h"
#include "librbd/io/ImageRequestWQ.h"
#include "librbd/Utils.h"
#include "common/Mutex.h"
#include "common/dout.h"

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::ExclusiveLock: " << this << " " \
                           << __func__

namespace librbd {

using namespace exclusive_lock;

template <typename I>
using ML = ManagedLock<I>;

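// Constructs the lock in the uninitialized state, configuring the
// underlying ManagedLock for exclusive mode with the image's
// blacklist-on-break-lock settings.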
template <typename I>
ExclusiveLock<I>::ExclusiveLock(I &image_ctx)
  : ML<I>(image_ctx.md_ctx, image_ctx.op_work_queue, image_ctx.header_oid,
          image_ctx.image_watcher, managed_lock::EXCLUSIVE,
          image_ctx.blacklist_on_break_lock,
          image_ctx.blacklist_expire_seconds),
    m_image_ctx(image_ctx) {
  Mutex::Locker locker(ML<I>::m_lock);
  ML<I>::set_state_uninitialized();
}

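// Returns true if requests may proceed: the lock must be held, not
// shutting down, and not explicitly blocked. *ret_val receives the
// error code recorded by block_requests(), if any.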
template <typename I>
bool ExclusiveLock<I>::accept_requests(int *ret_val) const {
  Mutex::Locker locker(ML<I>::m_lock);

  bool accept_requests = (!ML<I>::is_state_shutdown() &&
                          ML<I>::is_state_locked() &&
                          m_request_blocked_count == 0);
  *ret_val = m_request_blocked_ret_val;

  ldout(m_image_ctx.cct, 20) << "=" << accept_requests << dendl;
  return accept_requests;
}

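// Blocks incoming requests (reference counted). The first non-zero
// error code is reported via accept_requests() until fully unblocked.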
template <typename I>
void ExclusiveLock<I>::block_requests(int r) {
  Mutex::Locker locker(ML<I>::m_lock);

  m_request_blocked_count++;
  if (m_request_blocked_ret_val == 0) {
    m_request_blocked_ret_val = r;
  }

  ldout(m_image_ctx.cct, 20) << dendl;
}

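// Drops one request block; the recorded error code is cleared once the
// block count reaches zero.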
template <typename I>
void ExclusiveLock<I>::unblock_requests() {
  Mutex::Locker locker(ML<I>::m_lock);

  assert(m_request_blocked_count > 0);
  m_request_blocked_count--;
  if (m_request_blocked_count == 0) {
    m_request_blocked_ret_val = 0;
  }

  ldout(m_image_ctx.cct, 20) << dendl;
}

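// Moves to the initializing state and blocks writes; C_InitComplete
// (defined below) finishes the initialization once writes are blocked.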
template <typename I>
void ExclusiveLock<I>::init(uint64_t features, Context *on_init) {
  assert(m_image_ctx.owner_lock.is_locked());
  ldout(m_image_ctx.cct, 10) << dendl;

  {
    Mutex::Locker locker(ML<I>::m_lock);
    ML<I>::set_state_initializing();
  }

  m_image_ctx.io_work_queue->block_writes(new C_InitComplete(this, features,
                                                             on_init));
}

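// Shuts down the managed lock, then fires a synthetic peer notification
// to abort an acquire that is stalled waiting on a peer.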
template <typename I>
void ExclusiveLock<I>::shut_down(Context *on_shut_down) {
  ldout(m_image_ctx.cct, 10) << dendl;

  ML<I>::shut_down(on_shut_down);

  // if stalled in the request state machine -- abort
  handle_peer_notification(0);
}

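// Handles a lock-availability notification from a peer: if we are
// waiting for the lock, record the peer's result and resume the
// acquire action.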
template <typename I>
void ExclusiveLock<I>::handle_peer_notification(int r) {
  Mutex::Locker locker(ML<I>::m_lock);
  if (!ML<I>::is_state_waiting_for_lock()) {
    return;
  }

  ldout(m_image_ctx.cct, 10) << dendl;
  assert(ML<I>::is_action_acquire_lock());

  m_acquire_lock_peer_ret_val = r;
  ML<I>::execute_next_action();
}

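// Completion of init(): journaling images must also hold the lock for
// reads; the lock then settles in the unlocked state.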
template <typename I>
void ExclusiveLock<I>::handle_init_complete(uint64_t features) {
  ldout(m_image_ctx.cct, 10) << "features=" << features << dendl;

  if ((features & RBD_FEATURE_JOURNALING) != 0) {
    m_image_ctx.io_work_queue->set_require_lock_on_read();
  }

  Mutex::Locker locker(ML<I>::m_lock);
  ML<I>::set_state_unlocked();
}

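// ManagedLock shutdown hook: detach the lock from the image context,
// resume writes, and flush the image watcher.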
template <typename I>
void ExclusiveLock<I>::shutdown_handler(int r, Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;

  {
    RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
    m_image_ctx.io_work_queue->clear_require_lock_on_read();
    m_image_ctx.exclusive_lock = nullptr;
  }

  m_image_ctx.io_work_queue->unblock_writes();
  m_image_ctx.image_watcher->flush(on_finish);
}

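// ManagedLock pre-acquire hook: fail fast if a peer already refused to
// release the lock (-EROFS); otherwise kick off the PreAcquireRequest
// state machine on the op work queue.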
template <typename I>
void ExclusiveLock<I>::pre_acquire_lock_handler(Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;

  int acquire_lock_peer_ret_val = 0;
  {
    Mutex::Locker locker(ML<I>::m_lock);
    std::swap(acquire_lock_peer_ret_val, m_acquire_lock_peer_ret_val);
  }

  if (acquire_lock_peer_ret_val == -EROFS) {
    ldout(m_image_ctx.cct, 10) << ": peer nacked lock request" << dendl;
    on_finish->complete(acquire_lock_peer_ret_val);
    return;
  }

  PreAcquireRequest<I> *req = PreAcquireRequest<I>::create(m_image_ctx,
                                                           on_finish);
  m_image_ctx.op_work_queue->queue(new FunctionContext([req](int r) {
    req->send();
  }));
}

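// ManagedLock post-acquire hook. On failure: propagate -EROFS, ask the
// current owner for the lock on -EBUSY/-EAGAIN (reporting -ECANCELED to
// the state machine), or clear -EAGAIN when a peer owns the lock. On
// success, run the PostAcquireRequest state machine.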
template <typename I>
void ExclusiveLock<I>::post_acquire_lock_handler(int r, Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << ": r=" << r << dendl;

  if (r == -EROFS) {
    // peer refused to release the exclusive lock
    on_finish->complete(r);
    return;
  } else if (r < 0) {
    ML<I>::m_lock.Lock();
    assert(ML<I>::is_state_acquiring());

    // the PostAcquire state machine will not run, so we need to complete
    // the prepare-lock step ourselves
    m_image_ctx.state->handle_prepare_lock_complete();

    // if the lock is in use by another client, request the lock
    if (ML<I>::is_action_acquire_lock() && (r == -EBUSY || r == -EAGAIN)) {
      ML<I>::set_state_waiting_for_lock();
      ML<I>::m_lock.Unlock();

      // request the lock from a peer
      m_image_ctx.image_watcher->notify_request_lock();

      // inform the managed lock that we have interrupted the state machine
      r = -ECANCELED;
    } else {
      ML<I>::m_lock.Unlock();

      // clear the error if a peer owns the lock
      if (r == -EAGAIN) {
        r = 0;
      }
    }

    on_finish->complete(r);
    return;
  }

  Mutex::Locker locker(ML<I>::m_lock);
  m_pre_post_callback = on_finish;
  using EL = ExclusiveLock<I>;
  PostAcquireRequest<I> *req = PostAcquireRequest<I>::create(m_image_ctx,
      util::create_context_callback<EL, &EL::handle_post_acquiring_lock>(this),
      util::create_context_callback<EL, &EL::handle_post_acquired_lock>(this));

  m_image_ctx.op_work_queue->queue(new FunctionContext([req](int r) {
    req->send();
  }));
}

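// The on-disk lock is owned at this point; record the post-acquiring
// state while PostAcquireRequest finishes.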
template <typename I>
void ExclusiveLock<I>::handle_post_acquiring_lock(int r) {
  ldout(m_image_ctx.cct, 10) << dendl;

  Mutex::Locker locker(ML<I>::m_lock);

  assert(r == 0);

  // lock is owned at this point
  ML<I>::set_state_post_acquiring();
}

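// Completion of PostAcquireRequest: on success, notify peers of the
// acquisition and resume blocked IO, then complete the saved callback.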
template <typename I>
void ExclusiveLock<I>::handle_post_acquired_lock(int r) {
  ldout(m_image_ctx.cct, 10) << ": r=" << r << dendl;

  Context *on_finish = nullptr;
  {
    Mutex::Locker locker(ML<I>::m_lock);
    assert(ML<I>::is_state_acquiring() || ML<I>::is_state_post_acquiring());

    assert(m_pre_post_callback != nullptr);
    std::swap(m_pre_post_callback, on_finish);
  }

  if (r >= 0) {
    m_image_ctx.image_watcher->notify_acquired_lock();
    m_image_ctx.io_work_queue->clear_require_lock_on_read();
    m_image_ctx.io_work_queue->unblock_writes();
  }

  on_finish->complete(r);
}

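// ManagedLock pre-release hook: run the PreReleaseRequest state machine
// on the op work queue.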
template <typename I>
void ExclusiveLock<I>::pre_release_lock_handler(bool shutting_down,
                                                Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;
  Mutex::Locker locker(ML<I>::m_lock);

  PreReleaseRequest<I> *req = PreReleaseRequest<I>::create(
    m_image_ctx, shutting_down, on_finish);
  m_image_ctx.op_work_queue->queue(new FunctionContext([req](int r) {
    req->send();
  }));
}

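// ManagedLock post-release hook: notify peers that the lock was
// released and, if IO is still waiting on the lock, immediately
// re-request it; on shutdown, also detach from the image context and
// resume writes.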
template <typename I>
void ExclusiveLock<I>::post_release_lock_handler(bool shutting_down, int r,
                                                 Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << ": r=" << r << " shutting_down="
                             << shutting_down << dendl;
  if (!shutting_down) {
    {
      Mutex::Locker locker(ML<I>::m_lock);
      assert(ML<I>::is_state_pre_releasing() || ML<I>::is_state_releasing());
    }

    if (r >= 0) {
      m_image_ctx.image_watcher->notify_released_lock();
      if (m_image_ctx.io_work_queue->is_lock_request_needed()) {
        // if we have blocked IO -- re-request the lock
        RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
        ML<I>::acquire_lock(nullptr);
      }
    }
  } else {
    {
      RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
      m_image_ctx.io_work_queue->clear_require_lock_on_read();
      m_image_ctx.exclusive_lock = nullptr;
    }

    if (r >= 0) {
      m_image_ctx.io_work_queue->unblock_writes();
    }

    m_image_ctx.image_watcher->notify_released_lock();
  }

  on_finish->complete(r);
}

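// Completion context for init(): invoked once writes have been blocked.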
template <typename I>
struct ExclusiveLock<I>::C_InitComplete : public Context {
  ExclusiveLock *exclusive_lock;
  uint64_t features;
  Context *on_init;

  C_InitComplete(ExclusiveLock *exclusive_lock, uint64_t features,
                 Context *on_init)
    : exclusive_lock(exclusive_lock), features(features), on_init(on_init) {
  }
  void finish(int r) override {
    if (r == 0) {
      exclusive_lock->handle_init_complete(features);
    }
    on_init->complete(r);
  }
};

} // namespace librbd

template class librbd::ExclusiveLock<librbd::ImageCtx>;