]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- |
2 | // vim: ts=8 sw=2 smarttab | |
3 | ||
4 | #include "librbd/ImageWatcher.h" | |
5 | #include "librbd/ExclusiveLock.h" | |
6 | #include "librbd/ImageCtx.h" | |
7 | #include "librbd/ImageState.h" | |
8 | #include "librbd/internal.h" | |
7c673cae | 9 | #include "librbd/TaskFinisher.h" |
b32b8144 | 10 | #include "librbd/Types.h" |
7c673cae | 11 | #include "librbd/Utils.h" |
f67539c2 | 12 | #include "librbd/asio/ContextWQ.h" |
7c673cae FG |
13 | #include "librbd/exclusive_lock/Policy.h" |
14 | #include "librbd/image_watcher/NotifyLockOwner.h" | |
15 | #include "librbd/io/AioCompletion.h" | |
7c673cae FG |
16 | #include "include/encoding.h" |
17 | #include "common/errno.h" | |
f67539c2 | 18 | #include <boost/bind/bind.hpp> |
7c673cae FG |
19 | |
20 | #define dout_subsys ceph_subsys_rbd | |
21 | #undef dout_prefix | |
22 | #define dout_prefix *_dout << "librbd::ImageWatcher: " | |
23 | ||
24 | namespace librbd { | |
25 | ||
26 | using namespace image_watcher; | |
27 | using namespace watch_notify; | |
28 | using util::create_async_context_callback; | |
29 | using util::create_context_callback; | |
30 | using util::create_rados_callback; | |
7c673cae | 31 | |
11fdf7f2 TL |
32 | using ceph::encode; |
33 | using ceph::decode; | |
34 | ||
f67539c2 TL |
35 | using namespace boost::placeholders; |
36 | ||
7c673cae FG |
37 | static const double RETRY_DELAY_SECONDS = 1.0; |
38 | ||
31f18b77 FG |
template <typename I>
struct ImageWatcher<I>::C_ProcessPayload : public Context {
  // Deferred handler for a received watch notification; owns the decoded
  // payload until the finisher thread either processes it or just acks it.
  ImageWatcher *image_watcher;  // non-owning back-pointer to the watcher
  uint64_t notify_id;           // rados notify id to acknowledge
  uint64_t handle;              // watch handle the notification arrived on
  std::unique_ptr<watch_notify::Payload> payload;

  C_ProcessPayload(ImageWatcher *image_watcher, uint64_t notify_id,
                   uint64_t handle,
                   std::unique_ptr<watch_notify::Payload> &&payload)
    : image_watcher(image_watcher), notify_id(notify_id), handle(handle),
      payload(std::move(payload)) {
  }

  void finish(int r) override {
    // track the in-flight op so watcher shutdown can wait for it to drain
    image_watcher->m_async_op_tracker.start_op();
    if (image_watcher->notifications_blocked()) {
      // requests are blocked -- just ack the notification
      bufferlist bl;
      image_watcher->acknowledge_notify(notify_id, handle, bl);
    } else {
      image_watcher->process_payload(notify_id, handle, payload.get());
    }
    image_watcher->m_async_op_tracker.finish_op();
  }
};
65 | ||
7c673cae FG |
// Constructs the image watcher on the image's header object.  The lock
// names embed `this` so lockdep can distinguish per-instance mutexes.
template <typename I>
ImageWatcher<I>::ImageWatcher(I &image_ctx)
  : Watcher(image_ctx.md_ctx, image_ctx.op_work_queue, image_ctx.header_oid),
    m_image_ctx(image_ctx),
    m_task_finisher(new TaskFinisher<Task>(*m_image_ctx.cct)),
    m_async_request_lock(ceph::make_shared_mutex(
      util::unique_lock_name("librbd::ImageWatcher::m_async_request_lock", this))),
    m_owner_client_id_lock(ceph::make_mutex(
      util::unique_lock_name("librbd::ImageWatcher::m_owner_client_id_lock", this)))
{
}
77 | ||
template <typename I>
ImageWatcher<I>::~ImageWatcher()
{
  // m_task_finisher is a raw owning pointer (declared in the header);
  // NOTE(review): candidate for std::unique_ptr if the header can change.
  delete m_task_finisher;
}
83 | ||
// Tears down the watch: fails outstanding async requests, then (after the
// rados unwatch completes) cancels quiesce requests and pending tasks,
// waits for in-flight ops, flushes the task finisher, and fires on_finish.
// The callbacks are chained in reverse order of execution.
template <typename I>
void ImageWatcher<I>::unregister_watch(Context *on_finish) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " unregistering image watcher" << dendl;

  cancel_async_requests();

  // flush the task finisher queue before completing
  on_finish = create_async_context_callback(m_task_finisher, on_finish);

  on_finish = new LambdaContext([this, on_finish](int r) {
    cancel_quiesce_requests();
    m_task_finisher->cancel_all();
    m_async_op_tracker.wait_for_ops(on_finish);
  });
  Watcher::unregister_watch(on_finish);
}
101 | ||
31f18b77 FG |
// Blocks incoming notifications and, once blocked, fails any async
// requests that are still waiting on remote progress/completion.
template <typename I>
void ImageWatcher<I>::block_notifies(Context *on_finish) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  on_finish = new LambdaContext([this, on_finish](int r) {
    cancel_async_requests();
    on_finish->complete(r);
  });
  Watcher::block_notifies(on_finish);
}
113 | ||
7c673cae FG |
114 | template <typename I> |
115 | void ImageWatcher<I>::schedule_async_progress(const AsyncRequestId &request, | |
116 | uint64_t offset, uint64_t total) { | |
f67539c2 TL |
117 | auto ctx = new LambdaContext([this, request, offset, total](int r) { |
118 | if (r != -ECANCELED) { | |
119 | notify_async_progress(request, offset, total); | |
120 | } | |
121 | }); | |
7c673cae FG |
122 | m_task_finisher->queue(Task(TASK_CODE_ASYNC_PROGRESS, request), ctx); |
123 | } | |
124 | ||
// Broadcasts a fire-and-forget progress update for a remote async request.
template <typename I>
int ImageWatcher<I>::notify_async_progress(const AsyncRequestId &request,
                                           uint64_t offset, uint64_t total) {
  ldout(m_image_ctx.cct, 20) << this << " remote async request progress: "
                             << request << " @ " << offset
                             << "/" << total << dendl;

  send_notify(new AsyncProgressPayload(request, offset, total));
  return 0;
}
135 | ||
// Queues the completion notification for a remote async request.  The op
// tracker ref taken here is released in handle_async_complete() once the
// notification round-trip finishes (or is re-scheduled on timeout).
template <typename I>
void ImageWatcher<I>::schedule_async_complete(const AsyncRequestId &request,
                                              int r) {
  m_async_op_tracker.start_op();
  // ret_val captures the request's result; the lambda's own `r` is the
  // task finisher's queue result.
  auto ctx = new LambdaContext([this, request, ret_val=r](int r) {
    if (r != -ECANCELED) {
      notify_async_complete(request, ret_val);
    }
  });
  m_task_finisher->queue(ctx);
}
147 | ||
// Broadcasts the final result of a remote async request; the notify ack is
// routed to handle_async_complete() with both the op result and ack status.
template <typename I>
void ImageWatcher<I>::notify_async_complete(const AsyncRequestId &request,
                                            int r) {
  ldout(m_image_ctx.cct, 20) << this << " remote async request finished: "
                             << request << " = " << r << dendl;

  send_notify(new AsyncCompletePayload(request, r),
              new LambdaContext(boost::bind(&ImageWatcher<I>::handle_async_complete,
                                            this, request, r, _1)));
}
158 | ||
// Ack handler for the async-complete notification.  `r` is the original
// request result, `ret_val` the notify round-trip status.  On timeout the
// notification is retried (unless the watch was unregistered); otherwise
// the request is recorded as complete and the tracker ref is dropped.
template <typename I>
void ImageWatcher<I>::handle_async_complete(const AsyncRequestId &request,
                                            int r, int ret_val) {
  ldout(m_image_ctx.cct, 20) << this << " " << __func__ << ": "
                             << "request=" << request << ", r=" << ret_val
                             << dendl;
  if (ret_val < 0) {
    lderr(m_image_ctx.cct) << this << " failed to notify async complete: "
                           << cpp_strerror(ret_val) << dendl;
    if (ret_val == -ETIMEDOUT && !is_unregistered()) {
      // retry: schedule_async_complete() takes a fresh tracker ref
      schedule_async_complete(request, r);
      m_async_op_tracker.finish_op();
      return;
    }
  }

  std::unique_lock async_request_locker{m_async_request_lock};
  mark_async_request_complete(request, r);
  m_async_op_tracker.finish_op();
}
179 | ||
// Proxies a flatten request to the remote exclusive-lock owner.  Caller
// must hold owner_lock and must NOT be the lock owner.
template <typename I>
void ImageWatcher<I>::notify_flatten(uint64_t request_id,
                                     ProgressContext &prog_ctx,
                                     Context *on_finish) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  ceph_assert(m_image_ctx.exclusive_lock &&
              !m_image_ctx.exclusive_lock->is_lock_owner());

  AsyncRequestId async_request_id(get_client_id(), request_id);

  notify_async_request(async_request_id, new FlattenPayload(async_request_id),
                       prog_ctx, on_finish);
}
193 | ||
// Proxies a resize request to the remote exclusive-lock owner.
template <typename I>
void ImageWatcher<I>::notify_resize(uint64_t request_id, uint64_t size,
                                    bool allow_shrink,
                                    ProgressContext &prog_ctx,
                                    Context *on_finish) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  ceph_assert(m_image_ctx.exclusive_lock &&
              !m_image_ctx.exclusive_lock->is_lock_owner());

  AsyncRequestId async_request_id(get_client_id(), request_id);

  notify_async_request(async_request_id,
                       new ResizePayload(async_request_id, size, allow_shrink),
                       prog_ctx, on_finish);
}
209 | ||
// Proxies a snapshot-create request to the remote exclusive-lock owner.
template <typename I>
void ImageWatcher<I>::notify_snap_create(uint64_t request_id,
                                         const cls::rbd::SnapshotNamespace &snap_namespace,
                                         const std::string &snap_name,
                                         uint64_t flags,
                                         ProgressContext &prog_ctx,
                                         Context *on_finish) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  ceph_assert(m_image_ctx.exclusive_lock &&
              !m_image_ctx.exclusive_lock->is_lock_owner());

  AsyncRequestId async_request_id(get_client_id(), request_id);

  notify_async_request(async_request_id,
                       new SnapCreatePayload(async_request_id, snap_namespace,
                                             snap_name, flags),
                       prog_ctx, on_finish);
}
228 | ||
// Proxies a snapshot-rename request to the remote exclusive-lock owner
// (no progress reporting -- uses the shared no-op progress context).
template <typename I>
void ImageWatcher<I>::notify_snap_rename(uint64_t request_id,
                                         const snapid_t &src_snap_id,
                                         const std::string &dst_snap_name,
                                         Context *on_finish) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  ceph_assert(m_image_ctx.exclusive_lock &&
              !m_image_ctx.exclusive_lock->is_lock_owner());

  AsyncRequestId async_request_id(get_client_id(), request_id);

  notify_async_request(
    async_request_id,
    new SnapRenamePayload(async_request_id, src_snap_id, dst_snap_name),
    m_no_op_prog_ctx, on_finish);
}
245 | ||
// Proxies a snapshot-remove request to the remote exclusive-lock owner.
template <typename I>
void ImageWatcher<I>::notify_snap_remove(
    uint64_t request_id, const cls::rbd::SnapshotNamespace &snap_namespace,
    const std::string &snap_name, Context *on_finish) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  ceph_assert(m_image_ctx.exclusive_lock &&
              !m_image_ctx.exclusive_lock->is_lock_owner());

  AsyncRequestId async_request_id(get_client_id(), request_id);

  notify_async_request(
    async_request_id,
    new SnapRemovePayload(async_request_id, snap_namespace, snap_name),
    m_no_op_prog_ctx, on_finish);
}
261 | ||
// Proxies a snapshot-protect request to the remote exclusive-lock owner.
template <typename I>
void ImageWatcher<I>::notify_snap_protect(
    uint64_t request_id, const cls::rbd::SnapshotNamespace &snap_namespace,
    const std::string &snap_name, Context *on_finish) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  ceph_assert(m_image_ctx.exclusive_lock &&
              !m_image_ctx.exclusive_lock->is_lock_owner());

  AsyncRequestId async_request_id(get_client_id(), request_id);

  notify_async_request(
    async_request_id,
    new SnapProtectPayload(async_request_id, snap_namespace, snap_name),
    m_no_op_prog_ctx, on_finish);
}
277 | ||
// Proxies a snapshot-unprotect request to the remote exclusive-lock owner.
template <typename I>
void ImageWatcher<I>::notify_snap_unprotect(
    uint64_t request_id, const cls::rbd::SnapshotNamespace &snap_namespace,
    const std::string &snap_name, Context *on_finish) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  ceph_assert(m_image_ctx.exclusive_lock &&
              !m_image_ctx.exclusive_lock->is_lock_owner());

  AsyncRequestId async_request_id(get_client_id(), request_id);

  notify_async_request(
    async_request_id,
    new SnapUnprotectPayload(async_request_id, snap_namespace, snap_name),
    m_no_op_prog_ctx, on_finish);
}
293 | ||
// Proxies an object-map rebuild request to the remote exclusive-lock owner.
template <typename I>
void ImageWatcher<I>::notify_rebuild_object_map(uint64_t request_id,
                                                ProgressContext &prog_ctx,
                                                Context *on_finish) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  ceph_assert(m_image_ctx.exclusive_lock &&
              !m_image_ctx.exclusive_lock->is_lock_owner());

  AsyncRequestId async_request_id(get_client_id(), request_id);

  notify_async_request(async_request_id,
                       new RebuildObjectMapPayload(async_request_id),
                       prog_ctx, on_finish);
}
308 | ||
// Proxies an image-rename request to the remote exclusive-lock owner.
template <typename I>
void ImageWatcher<I>::notify_rename(uint64_t request_id,
                                    const std::string &image_name,
                                    Context *on_finish) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  ceph_assert(m_image_ctx.exclusive_lock &&
              !m_image_ctx.exclusive_lock->is_lock_owner());

  AsyncRequestId async_request_id(get_client_id(), request_id);

  notify_async_request(async_request_id,
                       new RenamePayload(async_request_id, image_name),
                       m_no_op_prog_ctx, on_finish);
}
323 | ||
// Proxies a feature enable/disable request to the remote lock owner.
template <typename I>
void ImageWatcher<I>::notify_update_features(uint64_t request_id,
                                             uint64_t features, bool enabled,
                                             Context *on_finish) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  ceph_assert(m_image_ctx.exclusive_lock &&
              !m_image_ctx.exclusive_lock->is_lock_owner());

  AsyncRequestId async_request_id(get_client_id(), request_id);

  notify_async_request(async_request_id,
                       new UpdateFeaturesPayload(async_request_id, features, enabled),
                       m_no_op_prog_ctx, on_finish);
}
338 | ||
11fdf7f2 TL |
// Proxies a migration-execute request to the remote exclusive-lock owner.
template <typename I>
void ImageWatcher<I>::notify_migrate(uint64_t request_id,
                                     ProgressContext &prog_ctx,
                                     Context *on_finish) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  ceph_assert(m_image_ctx.exclusive_lock &&
              !m_image_ctx.exclusive_lock->is_lock_owner());

  AsyncRequestId async_request_id(get_client_id(), request_id);

  notify_async_request(async_request_id, new MigratePayload(async_request_id),
                       prog_ctx, on_finish);
}
352 | ||
// Proxies a sparsify request to the remote exclusive-lock owner.
template <typename I>
void ImageWatcher<I>::notify_sparsify(uint64_t request_id, size_t sparse_size,
                                      ProgressContext &prog_ctx,
                                      Context *on_finish) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  ceph_assert(m_image_ctx.exclusive_lock &&
              !m_image_ctx.exclusive_lock->is_lock_owner());

  AsyncRequestId async_request_id(get_client_id(), request_id);

  notify_async_request(async_request_id,
                       new SparsifyPayload(async_request_id, sparse_size),
                       prog_ctx, on_finish);
}
367 | ||
7c673cae FG |
// Broadcasts a header-changed notification on this image's header object.
template <typename I>
void ImageWatcher<I>::notify_header_update(Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << this << ": " << __func__ << dendl;

  // supports legacy (empty buffer) clients
  send_notify(new HeaderUpdatePayload(), on_finish);
}
375 | ||
// Static-style variant: broadcasts a header-changed notification on an
// arbitrary header object without requiring an ImageCtx (synchronous,
// ignores the notify response).
template <typename I>
void ImageWatcher<I>::notify_header_update(librados::IoCtx &io_ctx,
                                           const std::string &oid) {
  // supports legacy (empty buffer) clients
  bufferlist bl;
  encode(NotifyMessage(new HeaderUpdatePayload()), bl);
  io_ctx.notify2(oid, bl, watcher::Notifier::NOTIFY_TIMEOUT, nullptr);
}
384 | ||
f67539c2 TL |
385 | template <typename I> |
386 | void ImageWatcher<I>::notify_quiesce(uint64_t *request_id, | |
387 | ProgressContext &prog_ctx, | |
388 | Context *on_finish) { | |
389 | *request_id = util::reserve_async_request_id(); | |
390 | ||
391 | ldout(m_image_ctx.cct, 10) << this << " " << __func__ << ": request_id=" | |
392 | << request_id << dendl; | |
393 | ||
394 | AsyncRequestId async_request_id(get_client_id(), *request_id); | |
395 | ||
396 | auto attempts = m_image_ctx.config.template get_val<uint64_t>( | |
397 | "rbd_quiesce_notification_attempts"); | |
398 | ||
399 | notify_quiesce(async_request_id, attempts, prog_ctx, on_finish); | |
400 | } | |
401 | ||
// Sends the quiesce notification and retries on timeout until the attempt
// budget is exhausted.  Each ack payload is decoded: any peer result other
// than -EOPNOTSUPP overrides r; an undecodable ack maps to -EINVAL; the
// first failing ack aborts the scan.
template <typename I>
void ImageWatcher<I>::notify_quiesce(const AsyncRequestId &async_request_id,
                                     size_t attempts, ProgressContext &prog_ctx,
                                     Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << this << " " << __func__ << ": async_request_id="
                             << async_request_id << " attempts=" << attempts
                             << dendl;

  ceph_assert(attempts > 0);
  // ownership of notify_response transfers into the lambda below
  auto notify_response = new watcher::NotifyResponse();
  auto on_notify = new LambdaContext(
    [notify_response=std::unique_ptr<watcher::NotifyResponse>(notify_response),
     this, async_request_id, &prog_ctx, on_finish, attempts=attempts-1](int r) {
      // report progress as attempts-used / total; clamp in case the config
      // value shrank below the remaining attempt count mid-flight
      auto total_attempts = m_image_ctx.config.template get_val<uint64_t>(
        "rbd_quiesce_notification_attempts");
      if (total_attempts < attempts) {
        total_attempts = attempts;
      }
      prog_ctx.update_progress(total_attempts - attempts, total_attempts);

      if (r == -ETIMEDOUT) {
        ldout(m_image_ctx.cct, 10) << this << " " << __func__ << ": async_request_id="
                                   << async_request_id << " timed out" << dendl;
        if (attempts > 0) {
          // retry with the decremented budget
          notify_quiesce(async_request_id, attempts, prog_ctx, on_finish);
          return;
        }
      } else if (r == 0) {
        // inspect every peer ack for an embedded result code
        for (auto &[client_id, bl] : notify_response->acks) {
          if (bl.length() == 0) {
            continue;  // legacy client: empty ack carries no result
          }
          try {
            auto iter = bl.cbegin();

            ResponseMessage response_message;
            using ceph::decode;
            decode(response_message, iter);

            if (response_message.result != -EOPNOTSUPP) {
              r = response_message.result;
            }
          } catch (const buffer::error &err) {
            r = -EINVAL;
          }
          if (r < 0) {
            break;
          }
        }
      }
      if (r < 0) {
        lderr(m_image_ctx.cct) << this << " failed to notify quiesce: "
                               << cpp_strerror(r) << dendl;
      }
      on_finish->complete(r);
    });

  bufferlist bl;
  encode(NotifyMessage(new QuiescePayload(async_request_id)), bl);
  Watcher::send_notify(bl, notify_response, on_notify);
}
463 | ||
// Broadcasts the unquiesce notification matching a prior quiesce request.
template <typename I>
void ImageWatcher<I>::notify_unquiesce(uint64_t request_id, Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << this << " " << __func__ << ": request_id="
                             << request_id << dendl;

  AsyncRequestId async_request_id(get_client_id(), request_id);

  send_notify(new UnquiescePayload(async_request_id), on_finish);
}
473 | ||
// Proxies a metadata-set request to the remote exclusive-lock owner.
template <typename I>
void ImageWatcher<I>::notify_metadata_set(uint64_t request_id,
                                          const std::string &key,
                                          const std::string &value,
                                          Context *on_finish) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  ceph_assert(m_image_ctx.exclusive_lock &&
              !m_image_ctx.exclusive_lock->is_lock_owner());

  AsyncRequestId async_request_id(get_client_id(), request_id);

  notify_async_request(
    async_request_id,
    // engaged optional value distinguishes "set" from "remove"
    new MetadataUpdatePayload(async_request_id, key,
                              std::optional<std::string>{value}),
    m_no_op_prog_ctx, on_finish);
}
491 | ||
// Proxies a metadata-remove request to the remote exclusive-lock owner
// (std::nullopt value signals removal of the key).
template <typename I>
void ImageWatcher<I>::notify_metadata_remove(uint64_t request_id,
                                             const std::string &key,
                                             Context *on_finish) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  ceph_assert(m_image_ctx.exclusive_lock &&
              !m_image_ctx.exclusive_lock->is_lock_owner());

  AsyncRequestId async_request_id(get_client_id(), request_id);

  notify_async_request(
    async_request_id,
    new MetadataUpdatePayload(async_request_id, key, std::nullopt),
    m_no_op_prog_ctx, on_finish);
}
507 | ||
7c673cae FG |
508 | template <typename I> |
509 | void ImageWatcher<I>::schedule_cancel_async_requests() { | |
f67539c2 TL |
510 | auto ctx = new LambdaContext([this](int r) { |
511 | if (r != -ECANCELED) { | |
512 | cancel_async_requests(); | |
513 | } | |
514 | }); | |
7c673cae FG |
515 | m_task_finisher->queue(TASK_CODE_CANCEL_ASYNC_REQUESTS, ctx); |
516 | } | |
517 | ||
// Fails all tracked remote async requests with -ERESTART.  Quiesce-notify
// entries (null progress context) are intentionally left in place.
template <typename I>
void ImageWatcher<I>::cancel_async_requests() {
  std::unique_lock l{m_async_request_lock};
  // erase-while-iterating: erase() returns the next valid iterator
  for (auto iter = m_async_requests.begin(); iter != m_async_requests.end(); ) {
    if (iter->second.second == nullptr) {
      // Quiesce notify request. Skip.
      iter++;
    } else {
      iter->second.first->complete(-ERESTART);
      iter = m_async_requests.erase(iter);
    }
  }
}
531 | ||
// Records the current exclusive-lock owner; caller must hold
// m_owner_client_id_lock.
template <typename I>
void ImageWatcher<I>::set_owner_client_id(const ClientId& client_id) {
  ceph_assert(ceph_mutex_is_locked(m_owner_client_id_lock));
  m_owner_client_id = client_id;
  ldout(m_image_ctx.cct, 10) << this << " current lock owner: "
                             << m_owner_client_id << dendl;
}
539 | ||
// Returns this client's identity (rados instance id + watch handle),
// sampling m_watch_handle under the watch lock.
template <typename I>
ClientId ImageWatcher<I>::get_client_id() {
  std::shared_lock l{this->m_watch_lock};
  return ClientId(m_image_ctx.md_ctx.get_instance_id(), this->m_watch_handle);
}
545 | ||
// Broadcasts that this client has acquired the exclusive lock and records
// itself as the current owner.
template <typename I>
void ImageWatcher<I>::notify_acquired_lock() {
  ldout(m_image_ctx.cct, 10) << this << " notify acquired lock" << dendl;

  ClientId client_id = get_client_id();
  {
    std::lock_guard owner_client_id_locker{m_owner_client_id_lock};
    set_owner_client_id(client_id);
  }

  send_notify(new AcquiredLockPayload(client_id));
}
558 | ||
// Broadcasts that this client has released the exclusive lock and clears
// the recorded owner.
template <typename I>
void ImageWatcher<I>::notify_released_lock() {
  ldout(m_image_ctx.cct, 10) << this << " notify released lock" << dendl;

  {
    std::lock_guard owner_client_id_locker{m_owner_client_id_lock};
    set_owner_client_id(ClientId());
  }

  send_notify(new ReleasedLockPayload(get_client_id()));
}
570 | ||
// Schedules a lock request to the current owner, optionally delayed.
// Caller must hold owner_lock; no-op if exclusive lock was dynamically
// disabled or the watch is not registered.
template <typename I>
void ImageWatcher<I>::schedule_request_lock(bool use_timer, int timer_delay) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));

  if (m_image_ctx.exclusive_lock == nullptr) {
    // exclusive lock dynamically disabled via image refresh
    return;
  }
  ceph_assert(m_image_ctx.exclusive_lock &&
              !m_image_ctx.exclusive_lock->is_lock_owner());

  std::shared_lock watch_locker{this->m_watch_lock};
  if (this->is_registered(this->m_watch_lock)) {
    ldout(m_image_ctx.cct, 15) << this << " requesting exclusive lock" << dendl;

    auto ctx = new LambdaContext([this](int r) {
      if (r != -ECANCELED) {
        notify_request_lock();
      }
    });

    if (use_timer) {
      if (timer_delay < 0) {
        // NOTE(review): implicit double->int narrowing (1.0 -> 1 second)
        timer_delay = RETRY_DELAY_SECONDS;
      }
      m_task_finisher->add_event_after(TASK_CODE_REQUEST_LOCK,
                                       timer_delay, ctx);
    } else {
      m_task_finisher->queue(TASK_CODE_REQUEST_LOCK, ctx);
    }
  }
}
603 | ||
// Asks the current exclusive-lock owner to release the lock; the ack is
// handled by handle_request_lock().
template <typename I>
void ImageWatcher<I>::notify_request_lock() {
  std::shared_lock owner_locker{m_image_ctx.owner_lock};
  std::shared_lock image_locker{m_image_ctx.image_lock};

  // ExclusiveLock state machine can be dynamically disabled or
  // race with task cancel
  if (m_image_ctx.exclusive_lock == nullptr ||
      m_image_ctx.exclusive_lock->is_lock_owner()) {
    return;
  }

  ldout(m_image_ctx.cct, 10) << this << " notify request lock" << dendl;

  notify_lock_owner(new RequestLockPayload(get_client_id(), false),
                    create_context_callback<
                      ImageWatcher, &ImageWatcher<I>::handle_request_lock>(this));
}
622 | ||
// Handles the lock-request ack: -ETIMEDOUT means a presumed-dead owner
// (retest acquisition), -EROFS means the owner refuses to release, other
// errors retry after a delay, and success schedules a resend in case the
// owner never actually releases.
template <typename I>
void ImageWatcher<I>::handle_request_lock(int r) {
  std::shared_lock owner_locker{m_image_ctx.owner_lock};
  std::shared_lock image_locker{m_image_ctx.image_lock};

  // ExclusiveLock state machine cannot transition -- but can be
  // dynamically disabled
  if (m_image_ctx.exclusive_lock == nullptr) {
    return;
  }

  if (r == -ETIMEDOUT) {
    ldout(m_image_ctx.cct, 5) << this << " timed out requesting lock: retrying"
                              << dendl;

    // treat this is a dead client -- so retest acquiring the lock
    m_image_ctx.exclusive_lock->handle_peer_notification(0);
  } else if (r == -EROFS) {
    ldout(m_image_ctx.cct, 5) << this << " peer will not release lock" << dendl;
    m_image_ctx.exclusive_lock->handle_peer_notification(r);
  } else if (r < 0) {
    lderr(m_image_ctx.cct) << this << " error requesting lock: "
                           << cpp_strerror(r) << dendl;
    schedule_request_lock(true);
  } else {
    // lock owner acked -- but resend if we don't see them release the lock
    int retry_timeout = m_image_ctx.cct->_conf.template get_val<int64_t>(
      "client_notify_timeout");
    ldout(m_image_ctx.cct, 15) << this << " will retry in " << retry_timeout
                               << " seconds" << dendl;
    schedule_request_lock(true, retry_timeout);
  }
}
656 | ||
// Encodes the payload and sends it to whichever client holds the exclusive
// lock via the NotifyLockOwner state machine (self-deleting; takes
// ownership of the payload through NotifyMessage).
template <typename I>
void ImageWatcher<I>::notify_lock_owner(Payload *payload, Context *on_finish) {
  ceph_assert(on_finish != nullptr);
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));

  bufferlist bl;
  encode(NotifyMessage(payload), bl);

  NotifyLockOwner *notify_lock_owner = NotifyLockOwner::create(
    m_image_ctx, this->m_notifier, std::move(bl), on_finish);
  notify_lock_owner->send();
}
669 | ||
f67539c2 TL |
// True iff the request id is neither in-flight nor recently completed;
// caller must hold m_async_request_lock.
template <typename I>
bool ImageWatcher<I>::is_new_request(const AsyncRequestId &id) const {
  ceph_assert(ceph_mutex_is_locked(m_async_request_lock));

  return m_async_pending.count(id) == 0 && m_async_complete.count(id) == 0;
}
676 | ||
// Moves a request from pending to the completed-results cache (used to
// answer duplicate notifications).  Expired cache entries are pruned and
// the entry's expiration is refreshed on re-completion.  Returns whether
// the request was actually pending.  Caller holds m_async_request_lock.
template <typename I>
bool ImageWatcher<I>::mark_async_request_complete(const AsyncRequestId &id,
                                                  int r) {
  ceph_assert(ceph_mutex_is_locked(m_async_request_lock));

  bool found = m_async_pending.erase(id);

  auto now = ceph_clock_now();

  // prune cache entries whose retention window has passed
  auto it = m_async_complete_expiration.begin();
  while (it != m_async_complete_expiration.end() && it->first < now) {
    m_async_complete.erase(it->second);
    it = m_async_complete_expiration.erase(it);
  }

  if (!m_async_complete.insert({id, r}).second) {
    // already cached: drop the old expiration entry so it can be refreshed
    for (it = m_async_complete_expiration.begin();
         it != m_async_complete_expiration.end(); it++) {
      if (it->second == id) {
        m_async_complete_expiration.erase(it);
        break;
      }
    }
  }
  // retain completed results for 600 seconds
  auto expiration_time = now;
  expiration_time += 600;
  m_async_complete_expiration.insert({expiration_time, id});

  return found;
}
707 | ||
7c673cae FG |
// Lock-acquiring wrapper around the two-argument overload below.
template <typename I>
Context *ImageWatcher<I>::remove_async_request(const AsyncRequestId &id) {
  std::unique_lock async_request_locker{m_async_request_lock};

  return remove_async_request(id, m_async_request_lock);
}
714 | ||
// Removes a tracked request and returns its completion context (ownership
// transfers to the caller), or nullptr if not found.  Caller holds `lock`.
template <typename I>
Context *ImageWatcher<I>::remove_async_request(const AsyncRequestId &id,
                                               ceph::shared_mutex &lock) {
  ceph_assert(ceph_mutex_is_locked(lock));

  auto it = m_async_requests.find(id);
  if (it != m_async_requests.end()) {
    Context *on_complete = it->second.first;
    m_async_requests.erase(it);
    return on_complete;
  }
  return nullptr;
}
728 | ||
// (Re)arms the per-request timeout timer; any previously armed timer for
// the same request is cancelled first, so progress updates push the
// deadline out.
template <typename I>
void ImageWatcher<I>::schedule_async_request_timed_out(const AsyncRequestId &id) {
  ldout(m_image_ctx.cct, 20) << "scheduling async request time out: " << id
                             << dendl;

  auto ctx = new LambdaContext([this, id](int r) {
    if (r != -ECANCELED) {
      async_request_timed_out(id);
    }
  });

  Task task(TASK_CODE_ASYNC_REQUEST, id);
  m_task_finisher->cancel(task);

  m_task_finisher->add_event_after(
    task, m_image_ctx.config.template get_val<uint64_t>("rbd_request_timed_out_seconds"),
    ctx);
}
747 | ||
748 | template <typename I> | |
749 | void ImageWatcher<I>::async_request_timed_out(const AsyncRequestId &id) { | |
750 | Context *on_complete = remove_async_request(id); | |
751 | if (on_complete != nullptr) { | |
752 | ldout(m_image_ctx.cct, 5) << "async request timed out: " << id << dendl; | |
753 | m_image_ctx.op_work_queue->queue(on_complete, -ETIMEDOUT); | |
754 | } | |
755 | } | |
756 | ||
// Registers a remote async request, arms its timeout, and sends the
// payload to the lock owner.  If the notify itself fails, the tracked
// completion is pulled back out and failed immediately; otherwise the
// remote peer drives completion via progress/complete notifications.
// Caller must hold owner_lock.
template <typename I>
void ImageWatcher<I>::notify_async_request(
    const AsyncRequestId &async_request_id, Payload *payload,
    ProgressContext& prog_ctx, Context *on_finish) {
  ceph_assert(on_finish != nullptr);
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));

  ldout(m_image_ctx.cct, 10) << this << " async request: " << async_request_id
                             << dendl;

  Context *on_notify = new LambdaContext([this, async_request_id](int r) {
    if (r < 0) {
      // notification failed -- don't expect updates
      Context *on_complete = remove_async_request(async_request_id);
      if (on_complete != nullptr) {
        on_complete->complete(r);
      }
    }
  });

  Context *on_complete = new LambdaContext(
    [this, async_request_id, on_finish](int r) {
      // disarm the timeout before surfacing the result
      m_task_finisher->cancel(Task(TASK_CODE_ASYNC_REQUEST, async_request_id));
      on_finish->complete(r);
    });

  {
    std::unique_lock async_request_locker{m_async_request_lock};
    m_async_requests[async_request_id] = AsyncRequest(on_complete, &prog_ctx);
  }

  schedule_async_request_timed_out(async_request_id);
  notify_lock_owner(payload, on_notify);
}
791 | ||
template <typename I>
// Prepare to execute a peer-initiated async request locally.  Returns
// -ERESTART if the request originated from this client, the cached result if
// the request already completed, or 0 otherwise.  On a brand-new request,
// *new_request is set and *ctx / *prog_ctx receive remote-forwarding
// wrappers; for an in-flight duplicate, *new_request is cleared and the
// out-params are left untouched.
int ImageWatcher<I>::prepare_async_request(const AsyncRequestId& async_request_id,
                                           bool* new_request, Context** ctx,
                                           ProgressContext** prog_ctx) {
  if (async_request_id.client_id == get_client_id()) {
    return -ERESTART;
  } else {
    std::unique_lock l{m_async_request_lock};
    if (is_new_request(async_request_id)) {
      m_async_pending.insert(async_request_id);
      *new_request = true;
      *prog_ctx = new RemoteProgressContext(*this, async_request_id);
      *ctx = new RemoteContext(*this, async_request_id, *prog_ctx);
    } else {
      *new_request = false;
      auto it = m_async_complete.find(async_request_id);
      if (it != m_async_complete.end()) {
        int r = it->second;
        // reset complete request expiration time
        mark_async_request_complete(async_request_id, r);
        return r;
      }
    }
  }
  return 0;
}
818 | ||
f67539c2 TL |
819 | template <typename I> |
820 | Context *ImageWatcher<I>::prepare_quiesce_request( | |
821 | const AsyncRequestId &request, C_NotifyAck *ack_ctx) { | |
822 | std::unique_lock locker{m_async_request_lock}; | |
823 | ||
824 | auto timeout = 2 * watcher::Notifier::NOTIFY_TIMEOUT / 1000; | |
825 | ||
826 | if (!is_new_request(request)) { | |
827 | auto it = m_async_requests.find(request); | |
828 | if (it != m_async_requests.end()) { | |
829 | delete it->second.first; | |
830 | it->second.first = ack_ctx; | |
831 | } else { | |
832 | auto it = m_async_complete.find(request); | |
833 | ceph_assert(it != m_async_complete.end()); | |
834 | m_task_finisher->queue(new C_ResponseMessage(ack_ctx), it->second); | |
835 | // reset complete request expiration time | |
836 | mark_async_request_complete(request, it->second); | |
837 | } | |
838 | locker.unlock(); | |
839 | ||
840 | m_task_finisher->reschedule_event_after(Task(TASK_CODE_QUIESCE, request), | |
841 | timeout); | |
842 | return nullptr; | |
843 | } | |
844 | ||
845 | m_async_pending.insert(request); | |
846 | m_async_requests[request] = AsyncRequest(ack_ctx, nullptr); | |
847 | m_async_op_tracker.start_op(); | |
848 | ||
849 | return new LambdaContext( | |
850 | [this, request, timeout](int r) { | |
851 | auto unquiesce_ctx = new LambdaContext( | |
852 | [this, request](int r) { | |
853 | if (r == 0) { | |
854 | ldout(m_image_ctx.cct, 10) << this << " quiesce request " | |
855 | << request << " timed out" << dendl; | |
856 | } | |
857 | ||
858 | auto on_finish = new LambdaContext( | |
859 | [this](int r) { | |
860 | m_async_op_tracker.finish_op(); | |
861 | }); | |
862 | ||
863 | m_image_ctx.state->notify_unquiesce(on_finish); | |
864 | }); | |
865 | ||
866 | m_task_finisher->add_event_after(Task(TASK_CODE_QUIESCE, request), | |
867 | timeout, unquiesce_ctx); | |
868 | ||
869 | std::unique_lock async_request_locker{m_async_request_lock}; | |
870 | mark_async_request_complete(request, r); | |
871 | auto ctx = remove_async_request(request, m_async_request_lock); | |
872 | async_request_locker.unlock(); | |
873 | if (ctx != nullptr) { | |
874 | ctx = new C_ResponseMessage(static_cast<C_NotifyAck *>(ctx)); | |
875 | ctx->complete(r); | |
876 | } else { | |
877 | m_task_finisher->cancel(Task(TASK_CODE_QUIESCE, request)); | |
878 | } | |
879 | }); | |
880 | } | |
881 | ||
template <typename I>
// Handle a remote unquiesce notification: refresh the completion-cache
// entry for the matching quiesce request and cancel its auto-unquiesce
// timer.  A missing entry or timer is logged but not an error (e.g. the
// timer may have already fired).
void ImageWatcher<I>::prepare_unquiesce_request(const AsyncRequestId &request) {
  {
    std::unique_lock async_request_locker{m_async_request_lock};
    auto it = m_async_complete.find(request);
    if (it == m_async_complete.end()) {
      ldout(m_image_ctx.cct, 20) << this << " " << request
                                 << ": not found in complete" << dendl;
      return;
    }
    // reset complete request expiration time
    mark_async_request_complete(request, it->second);
  }

  bool canceled = m_task_finisher->cancel(Task(TASK_CODE_QUIESCE, request));
  if (!canceled) {
    ldout(m_image_ctx.cct, 20) << this << " " << request
                               << ": timer task not found" << dendl;
  }
}
902 | ||
template <typename I>
// Cancel all in-flight quiesce notify requests.  Quiesce entries are
// distinguished by their null ProgressContext (second member); each is
// marked complete with result 0 and its stored ack context is deleted
// without being completed.
void ImageWatcher<I>::cancel_quiesce_requests() {
  std::unique_lock l{m_async_request_lock};
  for (auto it = m_async_requests.begin(); it != m_async_requests.end(); ) {
    if (it->second.second == nullptr) {
      // Quiesce notify request.
      mark_async_request_complete(it->first, 0);
      delete it->second.first;
      it = m_async_requests.erase(it);
    } else {
      it++;
    }
  }
}
917 | ||
template <typename I>
// Common path for all remote maintenance-operation notifications.  Verifies
// this client owns the exclusive lock and accepts the request type, handles
// async-request deduplication, then starts the operation via
// Operations::start_op with 'execute' as the body.  Returns true when the
// ack (ack_ctx) should be completed by the caller immediately, false when
// a wrapper context will complete it later.
bool ImageWatcher<I>::handle_operation_request(
    const AsyncRequestId& async_request_id,
    exclusive_lock::OperationRequestType request_type, Operation operation,
    std::function<void(ProgressContext &prog_ctx, Context*)> execute,
    C_NotifyAck *ack_ctx) {
  std::shared_lock owner_locker{m_image_ctx.owner_lock};

  if (m_image_ctx.exclusive_lock != nullptr) {
    int r = 0;
    if (m_image_ctx.exclusive_lock->accept_request(request_type, &r)) {
      bool new_request;
      Context *ctx;
      ProgressContext *prog_ctx;
      bool complete;
      if (async_request_id) {
        // async flavor: ack immediately with the prepare result; progress /
        // completion flow back via separate notifications
        r = prepare_async_request(async_request_id, &new_request, &ctx,
                                  &prog_ctx);
        encode(ResponseMessage(r), ack_ctx->out);
        complete = true;
      } else {
        // sync flavor: the ack is deferred until the operation finishes
        new_request = true;
        ctx = new C_ResponseMessage(ack_ctx);
        prog_ctx = &m_no_op_prog_ctx;
        complete = false;
      }
      if (r == 0 && new_request) {
        // innermost wrapper: record op completion before chaining
        ctx = new LambdaContext(
          [this, operation, ctx](int r) {
            m_image_ctx.operations->finish_op(operation, r);
            ctx->complete(r);
          });
        // outer wrapper: re-take owner_lock and run the operation body
        ctx = new LambdaContext(
          [this, execute, prog_ctx, ctx](int r) {
            if (r < 0) {
              ctx->complete(r);
              return;
            }
            std::shared_lock l{m_image_ctx.owner_lock};
            execute(*prog_ctx, ctx);
          });
        m_image_ctx.operations->start_op(operation, ctx);
      }
      return complete;
    } else if (r < 0) {
      encode(ResponseMessage(r), ack_ctx->out);
    }
  }
  return true;
}
968 | ||
7c673cae FG |
969 | template <typename I> |
970 | bool ImageWatcher<I>::handle_payload(const HeaderUpdatePayload &payload, | |
971 | C_NotifyAck *ack_ctx) { | |
972 | ldout(m_image_ctx.cct, 10) << this << " image header updated" << dendl; | |
973 | ||
974 | m_image_ctx.state->handle_update_notification(); | |
975 | m_image_ctx.perfcounter->inc(l_librbd_notify); | |
976 | if (ack_ctx != nullptr) { | |
977 | m_image_ctx.state->flush_update_watchers(new C_ResponseMessage(ack_ctx)); | |
978 | return false; | |
979 | } | |
980 | return true; | |
981 | } | |
982 | ||
template <typename I>
// A peer announced it acquired the exclusive lock.  Records the new owner
// client id, wakes the local exclusive-lock state machine, and cancels any
// async requests we were tracking unless the announcing client was already
// the known owner (or we hold the lock ourselves).
bool ImageWatcher<I>::handle_payload(const AcquiredLockPayload &payload,
                                     C_NotifyAck *ack_ctx) {
  ldout(m_image_ctx.cct, 10) << this << " image exclusively locked announcement"
                             << dendl;

  bool cancel_async_requests = true;
  if (payload.client_id.is_valid()) {
    std::lock_guard owner_client_id_locker{m_owner_client_id_lock};
    if (payload.client_id == m_owner_client_id) {
      // same owner re-announcing -- in-flight requests remain valid
      cancel_async_requests = false;
    }
    set_owner_client_id(payload.client_id);
  }

  std::shared_lock owner_locker{m_image_ctx.owner_lock};
  if (m_image_ctx.exclusive_lock != nullptr) {
    // potentially wake up the exclusive lock state machine now that
    // a lock owner has advertised itself
    m_image_ctx.exclusive_lock->handle_peer_notification(0);
  }
  if (cancel_async_requests &&
      (m_image_ctx.exclusive_lock == nullptr ||
       !m_image_ctx.exclusive_lock->is_lock_owner())) {
    schedule_cancel_async_requests();
  }
  return true;
}
1011 | ||
template <typename I>
// A peer announced it released the exclusive lock.  Clears the cached owner
// id (when it matches), cancels tracked async requests if we are not the
// owner, and nudges the local exclusive-lock state machine to retry
// acquisition.
bool ImageWatcher<I>::handle_payload(const ReleasedLockPayload &payload,
                                     C_NotifyAck *ack_ctx) {
  ldout(m_image_ctx.cct, 10) << this << " exclusive lock released" << dendl;

  bool cancel_async_requests = true;
  if (payload.client_id.is_valid()) {
    std::lock_guard l{m_owner_client_id_lock};
    if (payload.client_id != m_owner_client_id) {
      // release from a client we didn't consider the owner -- keep tracked
      // requests alive
      ldout(m_image_ctx.cct, 10) << this << " unexpected owner: "
                                 << payload.client_id << " != "
                                 << m_owner_client_id << dendl;
      cancel_async_requests = false;
    } else {
      set_owner_client_id(ClientId());
    }
  }

  std::shared_lock owner_locker{m_image_ctx.owner_lock};
  if (cancel_async_requests &&
      (m_image_ctx.exclusive_lock == nullptr ||
       !m_image_ctx.exclusive_lock->is_lock_owner())) {
    schedule_cancel_async_requests();
  }

  // alert the exclusive lock state machine that the lock is available
  if (m_image_ctx.exclusive_lock != nullptr &&
      !m_image_ctx.exclusive_lock->is_lock_owner()) {
    m_task_finisher->cancel(TASK_CODE_REQUEST_LOCK);
    m_image_ctx.exclusive_lock->handle_peer_notification(0);
  }
  return true;
}
1045 | ||
template <typename I>
// A peer asked us to release the exclusive lock.  Ignored if the request
// came from ourselves or we are not the owner.  Otherwise consults the
// exclusive-lock policy (which may queue a release) and acks with the
// policy/accept result.
bool ImageWatcher<I>::handle_payload(const RequestLockPayload &payload,
                                     C_NotifyAck *ack_ctx) {
  ldout(m_image_ctx.cct, 10) << this << " exclusive lock requested" << dendl;
  if (payload.client_id == get_client_id()) {
    return true;
  }

  std::shared_lock l{m_image_ctx.owner_lock};
  if (m_image_ctx.exclusive_lock != nullptr &&
      m_image_ctx.exclusive_lock->is_lock_owner()) {
    int r = 0;
    bool accept_request = m_image_ctx.exclusive_lock->accept_request(
      exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL, &r);

    if (accept_request) {
      ceph_assert(r == 0);
      std::lock_guard owner_client_id_locker{m_owner_client_id_lock};
      if (!m_owner_client_id.is_valid()) {
        // ownership is in flux (e.g. mid-release) -- don't ack
        return true;
      }

      ldout(m_image_ctx.cct, 10) << this << " queuing release of exclusive lock"
                                 << dendl;
      r = m_image_ctx.get_exclusive_lock_policy()->lock_requested(
        payload.force);
    }
    encode(ResponseMessage(r), ack_ctx->out);
  }
  return true;
}
1077 | ||
1078 | template <typename I> | |
1079 | bool ImageWatcher<I>::handle_payload(const AsyncProgressPayload &payload, | |
1080 | C_NotifyAck *ack_ctx) { | |
9f95a23c | 1081 | std::shared_lock l{m_async_request_lock}; |
7c673cae FG |
1082 | std::map<AsyncRequestId, AsyncRequest>::iterator req_it = |
1083 | m_async_requests.find(payload.async_request_id); | |
1084 | if (req_it != m_async_requests.end()) { | |
1085 | ldout(m_image_ctx.cct, 20) << this << " request progress: " | |
1086 | << payload.async_request_id << " @ " | |
1087 | << payload.offset << "/" << payload.total | |
1088 | << dendl; | |
1089 | schedule_async_request_timed_out(payload.async_request_id); | |
1090 | req_it->second.second->update_progress(payload.offset, payload.total); | |
1091 | } | |
1092 | return true; | |
1093 | } | |
1094 | ||
1095 | template <typename I> | |
1096 | bool ImageWatcher<I>::handle_payload(const AsyncCompletePayload &payload, | |
1097 | C_NotifyAck *ack_ctx) { | |
1098 | Context *on_complete = remove_async_request(payload.async_request_id); | |
1099 | if (on_complete != nullptr) { | |
1100 | ldout(m_image_ctx.cct, 10) << this << " request finished: " | |
1101 | << payload.async_request_id << "=" | |
1102 | << payload.result << dendl; | |
1103 | on_complete->complete(payload.result); | |
1104 | } | |
1105 | return true; | |
1106 | } | |
1107 | ||
1108 | template <typename I> | |
1109 | bool ImageWatcher<I>::handle_payload(const FlattenPayload &payload, | |
1110 | C_NotifyAck *ack_ctx) { | |
f67539c2 TL |
1111 | ldout(m_image_ctx.cct, 10) << this << " remote flatten request: " |
1112 | << payload.async_request_id << dendl; | |
7c673cae | 1113 | |
f67539c2 TL |
1114 | return handle_operation_request( |
1115 | payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL, | |
1116 | OPERATION_FLATTEN, std::bind(&Operations<I>::execute_flatten, | |
1117 | m_image_ctx.operations, | |
1118 | std::placeholders::_1, | |
1119 | std::placeholders::_2), | |
1120 | ack_ctx); | |
7c673cae FG |
1121 | } |
1122 | ||
template <typename I>
// Peer-initiated resize request; validated and executed through
// handle_operation_request().  Returns true if the ack can be sent now.
bool ImageWatcher<I>::handle_payload(const ResizePayload &payload,
                                     C_NotifyAck *ack_ctx) {
  ldout(m_image_ctx.cct, 10) << this << " remote resize request: "
                             << payload.async_request_id << " "
                             << payload.size << " "
                             << payload.allow_shrink << dendl;

  return handle_operation_request(
    payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
    OPERATION_RESIZE, std::bind(&Operations<I>::execute_resize,
                                m_image_ctx.operations, payload.size,
                                payload.allow_shrink, std::placeholders::_1,
                                std::placeholders::_2, 0), ack_ctx);
}
1138 | ||
template <typename I>
// Peer-initiated snapshot-create request.  Orphan mirror snapshots get the
// FORCE_PROMOTION request type so rbd-mirror can accept them even while
// restricting general operations; everything else is GENERAL.
bool ImageWatcher<I>::handle_payload(const SnapCreatePayload &payload,
                                     C_NotifyAck *ack_ctx) {
  ldout(m_image_ctx.cct, 10) << this << " remote snap_create request: "
                             << payload.async_request_id << " "
                             << payload.snap_namespace << " "
                             << payload.snap_name << " "
                             << payload.flags << dendl;

  auto request_type = exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL;

  // rbd-mirror needs to accept forced promotion orphan snap create requests
  auto mirror_ns = boost::get<cls::rbd::MirrorSnapshotNamespace>(
    &payload.snap_namespace);
  if (mirror_ns != nullptr && mirror_ns->is_orphan()) {
    request_type = exclusive_lock::OPERATION_REQUEST_TYPE_FORCE_PROMOTION;
  }

  return handle_operation_request(
    payload.async_request_id, request_type,
    OPERATION_SNAP_CREATE, std::bind(&Operations<I>::execute_snap_create,
                                     m_image_ctx.operations,
                                     payload.snap_namespace,
                                     payload.snap_name, std::placeholders::_2,
                                     0, payload.flags, std::placeholders::_1),
    ack_ctx);
}
1166 | ||
template <typename I>
// Peer-initiated snapshot-rename request; validated and executed through
// handle_operation_request().  Returns true if the ack can be sent now.
bool ImageWatcher<I>::handle_payload(const SnapRenamePayload &payload,
                                     C_NotifyAck *ack_ctx) {
  ldout(m_image_ctx.cct, 10) << this << " remote snap_rename request: "
                             << payload.async_request_id << " "
                             << payload.snap_id << " to "
                             << payload.snap_name << dendl;

  return handle_operation_request(
    payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
    OPERATION_SNAP_RENAME, std::bind(&Operations<I>::execute_snap_rename,
                                     m_image_ctx.operations, payload.snap_id,
                                     payload.snap_name,
                                     std::placeholders::_2), ack_ctx);
}
1182 | ||
template <typename I>
// Peer-initiated snapshot-remove request.  Trash-namespace snapshots use a
// dedicated request type so policy can treat them separately from general
// operations.
bool ImageWatcher<I>::handle_payload(const SnapRemovePayload &payload,
                                     C_NotifyAck *ack_ctx) {
  ldout(m_image_ctx.cct, 10) << this << " remote snap_remove request: "
                             << payload.snap_name << dendl;

  auto request_type = exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL;
  if (cls::rbd::get_snap_namespace_type(payload.snap_namespace) ==
      cls::rbd::SNAPSHOT_NAMESPACE_TYPE_TRASH) {
    request_type = exclusive_lock::OPERATION_REQUEST_TYPE_TRASH_SNAP_REMOVE;
  }

  return handle_operation_request(
    payload.async_request_id, request_type, OPERATION_SNAP_REMOVE,
    std::bind(&Operations<I>::execute_snap_remove, m_image_ctx.operations,
              payload.snap_namespace, payload.snap_name,
              std::placeholders::_2), ack_ctx);
}
1201 | ||
template <typename I>
// Peer-initiated snapshot-protect request; validated and executed through
// handle_operation_request().  Returns true if the ack can be sent now.
bool ImageWatcher<I>::handle_payload(const SnapProtectPayload& payload,
                                     C_NotifyAck *ack_ctx) {
  ldout(m_image_ctx.cct, 10) << this << " remote snap_protect request: "
                             << payload.async_request_id << " "
                             << payload.snap_name << dendl;

  return handle_operation_request(
    payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
    OPERATION_SNAP_PROTECT, std::bind(&Operations<I>::execute_snap_protect,
                                      m_image_ctx.operations,
                                      payload.snap_namespace,
                                      payload.snap_name,
                                      std::placeholders::_2), ack_ctx);
}
1217 | ||
template <typename I>
// Peer-initiated snapshot-unprotect request; validated and executed through
// handle_operation_request().  Returns true if the ack can be sent now.
bool ImageWatcher<I>::handle_payload(const SnapUnprotectPayload& payload,
                                     C_NotifyAck *ack_ctx) {
  ldout(m_image_ctx.cct, 10) << this << " remote snap_unprotect request: "
                             << payload.async_request_id << " "
                             << payload.snap_name << dendl;

  return handle_operation_request(
    payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
    OPERATION_SNAP_UNPROTECT, std::bind(&Operations<I>::execute_snap_unprotect,
                                        m_image_ctx.operations,
                                        payload.snap_namespace,
                                        payload.snap_name,
                                        std::placeholders::_2), ack_ctx);
}
1233 | ||
template <typename I>
// Peer-initiated object-map-rebuild request; validated and executed through
// handle_operation_request().  Returns true if the ack can be sent now.
bool ImageWatcher<I>::handle_payload(const RebuildObjectMapPayload& payload,
                                     C_NotifyAck *ack_ctx) {
  ldout(m_image_ctx.cct, 10) << this << " remote rebuild object map request: "
                             << payload.async_request_id << dendl;

  return handle_operation_request(
    payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
    OPERATION_REBUILD_OBJECT_MAP,
    std::bind(&Operations<I>::execute_rebuild_object_map,
              m_image_ctx.operations, std::placeholders::_1,
              std::placeholders::_2), ack_ctx);
}
1247 | ||
template <typename I>
// Peer-initiated image-rename request; validated and executed through
// handle_operation_request().  Returns true if the ack can be sent now.
bool ImageWatcher<I>::handle_payload(const RenamePayload& payload,
                                     C_NotifyAck *ack_ctx) {
  ldout(m_image_ctx.cct, 10) << this << " remote rename request: "
                             << payload.async_request_id << " "
                             << payload.image_name << dendl;

  return handle_operation_request(
    payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
    OPERATION_RENAME, std::bind(&Operations<I>::execute_rename,
                                m_image_ctx.operations, payload.image_name,
                                std::placeholders::_2), ack_ctx);
}
1261 | ||
template <typename I>
// Peer-initiated feature enable/disable request; validated and executed
// through handle_operation_request().  Returns true if the ack can be sent
// now.
bool ImageWatcher<I>::handle_payload(const UpdateFeaturesPayload& payload,
                                     C_NotifyAck *ack_ctx) {
  ldout(m_image_ctx.cct, 10) << this << " remote update_features request: "
                             << payload.async_request_id << " "
                             << payload.features << " "
                             << (payload.enabled ? "enabled" : "disabled")
                             << dendl;

  return handle_operation_request(
    payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
    OPERATION_UPDATE_FEATURES,
    std::bind(&Operations<I>::execute_update_features, m_image_ctx.operations,
              payload.features, payload.enabled, std::placeholders::_2, 0),
    ack_ctx);
}
1278 | ||
1279 | template <typename I> | |
1280 | bool ImageWatcher<I>::handle_payload(const MigratePayload &payload, | |
1281 | C_NotifyAck *ack_ctx) { | |
f67539c2 TL |
1282 | ldout(m_image_ctx.cct, 10) << this << " remote migrate request: " |
1283 | << payload.async_request_id << dendl; | |
11fdf7f2 | 1284 | |
f67539c2 TL |
1285 | return handle_operation_request( |
1286 | payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL, | |
1287 | OPERATION_MIGRATE, std::bind(&Operations<I>::execute_migrate, | |
1288 | m_image_ctx.operations, | |
1289 | std::placeholders::_1, | |
1290 | std::placeholders::_2), ack_ctx); | |
11fdf7f2 TL |
1291 | } |
1292 | ||
template <typename I>
// Peer-initiated sparsify request; validated and executed through
// handle_operation_request().  Returns true if the ack can be sent now.
bool ImageWatcher<I>::handle_payload(const SparsifyPayload &payload,
                                     C_NotifyAck *ack_ctx) {
  ldout(m_image_ctx.cct, 10) << this << " remote sparsify request: "
                             << payload.async_request_id << dendl;

  return handle_operation_request(
    payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
    OPERATION_SPARSIFY, std::bind(&Operations<I>::execute_sparsify,
                                  m_image_ctx.operations,
                                  payload.sparse_size, std::placeholders::_1,
                                  std::placeholders::_2), ack_ctx);
}
1306 | ||
template <typename I>
// Peer-initiated metadata update.  A present payload.value means "set
// key=value"; an absent value means "remove key".  Both paths flow through
// handle_operation_request().
bool ImageWatcher<I>::handle_payload(const MetadataUpdatePayload &payload,
                                     C_NotifyAck *ack_ctx) {
  if (payload.value) {
    ldout(m_image_ctx.cct, 10) << this << " remote metadata_set request: "
                               << payload.async_request_id << " "
                               << "key=" << payload.key << ", value="
                               << *payload.value << dendl;

    return handle_operation_request(
      payload.async_request_id,
      exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
      OPERATION_METADATA_UPDATE,
      std::bind(&Operations<I>::execute_metadata_set,
                m_image_ctx.operations, payload.key, *payload.value,
                std::placeholders::_2),
      ack_ctx);
  } else {
    ldout(m_image_ctx.cct, 10) << this << " remote metadata_remove request: "
                               << payload.async_request_id << " "
                               << "key=" << payload.key << dendl;

    return handle_operation_request(
      payload.async_request_id,
      exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
      OPERATION_METADATA_UPDATE,
      std::bind(&Operations<I>::execute_metadata_remove,
                m_image_ctx.operations, payload.key, std::placeholders::_2),
      ack_ctx);
  }
}
1338 | ||
template <typename I>
// Peer-initiated quiesce request.  prepare_quiesce_request() returns nullptr
// for duplicates (which it acks itself); otherwise the returned context is
// completed once local quiesce hooks finish.  Always returns false: the ack
// is never sent synchronously.
bool ImageWatcher<I>::handle_payload(const QuiescePayload &payload,
                                     C_NotifyAck *ack_ctx) {
  auto on_finish = prepare_quiesce_request(payload.async_request_id, ack_ctx);
  if (on_finish == nullptr) {
    ldout(m_image_ctx.cct, 10) << this << " duplicate quiesce request: "
                               << payload.async_request_id << dendl;
    return false;
  }

  ldout(m_image_ctx.cct, 10) << this << " quiesce request: "
                             << payload.async_request_id << dendl;
  m_image_ctx.state->notify_quiesce(on_finish);
  return false;
}
1354 | ||
template <typename I>
// Peer-initiated unquiesce request: cancels the matching quiesce timer via
// prepare_unquiesce_request() and acks immediately.
bool ImageWatcher<I>::handle_payload(const UnquiescePayload &payload,
                                     C_NotifyAck *ack_ctx) {
  ldout(m_image_ctx.cct, 10) << this << " unquiesce request: "
                             << payload.async_request_id << dendl;

  prepare_unquiesce_request(payload.async_request_id);
  return true;
}
1364 | ||
template <typename I>
// Fallback for notification ops this build does not understand.  If we own
// the lock and would otherwise have acted (request accepted, or rejected
// with an error), ack with -EOPNOTSUPP so the peer can react.
bool ImageWatcher<I>::handle_payload(const UnknownPayload &payload,
                                     C_NotifyAck *ack_ctx) {
  std::shared_lock l{m_image_ctx.owner_lock};
  if (m_image_ctx.exclusive_lock != nullptr) {
    int r;
    if (m_image_ctx.exclusive_lock->accept_request(
          exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL, &r) || r < 0) {
      encode(ResponseMessage(-EOPNOTSUPP), ack_ctx->out);
    }
  }
  return true;
}
1378 | ||
template <typename I>
// Dispatch a decoded notification payload to the matching handle_payload()
// overload.  Each handler returns true when the ack can be completed here;
// false means the handler took ownership of the ack context and will
// complete it asynchronously.
void ImageWatcher<I>::process_payload(uint64_t notify_id, uint64_t handle,
                                      Payload *payload) {
  auto ctx = new Watcher::C_NotifyAck(this, notify_id, handle);
  bool complete;

  // static_cast is safe: get_notify_op() identifies the concrete type
  switch (payload->get_notify_op()) {
  case NOTIFY_OP_ACQUIRED_LOCK:
    complete = handle_payload(*(static_cast<AcquiredLockPayload *>(payload)),
                              ctx);
    break;
  case NOTIFY_OP_RELEASED_LOCK:
    complete = handle_payload(*(static_cast<ReleasedLockPayload *>(payload)),
                              ctx);
    break;
  case NOTIFY_OP_REQUEST_LOCK:
    complete = handle_payload(*(static_cast<RequestLockPayload *>(payload)),
                              ctx);
    break;
  case NOTIFY_OP_HEADER_UPDATE:
    complete = handle_payload(*(static_cast<HeaderUpdatePayload *>(payload)),
                              ctx);
    break;
  case NOTIFY_OP_ASYNC_PROGRESS:
    complete = handle_payload(*(static_cast<AsyncProgressPayload *>(payload)),
                              ctx);
    break;
  case NOTIFY_OP_ASYNC_COMPLETE:
    complete = handle_payload(*(static_cast<AsyncCompletePayload *>(payload)),
                              ctx);
    break;
  case NOTIFY_OP_FLATTEN:
    complete = handle_payload(*(static_cast<FlattenPayload *>(payload)), ctx);
    break;
  case NOTIFY_OP_RESIZE:
    complete = handle_payload(*(static_cast<ResizePayload *>(payload)), ctx);
    break;
  case NOTIFY_OP_SNAP_CREATE:
    complete = handle_payload(*(static_cast<SnapCreatePayload *>(payload)),
                              ctx);
    break;
  case NOTIFY_OP_SNAP_REMOVE:
    complete = handle_payload(*(static_cast<SnapRemovePayload *>(payload)),
                              ctx);
    break;
  case NOTIFY_OP_SNAP_RENAME:
    complete = handle_payload(*(static_cast<SnapRenamePayload *>(payload)),
                              ctx);
    break;
  case NOTIFY_OP_SNAP_PROTECT:
    complete = handle_payload(*(static_cast<SnapProtectPayload *>(payload)),
                              ctx);
    break;
  case NOTIFY_OP_SNAP_UNPROTECT:
    complete = handle_payload(*(static_cast<SnapUnprotectPayload *>(payload)),
                              ctx);
    break;
  case NOTIFY_OP_REBUILD_OBJECT_MAP:
    complete = handle_payload(*(static_cast<RebuildObjectMapPayload *>(payload)),
                              ctx);
    break;
  case NOTIFY_OP_RENAME:
    complete = handle_payload(*(static_cast<RenamePayload *>(payload)), ctx);
    break;
  case NOTIFY_OP_UPDATE_FEATURES:
    complete = handle_payload(*(static_cast<UpdateFeaturesPayload *>(payload)),
                              ctx);
    break;
  case NOTIFY_OP_MIGRATE:
    complete = handle_payload(*(static_cast<MigratePayload *>(payload)), ctx);
    break;
  case NOTIFY_OP_SPARSIFY:
    complete = handle_payload(*(static_cast<SparsifyPayload *>(payload)), ctx);
    break;
  case NOTIFY_OP_QUIESCE:
    complete = handle_payload(*(static_cast<QuiescePayload *>(payload)), ctx);
    break;
  case NOTIFY_OP_UNQUIESCE:
    complete = handle_payload(*(static_cast<UnquiescePayload *>(payload)), ctx);
    break;
  case NOTIFY_OP_METADATA_UPDATE:
    complete = handle_payload(*(static_cast<MetadataUpdatePayload *>(payload)), ctx);
    break;
  default:
    // only UnknownPayload (op == -1) should ever reach the default case
    ceph_assert(payload->get_notify_op() == static_cast<NotifyOp>(-1));
    complete = handle_payload(*(static_cast<UnknownPayload *>(payload)), ctx);
  }

  if (complete) {
    ctx->complete(0);
  }
}
1471 | ||
template <typename I>
// Watch callback for an incoming notification.  An empty buffer is treated
// as a legacy header-update; otherwise the message is decoded (decode
// failures are logged and dropped).  If the message requires a fresh image
// view, the processing is deferred until after a state refresh.
void ImageWatcher<I>::handle_notify(uint64_t notify_id, uint64_t handle,
                                    uint64_t notifier_id, bufferlist &bl) {
  NotifyMessage notify_message;
  if (bl.length() == 0) {
    // legacy notification for header updates
    notify_message = NotifyMessage(new HeaderUpdatePayload());
  } else {
    try {
      auto iter = bl.cbegin();
      decode(notify_message, iter);
    } catch (const buffer::error &err) {
      lderr(m_image_ctx.cct) << this << " error decoding image notification: "
                             << err.what() << dendl;
      return;
    }
  }

  // if an image refresh is required, refresh before processing the request
  if (notify_message.check_for_refresh() &&
      m_image_ctx.state->is_refresh_required()) {

    // C_ProcessPayload takes ownership of the payload and invokes
    // process_payload() after the refresh completes
    m_image_ctx.state->refresh(
      new C_ProcessPayload(this, notify_id, handle,
                           std::move(notify_message.payload)));
  } else {
    process_payload(notify_id, handle, notify_message.payload.get());
  }
}
1501 | ||
template <typename I>
// Watch error callback: the cached lock-owner id can no longer be trusted
// once the watch is lost, so clear it before delegating to the base-class
// error handling (which drives the rewatch).
void ImageWatcher<I>::handle_error(uint64_t handle, int err) {
  lderr(m_image_ctx.cct) << this << " image watch failed: " << handle << ", "
                         << cpp_strerror(err) << dendl;

  {
    std::lock_guard l{m_owner_client_id_lock};
    set_owner_client_id(ClientId());
  }

  Watcher::handle_error(handle, err);
}
1514 | ||
template <typename I>
// Called after the watch has been re-established.  Refreshes the exclusive
// lock cookie (which embeds the watch handle) and synthesizes a header
// update in case the image changed while the watch was down.
void ImageWatcher<I>::handle_rewatch_complete(int r) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;

  {
    std::shared_lock owner_locker{m_image_ctx.owner_lock};
    if (m_image_ctx.exclusive_lock != nullptr) {
      // update the lock cookie with the new watch handle
      m_image_ctx.exclusive_lock->reacquire_lock(nullptr);
    }
  }

  // image might have been updated while we didn't have active watch
  handle_payload(HeaderUpdatePayload(), nullptr);
}
1531 | ||
template <typename I>
// Encode the payload into a NotifyMessage and broadcast it via the base
// Watcher; ctx is completed when the notify round-trip finishes.
void ImageWatcher<I>::send_notify(Payload *payload, Context *ctx) {
  bufferlist bl;

  encode(NotifyMessage(payload), bl);
  Watcher::send_notify(bl, nullptr, ctx);
}
1539 | ||
template <typename I>
// A remotely executed operation finished locally: schedule the async
// completion notification back to the requesting peer.
void ImageWatcher<I>::RemoteContext::finish(int r) {
  m_image_watcher.schedule_async_complete(m_async_request_id, r);
}
1544 | ||
template <typename I>
// Encode the operation result into the pending notify-ack payload and send
// the ack (the ack itself is always completed with 0; 'r' travels inside
// the encoded ResponseMessage).
void ImageWatcher<I>::C_ResponseMessage::finish(int r) {
  CephContext *cct = notify_ack->cct;
  ldout(cct, 10) << this << " C_ResponseMessage: r=" << r << dendl;

  encode(ResponseMessage(r), notify_ack->out);
  notify_ack->complete(0);
}
1553 | ||
1554 | } // namespace librbd | |
1555 | ||
1556 | template class librbd::ImageWatcher<librbd::ImageCtx>; |