]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- |
2 | // vim: ts=8 sw=2 smarttab | |
3 | ||
4 | #include "librbd/ImageWatcher.h" | |
5 | #include "librbd/ExclusiveLock.h" | |
6 | #include "librbd/ImageCtx.h" | |
7 | #include "librbd/ImageState.h" | |
8 | #include "librbd/internal.h" | |
9 | #include "librbd/Operations.h" | |
10 | #include "librbd/TaskFinisher.h" | |
11 | #include "librbd/Utils.h" | |
12 | #include "librbd/exclusive_lock/Policy.h" | |
13 | #include "librbd/image_watcher/NotifyLockOwner.h" | |
14 | #include "librbd/io/AioCompletion.h" | |
15 | #include "librbd/watcher/Utils.h" | |
16 | #include "include/encoding.h" | |
17 | #include "common/errno.h" | |
18 | #include "common/WorkQueue.h" | |
19 | #include <boost/bind.hpp> | |
20 | ||
21 | #define dout_subsys ceph_subsys_rbd | |
22 | #undef dout_prefix | |
23 | #define dout_prefix *_dout << "librbd::ImageWatcher: " | |
24 | ||
25 | namespace librbd { | |
26 | ||
27 | using namespace image_watcher; | |
28 | using namespace watch_notify; | |
29 | using util::create_async_context_callback; | |
30 | using util::create_context_callback; | |
31 | using util::create_rados_callback; | |
32 | using librbd::watcher::util::HandlePayloadVisitor; | |
33 | ||
34 | static const double RETRY_DELAY_SECONDS = 1.0; | |
35 | ||
31f18b77 FG |
// Deferred dispatcher for a received watch notification.  Queued on a work
// queue so payload processing happens outside the rados notify callback.
template <typename I>
struct ImageWatcher<I>::C_ProcessPayload : public Context {
  ImageWatcher *image_watcher;          // parent watcher (not owned)
  uint64_t notify_id;                   // rados notify id to ack
  uint64_t handle;                      // watch handle associated with the notify
  watch_notify::Payload payload;        // decoded notification payload (copied)

  C_ProcessPayload(ImageWatcher *image_watcher_, uint64_t notify_id_,
                   uint64_t handle_, const watch_notify::Payload &payload)
    : image_watcher(image_watcher_), notify_id(notify_id_), handle(handle_),
      payload(payload) {
  }

  void finish(int r) override {
    // bracket the payload handling in the async-op tracker so shutdown can
    // wait for in-flight notifications to drain
    image_watcher->m_async_op_tracker.start_op();
    if (image_watcher->notifications_blocked()) {
      // requests are blocked -- just ack the notification
      bufferlist bl;
      image_watcher->acknowledge_notify(notify_id, handle, bl);
    } else {
      image_watcher->process_payload(notify_id, handle, payload);
    }
    image_watcher->m_async_op_tracker.finish_op();
  }
};
61 | ||
7c673cae FG |
// Construct the watcher over the image header object; per-instance lock
// names embed `this` so lockdep can distinguish instances.
template <typename I>
ImageWatcher<I>::ImageWatcher(I &image_ctx)
  : Watcher(image_ctx.md_ctx, image_ctx.op_work_queue, image_ctx.header_oid),
    m_image_ctx(image_ctx),
    m_task_finisher(new TaskFinisher<Task>(*m_image_ctx.cct)),
    m_async_request_lock(util::unique_lock_name("librbd::ImageWatcher::m_async_request_lock", this)),
    m_owner_client_id_lock(util::unique_lock_name("librbd::ImageWatcher::m_owner_client_id_lock", this))
{
}

template <typename I>
ImageWatcher<I>::~ImageWatcher()
{
  delete m_task_finisher;
}

// Tear down the watch: fail outstanding async requests, unregister the
// rados watch, then cancel all scheduled tasks before firing on_finish.
template <typename I>
void ImageWatcher<I>::unregister_watch(Context *on_finish) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " unregistering image watcher" << dendl;

  cancel_async_requests();

  // cancel_all completes on_finish once every queued task has been reaped
  FunctionContext *ctx = new FunctionContext([this, on_finish](int r) {
    m_task_finisher->cancel_all(on_finish);
  });
  Watcher::unregister_watch(ctx);
}
90 | ||
31f18b77 FG |
// Block incoming notifications, then cancel any async requests that might
// otherwise wait on peers whose notifications we will no longer process.
template <typename I>
void ImageWatcher<I>::block_notifies(Context *on_finish) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  on_finish = new FunctionContext([this, on_finish](int r) {
    cancel_async_requests();
    on_finish->complete(r);
  });
  Watcher::block_notifies(on_finish);
}
102 | ||
7c673cae FG |
103 | template <typename I> |
104 | void ImageWatcher<I>::schedule_async_progress(const AsyncRequestId &request, | |
105 | uint64_t offset, uint64_t total) { | |
106 | FunctionContext *ctx = new FunctionContext( | |
107 | boost::bind(&ImageWatcher<I>::notify_async_progress, this, request, offset, | |
108 | total)); | |
109 | m_task_finisher->queue(Task(TASK_CODE_ASYNC_PROGRESS, request), ctx); | |
110 | } | |
111 | ||
// Broadcast progress of a remote async request back to the requester.
template <typename I>
int ImageWatcher<I>::notify_async_progress(const AsyncRequestId &request,
                                           uint64_t offset, uint64_t total) {
  ldout(m_image_ctx.cct, 20) << this << " remote async request progress: "
                             << request << " @ " << offset
                             << "/" << total << dendl;

  send_notify(AsyncProgressPayload(request, offset, total));
  return 0;
}

// Queue the completion notification for a remote async request.
template <typename I>
void ImageWatcher<I>::schedule_async_complete(const AsyncRequestId &request,
                                              int r) {
  FunctionContext *ctx = new FunctionContext(
    boost::bind(&ImageWatcher<I>::notify_async_complete, this, request, r));
  m_task_finisher->queue(ctx);
}

// Broadcast the final result of a remote async request; handle_async_complete
// retries on timeout via the chained callback.
template <typename I>
void ImageWatcher<I>::notify_async_complete(const AsyncRequestId &request,
                                            int r) {
  ldout(m_image_ctx.cct, 20) << this << " remote async request finished: "
                             << request << " = " << r << dendl;

  send_notify(AsyncCompletePayload(request, r),
    new FunctionContext(boost::bind(&ImageWatcher<I>::handle_async_complete,
                        this, request, r, _1)));
}
141 | ||
// Completion of the AsyncComplete broadcast: retry the notification on
// timeout, otherwise retire the request from the pending set.
// r is the original operation result; ret_val is the notify result.
template <typename I>
void ImageWatcher<I>::handle_async_complete(const AsyncRequestId &request,
                                            int r, int ret_val) {
  ldout(m_image_ctx.cct, 20) << this << " " << __func__ << ": "
                             << "request=" << request << ", r=" << ret_val
                             << dendl;
  if (ret_val < 0) {
    lderr(m_image_ctx.cct) << this << " failed to notify async complete: "
                           << cpp_strerror(ret_val) << dendl;
    if (ret_val == -ETIMEDOUT) {
      // requester may not have heard the result -- resend it
      schedule_async_complete(request, r);
    }
  } else {
    RWLock::WLocker async_request_locker(m_async_request_lock);
    m_async_pending.erase(request);
  }
}
159 | ||
// Ask the current lock owner to flatten the image.  Caller must hold
// owner_lock and must NOT be the exclusive lock owner; progress and
// completion arrive as async notifications keyed by async_request_id.
template <typename I>
void ImageWatcher<I>::notify_flatten(uint64_t request_id,
                                     ProgressContext &prog_ctx,
                                     Context *on_finish) {
  assert(m_image_ctx.owner_lock.is_locked());
  assert(m_image_ctx.exclusive_lock &&
         !m_image_ctx.exclusive_lock->is_lock_owner());

  AsyncRequestId async_request_id(get_client_id(), request_id);

  notify_async_request(async_request_id, FlattenPayload(async_request_id),
                       prog_ctx, on_finish);
}

// Ask the current lock owner to resize the image (same contract as
// notify_flatten).
template <typename I>
void ImageWatcher<I>::notify_resize(uint64_t request_id, uint64_t size,
                                    bool allow_shrink,
                                    ProgressContext &prog_ctx,
                                    Context *on_finish) {
  assert(m_image_ctx.owner_lock.is_locked());
  assert(m_image_ctx.exclusive_lock &&
         !m_image_ctx.exclusive_lock->is_lock_owner());

  AsyncRequestId async_request_id(get_client_id(), request_id);

  notify_async_request(async_request_id,
                       ResizePayload(size, allow_shrink, async_request_id),
                       prog_ctx, on_finish);
}
189 | ||
// The notify_snap_* helpers forward a snapshot operation to the exclusive
// lock owner.  Caller must hold owner_lock and must NOT own the lock; the
// owner's response completes on_finish.
template <typename I>
void ImageWatcher<I>::notify_snap_create(const cls::rbd::SnapshotNamespace &snap_namespace,
                                         const std::string &snap_name,
                                         Context *on_finish) {
  assert(m_image_ctx.owner_lock.is_locked());
  assert(m_image_ctx.exclusive_lock &&
         !m_image_ctx.exclusive_lock->is_lock_owner());

  notify_lock_owner(SnapCreatePayload(snap_namespace, snap_name), on_finish);
}

template <typename I>
void ImageWatcher<I>::notify_snap_rename(const snapid_t &src_snap_id,
                                         const std::string &dst_snap_name,
                                         Context *on_finish) {
  assert(m_image_ctx.owner_lock.is_locked());
  assert(m_image_ctx.exclusive_lock &&
         !m_image_ctx.exclusive_lock->is_lock_owner());

  notify_lock_owner(SnapRenamePayload(src_snap_id, dst_snap_name), on_finish);
}

template <typename I>
void ImageWatcher<I>::notify_snap_remove(const cls::rbd::SnapshotNamespace &snap_namespace,
                                         const std::string &snap_name,
                                         Context *on_finish) {
  assert(m_image_ctx.owner_lock.is_locked());
  assert(m_image_ctx.exclusive_lock &&
         !m_image_ctx.exclusive_lock->is_lock_owner());

  notify_lock_owner(SnapRemovePayload(snap_namespace, snap_name), on_finish);
}

template <typename I>
void ImageWatcher<I>::notify_snap_protect(const cls::rbd::SnapshotNamespace &snap_namespace,
                                          const std::string &snap_name,
                                          Context *on_finish) {
  assert(m_image_ctx.owner_lock.is_locked());
  assert(m_image_ctx.exclusive_lock &&
         !m_image_ctx.exclusive_lock->is_lock_owner());

  notify_lock_owner(SnapProtectPayload(snap_namespace, snap_name), on_finish);
}

template <typename I>
void ImageWatcher<I>::notify_snap_unprotect(const cls::rbd::SnapshotNamespace &snap_namespace,
                                            const std::string &snap_name,
                                            Context *on_finish) {
  assert(m_image_ctx.owner_lock.is_locked());
  assert(m_image_ctx.exclusive_lock &&
         !m_image_ctx.exclusive_lock->is_lock_owner());

  notify_lock_owner(SnapUnprotectPayload(snap_namespace, snap_name), on_finish);
}
244 | ||
// Ask the lock owner to rebuild the object map (tracked async request with
// progress callbacks).  Caller holds owner_lock and is not the lock owner.
template <typename I>
void ImageWatcher<I>::notify_rebuild_object_map(uint64_t request_id,
                                                ProgressContext &prog_ctx,
                                                Context *on_finish) {
  assert(m_image_ctx.owner_lock.is_locked());
  assert(m_image_ctx.exclusive_lock &&
         !m_image_ctx.exclusive_lock->is_lock_owner());

  AsyncRequestId async_request_id(get_client_id(), request_id);

  notify_async_request(async_request_id,
                       RebuildObjectMapPayload(async_request_id),
                       prog_ctx, on_finish);
}

// Ask the lock owner to rename the image.
template <typename I>
void ImageWatcher<I>::notify_rename(const std::string &image_name,
                                    Context *on_finish) {
  assert(m_image_ctx.owner_lock.is_locked());
  assert(m_image_ctx.exclusive_lock &&
         !m_image_ctx.exclusive_lock->is_lock_owner());

  notify_lock_owner(RenamePayload(image_name), on_finish);
}

// Ask the lock owner to enable/disable the given feature bits.
template <typename I>
void ImageWatcher<I>::notify_update_features(uint64_t features, bool enabled,
                                             Context *on_finish) {
  assert(m_image_ctx.owner_lock.is_locked());
  assert(m_image_ctx.exclusive_lock &&
         !m_image_ctx.exclusive_lock->is_lock_owner());

  notify_lock_owner(UpdateFeaturesPayload(features, enabled), on_finish);
}
279 | ||
// Broadcast a header-update notification to all watchers of this image.
template <typename I>
void ImageWatcher<I>::notify_header_update(Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << this << ": " << __func__ << dendl;

  // supports legacy (empty buffer) clients
  send_notify(HeaderUpdatePayload(), on_finish);
}

// Static-style variant: notify watchers of an arbitrary header object
// without requiring an ImageWatcher instance (fire-and-forget).
template <typename I>
void ImageWatcher<I>::notify_header_update(librados::IoCtx &io_ctx,
                                           const std::string &oid) {
  // supports legacy (empty buffer) clients
  bufferlist bl;
  ::encode(NotifyMessage(HeaderUpdatePayload()), bl);
  io_ctx.notify2(oid, bl, watcher::Notifier::NOTIFY_TIMEOUT, nullptr);
}
296 | ||
297 | template <typename I> | |
298 | void ImageWatcher<I>::schedule_cancel_async_requests() { | |
299 | FunctionContext *ctx = new FunctionContext( | |
300 | boost::bind(&ImageWatcher<I>::cancel_async_requests, this)); | |
301 | m_task_finisher->queue(TASK_CODE_CANCEL_ASYNC_REQUESTS, ctx); | |
302 | } | |
303 | ||
304 | template <typename I> | |
305 | void ImageWatcher<I>::cancel_async_requests() { | |
306 | RWLock::WLocker l(m_async_request_lock); | |
307 | for (std::map<AsyncRequestId, AsyncRequest>::iterator iter = | |
308 | m_async_requests.begin(); | |
309 | iter != m_async_requests.end(); ++iter) { | |
310 | iter->second.first->complete(-ERESTART); | |
311 | } | |
312 | m_async_requests.clear(); | |
313 | } | |
314 | ||
// Record the advertised exclusive lock owner.  Caller must hold
// m_owner_client_id_lock.
template <typename I>
void ImageWatcher<I>::set_owner_client_id(const ClientId& client_id) {
  assert(m_owner_client_id_lock.is_locked());
  m_owner_client_id = client_id;
  ldout(m_image_ctx.cct, 10) << this << " current lock owner: "
                             << m_owner_client_id << dendl;
}

// Identity of this watcher: rados instance id + current watch handle.
template <typename I>
ClientId ImageWatcher<I>::get_client_id() {
  RWLock::RLocker l(this->m_watch_lock);
  return ClientId(m_image_ctx.md_ctx.get_instance_id(), this->m_watch_handle);
}
328 | ||
// Announce that this client now owns the exclusive lock.  The owner id is
// recorded locally first (under its own lock) before broadcasting.
template <typename I>
void ImageWatcher<I>::notify_acquired_lock() {
  ldout(m_image_ctx.cct, 10) << this << " notify acquired lock" << dendl;

  ClientId client_id = get_client_id();
  {
    Mutex::Locker owner_client_id_locker(m_owner_client_id_lock);
    set_owner_client_id(client_id);
  }

  send_notify(AcquiredLockPayload(client_id));
}

// Announce that this client released the exclusive lock; the cached owner
// id is cleared before the broadcast.
template <typename I>
void ImageWatcher<I>::notify_released_lock() {
  ldout(m_image_ctx.cct, 10) << this << " notify released lock" << dendl;

  {
    Mutex::Locker owner_client_id_locker(m_owner_client_id_lock);
    set_owner_client_id(ClientId());
  }

  send_notify(ReleasedLockPayload(get_client_id()));
}
353 | ||
// Schedule a (possibly delayed) exclusive-lock request to the current
// owner.  Only issued while our watch is registered; caller holds
// owner_lock and must not already own the lock.
template <typename I>
void ImageWatcher<I>::schedule_request_lock(bool use_timer, int timer_delay) {
  assert(m_image_ctx.owner_lock.is_locked());

  if (m_image_ctx.exclusive_lock == nullptr) {
    // exclusive lock dynamically disabled via image refresh
    return;
  }
  assert(m_image_ctx.exclusive_lock &&
         !m_image_ctx.exclusive_lock->is_lock_owner());

  RWLock::RLocker watch_locker(this->m_watch_lock);
  if (this->m_watch_state == Watcher::WATCH_STATE_REGISTERED) {
    ldout(m_image_ctx.cct, 15) << this << " requesting exclusive lock" << dendl;

    FunctionContext *ctx = new FunctionContext(
      boost::bind(&ImageWatcher<I>::notify_request_lock, this));
    if (use_timer) {
      if (timer_delay < 0) {
        // negative delay selects the default retry interval
        timer_delay = RETRY_DELAY_SECONDS;
      }
      // TASK_CODE_REQUEST_LOCK: only one pending request task at a time
      m_task_finisher->add_event_after(TASK_CODE_REQUEST_LOCK,
                                       timer_delay, ctx);
    } else {
      m_task_finisher->queue(TASK_CODE_REQUEST_LOCK, ctx);
    }
  }
}
382 | ||
// Send the lock request to the current owner; the response is handled by
// handle_request_lock.
template <typename I>
void ImageWatcher<I>::notify_request_lock() {
  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
  RWLock::RLocker snap_locker(m_image_ctx.snap_lock);

  // ExclusiveLock state machine can be dynamically disabled or
  // race with task cancel
  if (m_image_ctx.exclusive_lock == nullptr ||
      m_image_ctx.exclusive_lock->is_lock_owner()) {
    return;
  }

  ldout(m_image_ctx.cct, 10) << this << " notify request lock" << dendl;

  notify_lock_owner(RequestLockPayload(get_client_id(), false),
    create_context_callback<
      ImageWatcher, &ImageWatcher<I>::handle_request_lock>(this));
}
401 | ||
// Handle the owner's response to our lock request:
//   -ETIMEDOUT -> owner presumed dead, re-attempt acquisition immediately
//   -EROFS     -> owner refuses to release, surface to the state machine
//   other < 0  -> transient error, retry after the default delay
//   success    -> owner acked; re-arm a retry in case the release never comes
template <typename I>
void ImageWatcher<I>::handle_request_lock(int r) {
  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
  RWLock::RLocker snap_locker(m_image_ctx.snap_lock);

  // ExclusiveLock state machine cannot transition -- but can be
  // dynamically disabled
  if (m_image_ctx.exclusive_lock == nullptr) {
    return;
  }

  if (r == -ETIMEDOUT) {
    ldout(m_image_ctx.cct, 5) << this << " timed out requesting lock: retrying"
                              << dendl;

    // treat this is a dead client -- so retest acquiring the lock
    m_image_ctx.exclusive_lock->handle_peer_notification(0);
  } else if (r == -EROFS) {
    ldout(m_image_ctx.cct, 5) << this << " peer will not release lock" << dendl;
    m_image_ctx.exclusive_lock->handle_peer_notification(r);
  } else if (r < 0) {
    lderr(m_image_ctx.cct) << this << " error requesting lock: "
                           << cpp_strerror(r) << dendl;
    schedule_request_lock(true);
  } else {
    // lock owner acked -- but resend if we don't see them release the lock
    int retry_timeout = m_image_ctx.cct->_conf->client_notify_timeout;
    ldout(m_image_ctx.cct, 15) << this << " will retry in " << retry_timeout
                               << " seconds" << dendl;
    schedule_request_lock(true, retry_timeout);
  }
}
434 | ||
// Encode the payload and deliver it to the exclusive lock owner via the
// NotifyLockOwner state machine; on_finish receives the owner's response.
// Caller must hold owner_lock.
template <typename I>
void ImageWatcher<I>::notify_lock_owner(const Payload& payload,
                                        Context *on_finish) {
  assert(on_finish != nullptr);
  assert(m_image_ctx.owner_lock.is_locked());

  bufferlist bl;
  ::encode(NotifyMessage(payload), bl);

  NotifyLockOwner *notify_lock_owner = NotifyLockOwner::create(
    m_image_ctx, this->m_notifier, std::move(bl), on_finish);
  notify_lock_owner->send();
}
448 | ||
449 | template <typename I> | |
450 | Context *ImageWatcher<I>::remove_async_request(const AsyncRequestId &id) { | |
451 | RWLock::WLocker async_request_locker(m_async_request_lock); | |
452 | auto it = m_async_requests.find(id); | |
453 | if (it != m_async_requests.end()) { | |
454 | Context *on_complete = it->second.first; | |
455 | m_async_requests.erase(it); | |
456 | return on_complete; | |
457 | } | |
458 | return nullptr; | |
459 | } | |
460 | ||
// (Re)arm the per-request timeout timer; any previously scheduled timeout
// for the same request id is cancelled first.
template <typename I>
void ImageWatcher<I>::schedule_async_request_timed_out(const AsyncRequestId &id) {
  ldout(m_image_ctx.cct, 20) << "scheduling async request time out: " << id
                             << dendl;

  Context *ctx = new FunctionContext(boost::bind(
    &ImageWatcher<I>::async_request_timed_out, this, id));

  Task task(TASK_CODE_ASYNC_REQUEST, id);
  m_task_finisher->cancel(task);

  m_task_finisher->add_event_after(task, m_image_ctx.request_timed_out_seconds,
                                   ctx);
}

// Timer fired without progress/completion: fail the request with -ETIMEDOUT
// on the op work queue (if it is still tracked).
template <typename I>
void ImageWatcher<I>::async_request_timed_out(const AsyncRequestId &id) {
  Context *on_complete = remove_async_request(id);
  if (on_complete != nullptr) {
    ldout(m_image_ctx.cct, 5) << "async request timed out: " << id << dendl;
    m_image_ctx.op_work_queue->queue(on_complete, -ETIMEDOUT);
  }
}
484 | ||
// Send a tracked async request to the lock owner.  The request is
// registered (with its progress context) before notification, a timeout is
// armed, and on_finish fires when the peer's AsyncComplete arrives, the
// notify fails, or the timeout elapses.  Caller must hold owner_lock.
template <typename I>
void ImageWatcher<I>::notify_async_request(const AsyncRequestId &async_request_id,
                                           const Payload& payload,
                                           ProgressContext& prog_ctx,
                                           Context *on_finish) {
  assert(on_finish != nullptr);
  assert(m_image_ctx.owner_lock.is_locked());

  ldout(m_image_ctx.cct, 10) << this << " async request: " << async_request_id
                             << dendl;

  Context *on_notify = new FunctionContext([this, async_request_id](int r) {
    if (r < 0) {
      // notification failed -- don't expect updates
      Context *on_complete = remove_async_request(async_request_id);
      if (on_complete != nullptr) {
        on_complete->complete(r);
      }
    }
  });

  // completion path: cancel the pending timeout before finishing
  Context *on_complete = new FunctionContext(
    [this, async_request_id, on_finish](int r) {
      m_task_finisher->cancel(Task(TASK_CODE_ASYNC_REQUEST, async_request_id));
      on_finish->complete(r);
    });

  {
    RWLock::WLocker async_request_locker(m_async_request_lock);
    m_async_requests[async_request_id] = AsyncRequest(on_complete, &prog_ctx);
  }

  schedule_async_request_timed_out(async_request_id);
  notify_lock_owner(payload, on_notify);
}
520 | ||
// Register an incoming remote async request.  Returns -ERESTART if the
// request originated from ourselves.  On success *new_request indicates
// whether this id was seen for the first time; if so, *ctx and *prog_ctx
// are populated with remote-forwarding callbacks (duplicates are ignored).
template <typename I>
int ImageWatcher<I>::prepare_async_request(const AsyncRequestId& async_request_id,
                                           bool* new_request, Context** ctx,
                                           ProgressContext** prog_ctx) {
  if (async_request_id.client_id == get_client_id()) {
    return -ERESTART;
  } else {
    RWLock::WLocker l(m_async_request_lock);
    if (m_async_pending.count(async_request_id) == 0) {
      m_async_pending.insert(async_request_id);
      *new_request = true;
      *prog_ctx = new RemoteProgressContext(*this, async_request_id);
      *ctx = new RemoteContext(*this, async_request_id, *prog_ctx);
    } else {
      *new_request = false;
    }
  }
  return 0;
}
540 | ||
// Header updated remotely: refresh image state and flush update watchers.
// Returns false when the ack is deferred until the flush completes.
template <typename I>
bool ImageWatcher<I>::handle_payload(const HeaderUpdatePayload &payload,
                                     C_NotifyAck *ack_ctx) {
  ldout(m_image_ctx.cct, 10) << this << " image header updated" << dendl;

  m_image_ctx.state->handle_update_notification();
  m_image_ctx.perfcounter->inc(l_librbd_notify);
  if (ack_ctx != nullptr) {
    m_image_ctx.state->flush_update_watchers(new C_ResponseMessage(ack_ctx));
    return false;
  }
  return true;
}
554 | ||
// A peer announced it acquired the exclusive lock.  Record the new owner,
// wake our exclusive-lock state machine, and cancel any async requests we
// had outstanding against a previous owner.
template <typename I>
bool ImageWatcher<I>::handle_payload(const AcquiredLockPayload &payload,
                                     C_NotifyAck *ack_ctx) {
  ldout(m_image_ctx.cct, 10) << this << " image exclusively locked announcement"
                             << dendl;

  bool cancel_async_requests = true;
  if (payload.client_id.is_valid()) {
    Mutex::Locker owner_client_id_locker(m_owner_client_id_lock);
    if (payload.client_id == m_owner_client_id) {
      // same owner re-announcing -- in-flight requests remain valid
      cancel_async_requests = false;
    }
    set_owner_client_id(payload.client_id);
  }

  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
  if (m_image_ctx.exclusive_lock != nullptr) {
    // potentially wake up the exclusive lock state machine now that
    // a lock owner has advertised itself
    m_image_ctx.exclusive_lock->handle_peer_notification(0);
  }
  if (cancel_async_requests &&
      (m_image_ctx.exclusive_lock == nullptr ||
       !m_image_ctx.exclusive_lock->is_lock_owner())) {
    schedule_cancel_async_requests();
  }
  return true;
}
583 | ||
// A peer announced it released the exclusive lock.  Clear the cached owner
// (if it matches), cancel requests targeted at it, and let our state
// machine attempt acquisition.
template <typename I>
bool ImageWatcher<I>::handle_payload(const ReleasedLockPayload &payload,
                                     C_NotifyAck *ack_ctx) {
  ldout(m_image_ctx.cct, 10) << this << " exclusive lock released" << dendl;

  bool cancel_async_requests = true;
  if (payload.client_id.is_valid()) {
    Mutex::Locker l(m_owner_client_id_lock);
    if (payload.client_id != m_owner_client_id) {
      // release from a client we don't believe owns the lock -- keep
      // outstanding requests alive
      ldout(m_image_ctx.cct, 10) << this << " unexpected owner: "
                                 << payload.client_id << " != "
                                 << m_owner_client_id << dendl;
      cancel_async_requests = false;
    } else {
      set_owner_client_id(ClientId());
    }
  }

  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
  if (cancel_async_requests &&
      (m_image_ctx.exclusive_lock == nullptr ||
       !m_image_ctx.exclusive_lock->is_lock_owner())) {
    schedule_cancel_async_requests();
  }

  // alert the exclusive lock state machine that the lock is available
  if (m_image_ctx.exclusive_lock != nullptr &&
      !m_image_ctx.exclusive_lock->is_lock_owner()) {
    m_task_finisher->cancel(TASK_CODE_REQUEST_LOCK);
    m_image_ctx.exclusive_lock->handle_peer_notification(0);
  }
  return true;
}
617 | ||
// A peer wants the exclusive lock.  If we own it and are accepting
// requests, consult the exclusive-lock policy to (possibly) queue a
// release; the policy/accept result is encoded into the ack.
template <typename I>
bool ImageWatcher<I>::handle_payload(const RequestLockPayload &payload,
                                     C_NotifyAck *ack_ctx) {
  ldout(m_image_ctx.cct, 10) << this << " exclusive lock requested" << dendl;
  if (payload.client_id == get_client_id()) {
    // ignore our own broadcast
    return true;
  }

  RWLock::RLocker l(m_image_ctx.owner_lock);
  if (m_image_ctx.exclusive_lock != nullptr &&
      m_image_ctx.exclusive_lock->is_lock_owner()) {
    int r = 0;
    bool accept_request = m_image_ctx.exclusive_lock->accept_requests(&r);

    if (accept_request) {
      assert(r == 0);
      Mutex::Locker owner_client_id_locker(m_owner_client_id_lock);
      if (!m_owner_client_id.is_valid()) {
        // ownership not yet advertised -- don't respond
        return true;
      }

      ldout(m_image_ctx.cct, 10) << this << " queuing release of exclusive lock"
                                 << dendl;
      r = m_image_ctx.get_exclusive_lock_policy()->lock_requested(
        payload.force);
    }
    ::encode(ResponseMessage(r), ack_ctx->out);
  }
  return true;
}
648 | ||
// Progress update for one of our outstanding async requests: re-arm the
// timeout and forward the offset/total to the caller's ProgressContext.
template <typename I>
bool ImageWatcher<I>::handle_payload(const AsyncProgressPayload &payload,
                                     C_NotifyAck *ack_ctx) {
  RWLock::RLocker l(m_async_request_lock);
  std::map<AsyncRequestId, AsyncRequest>::iterator req_it =
    m_async_requests.find(payload.async_request_id);
  if (req_it != m_async_requests.end()) {
    ldout(m_image_ctx.cct, 20) << this << " request progress: "
                               << payload.async_request_id << " @ "
                               << payload.offset << "/" << payload.total
                               << dendl;
    schedule_async_request_timed_out(payload.async_request_id);
    req_it->second.second->update_progress(payload.offset, payload.total);
  }
  return true;
}

// Completion of one of our outstanding async requests: finish its
// completion context with the remote result.
template <typename I>
bool ImageWatcher<I>::handle_payload(const AsyncCompletePayload &payload,
                                     C_NotifyAck *ack_ctx) {
  Context *on_complete = remove_async_request(payload.async_request_id);
  if (on_complete != nullptr) {
    ldout(m_image_ctx.cct, 10) << this << " request finished: "
                               << payload.async_request_id << "="
                               << payload.result << dendl;
    on_complete->complete(payload.result);
  }
  return true;
}
678 | ||
// Remote flatten request.  Executed only if we own the lock and accept
// requests; duplicates (already-pending ids) just re-ack with r == 0.
template <typename I>
bool ImageWatcher<I>::handle_payload(const FlattenPayload &payload,
                                     C_NotifyAck *ack_ctx) {

  RWLock::RLocker l(m_image_ctx.owner_lock);
  if (m_image_ctx.exclusive_lock != nullptr) {
    int r;
    if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
      bool new_request;
      Context *ctx;
      ProgressContext *prog_ctx;
      r = prepare_async_request(payload.async_request_id, &new_request,
                                &ctx, &prog_ctx);
      if (r == 0 && new_request) {
        ldout(m_image_ctx.cct, 10) << this << " remote flatten request: "
                                   << payload.async_request_id << dendl;
        m_image_ctx.operations->execute_flatten(*prog_ctx, ctx);
      }

      ::encode(ResponseMessage(r), ack_ctx->out);
    } else if (r < 0) {
      // not accepting requests -- report the refusal reason
      ::encode(ResponseMessage(r), ack_ctx->out);
    }
  }
  return true;
}

// Remote resize request (same accept/prepare flow as flatten).
template <typename I>
bool ImageWatcher<I>::handle_payload(const ResizePayload &payload,
                                     C_NotifyAck *ack_ctx) {
  RWLock::RLocker l(m_image_ctx.owner_lock);
  if (m_image_ctx.exclusive_lock != nullptr) {
    int r;
    if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
      bool new_request;
      Context *ctx;
      ProgressContext *prog_ctx;
      r = prepare_async_request(payload.async_request_id, &new_request,
                                &ctx, &prog_ctx);
      if (r == 0 && new_request) {
        ldout(m_image_ctx.cct, 10) << this << " remote resize request: "
                                   << payload.async_request_id << " "
                                   << payload.size << " "
                                   << payload.allow_shrink << dendl;
        m_image_ctx.operations->execute_resize(payload.size, payload.allow_shrink, *prog_ctx, ctx, 0);
      }

      ::encode(ResponseMessage(r), ack_ctx->out);
    } else if (r < 0) {
      ::encode(ResponseMessage(r), ack_ctx->out);
    }
  }
  return true;
}
733 | ||
// Remote snapshot create/rename/remove requests.  Each executes only when
// we own the lock and accept requests; the ack is deferred (return false)
// until the operation completes via C_ResponseMessage.
template <typename I>
bool ImageWatcher<I>::handle_payload(const SnapCreatePayload &payload,
                                     C_NotifyAck *ack_ctx) {
  RWLock::RLocker l(m_image_ctx.owner_lock);
  if (m_image_ctx.exclusive_lock != nullptr) {
    int r;
    if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
      ldout(m_image_ctx.cct, 10) << this << " remote snap_create request: "
                                 << payload.snap_name << dendl;

      m_image_ctx.operations->execute_snap_create(payload.snap_namespace,
                                                  payload.snap_name,
                                                  new C_ResponseMessage(ack_ctx),
                                                  0, false);
      return false;
    } else if (r < 0) {
      ::encode(ResponseMessage(r), ack_ctx->out);
    }
  }
  return true;
}

template <typename I>
bool ImageWatcher<I>::handle_payload(const SnapRenamePayload &payload,
                                     C_NotifyAck *ack_ctx) {
  RWLock::RLocker l(m_image_ctx.owner_lock);
  if (m_image_ctx.exclusive_lock != nullptr) {
    int r;
    if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
      ldout(m_image_ctx.cct, 10) << this << " remote snap_rename request: "
                                 << payload.snap_id << " to "
                                 << payload.snap_name << dendl;

      m_image_ctx.operations->execute_snap_rename(payload.snap_id,
                                                  payload.snap_name,
                                                  new C_ResponseMessage(ack_ctx));
      return false;
    } else if (r < 0) {
      ::encode(ResponseMessage(r), ack_ctx->out);
    }
  }
  return true;
}

template <typename I>
bool ImageWatcher<I>::handle_payload(const SnapRemovePayload &payload,
                                     C_NotifyAck *ack_ctx) {
  RWLock::RLocker l(m_image_ctx.owner_lock);
  if (m_image_ctx.exclusive_lock != nullptr) {
    int r;
    if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
      ldout(m_image_ctx.cct, 10) << this << " remote snap_remove request: "
                                 << payload.snap_name << dendl;

      m_image_ctx.operations->execute_snap_remove(payload.snap_namespace,
                                                  payload.snap_name,
                                                  new C_ResponseMessage(ack_ctx));
      return false;
    } else if (r < 0) {
      ::encode(ResponseMessage(r), ack_ctx->out);
    }
  }
  return true;
}
798 | ||
// Remote snapshot protect/unprotect requests (same deferred-ack flow as
// the other snapshot handlers above).
template <typename I>
bool ImageWatcher<I>::handle_payload(const SnapProtectPayload& payload,
                                     C_NotifyAck *ack_ctx) {
  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
  if (m_image_ctx.exclusive_lock != nullptr) {
    int r;
    if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
      ldout(m_image_ctx.cct, 10) << this << " remote snap_protect request: "
                                 << payload.snap_name << dendl;

      m_image_ctx.operations->execute_snap_protect(payload.snap_namespace,
                                                   payload.snap_name,
                                                   new C_ResponseMessage(ack_ctx));
      return false;
    } else if (r < 0) {
      ::encode(ResponseMessage(r), ack_ctx->out);
    }
  }
  return true;
}

template <typename I>
bool ImageWatcher<I>::handle_payload(const SnapUnprotectPayload& payload,
                                     C_NotifyAck *ack_ctx) {
  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
  if (m_image_ctx.exclusive_lock != nullptr) {
    int r;
    if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
      ldout(m_image_ctx.cct, 10) << this << " remote snap_unprotect request: "
                                 << payload.snap_name << dendl;

      m_image_ctx.operations->execute_snap_unprotect(payload.snap_namespace,
                                                     payload.snap_name,
                                                     new C_ResponseMessage(ack_ctx));
      return false;
    } else if (r < 0) {
      ::encode(ResponseMessage(r), ack_ctx->out);
    }
  }
  return true;
}
840 | ||
841 | template <typename I> | |
842 | bool ImageWatcher<I>::handle_payload(const RebuildObjectMapPayload& payload, | |
843 | C_NotifyAck *ack_ctx) { | |
844 | RWLock::RLocker l(m_image_ctx.owner_lock); | |
845 | if (m_image_ctx.exclusive_lock != nullptr) { | |
846 | int r; | |
847 | if (m_image_ctx.exclusive_lock->accept_requests(&r)) { | |
848 | bool new_request; | |
849 | Context *ctx; | |
850 | ProgressContext *prog_ctx; | |
851 | r = prepare_async_request(payload.async_request_id, &new_request, | |
852 | &ctx, &prog_ctx); | |
853 | if (r == 0 && new_request) { | |
854 | ldout(m_image_ctx.cct, 10) << this | |
855 | << " remote rebuild object map request: " | |
856 | << payload.async_request_id << dendl; | |
857 | m_image_ctx.operations->execute_rebuild_object_map(*prog_ctx, ctx); | |
858 | } | |
859 | ||
860 | ::encode(ResponseMessage(r), ack_ctx->out); | |
861 | } else if (r < 0) { | |
862 | ::encode(ResponseMessage(r), ack_ctx->out); | |
863 | } | |
864 | } | |
865 | return true; | |
866 | } | |
867 | ||
868 | template <typename I> | |
869 | bool ImageWatcher<I>::handle_payload(const RenamePayload& payload, | |
870 | C_NotifyAck *ack_ctx) { | |
871 | RWLock::RLocker owner_locker(m_image_ctx.owner_lock); | |
872 | if (m_image_ctx.exclusive_lock != nullptr) { | |
873 | int r; | |
874 | if (m_image_ctx.exclusive_lock->accept_requests(&r)) { | |
875 | ldout(m_image_ctx.cct, 10) << this << " remote rename request: " | |
876 | << payload.image_name << dendl; | |
877 | ||
878 | m_image_ctx.operations->execute_rename(payload.image_name, | |
879 | new C_ResponseMessage(ack_ctx)); | |
880 | return false; | |
881 | } else if (r < 0) { | |
882 | ::encode(ResponseMessage(r), ack_ctx->out); | |
883 | } | |
884 | } | |
885 | return true; | |
886 | } | |
887 | ||
888 | template <typename I> | |
889 | bool ImageWatcher<I>::handle_payload(const UpdateFeaturesPayload& payload, | |
890 | C_NotifyAck *ack_ctx) { | |
891 | RWLock::RLocker owner_locker(m_image_ctx.owner_lock); | |
892 | if (m_image_ctx.exclusive_lock != nullptr) { | |
893 | int r; | |
894 | if (m_image_ctx.exclusive_lock->accept_requests(&r)) { | |
895 | ldout(m_image_ctx.cct, 10) << this << " remote update_features request: " | |
896 | << payload.features << " " | |
897 | << (payload.enabled ? "enabled" : "disabled") | |
898 | << dendl; | |
899 | ||
900 | m_image_ctx.operations->execute_update_features( | |
901 | payload.features, payload.enabled, new C_ResponseMessage(ack_ctx), 0); | |
902 | return false; | |
903 | } else if (r < 0) { | |
904 | ::encode(ResponseMessage(r), ack_ctx->out); | |
905 | } | |
906 | } | |
907 | return true; | |
908 | } | |
909 | ||
910 | template <typename I> | |
911 | bool ImageWatcher<I>::handle_payload(const UnknownPayload &payload, | |
912 | C_NotifyAck *ack_ctx) { | |
913 | RWLock::RLocker l(m_image_ctx.owner_lock); | |
914 | if (m_image_ctx.exclusive_lock != nullptr) { | |
915 | int r; | |
916 | if (m_image_ctx.exclusive_lock->accept_requests(&r) || r < 0) { | |
917 | ::encode(ResponseMessage(-EOPNOTSUPP), ack_ctx->out); | |
918 | } | |
919 | } | |
920 | return true; | |
921 | } | |
922 | ||
923 | template <typename I> | |
924 | void ImageWatcher<I>::process_payload(uint64_t notify_id, uint64_t handle, | |
31f18b77 FG |
925 | const Payload &payload) { |
926 | apply_visitor(HandlePayloadVisitor<ImageWatcher<I>>(this, notify_id, handle), | |
927 | payload); | |
7c673cae FG |
928 | } |
929 | ||
template <typename I>
void ImageWatcher<I>::handle_notify(uint64_t notify_id, uint64_t handle,
                                    uint64_t notifier_id, bufferlist &bl) {
  // Entry point for watch notifications on the image header object: decode
  // the message, optionally refresh the image, then dispatch the payload.
  NotifyMessage notify_message;
  if (bl.length() == 0) {
    // legacy notification for header updates
    notify_message = NotifyMessage(HeaderUpdatePayload());
  } else {
    try {
      bufferlist::iterator iter = bl.begin();
      ::decode(notify_message, iter);
    } catch (const buffer::error &err) {
      // Undecodable notification: log and drop it without acking.
      lderr(m_image_ctx.cct) << this << " error decoding image notification: "
                             << err.what() << dendl;
      return;
    }
  }

  // if an image refresh is required, refresh before processing the request
  if (notify_message.check_for_refresh() &&
      m_image_ctx.state->is_refresh_required()) {
    // C_ProcessPayload re-dispatches the payload once the refresh completes.
    m_image_ctx.state->refresh(new C_ProcessPayload(this, notify_id, handle,
                                                    notify_message.payload));
  } else {
    process_payload(notify_id, handle, notify_message.payload);
  }
}
957 | ||
958 | template <typename I> | |
959 | void ImageWatcher<I>::handle_error(uint64_t handle, int err) { | |
960 | lderr(m_image_ctx.cct) << this << " image watch failed: " << handle << ", " | |
961 | << cpp_strerror(err) << dendl; | |
962 | ||
963 | { | |
964 | Mutex::Locker l(m_owner_client_id_lock); | |
965 | set_owner_client_id(ClientId()); | |
966 | } | |
967 | ||
968 | Watcher::handle_error(handle, err); | |
969 | } | |
970 | ||
971 | template <typename I> | |
972 | void ImageWatcher<I>::handle_rewatch_complete(int r) { | |
973 | CephContext *cct = m_image_ctx.cct; | |
974 | ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl; | |
975 | ||
976 | { | |
977 | RWLock::RLocker owner_locker(m_image_ctx.owner_lock); | |
978 | if (m_image_ctx.exclusive_lock != nullptr) { | |
979 | // update the lock cookie with the new watch handle | |
980 | m_image_ctx.exclusive_lock->reacquire_lock(); | |
981 | } | |
982 | } | |
983 | ||
984 | // image might have been updated while we didn't have active watch | |
985 | handle_payload(HeaderUpdatePayload(), nullptr); | |
986 | } | |
987 | ||
988 | template <typename I> | |
989 | void ImageWatcher<I>::send_notify(const Payload &payload, Context *ctx) { | |
990 | bufferlist bl; | |
991 | ||
992 | ::encode(NotifyMessage(payload), bl); | |
993 | Watcher::send_notify(bl, nullptr, ctx); | |
994 | } | |
995 | ||
template <typename I>
void ImageWatcher<I>::RemoteContext::finish(int r) {
  // Relay the local operation's result back to the remote requester via a
  // scheduled async-complete notification.
  m_image_watcher.schedule_async_complete(m_async_request_id, r);
}
1000 | ||
template <typename I>
void ImageWatcher<I>::C_ResponseMessage::finish(int r) {
  CephContext *cct = notify_ack->cct;
  ldout(cct, 10) << this << " C_ResponseMessage: r=" << r << dendl;

  // Encode the operation result into the notify-ack payload and send the ack.
  ::encode(ResponseMessage(r), notify_ack->out);
  notify_ack->complete(0);
}
1009 | ||
1010 | } // namespace librbd | |
1011 | ||
1012 | template class librbd::ImageWatcher<librbd::ImageCtx>; |