]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- |
2 | // vim: ts=8 sw=2 smarttab | |
3 | ||
4 | #include "librbd/ImageWatcher.h" | |
5 | #include "librbd/ExclusiveLock.h" | |
6 | #include "librbd/ImageCtx.h" | |
7 | #include "librbd/ImageState.h" | |
8 | #include "librbd/internal.h" | |
9 | #include "librbd/Operations.h" | |
10 | #include "librbd/TaskFinisher.h" | |
b32b8144 | 11 | #include "librbd/Types.h" |
7c673cae FG |
12 | #include "librbd/Utils.h" |
13 | #include "librbd/exclusive_lock/Policy.h" | |
14 | #include "librbd/image_watcher/NotifyLockOwner.h" | |
15 | #include "librbd/io/AioCompletion.h" | |
16 | #include "librbd/watcher/Utils.h" | |
17 | #include "include/encoding.h" | |
18 | #include "common/errno.h" | |
19 | #include "common/WorkQueue.h" | |
20 | #include <boost/bind.hpp> | |
21 | ||
22 | #define dout_subsys ceph_subsys_rbd | |
23 | #undef dout_prefix | |
24 | #define dout_prefix *_dout << "librbd::ImageWatcher: " | |
25 | ||
26 | namespace librbd { | |
27 | ||
28 | using namespace image_watcher; | |
29 | using namespace watch_notify; | |
30 | using util::create_async_context_callback; | |
31 | using util::create_context_callback; | |
32 | using util::create_rados_callback; | |
33 | using librbd::watcher::util::HandlePayloadVisitor; | |
34 | ||
35 | static const double RETRY_DELAY_SECONDS = 1.0; | |
36 | ||
31f18b77 FG |
// Deferred handler for a received watch notification: when run on the work
// queue it either dispatches the payload for processing or, if notifications
// are currently blocked, just acks the notify so the sender is not left
// waiting for a timeout.
template <typename I>
struct ImageWatcher<I>::C_ProcessPayload : public Context {
  ImageWatcher *image_watcher;   // parent watcher (not owned)
  uint64_t notify_id;            // rados notify id to acknowledge
  uint64_t handle;               // watch handle the notify arrived on
  watch_notify::Payload payload; // decoded notification payload (copied)

  C_ProcessPayload(ImageWatcher *image_watcher_, uint64_t notify_id_,
                   uint64_t handle_, const watch_notify::Payload &payload)
    : image_watcher(image_watcher_), notify_id(notify_id_), handle(handle_),
      payload(payload) {
  }

  void finish(int r) override {
    // keep the watcher's async op tracker busy for the duration of handling
    image_watcher->m_async_op_tracker.start_op();
    if (image_watcher->notifications_blocked()) {
      // requests are blocked -- just ack the notification
      bufferlist bl;
      image_watcher->acknowledge_notify(notify_id, handle, bl);
    } else {
      image_watcher->process_payload(notify_id, handle, payload);
    }
    image_watcher->m_async_op_tracker.finish_op();
  }
};
62 | ||
7c673cae FG |
// Construct the watcher for an image: the base Watcher is bound to the
// image's metadata ioctx, work queue and header object, and a dedicated
// task finisher is allocated for scheduling/cancelling timed tasks.
template <typename I>
ImageWatcher<I>::ImageWatcher(I &image_ctx)
  : Watcher(image_ctx.md_ctx, image_ctx.op_work_queue, image_ctx.header_oid),
    m_image_ctx(image_ctx),
    m_task_finisher(new TaskFinisher<Task>(*m_image_ctx.cct)),
    m_async_request_lock(util::unique_lock_name("librbd::ImageWatcher::m_async_request_lock", this)),
    m_owner_client_id_lock(util::unique_lock_name("librbd::ImageWatcher::m_owner_client_id_lock", this))
{
}
72 | ||
// Destructor: releases the task finisher allocated in the constructor.
template <typename I>
ImageWatcher<I>::~ImageWatcher()
{
  delete m_task_finisher;
}
78 | ||
// Tear down the watch: fail any in-flight async requests, then unregister
// the underlying watch and, once that completes, cancel all scheduled tasks
// before invoking the caller's completion.
template <typename I>
void ImageWatcher<I>::unregister_watch(Context *on_finish) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " unregistering image watcher" << dendl;

  cancel_async_requests();

  // cancel_all fires on_finish after pending tasks have been drained
  FunctionContext *ctx = new FunctionContext([this, on_finish](int r) {
      m_task_finisher->cancel_all(on_finish);
    });
  Watcher::unregister_watch(ctx);
}
91 | ||
31f18b77 FG |
// Block incoming notifications; once the base class has blocked them, fail
// any async requests that were in flight, then complete the caller.
template <typename I>
void ImageWatcher<I>::block_notifies(Context *on_finish) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  // wrap the caller's context so cancellation runs after notifies are blocked
  on_finish = new FunctionContext([this, on_finish](int r) {
      cancel_async_requests();
      on_finish->complete(r);
    });
  Watcher::block_notifies(on_finish);
}
103 | ||
7c673cae FG |
// Queue a progress notification for a remotely requested async operation;
// keyed by request id so newer progress updates supersede queued ones.
template <typename I>
void ImageWatcher<I>::schedule_async_progress(const AsyncRequestId &request,
                                              uint64_t offset, uint64_t total) {
  FunctionContext *ctx = new FunctionContext(
    boost::bind(&ImageWatcher<I>::notify_async_progress, this, request, offset,
                total));
  m_task_finisher->queue(Task(TASK_CODE_ASYNC_PROGRESS, request), ctx);
}
112 | ||
// Broadcast the current progress (offset/total) of a remote async request
// to other watchers. Always reports success to the task finisher.
template <typename I>
int ImageWatcher<I>::notify_async_progress(const AsyncRequestId &request,
                                           uint64_t offset, uint64_t total) {
  ldout(m_image_ctx.cct, 20) << this << " remote async request progress: "
                             << request << " @ " << offset
                             << "/" << total << dendl;

  send_notify(AsyncProgressPayload(request, offset, total));
  return 0;
}
123 | ||
// Queue a completion notification (with result code) for a remotely
// requested async operation.
template <typename I>
void ImageWatcher<I>::schedule_async_complete(const AsyncRequestId &request,
                                              int r) {
  FunctionContext *ctx = new FunctionContext(
    boost::bind(&ImageWatcher<I>::notify_async_complete, this, request, r));
  m_task_finisher->queue(ctx);
}
131 | ||
// Broadcast the completion of a remote async request; the notify's own
// result is routed to handle_async_complete so a timed-out send is retried.
template <typename I>
void ImageWatcher<I>::notify_async_complete(const AsyncRequestId &request,
                                            int r) {
  ldout(m_image_ctx.cct, 20) << this << " remote async request finished: "
                             << request << " = " << r << dendl;

  send_notify(AsyncCompletePayload(request, r),
    new FunctionContext(boost::bind(&ImageWatcher<I>::handle_async_complete,
                        this, request, r, _1)));
}
142 | ||
// Callback for the async-complete notify: on -ETIMEDOUT reschedule the
// notification (peers may have missed it); on success drop the request from
// the pending set. r is the operation result, ret_val the notify result.
template <typename I>
void ImageWatcher<I>::handle_async_complete(const AsyncRequestId &request,
                                            int r, int ret_val) {
  ldout(m_image_ctx.cct, 20) << this << " " << __func__ << ": "
                             << "request=" << request << ", r=" << ret_val
                             << dendl;
  if (ret_val < 0) {
    lderr(m_image_ctx.cct) << this << " failed to notify async complete: "
                           << cpp_strerror(ret_val) << dendl;
    if (ret_val == -ETIMEDOUT) {
      schedule_async_complete(request, r);
    }
  } else {
    RWLock::WLocker async_request_locker(m_async_request_lock);
    m_async_pending.erase(request);
  }
}
160 | ||
// Ask the current lock owner to flatten the image on our behalf.
// Precondition: owner_lock held and we are NOT the exclusive lock owner.
template <typename I>
void ImageWatcher<I>::notify_flatten(uint64_t request_id,
                                     ProgressContext &prog_ctx,
                                     Context *on_finish) {
  assert(m_image_ctx.owner_lock.is_locked());
  assert(m_image_ctx.exclusive_lock &&
         !m_image_ctx.exclusive_lock->is_lock_owner());

  AsyncRequestId async_request_id(get_client_id(), request_id);

  notify_async_request(async_request_id, FlattenPayload(async_request_id),
                       prog_ctx, on_finish);
}
174 | ||
// Ask the current lock owner to resize the image to `size` (shrinking only
// if allow_shrink). Precondition: owner_lock held, not the lock owner.
template <typename I>
void ImageWatcher<I>::notify_resize(uint64_t request_id, uint64_t size,
                                    bool allow_shrink,
                                    ProgressContext &prog_ctx,
                                    Context *on_finish) {
  assert(m_image_ctx.owner_lock.is_locked());
  assert(m_image_ctx.exclusive_lock &&
         !m_image_ctx.exclusive_lock->is_lock_owner());

  AsyncRequestId async_request_id(get_client_id(), request_id);

  notify_async_request(async_request_id,
                       ResizePayload(size, allow_shrink, async_request_id),
                       prog_ctx, on_finish);
}
190 | ||
// Ask the current lock owner to create a snapshot.
// Precondition: owner_lock held, not the lock owner.
template <typename I>
void ImageWatcher<I>::notify_snap_create(const cls::rbd::SnapshotNamespace &snap_namespace,
                                         const std::string &snap_name,
                                         Context *on_finish) {
  assert(m_image_ctx.owner_lock.is_locked());
  assert(m_image_ctx.exclusive_lock &&
         !m_image_ctx.exclusive_lock->is_lock_owner());

  notify_lock_owner(SnapCreatePayload(snap_namespace, snap_name), on_finish);
}
201 | ||
// Ask the current lock owner to rename snapshot `src_snap_id` to
// `dst_snap_name`. Precondition: owner_lock held, not the lock owner.
template <typename I>
void ImageWatcher<I>::notify_snap_rename(const snapid_t &src_snap_id,
                                         const std::string &dst_snap_name,
                                         Context *on_finish) {
  assert(m_image_ctx.owner_lock.is_locked());
  assert(m_image_ctx.exclusive_lock &&
         !m_image_ctx.exclusive_lock->is_lock_owner());

  notify_lock_owner(SnapRenamePayload(src_snap_id, dst_snap_name), on_finish);
}
212 | ||
// Ask the current lock owner to remove a snapshot.
// Precondition: owner_lock held, not the lock owner.
template <typename I>
void ImageWatcher<I>::notify_snap_remove(const cls::rbd::SnapshotNamespace &snap_namespace,
                                         const std::string &snap_name,
                                         Context *on_finish) {
  assert(m_image_ctx.owner_lock.is_locked());
  assert(m_image_ctx.exclusive_lock &&
         !m_image_ctx.exclusive_lock->is_lock_owner());

  notify_lock_owner(SnapRemovePayload(snap_namespace, snap_name), on_finish);
}
223 | ||
// Ask the current lock owner to protect a snapshot.
// Precondition: owner_lock held, not the lock owner.
template <typename I>
void ImageWatcher<I>::notify_snap_protect(const cls::rbd::SnapshotNamespace &snap_namespace,
                                          const std::string &snap_name,
                                          Context *on_finish) {
  assert(m_image_ctx.owner_lock.is_locked());
  assert(m_image_ctx.exclusive_lock &&
         !m_image_ctx.exclusive_lock->is_lock_owner());

  notify_lock_owner(SnapProtectPayload(snap_namespace, snap_name), on_finish);
}
234 | ||
// Ask the current lock owner to unprotect a snapshot.
// Precondition: owner_lock held, not the lock owner.
template <typename I>
void ImageWatcher<I>::notify_snap_unprotect(const cls::rbd::SnapshotNamespace &snap_namespace,
                                            const std::string &snap_name,
                                            Context *on_finish) {
  assert(m_image_ctx.owner_lock.is_locked());
  assert(m_image_ctx.exclusive_lock &&
         !m_image_ctx.exclusive_lock->is_lock_owner());

  notify_lock_owner(SnapUnprotectPayload(snap_namespace, snap_name), on_finish);
}
245 | ||
// Ask the current lock owner to rebuild the object map, tracking progress
// locally. Precondition: owner_lock held, not the lock owner.
template <typename I>
void ImageWatcher<I>::notify_rebuild_object_map(uint64_t request_id,
                                                ProgressContext &prog_ctx,
                                                Context *on_finish) {
  assert(m_image_ctx.owner_lock.is_locked());
  assert(m_image_ctx.exclusive_lock &&
         !m_image_ctx.exclusive_lock->is_lock_owner());

  AsyncRequestId async_request_id(get_client_id(), request_id);

  notify_async_request(async_request_id,
                       RebuildObjectMapPayload(async_request_id),
                       prog_ctx, on_finish);
}
260 | ||
// Ask the current lock owner to rename the image.
// Precondition: owner_lock held, not the lock owner.
template <typename I>
void ImageWatcher<I>::notify_rename(const std::string &image_name,
                                    Context *on_finish) {
  assert(m_image_ctx.owner_lock.is_locked());
  assert(m_image_ctx.exclusive_lock &&
         !m_image_ctx.exclusive_lock->is_lock_owner());

  notify_lock_owner(RenamePayload(image_name), on_finish);
}
270 | ||
// Ask the current lock owner to enable/disable the given feature bits.
// Precondition: owner_lock held, not the lock owner.
template <typename I>
void ImageWatcher<I>::notify_update_features(uint64_t features, bool enabled,
                                             Context *on_finish) {
  assert(m_image_ctx.owner_lock.is_locked());
  assert(m_image_ctx.exclusive_lock &&
         !m_image_ctx.exclusive_lock->is_lock_owner());

  notify_lock_owner(UpdateFeaturesPayload(features, enabled), on_finish);
}
280 | ||
// Broadcast a header-update notification from this watcher instance.
template <typename I>
void ImageWatcher<I>::notify_header_update(Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << this << ": " << __func__ << dendl;

  // supports legacy (empty buffer) clients
  send_notify(HeaderUpdatePayload(), on_finish);
}
288 | ||
// Fire-and-forget header-update notification on an arbitrary ioctx/oid;
// usable without an ImageWatcher instance (nullptr response buffer).
template <typename I>
void ImageWatcher<I>::notify_header_update(librados::IoCtx &io_ctx,
                                           const std::string &oid) {
  // supports legacy (empty buffer) clients
  bufferlist bl;
  ::encode(NotifyMessage(HeaderUpdatePayload()), bl);
  io_ctx.notify2(oid, bl, watcher::Notifier::NOTIFY_TIMEOUT, nullptr);
}
297 | ||
// Queue cancellation of all in-flight async requests on the task finisher
// (de-duplicated via TASK_CODE_CANCEL_ASYNC_REQUESTS).
template <typename I>
void ImageWatcher<I>::schedule_cancel_async_requests() {
  FunctionContext *ctx = new FunctionContext(
    boost::bind(&ImageWatcher<I>::cancel_async_requests, this));
  m_task_finisher->queue(TASK_CODE_CANCEL_ASYNC_REQUESTS, ctx);
}
304 | ||
305 | template <typename I> | |
306 | void ImageWatcher<I>::cancel_async_requests() { | |
307 | RWLock::WLocker l(m_async_request_lock); | |
308 | for (std::map<AsyncRequestId, AsyncRequest>::iterator iter = | |
309 | m_async_requests.begin(); | |
310 | iter != m_async_requests.end(); ++iter) { | |
311 | iter->second.first->complete(-ERESTART); | |
312 | } | |
313 | m_async_requests.clear(); | |
314 | } | |
315 | ||
// Record the client id of the current exclusive lock owner.
// Precondition: m_owner_client_id_lock held by the caller.
template <typename I>
void ImageWatcher<I>::set_owner_client_id(const ClientId& client_id) {
  assert(m_owner_client_id_lock.is_locked());
  m_owner_client_id = client_id;
  ldout(m_image_ctx.cct, 10) << this << " current lock owner: "
                             << m_owner_client_id << dendl;
}
323 | ||
// Build this client's identity from the rados instance id plus the current
// watch handle (the pair uniquely identifies this watcher registration).
template <typename I>
ClientId ImageWatcher<I>::get_client_id() {
  RWLock::RLocker l(this->m_watch_lock);
  return ClientId(m_image_ctx.md_ctx.get_instance_id(), this->m_watch_handle);
}
329 | ||
// Announce to peers that this client has acquired the exclusive lock, and
// record ourselves as the known owner.
template <typename I>
void ImageWatcher<I>::notify_acquired_lock() {
  ldout(m_image_ctx.cct, 10) << this << " notify acquired lock" << dendl;

  ClientId client_id = get_client_id();
  {
    Mutex::Locker owner_client_id_locker(m_owner_client_id_lock);
    set_owner_client_id(client_id);
  }

  send_notify(AcquiredLockPayload(client_id));
}
342 | ||
// Announce to peers that this client has released the exclusive lock, and
// clear the recorded owner.
template <typename I>
void ImageWatcher<I>::notify_released_lock() {
  ldout(m_image_ctx.cct, 10) << this << " notify released lock" << dendl;

  {
    Mutex::Locker owner_client_id_locker(m_owner_client_id_lock);
    set_owner_client_id(ClientId());
  }

  send_notify(ReleasedLockPayload(get_client_id()));
}
354 | ||
355 | template <typename I> | |
356 | void ImageWatcher<I>::schedule_request_lock(bool use_timer, int timer_delay) { | |
357 | assert(m_image_ctx.owner_lock.is_locked()); | |
358 | ||
359 | if (m_image_ctx.exclusive_lock == nullptr) { | |
360 | // exclusive lock dynamically disabled via image refresh | |
361 | return; | |
362 | } | |
363 | assert(m_image_ctx.exclusive_lock && | |
364 | !m_image_ctx.exclusive_lock->is_lock_owner()); | |
365 | ||
366 | RWLock::RLocker watch_locker(this->m_watch_lock); | |
367 | if (this->m_watch_state == Watcher::WATCH_STATE_REGISTERED) { | |
368 | ldout(m_image_ctx.cct, 15) << this << " requesting exclusive lock" << dendl; | |
369 | ||
370 | FunctionContext *ctx = new FunctionContext( | |
371 | boost::bind(&ImageWatcher<I>::notify_request_lock, this)); | |
372 | if (use_timer) { | |
373 | if (timer_delay < 0) { | |
374 | timer_delay = RETRY_DELAY_SECONDS; | |
375 | } | |
376 | m_task_finisher->add_event_after(TASK_CODE_REQUEST_LOCK, | |
377 | timer_delay, ctx); | |
378 | } else { | |
379 | m_task_finisher->queue(TASK_CODE_REQUEST_LOCK, ctx); | |
380 | } | |
381 | } | |
382 | } | |
383 | ||
// Send a lock-release request to the current owner. Bails out if exclusive
// lock was dynamically disabled or we became the owner in the meantime
// (can race with task cancellation).
template <typename I>
void ImageWatcher<I>::notify_request_lock() {
  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
  RWLock::RLocker snap_locker(m_image_ctx.snap_lock);

  // ExclusiveLock state machine can be dynamically disabled or
  // race with task cancel
  if (m_image_ctx.exclusive_lock == nullptr ||
      m_image_ctx.exclusive_lock->is_lock_owner()) {
    return;
  }

  ldout(m_image_ctx.cct, 10) << this << " notify request lock" << dendl;

  notify_lock_owner(RequestLockPayload(get_client_id(), false),
     create_context_callback<
       ImageWatcher, &ImageWatcher<I>::handle_request_lock>(this));
}
402 | ||
// Handle the response to a lock request:
//  -ETIMEDOUT : treat the owner as dead and re-test lock acquisition;
//  -EROFS     : owner refuses to release -- propagate to the state machine;
//  other < 0  : log and retry after the default delay;
//  success    : owner acked; retry after client_notify_timeout in case we
//               never observe the actual release.
template <typename I>
void ImageWatcher<I>::handle_request_lock(int r) {
  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
  RWLock::RLocker snap_locker(m_image_ctx.snap_lock);

  // ExclusiveLock state machine cannot transition -- but can be
  // dynamically disabled
  if (m_image_ctx.exclusive_lock == nullptr) {
    return;
  }

  if (r == -ETIMEDOUT) {
    ldout(m_image_ctx.cct, 5) << this << " timed out requesting lock: retrying"
                              << dendl;

    // treat this is a dead client -- so retest acquiring the lock
    m_image_ctx.exclusive_lock->handle_peer_notification(0);
  } else if (r == -EROFS) {
    ldout(m_image_ctx.cct, 5) << this << " peer will not release lock" << dendl;
    m_image_ctx.exclusive_lock->handle_peer_notification(r);
  } else if (r < 0) {
    lderr(m_image_ctx.cct) << this << " error requesting lock: "
                           << cpp_strerror(r) << dendl;
    schedule_request_lock(true);
  } else {
    // lock owner acked -- but resend if we don't see them release the lock
    int retry_timeout = m_image_ctx.cct->_conf->template get_val<int64_t>(
      "client_notify_timeout");
    ldout(m_image_ctx.cct, 15) << this << " will retry in " << retry_timeout
                               << " seconds" << dendl;
    schedule_request_lock(true, retry_timeout);
  }
}
436 | ||
// Encode and send a payload targeted at the current exclusive lock owner;
// on_finish receives the decoded response status.
// Precondition: owner_lock held.
template <typename I>
void ImageWatcher<I>::notify_lock_owner(const Payload& payload,
                                        Context *on_finish) {
  assert(on_finish != nullptr);
  assert(m_image_ctx.owner_lock.is_locked());

  bufferlist bl;
  ::encode(NotifyMessage(payload), bl);

  // NotifyLockOwner deletes itself upon completion
  NotifyLockOwner *notify_lock_owner = NotifyLockOwner::create(
    m_image_ctx, this->m_notifier, std::move(bl), on_finish);
  notify_lock_owner->send();
}
450 | ||
451 | template <typename I> | |
452 | Context *ImageWatcher<I>::remove_async_request(const AsyncRequestId &id) { | |
453 | RWLock::WLocker async_request_locker(m_async_request_lock); | |
454 | auto it = m_async_requests.find(id); | |
455 | if (it != m_async_requests.end()) { | |
456 | Context *on_complete = it->second.first; | |
457 | m_async_requests.erase(it); | |
458 | return on_complete; | |
459 | } | |
460 | return nullptr; | |
461 | } | |
462 | ||
// (Re)arm the timeout for a tracked async request: any previously scheduled
// timeout for the same id is cancelled first, so progress updates keep the
// request alive.
template <typename I>
void ImageWatcher<I>::schedule_async_request_timed_out(const AsyncRequestId &id) {
  ldout(m_image_ctx.cct, 20) << "scheduling async request time out: " << id
                             << dendl;

  Context *ctx = new FunctionContext(boost::bind(
    &ImageWatcher<I>::async_request_timed_out, this, id));

  Task task(TASK_CODE_ASYNC_REQUEST, id);
  m_task_finisher->cancel(task);

  m_task_finisher->add_event_after(task, m_image_ctx.request_timed_out_seconds,
                                   ctx);
}
477 | ||
// Timeout handler: if the request is still tracked, fail its completion
// with -ETIMEDOUT on the work queue.
template <typename I>
void ImageWatcher<I>::async_request_timed_out(const AsyncRequestId &id) {
  Context *on_complete = remove_async_request(id);
  if (on_complete != nullptr) {
    ldout(m_image_ctx.cct, 5) << "async request timed out: " << id << dendl;
    m_image_ctx.op_work_queue->queue(on_complete, -ETIMEDOUT);
  }
}
486 | ||
// Send an async operation request to the lock owner and track it locally so
// progress/completion notifications can be routed back. The request is armed
// with a timeout; a failed notify immediately fails the tracked completion.
// Precondition: owner_lock held.
template <typename I>
void ImageWatcher<I>::notify_async_request(const AsyncRequestId &async_request_id,
                                           const Payload& payload,
                                           ProgressContext& prog_ctx,
                                           Context *on_finish) {
  assert(on_finish != nullptr);
  assert(m_image_ctx.owner_lock.is_locked());

  ldout(m_image_ctx.cct, 10) << this << " async request: " << async_request_id
                             << dendl;

  // fires when the notify itself completes (not the remote operation)
  Context *on_notify = new FunctionContext([this, async_request_id](int r) {
    if (r < 0) {
      // notification failed -- don't expect updates
      Context *on_complete = remove_async_request(async_request_id);
      if (on_complete != nullptr) {
        on_complete->complete(r);
      }
    }
  });

  // fires when the remote operation finishes (or times out); cancels the
  // pending timeout task before completing the caller
  Context *on_complete = new FunctionContext(
    [this, async_request_id, on_finish](int r) {
      m_task_finisher->cancel(Task(TASK_CODE_ASYNC_REQUEST, async_request_id));
      on_finish->complete(r);
    });

  {
    RWLock::WLocker async_request_locker(m_async_request_lock);
    m_async_requests[async_request_id] = AsyncRequest(on_complete, &prog_ctx);
  }

  schedule_async_request_timed_out(async_request_id);
  notify_lock_owner(payload, on_notify);
}
522 | ||
// Prepare to execute a remote async request locally. Returns -ERESTART if
// the request originated from this very client (stale self-notification).
// Otherwise *new_request tells whether this id was seen for the first time;
// when true, *ctx/*prog_ctx are populated with remote-reporting contexts.
template <typename I>
int ImageWatcher<I>::prepare_async_request(const AsyncRequestId& async_request_id,
                                           bool* new_request, Context** ctx,
                                           ProgressContext** prog_ctx) {
  if (async_request_id.client_id == get_client_id()) {
    return -ERESTART;
  } else {
    RWLock::WLocker l(m_async_request_lock);
    if (m_async_pending.count(async_request_id) == 0) {
      m_async_pending.insert(async_request_id);
      *new_request = true;
      *prog_ctx = new RemoteProgressContext(*this, async_request_id);
      *ctx = new RemoteContext(*this, async_request_id, *prog_ctx);
    } else {
      // duplicate delivery of an already-running request
      *new_request = false;
    }
  }
  return 0;
}
542 | ||
// Header updated on the OSD: refresh local state. Returns false (ack
// deferred) when flushing update watchers asynchronously; true = ack now.
template <typename I>
bool ImageWatcher<I>::handle_payload(const HeaderUpdatePayload &payload,
                                     C_NotifyAck *ack_ctx) {
  ldout(m_image_ctx.cct, 10) << this << " image header updated" << dendl;

  m_image_ctx.state->handle_update_notification();
  m_image_ctx.perfcounter->inc(l_librbd_notify);
  if (ack_ctx != nullptr) {
    // C_ResponseMessage acks the notify once the flush completes
    m_image_ctx.state->flush_update_watchers(new C_ResponseMessage(ack_ctx));
    return false;
  }
  return true;
}
556 | ||
// A peer announced it acquired the exclusive lock. Record the new owner,
// wake the local exclusive-lock state machine, and cancel our outstanding
// async requests unless the owner is unchanged or we hold the lock.
template <typename I>
bool ImageWatcher<I>::handle_payload(const AcquiredLockPayload &payload,
                                     C_NotifyAck *ack_ctx) {
  ldout(m_image_ctx.cct, 10) << this << " image exclusively locked announcement"
                             << dendl;

  bool cancel_async_requests = true;
  if (payload.client_id.is_valid()) {
    Mutex::Locker owner_client_id_locker(m_owner_client_id_lock);
    if (payload.client_id == m_owner_client_id) {
      // same owner re-announcing -- nothing was interrupted
      cancel_async_requests = false;
    }
    set_owner_client_id(payload.client_id);
  }

  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
  if (m_image_ctx.exclusive_lock != nullptr) {
    // potentially wake up the exclusive lock state machine now that
    // a lock owner has advertised itself
    m_image_ctx.exclusive_lock->handle_peer_notification(0);
  }
  if (cancel_async_requests &&
      (m_image_ctx.exclusive_lock == nullptr ||
       !m_image_ctx.exclusive_lock->is_lock_owner())) {
    schedule_cancel_async_requests();
  }
  return true;
}
585 | ||
// A peer announced it released the exclusive lock. Clear the recorded owner
// (if it matches), cancel our async requests, and nudge the local
// exclusive-lock state machine that the lock may now be acquirable.
template <typename I>
bool ImageWatcher<I>::handle_payload(const ReleasedLockPayload &payload,
                                     C_NotifyAck *ack_ctx) {
  ldout(m_image_ctx.cct, 10) << this << " exclusive lock released" << dendl;

  bool cancel_async_requests = true;
  if (payload.client_id.is_valid()) {
    Mutex::Locker l(m_owner_client_id_lock);
    if (payload.client_id != m_owner_client_id) {
      // release from a client we didn't consider the owner -- ignore
      ldout(m_image_ctx.cct, 10) << this << " unexpected owner: "
                                 << payload.client_id << " != "
                                 << m_owner_client_id << dendl;
      cancel_async_requests = false;
    } else {
      set_owner_client_id(ClientId());
    }
  }

  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
  if (cancel_async_requests &&
      (m_image_ctx.exclusive_lock == nullptr ||
       !m_image_ctx.exclusive_lock->is_lock_owner())) {
    schedule_cancel_async_requests();
  }

  // alert the exclusive lock state machine that the lock is available
  if (m_image_ctx.exclusive_lock != nullptr &&
      !m_image_ctx.exclusive_lock->is_lock_owner()) {
    m_task_finisher->cancel(TASK_CODE_REQUEST_LOCK);
    m_image_ctx.exclusive_lock->handle_peer_notification(0);
  }
  return true;
}
619 | ||
// A peer asked us to release the exclusive lock. If we own the lock and are
// accepting requests, consult the exclusive-lock policy; the policy result
// (or the accept_requests error) is encoded into the ack response.
template <typename I>
bool ImageWatcher<I>::handle_payload(const RequestLockPayload &payload,
                                     C_NotifyAck *ack_ctx) {
  ldout(m_image_ctx.cct, 10) << this << " exclusive lock requested" << dendl;
  if (payload.client_id == get_client_id()) {
    // our own broadcast -- ignore
    return true;
  }

  RWLock::RLocker l(m_image_ctx.owner_lock);
  if (m_image_ctx.exclusive_lock != nullptr &&
      m_image_ctx.exclusive_lock->is_lock_owner()) {
    int r = 0;
    bool accept_request = m_image_ctx.exclusive_lock->accept_requests(&r);

    if (accept_request) {
      assert(r == 0);
      Mutex::Locker owner_client_id_locker(m_owner_client_id_lock);
      if (!m_owner_client_id.is_valid()) {
        // haven't finished advertising ourselves yet -- no response encoded
        return true;
      }

      ldout(m_image_ctx.cct, 10) << this << " queuing release of exclusive lock"
                                 << dendl;
      r = m_image_ctx.get_exclusive_lock_policy()->lock_requested(
        payload.force);
    }
    ::encode(ResponseMessage(r), ack_ctx->out);
  }
  return true;
}
650 | ||
// Progress update for an async request we initiated: re-arm its timeout and
// forward the progress to the caller's ProgressContext.
template <typename I>
bool ImageWatcher<I>::handle_payload(const AsyncProgressPayload &payload,
                                     C_NotifyAck *ack_ctx) {
  RWLock::RLocker l(m_async_request_lock);
  std::map<AsyncRequestId, AsyncRequest>::iterator req_it =
    m_async_requests.find(payload.async_request_id);
  if (req_it != m_async_requests.end()) {
    ldout(m_image_ctx.cct, 20) << this << " request progress: "
                               << payload.async_request_id << " @ "
                               << payload.offset << "/" << payload.total
                               << dendl;
    schedule_async_request_timed_out(payload.async_request_id);
    req_it->second.second->update_progress(payload.offset, payload.total);
  }
  return true;
}
667 | ||
// Completion of an async request we initiated: detach its tracked context
// and complete it with the remote result.
template <typename I>
bool ImageWatcher<I>::handle_payload(const AsyncCompletePayload &payload,
                                     C_NotifyAck *ack_ctx) {
  Context *on_complete = remove_async_request(payload.async_request_id);
  if (on_complete != nullptr) {
    ldout(m_image_ctx.cct, 10) << this << " request finished: "
                               << payload.async_request_id << "="
                               << payload.result << dendl;
    on_complete->complete(payload.result);
  }
  return true;
}
680 | ||
// Remote flatten request: serviced only if we hold the exclusive lock and
// are accepting requests. Duplicate deliveries (r == 0, !new_request) only
// re-ack; errors from accept_requests are reported back to the requester.
template <typename I>
bool ImageWatcher<I>::handle_payload(const FlattenPayload &payload,
                                     C_NotifyAck *ack_ctx) {

  RWLock::RLocker l(m_image_ctx.owner_lock);
  if (m_image_ctx.exclusive_lock != nullptr) {
    int r;
    if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
      bool new_request;
      Context *ctx;
      ProgressContext *prog_ctx;
      r = prepare_async_request(payload.async_request_id, &new_request,
                                &ctx, &prog_ctx);
      if (r == 0 && new_request) {
        ldout(m_image_ctx.cct, 10) << this << " remote flatten request: "
                                   << payload.async_request_id << dendl;
        m_image_ctx.operations->execute_flatten(*prog_ctx, ctx);
      }

      ::encode(ResponseMessage(r), ack_ctx->out);
    } else if (r < 0) {
      ::encode(ResponseMessage(r), ack_ctx->out);
    }
  }
  return true;
}
707 | ||
// Remote resize request: serviced only if we hold the exclusive lock and
// are accepting requests; duplicate deliveries only re-ack.
template <typename I>
bool ImageWatcher<I>::handle_payload(const ResizePayload &payload,
                                     C_NotifyAck *ack_ctx) {
  RWLock::RLocker l(m_image_ctx.owner_lock);
  if (m_image_ctx.exclusive_lock != nullptr) {
    int r;
    if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
      bool new_request;
      Context *ctx;
      ProgressContext *prog_ctx;
      r = prepare_async_request(payload.async_request_id, &new_request,
                                &ctx, &prog_ctx);
      if (r == 0 && new_request) {
        ldout(m_image_ctx.cct, 10) << this << " remote resize request: "
                                   << payload.async_request_id << " "
                                   << payload.size << " "
                                   << payload.allow_shrink << dendl;
        m_image_ctx.operations->execute_resize(payload.size, payload.allow_shrink, *prog_ctx, ctx, 0);
      }

      ::encode(ResponseMessage(r), ack_ctx->out);
    } else if (r < 0) {
      ::encode(ResponseMessage(r), ack_ctx->out);
    }
  }
  return true;
}
735 | ||
// Remote snap_create request: executed locally when we hold the lock and
// accept requests. Returns false (ack deferred) while the op is in flight;
// C_ResponseMessage acks with the operation result.
template <typename I>
bool ImageWatcher<I>::handle_payload(const SnapCreatePayload &payload,
                                     C_NotifyAck *ack_ctx) {
  RWLock::RLocker l(m_image_ctx.owner_lock);
  if (m_image_ctx.exclusive_lock != nullptr) {
    int r;
    if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
      ldout(m_image_ctx.cct, 10) << this << " remote snap_create request: "
                                 << payload.snap_name << dendl;

      m_image_ctx.operations->execute_snap_create(payload.snap_namespace,
                                                  payload.snap_name,
                                                  new C_ResponseMessage(ack_ctx),
                                                  0, false);
      return false;
    } else if (r < 0) {
      ::encode(ResponseMessage(r), ack_ctx->out);
    }
  }
  return true;
}
757 | ||
// Remote snap_rename request: executed locally when we hold the lock and
// accept requests; ack deferred until the operation completes.
template <typename I>
bool ImageWatcher<I>::handle_payload(const SnapRenamePayload &payload,
                                     C_NotifyAck *ack_ctx) {
  RWLock::RLocker l(m_image_ctx.owner_lock);
  if (m_image_ctx.exclusive_lock != nullptr) {
    int r;
    if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
      ldout(m_image_ctx.cct, 10) << this << " remote snap_rename request: "
                                 << payload.snap_id << " to "
                                 << payload.snap_name << dendl;

      m_image_ctx.operations->execute_snap_rename(payload.snap_id,
                                                  payload.snap_name,
                                                  new C_ResponseMessage(ack_ctx));
      return false;
    } else if (r < 0) {
      ::encode(ResponseMessage(r), ack_ctx->out);
    }
  }
  return true;
}
779 | ||
// Remote snap_remove request: executed locally when we hold the lock and
// accept requests; ack deferred until the operation completes.
template <typename I>
bool ImageWatcher<I>::handle_payload(const SnapRemovePayload &payload,
                                     C_NotifyAck *ack_ctx) {
  RWLock::RLocker l(m_image_ctx.owner_lock);
  if (m_image_ctx.exclusive_lock != nullptr) {
    int r;
    if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
      ldout(m_image_ctx.cct, 10) << this << " remote snap_remove request: "
                                 << payload.snap_name << dendl;

      m_image_ctx.operations->execute_snap_remove(payload.snap_namespace,
                                                  payload.snap_name,
                                                  new C_ResponseMessage(ack_ctx));
      return false;
    } else if (r < 0) {
      ::encode(ResponseMessage(r), ack_ctx->out);
    }
  }
  return true;
}
800 | ||
// Remote snap_protect request: executed locally when we hold the lock and
// accept requests; ack deferred until the operation completes.
template <typename I>
bool ImageWatcher<I>::handle_payload(const SnapProtectPayload& payload,
                                     C_NotifyAck *ack_ctx) {
  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
  if (m_image_ctx.exclusive_lock != nullptr) {
    int r;
    if (m_image_ctx.exclusive_lock->accept_requests(&r)) {
      ldout(m_image_ctx.cct, 10) << this << " remote snap_protect request: "
                                 << payload.snap_name << dendl;

      m_image_ctx.operations->execute_snap_protect(payload.snap_namespace,
                                                   payload.snap_name,
                                                   new C_ResponseMessage(ack_ctx));
      return false;
    } else if (r < 0) {
      ::encode(ResponseMessage(r), ack_ctx->out);
    }
  }
  return true;
}
821 | ||
822 | template <typename I> | |
823 | bool ImageWatcher<I>::handle_payload(const SnapUnprotectPayload& payload, | |
824 | C_NotifyAck *ack_ctx) { | |
825 | RWLock::RLocker owner_locker(m_image_ctx.owner_lock); | |
826 | if (m_image_ctx.exclusive_lock != nullptr) { | |
827 | int r; | |
828 | if (m_image_ctx.exclusive_lock->accept_requests(&r)) { | |
829 | ldout(m_image_ctx.cct, 10) << this << " remote snap_unprotect request: " | |
830 | << payload.snap_name << dendl; | |
831 | ||
832 | m_image_ctx.operations->execute_snap_unprotect(payload.snap_namespace, | |
833 | payload.snap_name, | |
834 | new C_ResponseMessage(ack_ctx)); | |
835 | return false; | |
836 | } else if (r < 0) { | |
837 | ::encode(ResponseMessage(r), ack_ctx->out); | |
838 | } | |
839 | } | |
840 | return true; | |
841 | } | |
842 | ||
843 | template <typename I> | |
844 | bool ImageWatcher<I>::handle_payload(const RebuildObjectMapPayload& payload, | |
845 | C_NotifyAck *ack_ctx) { | |
846 | RWLock::RLocker l(m_image_ctx.owner_lock); | |
847 | if (m_image_ctx.exclusive_lock != nullptr) { | |
848 | int r; | |
849 | if (m_image_ctx.exclusive_lock->accept_requests(&r)) { | |
850 | bool new_request; | |
851 | Context *ctx; | |
852 | ProgressContext *prog_ctx; | |
853 | r = prepare_async_request(payload.async_request_id, &new_request, | |
854 | &ctx, &prog_ctx); | |
855 | if (r == 0 && new_request) { | |
856 | ldout(m_image_ctx.cct, 10) << this | |
857 | << " remote rebuild object map request: " | |
858 | << payload.async_request_id << dendl; | |
859 | m_image_ctx.operations->execute_rebuild_object_map(*prog_ctx, ctx); | |
860 | } | |
861 | ||
862 | ::encode(ResponseMessage(r), ack_ctx->out); | |
863 | } else if (r < 0) { | |
864 | ::encode(ResponseMessage(r), ack_ctx->out); | |
865 | } | |
866 | } | |
867 | return true; | |
868 | } | |
869 | ||
870 | template <typename I> | |
871 | bool ImageWatcher<I>::handle_payload(const RenamePayload& payload, | |
872 | C_NotifyAck *ack_ctx) { | |
873 | RWLock::RLocker owner_locker(m_image_ctx.owner_lock); | |
874 | if (m_image_ctx.exclusive_lock != nullptr) { | |
875 | int r; | |
876 | if (m_image_ctx.exclusive_lock->accept_requests(&r)) { | |
877 | ldout(m_image_ctx.cct, 10) << this << " remote rename request: " | |
878 | << payload.image_name << dendl; | |
879 | ||
880 | m_image_ctx.operations->execute_rename(payload.image_name, | |
881 | new C_ResponseMessage(ack_ctx)); | |
882 | return false; | |
883 | } else if (r < 0) { | |
884 | ::encode(ResponseMessage(r), ack_ctx->out); | |
885 | } | |
886 | } | |
887 | return true; | |
888 | } | |
889 | ||
890 | template <typename I> | |
891 | bool ImageWatcher<I>::handle_payload(const UpdateFeaturesPayload& payload, | |
892 | C_NotifyAck *ack_ctx) { | |
893 | RWLock::RLocker owner_locker(m_image_ctx.owner_lock); | |
894 | if (m_image_ctx.exclusive_lock != nullptr) { | |
895 | int r; | |
896 | if (m_image_ctx.exclusive_lock->accept_requests(&r)) { | |
897 | ldout(m_image_ctx.cct, 10) << this << " remote update_features request: " | |
898 | << payload.features << " " | |
899 | << (payload.enabled ? "enabled" : "disabled") | |
900 | << dendl; | |
901 | ||
902 | m_image_ctx.operations->execute_update_features( | |
903 | payload.features, payload.enabled, new C_ResponseMessage(ack_ctx), 0); | |
904 | return false; | |
905 | } else if (r < 0) { | |
906 | ::encode(ResponseMessage(r), ack_ctx->out); | |
907 | } | |
908 | } | |
909 | return true; | |
910 | } | |
911 | ||
912 | template <typename I> | |
913 | bool ImageWatcher<I>::handle_payload(const UnknownPayload &payload, | |
914 | C_NotifyAck *ack_ctx) { | |
915 | RWLock::RLocker l(m_image_ctx.owner_lock); | |
916 | if (m_image_ctx.exclusive_lock != nullptr) { | |
917 | int r; | |
918 | if (m_image_ctx.exclusive_lock->accept_requests(&r) || r < 0) { | |
919 | ::encode(ResponseMessage(-EOPNOTSUPP), ack_ctx->out); | |
920 | } | |
921 | } | |
922 | return true; | |
923 | } | |
924 | ||
925 | template <typename I> | |
926 | void ImageWatcher<I>::process_payload(uint64_t notify_id, uint64_t handle, | |
31f18b77 FG |
927 | const Payload &payload) { |
928 | apply_visitor(HandlePayloadVisitor<ImageWatcher<I>>(this, notify_id, handle), | |
929 | payload); | |
7c673cae FG |
930 | } |
931 | ||
932 | template <typename I> | |
933 | void ImageWatcher<I>::handle_notify(uint64_t notify_id, uint64_t handle, | |
934 | uint64_t notifier_id, bufferlist &bl) { | |
935 | NotifyMessage notify_message; | |
936 | if (bl.length() == 0) { | |
937 | // legacy notification for header updates | |
938 | notify_message = NotifyMessage(HeaderUpdatePayload()); | |
939 | } else { | |
940 | try { | |
941 | bufferlist::iterator iter = bl.begin(); | |
942 | ::decode(notify_message, iter); | |
943 | } catch (const buffer::error &err) { | |
944 | lderr(m_image_ctx.cct) << this << " error decoding image notification: " | |
945 | << err.what() << dendl; | |
946 | return; | |
947 | } | |
948 | } | |
949 | ||
950 | // if an image refresh is required, refresh before processing the request | |
951 | if (notify_message.check_for_refresh() && | |
952 | m_image_ctx.state->is_refresh_required()) { | |
953 | m_image_ctx.state->refresh(new C_ProcessPayload(this, notify_id, handle, | |
954 | notify_message.payload)); | |
955 | } else { | |
31f18b77 | 956 | process_payload(notify_id, handle, notify_message.payload); |
7c673cae FG |
957 | } |
958 | } | |
959 | ||
960 | template <typename I> | |
961 | void ImageWatcher<I>::handle_error(uint64_t handle, int err) { | |
962 | lderr(m_image_ctx.cct) << this << " image watch failed: " << handle << ", " | |
963 | << cpp_strerror(err) << dendl; | |
964 | ||
965 | { | |
966 | Mutex::Locker l(m_owner_client_id_lock); | |
967 | set_owner_client_id(ClientId()); | |
968 | } | |
969 | ||
970 | Watcher::handle_error(handle, err); | |
971 | } | |
972 | ||
973 | template <typename I> | |
974 | void ImageWatcher<I>::handle_rewatch_complete(int r) { | |
975 | CephContext *cct = m_image_ctx.cct; | |
976 | ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl; | |
977 | ||
978 | { | |
979 | RWLock::RLocker owner_locker(m_image_ctx.owner_lock); | |
980 | if (m_image_ctx.exclusive_lock != nullptr) { | |
981 | // update the lock cookie with the new watch handle | |
982 | m_image_ctx.exclusive_lock->reacquire_lock(); | |
983 | } | |
984 | } | |
985 | ||
986 | // image might have been updated while we didn't have active watch | |
987 | handle_payload(HeaderUpdatePayload(), nullptr); | |
988 | } | |
989 | ||
990 | template <typename I> | |
991 | void ImageWatcher<I>::send_notify(const Payload &payload, Context *ctx) { | |
992 | bufferlist bl; | |
993 | ||
994 | ::encode(NotifyMessage(payload), bl); | |
995 | Watcher::send_notify(bl, nullptr, ctx); | |
996 | } | |
997 | ||
998 | template <typename I> | |
999 | void ImageWatcher<I>::RemoteContext::finish(int r) { | |
1000 | m_image_watcher.schedule_async_complete(m_async_request_id, r); | |
1001 | } | |
1002 | ||
1003 | template <typename I> | |
1004 | void ImageWatcher<I>::C_ResponseMessage::finish(int r) { | |
1005 | CephContext *cct = notify_ack->cct; | |
1006 | ldout(cct, 10) << this << " C_ResponseMessage: r=" << r << dendl; | |
1007 | ||
1008 | ::encode(ResponseMessage(r), notify_ack->out); | |
1009 | notify_ack->complete(0); | |
1010 | } | |
1011 | ||
1012 | } // namespace librbd | |
1013 | ||
1014 | template class librbd::ImageWatcher<librbd::ImageCtx>; |