// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "librbd/image/RefreshRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/lock/cls_lock_client.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/Journal.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#include "librbd/image/RefreshParentRequest.h"
#include "librbd/io/ImageRequestWQ.h"
#include "librbd/journal/Policy.h"

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::image::RefreshRequest: "

namespace librbd {
namespace image {

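// Refresh state machine (mirrors the control flow implemented below):
//
// v1 images: READ_HEADER -> GET_SNAPSHOTS -> GET_LOCKS -> APPLY -> FLUSH_AIO
//
// v2 images: GET_MUTABLE_METADATA -> GET_FLAGS -> GET_GROUP -> GET_SNAPSHOTS
//            -> GET_SNAP_TIMESTAMPS -> GET_SNAP_NAMESPACES -> REFRESH_PARENT
//            -> INIT_EXCLUSIVE_LOCK (then directly to APPLY), or
//            -> OPEN_OBJECT_MAP -> OPEN_JOURNAL -> BLOCK_WRITES -> APPLY
//            -> FINALIZE_REFRESH_PARENT -> SHUT_DOWN_EXCLUSIVE_LOCK
//            -> CLOSE_JOURNAL -> CLOSE_OBJECT_MAP -> FLUSH_AIO
//
// A failed step either completes the request immediately via m_on_finish or
// records the error with save_result() and continues through the cleanup
// states.
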
using util::create_rados_callback;
using util::create_async_context_callback;
using util::create_context_callback;

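// The util::create_*_callback helpers bind a member function as the
// completion of an asynchronous operation: create_rados_callback<klass,
// &klass::handle_foo>(this) yields a librados::AioCompletion that invokes
// this->handle_foo(&r) and then completes whatever Context the handler
// returns. (handle_foo is a placeholder name for illustration.)
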
template <typename I>
RefreshRequest<I>::RefreshRequest(I &image_ctx, bool acquiring_lock,
                                  bool skip_open_parent, Context *on_finish)
  : m_image_ctx(image_ctx), m_acquiring_lock(acquiring_lock),
    m_skip_open_parent_image(skip_open_parent),
    m_on_finish(create_async_context_callback(m_image_ctx, on_finish)),
    m_error_result(0), m_flush_aio(false), m_exclusive_lock(nullptr),
    m_object_map(nullptr), m_journal(nullptr), m_refresh_parent(nullptr) {
}

template <typename I>
RefreshRequest<I>::~RefreshRequest() {
  // the state machine must have closed these before the request is destroyed
  assert(m_exclusive_lock == nullptr);
  assert(m_object_map == nullptr);
  assert(m_journal == nullptr);
  assert(m_refresh_parent == nullptr);
  assert(!m_blocked_writes);
}

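// A minimal usage sketch (hypothetical caller; assumes the create() factory
// declared in RefreshRequest.h and a C_SaferCond completion context):
//
//   C_SaferCond ctx;
//   auto *req = RefreshRequest<ImageCtx>::create(
//     *image_ctx, false /* acquiring_lock */, false /* skip_open_parent */,
//     &ctx);
//   req->send();
//   int r = ctx.wait();  // 0 on success, negative errno on failure
//
// In practice ImageState drives this request; the sketch only illustrates
// the send() / on_finish contract.
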
template <typename I>
void RefreshRequest<I>::send() {
  if (m_image_ctx.old_format) {
    send_v1_read_header();
  } else {
    send_v2_get_mutable_metadata();
  }
}

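// Note: a read of length 0 at offset 0 is treated by the OSD as a request
// for the entire object, so this fetches the complete v1 header.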
template <typename I>
void RefreshRequest<I>::send_v1_read_header() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  librados::ObjectReadOperation op;
  op.read(0, 0, nullptr, nullptr);

  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v1_read_header>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                         &m_out_bl);
  assert(r == 0);
  comp->release();
}

template <typename I>
Context *RefreshRequest<I>::handle_v1_read_header(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": " << "r=" << *result << dendl;

  rbd_obj_header_ondisk v1_header;
  if (*result < 0) {
    return m_on_finish;
  } else if (m_out_bl.length() < sizeof(v1_header)) {
    lderr(cct) << "v1 header too small" << dendl;
    *result = -EIO;
    return m_on_finish;
  } else if (memcmp(RBD_HEADER_TEXT, m_out_bl.c_str(),
                    sizeof(RBD_HEADER_TEXT)) != 0) {
    lderr(cct) << "unrecognized v1 header" << dendl;
    *result = -ENXIO;
    return m_on_finish;
  }

  memcpy(&v1_header, m_out_bl.c_str(), sizeof(v1_header));
  m_order = v1_header.options.order;
  m_size = v1_header.image_size;
  m_object_prefix = v1_header.block_name;
  send_v1_get_snapshots();
  return nullptr;
}

template <typename I>
void RefreshRequest<I>::send_v1_get_snapshots() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  librados::ObjectReadOperation op;
  cls_client::old_snapshot_list_start(&op);

  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v1_get_snapshots>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                         &m_out_bl);
  assert(r == 0);
  comp->release();
}

template <typename I>
Context *RefreshRequest<I>::handle_v1_get_snapshots(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": " << "r=" << *result << dendl;

  if (*result == 0) {
    bufferlist::iterator it = m_out_bl.begin();
    *result = cls_client::old_snapshot_list_finish(
      &it, &m_snap_names, &m_snap_sizes, &m_snapc);
  }

  if (*result < 0) {
    lderr(cct) << "failed to retrieve v1 snapshots: " << cpp_strerror(*result)
               << dendl;
    return m_on_finish;
  }

  if (!m_snapc.is_valid()) {
    lderr(cct) << "v1 image snap context is invalid" << dendl;
    *result = -EIO;
    return m_on_finish;
  }

  m_snap_namespaces = std::vector<cls::rbd::SnapshotNamespace>(
    m_snap_names.size(), cls::rbd::UserSnapshotNamespace());

  m_snap_timestamps = std::vector<utime_t>(m_snap_names.size(), utime_t());

  send_v1_get_locks();
  return nullptr;
}

template <typename I>
void RefreshRequest<I>::send_v1_get_locks() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  librados::ObjectReadOperation op;
  rados::cls::lock::get_lock_info_start(&op, RBD_LOCK_NAME);

  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v1_get_locks>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                         &m_out_bl);
  assert(r == 0);
  comp->release();
}

template <typename I>
Context *RefreshRequest<I>::handle_v1_get_locks(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": "
                 << "r=" << *result << dendl;

  // If EOPNOTSUPP, treat image as if there are no locks (we can't
  // query them).
  if (*result == -EOPNOTSUPP) {
    *result = 0;
  } else if (*result == 0) {
    bufferlist::iterator it = m_out_bl.begin();
    ClsLockType lock_type;
    *result = rados::cls::lock::get_lock_info_finish(&it, &m_lockers,
                                                     &lock_type, &m_lock_tag);
    if (*result == 0) {
      m_exclusive_locked = (lock_type == LOCK_EXCLUSIVE);
    }
  }
  if (*result < 0) {
    lderr(cct) << "failed to retrieve locks: " << cpp_strerror(*result)
               << dendl;
    return m_on_finish;
  }

  send_v1_apply();
  return nullptr;
}

template <typename I>
void RefreshRequest<I>::send_v1_apply() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  // ensure we are not in a rados callback when applying updates
  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v1_apply>(this);
  m_image_ctx.op_work_queue->queue(ctx, 0);
}

template <typename I>
Context *RefreshRequest<I>::handle_v1_apply(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  apply();
  return send_flush_aio();
}

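// The read-only hint (a snapshot open is implicitly read-only) lets the
// cls_rbd method validate against the weaker read-only incompatible-feature
// mask instead of the read/write one.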
template <typename I>
void RefreshRequest<I>::send_v2_get_mutable_metadata() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  uint64_t snap_id;
  {
    RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
    snap_id = m_image_ctx.snap_id;
  }

  bool read_only = m_image_ctx.read_only || snap_id != CEPH_NOSNAP;
  librados::ObjectReadOperation op;
  cls_client::get_mutable_metadata_start(&op, read_only);

  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v2_get_mutable_metadata>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                         &m_out_bl);
  assert(r == 0);
  comp->release();
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_get_mutable_metadata(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": "
                 << "r=" << *result << dendl;

  if (*result == 0) {
    bufferlist::iterator it = m_out_bl.begin();
    *result = cls_client::get_mutable_metadata_finish(&it, &m_size, &m_features,
                                                      &m_incompatible_features,
                                                      &m_lockers,
                                                      &m_exclusive_locked,
                                                      &m_lock_tag, &m_snapc,
                                                      &m_parent_md);
  }
  if (*result < 0) {
    lderr(cct) << "failed to retrieve mutable metadata: "
               << cpp_strerror(*result) << dendl;
    return m_on_finish;
  }

  uint64_t unsupported = m_incompatible_features & ~RBD_FEATURES_ALL;
  if (unsupported != 0ULL) {
    lderr(cct) << "Image uses unsupported features: " << unsupported << dendl;
    *result = -ENOSYS;
    return m_on_finish;
  }

  if (!m_snapc.is_valid()) {
    lderr(cct) << "image snap context is invalid!" << dendl;
    *result = -EIO;
    return m_on_finish;
  }

  if (m_acquiring_lock && (m_features & RBD_FEATURE_EXCLUSIVE_LOCK) == 0) {
    ldout(cct, 5) << "ignoring dynamically disabled exclusive lock" << dendl;
    m_features |= RBD_FEATURE_EXCLUSIVE_LOCK;
    m_incomplete_update = true;
  }

  send_v2_get_flags();
  return nullptr;
}

template <typename I>
void RefreshRequest<I>::send_v2_get_flags() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  librados::ObjectReadOperation op;
  cls_client::get_flags_start(&op, m_snapc.snaps);

  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v2_get_flags>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                         &m_out_bl);
  assert(r == 0);
  comp->release();
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_get_flags(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": "
                 << "r=" << *result << dendl;

  if (*result == 0) {
    bufferlist::iterator it = m_out_bl.begin();
    *result = cls_client::get_flags_finish(&it, &m_flags, m_snapc.snaps,
                                           &m_snap_flags);
  }
  if (*result == -EOPNOTSUPP) {
    // Older OSD doesn't support RBD flags, need to assume the worst
    *result = 0;
    ldout(cct, 10) << "OSD does not support RBD flags, disabling object map "
                   << "optimizations" << dendl;
    m_flags = RBD_FLAG_OBJECT_MAP_INVALID;
    if ((m_features & RBD_FEATURE_FAST_DIFF) != 0) {
      m_flags |= RBD_FLAG_FAST_DIFF_INVALID;
    }

    std::vector<uint64_t> default_flags(m_snapc.snaps.size(), m_flags);
    m_snap_flags = std::move(default_flags);
  } else if (*result == -ENOENT) {
    ldout(cct, 10) << "out-of-sync snapshot state detected" << dendl;
    send_v2_get_mutable_metadata();
    return nullptr;
  } else if (*result < 0) {
    lderr(cct) << "failed to retrieve flags: " << cpp_strerror(*result)
               << dendl;
    return m_on_finish;
  }

  send_v2_get_group();
  return nullptr;
}

template <typename I>
void RefreshRequest<I>::send_v2_get_group() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  librados::ObjectReadOperation op;
  cls_client::image_get_group_start(&op);

  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v2_get_group>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                         &m_out_bl);
  assert(r == 0);
  comp->release();
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_get_group(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": "
                 << "r=" << *result << dendl;

  if (*result == 0) {
    bufferlist::iterator it = m_out_bl.begin();
    *result = cls_client::image_get_group_finish(&it, &m_group_spec);
  }
  if (*result == -EOPNOTSUPP) {
    // Older OSD doesn't support RBD groups
    *result = 0;
    ldout(cct, 10) << "OSD does not support consistency groups" << dendl;
  } else if (*result < 0) {
    lderr(cct) << "failed to retrieve group: " << cpp_strerror(*result)
               << dendl;
    return m_on_finish;
  }

  send_v2_get_snapshots();
  return nullptr;
}

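// From here on, -ENOENT from a per-snapshot query means the snapshot set
// changed underneath us; the state machine restarts from
// GET_MUTABLE_METADATA to fetch a consistent view.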
template <typename I>
void RefreshRequest<I>::send_v2_get_snapshots() {
  if (m_snapc.snaps.empty()) {
    m_snap_names.clear();
    m_snap_namespaces.clear();
    m_snap_sizes.clear();
    m_snap_parents.clear();
    m_snap_protection.clear();
    m_snap_timestamps.clear();
    send_v2_refresh_parent();
    return;
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  librados::ObjectReadOperation op;
  cls_client::snapshot_list_start(&op, m_snapc.snaps);

  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v2_get_snapshots>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                         &m_out_bl);
  assert(r == 0);
  comp->release();
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_get_snapshots(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": "
                 << "r=" << *result << dendl;

  if (*result == 0) {
    bufferlist::iterator it = m_out_bl.begin();
    *result = cls_client::snapshot_list_finish(&it, m_snapc.snaps,
                                               &m_snap_names,
                                               &m_snap_sizes,
                                               &m_snap_parents,
                                               &m_snap_protection);
  }
  if (*result == -ENOENT) {
    ldout(cct, 10) << "out-of-sync snapshot state detected" << dendl;
    send_v2_get_mutable_metadata();
    return nullptr;
  } else if (*result < 0) {
    lderr(cct) << "failed to retrieve snapshots: " << cpp_strerror(*result)
               << dendl;
    return m_on_finish;
  }

  send_v2_get_snap_timestamps();
  return nullptr;
}

template <typename I>
void RefreshRequest<I>::send_v2_get_snap_timestamps() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  librados::ObjectReadOperation op;
  cls_client::snapshot_timestamp_list_start(&op, m_snapc.snaps);

  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v2_get_snap_timestamps>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                         &m_out_bl);
  assert(r == 0);
  comp->release();
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_get_snap_timestamps(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": " << "r=" << *result << dendl;

  if (*result == 0) {
    bufferlist::iterator it = m_out_bl.begin();
    *result = cls_client::snapshot_timestamp_list_finish(&it, m_snapc.snaps,
                                                         &m_snap_timestamps);
  }
  if (*result == -ENOENT) {
    ldout(cct, 10) << "out-of-sync snapshot state detected" << dendl;
    send_v2_get_mutable_metadata();
    return nullptr;
  } else if (*result == -EOPNOTSUPP) {
    // ignore the error -- older OSDs have no snapshot timestamps to report
    m_snap_timestamps = std::vector<utime_t>(m_snap_names.size(), utime_t());
  } else if (*result < 0) {
    lderr(cct) << "failed to retrieve snapshot timestamps: "
               << cpp_strerror(*result) << dendl;
    return m_on_finish;
  }

  send_v2_get_snap_namespaces();
  return nullptr;
}

template <typename I>
void RefreshRequest<I>::send_v2_get_snap_namespaces() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  librados::ObjectReadOperation op;
  cls_client::snapshot_namespace_list_start(&op, m_snapc.snaps);

  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v2_get_snap_namespaces>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                         &m_out_bl);
  assert(r == 0);
  comp->release();
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_get_snap_namespaces(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": "
                 << "r=" << *result << dendl;

  if (*result == 0) {
    bufferlist::iterator it = m_out_bl.begin();
    *result = cls_client::snapshot_namespace_list_finish(&it, m_snapc.snaps,
                                                         &m_snap_namespaces);
  }
  if (*result == -ENOENT) {
    ldout(cct, 10) << "out-of-sync snapshot state detected" << dendl;
    send_v2_get_mutable_metadata();
    return nullptr;
  } else if (*result == -EOPNOTSUPP) {
    // ignore the error -- older OSDs have no snapshot namespaces to report
    m_snap_namespaces = std::vector<cls::rbd::SnapshotNamespace>(
      m_snap_names.size(), cls::rbd::UserSnapshotNamespace());
  } else if (*result < 0) {
    lderr(cct) << "failed to retrieve snapshot namespaces: "
               << cpp_strerror(*result) << dendl;
    return m_on_finish;
  }

  send_v2_refresh_parent();
  return nullptr;
}

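// Re-opens the parent image when this image is a clone and the parent
// linkage in the refreshed metadata no longer matches the currently opened
// parent (see RefreshParentRequest<I>::is_refresh_required()).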
template <typename I>
void RefreshRequest<I>::send_v2_refresh_parent() {
  {
    RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
    RWLock::RLocker parent_locker(m_image_ctx.parent_lock);

    ParentInfo parent_md;
    int r = get_parent_info(m_image_ctx.snap_id, &parent_md);
    if (!m_skip_open_parent_image && (r < 0 ||
        RefreshParentRequest<I>::is_refresh_required(m_image_ctx, parent_md))) {
      CephContext *cct = m_image_ctx.cct;
      ldout(cct, 10) << this << " " << __func__ << dendl;

      using klass = RefreshRequest<I>;
      Context *ctx = create_context_callback<
        klass, &klass::handle_v2_refresh_parent>(this);
      m_refresh_parent = RefreshParentRequest<I>::create(
        m_image_ctx, parent_md, ctx);
    }
  }

  if (m_refresh_parent != nullptr) {
    m_refresh_parent->send();
  } else {
    send_v2_init_exclusive_lock();
  }
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_refresh_parent(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to refresh parent image: " << cpp_strerror(*result)
               << dendl;
    save_result(result);
    send_v2_apply();
    return nullptr;
  }

  send_v2_init_exclusive_lock();
  return nullptr;
}

template <typename I>
void RefreshRequest<I>::send_v2_init_exclusive_lock() {
  if ((m_features & RBD_FEATURE_EXCLUSIVE_LOCK) == 0 ||
      m_image_ctx.read_only || !m_image_ctx.snap_name.empty() ||
      m_image_ctx.exclusive_lock != nullptr) {
    send_v2_open_object_map();
    return;
  }

  // implies exclusive lock dynamically enabled or image open in-progress
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  // TODO need safe shut down
  m_exclusive_lock = m_image_ctx.create_exclusive_lock();

  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v2_init_exclusive_lock>(this);

  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
  m_exclusive_lock->init(m_features, ctx);
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_init_exclusive_lock(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to initialize exclusive lock: "
               << cpp_strerror(*result) << dendl;
    save_result(result);
  }

  // object map and journal will be opened when exclusive lock is
  // acquired (if features are enabled)
  send_v2_apply();
  return nullptr;
}

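// The journal is only opened here when journaling was dynamically enabled
// while this client already owns the exclusive lock; in every other case
// ExclusiveLock opens the journal as part of acquiring the lock.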
template <typename I>
void RefreshRequest<I>::send_v2_open_journal() {
  bool journal_disabled = (
    (m_features & RBD_FEATURE_JOURNALING) == 0 ||
     m_image_ctx.read_only ||
     !m_image_ctx.snap_name.empty() ||
     m_image_ctx.journal != nullptr ||
     m_image_ctx.exclusive_lock == nullptr ||
     !m_image_ctx.exclusive_lock->is_lock_owner());
  bool journal_disabled_by_policy;
  {
    RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
    journal_disabled_by_policy = (
      !journal_disabled &&
      m_image_ctx.get_journal_policy()->journal_disabled());
  }

  if (journal_disabled || journal_disabled_by_policy) {
    // journal dynamically enabled -- doesn't own exclusive lock
    if ((m_features & RBD_FEATURE_JOURNALING) != 0 &&
        !journal_disabled_by_policy &&
        m_image_ctx.exclusive_lock != nullptr &&
        m_image_ctx.journal == nullptr) {
      m_image_ctx.io_work_queue->set_require_lock_on_read();
    }
    send_v2_block_writes();
    return;
  }

  // implies journal dynamically enabled since ExclusiveLock will init
  // the journal upon acquiring the lock
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v2_open_journal>(this);

  // TODO need safe close
  m_journal = m_image_ctx.create_journal();
  m_journal->open(ctx);
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_open_journal(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to initialize journal: " << cpp_strerror(*result)
               << dendl;
    save_result(result);
  }

  send_v2_block_writes();
  return nullptr;
}

template <typename I>
void RefreshRequest<I>::send_v2_block_writes() {
  bool disabled_journaling = false;
  {
    RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
    disabled_journaling = ((m_features & RBD_FEATURE_EXCLUSIVE_LOCK) != 0 &&
                           (m_features & RBD_FEATURE_JOURNALING) == 0 &&
                           m_image_ctx.journal != nullptr);
  }

  if (!disabled_journaling) {
    send_v2_apply();
    return;
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  // we need to block writes temporarily to avoid in-flight journal
  // writes
  m_blocked_writes = true;
  Context *ctx = create_context_callback<
    RefreshRequest<I>, &RefreshRequest<I>::handle_v2_block_writes>(this);

  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
  m_image_ctx.io_work_queue->block_writes(ctx);
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_block_writes(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to block writes: " << cpp_strerror(*result)
               << dendl;
    save_result(result);
  }
  send_v2_apply();
  return nullptr;
}

template <typename I>
void RefreshRequest<I>::send_v2_open_object_map() {
  if ((m_features & RBD_FEATURE_OBJECT_MAP) == 0 ||
      m_image_ctx.object_map != nullptr ||
      (m_image_ctx.snap_name.empty() &&
       (m_image_ctx.read_only ||
        m_image_ctx.exclusive_lock == nullptr ||
        !m_image_ctx.exclusive_lock->is_lock_owner()))) {
    send_v2_open_journal();
    return;
  }

  // implies object map dynamically enabled or image open in-progress
  // since SetSnapRequest loads the object map for a snapshot and
  // ExclusiveLock loads the object map for HEAD
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  if (m_image_ctx.snap_name.empty()) {
    m_object_map = m_image_ctx.create_object_map(CEPH_NOSNAP);
  } else {
    for (size_t snap_idx = 0; snap_idx < m_snap_names.size(); ++snap_idx) {
      if (m_snap_names[snap_idx] == m_image_ctx.snap_name) {
        m_object_map = m_image_ctx.create_object_map(
          m_snapc.snaps[snap_idx].val);
        break;
      }
    }

    if (m_object_map == nullptr) {
      lderr(cct) << "failed to locate snapshot: " << m_image_ctx.snap_name
                 << dendl;
      send_v2_open_journal();
      return;
    }
  }

  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v2_open_object_map>(this);
  m_object_map->open(ctx);
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_open_object_map(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to open object map: " << cpp_strerror(*result)
               << dendl;
    delete m_object_map;
    m_object_map = nullptr;
  }

  send_v2_open_journal();
  return nullptr;
}

template <typename I>
void RefreshRequest<I>::send_v2_apply() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  // ensure we are not in a rados callback when applying updates
  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v2_apply>(this);
  m_image_ctx.op_work_queue->queue(ctx, 0);
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_apply(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  apply();

  return send_v2_finalize_refresh_parent();
}

template <typename I>
Context *RefreshRequest<I>::send_v2_finalize_refresh_parent() {
  if (m_refresh_parent == nullptr) {
    return send_v2_shut_down_exclusive_lock();
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v2_finalize_refresh_parent>(this);
  m_refresh_parent->finalize(ctx);
  return nullptr;
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_finalize_refresh_parent(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;

  assert(m_refresh_parent != nullptr);
  delete m_refresh_parent;
  m_refresh_parent = nullptr;

  return send_v2_shut_down_exclusive_lock();
}

template <typename I>
Context *RefreshRequest<I>::send_v2_shut_down_exclusive_lock() {
  if (m_exclusive_lock == nullptr) {
    return send_v2_close_journal();
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  // exclusive lock feature was dynamically disabled. in-flight IO will be
  // flushed and in-flight requests will be canceled before releasing lock
  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v2_shut_down_exclusive_lock>(this);
  m_exclusive_lock->shut_down(ctx);
  return nullptr;
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_shut_down_exclusive_lock(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to shut down exclusive lock: "
               << cpp_strerror(*result) << dendl;
    save_result(result);
  }

  {
    RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
    assert(m_image_ctx.exclusive_lock == nullptr);
  }

  assert(m_exclusive_lock != nullptr);
  delete m_exclusive_lock;
  m_exclusive_lock = nullptr;

  return send_v2_close_journal();
}

template <typename I>
Context *RefreshRequest<I>::send_v2_close_journal() {
  if (m_journal == nullptr) {
    return send_v2_close_object_map();
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  // journal feature was dynamically disabled
  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v2_close_journal>(this);
  m_journal->close(ctx);
  return nullptr;
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_close_journal(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    save_result(result);
    lderr(cct) << "failed to close journal: " << cpp_strerror(*result)
               << dendl;
  }

  assert(m_journal != nullptr);
  delete m_journal;
  m_journal = nullptr;

  assert(m_blocked_writes);
  m_blocked_writes = false;

  m_image_ctx.io_work_queue->unblock_writes();
  return send_v2_close_object_map();
}

template <typename I>
Context *RefreshRequest<I>::send_v2_close_object_map() {
  if (m_object_map == nullptr) {
    return send_flush_aio();
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  // object map was dynamically disabled
  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v2_close_object_map>(this);
  m_object_map->close(ctx);
  return nullptr;
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_close_object_map(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;

  assert(*result == 0);
  assert(m_object_map != nullptr);
  delete m_object_map;
  m_object_map = nullptr;

  return send_flush_aio();
}

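// -ERESTART signals ImageState that this was only a partial refresh (see
// m_incomplete_update above) and that the refresh should be re-run.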
template <typename I>
Context *RefreshRequest<I>::send_flush_aio() {
  if (m_incomplete_update && m_error_result == 0) {
    // if this was a partial refresh, notify ImageState
    m_error_result = -ERESTART;
  }

  if (m_flush_aio) {
    CephContext *cct = m_image_ctx.cct;
    ldout(cct, 10) << this << " " << __func__ << dendl;

    RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
    using klass = RefreshRequest<I>;
    Context *ctx = create_context_callback<
      klass, &klass::handle_flush_aio>(this);
    m_image_ctx.flush(ctx);
    return nullptr;
  } else if (m_error_result < 0) {
    // propagate saved error back to caller
    Context *ctx = create_context_callback<
      RefreshRequest<I>, &RefreshRequest<I>::handle_error>(this);
    m_image_ctx.op_work_queue->queue(ctx, 0);
    return nullptr;
  }

  return m_on_finish;
}

template <typename I>
Context *RefreshRequest<I>::handle_flush_aio(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to flush pending AIO: " << cpp_strerror(*result)
               << dendl;
  }

  return handle_error(result);
}

template <typename I>
Context *RefreshRequest<I>::handle_error(int *result) {
  if (m_error_result < 0) {
    *result = m_error_result;

    CephContext *cct = m_image_ctx.cct;
    ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
  }
  return m_on_finish;
}

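// apply() publishes the refreshed state into the ImageCtx under the full
// lock hierarchy (owner -> md -> cache -> snap -> parent) so readers never
// observe a partially updated image.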
template <typename I>
void RefreshRequest<I>::apply() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << dendl;

  RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
  RWLock::WLocker md_locker(m_image_ctx.md_lock);

  {
    Mutex::Locker cache_locker(m_image_ctx.cache_lock);
    RWLock::WLocker snap_locker(m_image_ctx.snap_lock);
    RWLock::WLocker parent_locker(m_image_ctx.parent_lock);

    m_image_ctx.size = m_size;
    m_image_ctx.lockers = m_lockers;
    m_image_ctx.lock_tag = m_lock_tag;
    m_image_ctx.exclusive_locked = m_exclusive_locked;

    if (m_image_ctx.old_format) {
      m_image_ctx.order = m_order;
      m_image_ctx.features = 0;
      m_image_ctx.flags = 0;
      m_image_ctx.object_prefix = std::move(m_object_prefix);
      m_image_ctx.init_layout();
    } else {
      m_image_ctx.features = m_features;
      m_image_ctx.flags = m_flags;
      m_image_ctx.group_spec = m_group_spec;
      m_image_ctx.parent_md = m_parent_md;
    }

    for (size_t i = 0; i < m_snapc.snaps.size(); ++i) {
      std::vector<librados::snap_t>::const_iterator it = std::find(
        m_image_ctx.snaps.begin(), m_image_ctx.snaps.end(),
        m_snapc.snaps[i].val);
      if (it == m_image_ctx.snaps.end()) {
        m_flush_aio = true;
        ldout(cct, 20) << "new snapshot id=" << m_snapc.snaps[i].val
                       << " name=" << m_snap_names[i]
                       << " size=" << m_snap_sizes[i]
                       << dendl;
      }
    }

    m_image_ctx.snaps.clear();
    m_image_ctx.snap_info.clear();
    m_image_ctx.snap_ids.clear();
    for (size_t i = 0; i < m_snapc.snaps.size(); ++i) {
      uint64_t flags = m_image_ctx.old_format ? 0 : m_snap_flags[i];
      uint8_t protection_status = m_image_ctx.old_format ?
        static_cast<uint8_t>(RBD_PROTECTION_STATUS_UNPROTECTED) :
        m_snap_protection[i];
      ParentInfo parent;
      if (!m_image_ctx.old_format) {
        parent = m_snap_parents[i];
      }

      m_image_ctx.add_snap(m_snap_namespaces[i], m_snap_names[i],
                           m_snapc.snaps[i].val, m_snap_sizes[i], parent,
                           protection_status, flags, m_snap_timestamps[i]);
    }
    m_image_ctx.snapc = m_snapc;

    if (m_image_ctx.snap_id != CEPH_NOSNAP &&
        m_image_ctx.get_snap_id(m_image_ctx.snap_namespace,
                                m_image_ctx.snap_name) != m_image_ctx.snap_id) {
      lderr(cct) << "tried to read from a snapshot that no longer exists: "
                 << m_image_ctx.snap_name << dendl;
      m_image_ctx.snap_exists = false;
    }

    if (m_refresh_parent != nullptr) {
      m_refresh_parent->apply();
    }
    m_image_ctx.data_ctx.selfmanaged_snap_set_write_ctx(m_image_ctx.snapc.seq,
                                                        m_image_ctx.snaps);

    // handle dynamically enabled / disabled features
    if (m_image_ctx.exclusive_lock != nullptr &&
        !m_image_ctx.test_features(RBD_FEATURE_EXCLUSIVE_LOCK,
                                   m_image_ctx.snap_lock)) {
      // disabling exclusive lock will automatically handle closing
      // object map and journaling
      assert(m_exclusive_lock == nullptr);
      m_exclusive_lock = m_image_ctx.exclusive_lock;
      m_image_ctx.io_work_queue->clear_require_lock_on_read();
    } else {
      if (m_exclusive_lock != nullptr) {
        assert(m_image_ctx.exclusive_lock == nullptr);
        std::swap(m_exclusive_lock, m_image_ctx.exclusive_lock);
      }
      if (!m_image_ctx.test_features(RBD_FEATURE_JOURNALING,
                                     m_image_ctx.snap_lock)) {
        if (m_image_ctx.journal != nullptr) {
          m_image_ctx.io_work_queue->clear_require_lock_on_read();
        }
        std::swap(m_journal, m_image_ctx.journal);
      } else if (m_journal != nullptr) {
        std::swap(m_journal, m_image_ctx.journal);
      }
      if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
                                     m_image_ctx.snap_lock) ||
          m_object_map != nullptr) {
        std::swap(m_object_map, m_image_ctx.object_map);
      }
      if (m_image_ctx.clone_copy_on_read &&
          m_image_ctx.io_work_queue->is_lock_required()) {
        m_image_ctx.io_work_queue->set_require_lock_on_read();
      }
    }
  }
}

template <typename I>
int RefreshRequest<I>::get_parent_info(uint64_t snap_id,
                                       ParentInfo *parent_md) {
  if (snap_id == CEPH_NOSNAP) {
    *parent_md = m_parent_md;
    return 0;
  } else {
    for (size_t i = 0; i < m_snapc.snaps.size(); ++i) {
      if (m_snapc.snaps[i].val == snap_id) {
        *parent_md = m_snap_parents[i];
        return 0;
      }
    }
  }
  return -ENOENT;
}

} // namespace image
} // namespace librbd

template class librbd::image::RefreshRequest<librbd::ImageCtx>;