1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
4 #include "include/ceph_assert.h"
6 #include "librbd/image/RefreshRequest.h"
7 #include "common/dout.h"
8 #include "common/errno.h"
9 #include "cls/lock/cls_lock_client.h"
10 #include "cls/rbd/cls_rbd_client.h"
11 #include "librbd/ExclusiveLock.h"
12 #include "librbd/ImageCtx.h"
13 #include "librbd/ImageWatcher.h"
14 #include "librbd/Journal.h"
15 #include "librbd/ObjectMap.h"
16 #include "librbd/Utils.h"
17 #include "librbd/deep_copy/Utils.h"
18 #include "librbd/image/GetMetadataRequest.h"
19 #include "librbd/image/RefreshParentRequest.h"
20 #include "librbd/io/AioCompletion.h"
21 #include "librbd/io/ImageDispatchSpec.h"
22 #include "librbd/io/ImageRequestWQ.h"
23 #include "librbd/journal/Policy.h"
25 #define dout_subsys ceph_subsys_rbd
27 #define dout_prefix *_dout << "librbd::image::RefreshRequest: "
32 using util::create_rados_callback
;
33 using util::create_async_context_callback
;
34 using util::create_context_callback
;
37 RefreshRequest
<I
>::RefreshRequest(I
&image_ctx
, bool acquiring_lock
,
38 bool skip_open_parent
, Context
*on_finish
)
39 : m_image_ctx(image_ctx
), m_acquiring_lock(acquiring_lock
),
40 m_skip_open_parent_image(skip_open_parent
),
41 m_on_finish(create_async_context_callback(m_image_ctx
, on_finish
)),
42 m_error_result(0), m_flush_aio(false), m_exclusive_lock(nullptr),
43 m_object_map(nullptr), m_journal(nullptr), m_refresh_parent(nullptr) {
44 m_pool_metadata_io_ctx
.dup(image_ctx
.md_ctx
);
45 m_pool_metadata_io_ctx
.set_namespace("");
49 RefreshRequest
<I
>::~RefreshRequest() {
50 // these require state machine to close
51 ceph_assert(m_exclusive_lock
== nullptr);
52 ceph_assert(m_object_map
== nullptr);
53 ceph_assert(m_journal
== nullptr);
54 ceph_assert(m_refresh_parent
== nullptr);
55 ceph_assert(!m_blocked_writes
);
59 void RefreshRequest
<I
>::send() {
60 if (m_image_ctx
.old_format
) {
61 send_v1_read_header();
63 send_v2_get_mutable_metadata();
68 void RefreshRequest
<I
>::send_get_migration_header() {
69 if (m_image_ctx
.ignore_migrating
) {
70 if (m_image_ctx
.old_format
) {
71 send_v1_get_snapshots();
73 send_v2_get_metadata();
78 CephContext
*cct
= m_image_ctx
.cct
;
79 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
81 librados::ObjectReadOperation op
;
82 cls_client::migration_get_start(&op
);
84 using klass
= RefreshRequest
<I
>;
85 librados::AioCompletion
*comp
=
86 create_rados_callback
<klass
, &klass::handle_get_migration_header
>(this);
88 m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
94 Context
*RefreshRequest
<I
>::handle_get_migration_header(int *result
) {
95 CephContext
*cct
= m_image_ctx
.cct
;
96 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
99 auto it
= m_out_bl
.cbegin();
100 *result
= cls_client::migration_get_finish(&it
, &m_migration_spec
);
101 } else if (*result
== -ENOENT
) {
102 ldout(cct
, 5) << this << " " << __func__
<< ": no migration header found"
103 << ", retrying" << dendl
;
109 lderr(cct
) << "failed to retrieve migration header: "
110 << cpp_strerror(*result
) << dendl
;
114 switch(m_migration_spec
.header_type
) {
115 case cls::rbd::MIGRATION_HEADER_TYPE_SRC
:
117 lderr(cct
) << "image being migrated" << dendl
;
121 ldout(cct
, 1) << this << " " << __func__
<< ": migrating to: "
122 << m_migration_spec
<< dendl
;
124 case cls::rbd::MIGRATION_HEADER_TYPE_DST
:
125 ldout(cct
, 1) << this << " " << __func__
<< ": migrating from: "
126 << m_migration_spec
<< dendl
;
127 if (m_migration_spec
.state
!= cls::rbd::MIGRATION_STATE_PREPARED
&&
128 m_migration_spec
.state
!= cls::rbd::MIGRATION_STATE_EXECUTING
&&
129 m_migration_spec
.state
!= cls::rbd::MIGRATION_STATE_EXECUTED
) {
130 ldout(cct
, 5) << this << " " << __func__
<< ": current migration state: "
131 << m_migration_spec
.state
<< ", retrying" << dendl
;
137 ldout(cct
, 1) << this << " " << __func__
<< ": migration type "
138 << m_migration_spec
.header_type
<< dendl
;
143 if (m_image_ctx
.old_format
) {
144 send_v1_get_snapshots();
146 send_v2_get_metadata();
151 template <typename I
>
152 void RefreshRequest
<I
>::send_v1_read_header() {
153 CephContext
*cct
= m_image_ctx
.cct
;
154 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
156 librados::ObjectReadOperation op
;
157 op
.read(0, 0, nullptr, nullptr);
159 using klass
= RefreshRequest
<I
>;
160 librados::AioCompletion
*comp
= create_rados_callback
<
161 klass
, &klass::handle_v1_read_header
>(this);
163 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
169 template <typename I
>
170 Context
*RefreshRequest
<I
>::handle_v1_read_header(int *result
) {
171 CephContext
*cct
= m_image_ctx
.cct
;
172 ldout(cct
, 10) << this << " " << __func__
<< ": " << "r=" << *result
<< dendl
;
174 rbd_obj_header_ondisk v1_header
;
175 bool migrating
= false;
178 } else if (m_out_bl
.length() < sizeof(v1_header
)) {
179 lderr(cct
) << "v1 header too small" << dendl
;
182 } else if (memcmp(RBD_HEADER_TEXT
, m_out_bl
.c_str(),
183 sizeof(RBD_HEADER_TEXT
)) != 0) {
184 if (memcmp(RBD_MIGRATE_HEADER_TEXT
, m_out_bl
.c_str(),
185 sizeof(RBD_MIGRATE_HEADER_TEXT
)) == 0) {
186 ldout(cct
, 1) << this << " " << __func__
<< ": migration v1 header detected"
190 lderr(cct
) << "unrecognized v1 header" << dendl
;
197 std::shared_lock image_locker
{m_image_ctx
.image_lock
};
198 m_read_only
= m_image_ctx
.read_only
;
199 m_read_only_flags
= m_image_ctx
.read_only_flags
;
202 memcpy(&v1_header
, m_out_bl
.c_str(), sizeof(v1_header
));
203 m_order
= v1_header
.options
.order
;
204 m_size
= v1_header
.image_size
;
205 m_object_prefix
= v1_header
.block_name
;
207 send_get_migration_header();
209 send_v1_get_snapshots();
214 template <typename I
>
215 void RefreshRequest
<I
>::send_v1_get_snapshots() {
216 CephContext
*cct
= m_image_ctx
.cct
;
217 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
219 librados::ObjectReadOperation op
;
220 cls_client::old_snapshot_list_start(&op
);
222 using klass
= RefreshRequest
<I
>;
223 librados::AioCompletion
*comp
= create_rados_callback
<
224 klass
, &klass::handle_v1_get_snapshots
>(this);
226 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
232 template <typename I
>
233 Context
*RefreshRequest
<I
>::handle_v1_get_snapshots(int *result
) {
234 CephContext
*cct
= m_image_ctx
.cct
;
235 ldout(cct
, 10) << this << " " << __func__
<< ": " << "r=" << *result
<< dendl
;
237 std::vector
<std::string
> snap_names
;
238 std::vector
<uint64_t> snap_sizes
;
240 auto it
= m_out_bl
.cbegin();
241 *result
= cls_client::old_snapshot_list_finish(&it
, &snap_names
,
242 &snap_sizes
, &m_snapc
);
246 lderr(cct
) << "failed to retrieve v1 snapshots: " << cpp_strerror(*result
)
251 if (!m_snapc
.is_valid()) {
252 lderr(cct
) << "v1 image snap context is invalid" << dendl
;
257 m_snap_infos
.clear();
258 for (size_t i
= 0; i
< m_snapc
.snaps
.size(); ++i
) {
259 m_snap_infos
.push_back({m_snapc
.snaps
[i
],
260 {cls::rbd::UserSnapshotNamespace
{}},
261 snap_names
[i
], snap_sizes
[i
], {}, 0});
268 template <typename I
>
269 void RefreshRequest
<I
>::send_v1_get_locks() {
270 CephContext
*cct
= m_image_ctx
.cct
;
271 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
273 librados::ObjectReadOperation op
;
274 rados::cls::lock::get_lock_info_start(&op
, RBD_LOCK_NAME
);
276 using klass
= RefreshRequest
<I
>;
277 librados::AioCompletion
*comp
= create_rados_callback
<
278 klass
, &klass::handle_v1_get_locks
>(this);
280 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
286 template <typename I
>
287 Context
*RefreshRequest
<I
>::handle_v1_get_locks(int *result
) {
288 CephContext
*cct
= m_image_ctx
.cct
;
289 ldout(cct
, 10) << this << " " << __func__
<< ": "
290 << "r=" << *result
<< dendl
;
293 auto it
= m_out_bl
.cbegin();
294 ClsLockType lock_type
;
295 *result
= rados::cls::lock::get_lock_info_finish(&it
, &m_lockers
,
296 &lock_type
, &m_lock_tag
);
298 m_exclusive_locked
= (lock_type
== LOCK_EXCLUSIVE
);
302 lderr(cct
) << "failed to retrieve locks: " << cpp_strerror(*result
)
311 template <typename I
>
312 void RefreshRequest
<I
>::send_v1_apply() {
313 CephContext
*cct
= m_image_ctx
.cct
;
314 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
316 // ensure we are not in a rados callback when applying updates
317 using klass
= RefreshRequest
<I
>;
318 Context
*ctx
= create_context_callback
<
319 klass
, &klass::handle_v1_apply
>(this);
320 m_image_ctx
.op_work_queue
->queue(ctx
, 0);
323 template <typename I
>
324 Context
*RefreshRequest
<I
>::handle_v1_apply(int *result
) {
325 CephContext
*cct
= m_image_ctx
.cct
;
326 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
329 return send_flush_aio();
332 template <typename I
>
333 void RefreshRequest
<I
>::send_v2_get_mutable_metadata() {
334 CephContext
*cct
= m_image_ctx
.cct
;
335 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
339 std::shared_lock image_locker
{m_image_ctx
.image_lock
};
340 snap_id
= m_image_ctx
.snap_id
;
341 m_read_only
= m_image_ctx
.read_only
;
342 m_read_only_flags
= m_image_ctx
.read_only_flags
;
345 // mask out the non-primary read-only flag since its state can change
347 ((m_read_only_flags
& ~IMAGE_READ_ONLY_FLAG_NON_PRIMARY
) != 0) ||
348 (snap_id
!= CEPH_NOSNAP
));
349 librados::ObjectReadOperation op
;
350 cls_client::get_size_start(&op
, CEPH_NOSNAP
);
351 cls_client::get_features_start(&op
, read_only
);
352 cls_client::get_flags_start(&op
, CEPH_NOSNAP
);
353 cls_client::get_snapcontext_start(&op
);
354 rados::cls::lock::get_lock_info_start(&op
, RBD_LOCK_NAME
);
356 using klass
= RefreshRequest
<I
>;
357 librados::AioCompletion
*comp
= create_rados_callback
<
358 klass
, &klass::handle_v2_get_mutable_metadata
>(this);
360 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
366 template <typename I
>
367 Context
*RefreshRequest
<I
>::handle_v2_get_mutable_metadata(int *result
) {
368 CephContext
*cct
= m_image_ctx
.cct
;
369 ldout(cct
, 10) << this << " " << __func__
<< ": "
370 << "r=" << *result
<< dendl
;
372 auto it
= m_out_bl
.cbegin();
375 *result
= cls_client::get_size_finish(&it
, &m_size
, &order
);
379 *result
= cls_client::get_features_finish(&it
, &m_features
,
380 &m_incompatible_features
);
384 *result
= cls_client::get_flags_finish(&it
, &m_flags
);
388 *result
= cls_client::get_snapcontext_finish(&it
, &m_snapc
);
392 ClsLockType lock_type
= LOCK_NONE
;
393 *result
= rados::cls::lock::get_lock_info_finish(&it
, &m_lockers
,
394 &lock_type
, &m_lock_tag
);
396 m_exclusive_locked
= (lock_type
== LOCK_EXCLUSIVE
);
401 lderr(cct
) << "failed to retrieve mutable metadata: "
402 << cpp_strerror(*result
) << dendl
;
406 uint64_t unsupported
= m_incompatible_features
& ~RBD_FEATURES_ALL
;
407 if (unsupported
!= 0ULL) {
408 lderr(cct
) << "Image uses unsupported features: " << unsupported
<< dendl
;
413 if (!m_snapc
.is_valid()) {
414 lderr(cct
) << "image snap context is invalid!" << dendl
;
419 if (m_acquiring_lock
&& (m_features
& RBD_FEATURE_EXCLUSIVE_LOCK
) == 0) {
420 ldout(cct
, 5) << "ignoring dynamically disabled exclusive lock" << dendl
;
421 m_features
|= RBD_FEATURE_EXCLUSIVE_LOCK
;
422 m_incomplete_update
= true;
425 if (((m_incompatible_features
& RBD_FEATURE_NON_PRIMARY
) != 0U) &&
426 ((m_read_only_flags
& IMAGE_READ_ONLY_FLAG_NON_PRIMARY
) == 0U) &&
427 ((m_image_ctx
.read_only_mask
& IMAGE_READ_ONLY_FLAG_NON_PRIMARY
) != 0U)) {
428 // implies we opened a non-primary image in R/W mode
429 ldout(cct
, 5) << "adding non-primary read-only image flag" << dendl
;
430 m_read_only_flags
|= IMAGE_READ_ONLY_FLAG_NON_PRIMARY
;
431 } else if ((((m_incompatible_features
& RBD_FEATURE_NON_PRIMARY
) == 0U) ||
432 ((m_image_ctx
.read_only_mask
&
433 IMAGE_READ_ONLY_FLAG_NON_PRIMARY
) == 0U)) &&
434 ((m_read_only_flags
& IMAGE_READ_ONLY_FLAG_NON_PRIMARY
) != 0U)) {
435 ldout(cct
, 5) << "removing non-primary read-only image flag" << dendl
;
436 m_read_only_flags
&= ~IMAGE_READ_ONLY_FLAG_NON_PRIMARY
;
438 m_read_only
= (m_read_only_flags
!= 0U);
440 send_v2_get_parent();
444 template <typename I
>
445 void RefreshRequest
<I
>::send_v2_get_parent() {
446 // NOTE: remove support when Mimic is EOLed
447 CephContext
*cct
= m_image_ctx
.cct
;
448 ldout(cct
, 10) << this << " " << __func__
<< ": legacy=" << m_legacy_parent
451 librados::ObjectReadOperation op
;
452 if (!m_legacy_parent
) {
453 cls_client::parent_get_start(&op
);
454 cls_client::parent_overlap_get_start(&op
, CEPH_NOSNAP
);
456 cls_client::get_parent_start(&op
, CEPH_NOSNAP
);
459 auto aio_comp
= create_rados_callback
<
460 RefreshRequest
<I
>, &RefreshRequest
<I
>::handle_v2_get_parent
>(this);
462 m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, aio_comp
, &op
,
467 template <typename I
>
468 Context
*RefreshRequest
<I
>::handle_v2_get_parent(int *result
) {
469 // NOTE: remove support when Mimic is EOLed
470 CephContext
*cct
= m_image_ctx
.cct
;
471 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
473 auto it
= m_out_bl
.cbegin();
474 if (!m_legacy_parent
) {
476 *result
= cls_client::parent_get_finish(&it
, &m_parent_md
.spec
);
479 std::optional
<uint64_t> parent_overlap
;
481 *result
= cls_client::parent_overlap_get_finish(&it
, &parent_overlap
);
484 if (*result
== 0 && parent_overlap
) {
485 m_parent_md
.overlap
= *parent_overlap
;
486 m_head_parent_overlap
= true;
488 } else if (*result
== 0) {
489 *result
= cls_client::get_parent_finish(&it
, &m_parent_md
.spec
,
490 &m_parent_md
.overlap
);
491 m_head_parent_overlap
= true;
494 if (*result
== -EOPNOTSUPP
&& !m_legacy_parent
) {
495 ldout(cct
, 10) << "retrying using legacy parent method" << dendl
;
496 m_legacy_parent
= true;
497 send_v2_get_parent();
500 lderr(cct
) << "failed to retrieve parent: " << cpp_strerror(*result
)
505 if ((m_features
& RBD_FEATURE_MIGRATING
) != 0) {
506 ldout(cct
, 1) << "migrating feature set" << dendl
;
507 send_get_migration_header();
511 send_v2_get_metadata();
515 template <typename I
>
516 void RefreshRequest
<I
>::send_v2_get_metadata() {
517 CephContext
*cct
= m_image_ctx
.cct
;
518 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
520 auto ctx
= create_context_callback
<
521 RefreshRequest
<I
>, &RefreshRequest
<I
>::handle_v2_get_metadata
>(this);
522 auto req
= GetMetadataRequest
<I
>::create(
523 m_image_ctx
.md_ctx
, m_image_ctx
.header_oid
, true,
524 ImageCtx::METADATA_CONF_PREFIX
, ImageCtx::METADATA_CONF_PREFIX
, 0U,
529 template <typename I
>
530 Context
*RefreshRequest
<I
>::handle_v2_get_metadata(int *result
) {
531 CephContext
*cct
= m_image_ctx
.cct
;
532 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
535 lderr(cct
) << "failed to retrieve metadata: " << cpp_strerror(*result
)
540 send_v2_get_pool_metadata();
544 template <typename I
>
545 void RefreshRequest
<I
>::send_v2_get_pool_metadata() {
546 CephContext
*cct
= m_image_ctx
.cct
;
547 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
549 auto ctx
= create_context_callback
<
550 RefreshRequest
<I
>, &RefreshRequest
<I
>::handle_v2_get_pool_metadata
>(this);
551 auto req
= GetMetadataRequest
<I
>::create(
552 m_pool_metadata_io_ctx
, RBD_INFO
, true, ImageCtx::METADATA_CONF_PREFIX
,
553 ImageCtx::METADATA_CONF_PREFIX
, 0U, &m_metadata
, ctx
);
557 template <typename I
>
558 Context
*RefreshRequest
<I
>::handle_v2_get_pool_metadata(int *result
) {
559 CephContext
*cct
= m_image_ctx
.cct
;
560 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
563 lderr(cct
) << "failed to retrieve pool metadata: " << cpp_strerror(*result
)
568 bool thread_safe
= m_image_ctx
.image_watcher
->is_unregistered();
569 m_image_ctx
.apply_metadata(m_metadata
, thread_safe
);
571 send_v2_get_op_features();
575 template <typename I
>
576 void RefreshRequest
<I
>::send_v2_get_op_features() {
577 if ((m_features
& RBD_FEATURE_OPERATIONS
) == 0LL) {
582 CephContext
*cct
= m_image_ctx
.cct
;
583 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
585 librados::ObjectReadOperation op
;
586 cls_client::op_features_get_start(&op
);
588 librados::AioCompletion
*comp
= create_rados_callback
<
589 RefreshRequest
<I
>, &RefreshRequest
<I
>::handle_v2_get_op_features
>(this);
591 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
597 template <typename I
>
598 Context
*RefreshRequest
<I
>::handle_v2_get_op_features(int *result
) {
599 CephContext
*cct
= m_image_ctx
.cct
;
600 ldout(cct
, 10) << this << " " << __func__
<< ": "
601 << "r=" << *result
<< dendl
;
603 // -EOPNOTSUPP handler not required since feature bit implies OSD
604 // supports the method
606 auto it
= m_out_bl
.cbegin();
607 cls_client::op_features_get_finish(&it
, &m_op_features
);
608 } else if (*result
< 0) {
609 lderr(cct
) << "failed to retrieve op features: " << cpp_strerror(*result
)
618 template <typename I
>
619 void RefreshRequest
<I
>::send_v2_get_group() {
620 CephContext
*cct
= m_image_ctx
.cct
;
621 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
623 librados::ObjectReadOperation op
;
624 cls_client::image_group_get_start(&op
);
626 using klass
= RefreshRequest
<I
>;
627 librados::AioCompletion
*comp
= create_rados_callback
<
628 klass
, &klass::handle_v2_get_group
>(this);
630 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
636 template <typename I
>
637 Context
*RefreshRequest
<I
>::handle_v2_get_group(int *result
) {
638 CephContext
*cct
= m_image_ctx
.cct
;
639 ldout(cct
, 10) << this << " " << __func__
<< ": "
640 << "r=" << *result
<< dendl
;
643 auto it
= m_out_bl
.cbegin();
644 cls_client::image_group_get_finish(&it
, &m_group_spec
);
647 lderr(cct
) << "failed to retrieve group: " << cpp_strerror(*result
)
652 send_v2_get_snapshots();
656 template <typename I
>
657 void RefreshRequest
<I
>::send_v2_get_snapshots() {
658 m_snap_infos
.resize(m_snapc
.snaps
.size());
659 m_snap_flags
.resize(m_snapc
.snaps
.size());
660 m_snap_parents
.resize(m_snapc
.snaps
.size());
661 m_snap_protection
.resize(m_snapc
.snaps
.size());
663 if (m_snapc
.snaps
.empty()) {
664 send_v2_refresh_parent();
668 CephContext
*cct
= m_image_ctx
.cct
;
669 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
671 librados::ObjectReadOperation op
;
672 for (auto snap_id
: m_snapc
.snaps
) {
673 if (m_legacy_snapshot
) {
674 /// NOTE: remove after Luminous is retired
675 cls_client::get_snapshot_name_start(&op
, snap_id
);
676 cls_client::get_size_start(&op
, snap_id
);
677 cls_client::get_snapshot_timestamp_start(&op
, snap_id
);
679 cls_client::snapshot_get_start(&op
, snap_id
);
682 if (m_legacy_parent
) {
683 cls_client::get_parent_start(&op
, snap_id
);
685 cls_client::parent_overlap_get_start(&op
, snap_id
);
688 cls_client::get_flags_start(&op
, snap_id
);
689 cls_client::get_protection_status_start(&op
, snap_id
);
692 using klass
= RefreshRequest
<I
>;
693 librados::AioCompletion
*comp
= create_rados_callback
<
694 klass
, &klass::handle_v2_get_snapshots
>(this);
696 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
702 template <typename I
>
703 Context
*RefreshRequest
<I
>::handle_v2_get_snapshots(int *result
) {
704 CephContext
*cct
= m_image_ctx
.cct
;
705 ldout(cct
, 10) << this << " " << __func__
<< ": " << "r=" << *result
<< dendl
;
707 auto it
= m_out_bl
.cbegin();
708 for (size_t i
= 0; i
< m_snapc
.snaps
.size(); ++i
) {
709 if (m_legacy_snapshot
) {
710 /// NOTE: remove after Luminous is retired
711 std::string snap_name
;
713 *result
= cls_client::get_snapshot_name_finish(&it
, &snap_name
);
719 *result
= cls_client::get_size_finish(&it
, &snap_size
, &order
);
722 utime_t snap_timestamp
;
724 *result
= cls_client::get_snapshot_timestamp_finish(&it
,
729 m_snap_infos
[i
] = {m_snapc
.snaps
[i
],
730 {cls::rbd::UserSnapshotNamespace
{}},
731 snap_name
, snap_size
, snap_timestamp
, 0};
733 } else if (*result
>= 0) {
734 *result
= cls_client::snapshot_get_finish(&it
, &m_snap_infos
[i
]);
738 if (m_legacy_parent
) {
739 *result
= cls_client::get_parent_finish(&it
, &m_snap_parents
[i
].spec
,
740 &m_snap_parents
[i
].overlap
);
742 std::optional
<uint64_t> parent_overlap
;
743 *result
= cls_client::parent_overlap_get_finish(&it
, &parent_overlap
);
744 if (*result
== 0 && parent_overlap
&& m_parent_md
.spec
.pool_id
> -1) {
745 m_snap_parents
[i
].spec
= m_parent_md
.spec
;
746 m_snap_parents
[i
].overlap
= *parent_overlap
;
752 *result
= cls_client::get_flags_finish(&it
, &m_snap_flags
[i
]);
756 *result
= cls_client::get_protection_status_finish(
757 &it
, &m_snap_protection
[i
]);
765 if (*result
== -ENOENT
) {
766 ldout(cct
, 10) << "out-of-sync snapshot state detected" << dendl
;
767 send_v2_get_mutable_metadata();
769 } else if (!m_legacy_snapshot
&& *result
== -EOPNOTSUPP
) {
770 ldout(cct
, 10) << "retrying using legacy snapshot methods" << dendl
;
771 m_legacy_snapshot
= true;
772 send_v2_get_snapshots();
774 } else if (*result
< 0) {
775 lderr(cct
) << "failed to retrieve snapshots: " << cpp_strerror(*result
)
780 send_v2_refresh_parent();
784 template <typename I
>
785 void RefreshRequest
<I
>::send_v2_refresh_parent() {
787 std::shared_lock image_locker
{m_image_ctx
.image_lock
};
789 ParentImageInfo parent_md
;
790 MigrationInfo migration_info
;
791 int r
= get_parent_info(m_image_ctx
.snap_id
, &parent_md
, &migration_info
);
792 if (!m_skip_open_parent_image
&& (r
< 0 ||
793 RefreshParentRequest
<I
>::is_refresh_required(m_image_ctx
, parent_md
,
795 CephContext
*cct
= m_image_ctx
.cct
;
796 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
798 using klass
= RefreshRequest
<I
>;
799 Context
*ctx
= create_context_callback
<
800 klass
, &klass::handle_v2_refresh_parent
>(this);
801 m_refresh_parent
= RefreshParentRequest
<I
>::create(
802 m_image_ctx
, parent_md
, migration_info
, ctx
);
806 if (m_refresh_parent
!= nullptr) {
807 m_refresh_parent
->send();
809 send_v2_init_exclusive_lock();
813 template <typename I
>
814 Context
*RefreshRequest
<I
>::handle_v2_refresh_parent(int *result
) {
815 CephContext
*cct
= m_image_ctx
.cct
;
816 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
819 lderr(cct
) << "failed to refresh parent image: " << cpp_strerror(*result
)
826 send_v2_init_exclusive_lock();
830 template <typename I
>
831 void RefreshRequest
<I
>::send_v2_init_exclusive_lock() {
832 if ((m_features
& RBD_FEATURE_EXCLUSIVE_LOCK
) == 0 ||
833 m_read_only
|| !m_image_ctx
.snap_name
.empty() ||
834 m_image_ctx
.exclusive_lock
!= nullptr) {
835 send_v2_open_object_map();
839 // implies exclusive lock dynamically enabled or image open in-progress
840 CephContext
*cct
= m_image_ctx
.cct
;
841 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
843 // TODO need safe shut down
844 m_exclusive_lock
= m_image_ctx
.create_exclusive_lock();
846 using klass
= RefreshRequest
<I
>;
847 Context
*ctx
= create_context_callback
<
848 klass
, &klass::handle_v2_init_exclusive_lock
>(this);
850 std::shared_lock owner_locker
{m_image_ctx
.owner_lock
};
851 m_exclusive_lock
->init(m_features
, ctx
);
854 template <typename I
>
855 Context
*RefreshRequest
<I
>::handle_v2_init_exclusive_lock(int *result
) {
856 CephContext
*cct
= m_image_ctx
.cct
;
857 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
860 lderr(cct
) << "failed to initialize exclusive lock: "
861 << cpp_strerror(*result
) << dendl
;
865 // object map and journal will be opened when exclusive lock is
866 // acquired (if features are enabled)
871 template <typename I
>
872 void RefreshRequest
<I
>::send_v2_open_journal() {
873 bool journal_disabled
= (
874 (m_features
& RBD_FEATURE_JOURNALING
) == 0 ||
876 !m_image_ctx
.snap_name
.empty() ||
877 m_image_ctx
.journal
!= nullptr ||
878 m_image_ctx
.exclusive_lock
== nullptr ||
879 !m_image_ctx
.exclusive_lock
->is_lock_owner());
880 bool journal_disabled_by_policy
;
882 std::shared_lock image_locker
{m_image_ctx
.image_lock
};
883 journal_disabled_by_policy
= (
885 m_image_ctx
.get_journal_policy()->journal_disabled());
888 if (journal_disabled
|| journal_disabled_by_policy
) {
889 // journal dynamically enabled -- doesn't own exclusive lock
890 if ((m_features
& RBD_FEATURE_JOURNALING
) != 0 &&
891 !journal_disabled_by_policy
&&
892 m_image_ctx
.exclusive_lock
!= nullptr &&
893 m_image_ctx
.journal
== nullptr) {
894 m_image_ctx
.io_work_queue
->set_require_lock(librbd::io::DIRECTION_BOTH
,
897 send_v2_block_writes();
901 // implies journal dynamically enabled since ExclusiveLock will init
902 // the journal upon acquiring the lock
903 CephContext
*cct
= m_image_ctx
.cct
;
904 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
906 using klass
= RefreshRequest
<I
>;
907 Context
*ctx
= create_context_callback
<
908 klass
, &klass::handle_v2_open_journal
>(this);
910 // TODO need safe close
911 m_journal
= m_image_ctx
.create_journal();
912 m_journal
->open(ctx
);
915 template <typename I
>
916 Context
*RefreshRequest
<I
>::handle_v2_open_journal(int *result
) {
917 CephContext
*cct
= m_image_ctx
.cct
;
918 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
921 lderr(cct
) << "failed to initialize journal: " << cpp_strerror(*result
)
926 send_v2_block_writes();
930 template <typename I
>
931 void RefreshRequest
<I
>::send_v2_block_writes() {
932 bool disabled_journaling
= false;
934 std::shared_lock image_locker
{m_image_ctx
.image_lock
};
935 disabled_journaling
= ((m_features
& RBD_FEATURE_EXCLUSIVE_LOCK
) != 0 &&
936 (m_features
& RBD_FEATURE_JOURNALING
) == 0 &&
937 m_image_ctx
.journal
!= nullptr);
940 if (!disabled_journaling
) {
945 CephContext
*cct
= m_image_ctx
.cct
;
946 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
948 // we need to block writes temporarily to avoid in-flight journal
950 m_blocked_writes
= true;
951 Context
*ctx
= create_context_callback
<
952 RefreshRequest
<I
>, &RefreshRequest
<I
>::handle_v2_block_writes
>(this);
954 std::shared_lock owner_locker
{m_image_ctx
.owner_lock
};
955 m_image_ctx
.io_work_queue
->block_writes(ctx
);
958 template <typename I
>
959 Context
*RefreshRequest
<I
>::handle_v2_block_writes(int *result
) {
960 CephContext
*cct
= m_image_ctx
.cct
;
961 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
964 lderr(cct
) << "failed to block writes: " << cpp_strerror(*result
)
972 template <typename I
>
973 void RefreshRequest
<I
>::send_v2_open_object_map() {
974 if ((m_features
& RBD_FEATURE_OBJECT_MAP
) == 0 ||
975 m_image_ctx
.object_map
!= nullptr ||
976 (m_image_ctx
.snap_name
.empty() &&
978 m_image_ctx
.exclusive_lock
== nullptr ||
979 !m_image_ctx
.exclusive_lock
->is_lock_owner()))) {
980 send_v2_open_journal();
984 // implies object map dynamically enabled or image open in-progress
985 // since SetSnapRequest loads the object map for a snapshot and
986 // ExclusiveLock loads the object map for HEAD
987 CephContext
*cct
= m_image_ctx
.cct
;
988 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
990 if (m_image_ctx
.snap_name
.empty()) {
991 m_object_map
= m_image_ctx
.create_object_map(CEPH_NOSNAP
);
993 for (size_t snap_idx
= 0; snap_idx
< m_snap_infos
.size(); ++snap_idx
) {
994 if (m_snap_infos
[snap_idx
].name
== m_image_ctx
.snap_name
) {
995 m_object_map
= m_image_ctx
.create_object_map(
996 m_snapc
.snaps
[snap_idx
].val
);
1001 if (m_object_map
== nullptr) {
1002 lderr(cct
) << "failed to locate snapshot: " << m_image_ctx
.snap_name
1004 send_v2_open_journal();
1009 using klass
= RefreshRequest
<I
>;
1010 Context
*ctx
= create_context_callback
<
1011 klass
, &klass::handle_v2_open_object_map
>(this);
1012 m_object_map
->open(ctx
);
1015 template <typename I
>
1016 Context
*RefreshRequest
<I
>::handle_v2_open_object_map(int *result
) {
1017 CephContext
*cct
= m_image_ctx
.cct
;
1018 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
1021 lderr(cct
) << "failed to open object map: " << cpp_strerror(*result
)
1023 m_object_map
->put();
1024 m_object_map
= nullptr;
1026 if (*result
!= -EFBIG
) {
1027 save_result(result
);
1031 send_v2_open_journal();
1035 template <typename I
>
1036 void RefreshRequest
<I
>::send_v2_apply() {
1037 CephContext
*cct
= m_image_ctx
.cct
;
1038 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
1040 // ensure we are not in a rados callback when applying updates
1041 using klass
= RefreshRequest
<I
>;
1042 Context
*ctx
= create_context_callback
<
1043 klass
, &klass::handle_v2_apply
>(this);
1044 m_image_ctx
.op_work_queue
->queue(ctx
, 0);
1047 template <typename I
>
1048 Context
*RefreshRequest
<I
>::handle_v2_apply(int *result
) {
1049 CephContext
*cct
= m_image_ctx
.cct
;
1050 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
1054 return send_v2_finalize_refresh_parent();
1057 template <typename I
>
1058 Context
*RefreshRequest
<I
>::send_v2_finalize_refresh_parent() {
1059 if (m_refresh_parent
== nullptr) {
1060 return send_v2_shut_down_exclusive_lock();
1063 CephContext
*cct
= m_image_ctx
.cct
;
1064 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
1066 using klass
= RefreshRequest
<I
>;
1067 Context
*ctx
= create_context_callback
<
1068 klass
, &klass::handle_v2_finalize_refresh_parent
>(this);
1069 m_refresh_parent
->finalize(ctx
);
1073 template <typename I
>
1074 Context
*RefreshRequest
<I
>::handle_v2_finalize_refresh_parent(int *result
) {
1075 CephContext
*cct
= m_image_ctx
.cct
;
1076 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
1078 ceph_assert(m_refresh_parent
!= nullptr);
1079 delete m_refresh_parent
;
1080 m_refresh_parent
= nullptr;
1082 return send_v2_shut_down_exclusive_lock();
1085 template <typename I
>
1086 Context
*RefreshRequest
<I
>::send_v2_shut_down_exclusive_lock() {
1087 if (m_exclusive_lock
== nullptr) {
1088 return send_v2_close_journal();
1091 CephContext
*cct
= m_image_ctx
.cct
;
1092 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
1094 // exclusive lock feature was dynamically disabled. in-flight IO will be
1095 // flushed and in-flight requests will be canceled before releasing lock
1096 using klass
= RefreshRequest
<I
>;
1097 Context
*ctx
= create_context_callback
<
1098 klass
, &klass::handle_v2_shut_down_exclusive_lock
>(this);
1099 m_exclusive_lock
->shut_down(ctx
);
1103 template <typename I
>
1104 Context
*RefreshRequest
<I
>::handle_v2_shut_down_exclusive_lock(int *result
) {
1105 CephContext
*cct
= m_image_ctx
.cct
;
1106 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
1109 lderr(cct
) << "failed to shut down exclusive lock: "
1110 << cpp_strerror(*result
) << dendl
;
1111 save_result(result
);
1115 std::unique_lock owner_locker
{m_image_ctx
.owner_lock
};
1116 ceph_assert(m_image_ctx
.exclusive_lock
== nullptr);
1119 ceph_assert(m_exclusive_lock
!= nullptr);
1120 m_exclusive_lock
->put();
1121 m_exclusive_lock
= nullptr;
1123 return send_v2_close_journal();
1126 template <typename I
>
1127 Context
*RefreshRequest
<I
>::send_v2_close_journal() {
1128 if (m_journal
== nullptr) {
1129 return send_v2_close_object_map();
1132 CephContext
*cct
= m_image_ctx
.cct
;
1133 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
1135 // journal feature was dynamically disabled
1136 using klass
= RefreshRequest
<I
>;
1137 Context
*ctx
= create_context_callback
<
1138 klass
, &klass::handle_v2_close_journal
>(this);
1139 m_journal
->close(ctx
);
1143 template <typename I
>
1144 Context
*RefreshRequest
<I
>::handle_v2_close_journal(int *result
) {
1145 CephContext
*cct
= m_image_ctx
.cct
;
1146 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
1149 save_result(result
);
1150 lderr(cct
) << "failed to close journal: " << cpp_strerror(*result
)
1154 ceph_assert(m_journal
!= nullptr);
1156 m_journal
= nullptr;
1158 ceph_assert(m_blocked_writes
);
1159 m_blocked_writes
= false;
1161 m_image_ctx
.io_work_queue
->unblock_writes();
1162 return send_v2_close_object_map();
1165 template <typename I
>
1166 Context
*RefreshRequest
<I
>::send_v2_close_object_map() {
1167 if (m_object_map
== nullptr) {
1168 return send_flush_aio();
1171 CephContext
*cct
= m_image_ctx
.cct
;
1172 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
1174 // object map was dynamically disabled
1175 using klass
= RefreshRequest
<I
>;
1176 Context
*ctx
= create_context_callback
<
1177 klass
, &klass::handle_v2_close_object_map
>(this);
1178 m_object_map
->close(ctx
);
1182 template <typename I
>
1183 Context
*RefreshRequest
<I
>::handle_v2_close_object_map(int *result
) {
1184 CephContext
*cct
= m_image_ctx
.cct
;
1185 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
1188 lderr(cct
) << "failed to close object map: " << cpp_strerror(*result
)
1192 ceph_assert(m_object_map
!= nullptr);
1194 m_object_map
->put();
1195 m_object_map
= nullptr;
1197 return send_flush_aio();
// Final step of the refresh state machine: flush in-flight AIO if any new
// snapshots were applied, otherwise propagate any saved error (or finish).
// NOTE(review): this block was reconstructed from a garbled extraction; the
// `if (m_flush_aio)` guard and the trailing branches were restored from the
// surrounding visible code — verify against upstream before merging.
template <typename I>
Context *RefreshRequest<I>::send_flush_aio() {
  if (m_incomplete_update && m_error_result == 0) {
    // if this was a partial refresh, notify ImageState
    m_error_result = -ERESTART;
  }

  if (m_flush_aio) {
    CephContext *cct = m_image_ctx.cct;
    ldout(cct, 10) << this << " " << __func__ << dendl;

    // flush is dispatched under the owner lock (shared) so the exclusive
    // lock state cannot change underneath the in-flight request
    std::shared_lock owner_locker{m_image_ctx.owner_lock};
    auto ctx = create_context_callback<
      RefreshRequest<I>, &RefreshRequest<I>::handle_flush_aio>(this);
    auto aio_comp = io::AioCompletion::create_and_start(
      ctx, util::get_image_ctx(&m_image_ctx), io::AIO_TYPE_FLUSH);
    auto req = io::ImageDispatchSpec<I>::create_flush_request(
      m_image_ctx, aio_comp, io::FLUSH_SOURCE_INTERNAL, {});
    req->send();
    // presumably the dispatch spec is synchronous-dispatch and owned here —
    // TODO confirm the request's ownership/delete semantics with upstream
    delete req;
  } else if (m_error_result < 0) {
    // propagate saved error back to caller
    Context *ctx = create_context_callback<
      RefreshRequest<I>, &RefreshRequest<I>::handle_error>(this);
    m_image_ctx.op_work_queue->queue(ctx, 0);
  } else {
    // nothing pending and no error: complete immediately
    return m_on_finish;
  }

  return nullptr;
}
1232 template <typename I
>
1233 Context
*RefreshRequest
<I
>::handle_flush_aio(int *result
) {
1234 CephContext
*cct
= m_image_ctx
.cct
;
1235 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
1238 lderr(cct
) << "failed to flush pending AIO: " << cpp_strerror(*result
)
1242 return handle_error(result
);
1245 template <typename I
>
1246 Context
*RefreshRequest
<I
>::handle_error(int *result
) {
1247 if (m_error_result
< 0) {
1248 *result
= m_error_result
;
1250 CephContext
*cct
= m_image_ctx
.cct
;
1251 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
// Atomically transfer the freshly fetched on-disk state into the ImageCtx.
// Runs with both owner_lock and image_lock held; statement order matters
// because later steps (snapshot table rebuild, feature swap) read fields
// assigned earlier.
// NOTE(review): reconstructed from a garbled extraction; dropped lines
// (closing braces, `else` branches, `m_flush_aio = true`) were restored from
// the surrounding visible code — verify against upstream before merging.
template <typename I>
void RefreshRequest<I>::apply() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << dendl;

  std::scoped_lock locker{m_image_ctx.owner_lock, m_image_ctx.image_lock};

  m_image_ctx.read_only_flags = m_read_only_flags;
  m_image_ctx.read_only = m_read_only;
  m_image_ctx.size = m_size;
  m_image_ctx.lockers = m_lockers;
  m_image_ctx.lock_tag = m_lock_tag;
  m_image_ctx.exclusive_locked = m_exclusive_locked;

  // maps a destination snap id back to its source (migration) snap seq
  std::map<uint64_t, uint64_t> migration_reverse_snap_seq;

  if (m_image_ctx.old_format) {
    // v1 images support none of the v2 feature bits
    m_image_ctx.order = m_order;
    m_image_ctx.features = 0;
    m_image_ctx.flags = 0;
    m_image_ctx.op_features = 0;
    m_image_ctx.operations_disabled = false;
    m_image_ctx.object_prefix = std::move(m_object_prefix);
    m_image_ctx.init_layout(m_image_ctx.md_ctx.get_id());
  } else {
    // HEAD revision doesn't have a defined overlap so it's only
    // applicable to snapshots
    if (!m_head_parent_overlap) {
      m_parent_md.overlap = 0;
    }

    m_image_ctx.features = m_features;
    m_image_ctx.flags = m_flags;
    m_image_ctx.op_features = m_op_features;
    // any unknown op feature bit disables maintenance operations
    m_image_ctx.operations_disabled = (
      (m_op_features & ~RBD_OPERATION_FEATURES_ALL) != 0ULL);
    m_image_ctx.group_spec = m_group_spec;
    if (get_migration_info(&m_image_ctx.parent_md,
                           &m_image_ctx.migration_info)) {
      for (auto it : m_image_ctx.migration_info.snap_map) {
        migration_reverse_snap_seq[it.second.front()] = it.first;
      }
    } else {
      m_image_ctx.parent_md = m_parent_md;
      m_image_ctx.migration_info = {};
    }

    // sparse copyup requires all OSDs to be at least Octopus
    librados::Rados rados(m_image_ctx.md_ctx);
    int8_t require_osd_release;
    int r = rados.get_min_compatible_osd(&require_osd_release);
    if (r == 0 && require_osd_release >= CEPH_RELEASE_OCTOPUS) {
      m_image_ctx.enable_sparse_copyup = true;
    }
  }

  // any snapshot not previously known forces a flush of in-flight AIO
  for (size_t i = 0; i < m_snapc.snaps.size(); ++i) {
    std::vector<librados::snap_t>::const_iterator it = std::find(
      m_image_ctx.snaps.begin(), m_image_ctx.snaps.end(),
      m_snapc.snaps[i].val);
    if (it == m_image_ctx.snaps.end()) {
      m_flush_aio = true;
      ldout(cct, 20) << "new snapshot id=" << m_snapc.snaps[i].val
                     << " name=" << m_snap_infos[i].name
                     << " size=" << m_snap_infos[i].image_size
                     << dendl;
    }
  }

  // rebuild the in-memory snapshot tables from scratch
  m_image_ctx.snaps.clear();
  m_image_ctx.snap_info.clear();
  m_image_ctx.snap_ids.clear();
  auto overlap = m_image_ctx.parent_md.overlap;
  for (size_t i = 0; i < m_snapc.snaps.size(); ++i) {
    uint64_t flags = m_image_ctx.old_format ? 0 : m_snap_flags[i];
    uint8_t protection_status = m_image_ctx.old_format ?
      static_cast<uint8_t>(RBD_PROTECTION_STATUS_UNPROTECTED) :
      m_snap_protection[i];
    ParentImageInfo parent;
    if (!m_image_ctx.old_format) {
      if (!m_image_ctx.migration_info.empty()) {
        // during migration the parent is the migration source; map the
        // snap back to its source snap seq when one exists
        parent = m_image_ctx.parent_md;
        auto it = migration_reverse_snap_seq.find(m_snapc.snaps[i].val);
        if (it != migration_reverse_snap_seq.end()) {
          parent.spec.snap_id = it->second;
          parent.overlap = m_snap_infos[i].image_size;
        } else {
          overlap = std::min(overlap, m_snap_infos[i].image_size);
          parent.overlap = overlap;
        }
      } else {
        parent = m_snap_parents[i];
      }
    }
    m_image_ctx.add_snap(m_snap_infos[i].snapshot_namespace,
                         m_snap_infos[i].name, m_snapc.snaps[i].val,
                         m_snap_infos[i].image_size, parent,
                         protection_status, flags,
                         m_snap_infos[i].timestamp);
  }
  m_image_ctx.parent_md.overlap = std::min(overlap, m_image_ctx.size);
  m_image_ctx.snapc = m_snapc;

  // the snapshot we were opened against may have been deleted meanwhile
  if (m_image_ctx.snap_id != CEPH_NOSNAP &&
      m_image_ctx.get_snap_id(m_image_ctx.snap_namespace,
                              m_image_ctx.snap_name) != m_image_ctx.snap_id) {
    lderr(cct) << "tried to read from a snapshot that no longer exists: "
               << m_image_ctx.snap_name << dendl;
    m_image_ctx.snap_exists = false;
  }

  if (m_refresh_parent != nullptr) {
    m_refresh_parent->apply();
  }
  if (m_image_ctx.data_ctx.is_valid()) {
    m_image_ctx.data_ctx.selfmanaged_snap_set_write_ctx(m_image_ctx.snapc.seq,
                                                        m_image_ctx.snaps);
  }

  // handle dynamically enabled / disabled features
  if (m_image_ctx.exclusive_lock != nullptr &&
      !m_image_ctx.test_features(RBD_FEATURE_EXCLUSIVE_LOCK,
                                 m_image_ctx.image_lock)) {
    // disabling exclusive lock will automatically handle closing
    // object map and journaling
    ceph_assert(m_exclusive_lock == nullptr);
    m_exclusive_lock = m_image_ctx.exclusive_lock;
  } else {
    if (m_exclusive_lock != nullptr) {
      // newly enabled exclusive lock: hand it to the image context
      ceph_assert(m_image_ctx.exclusive_lock == nullptr);
      std::swap(m_exclusive_lock, m_image_ctx.exclusive_lock);
    }
    if (!m_image_ctx.test_features(RBD_FEATURE_JOURNALING,
                                   m_image_ctx.image_lock)) {
      if (!m_image_ctx.clone_copy_on_read && m_image_ctx.journal != nullptr) {
        // journaling disabled: reads no longer require the lock
        m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_READ,
                                                    false);
      }
      std::swap(m_journal, m_image_ctx.journal);
    } else if (m_journal != nullptr) {
      std::swap(m_journal, m_image_ctx.journal);
    }
    if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
                                   m_image_ctx.image_lock) ||
        m_object_map != nullptr) {
      std::swap(m_object_map, m_image_ctx.object_map);
    }
  }
}
1405 template <typename I
>
1406 int RefreshRequest
<I
>::get_parent_info(uint64_t snap_id
,
1407 ParentImageInfo
*parent_md
,
1408 MigrationInfo
*migration_info
) {
1409 if (get_migration_info(parent_md
, migration_info
)) {
1411 } else if (snap_id
== CEPH_NOSNAP
) {
1412 *parent_md
= m_parent_md
;
1413 *migration_info
= {};
1416 for (size_t i
= 0; i
< m_snapc
.snaps
.size(); ++i
) {
1417 if (m_snapc
.snaps
[i
].val
== snap_id
) {
1418 *parent_md
= m_snap_parents
[i
];
1419 *migration_info
= {};
// Derive parent/migration metadata when this image is the destination of an
// in-progress migration. Returns false (leaving the outputs untouched) when
// no prepared/executing destination migration exists.
// NOTE(review): reconstructed from a garbled extraction; the early-return,
// upper_bound argument and final return were restored from context — verify
// against upstream before merging.
template <typename I>
bool RefreshRequest<I>::get_migration_info(ParentImageInfo *parent_md,
                                           MigrationInfo *migration_info) {
  if (m_migration_spec.header_type != cls::rbd::MIGRATION_HEADER_TYPE_DST ||
      (m_migration_spec.state != cls::rbd::MIGRATION_STATE_PREPARED &&
       m_migration_spec.state != cls::rbd::MIGRATION_STATE_EXECUTING)) {
    // only a source header, an unlinked spec, or a completed migration is
    // expected here
    ceph_assert(m_migration_spec.header_type ==
                  cls::rbd::MIGRATION_HEADER_TYPE_SRC ||
                m_migration_spec.pool_id == -1 ||
                m_migration_spec.state == cls::rbd::MIGRATION_STATE_EXECUTED);
    return false;
  }

  // the migration source acts as the image's parent at HEAD
  parent_md->spec.pool_id = m_migration_spec.pool_id;
  parent_md->spec.pool_namespace = m_migration_spec.pool_namespace;
  parent_md->spec.image_id = m_migration_spec.image_id;
  parent_md->spec.snap_id = CEPH_NOSNAP;
  parent_md->overlap = std::min(m_size, m_migration_spec.overlap);

  auto snap_seqs = m_migration_spec.snap_seqs;
  // If new snapshots have been created on destination image after
  // migration stared, map the source CEPH_NOSNAP to the earliest of
  // these snapshots.
  snapid_t snap_id = snap_seqs.empty() ? 0 : snap_seqs.rbegin()->second;
  auto it = std::upper_bound(m_snapc.snaps.rbegin(), m_snapc.snaps.rend(),
                             snap_id);
  if (it != m_snapc.snaps.rend()) {
    snap_seqs[CEPH_NOSNAP] = *it;
  } else {
    snap_seqs[CEPH_NOSNAP] = CEPH_NOSNAP;
  }

  // overlap covers the largest image size among mapped destination snaps
  std::set<uint64_t> snap_ids;
  for (auto& it : snap_seqs) {
    snap_ids.insert(it.second);
  }
  uint64_t overlap = snap_ids.find(CEPH_NOSNAP) != snap_ids.end() ?
    parent_md->overlap : 0;
  for (size_t i = 0; i < m_snapc.snaps.size(); ++i) {
    if (snap_ids.find(m_snapc.snaps[i].val) != snap_ids.end()) {
      overlap = std::max(overlap, m_snap_infos[i].image_size);
    }
  }

  *migration_info = {m_migration_spec.pool_id, m_migration_spec.pool_namespace,
                     m_migration_spec.image_name, m_migration_spec.image_id, {},
                     overlap, m_migration_spec.flatten};

  deep_copy::util::compute_snap_map(m_image_ctx.cct, 0, CEPH_NOSNAP, {},
                                    snap_seqs, &migration_info->snap_map);
  return true;
}
1481 } // namespace image
1482 } // namespace librbd
// explicit instantiation for the production ImageCtx so the template's
// definitions in this translation unit are emitted exactly once
template class librbd::image::RefreshRequest<librbd::ImageCtx>;