1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
4 #include <boost/algorithm/string/predicate.hpp>
5 #include "include/ceph_assert.h"
7 #include "librbd/image/RefreshRequest.h"
8 #include "common/dout.h"
9 #include "common/errno.h"
10 #include "cls/lock/cls_lock_client.h"
11 #include "cls/rbd/cls_rbd_client.h"
12 #include "librbd/ExclusiveLock.h"
13 #include "librbd/ImageCtx.h"
14 #include "librbd/ImageWatcher.h"
15 #include "librbd/Journal.h"
16 #include "librbd/ObjectMap.h"
17 #include "librbd/Utils.h"
18 #include "librbd/deep_copy/Utils.h"
19 #include "librbd/image/RefreshParentRequest.h"
20 #include "librbd/io/AioCompletion.h"
21 #include "librbd/io/ImageDispatchSpec.h"
22 #include "librbd/io/ImageRequestWQ.h"
23 #include "librbd/journal/Policy.h"
25 #define dout_subsys ceph_subsys_rbd
27 #define dout_prefix *_dout << "librbd::image::RefreshRequest: "
// NOTE(review): this chunk is a damaged extraction — statements are split
// across physical lines and some original lines are missing; only comments
// are added here, no code is altered.
// Page size for metadata_list requests: the key/value listing is fetched in
// chunks of up to this many items, resuming from m_last_metadata_key until
// the prefix range is exhausted (see handle_v2_get_metadata below).
34 const uint64_t MAX_METADATA_ITEMS
= 128;
// Callback-factory helpers from librbd/Utils.h used throughout this file to
// wrap 'this' methods as rados / async / plain Context completions.
38 using util::create_rados_callback
;
39 using util::create_async_context_callback
;
40 using util::create_context_callback
;
// Constructor: captures the refresh parameters and wraps the user-supplied
// completion in an async context callback so it never fires inline from a
// rados callback thread.  State-machine pointers (exclusive lock, object
// map, journal, parent refresh) start null and must be released by the
// state machine before destruction (asserted in the destructor).
// NOTE(review): extraction is missing the closing brace of this body
// (original lines 52-53); code below is left byte-identical.
43 RefreshRequest
<I
>::RefreshRequest(I
&image_ctx
, bool acquiring_lock
,
44 bool skip_open_parent
, Context
*on_finish
)
45 : m_image_ctx(image_ctx
), m_acquiring_lock(acquiring_lock
),
46 m_skip_open_parent_image(skip_open_parent
),
47 m_on_finish(create_async_context_callback(m_image_ctx
, on_finish
)),
48 m_error_result(0), m_flush_aio(false), m_exclusive_lock(nullptr),
49 m_object_map(nullptr), m_journal(nullptr), m_refresh_parent(nullptr) {
// Duplicate the image's md ioctx and clear its namespace: pool-level
// metadata (the RBD_INFO object, see send_v2_get_pool_metadata) lives in
// the pool's default namespace regardless of the image's namespace.
50 m_pool_metadata_io_ctx
.dup(image_ctx
.md_ctx
);
51 m_pool_metadata_io_ctx
.set_namespace("");
// Destructor: sanity-checks that the state machine ran to completion and
// released everything it transiently owned — each of these members is
// explicitly deleted/nulled (or writes unblocked) on the shutdown paths
// before the request finishes.
// NOTE(review): closing brace (original line 62) missing from extraction.
55 RefreshRequest
<I
>::~RefreshRequest() {
56 // these require state machine to close
57 ceph_assert(m_exclusive_lock
== nullptr);
58 ceph_assert(m_object_map
== nullptr);
59 ceph_assert(m_journal
== nullptr);
60 ceph_assert(m_refresh_parent
== nullptr);
61 ceph_assert(!m_blocked_writes
);
// Entry point: dispatch on image format — v1 (old_format) images start by
// reading the raw on-disk header, v2 images start with the mutable-metadata
// bundle.  NOTE(review): the else/closing braces (original lines 68, 70-71)
// are missing from this extraction.
65 void RefreshRequest
<I
>::send() {
66 if (m_image_ctx
.old_format
) {
67 send_v1_read_header();
69 send_v2_get_mutable_metadata();
// Fetch the image's migration header.  When the caller asked to ignore
// migration state (ignore_migrating), skip straight to the next step of the
// appropriate format path; otherwise issue an async migration_get read on
// the image header object, completing in handle_get_migration_header.
// NOTE(review): closing braces and the trailing aio_operate arguments
// (original lines 80-83, 93, 95-97 — presumably &m_out_bl and
// comp->release()) are missing from this extraction; verify against the
// upstream file before acting on this block.
74 void RefreshRequest
<I
>::send_get_migration_header() {
75 if (m_image_ctx
.ignore_migrating
) {
76 if (m_image_ctx
.old_format
) {
77 send_v1_get_snapshots();
79 send_v2_get_metadata();
84 CephContext
*cct
= m_image_ctx
.cct
;
85 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
87 librados::ObjectReadOperation op
;
88 cls_client::migration_get_start(&op
);
90 using klass
= RefreshRequest
<I
>;
91 librados::AioCompletion
*comp
=
92 create_rados_callback
<klass
, &klass::handle_get_migration_header
>(this);
94 m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
// Completion for the migration-header read.  Decodes m_migration_spec on
// success; -ENOENT triggers a retry (header may not be written yet); other
// errors are fatal.  Then validates the migration direction:
//   SRC: a writable open of an image being migrated away is rejected;
//   DST: the migration must be in PREPARED/EXECUTING/EXECUTED state,
//        otherwise the refresh is retried.
// Finally continues down the v1 or v2 refresh path.
// NOTE(review): many interior lines (error returns, break statements,
// closing braces — e.g. original 103-104, 110-114, 117-119, 124-126, 129,
// 138-142, 145-148, 151, 153-154) are missing from this extraction.
100 Context
*RefreshRequest
<I
>::handle_get_migration_header(int *result
) {
101 CephContext
*cct
= m_image_ctx
.cct
;
102 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
// Decode the migration spec from the out-bl on success.
105 auto it
= m_out_bl
.cbegin();
106 *result
= cls_client::migration_get_finish(&it
, &m_migration_spec
);
107 } else if (*result
== -ENOENT
) {
108 ldout(cct
, 5) << this << " " << __func__
<< ": no migration header found"
109 << ", retrying" << dendl
;
115 lderr(cct
) << "failed to retrieve migration header: "
116 << cpp_strerror(*result
) << dendl
;
120 switch(m_migration_spec
.header_type
) {
121 case cls::rbd::MIGRATION_HEADER_TYPE_SRC
:
// Source of a migration: only read-only opens are permitted.
122 if (!m_image_ctx
.read_only
) {
123 lderr(cct
) << "image being migrated" << dendl
;
127 ldout(cct
, 1) << this << " " << __func__
<< ": migrating to: "
128 << m_migration_spec
<< dendl
;
130 case cls::rbd::MIGRATION_HEADER_TYPE_DST
:
131 ldout(cct
, 1) << this << " " << __func__
<< ": migrating from: "
132 << m_migration_spec
<< dendl
;
// Destination image is only usable once preparation finished.
133 if (m_migration_spec
.state
!= cls::rbd::MIGRATION_STATE_PREPARED
&&
134 m_migration_spec
.state
!= cls::rbd::MIGRATION_STATE_EXECUTING
&&
135 m_migration_spec
.state
!= cls::rbd::MIGRATION_STATE_EXECUTED
) {
136 ldout(cct
, 5) << this << " " << __func__
<< ": current migration state: "
137 << m_migration_spec
.state
<< ", retrying" << dendl
;
// Unknown header type falls through to this diagnostic.
143 ldout(cct
, 1) << this << " " << __func__
<< ": migration type "
144 << m_migration_spec
.header_type
<< dendl
;
// Continue the refresh on the format-appropriate path.
149 if (m_image_ctx
.old_format
) {
150 send_v1_get_snapshots();
152 send_v2_get_metadata();
// v1 path, step 1: read the whole on-disk header object (length 0 == read
// to end) from header_oid; completes in handle_v1_read_header.
// NOTE(review): trailing aio_operate arguments and the body's closing lines
// (original 170-172) are missing from this extraction.
157 template <typename I
>
158 void RefreshRequest
<I
>::send_v1_read_header() {
159 CephContext
*cct
= m_image_ctx
.cct
;
160 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
161 
162 librados::ObjectReadOperation op
;
// read(off=0, len=0) fetches the full object into the op's out buffer.
163 op
.read(0, 0, nullptr, nullptr);
165 using klass
= RefreshRequest
<I
>;
166 librados::AioCompletion
*comp
= create_rados_callback
<
167 klass
, &klass::handle_v1_read_header
>(this);
169 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
// Completion for the v1 header read.  Validates the buffer: it must be at
// least sizeof(rbd_obj_header_ondisk) and begin with the RBD_HEADER_TEXT
// magic; the RBD_MIGRATE_HEADER_TEXT magic instead marks an image that is
// the source/destination of a live migration.  On success the order, size
// and object prefix are copied out of the on-disk struct, then the refresh
// continues with the migration header (if migrating) or v1 snapshots.
// NOTE(review): several interior lines (initial error branch at original
// 182-183, brace/else lines 193-201, 206, 208, 210-211) are missing from
// this extraction.
175 template <typename I
>
176 Context
*RefreshRequest
<I
>::handle_v1_read_header(int *result
) {
177 CephContext
*cct
= m_image_ctx
.cct
;
178 ldout(cct
, 10) << this << " " << __func__
<< ": " << "r=" << *result
<< dendl
;
180 rbd_obj_header_ondisk v1_header
;
181 bool migrating
= false;
184 } else if (m_out_bl
.length() < sizeof(v1_header
)) {
185 lderr(cct
) << "v1 header too small" << dendl
;
188 } else if (memcmp(RBD_HEADER_TEXT
, m_out_bl
.c_str(),
189 sizeof(RBD_HEADER_TEXT
)) != 0) {
// Not a plain v1 header; check for the migration magic instead.
190 if (memcmp(RBD_MIGRATE_HEADER_TEXT
, m_out_bl
.c_str(),
191 sizeof(RBD_MIGRATE_HEADER_TEXT
)) == 0) {
192 ldout(cct
, 1) << this << " " << __func__
<< ": migration v1 header detected"
196 lderr(cct
) << "unrecognized v1 header" << dendl
;
// Copy the fixed-layout on-disk header and lift out the fields the
// in-memory image context needs.
202 memcpy(&v1_header
, m_out_bl
.c_str(), sizeof(v1_header
));
203 m_order
= v1_header
.options
.order
;
204 m_size
= v1_header
.image_size
;
205 m_object_prefix
= v1_header
.block_name
;
207 send_get_migration_header();
209 send_v1_get_snapshots();
// v1 path, step 2: issue an async old-format snapshot listing against the
// header object; completes in handle_v1_get_snapshots.
// NOTE(review): trailing aio_operate arguments and closing lines (original
// 227-229) are missing from this extraction.
214 template <typename I
>
215 void RefreshRequest
<I
>::send_v1_get_snapshots() {
216 CephContext
*cct
= m_image_ctx
.cct
;
217 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
219 librados::ObjectReadOperation op
;
220 cls_client::old_snapshot_list_start(&op
);
222 using klass
= RefreshRequest
<I
>;
223 librados::AioCompletion
*comp
= create_rados_callback
<
224 klass
, &klass::handle_v1_get_snapshots
>(this);
226 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
// Completion for the v1 snapshot listing.  Decodes parallel name/size
// vectors plus the snap context, validates the snap context, then rebuilds
// m_snap_infos — one entry per snap id, tagged with the user snapshot
// namespace (v1 images have no other namespaces) and an empty timestamp.
// NOTE(review): error-return lines (original 243-250, 253-256) and the
// loop's closing lines (262-265) are missing from this extraction.
232 template <typename I
>
233 Context
*RefreshRequest
<I
>::handle_v1_get_snapshots(int *result
) {
234 CephContext
*cct
= m_image_ctx
.cct
;
235 ldout(cct
, 10) << this << " " << __func__
<< ": " << "r=" << *result
<< dendl
;
237 std::vector
<std::string
> snap_names
;
238 std::vector
<uint64_t> snap_sizes
;
240 auto it
= m_out_bl
.cbegin();
241 *result
= cls_client::old_snapshot_list_finish(&it
, &snap_names
,
242 &snap_sizes
, &m_snapc
);
246 lderr(cct
) << "failed to retrieve v1 snapshots: " << cpp_strerror(*result
)
// A snap context whose seq is older than its newest snap is corrupt.
251 if (!m_snapc
.is_valid()) {
252 lderr(cct
) << "v1 image snap context is invalid" << dendl
;
// Rebuild the snapshot info list from the decoded parallel vectors.
257 m_snap_infos
.clear();
258 for (size_t i
= 0; i
< m_snapc
.snaps
.size(); ++i
) {
259 m_snap_infos
.push_back({m_snapc
.snaps
[i
],
260 {cls::rbd::UserSnapshotNamespace
{}},
261 snap_names
[i
], snap_sizes
[i
], {}, 0});
// v1 path, step 3: fetch advisory lock state (RBD_LOCK_NAME) from the
// header object via the lock class; completes in handle_v1_get_locks.
// NOTE(review): trailing aio_operate arguments and closing lines (original
// 281-283) are missing from this extraction.
268 template <typename I
>
269 void RefreshRequest
<I
>::send_v1_get_locks() {
270 CephContext
*cct
= m_image_ctx
.cct
;
271 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
273 librados::ObjectReadOperation op
;
274 rados::cls::lock::get_lock_info_start(&op
, RBD_LOCK_NAME
);
276 using klass
= RefreshRequest
<I
>;
277 librados::AioCompletion
*comp
= create_rados_callback
<
278 klass
, &klass::handle_v1_get_locks
>(this);
280 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
// Completion for the v1 lock-info fetch.  Decodes the locker map, lock type
// and tag, and records whether the image is exclusively locked.
// NOTE(review): the error-return lines after the lderr (original ~303+) and
// the success/return path are missing from this extraction.
286 template <typename I
>
287 Context
*RefreshRequest
<I
>::handle_v1_get_locks(int *result
) {
288 CephContext
*cct
= m_image_ctx
.cct
;
289 ldout(cct
, 10) << this << " " << __func__
<< ": "
290 << "r=" << *result
<< dendl
;
293 auto it
= m_out_bl
.cbegin();
294 ClsLockType lock_type
;
295 *result
= rados::cls::lock::get_lock_info_finish(&it
, &m_lockers
,
296 &lock_type
, &m_lock_tag
);
298 m_exclusive_locked
= (lock_type
== LOCK_EXCLUSIVE
);
302 lderr(cct
) << "failed to retrieve locks: " << cpp_strerror(*result
)
// v1 path, final async hop: bounce through the op work queue so that the
// collected state is applied from a plain context (never from within a
// rados callback thread); completes in handle_v1_apply.
311 template <typename I
>
312 void RefreshRequest
<I
>::send_v1_apply() {
313 CephContext
*cct
= m_image_ctx
.cct
;
314 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
316 // ensure we are not in a rados callback when applying updates
317 using klass
= RefreshRequest
<I
>;
318 Context
*ctx
= create_context_callback
<
319 klass
, &klass::handle_v1_apply
>(this);
320 m_image_ctx
.op_work_queue
->queue(ctx
, 0);
// Applies the refreshed v1 state (the apply call itself — presumably
// apply_v1(result), original ~327-328 — is missing from this extraction)
// and then chains into the final AIO flush.
323 template <typename I
>
324 Context
*RefreshRequest
<I
>::handle_v1_apply(int *result
) {
325 CephContext
*cct
= m_image_ctx
.cct
;
326 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
329 return send_flush_aio();
332 template <typename I
>
333 void RefreshRequest
<I
>::send_v2_get_mutable_metadata() {
334 CephContext
*cct
= m_image_ctx
.cct
;
335 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
339 RWLock::RLocker
snap_locker(m_image_ctx
.snap_lock
);
340 snap_id
= m_image_ctx
.snap_id
;
343 bool read_only
= m_image_ctx
.read_only
|| snap_id
!= CEPH_NOSNAP
;
344 librados::ObjectReadOperation op
;
345 cls_client::get_size_start(&op
, CEPH_NOSNAP
);
346 cls_client::get_features_start(&op
, read_only
);
347 cls_client::get_flags_start(&op
, CEPH_NOSNAP
);
348 cls_client::get_snapcontext_start(&op
);
349 rados::cls::lock::get_lock_info_start(&op
, RBD_LOCK_NAME
);
351 using klass
= RefreshRequest
<I
>;
352 librados::AioCompletion
*comp
= create_rados_callback
<
353 klass
, &klass::handle_v2_get_mutable_metadata
>(this);
355 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
361 template <typename I
>
362 Context
*RefreshRequest
<I
>::handle_v2_get_mutable_metadata(int *result
) {
363 CephContext
*cct
= m_image_ctx
.cct
;
364 ldout(cct
, 10) << this << " " << __func__
<< ": "
365 << "r=" << *result
<< dendl
;
367 auto it
= m_out_bl
.cbegin();
370 *result
= cls_client::get_size_finish(&it
, &m_size
, &order
);
374 *result
= cls_client::get_features_finish(&it
, &m_features
,
375 &m_incompatible_features
);
379 *result
= cls_client::get_flags_finish(&it
, &m_flags
);
383 *result
= cls_client::get_snapcontext_finish(&it
, &m_snapc
);
387 ClsLockType lock_type
= LOCK_NONE
;
388 *result
= rados::cls::lock::get_lock_info_finish(&it
, &m_lockers
,
389 &lock_type
, &m_lock_tag
);
391 m_exclusive_locked
= (lock_type
== LOCK_EXCLUSIVE
);
396 lderr(cct
) << "failed to retrieve mutable metadata: "
397 << cpp_strerror(*result
) << dendl
;
401 uint64_t unsupported
= m_incompatible_features
& ~RBD_FEATURES_ALL
;
402 if (unsupported
!= 0ULL) {
403 lderr(cct
) << "Image uses unsupported features: " << unsupported
<< dendl
;
408 if (!m_snapc
.is_valid()) {
409 lderr(cct
) << "image snap context is invalid!" << dendl
;
414 if (m_acquiring_lock
&& (m_features
& RBD_FEATURE_EXCLUSIVE_LOCK
) == 0) {
415 ldout(cct
, 5) << "ignoring dynamically disabled exclusive lock" << dendl
;
416 m_features
|= RBD_FEATURE_EXCLUSIVE_LOCK
;
417 m_incomplete_update
= true;
420 send_v2_get_parent();
424 template <typename I
>
425 void RefreshRequest
<I
>::send_v2_get_parent() {
426 // NOTE: remove support when Mimic is EOLed
427 CephContext
*cct
= m_image_ctx
.cct
;
428 ldout(cct
, 10) << this << " " << __func__
<< ": legacy=" << m_legacy_parent
431 librados::ObjectReadOperation op
;
432 if (!m_legacy_parent
) {
433 cls_client::parent_get_start(&op
);
434 cls_client::parent_overlap_get_start(&op
, CEPH_NOSNAP
);
436 cls_client::get_parent_start(&op
, CEPH_NOSNAP
);
439 auto aio_comp
= create_rados_callback
<
440 RefreshRequest
<I
>, &RefreshRequest
<I
>::handle_v2_get_parent
>(this);
442 m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, aio_comp
, &op
,
447 template <typename I
>
448 Context
*RefreshRequest
<I
>::handle_v2_get_parent(int *result
) {
449 // NOTE: remove support when Mimic is EOLed
450 CephContext
*cct
= m_image_ctx
.cct
;
451 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
453 auto it
= m_out_bl
.cbegin();
454 if (!m_legacy_parent
) {
456 *result
= cls_client::parent_get_finish(&it
, &m_parent_md
.spec
);
459 std::optional
<uint64_t> parent_overlap
;
461 *result
= cls_client::parent_overlap_get_finish(&it
, &parent_overlap
);
464 if (*result
== 0 && parent_overlap
) {
465 m_parent_md
.overlap
= *parent_overlap
;
466 m_head_parent_overlap
= true;
468 } else if (*result
== 0) {
469 *result
= cls_client::get_parent_finish(&it
, &m_parent_md
.spec
,
470 &m_parent_md
.overlap
);
471 m_head_parent_overlap
= true;
474 if (*result
== -EOPNOTSUPP
&& !m_legacy_parent
) {
475 ldout(cct
, 10) << "retrying using legacy parent method" << dendl
;
476 m_legacy_parent
= true;
477 send_v2_get_parent();
480 lderr(cct
) << "failed to retrieve parent: " << cpp_strerror(*result
)
485 if ((m_features
& RBD_FEATURE_MIGRATING
) != 0) {
486 ldout(cct
, 1) << "migrating feature set" << dendl
;
487 send_get_migration_header();
491 send_v2_get_metadata();
495 template <typename I
>
496 void RefreshRequest
<I
>::send_v2_get_metadata() {
497 CephContext
*cct
= m_image_ctx
.cct
;
498 ldout(cct
, 10) << this << " " << __func__
<< ": "
499 << "start_key=" << m_last_metadata_key
<< dendl
;
501 librados::ObjectReadOperation op
;
502 cls_client::metadata_list_start(&op
, m_last_metadata_key
, MAX_METADATA_ITEMS
);
504 using klass
= RefreshRequest
<I
>;
505 librados::AioCompletion
*comp
=
506 create_rados_callback
<klass
, &klass::handle_v2_get_metadata
>(this);
508 m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
513 template <typename I
>
514 Context
*RefreshRequest
<I
>::handle_v2_get_metadata(int *result
) {
515 CephContext
*cct
= m_image_ctx
.cct
;
516 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
518 std::map
<std::string
, bufferlist
> metadata
;
520 auto it
= m_out_bl
.cbegin();
521 *result
= cls_client::metadata_list_finish(&it
, &metadata
);
525 lderr(cct
) << "failed to retrieve metadata: " << cpp_strerror(*result
)
530 if (!metadata
.empty()) {
531 m_metadata
.insert(metadata
.begin(), metadata
.end());
532 m_last_metadata_key
= metadata
.rbegin()->first
;
533 if (boost::starts_with(m_last_metadata_key
,
534 ImageCtx::METADATA_CONF_PREFIX
)) {
535 send_v2_get_metadata();
540 m_last_metadata_key
.clear();
541 send_v2_get_pool_metadata();
545 template <typename I
>
546 void RefreshRequest
<I
>::send_v2_get_pool_metadata() {
547 CephContext
*cct
= m_image_ctx
.cct
;
548 ldout(cct
, 10) << this << " " << __func__
<< ": "
549 << "start_key=" << m_last_metadata_key
<< dendl
;
551 librados::ObjectReadOperation op
;
552 cls_client::metadata_list_start(&op
, m_last_metadata_key
, MAX_METADATA_ITEMS
);
554 using klass
= RefreshRequest
<I
>;
555 librados::AioCompletion
*comp
=
556 create_rados_callback
<klass
, &klass::handle_v2_get_pool_metadata
>(this);
558 m_pool_metadata_io_ctx
.aio_operate(RBD_INFO
, comp
, &op
, &m_out_bl
);
562 template <typename I
>
563 Context
*RefreshRequest
<I
>::handle_v2_get_pool_metadata(int *result
) {
564 CephContext
*cct
= m_image_ctx
.cct
;
565 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
567 std::map
<std::string
, bufferlist
> metadata
;
569 auto it
= m_out_bl
.cbegin();
570 *result
= cls_client::metadata_list_finish(&it
, &metadata
);
573 if (*result
== -EOPNOTSUPP
|| *result
== -ENOENT
) {
574 ldout(cct
, 10) << "pool metadata not supported by OSD" << dendl
;
575 } else if (*result
< 0) {
576 lderr(cct
) << "failed to retrieve pool metadata: " << cpp_strerror(*result
)
581 if (!metadata
.empty()) {
582 m_metadata
.insert(metadata
.begin(), metadata
.end());
583 m_last_metadata_key
= metadata
.rbegin()->first
;
584 if (boost::starts_with(m_last_metadata_key
,
585 ImageCtx::METADATA_CONF_PREFIX
)) {
586 send_v2_get_pool_metadata();
591 bool thread_safe
= m_image_ctx
.image_watcher
->is_unregistered();
592 m_image_ctx
.apply_metadata(m_metadata
, thread_safe
);
594 send_v2_get_op_features();
598 template <typename I
>
599 void RefreshRequest
<I
>::send_v2_get_op_features() {
600 if ((m_features
& RBD_FEATURE_OPERATIONS
) == 0LL) {
605 CephContext
*cct
= m_image_ctx
.cct
;
606 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
608 librados::ObjectReadOperation op
;
609 cls_client::op_features_get_start(&op
);
611 librados::AioCompletion
*comp
= create_rados_callback
<
612 RefreshRequest
<I
>, &RefreshRequest
<I
>::handle_v2_get_op_features
>(this);
614 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
620 template <typename I
>
621 Context
*RefreshRequest
<I
>::handle_v2_get_op_features(int *result
) {
622 CephContext
*cct
= m_image_ctx
.cct
;
623 ldout(cct
, 10) << this << " " << __func__
<< ": "
624 << "r=" << *result
<< dendl
;
626 // -EOPNOTSUPP handler not required since feature bit implies OSD
627 // supports the method
629 auto it
= m_out_bl
.cbegin();
630 cls_client::op_features_get_finish(&it
, &m_op_features
);
631 } else if (*result
< 0) {
632 lderr(cct
) << "failed to retrieve op features: " << cpp_strerror(*result
)
641 template <typename I
>
642 void RefreshRequest
<I
>::send_v2_get_group() {
643 CephContext
*cct
= m_image_ctx
.cct
;
644 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
646 librados::ObjectReadOperation op
;
647 cls_client::image_group_get_start(&op
);
649 using klass
= RefreshRequest
<I
>;
650 librados::AioCompletion
*comp
= create_rados_callback
<
651 klass
, &klass::handle_v2_get_group
>(this);
653 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
659 template <typename I
>
660 Context
*RefreshRequest
<I
>::handle_v2_get_group(int *result
) {
661 CephContext
*cct
= m_image_ctx
.cct
;
662 ldout(cct
, 10) << this << " " << __func__
<< ": "
663 << "r=" << *result
<< dendl
;
666 auto it
= m_out_bl
.cbegin();
667 cls_client::image_group_get_finish(&it
, &m_group_spec
);
669 if (*result
< 0 && *result
!= -EOPNOTSUPP
) {
670 lderr(cct
) << "failed to retrieve group: " << cpp_strerror(*result
)
675 send_v2_get_snapshots();
679 template <typename I
>
680 void RefreshRequest
<I
>::send_v2_get_snapshots() {
681 m_snap_infos
.resize(m_snapc
.snaps
.size());
682 m_snap_flags
.resize(m_snapc
.snaps
.size());
683 m_snap_parents
.resize(m_snapc
.snaps
.size());
684 m_snap_protection
.resize(m_snapc
.snaps
.size());
686 if (m_snapc
.snaps
.empty()) {
687 send_v2_refresh_parent();
691 CephContext
*cct
= m_image_ctx
.cct
;
692 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
694 librados::ObjectReadOperation op
;
695 for (auto snap_id
: m_snapc
.snaps
) {
696 if (m_legacy_snapshot
) {
697 /// NOTE: remove after Luminous is retired
698 cls_client::get_snapshot_name_start(&op
, snap_id
);
699 cls_client::get_size_start(&op
, snap_id
);
700 cls_client::get_snapshot_timestamp_start(&op
, snap_id
);
702 cls_client::snapshot_get_start(&op
, snap_id
);
705 if (m_legacy_parent
) {
706 cls_client::get_parent_start(&op
, snap_id
);
708 cls_client::parent_overlap_get_start(&op
, snap_id
);
711 cls_client::get_flags_start(&op
, snap_id
);
712 cls_client::get_protection_status_start(&op
, snap_id
);
715 using klass
= RefreshRequest
<I
>;
716 librados::AioCompletion
*comp
= create_rados_callback
<
717 klass
, &klass::handle_v2_get_snapshots
>(this);
719 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
725 template <typename I
>
726 Context
*RefreshRequest
<I
>::handle_v2_get_snapshots(int *result
) {
727 CephContext
*cct
= m_image_ctx
.cct
;
728 ldout(cct
, 10) << this << " " << __func__
<< ": " << "r=" << *result
<< dendl
;
730 auto it
= m_out_bl
.cbegin();
731 for (size_t i
= 0; i
< m_snapc
.snaps
.size(); ++i
) {
732 if (m_legacy_snapshot
) {
733 /// NOTE: remove after Luminous is retired
734 std::string snap_name
;
736 *result
= cls_client::get_snapshot_name_finish(&it
, &snap_name
);
742 *result
= cls_client::get_size_finish(&it
, &snap_size
, &order
);
745 utime_t snap_timestamp
;
747 *result
= cls_client::get_snapshot_timestamp_finish(&it
,
752 m_snap_infos
[i
] = {m_snapc
.snaps
[i
],
753 {cls::rbd::UserSnapshotNamespace
{}},
754 snap_name
, snap_size
, snap_timestamp
, 0};
756 } else if (*result
>= 0) {
757 *result
= cls_client::snapshot_get_finish(&it
, &m_snap_infos
[i
]);
761 if (m_legacy_parent
) {
762 *result
= cls_client::get_parent_finish(&it
, &m_snap_parents
[i
].spec
,
763 &m_snap_parents
[i
].overlap
);
765 std::optional
<uint64_t> parent_overlap
;
766 *result
= cls_client::parent_overlap_get_finish(&it
, &parent_overlap
);
767 if (*result
== 0 && parent_overlap
&& m_parent_md
.spec
.pool_id
> -1) {
768 m_snap_parents
[i
].spec
= m_parent_md
.spec
;
769 m_snap_parents
[i
].overlap
= *parent_overlap
;
775 *result
= cls_client::get_flags_finish(&it
, &m_snap_flags
[i
]);
779 *result
= cls_client::get_protection_status_finish(
780 &it
, &m_snap_protection
[i
]);
788 if (*result
== -ENOENT
) {
789 ldout(cct
, 10) << "out-of-sync snapshot state detected" << dendl
;
790 send_v2_get_mutable_metadata();
792 } else if (!m_legacy_snapshot
&& *result
== -EOPNOTSUPP
) {
793 ldout(cct
, 10) << "retrying using legacy snapshot methods" << dendl
;
794 m_legacy_snapshot
= true;
795 send_v2_get_snapshots();
797 } else if (*result
< 0) {
798 lderr(cct
) << "failed to retrieve snapshots: " << cpp_strerror(*result
)
803 send_v2_refresh_parent();
807 template <typename I
>
808 void RefreshRequest
<I
>::send_v2_refresh_parent() {
810 RWLock::RLocker
snap_locker(m_image_ctx
.snap_lock
);
811 RWLock::RLocker
parent_locker(m_image_ctx
.parent_lock
);
813 ParentImageInfo parent_md
;
814 MigrationInfo migration_info
;
815 int r
= get_parent_info(m_image_ctx
.snap_id
, &parent_md
, &migration_info
);
816 if (!m_skip_open_parent_image
&& (r
< 0 ||
817 RefreshParentRequest
<I
>::is_refresh_required(m_image_ctx
, parent_md
,
819 CephContext
*cct
= m_image_ctx
.cct
;
820 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
822 using klass
= RefreshRequest
<I
>;
823 Context
*ctx
= create_context_callback
<
824 klass
, &klass::handle_v2_refresh_parent
>(this);
825 m_refresh_parent
= RefreshParentRequest
<I
>::create(
826 m_image_ctx
, parent_md
, migration_info
, ctx
);
830 if (m_refresh_parent
!= nullptr) {
831 m_refresh_parent
->send();
833 send_v2_init_exclusive_lock();
837 template <typename I
>
838 Context
*RefreshRequest
<I
>::handle_v2_refresh_parent(int *result
) {
839 CephContext
*cct
= m_image_ctx
.cct
;
840 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
843 lderr(cct
) << "failed to refresh parent image: " << cpp_strerror(*result
)
850 send_v2_init_exclusive_lock();
854 template <typename I
>
855 void RefreshRequest
<I
>::send_v2_init_exclusive_lock() {
856 if ((m_features
& RBD_FEATURE_EXCLUSIVE_LOCK
) == 0 ||
857 m_image_ctx
.read_only
|| !m_image_ctx
.snap_name
.empty() ||
858 m_image_ctx
.exclusive_lock
!= nullptr) {
859 send_v2_open_object_map();
863 // implies exclusive lock dynamically enabled or image open in-progress
864 CephContext
*cct
= m_image_ctx
.cct
;
865 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
867 // TODO need safe shut down
868 m_exclusive_lock
= m_image_ctx
.create_exclusive_lock();
870 using klass
= RefreshRequest
<I
>;
871 Context
*ctx
= create_context_callback
<
872 klass
, &klass::handle_v2_init_exclusive_lock
>(this);
874 RWLock::RLocker
owner_locker(m_image_ctx
.owner_lock
);
875 m_exclusive_lock
->init(m_features
, ctx
);
878 template <typename I
>
879 Context
*RefreshRequest
<I
>::handle_v2_init_exclusive_lock(int *result
) {
880 CephContext
*cct
= m_image_ctx
.cct
;
881 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
884 lderr(cct
) << "failed to initialize exclusive lock: "
885 << cpp_strerror(*result
) << dendl
;
889 // object map and journal will be opened when exclusive lock is
890 // acquired (if features are enabled)
895 template <typename I
>
896 void RefreshRequest
<I
>::send_v2_open_journal() {
897 bool journal_disabled
= (
898 (m_features
& RBD_FEATURE_JOURNALING
) == 0 ||
899 m_image_ctx
.read_only
||
900 !m_image_ctx
.snap_name
.empty() ||
901 m_image_ctx
.journal
!= nullptr ||
902 m_image_ctx
.exclusive_lock
== nullptr ||
903 !m_image_ctx
.exclusive_lock
->is_lock_owner());
904 bool journal_disabled_by_policy
;
906 RWLock::RLocker
snap_locker(m_image_ctx
.snap_lock
);
907 journal_disabled_by_policy
= (
909 m_image_ctx
.get_journal_policy()->journal_disabled());
912 if (journal_disabled
|| journal_disabled_by_policy
) {
913 // journal dynamically enabled -- doesn't own exclusive lock
914 if ((m_features
& RBD_FEATURE_JOURNALING
) != 0 &&
915 !journal_disabled_by_policy
&&
916 m_image_ctx
.exclusive_lock
!= nullptr &&
917 m_image_ctx
.journal
== nullptr) {
918 m_image_ctx
.io_work_queue
->set_require_lock(librbd::io::DIRECTION_BOTH
,
921 send_v2_block_writes();
925 // implies journal dynamically enabled since ExclusiveLock will init
926 // the journal upon acquiring the lock
927 CephContext
*cct
= m_image_ctx
.cct
;
928 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
930 using klass
= RefreshRequest
<I
>;
931 Context
*ctx
= create_context_callback
<
932 klass
, &klass::handle_v2_open_journal
>(this);
934 // TODO need safe close
935 m_journal
= m_image_ctx
.create_journal();
936 m_journal
->open(ctx
);
939 template <typename I
>
940 Context
*RefreshRequest
<I
>::handle_v2_open_journal(int *result
) {
941 CephContext
*cct
= m_image_ctx
.cct
;
942 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
945 lderr(cct
) << "failed to initialize journal: " << cpp_strerror(*result
)
950 send_v2_block_writes();
954 template <typename I
>
955 void RefreshRequest
<I
>::send_v2_block_writes() {
956 bool disabled_journaling
= false;
958 RWLock::RLocker
snap_locker(m_image_ctx
.snap_lock
);
959 disabled_journaling
= ((m_features
& RBD_FEATURE_EXCLUSIVE_LOCK
) != 0 &&
960 (m_features
& RBD_FEATURE_JOURNALING
) == 0 &&
961 m_image_ctx
.journal
!= nullptr);
964 if (!disabled_journaling
) {
969 CephContext
*cct
= m_image_ctx
.cct
;
970 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
972 // we need to block writes temporarily to avoid in-flight journal
974 m_blocked_writes
= true;
975 Context
*ctx
= create_context_callback
<
976 RefreshRequest
<I
>, &RefreshRequest
<I
>::handle_v2_block_writes
>(this);
978 RWLock::RLocker
owner_locker(m_image_ctx
.owner_lock
);
979 m_image_ctx
.io_work_queue
->block_writes(ctx
);
982 template <typename I
>
983 Context
*RefreshRequest
<I
>::handle_v2_block_writes(int *result
) {
984 CephContext
*cct
= m_image_ctx
.cct
;
985 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
988 lderr(cct
) << "failed to block writes: " << cpp_strerror(*result
)
996 template <typename I
>
997 void RefreshRequest
<I
>::send_v2_open_object_map() {
998 if ((m_features
& RBD_FEATURE_OBJECT_MAP
) == 0 ||
999 m_image_ctx
.object_map
!= nullptr ||
1000 (m_image_ctx
.snap_name
.empty() &&
1001 (m_image_ctx
.read_only
||
1002 m_image_ctx
.exclusive_lock
== nullptr ||
1003 !m_image_ctx
.exclusive_lock
->is_lock_owner()))) {
1004 send_v2_open_journal();
1008 // implies object map dynamically enabled or image open in-progress
1009 // since SetSnapRequest loads the object map for a snapshot and
1010 // ExclusiveLock loads the object map for HEAD
1011 CephContext
*cct
= m_image_ctx
.cct
;
1012 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
1014 if (m_image_ctx
.snap_name
.empty()) {
1015 m_object_map
= m_image_ctx
.create_object_map(CEPH_NOSNAP
);
1017 for (size_t snap_idx
= 0; snap_idx
< m_snap_infos
.size(); ++snap_idx
) {
1018 if (m_snap_infos
[snap_idx
].name
== m_image_ctx
.snap_name
) {
1019 m_object_map
= m_image_ctx
.create_object_map(
1020 m_snapc
.snaps
[snap_idx
].val
);
1025 if (m_object_map
== nullptr) {
1026 lderr(cct
) << "failed to locate snapshot: " << m_image_ctx
.snap_name
1028 send_v2_open_journal();
1033 using klass
= RefreshRequest
<I
>;
1034 Context
*ctx
= create_context_callback
<
1035 klass
, &klass::handle_v2_open_object_map
>(this);
1036 m_object_map
->open(ctx
);
1039 template <typename I
>
1040 Context
*RefreshRequest
<I
>::handle_v2_open_object_map(int *result
) {
1041 CephContext
*cct
= m_image_ctx
.cct
;
1042 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
1045 lderr(cct
) << "failed to open object map: " << cpp_strerror(*result
)
1047 delete m_object_map
;
1048 m_object_map
= nullptr;
1050 if (*result
!= -EFBIG
) {
1051 save_result(result
);
1055 send_v2_open_journal();
1059 template <typename I
>
1060 void RefreshRequest
<I
>::send_v2_apply() {
1061 CephContext
*cct
= m_image_ctx
.cct
;
1062 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
1064 // ensure we are not in a rados callback when applying updates
1065 using klass
= RefreshRequest
<I
>;
1066 Context
*ctx
= create_context_callback
<
1067 klass
, &klass::handle_v2_apply
>(this);
1068 m_image_ctx
.op_work_queue
->queue(ctx
, 0);
1071 template <typename I
>
1072 Context
*RefreshRequest
<I
>::handle_v2_apply(int *result
) {
1073 CephContext
*cct
= m_image_ctx
.cct
;
1074 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
1078 return send_v2_finalize_refresh_parent();
1081 template <typename I
>
1082 Context
*RefreshRequest
<I
>::send_v2_finalize_refresh_parent() {
1083 if (m_refresh_parent
== nullptr) {
1084 return send_v2_shut_down_exclusive_lock();
1087 CephContext
*cct
= m_image_ctx
.cct
;
1088 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
1090 using klass
= RefreshRequest
<I
>;
1091 Context
*ctx
= create_context_callback
<
1092 klass
, &klass::handle_v2_finalize_refresh_parent
>(this);
1093 m_refresh_parent
->finalize(ctx
);
1097 template <typename I
>
1098 Context
*RefreshRequest
<I
>::handle_v2_finalize_refresh_parent(int *result
) {
1099 CephContext
*cct
= m_image_ctx
.cct
;
1100 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
1102 ceph_assert(m_refresh_parent
!= nullptr);
1103 delete m_refresh_parent
;
1104 m_refresh_parent
= nullptr;
1106 return send_v2_shut_down_exclusive_lock();
1109 template <typename I
>
1110 Context
*RefreshRequest
<I
>::send_v2_shut_down_exclusive_lock() {
1111 if (m_exclusive_lock
== nullptr) {
1112 return send_v2_close_journal();
1115 CephContext
*cct
= m_image_ctx
.cct
;
1116 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
1118 // exclusive lock feature was dynamically disabled. in-flight IO will be
1119 // flushed and in-flight requests will be canceled before releasing lock
1120 using klass
= RefreshRequest
<I
>;
1121 Context
*ctx
= create_context_callback
<
1122 klass
, &klass::handle_v2_shut_down_exclusive_lock
>(this);
1123 m_exclusive_lock
->shut_down(ctx
);
1127 template <typename I
>
1128 Context
*RefreshRequest
<I
>::handle_v2_shut_down_exclusive_lock(int *result
) {
1129 CephContext
*cct
= m_image_ctx
.cct
;
1130 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
1133 lderr(cct
) << "failed to shut down exclusive lock: "
1134 << cpp_strerror(*result
) << dendl
;
1135 save_result(result
);
1139 RWLock::WLocker
owner_locker(m_image_ctx
.owner_lock
);
1140 ceph_assert(m_image_ctx
.exclusive_lock
== nullptr);
1143 ceph_assert(m_exclusive_lock
!= nullptr);
1144 delete m_exclusive_lock
;
1145 m_exclusive_lock
= nullptr;
1147 return send_v2_close_journal();
1150 template <typename I
>
1151 Context
*RefreshRequest
<I
>::send_v2_close_journal() {
1152 if (m_journal
== nullptr) {
1153 return send_v2_close_object_map();
1156 CephContext
*cct
= m_image_ctx
.cct
;
1157 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
1159 // journal feature was dynamically disabled
1160 using klass
= RefreshRequest
<I
>;
1161 Context
*ctx
= create_context_callback
<
1162 klass
, &klass::handle_v2_close_journal
>(this);
1163 m_journal
->close(ctx
);
1167 template <typename I
>
1168 Context
*RefreshRequest
<I
>::handle_v2_close_journal(int *result
) {
1169 CephContext
*cct
= m_image_ctx
.cct
;
1170 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
1173 save_result(result
);
1174 lderr(cct
) << "failed to close journal: " << cpp_strerror(*result
)
1178 ceph_assert(m_journal
!= nullptr);
1180 m_journal
= nullptr;
1182 ceph_assert(m_blocked_writes
);
1183 m_blocked_writes
= false;
1185 m_image_ctx
.io_work_queue
->unblock_writes();
1186 return send_v2_close_object_map();
1189 template <typename I
>
1190 Context
*RefreshRequest
<I
>::send_v2_close_object_map() {
1191 if (m_object_map
== nullptr) {
1192 return send_flush_aio();
1195 CephContext
*cct
= m_image_ctx
.cct
;
1196 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
1198 // object map was dynamically disabled
1199 using klass
= RefreshRequest
<I
>;
1200 Context
*ctx
= create_context_callback
<
1201 klass
, &klass::handle_v2_close_object_map
>(this);
1202 m_object_map
->close(ctx
);
1206 template <typename I
>
1207 Context
*RefreshRequest
<I
>::handle_v2_close_object_map(int *result
) {
1208 CephContext
*cct
= m_image_ctx
.cct
;
1209 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
1212 lderr(cct
) << "failed to close object map: " << cpp_strerror(*result
)
1216 ceph_assert(m_object_map
!= nullptr);
1217 delete m_object_map
;
1218 m_object_map
= nullptr;
1220 return send_flush_aio();
1223 template <typename I
>
1224 Context
*RefreshRequest
<I
>::send_flush_aio() {
1225 if (m_incomplete_update
&& m_error_result
== 0) {
1226 // if this was a partial refresh, notify ImageState
1227 m_error_result
= -ERESTART
;
1231 CephContext
*cct
= m_image_ctx
.cct
;
1232 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
1234 RWLock::RLocker
owner_locker(m_image_ctx
.owner_lock
);
1235 auto ctx
= create_context_callback
<
1236 RefreshRequest
<I
>, &RefreshRequest
<I
>::handle_flush_aio
>(this);
1237 auto aio_comp
= io::AioCompletion::create(
1238 ctx
, util::get_image_ctx(&m_image_ctx
), io::AIO_TYPE_FLUSH
);
1239 auto req
= io::ImageDispatchSpec
<I
>::create_flush_request(
1240 m_image_ctx
, aio_comp
, io::FLUSH_SOURCE_INTERNAL
, {});
1244 } else if (m_error_result
< 0) {
1245 // propagate saved error back to caller
1246 Context
*ctx
= create_context_callback
<
1247 RefreshRequest
<I
>, &RefreshRequest
<I
>::handle_error
>(this);
1248 m_image_ctx
.op_work_queue
->queue(ctx
, 0);
1255 template <typename I
>
1256 Context
*RefreshRequest
<I
>::handle_flush_aio(int *result
) {
1257 CephContext
*cct
= m_image_ctx
.cct
;
1258 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
1261 lderr(cct
) << "failed to flush pending AIO: " << cpp_strerror(*result
)
1265 return handle_error(result
);
1268 template <typename I
>
1269 Context
*RefreshRequest
<I
>::handle_error(int *result
) {
1270 if (m_error_result
< 0) {
1271 *result
= m_error_result
;
1273 CephContext
*cct
= m_image_ctx
.cct
;
1274 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
1279 template <typename I
>
1280 void RefreshRequest
<I
>::apply() {
1281 CephContext
*cct
= m_image_ctx
.cct
;
1282 ldout(cct
, 20) << this << " " << __func__
<< dendl
;
1284 RWLock::WLocker
owner_locker(m_image_ctx
.owner_lock
);
1285 RWLock::WLocker
md_locker(m_image_ctx
.md_lock
);
1288 RWLock::WLocker
snap_locker(m_image_ctx
.snap_lock
);
1289 RWLock::WLocker
parent_locker(m_image_ctx
.parent_lock
);
1291 m_image_ctx
.size
= m_size
;
1292 m_image_ctx
.lockers
= m_lockers
;
1293 m_image_ctx
.lock_tag
= m_lock_tag
;
1294 m_image_ctx
.exclusive_locked
= m_exclusive_locked
;
1296 std::map
<uint64_t, uint64_t> migration_reverse_snap_seq
;
1298 if (m_image_ctx
.old_format
) {
1299 m_image_ctx
.order
= m_order
;
1300 m_image_ctx
.features
= 0;
1301 m_image_ctx
.flags
= 0;
1302 m_image_ctx
.op_features
= 0;
1303 m_image_ctx
.operations_disabled
= false;
1304 m_image_ctx
.object_prefix
= std::move(m_object_prefix
);
1305 m_image_ctx
.init_layout();
1307 // HEAD revision doesn't have a defined overlap so it's only
1308 // applicable to snapshots
1309 if (!m_head_parent_overlap
) {
1313 m_image_ctx
.features
= m_features
;
1314 m_image_ctx
.flags
= m_flags
;
1315 m_image_ctx
.op_features
= m_op_features
;
1316 m_image_ctx
.operations_disabled
= (
1317 (m_op_features
& ~RBD_OPERATION_FEATURES_ALL
) != 0ULL);
1318 m_image_ctx
.group_spec
= m_group_spec
;
1319 if (get_migration_info(&m_image_ctx
.parent_md
,
1320 &m_image_ctx
.migration_info
)) {
1321 for (auto it
: m_image_ctx
.migration_info
.snap_map
) {
1322 migration_reverse_snap_seq
[it
.second
.front()] = it
.first
;
1325 m_image_ctx
.parent_md
= m_parent_md
;
1326 m_image_ctx
.migration_info
= {};
1330 for (size_t i
= 0; i
< m_snapc
.snaps
.size(); ++i
) {
1331 std::vector
<librados::snap_t
>::const_iterator it
= std::find(
1332 m_image_ctx
.snaps
.begin(), m_image_ctx
.snaps
.end(),
1333 m_snapc
.snaps
[i
].val
);
1334 if (it
== m_image_ctx
.snaps
.end()) {
1336 ldout(cct
, 20) << "new snapshot id=" << m_snapc
.snaps
[i
].val
1337 << " name=" << m_snap_infos
[i
].name
1338 << " size=" << m_snap_infos
[i
].image_size
1343 m_image_ctx
.snaps
.clear();
1344 m_image_ctx
.snap_info
.clear();
1345 m_image_ctx
.snap_ids
.clear();
1346 auto overlap
= m_image_ctx
.parent_md
.overlap
;
1347 for (size_t i
= 0; i
< m_snapc
.snaps
.size(); ++i
) {
1348 uint64_t flags
= m_image_ctx
.old_format
? 0 : m_snap_flags
[i
];
1349 uint8_t protection_status
= m_image_ctx
.old_format
?
1350 static_cast<uint8_t>(RBD_PROTECTION_STATUS_UNPROTECTED
) :
1351 m_snap_protection
[i
];
1352 ParentImageInfo parent
;
1353 if (!m_image_ctx
.old_format
) {
1354 if (!m_image_ctx
.migration_info
.empty()) {
1355 parent
= m_image_ctx
.parent_md
;
1356 auto it
= migration_reverse_snap_seq
.find(m_snapc
.snaps
[i
].val
);
1357 if (it
!= migration_reverse_snap_seq
.end()) {
1358 parent
.spec
.snap_id
= it
->second
;
1359 parent
.overlap
= m_snap_infos
[i
].image_size
;
1361 overlap
= std::min(overlap
, m_snap_infos
[i
].image_size
);
1362 parent
.overlap
= overlap
;
1365 parent
= m_snap_parents
[i
];
1368 m_image_ctx
.add_snap(m_snap_infos
[i
].snapshot_namespace
,
1369 m_snap_infos
[i
].name
, m_snapc
.snaps
[i
].val
,
1370 m_snap_infos
[i
].image_size
, parent
,
1371 protection_status
, flags
,
1372 m_snap_infos
[i
].timestamp
);
1374 m_image_ctx
.parent_md
.overlap
= std::min(overlap
, m_image_ctx
.size
);
1375 m_image_ctx
.snapc
= m_snapc
;
1377 if (m_image_ctx
.snap_id
!= CEPH_NOSNAP
&&
1378 m_image_ctx
.get_snap_id(m_image_ctx
.snap_namespace
,
1379 m_image_ctx
.snap_name
) != m_image_ctx
.snap_id
) {
1380 lderr(cct
) << "tried to read from a snapshot that no longer exists: "
1381 << m_image_ctx
.snap_name
<< dendl
;
1382 m_image_ctx
.snap_exists
= false;
1385 if (m_refresh_parent
!= nullptr) {
1386 m_refresh_parent
->apply();
1388 m_image_ctx
.data_ctx
.selfmanaged_snap_set_write_ctx(m_image_ctx
.snapc
.seq
,
1391 // handle dynamically enabled / disabled features
1392 if (m_image_ctx
.exclusive_lock
!= nullptr &&
1393 !m_image_ctx
.test_features(RBD_FEATURE_EXCLUSIVE_LOCK
,
1394 m_image_ctx
.snap_lock
)) {
1395 // disabling exclusive lock will automatically handle closing
1396 // object map and journaling
1397 ceph_assert(m_exclusive_lock
== nullptr);
1398 m_exclusive_lock
= m_image_ctx
.exclusive_lock
;
1400 if (m_exclusive_lock
!= nullptr) {
1401 ceph_assert(m_image_ctx
.exclusive_lock
== nullptr);
1402 std::swap(m_exclusive_lock
, m_image_ctx
.exclusive_lock
);
1404 if (!m_image_ctx
.test_features(RBD_FEATURE_JOURNALING
,
1405 m_image_ctx
.snap_lock
)) {
1406 if (!m_image_ctx
.clone_copy_on_read
&& m_image_ctx
.journal
!= nullptr) {
1407 m_image_ctx
.io_work_queue
->set_require_lock(io::DIRECTION_READ
,
1410 std::swap(m_journal
, m_image_ctx
.journal
);
1411 } else if (m_journal
!= nullptr) {
1412 std::swap(m_journal
, m_image_ctx
.journal
);
1414 if (!m_image_ctx
.test_features(RBD_FEATURE_OBJECT_MAP
,
1415 m_image_ctx
.snap_lock
) ||
1416 m_object_map
!= nullptr) {
1417 std::swap(m_object_map
, m_image_ctx
.object_map
);
1423 template <typename I
>
1424 int RefreshRequest
<I
>::get_parent_info(uint64_t snap_id
,
1425 ParentImageInfo
*parent_md
,
1426 MigrationInfo
*migration_info
) {
1427 if (get_migration_info(parent_md
, migration_info
)) {
1429 } else if (snap_id
== CEPH_NOSNAP
) {
1430 *parent_md
= m_parent_md
;
1431 *migration_info
= {};
1434 for (size_t i
= 0; i
< m_snapc
.snaps
.size(); ++i
) {
1435 if (m_snapc
.snaps
[i
].val
== snap_id
) {
1436 *parent_md
= m_snap_parents
[i
];
1437 *migration_info
= {};
1445 template <typename I
>
1446 bool RefreshRequest
<I
>::get_migration_info(ParentImageInfo
*parent_md
,
1447 MigrationInfo
*migration_info
) {
1448 if (m_migration_spec
.header_type
!= cls::rbd::MIGRATION_HEADER_TYPE_DST
||
1449 (m_migration_spec
.state
!= cls::rbd::MIGRATION_STATE_PREPARED
&&
1450 m_migration_spec
.state
!= cls::rbd::MIGRATION_STATE_EXECUTING
)) {
1451 ceph_assert(m_migration_spec
.header_type
==
1452 cls::rbd::MIGRATION_HEADER_TYPE_SRC
||
1453 m_migration_spec
.pool_id
== -1 ||
1454 m_migration_spec
.state
== cls::rbd::MIGRATION_STATE_EXECUTED
);
1459 parent_md
->spec
.pool_id
= m_migration_spec
.pool_id
;
1460 parent_md
->spec
.pool_namespace
= m_migration_spec
.pool_namespace
;
1461 parent_md
->spec
.image_id
= m_migration_spec
.image_id
;
1462 parent_md
->spec
.snap_id
= CEPH_NOSNAP
;
1463 parent_md
->overlap
= std::min(m_size
, m_migration_spec
.overlap
);
1465 auto snap_seqs
= m_migration_spec
.snap_seqs
;
1466 // If new snapshots have been created on destination image after
1467 // migration stared, map the source CEPH_NOSNAP to the earliest of
1469 snapid_t snap_id
= snap_seqs
.empty() ? 0 : snap_seqs
.rbegin()->second
;
1470 auto it
= std::upper_bound(m_snapc
.snaps
.rbegin(), m_snapc
.snaps
.rend(),
1472 if (it
!= m_snapc
.snaps
.rend()) {
1473 snap_seqs
[CEPH_NOSNAP
] = *it
;
1475 snap_seqs
[CEPH_NOSNAP
] = CEPH_NOSNAP
;
1478 std::set
<uint64_t> snap_ids
;
1479 for (auto& it
: snap_seqs
) {
1480 snap_ids
.insert(it
.second
);
1482 uint64_t overlap
= snap_ids
.find(CEPH_NOSNAP
) != snap_ids
.end() ?
1483 parent_md
->overlap
: 0;
1484 for (size_t i
= 0; i
< m_snapc
.snaps
.size(); ++i
) {
1485 if (snap_ids
.find(m_snapc
.snaps
[i
].val
) != snap_ids
.end()) {
1486 overlap
= std::max(overlap
, m_snap_infos
[i
].image_size
);
1490 *migration_info
= {m_migration_spec
.pool_id
, m_migration_spec
.pool_namespace
,
1491 m_migration_spec
.image_name
, m_migration_spec
.image_id
, {},
1492 overlap
, m_migration_spec
.flatten
};
1494 deep_copy::util::compute_snap_map(0, CEPH_NOSNAP
, snap_seqs
,
1495 &migration_info
->snap_map
);
1499 } // namespace image
1500 } // namespace librbd
1502 template class librbd::image::RefreshRequest
<librbd::ImageCtx
>;