1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
4 #include "librbd/image/RefreshRequest.h"
5 #include "common/dout.h"
6 #include "common/errno.h"
7 #include "cls/lock/cls_lock_client.h"
8 #include "cls/rbd/cls_rbd_client.h"
9 #include "librbd/ExclusiveLock.h"
10 #include "librbd/ImageCtx.h"
11 #include "librbd/Journal.h"
12 #include "librbd/ObjectMap.h"
13 #include "librbd/Utils.h"
14 #include "librbd/image/RefreshParentRequest.h"
15 #include "librbd/io/ImageRequestWQ.h"
16 #include "librbd/journal/Policy.h"
18 #define dout_subsys ceph_subsys_rbd
20 #define dout_prefix *_dout << "librbd::image::RefreshRequest: "
25 using util::create_rados_callback
;
26 using util::create_async_context_callback
;
27 using util::create_context_callback
;
// NOTE(review): this file is a mangled extraction — physical lines are split
// and some original lines are missing throughout; code left byte-identical.
// Constructor: records refresh options and wraps on_finish in an async context
// callback so completion fires off the caller's thread; POD members zeroed.
30 RefreshRequest
<I
>::RefreshRequest(I
&image_ctx
, bool acquiring_lock
,
31 bool skip_open_parent
, Context
*on_finish
)
32 : m_image_ctx(image_ctx
), m_acquiring_lock(acquiring_lock
),
33 m_skip_open_parent_image(skip_open_parent
),
34 m_on_finish(create_async_context_callback(m_image_ctx
, on_finish
)),
35 m_error_result(0), m_flush_aio(false), m_exclusive_lock(nullptr),
36 m_object_map(nullptr), m_journal(nullptr), m_refresh_parent(nullptr) {
// Destructor: the state machine must have released/closed every helper object
// (exclusive lock, object map, journal, parent refresh) before destruction.
40 RefreshRequest
<I
>::~RefreshRequest() {
41 // these require state machine to close
42 assert(m_exclusive_lock
== nullptr);
43 assert(m_object_map
== nullptr);
44 assert(m_journal
== nullptr);
45 assert(m_refresh_parent
== nullptr);
46 assert(!m_blocked_writes
);
// Entry point: dispatch to the v1 (old-format) or v2 refresh path.
// NOTE(review): the else/closing lines were dropped by extraction.
50 void RefreshRequest
<I
>::send() {
51 if (m_image_ctx
.old_format
) {
52 send_v1_read_header();
54 send_v2_get_mutable_metadata();
// v1 path, step 1: issue an async full read (off=0, len=0) of the image
// header object; completion routed to handle_v1_read_header.
59 void RefreshRequest
<I
>::send_v1_read_header() {
60 CephContext
*cct
= m_image_ctx
.cct
;
61 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
63 librados::ObjectReadOperation op
;
64 op
.read(0, 0, nullptr, nullptr);
66 using klass
= RefreshRequest
<I
>;
67 librados::AioCompletion
*comp
= create_rados_callback
<
68 klass
, &klass::handle_v1_read_header
>(this);
// aio_operate against the header object; trailing args lost in extraction
// (presumably &m_out_bl — TODO confirm against upstream).
70 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
// v1 path: validate the on-disk v1 header (size + magic text), then cache
// order/size/object-prefix and continue with the v1 snapshot listing.
// NOTE(review): the leading `if (*result < 0)` arm and the error-return lines
// appear to have been dropped by extraction.
77 Context
*RefreshRequest
<I
>::handle_v1_read_header(int *result
) {
78 CephContext
*cct
= m_image_ctx
.cct
;
79 ldout(cct
, 10) << this << " " << __func__
<< ": " << "r=" << *result
<< dendl
;
81 rbd_obj_header_ondisk v1_header
;
84 } else if (m_out_bl
.length() < sizeof(v1_header
)) {
85 lderr(cct
) << "v1 header too small" << dendl
;
88 } else if (memcmp(RBD_HEADER_TEXT
, m_out_bl
.c_str(),
89 sizeof(RBD_HEADER_TEXT
)) != 0) {
90 lderr(cct
) << "unrecognized v1 header" << dendl
;
// header validated: stash the mutable fields for apply()
95 memcpy(&v1_header
, m_out_bl
.c_str(), sizeof(v1_header
));
96 m_order
= v1_header
.options
.order
;
97 m_size
= v1_header
.image_size
;
98 m_object_prefix
= v1_header
.block_name
;
99 send_v1_get_snapshots();
// v1 path, step 2: start the cls old_snapshot_list read op against the
// header object; completion routed to handle_v1_get_snapshots.
103 template <typename I
>
104 void RefreshRequest
<I
>::send_v1_get_snapshots() {
105 CephContext
*cct
= m_image_ctx
.cct
;
106 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
108 librados::ObjectReadOperation op
;
109 cls_client::old_snapshot_list_start(&op
);
111 using klass
= RefreshRequest
<I
>;
112 librados::AioCompletion
*comp
= create_rados_callback
<
113 klass
, &klass::handle_v1_get_snapshots
>(this);
// trailing aio_operate args lost in extraction
115 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
// v1 path: decode the old-format snapshot list into names/sizes/snap-context,
// validate the snap context, and synthesize per-snap namespaces/timestamps
// (v1 images have no namespace or timestamp metadata on disk).
121 template <typename I
>
122 Context
*RefreshRequest
<I
>::handle_v1_get_snapshots(int *result
) {
123 CephContext
*cct
= m_image_ctx
.cct
;
124 ldout(cct
, 10) << this << " " << __func__
<< ": " << "r=" << *result
<< dendl
;
127 bufferlist::iterator it
= m_out_bl
.begin();
128 *result
= cls_client::old_snapshot_list_finish(
129 &it
, &m_snap_names
, &m_snap_sizes
, &m_snapc
);
133 lderr(cct
) << "failed to retrieve v1 snapshots: " << cpp_strerror(*result
)
138 if (!m_snapc
.is_valid()) {
139 lderr(cct
) << "v1 image snap context is invalid" << dendl
;
144 //m_snap_namespaces = {m_snap_names.size(), cls::rbd::UserSnapshotNamespace()};
// every v1 snapshot is a plain user snapshot with a zero timestamp
145 m_snap_namespaces
= std::vector
<cls::rbd::SnapshotNamespace
>(
147 cls::rbd::UserSnapshotNamespace());
149 m_snap_timestamps
= std::vector
<utime_t
>(m_snap_names
.size(), utime_t());
// v1 path, step 3: query advisory lock info (RBD_LOCK_NAME) on the header
// object; completion routed to handle_v1_get_locks.
155 template <typename I
>
156 void RefreshRequest
<I
>::send_v1_get_locks() {
157 CephContext
*cct
= m_image_ctx
.cct
;
158 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
160 librados::ObjectReadOperation op
;
161 rados::cls::lock::get_lock_info_start(&op
, RBD_LOCK_NAME
);
163 using klass
= RefreshRequest
<I
>;
164 librados::AioCompletion
*comp
= create_rados_callback
<
165 klass
, &klass::handle_v1_get_locks
>(this);
// trailing aio_operate args lost in extraction
167 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
// v1 path: decode lock holders/type/tag. -EOPNOTSUPP from an old OSD is
// treated as "no locks"; any other error is fatal to the refresh.
173 template <typename I
>
174 Context
*RefreshRequest
<I
>::handle_v1_get_locks(int *result
) {
175 CephContext
*cct
= m_image_ctx
.cct
;
176 ldout(cct
, 10) << this << " " << __func__
<< ": "
177 << "r=" << *result
<< dendl
;
179 // If EOPNOTSUPP, treat image as if there are no locks (we can't
181 if (*result
== -EOPNOTSUPP
) {
183 } else if (*result
== 0) {
184 bufferlist::iterator it
= m_out_bl
.begin();
185 ClsLockType lock_type
;
186 *result
= rados::cls::lock::get_lock_info_finish(&it
, &m_lockers
,
187 &lock_type
, &m_lock_tag
);
// record whether the header is exclusively locked for apply()
189 m_exclusive_locked
= (lock_type
== LOCK_EXCLUSIVE
);
193 lderr(cct
) << "failed to retrieve locks: " << cpp_strerror(*result
)
// v1 path: bounce to the op work queue so apply() never runs inside a
// rados callback context.
202 template <typename I
>
203 void RefreshRequest
<I
>::send_v1_apply() {
204 CephContext
*cct
= m_image_ctx
.cct
;
205 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
207 // ensure we are not in a rados callback when applying updates
208 using klass
= RefreshRequest
<I
>;
209 Context
*ctx
= create_context_callback
<
210 klass
, &klass::handle_v1_apply
>(this);
211 m_image_ctx
.op_work_queue
->queue(ctx
, 0);
// v1 path: apply the gathered state to the ImageCtx, then flush in-flight
// AIO before completing. NOTE(review): the apply() call line itself appears
// to have been dropped by extraction.
214 template <typename I
>
215 Context
*RefreshRequest
<I
>::handle_v1_apply(int *result
) {
216 CephContext
*cct
= m_image_ctx
.cct
;
217 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
220 return send_flush_aio();
// v2 path, step 1: fetch the header's mutable metadata. read_only is
// computed under snap_lock because reading a snapshot implies read-only.
223 template <typename I
>
224 void RefreshRequest
<I
>::send_v2_get_mutable_metadata() {
225 CephContext
*cct
= m_image_ctx
.cct
;
226 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
// snap_id declared outside the lock scope (declaration line lost in extraction)
230 RWLock::RLocker
snap_locker(m_image_ctx
.snap_lock
);
231 snap_id
= m_image_ctx
.snap_id
;
234 bool read_only
= m_image_ctx
.read_only
|| snap_id
!= CEPH_NOSNAP
;
235 librados::ObjectReadOperation op
;
236 cls_client::get_mutable_metadata_start(&op
, read_only
);
238 using klass
= RefreshRequest
<I
>;
239 librados::AioCompletion
*comp
= create_rados_callback
<
240 klass
, &klass::handle_v2_get_mutable_metadata
>(this);
// trailing aio_operate args lost in extraction
242 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
// v2 path: decode mutable metadata (size, features, incompat features, lock
// state, snap context, ...). Rejects images with unknown incompatible
// features or an invalid snap context. If we are mid lock-acquisition and
// exclusive-lock was concurrently disabled, pretend it is still enabled and
// flag the refresh as incomplete so ImageState retries (-ERESTART later).
248 template <typename I
>
249 Context
*RefreshRequest
<I
>::handle_v2_get_mutable_metadata(int *result
) {
250 CephContext
*cct
= m_image_ctx
.cct
;
251 ldout(cct
, 10) << this << " " << __func__
<< ": "
252 << "r=" << *result
<< dendl
;
255 bufferlist::iterator it
= m_out_bl
.begin();
256 *result
= cls_client::get_mutable_metadata_finish(&it
, &m_size
, &m_features
,
257 &m_incompatible_features
,
260 &m_lock_tag
, &m_snapc
,
264 lderr(cct
) << "failed to retrieve mutable metadata: "
265 << cpp_strerror(*result
) << dendl
;
// refuse to operate on images whose incompatible features we don't know
269 uint64_t unsupported
= m_incompatible_features
& ~RBD_FEATURES_ALL
;
270 if (unsupported
!= 0ULL) {
271 lderr(cct
) << "Image uses unsupported features: " << unsupported
<< dendl
;
276 if (!m_snapc
.is_valid()) {
277 lderr(cct
) << "image snap context is invalid!" << dendl
;
282 if (m_acquiring_lock
&& (m_features
& RBD_FEATURE_EXCLUSIVE_LOCK
) == 0) {
283 ldout(cct
, 5) << "ignoring dynamically disabled exclusive lock" << dendl
;
284 m_features
|= RBD_FEATURE_EXCLUSIVE_LOCK
;
285 m_incomplete_update
= true;
// v2 path: fetch the image flags for HEAD plus every snapshot in the newly
// retrieved snap context; completion routed to handle_v2_get_flags.
292 template <typename I
>
293 void RefreshRequest
<I
>::send_v2_get_flags() {
294 CephContext
*cct
= m_image_ctx
.cct
;
295 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
297 librados::ObjectReadOperation op
;
298 cls_client::get_flags_start(&op
, m_snapc
.snaps
);
300 using klass
= RefreshRequest
<I
>;
301 librados::AioCompletion
*comp
= create_rados_callback
<
302 klass
, &klass::handle_v2_get_flags
>(this);
// trailing aio_operate args lost in extraction
304 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
// v2 path: decode flags. -EOPNOTSUPP (pre-flags OSD) => assume object map /
// fast-diff are invalid everywhere. -ENOENT means a snapshot vanished while
// we were refreshing, so restart from get_mutable_metadata.
310 template <typename I
>
311 Context
*RefreshRequest
<I
>::handle_v2_get_flags(int *result
) {
312 CephContext
*cct
= m_image_ctx
.cct
;
313 ldout(cct
, 10) << this << " " << __func__
<< ": "
314 << "r=" << *result
<< dendl
;
317 bufferlist::iterator it
= m_out_bl
.begin();
318 cls_client::get_flags_finish(&it
, &m_flags
, m_snapc
.snaps
, &m_snap_flags
);
320 if (*result
== -EOPNOTSUPP
) {
321 // Older OSD doesn't support RBD flags, need to assume the worst
323 ldout(cct
, 10) << "OSD does not support RBD flags, disabling object map "
324 << "optimizations" << dendl
;
325 m_flags
= RBD_FLAG_OBJECT_MAP_INVALID
;
326 if ((m_features
& RBD_FEATURE_FAST_DIFF
) != 0) {
327 m_flags
|= RBD_FLAG_FAST_DIFF_INVALID
;
// apply the same pessimistic flags to every snapshot
330 std::vector
<uint64_t> default_flags(m_snapc
.snaps
.size(), m_flags
);
331 m_snap_flags
= std::move(default_flags
);
332 } else if (*result
== -ENOENT
) {
333 ldout(cct
, 10) << "out-of-sync snapshot state detected" << dendl
;
334 send_v2_get_mutable_metadata();
336 } else if (*result
< 0) {
337 lderr(cct
) << "failed to retrieve flags: " << cpp_strerror(*result
)
// v2 path: fetch the consistency-group membership of the image;
// completion routed to handle_v2_get_group.
346 template <typename I
>
347 void RefreshRequest
<I
>::send_v2_get_group() {
348 CephContext
*cct
= m_image_ctx
.cct
;
349 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
351 librados::ObjectReadOperation op
;
352 cls_client::image_get_group_start(&op
);
354 using klass
= RefreshRequest
<I
>;
355 librados::AioCompletion
*comp
= create_rados_callback
<
356 klass
, &klass::handle_v2_get_group
>(this);
// trailing aio_operate args lost in extraction
358 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
// v2 path: decode group spec. -EOPNOTSUPP (old OSD without group support)
// is tolerated; other errors abort; otherwise continue to snapshot listing.
364 template <typename I
>
365 Context
*RefreshRequest
<I
>::handle_v2_get_group(int *result
) {
366 CephContext
*cct
= m_image_ctx
.cct
;
367 ldout(cct
, 10) << this << " " << __func__
<< ": "
368 << "r=" << *result
<< dendl
;
371 bufferlist::iterator it
= m_out_bl
.begin();
372 cls_client::image_get_group_finish(&it
, &m_group_spec
);
374 if (*result
== -EOPNOTSUPP
) {
375 // Older OSD doesn't support RBD groups
377 ldout(cct
, 10) << "OSD does not support consistency groups" << dendl
;
378 } else if (*result
< 0) {
379 lderr(cct
) << "failed to retrieve group: " << cpp_strerror(*result
)
384 send_v2_get_snapshots();
// v2 path: list snapshot metadata for all snaps in the snap context.
// Short-circuits (clearing all cached per-snap vectors) straight to the
// parent refresh when the image has no snapshots.
388 template <typename I
>
389 void RefreshRequest
<I
>::send_v2_get_snapshots() {
390 if (m_snapc
.snaps
.empty()) {
391 m_snap_names
.clear();
392 m_snap_namespaces
.clear();
393 m_snap_sizes
.clear();
394 m_snap_parents
.clear();
395 m_snap_protection
.clear();
396 m_snap_timestamps
.clear();
397 send_v2_refresh_parent();
401 CephContext
*cct
= m_image_ctx
.cct
;
402 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
404 librados::ObjectReadOperation op
;
405 cls_client::snapshot_list_start(&op
, m_snapc
.snaps
);
407 using klass
= RefreshRequest
<I
>;
408 librados::AioCompletion
*comp
= create_rados_callback
<
409 klass
, &klass::handle_v2_get_snapshots
>(this);
// trailing aio_operate args lost in extraction
411 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
// v2 path: decode per-snapshot metadata. -ENOENT => a snapshot was deleted
// mid-refresh, restart the whole v2 pipeline; otherwise continue with the
// snapshot timestamp query.
417 template <typename I
>
418 Context
*RefreshRequest
<I
>::handle_v2_get_snapshots(int *result
) {
419 CephContext
*cct
= m_image_ctx
.cct
;
420 ldout(cct
, 10) << this << " " << __func__
<< ": "
421 << "r=" << *result
<< dendl
;
424 bufferlist::iterator it
= m_out_bl
.begin();
425 *result
= cls_client::snapshot_list_finish(&it
, m_snapc
.snaps
,
431 if (*result
== -ENOENT
) {
432 ldout(cct
, 10) << "out-of-sync snapshot state detected" << dendl
;
433 send_v2_get_mutable_metadata();
435 } else if (*result
< 0) {
436 lderr(cct
) << "failed to retrieve snapshots: " << cpp_strerror(*result
)
441 send_v2_get_snap_timestamps();
// v2 path: query creation timestamps for every snapshot in the snap context;
// completion routed to handle_v2_get_snap_timestamps.
445 template <typename I
>
446 void RefreshRequest
<I
>::send_v2_get_snap_timestamps() {
447 CephContext
*cct
= m_image_ctx
.cct
;
448 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
450 librados::ObjectReadOperation op
;
451 cls_client::snapshot_timestamp_list_start(&op
, m_snapc
.snaps
);
453 using klass
= RefreshRequest
<I
>;
454 librados::AioCompletion
*comp
= create_rados_callback
<
455 klass
, &klass::handle_v2_get_snap_timestamps
>(this);
// trailing aio_operate args lost in extraction
457 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
// v2 path: decode snapshot timestamps. -ENOENT restarts the refresh;
// -EOPNOTSUPP (old OSD) substitutes zeroed timestamps; success continues
// to the snapshot namespace query.
463 template <typename I
>
464 Context
*RefreshRequest
<I
>::handle_v2_get_snap_timestamps(int *result
) {
465 CephContext
*cct
= m_image_ctx
.cct
;
466 ldout(cct
, 10) << this << " " << __func__
<< ": " << "r=" << *result
<< dendl
;
469 bufferlist::iterator it
= m_out_bl
.begin();
470 *result
= cls_client::snapshot_timestamp_list_finish(&it
, m_snapc
.snaps
, &m_snap_timestamps
);
472 if (*result
== -ENOENT
) {
473 ldout(cct
, 10) << "out-of-sync snapshot state detected" << dendl
;
474 send_v2_get_mutable_metadata();
476 } else if (*result
== -EOPNOTSUPP
) {
477 m_snap_timestamps
= std::vector
<utime_t
>(m_snap_names
.size(), utime_t());
478 // Ignore it means no snap timestamps are available
479 } else if (*result
< 0) {
480 lderr(cct
) << "failed to retrieve snapshots: " << cpp_strerror(*result
)
485 send_v2_get_snap_namespaces();
// v2 path: query the namespace of every snapshot in the snap context;
// completion routed to handle_v2_get_snap_namespaces.
489 template <typename I
>
490 void RefreshRequest
<I
>::send_v2_get_snap_namespaces() {
491 CephContext
*cct
= m_image_ctx
.cct
;
492 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
494 librados::ObjectReadOperation op
;
495 cls_client::snapshot_namespace_list_start(&op
, m_snapc
.snaps
);
497 using klass
= RefreshRequest
<I
>;
498 librados::AioCompletion
*comp
= create_rados_callback
<
499 klass
, &klass::handle_v2_get_snap_namespaces
>(this);
// trailing aio_operate args lost in extraction
501 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
// v2 path: decode snapshot namespaces. -ENOENT restarts the refresh;
// -EOPNOTSUPP (old OSD) assumes plain user snapshots; success proceeds
// to the parent-image refresh.
507 template <typename I
>
508 Context
*RefreshRequest
<I
>::handle_v2_get_snap_namespaces(int *result
) {
509 CephContext
*cct
= m_image_ctx
.cct
;
510 ldout(cct
, 10) << this << " " << __func__
<< ": "
511 << "r=" << *result
<< dendl
;
514 bufferlist::iterator it
= m_out_bl
.begin();
515 *result
= cls_client::snapshot_namespace_list_finish(&it
, m_snapc
.snaps
,
518 if (*result
== -ENOENT
) {
519 ldout(cct
, 10) << "out-of-sync snapshot state detected" << dendl
;
520 send_v2_get_mutable_metadata();
522 } else if (*result
== -EOPNOTSUPP
) {
523 m_snap_namespaces
= std::vector
524 <cls::rbd::SnapshotNamespace
>(
526 cls::rbd::UserSnapshotNamespace());
527 // Ignore it means no snap namespaces are available
528 } else if (*result
< 0) {
529 lderr(cct
) << "failed to retrieve snapshots: " << cpp_strerror(*result
)
534 send_v2_refresh_parent();
// v2 path: if the parent image needs (re-)opening — clone parent changed or
// parent lookup failed — kick off an async RefreshParentRequest, unless the
// caller asked to skip opening the parent. Otherwise fall through to
// exclusive-lock initialization.
538 template <typename I
>
539 void RefreshRequest
<I
>::send_v2_refresh_parent() {
// snap_lock/parent_lock held only for the get_parent_info snapshot lookup
541 RWLock::RLocker
snap_locker(m_image_ctx
.snap_lock
);
542 RWLock::RLocker
parent_locker(m_image_ctx
.parent_lock
);
544 ParentInfo parent_md
;
545 int r
= get_parent_info(m_image_ctx
.snap_id
, &parent_md
);
546 if (!m_skip_open_parent_image
&& (r
< 0 ||
547 RefreshParentRequest
<I
>::is_refresh_required(m_image_ctx
, parent_md
))) {
548 CephContext
*cct
= m_image_ctx
.cct
;
549 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
551 using klass
= RefreshRequest
<I
>;
552 Context
*ctx
= create_context_callback
<
553 klass
, &klass::handle_v2_refresh_parent
>(this);
554 m_refresh_parent
= RefreshParentRequest
<I
>::create(
555 m_image_ctx
, parent_md
, ctx
);
559 if (m_refresh_parent
!= nullptr) {
560 m_refresh_parent
->send();
562 send_v2_init_exclusive_lock();
// v2 path: parent refresh completed; log failure (error-path lines lost in
// extraction) and continue with exclusive-lock initialization.
566 template <typename I
>
567 Context
*RefreshRequest
<I
>::handle_v2_refresh_parent(int *result
) {
568 CephContext
*cct
= m_image_ctx
.cct
;
569 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
572 lderr(cct
) << "failed to refresh parent image: " << cpp_strerror(*result
)
579 send_v2_init_exclusive_lock();
// v2 path: create and init an ExclusiveLock when the feature is enabled on a
// writable HEAD image and no lock object exists yet (i.e. the feature was
// dynamically enabled or the image is being opened); otherwise skip ahead
// to object-map opening.
583 template <typename I
>
584 void RefreshRequest
<I
>::send_v2_init_exclusive_lock() {
585 if ((m_features
& RBD_FEATURE_EXCLUSIVE_LOCK
) == 0 ||
586 m_image_ctx
.read_only
|| !m_image_ctx
.snap_name
.empty() ||
587 m_image_ctx
.exclusive_lock
!= nullptr) {
588 send_v2_open_object_map();
592 // implies exclusive lock dynamically enabled or image open in-progress
593 CephContext
*cct
= m_image_ctx
.cct
;
594 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
596 // TODO need safe shut down
597 m_exclusive_lock
= m_image_ctx
.create_exclusive_lock();
599 using klass
= RefreshRequest
<I
>;
600 Context
*ctx
= create_context_callback
<
601 klass
, &klass::handle_v2_init_exclusive_lock
>(this);
// owner_lock must be held across ExclusiveLock::init
603 RWLock::RLocker
owner_locker(m_image_ctx
.owner_lock
);
604 m_exclusive_lock
->init(m_features
, ctx
);
// v2 path: exclusive lock initialized. Object map and journal are NOT
// opened here; they are opened later when the lock is actually acquired.
607 template <typename I
>
608 Context
*RefreshRequest
<I
>::handle_v2_init_exclusive_lock(int *result
) {
609 CephContext
*cct
= m_image_ctx
.cct
;
610 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
613 lderr(cct
) << "failed to initialize exclusive lock: "
614 << cpp_strerror(*result
) << dendl
;
618 // object map and journal will be opened when exclusive lock is
619 // acquired (if features are enabled)
// v2 path: open the journal only when journaling was dynamically enabled
// while this client owns the exclusive lock. When journaling is enabled but
// we don't own the lock, force reads to require the lock so the eventual
// lock owner replays/records journal events, then move on to block-writes.
624 template <typename I
>
625 void RefreshRequest
<I
>::send_v2_open_journal() {
626 bool journal_disabled
= (
627 (m_features
& RBD_FEATURE_JOURNALING
) == 0 ||
628 m_image_ctx
.read_only
||
629 !m_image_ctx
.snap_name
.empty() ||
630 m_image_ctx
.journal
!= nullptr ||
631 m_image_ctx
.exclusive_lock
== nullptr ||
632 !m_image_ctx
.exclusive_lock
->is_lock_owner());
633 bool journal_disabled_by_policy
;
635 RWLock::RLocker
snap_locker(m_image_ctx
.snap_lock
);
636 journal_disabled_by_policy
= (
638 m_image_ctx
.get_journal_policy()->journal_disabled());
641 if (journal_disabled
|| journal_disabled_by_policy
) {
642 // journal dynamically enabled -- doesn't own exclusive lock
643 if ((m_features
& RBD_FEATURE_JOURNALING
) != 0 &&
644 !journal_disabled_by_policy
&&
645 m_image_ctx
.exclusive_lock
!= nullptr &&
646 m_image_ctx
.journal
== nullptr) {
647 m_image_ctx
.io_work_queue
->set_require_lock_on_read();
649 send_v2_block_writes();
653 // implies journal dynamically enabled since ExclusiveLock will init
654 // the journal upon acquiring the lock
655 CephContext
*cct
= m_image_ctx
.cct
;
656 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
658 using klass
= RefreshRequest
<I
>;
659 Context
*ctx
= create_context_callback
<
660 klass
, &klass::handle_v2_open_journal
>(this);
662 // TODO need safe close
663 m_journal
= m_image_ctx
.create_journal();
664 m_journal
->open(ctx
);
// v2 path: journal open completed; log failure (error-path lines lost in
// extraction) then proceed to blocking writes if journaling was disabled.
667 template <typename I
>
668 Context
*RefreshRequest
<I
>::handle_v2_open_journal(int *result
) {
669 CephContext
*cct
= m_image_ctx
.cct
;
670 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
673 lderr(cct
) << "failed to initialize journal: " << cpp_strerror(*result
)
678 send_v2_block_writes();
// v2 path: when journaling was dynamically DISABLED while a journal is still
// open, temporarily block writes so no in-flight write races with the
// journal shutdown; otherwise skip straight ahead.
682 template <typename I
>
683 void RefreshRequest
<I
>::send_v2_block_writes() {
684 bool disabled_journaling
= false;
686 RWLock::RLocker
snap_locker(m_image_ctx
.snap_lock
);
687 disabled_journaling
= ((m_features
& RBD_FEATURE_EXCLUSIVE_LOCK
) != 0 &&
688 (m_features
& RBD_FEATURE_JOURNALING
) == 0 &&
689 m_image_ctx
.journal
!= nullptr);
692 if (!disabled_journaling
) {
697 CephContext
*cct
= m_image_ctx
.cct
;
698 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
700 // we need to block writes temporarily to avoid in-flight journal
// m_blocked_writes is re-checked/cleared in handle_v2_close_journal
702 m_blocked_writes
= true;
703 Context
*ctx
= create_context_callback
<
704 RefreshRequest
<I
>, &RefreshRequest
<I
>::handle_v2_block_writes
>(this);
706 RWLock::RLocker
owner_locker(m_image_ctx
.owner_lock
);
707 m_image_ctx
.io_work_queue
->block_writes(ctx
);
// v2 path: writes blocked (or failed to block — error continuation lines
// lost in extraction); refresh continues regardless.
710 template <typename I
>
711 Context
*RefreshRequest
<I
>::handle_v2_block_writes(int *result
) {
712 CephContext
*cct
= m_image_ctx
.cct
;
713 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
716 lderr(cct
) << "failed to block writes: " << cpp_strerror(*result
)
// v2 path: open the object map when the feature is on and none is loaded:
// HEAD maps require owning the exclusive lock; snapshot maps are located by
// matching the snap name against the freshly fetched snapshot list. Falls
// through to journal opening when no map is needed or the snap is missing.
724 template <typename I
>
725 void RefreshRequest
<I
>::send_v2_open_object_map() {
726 if ((m_features
& RBD_FEATURE_OBJECT_MAP
) == 0 ||
727 m_image_ctx
.object_map
!= nullptr ||
728 (m_image_ctx
.snap_name
.empty() &&
729 (m_image_ctx
.read_only
||
730 m_image_ctx
.exclusive_lock
== nullptr ||
731 !m_image_ctx
.exclusive_lock
->is_lock_owner()))) {
732 send_v2_open_journal();
736 // implies object map dynamically enabled or image open in-progress
737 // since SetSnapRequest loads the object map for a snapshot and
738 // ExclusiveLock loads the object map for HEAD
739 CephContext
*cct
= m_image_ctx
.cct
;
740 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
742 if (m_image_ctx
.snap_name
.empty()) {
743 m_object_map
= m_image_ctx
.create_object_map(CEPH_NOSNAP
);
745 for (size_t snap_idx
= 0; snap_idx
< m_snap_names
.size(); ++snap_idx
) {
746 if (m_snap_names
[snap_idx
] == m_image_ctx
.snap_name
) {
747 m_object_map
= m_image_ctx
.create_object_map(
748 m_snapc
.snaps
[snap_idx
].val
);
753 if (m_object_map
== nullptr) {
754 lderr(cct
) << "failed to locate snapshot: " << m_image_ctx
.snap_name
756 send_v2_open_journal();
761 using klass
= RefreshRequest
<I
>;
762 Context
*ctx
= create_context_callback
<
763 klass
, &klass::handle_v2_open_object_map
>(this);
764 m_object_map
->open(ctx
);
// v2 path: object map open completed; on failure the map object is dropped
// (deletion line presumably lost in extraction) and refresh proceeds to
// journal opening — an unusable object map is non-fatal.
767 template <typename I
>
768 Context
*RefreshRequest
<I
>::handle_v2_open_object_map(int *result
) {
769 CephContext
*cct
= m_image_ctx
.cct
;
770 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
773 lderr(cct
) << "failed to open object map: " << cpp_strerror(*result
)
776 m_object_map
= nullptr;
779 send_v2_open_journal();
// v2 path: bounce to the op work queue so apply() never runs inside a
// rados callback context (mirrors send_v1_apply).
783 template <typename I
>
784 void RefreshRequest
<I
>::send_v2_apply() {
785 CephContext
*cct
= m_image_ctx
.cct
;
786 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
788 // ensure we are not in a rados callback when applying updates
789 using klass
= RefreshRequest
<I
>;
790 Context
*ctx
= create_context_callback
<
791 klass
, &klass::handle_v2_apply
>(this);
792 m_image_ctx
.op_work_queue
->queue(ctx
, 0);
// v2 path: apply gathered state to the ImageCtx (apply() call line lost in
// extraction), then tear down any dynamically-disabled helpers starting with
// the parent finalization.
795 template <typename I
>
796 Context
*RefreshRequest
<I
>::handle_v2_apply(int *result
) {
797 CephContext
*cct
= m_image_ctx
.cct
;
798 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
802 return send_v2_finalize_refresh_parent();
// Teardown: finalize the parent refresh request if one ran; otherwise skip
// directly to shutting down a dynamically-disabled exclusive lock.
805 template <typename I
>
806 Context
*RefreshRequest
<I
>::send_v2_finalize_refresh_parent() {
807 if (m_refresh_parent
== nullptr) {
808 return send_v2_shut_down_exclusive_lock();
811 CephContext
*cct
= m_image_ctx
.cct
;
812 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
814 using klass
= RefreshRequest
<I
>;
815 Context
*ctx
= create_context_callback
<
816 klass
, &klass::handle_v2_finalize_refresh_parent
>(this);
817 m_refresh_parent
->finalize(ctx
);
// Teardown: parent refresh finalized — release it (satisfies the destructor
// assert) and continue with exclusive-lock shutdown.
821 template <typename I
>
822 Context
*RefreshRequest
<I
>::handle_v2_finalize_refresh_parent(int *result
) {
823 CephContext
*cct
= m_image_ctx
.cct
;
824 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
826 assert(m_refresh_parent
!= nullptr);
827 delete m_refresh_parent
;
828 m_refresh_parent
= nullptr;
830 return send_v2_shut_down_exclusive_lock();
// Teardown: if apply() swapped OUT an exclusive lock (feature dynamically
// disabled), shut it down asynchronously; otherwise go close the journal.
833 template <typename I
>
834 Context
*RefreshRequest
<I
>::send_v2_shut_down_exclusive_lock() {
835 if (m_exclusive_lock
== nullptr) {
836 return send_v2_close_journal();
839 CephContext
*cct
= m_image_ctx
.cct
;
840 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
842 // exclusive lock feature was dynamically disabled. in-flight IO will be
843 // flushed and in-flight requests will be canceled before releasing lock
844 using klass
= RefreshRequest
<I
>;
845 Context
*ctx
= create_context_callback
<
846 klass
, &klass::handle_v2_shut_down_exclusive_lock
>(this);
847 m_exclusive_lock
->shut_down(ctx
);
// Teardown: exclusive lock shut down — verify the ImageCtx no longer
// references it (under owner_lock), delete it, and continue closing the
// journal.
851 template <typename I
>
852 Context
*RefreshRequest
<I
>::handle_v2_shut_down_exclusive_lock(int *result
) {
853 CephContext
*cct
= m_image_ctx
.cct
;
854 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
857 lderr(cct
) << "failed to shut down exclusive lock: "
858 << cpp_strerror(*result
) << dendl
;
863 RWLock::WLocker
owner_locker(m_image_ctx
.owner_lock
);
864 assert(m_image_ctx
.exclusive_lock
== nullptr);
867 assert(m_exclusive_lock
!= nullptr);
868 delete m_exclusive_lock
;
869 m_exclusive_lock
= nullptr;
871 return send_v2_close_journal();
// Teardown: close a journal that apply() swapped out because journaling was
// dynamically disabled; otherwise continue to object-map close.
874 template <typename I
>
875 Context
*RefreshRequest
<I
>::send_v2_close_journal() {
876 if (m_journal
== nullptr) {
877 return send_v2_close_object_map();
880 CephContext
*cct
= m_image_ctx
.cct
;
881 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
883 // journal feature was dynamically disabled
884 using klass
= RefreshRequest
<I
>;
885 Context
*ctx
= create_context_callback
<
886 klass
, &klass::handle_v2_close_journal
>(this);
887 m_journal
->close(ctx
);
// Teardown: journal closed — release it (deletion line presumably lost in
// extraction), then undo the temporary write block taken in
// send_v2_block_writes and move on to object-map close.
891 template <typename I
>
892 Context
*RefreshRequest
<I
>::handle_v2_close_journal(int *result
) {
893 CephContext
*cct
= m_image_ctx
.cct
;
894 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
898 lderr(cct
) << "failed to close journal: " << cpp_strerror(*result
)
902 assert(m_journal
!= nullptr);
906 assert(m_blocked_writes
);
907 m_blocked_writes
= false;
909 m_image_ctx
.io_work_queue
->unblock_writes();
910 return send_v2_close_object_map();
// Teardown: close an object map that apply() swapped out because the
// feature was dynamically disabled; otherwise continue to the AIO flush.
913 template <typename I
>
914 Context
*RefreshRequest
<I
>::send_v2_close_object_map() {
915 if (m_object_map
== nullptr) {
916 return send_flush_aio();
919 CephContext
*cct
= m_image_ctx
.cct
;
920 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
922 // object map was dynamically disabled
923 using klass
= RefreshRequest
<I
>;
924 Context
*ctx
= create_context_callback
<
925 klass
, &klass::handle_v2_close_object_map
>(this);
926 m_object_map
->close(ctx
);
// Teardown: object map closed (close must succeed — asserted); release it
// (deletion line presumably lost in extraction) and flush AIO.
930 template <typename I
>
931 Context
*RefreshRequest
<I
>::handle_v2_close_object_map(int *result
) {
932 CephContext
*cct
= m_image_ctx
.cct
;
933 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
935 assert(*result
== 0);
936 assert(m_object_map
!= nullptr);
938 m_object_map
= nullptr;
940 return send_flush_aio();
// Final stage (shared by v1/v2): convert a partial refresh into -ERESTART,
// flush pending AIO when requested (m_flush_aio branch — the condition line
// appears lost in extraction), or propagate a saved error via the work
// queue so completion never happens inline.
943 template <typename I
>
944 Context
*RefreshRequest
<I
>::send_flush_aio() {
945 if (m_incomplete_update
&& m_error_result
== 0) {
946 // if this was a partial refresh, notify ImageState
947 m_error_result
= -ERESTART
;
951 CephContext
*cct
= m_image_ctx
.cct
;
952 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
954 RWLock::RLocker
owner_lock(m_image_ctx
.owner_lock
);
955 using klass
= RefreshRequest
<I
>;
956 Context
*ctx
= create_context_callback
<
957 klass
, &klass::handle_flush_aio
>(this);
958 m_image_ctx
.flush(ctx
);
960 } else if (m_error_result
< 0) {
961 // propagate saved error back to caller
962 Context
*ctx
= create_context_callback
<
963 RefreshRequest
<I
>, &RefreshRequest
<I
>::handle_error
>(this);
964 m_image_ctx
.op_work_queue
->queue(ctx
, 0);
// Final stage: AIO flush completed; log any flush failure and funnel into
// the common error/result handler.
971 template <typename I
>
972 Context
*RefreshRequest
<I
>::handle_flush_aio(int *result
) {
973 CephContext
*cct
= m_image_ctx
.cct
;
974 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
977 lderr(cct
) << "failed to flush pending AIO: " << cpp_strerror(*result
)
981 return handle_error(result
);
// Final stage: a state-machine error saved in m_error_result overrides any
// later success code before the request completes (completion lines lost in
// extraction — presumably returns m_on_finish).
984 template <typename I
>
985 Context
*RefreshRequest
<I
>::handle_error(int *result
) {
986 if (m_error_result
< 0) {
987 *result
= m_error_result
;
989 CephContext
*cct
= m_image_ctx
.cct
;
990 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
// Atomically publish every value gathered by the state machine into the
// ImageCtx under the full lock set (owner -> md -> cache -> snap -> parent).
// Rebuilds the snapshot tables, applies the parent refresh, updates the
// write snap context, and swaps dynamically enabled/disabled ExclusiveLock,
// Journal and ObjectMap instances into/out of the ImageCtx; instances
// swapped OUT are shut down afterwards by the send_v2_* teardown steps.
995 template <typename I
>
996 void RefreshRequest
<I
>::apply() {
997 CephContext
*cct
= m_image_ctx
.cct
;
998 ldout(cct
, 20) << this << " " << __func__
<< dendl
;
1000 RWLock::WLocker
owner_locker(m_image_ctx
.owner_lock
);
1001 RWLock::WLocker
md_locker(m_image_ctx
.md_lock
);
1004 Mutex::Locker
cache_locker(m_image_ctx
.cache_lock
);
1005 RWLock::WLocker
snap_locker(m_image_ctx
.snap_lock
);
1006 RWLock::WLocker
parent_locker(m_image_ctx
.parent_lock
);
// scalar state common to v1 and v2 images
1008 m_image_ctx
.size
= m_size
;
1009 m_image_ctx
.lockers
= m_lockers
;
1010 m_image_ctx
.lock_tag
= m_lock_tag
;
1011 m_image_ctx
.exclusive_locked
= m_exclusive_locked
;
1013 if (m_image_ctx
.old_format
) {
// v1 images: no features/flags; layout derives from order + object prefix
1014 m_image_ctx
.order
= m_order
;
1015 m_image_ctx
.features
= 0;
1016 m_image_ctx
.flags
= 0;
1017 m_image_ctx
.object_prefix
= std::move(m_object_prefix
);
1018 m_image_ctx
.init_layout();
1020 m_image_ctx
.features
= m_features
;
1021 m_image_ctx
.flags
= m_flags
;
1022 m_image_ctx
.group_spec
= m_group_spec
;
1023 m_image_ctx
.parent_md
= m_parent_md
;
// log snapshots that were not present before this refresh
1026 for (size_t i
= 0; i
< m_snapc
.snaps
.size(); ++i
) {
1027 std::vector
<librados::snap_t
>::const_iterator it
= std::find(
1028 m_image_ctx
.snaps
.begin(), m_image_ctx
.snaps
.end(),
1029 m_snapc
.snaps
[i
].val
);
1030 if (it
== m_image_ctx
.snaps
.end()) {
1032 ldout(cct
, 20) << "new snapshot id=" << m_snapc
.snaps
[i
].val
1033 << " name=" << m_snap_names
[i
]
1034 << " size=" << m_snap_sizes
[i
]
// rebuild the snapshot tables from scratch
1039 m_image_ctx
.snaps
.clear();
1040 m_image_ctx
.snap_info
.clear();
1041 m_image_ctx
.snap_ids
.clear();
1042 for (size_t i
= 0; i
< m_snapc
.snaps
.size(); ++i
) {
// v1 snapshots have no flags/protection/parent metadata
1043 uint64_t flags
= m_image_ctx
.old_format
? 0 : m_snap_flags
[i
];
1044 uint8_t protection_status
= m_image_ctx
.old_format
?
1045 static_cast<uint8_t>(RBD_PROTECTION_STATUS_UNPROTECTED
) :
1046 m_snap_protection
[i
];
1048 if (!m_image_ctx
.old_format
) {
1049 parent
= m_snap_parents
[i
];
1052 m_image_ctx
.add_snap(m_snap_namespaces
[i
], m_snap_names
[i
],
1053 m_snapc
.snaps
[i
].val
, m_snap_sizes
[i
], parent
,
1054 protection_status
, flags
, m_snap_timestamps
[i
]);
1056 m_image_ctx
.snapc
= m_snapc
;
// detect reading from a snapshot deleted during the refresh
1058 if (m_image_ctx
.snap_id
!= CEPH_NOSNAP
&&
1059 m_image_ctx
.get_snap_id(m_image_ctx
.snap_namespace
,
1060 m_image_ctx
.snap_name
) != m_image_ctx
.snap_id
) {
1061 lderr(cct
) << "tried to read from a snapshot that no longer exists: "
1062 << m_image_ctx
.snap_name
<< dendl
;
1063 m_image_ctx
.snap_exists
= false;
1066 if (m_refresh_parent
!= nullptr) {
1067 m_refresh_parent
->apply();
// update the write snap context used for self-managed snapshots
1069 m_image_ctx
.data_ctx
.selfmanaged_snap_set_write_ctx(m_image_ctx
.snapc
.seq
,
1072 // handle dynamically enabled / disabled features
1073 if (m_image_ctx
.exclusive_lock
!= nullptr &&
1074 !m_image_ctx
.test_features(RBD_FEATURE_EXCLUSIVE_LOCK
,
1075 m_image_ctx
.snap_lock
)) {
1076 // disabling exclusive lock will automatically handle closing
1077 // object map and journaling
1078 assert(m_exclusive_lock
== nullptr);
1079 m_exclusive_lock
= m_image_ctx
.exclusive_lock
;
1080 m_image_ctx
.io_work_queue
->clear_require_lock_on_read();
1082 if (m_exclusive_lock
!= nullptr) {
// lock dynamically enabled: hand the freshly-initialized lock to ImageCtx
1083 assert(m_image_ctx
.exclusive_lock
== nullptr);
1084 std::swap(m_exclusive_lock
, m_image_ctx
.exclusive_lock
);
1086 if (!m_image_ctx
.test_features(RBD_FEATURE_JOURNALING
,
1087 m_image_ctx
.snap_lock
)) {
1088 if (m_image_ctx
.journal
!= nullptr) {
1089 m_image_ctx
.io_work_queue
->clear_require_lock_on_read();
// swap out the now-disabled journal for teardown
1091 std::swap(m_journal
, m_image_ctx
.journal
);
1092 } else if (m_journal
!= nullptr) {
1093 std::swap(m_journal
, m_image_ctx
.journal
);
1095 if (!m_image_ctx
.test_features(RBD_FEATURE_OBJECT_MAP
,
1096 m_image_ctx
.snap_lock
) ||
1097 m_object_map
!= nullptr) {
1098 std::swap(m_object_map
, m_image_ctx
.object_map
);
1100 if (m_image_ctx
.clone_copy_on_read
&&
1101 m_image_ctx
.io_work_queue
->is_lock_required()) {
1102 m_image_ctx
.io_work_queue
->set_require_lock_on_read();
// Look up the parent info for a given snap id: the HEAD revision uses the
// freshly fetched m_parent_md; snapshots are matched against the fetched
// snap context (success/failure return lines lost in extraction).
1108 template <typename I
>
1109 int RefreshRequest
<I
>::get_parent_info(uint64_t snap_id
,
1110 ParentInfo
*parent_md
) {
1111 if (snap_id
== CEPH_NOSNAP
) {
1112 *parent_md
= m_parent_md
;
1115 for (size_t i
= 0; i
< m_snapc
.snaps
.size(); ++i
) {
1116 if (m_snapc
.snaps
[i
].val
== snap_id
) {
1117 *parent_md
= m_snap_parents
[i
];
1125 } // namespace image
1126 } // namespace librbd
// Explicit instantiation for the production ImageCtx type (tests use a mock).
1128 template class librbd::image::RefreshRequest
<librbd::ImageCtx
>;