1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
4 #include <boost/algorithm/string/predicate.hpp>
5 #include "include/assert.h"
7 #include "librbd/image/RefreshRequest.h"
8 #include "common/dout.h"
9 #include "common/errno.h"
10 #include "cls/lock/cls_lock_client.h"
11 #include "cls/rbd/cls_rbd_client.h"
12 #include "librbd/ExclusiveLock.h"
13 #include "librbd/ImageCtx.h"
14 #include "librbd/Journal.h"
15 #include "librbd/ObjectMap.h"
16 #include "librbd/Utils.h"
17 #include "librbd/image/RefreshParentRequest.h"
18 #include "librbd/io/ImageRequestWQ.h"
19 #include "librbd/journal/Policy.h"
21 #define dout_subsys ceph_subsys_rbd
23 #define dout_prefix *_dout << "librbd::image::RefreshRequest: "
// Page size for the paginated metadata_list fetch in send_v2_get_metadata().
30 const uint64_t MAX_METADATA_ITEMS
= 128;
// Callback factory helpers pulled in from librbd::util.
34 using util::create_rados_callback
;
35 using util::create_async_context_callback
;
36 using util::create_context_callback
;
// Constructor: records the image context and refresh options, and wraps the
// caller's completion with create_async_context_callback so it is queued to a
// work queue rather than invoked inline from a librados callback thread.
// NOTE(review): text is extraction-damaged; tokens below are kept verbatim.
39 RefreshRequest
<I
>::RefreshRequest(I
&image_ctx
, bool acquiring_lock
,
40 bool skip_open_parent
, Context
*on_finish
)
41 : m_image_ctx(image_ctx
), m_acquiring_lock(acquiring_lock
),
42 m_skip_open_parent_image(skip_open_parent
),
43 m_on_finish(create_async_context_callback(m_image_ctx
, on_finish
)),
44 m_error_result(0), m_flush_aio(false), m_exclusive_lock(nullptr),
45 m_object_map(nullptr), m_journal(nullptr), m_refresh_parent(nullptr) {
// Destructor: asserts that every transient resource (exclusive lock, object
// map, journal, parent refresh request, blocked writes) was released by the
// state machine before destruction.
49 RefreshRequest
<I
>::~RefreshRequest() {
50 // these require state machine to close
51 assert(m_exclusive_lock
== nullptr);
52 assert(m_object_map
== nullptr);
53 assert(m_journal
== nullptr);
54 assert(m_refresh_parent
== nullptr);
55 assert(!m_blocked_writes
);
// Entry point: dispatch to the v1 (old-format) or v2 refresh path based on
// the image format flag.
59 void RefreshRequest
<I
>::send() {
60 if (m_image_ctx
.old_format
) {
61 send_v1_read_header();
63 send_v2_get_mutable_metadata();
// v1 path, step 1: issue an async full-object read (offset 0, len 0) of the
// image header object; completion routed to handle_v1_read_header.
68 void RefreshRequest
<I
>::send_v1_read_header() {
69 CephContext
*cct
= m_image_ctx
.cct
;
70 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
72 librados::ObjectReadOperation op
;
73 op
.read(0, 0, nullptr, nullptr);
75 using klass
= RefreshRequest
<I
>;
76 librados::AioCompletion
*comp
= create_rados_callback
<
77 klass
, &klass::handle_v1_read_header
>(this);
// NOTE(review): trailing aio_operate arguments appear truncated by extraction.
79 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
// v1 path: validate the on-disk v1 header (size and RBD_HEADER_TEXT magic),
// then extract order, image size, and object prefix before listing snapshots.
// NOTE(review): some error-branch lines appear missing from this extraction.
86 Context
*RefreshRequest
<I
>::handle_v1_read_header(int *result
) {
87 CephContext
*cct
= m_image_ctx
.cct
;
88 ldout(cct
, 10) << this << " " << __func__
<< ": " << "r=" << *result
<< dendl
;
90 rbd_obj_header_ondisk v1_header
;
93 } else if (m_out_bl
.length() < sizeof(v1_header
)) {
94 lderr(cct
) << "v1 header too small" << dendl
;
97 } else if (memcmp(RBD_HEADER_TEXT
, m_out_bl
.c_str(),
98 sizeof(RBD_HEADER_TEXT
)) != 0) {
99 lderr(cct
) << "unrecognized v1 header" << dendl
;
104 memcpy(&v1_header
, m_out_bl
.c_str(), sizeof(v1_header
));
105 m_order
= v1_header
.options
.order
;
106 m_size
= v1_header
.image_size
;
107 m_object_prefix
= v1_header
.block_name
;
108 send_v1_get_snapshots();
// v1 path: issue the cls old_snapshot_list read op against the header object.
112 template <typename I
>
113 void RefreshRequest
<I
>::send_v1_get_snapshots() {
114 CephContext
*cct
= m_image_ctx
.cct
;
115 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
117 librados::ObjectReadOperation op
;
118 cls_client::old_snapshot_list_start(&op
);
120 using klass
= RefreshRequest
<I
>;
121 librados::AioCompletion
*comp
= create_rados_callback
<
122 klass
, &klass::handle_v1_get_snapshots
>(this);
124 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
// v1 path: decode the old-format snapshot list; validate the snap context.
// v1 images lack namespaces/timestamps, so synthesize UserSnapshotNamespace
// entries and zero utime_t timestamps sized to the snapshot name list.
130 template <typename I
>
131 Context
*RefreshRequest
<I
>::handle_v1_get_snapshots(int *result
) {
132 CephContext
*cct
= m_image_ctx
.cct
;
133 ldout(cct
, 10) << this << " " << __func__
<< ": " << "r=" << *result
<< dendl
;
136 bufferlist::iterator it
= m_out_bl
.begin();
137 *result
= cls_client::old_snapshot_list_finish(
138 &it
, &m_snap_names
, &m_snap_sizes
, &m_snapc
);
142 lderr(cct
) << "failed to retrieve v1 snapshots: " << cpp_strerror(*result
)
147 if (!m_snapc
.is_valid()) {
148 lderr(cct
) << "v1 image snap context is invalid" << dendl
;
153 //m_snap_namespaces = {m_snap_names.size(), cls::rbd::UserSnapshotNamespace()};
154 m_snap_namespaces
= std::vector
<cls::rbd::SnapshotNamespace
>(
156 cls::rbd::UserSnapshotNamespace());
158 m_snap_timestamps
= std::vector
<utime_t
>(m_snap_names
.size(), utime_t());
// v1 path: issue the cls lock get_lock_info read op for RBD_LOCK_NAME.
164 template <typename I
>
165 void RefreshRequest
<I
>::send_v1_get_locks() {
166 CephContext
*cct
= m_image_ctx
.cct
;
167 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
169 librados::ObjectReadOperation op
;
170 rados::cls::lock::get_lock_info_start(&op
, RBD_LOCK_NAME
);
172 using klass
= RefreshRequest
<I
>;
173 librados::AioCompletion
*comp
= create_rados_callback
<
174 klass
, &klass::handle_v1_get_locks
>(this);
176 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
// v1 path: decode lock info. -EOPNOTSUPP is tolerated (older OSDs without the
// lock class are treated as having no locks); on success record lockers, the
// lock tag, and whether the lock type is exclusive.
182 template <typename I
>
183 Context
*RefreshRequest
<I
>::handle_v1_get_locks(int *result
) {
184 CephContext
*cct
= m_image_ctx
.cct
;
185 ldout(cct
, 10) << this << " " << __func__
<< ": "
186 << "r=" << *result
<< dendl
;
188 // If EOPNOTSUPP, treat image as if there are no locks (we can't
190 if (*result
== -EOPNOTSUPP
) {
192 } else if (*result
== 0) {
193 bufferlist::iterator it
= m_out_bl
.begin();
194 ClsLockType lock_type
;
195 *result
= rados::cls::lock::get_lock_info_finish(&it
, &m_lockers
,
196 &lock_type
, &m_lock_tag
);
198 m_exclusive_locked
= (lock_type
== LOCK_EXCLUSIVE
);
202 lderr(cct
) << "failed to retrieve locks: " << cpp_strerror(*result
)
// v1 path: bounce through the op work queue so apply() never runs from
// within a rados callback context.
211 template <typename I
>
212 void RefreshRequest
<I
>::send_v1_apply() {
213 CephContext
*cct
= m_image_ctx
.cct
;
214 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
216 // ensure we are not in a rados callback when applying updates
217 using klass
= RefreshRequest
<I
>;
218 Context
*ctx
= create_context_callback
<
219 klass
, &klass::handle_v1_apply
>(this);
220 m_image_ctx
.op_work_queue
->queue(ctx
, 0);
// v1 path: apply the collected state and proceed to the final AIO flush step.
223 template <typename I
>
224 Context
*RefreshRequest
<I
>::handle_v1_apply(int *result
) {
225 CephContext
*cct
= m_image_ctx
.cct
;
226 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
229 return send_flush_aio();
// v2 path, step 1: fetch the mutable header metadata; the read_only hint is
// true when the image context is read-only or opened at a snapshot.
232 template <typename I
>
233 void RefreshRequest
<I
>::send_v2_get_mutable_metadata() {
234 CephContext
*cct
= m_image_ctx
.cct
;
235 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
239 RWLock::RLocker
snap_locker(m_image_ctx
.snap_lock
);
240 snap_id
= m_image_ctx
.snap_id
;
243 bool read_only
= m_image_ctx
.read_only
|| snap_id
!= CEPH_NOSNAP
;
244 librados::ObjectReadOperation op
;
245 cls_client::get_mutable_metadata_start(&op
, read_only
);
247 using klass
= RefreshRequest
<I
>;
248 librados::AioCompletion
*comp
= create_rados_callback
<
249 klass
, &klass::handle_v2_get_mutable_metadata
>(this);
251 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
// v2 path: decode mutable metadata; fail on unsupported incompatible features
// or an invalid snap context. While acquiring the lock, a dynamically
// disabled exclusive-lock feature is ignored for this pass and the refresh is
// marked incomplete so it will be retried.
257 template <typename I
>
258 Context
*RefreshRequest
<I
>::handle_v2_get_mutable_metadata(int *result
) {
259 CephContext
*cct
= m_image_ctx
.cct
;
260 ldout(cct
, 10) << this << " " << __func__
<< ": "
261 << "r=" << *result
<< dendl
;
264 bufferlist::iterator it
= m_out_bl
.begin();
265 *result
= cls_client::get_mutable_metadata_finish(&it
, &m_size
, &m_features
,
266 &m_incompatible_features
,
269 &m_lock_tag
, &m_snapc
,
273 lderr(cct
) << "failed to retrieve mutable metadata: "
274 << cpp_strerror(*result
) << dendl
;
278 uint64_t unsupported
= m_incompatible_features
& ~RBD_FEATURES_ALL
;
279 if (unsupported
!= 0ULL) {
280 lderr(cct
) << "Image uses unsupported features: " << unsupported
<< dendl
;
285 if (!m_snapc
.is_valid()) {
286 lderr(cct
) << "image snap context is invalid!" << dendl
;
291 if (m_acquiring_lock
&& (m_features
& RBD_FEATURE_EXCLUSIVE_LOCK
) == 0) {
292 ldout(cct
, 5) << "ignoring dynamically disabled exclusive lock" << dendl
;
293 m_features
|= RBD_FEATURE_EXCLUSIVE_LOCK
;
294 m_incomplete_update
= true;
297 send_v2_get_metadata();
// v2 path: fetch one page (MAX_METADATA_ITEMS) of image metadata starting
// after m_last_metadata_key; pagination continues in the handler.
301 template <typename I
>
302 void RefreshRequest
<I
>::send_v2_get_metadata() {
303 CephContext
*cct
= m_image_ctx
.cct
;
304 ldout(cct
, 10) << this << " " << __func__
<< ": "
305 << "start_key=" << m_last_metadata_key
<< dendl
;
307 librados::ObjectReadOperation op
;
308 cls_client::metadata_list_start(&op
, m_last_metadata_key
, MAX_METADATA_ITEMS
);
310 using klass
= RefreshRequest
<I
>;
311 librados::AioCompletion
*comp
=
312 create_rados_callback
<klass
, &klass::handle_v2_get_metadata
>(this);
314 m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
// v2 path: accumulate the metadata page; -EOPNOTSUPP/-EIO means the OSD lacks
// metadata support and is tolerated. If the last key still carries the config
// prefix, request the next page; otherwise apply the collected metadata.
319 template <typename I
>
320 Context
*RefreshRequest
<I
>::handle_v2_get_metadata(int *result
) {
321 CephContext
*cct
= m_image_ctx
.cct
;
322 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
324 std::map
<std::string
, bufferlist
> metadata
;
326 bufferlist::iterator it
= m_out_bl
.begin();
327 *result
= cls_client::metadata_list_finish(&it
, &metadata
);
330 if (*result
== -EOPNOTSUPP
|| *result
== -EIO
) {
331 ldout(cct
, 10) << "config metadata not supported by OSD" << dendl
;
332 } else if (*result
< 0) {
333 lderr(cct
) << "failed to retrieve metadata: " << cpp_strerror(*result
)
338 if (!metadata
.empty()) {
339 m_metadata
.insert(metadata
.begin(), metadata
.end());
340 m_last_metadata_key
= metadata
.rbegin()->first
;
341 if (boost::starts_with(m_last_metadata_key
,
342 ImageCtx::METADATA_CONF_PREFIX
)) {
343 send_v2_get_metadata();
348 m_image_ctx
.apply_metadata(m_metadata
, false);
// v2 path: fetch the per-snapshot flag values for every snap in the context.
354 template <typename I
>
355 void RefreshRequest
<I
>::send_v2_get_flags() {
356 CephContext
*cct
= m_image_ctx
.cct
;
357 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
359 librados::ObjectReadOperation op
;
360 cls_client::get_flags_start(&op
, m_snapc
.snaps
);
362 using klass
= RefreshRequest
<I
>;
363 librados::AioCompletion
*comp
= create_rados_callback
<
364 klass
, &klass::handle_v2_get_flags
>(this);
366 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
// v2 path: decode flags. -EOPNOTSUPP (old OSD) pessimistically marks the
// object map (and fast-diff, when enabled) invalid for HEAD and every snap;
// -ENOENT signals an out-of-sync snapshot set and restarts the refresh.
372 template <typename I
>
373 Context
*RefreshRequest
<I
>::handle_v2_get_flags(int *result
) {
374 CephContext
*cct
= m_image_ctx
.cct
;
375 ldout(cct
, 10) << this << " " << __func__
<< ": "
376 << "r=" << *result
<< dendl
;
379 bufferlist::iterator it
= m_out_bl
.begin();
380 cls_client::get_flags_finish(&it
, &m_flags
, m_snapc
.snaps
, &m_snap_flags
);
382 if (*result
== -EOPNOTSUPP
) {
383 // Older OSD doesn't support RBD flags, need to assume the worst
385 ldout(cct
, 10) << "OSD does not support RBD flags, disabling object map "
386 << "optimizations" << dendl
;
387 m_flags
= RBD_FLAG_OBJECT_MAP_INVALID
;
388 if ((m_features
& RBD_FEATURE_FAST_DIFF
) != 0) {
389 m_flags
|= RBD_FLAG_FAST_DIFF_INVALID
;
392 std::vector
<uint64_t> default_flags(m_snapc
.snaps
.size(), m_flags
);
393 m_snap_flags
= std::move(default_flags
);
394 } else if (*result
== -ENOENT
) {
395 ldout(cct
, 10) << "out-of-sync snapshot state detected" << dendl
;
396 send_v2_get_mutable_metadata();
398 } else if (*result
< 0) {
399 lderr(cct
) << "failed to retrieve flags: " << cpp_strerror(*result
)
// v2 path: query the consistency-group membership of the image.
408 template <typename I
>
409 void RefreshRequest
<I
>::send_v2_get_group() {
410 CephContext
*cct
= m_image_ctx
.cct
;
411 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
413 librados::ObjectReadOperation op
;
414 cls_client::image_get_group_start(&op
);
416 using klass
= RefreshRequest
<I
>;
417 librados::AioCompletion
*comp
= create_rados_callback
<
418 klass
, &klass::handle_v2_get_group
>(this);
420 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
// v2 path: decode the group spec; -EOPNOTSUPP (OSD without group support) is
// tolerated, then advance to snapshot retrieval.
426 template <typename I
>
427 Context
*RefreshRequest
<I
>::handle_v2_get_group(int *result
) {
428 CephContext
*cct
= m_image_ctx
.cct
;
429 ldout(cct
, 10) << this << " " << __func__
<< ": "
430 << "r=" << *result
<< dendl
;
433 bufferlist::iterator it
= m_out_bl
.begin();
434 cls_client::image_get_group_finish(&it
, &m_group_spec
);
436 if (*result
== -EOPNOTSUPP
) {
437 // Older OSD doesn't support RBD groups
439 ldout(cct
, 10) << "OSD does not support consistency groups" << dendl
;
440 } else if (*result
< 0) {
441 lderr(cct
) << "failed to retrieve group: " << cpp_strerror(*result
)
446 send_v2_get_snapshots();
// v2 path: if the snap context is empty, clear all cached snapshot state and
// jump straight to the parent refresh; otherwise list snapshot details.
450 template <typename I
>
451 void RefreshRequest
<I
>::send_v2_get_snapshots() {
452 if (m_snapc
.snaps
.empty()) {
453 m_snap_names
.clear();
454 m_snap_namespaces
.clear();
455 m_snap_sizes
.clear();
456 m_snap_parents
.clear();
457 m_snap_protection
.clear();
458 m_snap_timestamps
.clear();
459 send_v2_refresh_parent();
463 CephContext
*cct
= m_image_ctx
.cct
;
464 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
466 librados::ObjectReadOperation op
;
467 cls_client::snapshot_list_start(&op
, m_snapc
.snaps
);
469 using klass
= RefreshRequest
<I
>;
470 librados::AioCompletion
*comp
= create_rados_callback
<
471 klass
, &klass::handle_v2_get_snapshots
>(this);
473 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
// v2 path: decode snapshot details; -ENOENT means the snapshot set changed
// under us, so restart from mutable metadata; otherwise fetch timestamps.
479 template <typename I
>
480 Context
*RefreshRequest
<I
>::handle_v2_get_snapshots(int *result
) {
481 CephContext
*cct
= m_image_ctx
.cct
;
482 ldout(cct
, 10) << this << " " << __func__
<< ": "
483 << "r=" << *result
<< dendl
;
486 bufferlist::iterator it
= m_out_bl
.begin();
487 *result
= cls_client::snapshot_list_finish(&it
, m_snapc
.snaps
,
493 if (*result
== -ENOENT
) {
494 ldout(cct
, 10) << "out-of-sync snapshot state detected" << dendl
;
495 send_v2_get_mutable_metadata();
497 } else if (*result
< 0) {
498 lderr(cct
) << "failed to retrieve snapshots: " << cpp_strerror(*result
)
503 send_v2_get_snap_timestamps();
// v2 path: fetch per-snapshot creation timestamps.
507 template <typename I
>
508 void RefreshRequest
<I
>::send_v2_get_snap_timestamps() {
509 CephContext
*cct
= m_image_ctx
.cct
;
510 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
512 librados::ObjectReadOperation op
;
513 cls_client::snapshot_timestamp_list_start(&op
, m_snapc
.snaps
);
515 using klass
= RefreshRequest
<I
>;
516 librados::AioCompletion
*comp
= create_rados_callback
<
517 klass
, &klass::handle_v2_get_snap_timestamps
>(this);
519 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
// v2 path: decode timestamps; -ENOENT restarts the refresh, -EOPNOTSUPP
// (older OSD) falls back to zeroed utime_t values; then fetch namespaces.
525 template <typename I
>
526 Context
*RefreshRequest
<I
>::handle_v2_get_snap_timestamps(int *result
) {
527 CephContext
*cct
= m_image_ctx
.cct
;
528 ldout(cct
, 10) << this << " " << __func__
<< ": " << "r=" << *result
<< dendl
;
531 bufferlist::iterator it
= m_out_bl
.begin();
532 *result
= cls_client::snapshot_timestamp_list_finish(&it
, m_snapc
.snaps
, &m_snap_timestamps
);
534 if (*result
== -ENOENT
) {
535 ldout(cct
, 10) << "out-of-sync snapshot state detected" << dendl
;
536 send_v2_get_mutable_metadata();
538 } else if (*result
== -EOPNOTSUPP
) {
539 m_snap_timestamps
= std::vector
<utime_t
>(m_snap_names
.size(), utime_t());
540 // Ignore it means no snap timestamps are available
541 } else if (*result
< 0) {
542 lderr(cct
) << "failed to retrieve snapshots: " << cpp_strerror(*result
)
547 send_v2_get_snap_namespaces();
// v2 path: fetch per-snapshot namespace information.
551 template <typename I
>
552 void RefreshRequest
<I
>::send_v2_get_snap_namespaces() {
553 CephContext
*cct
= m_image_ctx
.cct
;
554 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
556 librados::ObjectReadOperation op
;
557 cls_client::snapshot_namespace_list_start(&op
, m_snapc
.snaps
);
559 using klass
= RefreshRequest
<I
>;
560 librados::AioCompletion
*comp
= create_rados_callback
<
561 klass
, &klass::handle_v2_get_snap_namespaces
>(this);
563 int r
= m_image_ctx
.md_ctx
.aio_operate(m_image_ctx
.header_oid
, comp
, &op
,
// v2 path: decode namespaces; -ENOENT restarts the refresh, -EOPNOTSUPP
// (older OSD) defaults every snap to UserSnapshotNamespace; then refresh the
// parent image linkage.
569 template <typename I
>
570 Context
*RefreshRequest
<I
>::handle_v2_get_snap_namespaces(int *result
) {
571 CephContext
*cct
= m_image_ctx
.cct
;
572 ldout(cct
, 10) << this << " " << __func__
<< ": "
573 << "r=" << *result
<< dendl
;
576 bufferlist::iterator it
= m_out_bl
.begin();
577 *result
= cls_client::snapshot_namespace_list_finish(&it
, m_snapc
.snaps
,
580 if (*result
== -ENOENT
) {
581 ldout(cct
, 10) << "out-of-sync snapshot state detected" << dendl
;
582 send_v2_get_mutable_metadata();
584 } else if (*result
== -EOPNOTSUPP
) {
585 m_snap_namespaces
= std::vector
586 <cls::rbd::SnapshotNamespace
>(
588 cls::rbd::UserSnapshotNamespace());
589 // Ignore it means no snap namespaces are available
590 } else if (*result
< 0) {
591 lderr(cct
) << "failed to retrieve snapshots: " << cpp_strerror(*result
)
596 send_v2_refresh_parent();
// v2 path: under snap/parent read locks, decide whether the parent image
// needs a refresh (unless skip_open_parent was requested) and either launch a
// RefreshParentRequest or continue to exclusive-lock initialization.
600 template <typename I
>
601 void RefreshRequest
<I
>::send_v2_refresh_parent() {
603 RWLock::RLocker
snap_locker(m_image_ctx
.snap_lock
);
604 RWLock::RLocker
parent_locker(m_image_ctx
.parent_lock
);
606 ParentInfo parent_md
;
607 int r
= get_parent_info(m_image_ctx
.snap_id
, &parent_md
);
608 if (!m_skip_open_parent_image
&& (r
< 0 ||
609 RefreshParentRequest
<I
>::is_refresh_required(m_image_ctx
, parent_md
))) {
610 CephContext
*cct
= m_image_ctx
.cct
;
611 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
613 using klass
= RefreshRequest
<I
>;
614 Context
*ctx
= create_context_callback
<
615 klass
, &klass::handle_v2_refresh_parent
>(this);
616 m_refresh_parent
= RefreshParentRequest
<I
>::create(
617 m_image_ctx
, parent_md
, ctx
);
621 if (m_refresh_parent
!= nullptr) {
622 m_refresh_parent
->send();
624 send_v2_init_exclusive_lock();
// v2 path: log parent-refresh failures, then continue with exclusive-lock
// initialization.
628 template <typename I
>
629 Context
*RefreshRequest
<I
>::handle_v2_refresh_parent(int *result
) {
630 CephContext
*cct
= m_image_ctx
.cct
;
631 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
634 lderr(cct
) << "failed to refresh parent image: " << cpp_strerror(*result
)
641 send_v2_init_exclusive_lock();
// v2 path: skip when the exclusive-lock feature is off, the image is
// read-only/snapshot-bound, or a lock already exists; otherwise create and
// initialize a new ExclusiveLock (feature dynamically enabled or image open
// in progress).
645 template <typename I
>
646 void RefreshRequest
<I
>::send_v2_init_exclusive_lock() {
647 if ((m_features
& RBD_FEATURE_EXCLUSIVE_LOCK
) == 0 ||
648 m_image_ctx
.read_only
|| !m_image_ctx
.snap_name
.empty() ||
649 m_image_ctx
.exclusive_lock
!= nullptr) {
650 send_v2_open_object_map();
654 // implies exclusive lock dynamically enabled or image open in-progress
655 CephContext
*cct
= m_image_ctx
.cct
;
656 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
658 // TODO need safe shut down
659 m_exclusive_lock
= m_image_ctx
.create_exclusive_lock();
661 using klass
= RefreshRequest
<I
>;
662 Context
*ctx
= create_context_callback
<
663 klass
, &klass::handle_v2_init_exclusive_lock
>(this);
665 RWLock::RLocker
owner_locker(m_image_ctx
.owner_lock
);
666 m_exclusive_lock
->init(m_features
, ctx
);
// v2 path: log lock-init failures; object map and journal are deferred until
// the lock is actually acquired.
669 template <typename I
>
670 Context
*RefreshRequest
<I
>::handle_v2_init_exclusive_lock(int *result
) {
671 CephContext
*cct
= m_image_ctx
.cct
;
672 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
675 lderr(cct
) << "failed to initialize exclusive lock: "
676 << cpp_strerror(*result
) << dendl
;
680 // object map and journal will be opened when exclusive lock is
681 // acquired (if features are enabled)
// v2 path: open the journal only when the feature is on, the image is
// writable, and this client owns the exclusive lock; a journal dynamically
// enabled without lock ownership instead forces IO to require the lock. Also
// honors the image's journal policy.
686 template <typename I
>
687 void RefreshRequest
<I
>::send_v2_open_journal() {
688 bool journal_disabled
= (
689 (m_features
& RBD_FEATURE_JOURNALING
) == 0 ||
690 m_image_ctx
.read_only
||
691 !m_image_ctx
.snap_name
.empty() ||
692 m_image_ctx
.journal
!= nullptr ||
693 m_image_ctx
.exclusive_lock
== nullptr ||
694 !m_image_ctx
.exclusive_lock
->is_lock_owner());
695 bool journal_disabled_by_policy
;
697 RWLock::RLocker
snap_locker(m_image_ctx
.snap_lock
);
698 journal_disabled_by_policy
= (
700 m_image_ctx
.get_journal_policy()->journal_disabled());
703 if (journal_disabled
|| journal_disabled_by_policy
) {
704 // journal dynamically enabled -- doesn't own exclusive lock
705 if ((m_features
& RBD_FEATURE_JOURNALING
) != 0 &&
706 !journal_disabled_by_policy
&&
707 m_image_ctx
.exclusive_lock
!= nullptr &&
708 m_image_ctx
.journal
== nullptr) {
709 m_image_ctx
.io_work_queue
->set_require_lock(librbd::io::DIRECTION_BOTH
,
712 send_v2_block_writes();
716 // implies journal dynamically enabled since ExclusiveLock will init
717 // the journal upon acquiring the lock
718 CephContext
*cct
= m_image_ctx
.cct
;
719 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
721 using klass
= RefreshRequest
<I
>;
722 Context
*ctx
= create_context_callback
<
723 klass
, &klass::handle_v2_open_journal
>(this);
725 // TODO need safe close
726 m_journal
= m_image_ctx
.create_journal();
727 m_journal
->open(ctx
);
// v2 path: log journal-open failures, then move on to blocking writes.
730 template <typename I
>
731 Context
*RefreshRequest
<I
>::handle_v2_open_journal(int *result
) {
732 CephContext
*cct
= m_image_ctx
.cct
;
733 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
736 lderr(cct
) << "failed to initialize journal: " << cpp_strerror(*result
)
741 send_v2_block_writes();
// v2 path: when journaling was dynamically disabled while a journal is still
// open, temporarily block writes so in-flight journaled IO can drain.
745 template <typename I
>
746 void RefreshRequest
<I
>::send_v2_block_writes() {
747 bool disabled_journaling
= false;
749 RWLock::RLocker
snap_locker(m_image_ctx
.snap_lock
);
750 disabled_journaling
= ((m_features
& RBD_FEATURE_EXCLUSIVE_LOCK
) != 0 &&
751 (m_features
& RBD_FEATURE_JOURNALING
) == 0 &&
752 m_image_ctx
.journal
!= nullptr);
755 if (!disabled_journaling
) {
760 CephContext
*cct
= m_image_ctx
.cct
;
761 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
763 // we need to block writes temporarily to avoid in-flight journal
765 m_blocked_writes
= true;
766 Context
*ctx
= create_context_callback
<
767 RefreshRequest
<I
>, &RefreshRequest
<I
>::handle_v2_block_writes
>(this);
769 RWLock::RLocker
owner_locker(m_image_ctx
.owner_lock
);
770 m_image_ctx
.io_work_queue
->block_writes(ctx
);
// v2 path: log block-writes failures before continuing the state machine.
773 template <typename I
>
774 Context
*RefreshRequest
<I
>::handle_v2_block_writes(int *result
) {
775 CephContext
*cct
= m_image_ctx
.cct
;
776 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
779 lderr(cct
) << "failed to block writes: " << cpp_strerror(*result
)
// v2 path: open the object map for HEAD (when this client owns the lock) or
// for the snapshot the image is opened at; skipped when the feature is off,
// a map already exists, or ownership conditions are not met.
787 template <typename I
>
788 void RefreshRequest
<I
>::send_v2_open_object_map() {
789 if ((m_features
& RBD_FEATURE_OBJECT_MAP
) == 0 ||
790 m_image_ctx
.object_map
!= nullptr ||
791 (m_image_ctx
.snap_name
.empty() &&
792 (m_image_ctx
.read_only
||
793 m_image_ctx
.exclusive_lock
== nullptr ||
794 !m_image_ctx
.exclusive_lock
->is_lock_owner()))) {
795 send_v2_open_journal();
799 // implies object map dynamically enabled or image open in-progress
800 // since SetSnapRequest loads the object map for a snapshot and
801 // ExclusiveLock loads the object map for HEAD
802 CephContext
*cct
= m_image_ctx
.cct
;
803 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
805 if (m_image_ctx
.snap_name
.empty()) {
806 m_object_map
= m_image_ctx
.create_object_map(CEPH_NOSNAP
);
808 for (size_t snap_idx
= 0; snap_idx
< m_snap_names
.size(); ++snap_idx
) {
809 if (m_snap_names
[snap_idx
] == m_image_ctx
.snap_name
) {
810 m_object_map
= m_image_ctx
.create_object_map(
811 m_snapc
.snaps
[snap_idx
].val
);
816 if (m_object_map
== nullptr) {
817 lderr(cct
) << "failed to locate snapshot: " << m_image_ctx
.snap_name
819 send_v2_open_journal();
824 using klass
= RefreshRequest
<I
>;
825 Context
*ctx
= create_context_callback
<
826 klass
, &klass::handle_v2_open_object_map
>(this);
827 m_object_map
->open(ctx
);
// v2 path: on open failure drop the object map pointer (best-effort feature)
// and continue with the journal step.
830 template <typename I
>
831 Context
*RefreshRequest
<I
>::handle_v2_open_object_map(int *result
) {
832 CephContext
*cct
= m_image_ctx
.cct
;
833 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
836 lderr(cct
) << "failed to open object map: " << cpp_strerror(*result
)
839 m_object_map
= nullptr;
842 send_v2_open_journal();
// v2 path: bounce through the op work queue so apply() never runs from a
// rados callback context (mirrors send_v1_apply).
846 template <typename I
>
847 void RefreshRequest
<I
>::send_v2_apply() {
848 CephContext
*cct
= m_image_ctx
.cct
;
849 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
851 // ensure we are not in a rados callback when applying updates
852 using klass
= RefreshRequest
<I
>;
853 Context
*ctx
= create_context_callback
<
854 klass
, &klass::handle_v2_apply
>(this);
855 m_image_ctx
.op_work_queue
->queue(ctx
, 0);
// v2 path: apply collected state, then finalize the parent refresh.
858 template <typename I
>
859 Context
*RefreshRequest
<I
>::handle_v2_apply(int *result
) {
860 CephContext
*cct
= m_image_ctx
.cct
;
861 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
865 return send_v2_finalize_refresh_parent();
// v2 teardown: finalize the parent refresh request if one was created;
// otherwise continue with exclusive-lock shutdown.
868 template <typename I
>
869 Context
*RefreshRequest
<I
>::send_v2_finalize_refresh_parent() {
870 if (m_refresh_parent
== nullptr) {
871 return send_v2_shut_down_exclusive_lock();
874 CephContext
*cct
= m_image_ctx
.cct
;
875 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
877 using klass
= RefreshRequest
<I
>;
878 Context
*ctx
= create_context_callback
<
879 klass
, &klass::handle_v2_finalize_refresh_parent
>(this);
880 m_refresh_parent
->finalize(ctx
);
// v2 teardown: destroy the finalized parent refresh request and continue
// with exclusive-lock shutdown.
884 template <typename I
>
885 Context
*RefreshRequest
<I
>::handle_v2_finalize_refresh_parent(int *result
) {
886 CephContext
*cct
= m_image_ctx
.cct
;
887 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
889 assert(m_refresh_parent
!= nullptr);
890 delete m_refresh_parent
;
891 m_refresh_parent
= nullptr;
893 return send_v2_shut_down_exclusive_lock();
// v2 teardown: shut down a dynamically disabled exclusive lock (flushes and
// cancels in-flight work before release); otherwise close the journal.
896 template <typename I
>
897 Context
*RefreshRequest
<I
>::send_v2_shut_down_exclusive_lock() {
898 if (m_exclusive_lock
== nullptr) {
899 return send_v2_close_journal();
902 CephContext
*cct
= m_image_ctx
.cct
;
903 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
905 // exclusive lock feature was dynamically disabled. in-flight IO will be
906 // flushed and in-flight requests will be canceled before releasing lock
907 using klass
= RefreshRequest
<I
>;
908 Context
*ctx
= create_context_callback
<
909 klass
, &klass::handle_v2_shut_down_exclusive_lock
>(this);
910 m_exclusive_lock
->shut_down(ctx
);
// v2 teardown: verify the image context dropped its lock reference, delete
// the shut-down lock, and continue with journal close.
914 template <typename I
>
915 Context
*RefreshRequest
<I
>::handle_v2_shut_down_exclusive_lock(int *result
) {
916 CephContext
*cct
= m_image_ctx
.cct
;
917 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
920 lderr(cct
) << "failed to shut down exclusive lock: "
921 << cpp_strerror(*result
) << dendl
;
926 RWLock::WLocker
owner_locker(m_image_ctx
.owner_lock
);
927 assert(m_image_ctx
.exclusive_lock
== nullptr);
930 assert(m_exclusive_lock
!= nullptr);
931 delete m_exclusive_lock
;
932 m_exclusive_lock
= nullptr;
934 return send_v2_close_journal();
// v2 teardown: close a journal left over after the feature was dynamically
// disabled; otherwise close the object map.
937 template <typename I
>
938 Context
*RefreshRequest
<I
>::send_v2_close_journal() {
939 if (m_journal
== nullptr) {
940 return send_v2_close_object_map();
943 CephContext
*cct
= m_image_ctx
.cct
;
944 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
946 // journal feature was dynamically disabled
947 using klass
= RefreshRequest
<I
>;
948 Context
*ctx
= create_context_callback
<
949 klass
, &klass::handle_v2_close_journal
>(this);
950 m_journal
->close(ctx
);
// v2 teardown: after the journal closes, unblock the writes that
// send_v2_block_writes() blocked, then close the object map.
954 template <typename I
>
955 Context
*RefreshRequest
<I
>::handle_v2_close_journal(int *result
) {
956 CephContext
*cct
= m_image_ctx
.cct
;
957 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
961 lderr(cct
) << "failed to close journal: " << cpp_strerror(*result
)
965 assert(m_journal
!= nullptr);
969 assert(m_blocked_writes
);
970 m_blocked_writes
= false;
972 m_image_ctx
.io_work_queue
->unblock_writes();
973 return send_v2_close_object_map();
// v2 teardown: close an object map left over after the feature was
// dynamically disabled; otherwise flush outstanding AIO.
976 template <typename I
>
977 Context
*RefreshRequest
<I
>::send_v2_close_object_map() {
978 if (m_object_map
== nullptr) {
979 return send_flush_aio();
982 CephContext
*cct
= m_image_ctx
.cct
;
983 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
985 // object map was dynamically disabled
986 using klass
= RefreshRequest
<I
>;
987 Context
*ctx
= create_context_callback
<
988 klass
, &klass::handle_v2_close_object_map
>(this);
989 m_object_map
->close(ctx
);
// v2 teardown: object map close must succeed; delete it and flush AIO.
993 template <typename I
>
994 Context
*RefreshRequest
<I
>::handle_v2_close_object_map(int *result
) {
995 CephContext
*cct
= m_image_ctx
.cct
;
996 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
998 assert(*result
== 0);
999 assert(m_object_map
!= nullptr);
1000 delete m_object_map
;
1001 m_object_map
= nullptr;
1003 return send_flush_aio();
// Final step: report -ERESTART for a partial (incomplete) refresh, flush
// pending image IO when requested, or queue the error completion.
1006 template <typename I
>
1007 Context
*RefreshRequest
<I
>::send_flush_aio() {
1008 if (m_incomplete_update
&& m_error_result
== 0) {
1009 // if this was a partial refresh, notify ImageState
1010 m_error_result
= -ERESTART
;
1014 CephContext
*cct
= m_image_ctx
.cct
;
1015 ldout(cct
, 10) << this << " " << __func__
<< dendl
;
1017 RWLock::RLocker
owner_lock(m_image_ctx
.owner_lock
);
1018 using klass
= RefreshRequest
<I
>;
1019 Context
*ctx
= create_context_callback
<
1020 klass
, &klass::handle_flush_aio
>(this);
1021 m_image_ctx
.flush(ctx
);
1023 } else if (m_error_result
< 0) {
1024 // propagate saved error back to caller
1025 Context
*ctx
= create_context_callback
<
1026 RefreshRequest
<I
>, &RefreshRequest
<I
>::handle_error
>(this);
1027 m_image_ctx
.op_work_queue
->queue(ctx
, 0);
// Log flush failures, then fold any saved error via handle_error().
1034 template <typename I
>
1035 Context
*RefreshRequest
<I
>::handle_flush_aio(int *result
) {
1036 CephContext
*cct
= m_image_ctx
.cct
;
1037 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
1040 lderr(cct
) << "failed to flush pending AIO: " << cpp_strerror(*result
)
1044 return handle_error(result
);
// Overwrite *result with the first error saved during the state machine, so
// the caller observes the earliest failure.
1047 template <typename I
>
1048 Context
*RefreshRequest
<I
>::handle_error(int *result
) {
1049 if (m_error_result
< 0) {
1050 *result
= m_error_result
;
1052 CephContext
*cct
= m_image_ctx
.cct
;
1053 ldout(cct
, 10) << this << " " << __func__
<< ": r=" << *result
<< dendl
;
// Commit every value collected by the state machine into the ImageCtx under
// the full write-lock stack (owner, md, cache, snap, parent): size, lockers,
// features/flags, group spec, parent metadata, and the rebuilt snapshot
// tables; detect a vanished open snapshot; then hand dynamically
// enabled/disabled exclusive-lock, journal, and object-map instances to (or
// take them from) the image context.
// NOTE(review): several lines are missing from this extraction, so the
// comments describe only the visible logic.
1058 template <typename I
>
1059 void RefreshRequest
<I
>::apply() {
1060 CephContext
*cct
= m_image_ctx
.cct
;
1061 ldout(cct
, 20) << this << " " << __func__
<< dendl
;
1063 RWLock::WLocker
owner_locker(m_image_ctx
.owner_lock
);
1064 RWLock::WLocker
md_locker(m_image_ctx
.md_lock
);
1067 Mutex::Locker
cache_locker(m_image_ctx
.cache_lock
);
1068 RWLock::WLocker
snap_locker(m_image_ctx
.snap_lock
);
1069 RWLock::WLocker
parent_locker(m_image_ctx
.parent_lock
);
1071 m_image_ctx
.size
= m_size
;
1072 m_image_ctx
.lockers
= m_lockers
;
1073 m_image_ctx
.lock_tag
= m_lock_tag
;
1074 m_image_ctx
.exclusive_locked
= m_exclusive_locked
;
1076 if (m_image_ctx
.old_format
) {
1077 m_image_ctx
.order
= m_order
;
1078 m_image_ctx
.features
= 0;
1079 m_image_ctx
.flags
= 0;
1080 m_image_ctx
.object_prefix
= std::move(m_object_prefix
);
1081 m_image_ctx
.init_layout();
1083 m_image_ctx
.features
= m_features
;
1084 m_image_ctx
.flags
= m_flags
;
1085 m_image_ctx
.group_spec
= m_group_spec
;
1086 m_image_ctx
.parent_md
= m_parent_md
;
1089 for (size_t i
= 0; i
< m_snapc
.snaps
.size(); ++i
) {
1090 std::vector
<librados::snap_t
>::const_iterator it
= std::find(
1091 m_image_ctx
.snaps
.begin(), m_image_ctx
.snaps
.end(),
1092 m_snapc
.snaps
[i
].val
);
1093 if (it
== m_image_ctx
.snaps
.end()) {
1095 ldout(cct
, 20) << "new snapshot id=" << m_snapc
.snaps
[i
].val
1096 << " name=" << m_snap_names
[i
]
1097 << " size=" << m_snap_sizes
[i
]
1102 m_image_ctx
.snaps
.clear();
1103 m_image_ctx
.snap_info
.clear();
1104 m_image_ctx
.snap_ids
.clear();
1105 for (size_t i
= 0; i
< m_snapc
.snaps
.size(); ++i
) {
1106 uint64_t flags
= m_image_ctx
.old_format
? 0 : m_snap_flags
[i
];
1107 uint8_t protection_status
= m_image_ctx
.old_format
?
1108 static_cast<uint8_t>(RBD_PROTECTION_STATUS_UNPROTECTED
) :
1109 m_snap_protection
[i
];
1111 if (!m_image_ctx
.old_format
) {
1112 parent
= m_snap_parents
[i
];
1115 m_image_ctx
.add_snap(m_snap_namespaces
[i
], m_snap_names
[i
],
1116 m_snapc
.snaps
[i
].val
, m_snap_sizes
[i
], parent
,
1117 protection_status
, flags
, m_snap_timestamps
[i
]);
1119 m_image_ctx
.snapc
= m_snapc
;
1121 if (m_image_ctx
.snap_id
!= CEPH_NOSNAP
&&
1122 m_image_ctx
.get_snap_id(m_image_ctx
.snap_namespace
,
1123 m_image_ctx
.snap_name
) != m_image_ctx
.snap_id
) {
1124 lderr(cct
) << "tried to read from a snapshot that no longer exists: "
1125 << m_image_ctx
.snap_name
<< dendl
;
1126 m_image_ctx
.snap_exists
= false;
1129 if (m_refresh_parent
!= nullptr) {
1130 m_refresh_parent
->apply();
1132 m_image_ctx
.data_ctx
.selfmanaged_snap_set_write_ctx(m_image_ctx
.snapc
.seq
,
1135 // handle dynamically enabled / disabled features
1136 if (m_image_ctx
.exclusive_lock
!= nullptr &&
1137 !m_image_ctx
.test_features(RBD_FEATURE_EXCLUSIVE_LOCK
,
1138 m_image_ctx
.snap_lock
)) {
1139 // disabling exclusive lock will automatically handle closing
1140 // object map and journaling
1141 assert(m_exclusive_lock
== nullptr);
1142 m_exclusive_lock
= m_image_ctx
.exclusive_lock
;
1144 if (m_exclusive_lock
!= nullptr) {
1145 assert(m_image_ctx
.exclusive_lock
== nullptr);
1146 std::swap(m_exclusive_lock
, m_image_ctx
.exclusive_lock
);
1148 if (!m_image_ctx
.test_features(RBD_FEATURE_JOURNALING
,
1149 m_image_ctx
.snap_lock
)) {
1150 if (!m_image_ctx
.clone_copy_on_read
&& m_image_ctx
.journal
!= nullptr) {
1151 m_image_ctx
.io_work_queue
->set_require_lock(io::DIRECTION_READ
,
1154 std::swap(m_journal
, m_image_ctx
.journal
);
1155 } else if (m_journal
!= nullptr) {
1156 std::swap(m_journal
, m_image_ctx
.journal
);
1158 if (!m_image_ctx
.test_features(RBD_FEATURE_OBJECT_MAP
,
1159 m_image_ctx
.snap_lock
) ||
1160 m_object_map
!= nullptr) {
1161 std::swap(m_object_map
, m_image_ctx
.object_map
);
// Look up parent info for a given snap id: HEAD (CEPH_NOSNAP) returns the
// image-level parent metadata, otherwise the matching snapshot's parent.
1167 template <typename I
>
1168 int RefreshRequest
<I
>::get_parent_info(uint64_t snap_id
,
1169 ParentInfo
*parent_md
) {
1170 if (snap_id
== CEPH_NOSNAP
) {
1171 *parent_md
= m_parent_md
;
1174 for (size_t i
= 0; i
< m_snapc
.snaps
.size(); ++i
) {
1175 if (m_snapc
.snaps
[i
].val
== snap_id
) {
1176 *parent_md
= m_snap_parents
[i
];
1184 } // namespace image
1185 } // namespace librbd
// Explicit instantiation for the production ImageCtx type.
1187 template class librbd::image::RefreshRequest
<librbd::ImageCtx
>;