// ceph.git: ceph/src/librbd/image/RefreshRequest.cc
// (sources updated to ceph Nautilus 14.2.1)
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3
4 #include <boost/algorithm/string/predicate.hpp>
5 #include "include/ceph_assert.h"
6
7 #include "librbd/image/RefreshRequest.h"
8 #include "common/dout.h"
9 #include "common/errno.h"
10 #include "cls/lock/cls_lock_client.h"
11 #include "cls/rbd/cls_rbd_client.h"
12 #include "librbd/ExclusiveLock.h"
13 #include "librbd/ImageCtx.h"
14 #include "librbd/ImageWatcher.h"
15 #include "librbd/Journal.h"
16 #include "librbd/ObjectMap.h"
17 #include "librbd/Utils.h"
18 #include "librbd/deep_copy/Utils.h"
19 #include "librbd/image/RefreshParentRequest.h"
20 #include "librbd/io/AioCompletion.h"
21 #include "librbd/io/ImageDispatchSpec.h"
22 #include "librbd/io/ImageRequestWQ.h"
23 #include "librbd/journal/Policy.h"
24
25 #define dout_subsys ceph_subsys_rbd
26 #undef dout_prefix
27 #define dout_prefix *_dout << "librbd::image::RefreshRequest: "
28
29 namespace librbd {
30 namespace image {
31
32 namespace {
33
34 const uint64_t MAX_METADATA_ITEMS = 128;
35
36 }
37
38 using util::create_rados_callback;
39 using util::create_async_context_callback;
40 using util::create_context_callback;
41
// Refreshes an image's mutable metadata (size, features, snapshots, locks,
// parent linkage, metadata, ...) and applies the result to the image context.
//
// @param image_ctx image context to refresh
// @param acquiring_lock true when invoked while acquiring the exclusive lock
//        (tolerates a dynamically disabled exclusive-lock feature)
// @param skip_open_parent true to skip (re)opening the parent image
// @param on_finish completion for the refresh; wrapped in an async callback
//        so it is never invoked inline from a rados callback
template <typename I>
RefreshRequest<I>::RefreshRequest(I &image_ctx, bool acquiring_lock,
                                  bool skip_open_parent, Context *on_finish)
  : m_image_ctx(image_ctx), m_acquiring_lock(acquiring_lock),
    m_skip_open_parent_image(skip_open_parent),
    m_on_finish(create_async_context_callback(m_image_ctx, on_finish)),
    m_error_result(0), m_flush_aio(false), m_exclusive_lock(nullptr),
    m_object_map(nullptr), m_journal(nullptr), m_refresh_parent(nullptr) {
  // pool-level metadata lives in the pool's default namespace, regardless
  // of the namespace the image itself was opened in
  m_pool_metadata_io_ctx.dup(image_ctx.md_ctx);
  m_pool_metadata_io_ctx.set_namespace("");
}
53
54 template <typename I>
55 RefreshRequest<I>::~RefreshRequest() {
56 // these require state machine to close
57 ceph_assert(m_exclusive_lock == nullptr);
58 ceph_assert(m_object_map == nullptr);
59 ceph_assert(m_journal == nullptr);
60 ceph_assert(m_refresh_parent == nullptr);
61 ceph_assert(!m_blocked_writes);
62 }
63
64 template <typename I>
65 void RefreshRequest<I>::send() {
66 if (m_image_ctx.old_format) {
67 send_v1_read_header();
68 } else {
69 send_v2_get_mutable_metadata();
70 }
71 }
72
73 template <typename I>
74 void RefreshRequest<I>::send_get_migration_header() {
75 if (m_image_ctx.ignore_migrating) {
76 if (m_image_ctx.old_format) {
77 send_v1_get_snapshots();
78 } else {
79 send_v2_get_metadata();
80 }
81 return;
82 }
83
84 CephContext *cct = m_image_ctx.cct;
85 ldout(cct, 10) << this << " " << __func__ << dendl;
86
87 librados::ObjectReadOperation op;
88 cls_client::migration_get_start(&op);
89
90 using klass = RefreshRequest<I>;
91 librados::AioCompletion *comp =
92 create_rados_callback<klass, &klass::handle_get_migration_header>(this);
93 m_out_bl.clear();
94 m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
95 &m_out_bl);
96 comp->release();
97 }
98
// Decodes the migration header and validates that the image can be used in
// its current migration state.
//
// @param result in/out operation return code
// @return completion context on terminal error; nullptr while the state
//         machine continues asynchronously
template <typename I>
Context *RefreshRequest<I>::handle_get_migration_header(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result == 0) {
    auto it = m_out_bl.cbegin();
    *result = cls_client::migration_get_finish(&it, &m_migration_spec);
  } else if (*result == -ENOENT) {
    // header disappeared between reads (migration presumably finished or
    // was aborted) -- restart the whole refresh from the top
    ldout(cct, 5) << this << " " << __func__ << ": no migration header found"
                  << ", retrying" << dendl;
    send();
    return nullptr;
  }

  if (*result < 0) {
    lderr(cct) << "failed to retrieve migration header: "
               << cpp_strerror(*result) << dendl;
    return m_on_finish;
  }

  switch(m_migration_spec.header_type) {
  case cls::rbd::MIGRATION_HEADER_TYPE_SRC:
    // migration source may only be opened read-only
    if (!m_image_ctx.read_only) {
      lderr(cct) << "image being migrated" << dendl;
      *result = -EROFS;
      return m_on_finish;
    }
    ldout(cct, 1) << this << " " << __func__ << ": migrating to: "
                  << m_migration_spec << dendl;
    break;
  case cls::rbd::MIGRATION_HEADER_TYPE_DST:
    ldout(cct, 1) << this << " " << __func__ << ": migrating from: "
                  << m_migration_spec << dendl;
    // destination is usable only once migration is prepared or beyond;
    // otherwise keep retrying the refresh until it reaches such a state
    if (m_migration_spec.state != cls::rbd::MIGRATION_STATE_PREPARED &&
        m_migration_spec.state != cls::rbd::MIGRATION_STATE_EXECUTING &&
        m_migration_spec.state != cls::rbd::MIGRATION_STATE_EXECUTED) {
      ldout(cct, 5) << this << " " << __func__ << ": current migration state: "
                    << m_migration_spec.state << ", retrying" << dendl;
      send();
      return nullptr;
    }
    break;
  default:
    // unknown header type: refuse rather than misinterpret
    ldout(cct, 1) << this << " " << __func__ << ": migration type "
                  << m_migration_spec.header_type << dendl;
    *result = -EBADMSG;
    return m_on_finish;
  }

  // resume the format-specific refresh sequence
  if (m_image_ctx.old_format) {
    send_v1_get_snapshots();
  } else {
    send_v2_get_metadata();
  }
  return nullptr;
}
156
157 template <typename I>
158 void RefreshRequest<I>::send_v1_read_header() {
159 CephContext *cct = m_image_ctx.cct;
160 ldout(cct, 10) << this << " " << __func__ << dendl;
161
162 librados::ObjectReadOperation op;
163 op.read(0, 0, nullptr, nullptr);
164
165 using klass = RefreshRequest<I>;
166 librados::AioCompletion *comp = create_rados_callback<
167 klass, &klass::handle_v1_read_header>(this);
168 m_out_bl.clear();
169 int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
170 &m_out_bl);
171 ceph_assert(r == 0);
172 comp->release();
173 }
174
// Validates and decodes the v1 on-disk header, extracting the order, size
// and object prefix; detects a v1 migration header by its magic text.
//
// @param result in/out read return code
// @return completion context on error; nullptr while continuing
template <typename I>
Context *RefreshRequest<I>::handle_v1_read_header(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": " << "r=" << *result << dendl;

  rbd_obj_header_ondisk v1_header;
  bool migrating = false;
  if (*result < 0) {
    return m_on_finish;
  } else if (m_out_bl.length() < sizeof(v1_header)) {
    // short read: cannot possibly contain a full on-disk header
    lderr(cct) << "v1 header too small" << dendl;
    *result = -EIO;
    return m_on_finish;
  } else if (memcmp(RBD_HEADER_TEXT, m_out_bl.c_str(),
                    sizeof(RBD_HEADER_TEXT)) != 0) {
    // not a normal v1 header -- check for the migration magic text before
    // declaring the header unrecognized
    if (memcmp(RBD_MIGRATE_HEADER_TEXT, m_out_bl.c_str(),
               sizeof(RBD_MIGRATE_HEADER_TEXT)) == 0) {
      ldout(cct, 1) << this << " " << __func__ << ": migration v1 header detected"
                    << dendl;
      migrating = true;
    } else {
      lderr(cct) << "unrecognized v1 header" << dendl;
      *result = -ENXIO;
      return m_on_finish;
    }
  }

  // cache the immutable v1 properties for apply()
  memcpy(&v1_header, m_out_bl.c_str(), sizeof(v1_header));
  m_order = v1_header.options.order;
  m_size = v1_header.image_size;
  m_object_prefix = v1_header.block_name;
  if (migrating) {
    send_get_migration_header();
  } else {
    send_v1_get_snapshots();
  }
  return nullptr;
}
213
214 template <typename I>
215 void RefreshRequest<I>::send_v1_get_snapshots() {
216 CephContext *cct = m_image_ctx.cct;
217 ldout(cct, 10) << this << " " << __func__ << dendl;
218
219 librados::ObjectReadOperation op;
220 cls_client::old_snapshot_list_start(&op);
221
222 using klass = RefreshRequest<I>;
223 librados::AioCompletion *comp = create_rados_callback<
224 klass, &klass::handle_v1_get_snapshots>(this);
225 m_out_bl.clear();
226 int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
227 &m_out_bl);
228 ceph_assert(r == 0);
229 comp->release();
230 }
231
232 template <typename I>
233 Context *RefreshRequest<I>::handle_v1_get_snapshots(int *result) {
234 CephContext *cct = m_image_ctx.cct;
235 ldout(cct, 10) << this << " " << __func__ << ": " << "r=" << *result << dendl;
236
237 std::vector<std::string> snap_names;
238 std::vector<uint64_t> snap_sizes;
239 if (*result == 0) {
240 auto it = m_out_bl.cbegin();
241 *result = cls_client::old_snapshot_list_finish(&it, &snap_names,
242 &snap_sizes, &m_snapc);
243 }
244
245 if (*result < 0) {
246 lderr(cct) << "failed to retrieve v1 snapshots: " << cpp_strerror(*result)
247 << dendl;
248 return m_on_finish;
249 }
250
251 if (!m_snapc.is_valid()) {
252 lderr(cct) << "v1 image snap context is invalid" << dendl;
253 *result = -EIO;
254 return m_on_finish;
255 }
256
257 m_snap_infos.clear();
258 for (size_t i = 0; i < m_snapc.snaps.size(); ++i) {
259 m_snap_infos.push_back({m_snapc.snaps[i],
260 {cls::rbd::UserSnapshotNamespace{}},
261 snap_names[i], snap_sizes[i], {}, 0});
262 }
263
264 send_v1_get_locks();
265 return nullptr;
266 }
267
268 template <typename I>
269 void RefreshRequest<I>::send_v1_get_locks() {
270 CephContext *cct = m_image_ctx.cct;
271 ldout(cct, 10) << this << " " << __func__ << dendl;
272
273 librados::ObjectReadOperation op;
274 rados::cls::lock::get_lock_info_start(&op, RBD_LOCK_NAME);
275
276 using klass = RefreshRequest<I>;
277 librados::AioCompletion *comp = create_rados_callback<
278 klass, &klass::handle_v1_get_locks>(this);
279 m_out_bl.clear();
280 int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
281 &m_out_bl);
282 ceph_assert(r == 0);
283 comp->release();
284 }
285
286 template <typename I>
287 Context *RefreshRequest<I>::handle_v1_get_locks(int *result) {
288 CephContext *cct = m_image_ctx.cct;
289 ldout(cct, 10) << this << " " << __func__ << ": "
290 << "r=" << *result << dendl;
291
292 if (*result == 0) {
293 auto it = m_out_bl.cbegin();
294 ClsLockType lock_type;
295 *result = rados::cls::lock::get_lock_info_finish(&it, &m_lockers,
296 &lock_type, &m_lock_tag);
297 if (*result == 0) {
298 m_exclusive_locked = (lock_type == LOCK_EXCLUSIVE);
299 }
300 }
301 if (*result < 0) {
302 lderr(cct) << "failed to retrieve locks: " << cpp_strerror(*result)
303 << dendl;
304 return m_on_finish;
305 }
306
307 send_v1_apply();
308 return nullptr;
309 }
310
311 template <typename I>
312 void RefreshRequest<I>::send_v1_apply() {
313 CephContext *cct = m_image_ctx.cct;
314 ldout(cct, 10) << this << " " << __func__ << dendl;
315
316 // ensure we are not in a rados callback when applying updates
317 using klass = RefreshRequest<I>;
318 Context *ctx = create_context_callback<
319 klass, &klass::handle_v1_apply>(this);
320 m_image_ctx.op_work_queue->queue(ctx, 0);
321 }
322
323 template <typename I>
324 Context *RefreshRequest<I>::handle_v1_apply(int *result) {
325 CephContext *cct = m_image_ctx.cct;
326 ldout(cct, 10) << this << " " << __func__ << dendl;
327
328 apply();
329 return send_flush_aio();
330 }
331
// Issues a single compound read for all frequently-changing v2 header
// fields: size, features, flags, snap context and lock info.
//
// NOTE: the order of the *_start() calls below must exactly match the
// order of the *_finish() calls in handle_v2_get_mutable_metadata().
template <typename I>
void RefreshRequest<I>::send_v2_get_mutable_metadata() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  uint64_t snap_id;
  {
    RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
    snap_id = m_image_ctx.snap_id;
  }

  // a snapshot-bound image context is implicitly read-only
  bool read_only = m_image_ctx.read_only || snap_id != CEPH_NOSNAP;
  librados::ObjectReadOperation op;
  cls_client::get_size_start(&op, CEPH_NOSNAP);
  cls_client::get_features_start(&op, read_only);
  cls_client::get_flags_start(&op, CEPH_NOSNAP);
  cls_client::get_snapcontext_start(&op);
  rados::cls::lock::get_lock_info_start(&op, RBD_LOCK_NAME);

  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v2_get_mutable_metadata>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                         &m_out_bl);
  ceph_assert(r == 0);
  comp->release();
}
360
// Decodes the compound mutable-metadata read and validates the results.
//
// NOTE: *_finish() calls must be decoded in exactly the order the
// corresponding *_start() calls were queued in send_v2_get_mutable_metadata().
//
// @param result in/out operation return code
// @return completion context on error; nullptr while continuing
template <typename I>
Context *RefreshRequest<I>::handle_v2_get_mutable_metadata(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": "
                 << "r=" << *result << dendl;

  auto it = m_out_bl.cbegin();
  if (*result >= 0) {
    // image order is immutable -- decoded only to advance the iterator
    uint8_t order;
    *result = cls_client::get_size_finish(&it, &m_size, &order);
  }

  if (*result >= 0) {
    *result = cls_client::get_features_finish(&it, &m_features,
                                              &m_incompatible_features);
  }

  if (*result >= 0) {
    *result = cls_client::get_flags_finish(&it, &m_flags);
  }

  if (*result >= 0) {
    *result = cls_client::get_snapcontext_finish(&it, &m_snapc);
  }

  if (*result >= 0) {
    ClsLockType lock_type = LOCK_NONE;
    *result = rados::cls::lock::get_lock_info_finish(&it, &m_lockers,
                                                     &lock_type, &m_lock_tag);
    if (*result == 0) {
      m_exclusive_locked = (lock_type == LOCK_EXCLUSIVE);
    }
  }

  if (*result < 0) {
    lderr(cct) << "failed to retrieve mutable metadata: "
               << cpp_strerror(*result) << dendl;
    return m_on_finish;
  }

  // refuse to open images requiring feature bits this build doesn't know
  uint64_t unsupported = m_incompatible_features & ~RBD_FEATURES_ALL;
  if (unsupported != 0ULL) {
    lderr(cct) << "Image uses unsupported features: " << unsupported << dendl;
    *result = -ENOSYS;
    return m_on_finish;
  }

  if (!m_snapc.is_valid()) {
    lderr(cct) << "image snap context is invalid!" << dendl;
    *result = -EIO;
    return m_on_finish;
  }

  // if the lock feature was disabled while we were acquiring it, pretend
  // it is still enabled for this pass and flag the update as incomplete
  // so a follow-up refresh picks up the real state
  if (m_acquiring_lock && (m_features & RBD_FEATURE_EXCLUSIVE_LOCK) == 0) {
    ldout(cct, 5) << "ignoring dynamically disabled exclusive lock" << dendl;
    m_features |= RBD_FEATURE_EXCLUSIVE_LOCK;
    m_incomplete_update = true;
  }

  send_v2_get_parent();
  return nullptr;
}
423
424 template <typename I>
425 void RefreshRequest<I>::send_v2_get_parent() {
426 // NOTE: remove support when Mimic is EOLed
427 CephContext *cct = m_image_ctx.cct;
428 ldout(cct, 10) << this << " " << __func__ << ": legacy=" << m_legacy_parent
429 << dendl;
430
431 librados::ObjectReadOperation op;
432 if (!m_legacy_parent) {
433 cls_client::parent_get_start(&op);
434 cls_client::parent_overlap_get_start(&op, CEPH_NOSNAP);
435 } else {
436 cls_client::get_parent_start(&op, CEPH_NOSNAP);
437 }
438
439 auto aio_comp = create_rados_callback<
440 RefreshRequest<I>, &RefreshRequest<I>::handle_v2_get_parent>(this);
441 m_out_bl.clear();
442 m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, aio_comp, &op,
443 &m_out_bl);
444 aio_comp->release();
445 }
446
447 template <typename I>
448 Context *RefreshRequest<I>::handle_v2_get_parent(int *result) {
449 // NOTE: remove support when Mimic is EOLed
450 CephContext *cct = m_image_ctx.cct;
451 ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
452
453 auto it = m_out_bl.cbegin();
454 if (!m_legacy_parent) {
455 if (*result == 0) {
456 *result = cls_client::parent_get_finish(&it, &m_parent_md.spec);
457 }
458
459 std::optional<uint64_t> parent_overlap;
460 if (*result == 0) {
461 *result = cls_client::parent_overlap_get_finish(&it, &parent_overlap);
462 }
463
464 if (*result == 0 && parent_overlap) {
465 m_parent_md.overlap = *parent_overlap;
466 m_head_parent_overlap = true;
467 }
468 } else if (*result == 0) {
469 *result = cls_client::get_parent_finish(&it, &m_parent_md.spec,
470 &m_parent_md.overlap);
471 m_head_parent_overlap = true;
472 }
473
474 if (*result == -EOPNOTSUPP && !m_legacy_parent) {
475 ldout(cct, 10) << "retrying using legacy parent method" << dendl;
476 m_legacy_parent = true;
477 send_v2_get_parent();
478 return nullptr;
479 } if (*result < 0) {
480 lderr(cct) << "failed to retrieve parent: " << cpp_strerror(*result)
481 << dendl;
482 return m_on_finish;
483 }
484
485 if ((m_features & RBD_FEATURE_MIGRATING) != 0) {
486 ldout(cct, 1) << "migrating feature set" << dendl;
487 send_get_migration_header();
488 return nullptr;
489 }
490
491 send_v2_get_metadata();
492 return nullptr;
493 }
494
495 template <typename I>
496 void RefreshRequest<I>::send_v2_get_metadata() {
497 CephContext *cct = m_image_ctx.cct;
498 ldout(cct, 10) << this << " " << __func__ << ": "
499 << "start_key=" << m_last_metadata_key << dendl;
500
501 librados::ObjectReadOperation op;
502 cls_client::metadata_list_start(&op, m_last_metadata_key, MAX_METADATA_ITEMS);
503
504 using klass = RefreshRequest<I>;
505 librados::AioCompletion *comp =
506 create_rados_callback<klass, &klass::handle_v2_get_metadata>(this);
507 m_out_bl.clear();
508 m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
509 &m_out_bl);
510 comp->release();
511 }
512
513 template <typename I>
514 Context *RefreshRequest<I>::handle_v2_get_metadata(int *result) {
515 CephContext *cct = m_image_ctx.cct;
516 ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
517
518 std::map<std::string, bufferlist> metadata;
519 if (*result == 0) {
520 auto it = m_out_bl.cbegin();
521 *result = cls_client::metadata_list_finish(&it, &metadata);
522 }
523
524 if (*result < 0) {
525 lderr(cct) << "failed to retrieve metadata: " << cpp_strerror(*result)
526 << dendl;
527 return m_on_finish;
528 }
529
530 if (!metadata.empty()) {
531 m_metadata.insert(metadata.begin(), metadata.end());
532 m_last_metadata_key = metadata.rbegin()->first;
533 if (boost::starts_with(m_last_metadata_key,
534 ImageCtx::METADATA_CONF_PREFIX)) {
535 send_v2_get_metadata();
536 return nullptr;
537 }
538 }
539
540 m_last_metadata_key.clear();
541 send_v2_get_pool_metadata();
542 return nullptr;
543 }
544
545 template <typename I>
546 void RefreshRequest<I>::send_v2_get_pool_metadata() {
547 CephContext *cct = m_image_ctx.cct;
548 ldout(cct, 10) << this << " " << __func__ << ": "
549 << "start_key=" << m_last_metadata_key << dendl;
550
551 librados::ObjectReadOperation op;
552 cls_client::metadata_list_start(&op, m_last_metadata_key, MAX_METADATA_ITEMS);
553
554 using klass = RefreshRequest<I>;
555 librados::AioCompletion *comp =
556 create_rados_callback<klass, &klass::handle_v2_get_pool_metadata>(this);
557 m_out_bl.clear();
558 m_pool_metadata_io_ctx.aio_operate(RBD_INFO, comp, &op, &m_out_bl);
559 comp->release();
560 }
561
562 template <typename I>
563 Context *RefreshRequest<I>::handle_v2_get_pool_metadata(int *result) {
564 CephContext *cct = m_image_ctx.cct;
565 ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
566
567 std::map<std::string, bufferlist> metadata;
568 if (*result == 0) {
569 auto it = m_out_bl.cbegin();
570 *result = cls_client::metadata_list_finish(&it, &metadata);
571 }
572
573 if (*result == -EOPNOTSUPP || *result == -ENOENT) {
574 ldout(cct, 10) << "pool metadata not supported by OSD" << dendl;
575 } else if (*result < 0) {
576 lderr(cct) << "failed to retrieve pool metadata: " << cpp_strerror(*result)
577 << dendl;
578 return m_on_finish;
579 }
580
581 if (!metadata.empty()) {
582 m_metadata.insert(metadata.begin(), metadata.end());
583 m_last_metadata_key = metadata.rbegin()->first;
584 if (boost::starts_with(m_last_metadata_key,
585 ImageCtx::METADATA_CONF_PREFIX)) {
586 send_v2_get_pool_metadata();
587 return nullptr;
588 }
589 }
590
591 bool thread_safe = m_image_ctx.image_watcher->is_unregistered();
592 m_image_ctx.apply_metadata(m_metadata, thread_safe);
593
594 send_v2_get_op_features();
595 return nullptr;
596 }
597
598 template <typename I>
599 void RefreshRequest<I>::send_v2_get_op_features() {
600 if ((m_features & RBD_FEATURE_OPERATIONS) == 0LL) {
601 send_v2_get_group();
602 return;
603 }
604
605 CephContext *cct = m_image_ctx.cct;
606 ldout(cct, 10) << this << " " << __func__ << dendl;
607
608 librados::ObjectReadOperation op;
609 cls_client::op_features_get_start(&op);
610
611 librados::AioCompletion *comp = create_rados_callback<
612 RefreshRequest<I>, &RefreshRequest<I>::handle_v2_get_op_features>(this);
613 m_out_bl.clear();
614 int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
615 &m_out_bl);
616 ceph_assert(r == 0);
617 comp->release();
618 }
619
620 template <typename I>
621 Context *RefreshRequest<I>::handle_v2_get_op_features(int *result) {
622 CephContext *cct = m_image_ctx.cct;
623 ldout(cct, 10) << this << " " << __func__ << ": "
624 << "r=" << *result << dendl;
625
626 // -EOPNOTSUPP handler not required since feature bit implies OSD
627 // supports the method
628 if (*result == 0) {
629 auto it = m_out_bl.cbegin();
630 cls_client::op_features_get_finish(&it, &m_op_features);
631 } else if (*result < 0) {
632 lderr(cct) << "failed to retrieve op features: " << cpp_strerror(*result)
633 << dendl;
634 return m_on_finish;
635 }
636
637 send_v2_get_group();
638 return nullptr;
639 }
640
641 template <typename I>
642 void RefreshRequest<I>::send_v2_get_group() {
643 CephContext *cct = m_image_ctx.cct;
644 ldout(cct, 10) << this << " " << __func__ << dendl;
645
646 librados::ObjectReadOperation op;
647 cls_client::image_group_get_start(&op);
648
649 using klass = RefreshRequest<I>;
650 librados::AioCompletion *comp = create_rados_callback<
651 klass, &klass::handle_v2_get_group>(this);
652 m_out_bl.clear();
653 int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
654 &m_out_bl);
655 ceph_assert(r == 0);
656 comp->release();
657 }
658
659 template <typename I>
660 Context *RefreshRequest<I>::handle_v2_get_group(int *result) {
661 CephContext *cct = m_image_ctx.cct;
662 ldout(cct, 10) << this << " " << __func__ << ": "
663 << "r=" << *result << dendl;
664
665 if (*result == 0) {
666 auto it = m_out_bl.cbegin();
667 cls_client::image_group_get_finish(&it, &m_group_spec);
668 }
669 if (*result < 0 && *result != -EOPNOTSUPP) {
670 lderr(cct) << "failed to retrieve group: " << cpp_strerror(*result)
671 << dendl;
672 return m_on_finish;
673 }
674
675 send_v2_get_snapshots();
676 return nullptr;
677 }
678
// Issues one compound read covering every snapshot in the snap context:
// per-snapshot info, parent linkage, flags and protection status.
//
// NOTE: the per-snapshot op sequence queued here must exactly match the
// per-snapshot decode sequence in handle_v2_get_snapshots().
template <typename I>
void RefreshRequest<I>::send_v2_get_snapshots() {
  m_snap_infos.resize(m_snapc.snaps.size());
  m_snap_flags.resize(m_snapc.snaps.size());
  m_snap_parents.resize(m_snapc.snaps.size());
  m_snap_protection.resize(m_snapc.snaps.size());

  // no snapshots -- skip straight to refreshing the parent image
  if (m_snapc.snaps.empty()) {
    send_v2_refresh_parent();
    return;
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  librados::ObjectReadOperation op;
  for (auto snap_id : m_snapc.snaps) {
    if (m_legacy_snapshot) {
      /// NOTE: remove after Luminous is retired
      // legacy OSDs require three separate calls per snapshot
      cls_client::get_snapshot_name_start(&op, snap_id);
      cls_client::get_size_start(&op, snap_id);
      cls_client::get_snapshot_timestamp_start(&op, snap_id);
    } else {
      cls_client::snapshot_get_start(&op, snap_id);
    }

    if (m_legacy_parent) {
      cls_client::get_parent_start(&op, snap_id);
    } else {
      cls_client::parent_overlap_get_start(&op, snap_id);
    }

    cls_client::get_flags_start(&op, snap_id);
    cls_client::get_protection_status_start(&op, snap_id);
  }

  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v2_get_snapshots>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                         &m_out_bl);
  ceph_assert(r == 0);
  comp->release();
}
724
// Decodes the per-snapshot compound read, retrying the whole refresh when
// the snapshot set changed mid-flight and falling back to legacy snapshot
// methods on pre-Mimic OSDs.
//
// NOTE: the decode sequence must exactly mirror the op sequence queued in
// send_v2_get_snapshots().
//
// @param result in/out operation return code
// @return completion context on error; nullptr while continuing
template <typename I>
Context *RefreshRequest<I>::handle_v2_get_snapshots(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": " << "r=" << *result << dendl;

  auto it = m_out_bl.cbegin();
  for (size_t i = 0; i < m_snapc.snaps.size(); ++i) {
    if (m_legacy_snapshot) {
      /// NOTE: remove after Luminous is retired
      std::string snap_name;
      if (*result >= 0) {
        *result = cls_client::get_snapshot_name_finish(&it, &snap_name);
      }

      uint64_t snap_size;
      if (*result >= 0) {
        // snapshot order is unused here -- decoded to advance the iterator
        uint8_t order;
        *result = cls_client::get_size_finish(&it, &snap_size, &order);
      }

      utime_t snap_timestamp;
      if (*result >= 0) {
        *result = cls_client::get_snapshot_timestamp_finish(&it,
                                                            &snap_timestamp);
      }

      if (*result >= 0) {
        // legacy snapshots are implicitly user snapshots with no child count
        m_snap_infos[i] = {m_snapc.snaps[i],
                           {cls::rbd::UserSnapshotNamespace{}},
                           snap_name, snap_size, snap_timestamp, 0};
      }
    } else if (*result >= 0) {
      *result = cls_client::snapshot_get_finish(&it, &m_snap_infos[i]);
    }

    if (*result == 0) {
      if (m_legacy_parent) {
        *result = cls_client::get_parent_finish(&it, &m_snap_parents[i].spec,
                                                &m_snap_parents[i].overlap);
      } else {
        std::optional<uint64_t> parent_overlap;
        *result = cls_client::parent_overlap_get_finish(&it, &parent_overlap);
        // snapshots share the HEAD parent spec; only the overlap is per-snap
        if (*result == 0 && parent_overlap && m_parent_md.spec.pool_id > -1) {
          m_snap_parents[i].spec = m_parent_md.spec;
          m_snap_parents[i].overlap = *parent_overlap;
        }
      }
    }

    if (*result >= 0) {
      *result = cls_client::get_flags_finish(&it, &m_snap_flags[i]);
    }

    if (*result >= 0) {
      *result = cls_client::get_protection_status_finish(
        &it, &m_snap_protection[i]);
    }

    if (*result < 0) {
      break;
    }
  }

  if (*result == -ENOENT) {
    // a snapshot was removed between reading the snap context and reading
    // its details -- restart from the mutable metadata
    ldout(cct, 10) << "out-of-sync snapshot state detected" << dendl;
    send_v2_get_mutable_metadata();
    return nullptr;
  } else if (!m_legacy_snapshot && *result == -EOPNOTSUPP) {
    // OSD predates snapshot_get -- retry with the legacy three-call path
    ldout(cct, 10) << "retrying using legacy snapshot methods" << dendl;
    m_legacy_snapshot = true;
    send_v2_get_snapshots();
    return nullptr;
  } else if (*result < 0) {
    lderr(cct) << "failed to retrieve snapshots: " << cpp_strerror(*result)
               << dendl;
    return m_on_finish;
  }

  send_v2_refresh_parent();
  return nullptr;
}
806
// Determines (under the snap/parent locks) whether the parent image needs
// to be (re)opened and, if so, kicks off a RefreshParentRequest.
template <typename I>
void RefreshRequest<I>::send_v2_refresh_parent() {
  {
    // locks are held only while inspecting current parent state; the
    // request itself is sent after they are released below
    RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
    RWLock::RLocker parent_locker(m_image_ctx.parent_lock);

    ParentImageInfo parent_md;
    MigrationInfo migration_info;
    // NOTE(review): on get_parent_info failure (r < 0) a refresh is still
    // attempted (unless skipped) -- presumably intentional; helper is
    // defined outside this view
    int r = get_parent_info(m_image_ctx.snap_id, &parent_md, &migration_info);
    if (!m_skip_open_parent_image && (r < 0 ||
        RefreshParentRequest<I>::is_refresh_required(m_image_ctx, parent_md,
                                                     migration_info))) {
      CephContext *cct = m_image_ctx.cct;
      ldout(cct, 10) << this << " " << __func__ << dendl;

      using klass = RefreshRequest<I>;
      Context *ctx = create_context_callback<
        klass, &klass::handle_v2_refresh_parent>(this);
      m_refresh_parent = RefreshParentRequest<I>::create(
        m_image_ctx, parent_md, migration_info, ctx);
    }
  }

  if (m_refresh_parent != nullptr) {
    m_refresh_parent->send();
  } else {
    send_v2_init_exclusive_lock();
  }
}
836
837 template <typename I>
838 Context *RefreshRequest<I>::handle_v2_refresh_parent(int *result) {
839 CephContext *cct = m_image_ctx.cct;
840 ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
841
842 if (*result < 0) {
843 lderr(cct) << "failed to refresh parent image: " << cpp_strerror(*result)
844 << dendl;
845 save_result(result);
846 send_v2_apply();
847 return nullptr;
848 }
849
850 send_v2_init_exclusive_lock();
851 return nullptr;
852 }
853
// Creates and initializes the exclusive lock object when the feature is
// enabled for a writable HEAD image that does not already have one.
template <typename I>
void RefreshRequest<I>::send_v2_init_exclusive_lock() {
  // skip when: feature disabled, image read-only, opened at a snapshot,
  // or a lock object already exists
  if ((m_features & RBD_FEATURE_EXCLUSIVE_LOCK) == 0 ||
      m_image_ctx.read_only || !m_image_ctx.snap_name.empty() ||
      m_image_ctx.exclusive_lock != nullptr) {
    send_v2_open_object_map();
    return;
  }

  // implies exclusive lock dynamically enabled or image open in-progress
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  // TODO need safe shut down
  m_exclusive_lock = m_image_ctx.create_exclusive_lock();

  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v2_init_exclusive_lock>(this);

  // init() requires the owner lock to be held by the caller
  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
  m_exclusive_lock->init(m_features, ctx);
}
877
878 template <typename I>
879 Context *RefreshRequest<I>::handle_v2_init_exclusive_lock(int *result) {
880 CephContext *cct = m_image_ctx.cct;
881 ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
882
883 if (*result < 0) {
884 lderr(cct) << "failed to initialize exclusive lock: "
885 << cpp_strerror(*result) << dendl;
886 save_result(result);
887 }
888
889 // object map and journal will be opened when exclusive lock is
890 // acquired (if features are enabled)
891 send_v2_apply();
892 return nullptr;
893 }
894
// Opens the journal when journaling was dynamically enabled while this
// client already owns the exclusive lock; otherwise defers journal
// creation to the lock-acquisition path.
template <typename I>
void RefreshRequest<I>::send_v2_open_journal() {
  // journal is not opened here when: feature off, read-only, snapshot
  // view, already open, or this client does not own the exclusive lock
  bool journal_disabled = (
    (m_features & RBD_FEATURE_JOURNALING) == 0 ||
     m_image_ctx.read_only ||
     !m_image_ctx.snap_name.empty() ||
     m_image_ctx.journal != nullptr ||
     m_image_ctx.exclusive_lock == nullptr ||
     !m_image_ctx.exclusive_lock->is_lock_owner());
  bool journal_disabled_by_policy;
  {
    RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
    journal_disabled_by_policy = (
      !journal_disabled &&
      m_image_ctx.get_journal_policy()->journal_disabled());
  }

  if (journal_disabled || journal_disabled_by_policy) {
    // journal dynamically enabled -- doesn't own exclusive lock
    if ((m_features & RBD_FEATURE_JOURNALING) != 0 &&
        !journal_disabled_by_policy &&
        m_image_ctx.exclusive_lock != nullptr &&
        m_image_ctx.journal == nullptr) {
      // force IO through the lock-acquisition path so the journal gets
      // opened before writes proceed
      m_image_ctx.io_work_queue->set_require_lock(librbd::io::DIRECTION_BOTH,
                                                  true);
    }
    send_v2_block_writes();
    return;
  }

  // implies journal dynamically enabled since ExclusiveLock will init
  // the journal upon acquiring the lock
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v2_open_journal>(this);

  // TODO need safe close
  m_journal = m_image_ctx.create_journal();
  m_journal->open(ctx);
}
938
939 template <typename I>
940 Context *RefreshRequest<I>::handle_v2_open_journal(int *result) {
941 CephContext *cct = m_image_ctx.cct;
942 ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
943
944 if (*result < 0) {
945 lderr(cct) << "failed to initialize journal: " << cpp_strerror(*result)
946 << dendl;
947 save_result(result);
948 }
949
950 send_v2_block_writes();
951 return nullptr;
952 }
953
// Temporarily blocks writes when journaling was dynamically disabled while
// a journal is still open, so in-flight journaled writes can drain before
// the journal is torn down.
template <typename I>
void RefreshRequest<I>::send_v2_block_writes() {
  bool disabled_journaling = false;
  {
    RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
    // journaling bit cleared while a journal object still exists
    disabled_journaling = ((m_features & RBD_FEATURE_EXCLUSIVE_LOCK) != 0 &&
                           (m_features & RBD_FEATURE_JOURNALING) == 0 &&
                           m_image_ctx.journal != nullptr);
  }

  if (!disabled_journaling) {
    send_v2_apply();
    return;
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  // we need to block writes temporarily to avoid in-flight journal
  // writes
  m_blocked_writes = true;
  Context *ctx = create_context_callback<
    RefreshRequest<I>, &RefreshRequest<I>::handle_v2_block_writes>(this);

  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
  m_image_ctx.io_work_queue->block_writes(ctx);
}
981
982 template <typename I>
983 Context *RefreshRequest<I>::handle_v2_block_writes(int *result) {
984 CephContext *cct = m_image_ctx.cct;
985 ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
986
987 if (*result < 0) {
988 lderr(cct) << "failed to block writes: " << cpp_strerror(*result)
989 << dendl;
990 save_result(result);
991 }
992 send_v2_apply();
993 return nullptr;
994 }
995
// Opens the object map when the feature was dynamically enabled (or the
// image open is in progress) for either the HEAD revision or the snapshot
// the image context is bound to.
template <typename I>
void RefreshRequest<I>::send_v2_open_object_map() {
  // skip when: feature off, map already open, or (for a HEAD image) we
  // are read-only / do not own the exclusive lock
  if ((m_features & RBD_FEATURE_OBJECT_MAP) == 0 ||
      m_image_ctx.object_map != nullptr ||
      (m_image_ctx.snap_name.empty() &&
       (m_image_ctx.read_only ||
        m_image_ctx.exclusive_lock == nullptr ||
        !m_image_ctx.exclusive_lock->is_lock_owner()))) {
    send_v2_open_journal();
    return;
  }

  // implies object map dynamically enabled or image open in-progress
  // since SetSnapRequest loads the object map for a snapshot and
  // ExclusiveLock loads the object map for HEAD
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  if (m_image_ctx.snap_name.empty()) {
    m_object_map = m_image_ctx.create_object_map(CEPH_NOSNAP);
  } else {
    // resolve the bound snapshot name to its snap id
    for (size_t snap_idx = 0; snap_idx < m_snap_infos.size(); ++snap_idx) {
      if (m_snap_infos[snap_idx].name == m_image_ctx.snap_name) {
        m_object_map = m_image_ctx.create_object_map(
          m_snapc.snaps[snap_idx].val);
        break;
      }
    }

    if (m_object_map == nullptr) {
      // snapshot vanished during refresh -- continue without an object map
      lderr(cct) << "failed to locate snapshot: " << m_image_ctx.snap_name
                 << dendl;
      send_v2_open_journal();
      return;
    }
  }

  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v2_open_object_map>(this);
  m_object_map->open(ctx);
}
1038
1039 template <typename I>
1040 Context *RefreshRequest<I>::handle_v2_open_object_map(int *result) {
1041 CephContext *cct = m_image_ctx.cct;
1042 ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
1043
1044 if (*result < 0) {
1045 lderr(cct) << "failed to open object map: " << cpp_strerror(*result)
1046 << dendl;
1047 delete m_object_map;
1048 m_object_map = nullptr;
1049
1050 if (*result != -EFBIG) {
1051 save_result(result);
1052 }
1053 }
1054
1055 send_v2_open_journal();
1056 return nullptr;
1057 }
1058
1059 template <typename I>
1060 void RefreshRequest<I>::send_v2_apply() {
1061 CephContext *cct = m_image_ctx.cct;
1062 ldout(cct, 10) << this << " " << __func__ << dendl;
1063
1064 // ensure we are not in a rados callback when applying updates
1065 using klass = RefreshRequest<I>;
1066 Context *ctx = create_context_callback<
1067 klass, &klass::handle_v2_apply>(this);
1068 m_image_ctx.op_work_queue->queue(ctx, 0);
1069 }
1070
1071 template <typename I>
1072 Context *RefreshRequest<I>::handle_v2_apply(int *result) {
1073 CephContext *cct = m_image_ctx.cct;
1074 ldout(cct, 10) << this << " " << __func__ << dendl;
1075
1076 apply();
1077
1078 return send_v2_finalize_refresh_parent();
1079 }
1080
1081 template <typename I>
1082 Context *RefreshRequest<I>::send_v2_finalize_refresh_parent() {
1083 if (m_refresh_parent == nullptr) {
1084 return send_v2_shut_down_exclusive_lock();
1085 }
1086
1087 CephContext *cct = m_image_ctx.cct;
1088 ldout(cct, 10) << this << " " << __func__ << dendl;
1089
1090 using klass = RefreshRequest<I>;
1091 Context *ctx = create_context_callback<
1092 klass, &klass::handle_v2_finalize_refresh_parent>(this);
1093 m_refresh_parent->finalize(ctx);
1094 return nullptr;
1095 }
1096
1097 template <typename I>
1098 Context *RefreshRequest<I>::handle_v2_finalize_refresh_parent(int *result) {
1099 CephContext *cct = m_image_ctx.cct;
1100 ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
1101
1102 ceph_assert(m_refresh_parent != nullptr);
1103 delete m_refresh_parent;
1104 m_refresh_parent = nullptr;
1105
1106 return send_v2_shut_down_exclusive_lock();
1107 }
1108
1109 template <typename I>
1110 Context *RefreshRequest<I>::send_v2_shut_down_exclusive_lock() {
1111 if (m_exclusive_lock == nullptr) {
1112 return send_v2_close_journal();
1113 }
1114
1115 CephContext *cct = m_image_ctx.cct;
1116 ldout(cct, 10) << this << " " << __func__ << dendl;
1117
1118 // exclusive lock feature was dynamically disabled. in-flight IO will be
1119 // flushed and in-flight requests will be canceled before releasing lock
1120 using klass = RefreshRequest<I>;
1121 Context *ctx = create_context_callback<
1122 klass, &klass::handle_v2_shut_down_exclusive_lock>(this);
1123 m_exclusive_lock->shut_down(ctx);
1124 return nullptr;
1125 }
1126
1127 template <typename I>
1128 Context *RefreshRequest<I>::handle_v2_shut_down_exclusive_lock(int *result) {
1129 CephContext *cct = m_image_ctx.cct;
1130 ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
1131
1132 if (*result < 0) {
1133 lderr(cct) << "failed to shut down exclusive lock: "
1134 << cpp_strerror(*result) << dendl;
1135 save_result(result);
1136 }
1137
1138 {
1139 RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
1140 ceph_assert(m_image_ctx.exclusive_lock == nullptr);
1141 }
1142
1143 ceph_assert(m_exclusive_lock != nullptr);
1144 delete m_exclusive_lock;
1145 m_exclusive_lock = nullptr;
1146
1147 return send_v2_close_journal();
1148 }
1149
1150 template <typename I>
1151 Context *RefreshRequest<I>::send_v2_close_journal() {
1152 if (m_journal == nullptr) {
1153 return send_v2_close_object_map();
1154 }
1155
1156 CephContext *cct = m_image_ctx.cct;
1157 ldout(cct, 10) << this << " " << __func__ << dendl;
1158
1159 // journal feature was dynamically disabled
1160 using klass = RefreshRequest<I>;
1161 Context *ctx = create_context_callback<
1162 klass, &klass::handle_v2_close_journal>(this);
1163 m_journal->close(ctx);
1164 return nullptr;
1165 }
1166
1167 template <typename I>
1168 Context *RefreshRequest<I>::handle_v2_close_journal(int *result) {
1169 CephContext *cct = m_image_ctx.cct;
1170 ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
1171
1172 if (*result < 0) {
1173 save_result(result);
1174 lderr(cct) << "failed to close journal: " << cpp_strerror(*result)
1175 << dendl;
1176 }
1177
1178 ceph_assert(m_journal != nullptr);
1179 delete m_journal;
1180 m_journal = nullptr;
1181
1182 ceph_assert(m_blocked_writes);
1183 m_blocked_writes = false;
1184
1185 m_image_ctx.io_work_queue->unblock_writes();
1186 return send_v2_close_object_map();
1187 }
1188
1189 template <typename I>
1190 Context *RefreshRequest<I>::send_v2_close_object_map() {
1191 if (m_object_map == nullptr) {
1192 return send_flush_aio();
1193 }
1194
1195 CephContext *cct = m_image_ctx.cct;
1196 ldout(cct, 10) << this << " " << __func__ << dendl;
1197
1198 // object map was dynamically disabled
1199 using klass = RefreshRequest<I>;
1200 Context *ctx = create_context_callback<
1201 klass, &klass::handle_v2_close_object_map>(this);
1202 m_object_map->close(ctx);
1203 return nullptr;
1204 }
1205
1206 template <typename I>
1207 Context *RefreshRequest<I>::handle_v2_close_object_map(int *result) {
1208 CephContext *cct = m_image_ctx.cct;
1209 ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
1210
1211 if (*result < 0) {
1212 lderr(cct) << "failed to close object map: " << cpp_strerror(*result)
1213 << dendl;
1214 }
1215
1216 ceph_assert(m_object_map != nullptr);
1217 delete m_object_map;
1218 m_object_map = nullptr;
1219
1220 return send_flush_aio();
1221 }
1222
1223 template <typename I>
1224 Context *RefreshRequest<I>::send_flush_aio() {
1225 if (m_incomplete_update && m_error_result == 0) {
1226 // if this was a partial refresh, notify ImageState
1227 m_error_result = -ERESTART;
1228 }
1229
1230 if (m_flush_aio) {
1231 CephContext *cct = m_image_ctx.cct;
1232 ldout(cct, 10) << this << " " << __func__ << dendl;
1233
1234 RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
1235 auto ctx = create_context_callback<
1236 RefreshRequest<I>, &RefreshRequest<I>::handle_flush_aio>(this);
1237 auto aio_comp = io::AioCompletion::create(
1238 ctx, util::get_image_ctx(&m_image_ctx), io::AIO_TYPE_FLUSH);
1239 auto req = io::ImageDispatchSpec<I>::create_flush_request(
1240 m_image_ctx, aio_comp, io::FLUSH_SOURCE_INTERNAL, {});
1241 req->send();
1242 delete req;
1243 return nullptr;
1244 } else if (m_error_result < 0) {
1245 // propagate saved error back to caller
1246 Context *ctx = create_context_callback<
1247 RefreshRequest<I>, &RefreshRequest<I>::handle_error>(this);
1248 m_image_ctx.op_work_queue->queue(ctx, 0);
1249 return nullptr;
1250 }
1251
1252 return m_on_finish;
1253 }
1254
1255 template <typename I>
1256 Context *RefreshRequest<I>::handle_flush_aio(int *result) {
1257 CephContext *cct = m_image_ctx.cct;
1258 ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
1259
1260 if (*result < 0) {
1261 lderr(cct) << "failed to flush pending AIO: " << cpp_strerror(*result)
1262 << dendl;
1263 }
1264
1265 return handle_error(result);
1266 }
1267
1268 template <typename I>
1269 Context *RefreshRequest<I>::handle_error(int *result) {
1270 if (m_error_result < 0) {
1271 *result = m_error_result;
1272
1273 CephContext *cct = m_image_ctx.cct;
1274 ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
1275 }
1276 return m_on_finish;
1277 }
1278
template <typename I>
void RefreshRequest<I>::apply() {
  // Commit the freshly fetched image state (size, locks, features,
  // snapshots, parent/migration info) into the ImageCtx under write locks,
  // and swap out any feature objects (exclusive lock, journal, object map)
  // that were dynamically disabled so the caller can shut them down.
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << dendl;

  // lock ordering: owner_lock -> md_lock -> snap_lock -> parent_lock
  RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
  RWLock::WLocker md_locker(m_image_ctx.md_lock);

  {
    RWLock::WLocker snap_locker(m_image_ctx.snap_lock);
    RWLock::WLocker parent_locker(m_image_ctx.parent_lock);

    m_image_ctx.size = m_size;
    m_image_ctx.lockers = m_lockers;
    m_image_ctx.lock_tag = m_lock_tag;
    m_image_ctx.exclusive_locked = m_exclusive_locked;

    // maps a destination snapshot id back to its source snapshot id when
    // the image is a migration destination (populated below)
    std::map<uint64_t, uint64_t> migration_reverse_snap_seq;

    if (m_image_ctx.old_format) {
      // v1 images have no features, flags, or op features
      m_image_ctx.order = m_order;
      m_image_ctx.features = 0;
      m_image_ctx.flags = 0;
      m_image_ctx.op_features = 0;
      m_image_ctx.operations_disabled = false;
      m_image_ctx.object_prefix = std::move(m_object_prefix);
      m_image_ctx.init_layout();
    } else {
      // HEAD revision doesn't have a defined overlap so it's only
      // applicable to snapshots
      if (!m_head_parent_overlap) {
        m_parent_md = {};
      }

      m_image_ctx.features = m_features;
      m_image_ctx.flags = m_flags;
      m_image_ctx.op_features = m_op_features;
      // disable maintenance ops if the image uses op features this build
      // doesn't understand
      m_image_ctx.operations_disabled = (
        (m_op_features & ~RBD_OPERATION_FEATURES_ALL) != 0ULL);
      m_image_ctx.group_spec = m_group_spec;
      if (get_migration_info(&m_image_ctx.parent_md,
                             &m_image_ctx.migration_info)) {
        // migration in progress: build the dst-snap -> src-snap reverse map
        for (auto it : m_image_ctx.migration_info.snap_map) {
          migration_reverse_snap_seq[it.second.front()] = it.first;
        }
      } else {
        m_image_ctx.parent_md = m_parent_md;
        m_image_ctx.migration_info = {};
      }
    }

    // any snapshot id not previously known forces an AIO flush before the
    // refresh completes (see send_flush_aio)
    for (size_t i = 0; i < m_snapc.snaps.size(); ++i) {
      std::vector<librados::snap_t>::const_iterator it = std::find(
        m_image_ctx.snaps.begin(), m_image_ctx.snaps.end(),
        m_snapc.snaps[i].val);
      if (it == m_image_ctx.snaps.end()) {
        m_flush_aio = true;
        ldout(cct, 20) << "new snapshot id=" << m_snapc.snaps[i].val
                       << " name=" << m_snap_infos[i].name
                       << " size=" << m_snap_infos[i].image_size
                       << dendl;
      }
    }

    // rebuild the in-memory snapshot tables from scratch
    m_image_ctx.snaps.clear();
    m_image_ctx.snap_info.clear();
    m_image_ctx.snap_ids.clear();
    auto overlap = m_image_ctx.parent_md.overlap;
    for (size_t i = 0; i < m_snapc.snaps.size(); ++i) {
      // v1 images carry no per-snapshot flags or protection status
      uint64_t flags = m_image_ctx.old_format ? 0 : m_snap_flags[i];
      uint8_t protection_status = m_image_ctx.old_format ?
        static_cast<uint8_t>(RBD_PROTECTION_STATUS_UNPROTECTED) :
        m_snap_protection[i];
      ParentImageInfo parent;
      if (!m_image_ctx.old_format) {
        if (!m_image_ctx.migration_info.empty()) {
          // during migration each snapshot's parent is derived from the
          // migration source rather than the recorded per-snap parent
          parent = m_image_ctx.parent_md;
          auto it = migration_reverse_snap_seq.find(m_snapc.snaps[i].val);
          if (it != migration_reverse_snap_seq.end()) {
            parent.spec.snap_id = it->second;
            parent.overlap = m_snap_infos[i].image_size;
          } else {
            // snapshot created after migration started: clamp the overlap
            // to the smallest size seen so far
            overlap = std::min(overlap, m_snap_infos[i].image_size);
            parent.overlap = overlap;
          }
        } else {
          parent = m_snap_parents[i];
        }
      }
      m_image_ctx.add_snap(m_snap_infos[i].snapshot_namespace,
                           m_snap_infos[i].name, m_snapc.snaps[i].val,
                           m_snap_infos[i].image_size, parent,
                           protection_status, flags,
                           m_snap_infos[i].timestamp);
    }
    // the HEAD overlap can never exceed the current image size
    m_image_ctx.parent_md.overlap = std::min(overlap, m_image_ctx.size);
    m_image_ctx.snapc = m_snapc;

    // the snapshot this image was opened at may have been deleted
    if (m_image_ctx.snap_id != CEPH_NOSNAP &&
        m_image_ctx.get_snap_id(m_image_ctx.snap_namespace,
                                m_image_ctx.snap_name) != m_image_ctx.snap_id) {
      lderr(cct) << "tried to read from a snapshot that no longer exists: "
                 << m_image_ctx.snap_name << dendl;
      m_image_ctx.snap_exists = false;
    }

    if (m_refresh_parent != nullptr) {
      m_refresh_parent->apply();
    }
    // update the write snap context used for data-pool operations
    m_image_ctx.data_ctx.selfmanaged_snap_set_write_ctx(m_image_ctx.snapc.seq,
                                                        m_image_ctx.snaps);

    // handle dynamically enabled / disabled features
    if (m_image_ctx.exclusive_lock != nullptr &&
        !m_image_ctx.test_features(RBD_FEATURE_EXCLUSIVE_LOCK,
                                   m_image_ctx.snap_lock)) {
      // disabling exclusive lock will automatically handle closing
      // object map and journaling
      ceph_assert(m_exclusive_lock == nullptr);
      m_exclusive_lock = m_image_ctx.exclusive_lock;
    } else {
      if (m_exclusive_lock != nullptr) {
        // exclusive lock was instantiated by this refresh: hand it to the
        // image context
        ceph_assert(m_image_ctx.exclusive_lock == nullptr);
        std::swap(m_exclusive_lock, m_image_ctx.exclusive_lock);
      }
      if (!m_image_ctx.test_features(RBD_FEATURE_JOURNALING,
                                     m_image_ctx.snap_lock)) {
        // journaling disabled: stop requiring the lock for reads (unless
        // clone copy-on-read still needs it) and detach the journal so it
        // can be closed
        if (!m_image_ctx.clone_copy_on_read && m_image_ctx.journal != nullptr) {
          m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_READ,
                                                      false);
        }
        std::swap(m_journal, m_image_ctx.journal);
      } else if (m_journal != nullptr) {
        // journal was instantiated by this refresh: attach it
        std::swap(m_journal, m_image_ctx.journal);
      }
      // attach a newly opened object map or detach a disabled one
      if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
                                     m_image_ctx.snap_lock) ||
          m_object_map != nullptr) {
        std::swap(m_object_map, m_image_ctx.object_map);
      }
    }
  }
}
1422
1423 template <typename I>
1424 int RefreshRequest<I>::get_parent_info(uint64_t snap_id,
1425 ParentImageInfo *parent_md,
1426 MigrationInfo *migration_info) {
1427 if (get_migration_info(parent_md, migration_info)) {
1428 return 0;
1429 } else if (snap_id == CEPH_NOSNAP) {
1430 *parent_md = m_parent_md;
1431 *migration_info = {};
1432 return 0;
1433 } else {
1434 for (size_t i = 0; i < m_snapc.snaps.size(); ++i) {
1435 if (m_snapc.snaps[i].val == snap_id) {
1436 *parent_md = m_snap_parents[i];
1437 *migration_info = {};
1438 return 0;
1439 }
1440 }
1441 }
1442 return -ENOENT;
1443 }
1444
template <typename I>
bool RefreshRequest<I>::get_migration_info(ParentImageInfo *parent_md,
                                           MigrationInfo *migration_info) {
  // Populate parent/migration metadata when this image is the destination
  // of an in-progress (PREPARED or EXECUTING) migration; returns false if
  // no migration is active from this image's point of view.
  if (m_migration_spec.header_type != cls::rbd::MIGRATION_HEADER_TYPE_DST ||
      (m_migration_spec.state != cls::rbd::MIGRATION_STATE_PREPARED &&
       m_migration_spec.state != cls::rbd::MIGRATION_STATE_EXECUTING)) {
    // only these other combinations are expected when not actively
    // migrating: migration source, no migration recorded, or an already
    // executed migration
    ceph_assert(m_migration_spec.header_type ==
                  cls::rbd::MIGRATION_HEADER_TYPE_SRC ||
                m_migration_spec.pool_id == -1 ||
                m_migration_spec.state == cls::rbd::MIGRATION_STATE_EXECUTED);

    return false;
  }

  // expose the migration source as this image's parent (HEAD revision)
  parent_md->spec.pool_id = m_migration_spec.pool_id;
  parent_md->spec.pool_namespace = m_migration_spec.pool_namespace;
  parent_md->spec.image_id = m_migration_spec.image_id;
  parent_md->spec.snap_id = CEPH_NOSNAP;
  parent_md->overlap = std::min(m_size, m_migration_spec.overlap);

  auto snap_seqs = m_migration_spec.snap_seqs;
  // If new snapshots have been created on destination image after
  // migration stared, map the source CEPH_NOSNAP to the earliest of
  // these snapshots.
  snapid_t snap_id = snap_seqs.empty() ? 0 : snap_seqs.rbegin()->second;
  // m_snapc.snaps is iterated in reverse, so upper_bound scans ids in
  // ascending order to find the first snapshot newer than snap_id
  auto it = std::upper_bound(m_snapc.snaps.rbegin(), m_snapc.snaps.rend(),
                             snap_id);
  if (it != m_snapc.snaps.rend()) {
    snap_seqs[CEPH_NOSNAP] = *it;
  } else {
    snap_seqs[CEPH_NOSNAP] = CEPH_NOSNAP;
  }

  // compute the migration overlap: the parent overlap if HEAD is mapped,
  // widened to the largest image size among mapped destination snapshots
  std::set<uint64_t> snap_ids;
  for (auto& it : snap_seqs) {
    snap_ids.insert(it.second);
  }
  uint64_t overlap = snap_ids.find(CEPH_NOSNAP) != snap_ids.end() ?
    parent_md->overlap : 0;
  for (size_t i = 0; i < m_snapc.snaps.size(); ++i) {
    if (snap_ids.find(m_snapc.snaps[i].val) != snap_ids.end()) {
      overlap = std::max(overlap, m_snap_infos[i].image_size);
    }
  }

  *migration_info = {m_migration_spec.pool_id, m_migration_spec.pool_namespace,
                     m_migration_spec.image_name, m_migration_spec.image_id, {},
                     overlap, m_migration_spec.flatten};

  // expand the src -> dst snapshot sequence mapping into the full snap map
  deep_copy::util::compute_snap_map(0, CEPH_NOSNAP, snap_seqs,
                                    &migration_info->snap_map);
  return true;
}
1498
1499 } // namespace image
1500 } // namespace librbd
1501
1502 template class librbd::image::RefreshRequest<librbd::ImageCtx>;