]> git.proxmox.com Git - ceph.git/blob - ceph/src/librbd/image/RefreshRequest.cc
664f5b8b793e5f63b9543fcfb52b0cbf473509bd
[ceph.git] / ceph / src / librbd / image / RefreshRequest.cc
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3
4 #include <boost/algorithm/string/predicate.hpp>
5 #include "include/ceph_assert.h"
6
7 #include "librbd/image/RefreshRequest.h"
8 #include "common/dout.h"
9 #include "common/errno.h"
10 #include "cls/lock/cls_lock_client.h"
11 #include "cls/rbd/cls_rbd_client.h"
12 #include "librbd/ExclusiveLock.h"
13 #include "librbd/ImageCtx.h"
14 #include "librbd/ImageWatcher.h"
15 #include "librbd/Journal.h"
16 #include "librbd/ObjectMap.h"
17 #include "librbd/Utils.h"
18 #include "librbd/deep_copy/Utils.h"
19 #include "librbd/image/RefreshParentRequest.h"
20 #include "librbd/io/AioCompletion.h"
21 #include "librbd/io/ImageDispatchSpec.h"
22 #include "librbd/io/ImageRequestWQ.h"
23 #include "librbd/journal/Policy.h"
24
25 #define dout_subsys ceph_subsys_rbd
26 #undef dout_prefix
27 #define dout_prefix *_dout << "librbd::image::RefreshRequest: "
28
29 namespace librbd {
30 namespace image {
31
32 namespace {
33
34 const uint64_t MAX_METADATA_ITEMS = 128;
35
36 }
37
38 using util::create_rados_callback;
39 using util::create_async_context_callback;
40 using util::create_context_callback;
41
// Construct a refresh state machine for the supplied image.
//
// @param image_ctx        image being refreshed
// @param acquiring_lock   true when invoked as part of exclusive lock
//                         acquisition (affects dynamically disabled lock
//                         feature handling)
// @param skip_open_parent skip (re)opening the parent image during refresh
// @param on_finish        completion fired when the refresh completes; it is
//                         wrapped in an async callback so it never runs in a
//                         librados callback thread
template <typename I>
RefreshRequest<I>::RefreshRequest(I &image_ctx, bool acquiring_lock,
                                  bool skip_open_parent, Context *on_finish)
  : m_image_ctx(image_ctx), m_acquiring_lock(acquiring_lock),
    m_skip_open_parent_image(skip_open_parent),
    m_on_finish(create_async_context_callback(m_image_ctx, on_finish)),
    m_error_result(0), m_flush_aio(false), m_exclusive_lock(nullptr),
    m_object_map(nullptr), m_journal(nullptr), m_refresh_parent(nullptr) {
  // pool-level metadata (RBD_INFO) lives in the pool's default namespace,
  // regardless of the namespace the image itself resides in
  m_pool_metadata_io_ctx.dup(image_ctx.md_ctx);
  m_pool_metadata_io_ctx.set_namespace("");
}
53
template <typename I>
RefreshRequest<I>::~RefreshRequest() {
  // these require state machine to close: ownership of any objects created
  // during the refresh must have been handed off (or torn down) before the
  // request is destroyed
  ceph_assert(m_exclusive_lock == nullptr);
  ceph_assert(m_object_map == nullptr);
  ceph_assert(m_journal == nullptr);
  ceph_assert(m_refresh_parent == nullptr);
  // writes blocked in send_v2_block_writes() must have been unblocked
  ceph_assert(!m_blocked_writes);
}
63
64 template <typename I>
65 void RefreshRequest<I>::send() {
66 if (m_image_ctx.old_format) {
67 send_v1_read_header();
68 } else {
69 send_v2_get_mutable_metadata();
70 }
71 }
72
73 template <typename I>
74 void RefreshRequest<I>::send_get_migration_header() {
75 if (m_image_ctx.ignore_migrating) {
76 if (m_image_ctx.old_format) {
77 send_v1_get_snapshots();
78 } else {
79 send_v2_get_metadata();
80 }
81 return;
82 }
83
84 CephContext *cct = m_image_ctx.cct;
85 ldout(cct, 10) << this << " " << __func__ << dendl;
86
87 librados::ObjectReadOperation op;
88 cls_client::migration_get_start(&op);
89
90 using klass = RefreshRequest<I>;
91 librados::AioCompletion *comp =
92 create_rados_callback<klass, &klass::handle_get_migration_header>(this);
93 m_out_bl.clear();
94 m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
95 &m_out_bl);
96 comp->release();
97 }
98
// Decode the migration header and decide how (or whether) to continue the
// refresh based on the migration direction and state.
//
// @param result in/out rados completion code / decode result
// @return completion context when the state machine is finished, nullptr
//         when another async step was dispatched
template <typename I>
Context *RefreshRequest<I>::handle_get_migration_header(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result == 0) {
    auto it = m_out_bl.cbegin();
    *result = cls_client::migration_get_finish(&it, &m_migration_spec);
  } else if (*result == -ENOENT) {
    // migration raced with the refresh and has since completed -- restart
    // the whole refresh from the beginning
    ldout(cct, 5) << this << " " << __func__ << ": no migration header found"
                  << ", retrying" << dendl;
    send();
    return nullptr;
  }

  if (*result < 0) {
    lderr(cct) << "failed to retrieve migration header: "
               << cpp_strerror(*result) << dendl;
    return m_on_finish;
  }

  switch(m_migration_spec.header_type) {
  case cls::rbd::MIGRATION_HEADER_TYPE_SRC:
    // the source of a migration may only be opened read-only
    if (!m_image_ctx.read_only) {
      lderr(cct) << "image being migrated" << dendl;
      *result = -EROFS;
      return m_on_finish;
    }
    ldout(cct, 1) << this << " " << __func__ << ": migrating to: "
                  << m_migration_spec << dendl;
    break;
  case cls::rbd::MIGRATION_HEADER_TYPE_DST:
    ldout(cct, 1) << this << " " << __func__ << ": migrating from: "
                  << m_migration_spec << dendl;
    // destination is only usable once the migration has been prepared;
    // otherwise retry the refresh until it reaches a usable state
    if (m_migration_spec.state != cls::rbd::MIGRATION_STATE_PREPARED &&
        m_migration_spec.state != cls::rbd::MIGRATION_STATE_EXECUTING &&
        m_migration_spec.state != cls::rbd::MIGRATION_STATE_EXECUTED) {
      ldout(cct, 5) << this << " " << __func__ << ": current migration state: "
                    << m_migration_spec.state << ", retrying" << dendl;
      send();
      return nullptr;
    }
    break;
  default:
    // unknown header type -- treat as a protocol error
    ldout(cct, 1) << this << " " << __func__ << ": migration type "
                  << m_migration_spec.header_type << dendl;
    *result = -EBADMSG;
    return m_on_finish;
  }

  if (m_image_ctx.old_format) {
    send_v1_get_snapshots();
  } else {
    send_v2_get_metadata();
  }
  return nullptr;
}
156
157 template <typename I>
158 void RefreshRequest<I>::send_v1_read_header() {
159 CephContext *cct = m_image_ctx.cct;
160 ldout(cct, 10) << this << " " << __func__ << dendl;
161
162 librados::ObjectReadOperation op;
163 op.read(0, 0, nullptr, nullptr);
164
165 using klass = RefreshRequest<I>;
166 librados::AioCompletion *comp = create_rados_callback<
167 klass, &klass::handle_v1_read_header>(this);
168 m_out_bl.clear();
169 int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
170 &m_out_bl);
171 ceph_assert(r == 0);
172 comp->release();
173 }
174
// Validate and decode the v1 on-disk header; detect a migration header
// variant by its magic text.
//
// @param result in/out rados read result
// @return completion context on failure, nullptr when the next async step
//         was dispatched
template <typename I>
Context *RefreshRequest<I>::handle_v1_read_header(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": " << "r=" << *result << dendl;

  rbd_obj_header_ondisk v1_header;
  bool migrating = false;
  if (*result < 0) {
    return m_on_finish;
  } else if (m_out_bl.length() < sizeof(v1_header)) {
    // short read -- cannot possibly contain a full header
    lderr(cct) << "v1 header too small" << dendl;
    *result = -EIO;
    return m_on_finish;
  } else if (memcmp(RBD_HEADER_TEXT, m_out_bl.c_str(),
                    sizeof(RBD_HEADER_TEXT)) != 0) {
    // not a normal v1 header -- check for the migration magic instead
    if (memcmp(RBD_MIGRATE_HEADER_TEXT, m_out_bl.c_str(),
               sizeof(RBD_MIGRATE_HEADER_TEXT)) == 0) {
      ldout(cct, 1) << this << " " << __func__ << ": migration v1 header detected"
                    << dendl;
      migrating = true;
    } else {
      lderr(cct) << "unrecognized v1 header" << dendl;
      *result = -ENXIO;
      return m_on_finish;
    }
  }

  // header layout is identical for normal and migration variants
  memcpy(&v1_header, m_out_bl.c_str(), sizeof(v1_header));
  m_order = v1_header.options.order;
  m_size = v1_header.image_size;
  m_object_prefix = v1_header.block_name;
  if (migrating) {
    send_get_migration_header();
  } else {
    send_v1_get_snapshots();
  }
  return nullptr;
}
213
214 template <typename I>
215 void RefreshRequest<I>::send_v1_get_snapshots() {
216 CephContext *cct = m_image_ctx.cct;
217 ldout(cct, 10) << this << " " << __func__ << dendl;
218
219 librados::ObjectReadOperation op;
220 cls_client::old_snapshot_list_start(&op);
221
222 using klass = RefreshRequest<I>;
223 librados::AioCompletion *comp = create_rados_callback<
224 klass, &klass::handle_v1_get_snapshots>(this);
225 m_out_bl.clear();
226 int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
227 &m_out_bl);
228 ceph_assert(r == 0);
229 comp->release();
230 }
231
// Decode the v1 snapshot list and build the in-memory snapshot info
// records used by the apply step.
//
// @param result in/out rados/decode result
// @return completion context on failure, nullptr otherwise
template <typename I>
Context *RefreshRequest<I>::handle_v1_get_snapshots(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": " << "r=" << *result << dendl;

  std::vector<std::string> snap_names;
  std::vector<uint64_t> snap_sizes;
  if (*result == 0) {
    auto it = m_out_bl.cbegin();
    *result = cls_client::old_snapshot_list_finish(&it, &snap_names,
                                                   &snap_sizes, &m_snapc);
  }

  if (*result < 0) {
    lderr(cct) << "failed to retrieve v1 snapshots: " << cpp_strerror(*result)
               << dendl;
    return m_on_finish;
  }

  if (!m_snapc.is_valid()) {
    lderr(cct) << "v1 image snap context is invalid" << dendl;
    *result = -EIO;
    return m_on_finish;
  }

  // NOTE(review): assumes the cls method returns names/sizes parallel to
  // m_snapc.snaps (same length and order) -- guaranteed by the decoder
  m_snap_infos.clear();
  for (size_t i = 0; i < m_snapc.snaps.size(); ++i) {
    // v1 images support only user snapshots and carry no timestamps/flags
    m_snap_infos.push_back({m_snapc.snaps[i],
                            {cls::rbd::UserSnapshotNamespace{}},
                            snap_names[i], snap_sizes[i], {}, 0});
  }

  send_v1_get_locks();
  return nullptr;
}
267
268 template <typename I>
269 void RefreshRequest<I>::send_v1_get_locks() {
270 CephContext *cct = m_image_ctx.cct;
271 ldout(cct, 10) << this << " " << __func__ << dendl;
272
273 librados::ObjectReadOperation op;
274 rados::cls::lock::get_lock_info_start(&op, RBD_LOCK_NAME);
275
276 using klass = RefreshRequest<I>;
277 librados::AioCompletion *comp = create_rados_callback<
278 klass, &klass::handle_v1_get_locks>(this);
279 m_out_bl.clear();
280 int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
281 &m_out_bl);
282 ceph_assert(r == 0);
283 comp->release();
284 }
285
286 template <typename I>
287 Context *RefreshRequest<I>::handle_v1_get_locks(int *result) {
288 CephContext *cct = m_image_ctx.cct;
289 ldout(cct, 10) << this << " " << __func__ << ": "
290 << "r=" << *result << dendl;
291
292 if (*result == 0) {
293 auto it = m_out_bl.cbegin();
294 ClsLockType lock_type;
295 *result = rados::cls::lock::get_lock_info_finish(&it, &m_lockers,
296 &lock_type, &m_lock_tag);
297 if (*result == 0) {
298 m_exclusive_locked = (lock_type == LOCK_EXCLUSIVE);
299 }
300 }
301 if (*result < 0) {
302 lderr(cct) << "failed to retrieve locks: " << cpp_strerror(*result)
303 << dendl;
304 return m_on_finish;
305 }
306
307 send_v1_apply();
308 return nullptr;
309 }
310
311 template <typename I>
312 void RefreshRequest<I>::send_v1_apply() {
313 CephContext *cct = m_image_ctx.cct;
314 ldout(cct, 10) << this << " " << __func__ << dendl;
315
316 // ensure we are not in a rados callback when applying updates
317 using klass = RefreshRequest<I>;
318 Context *ctx = create_context_callback<
319 klass, &klass::handle_v1_apply>(this);
320 m_image_ctx.op_work_queue->queue(ctx, 0);
321 }
322
323 template <typename I>
324 Context *RefreshRequest<I>::handle_v1_apply(int *result) {
325 CephContext *cct = m_image_ctx.cct;
326 ldout(cct, 10) << this << " " << __func__ << dendl;
327
328 apply();
329 return send_flush_aio();
330 }
331
// Batch-read all frequently changing v2 image state (size, features,
// flags, snap context, lock info) in a single compound op.
//
// NOTE: the order of the *_start() calls below must exactly match the
// order of the *_finish() decodes in handle_v2_get_mutable_metadata().
template <typename I>
void RefreshRequest<I>::send_v2_get_mutable_metadata() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  uint64_t snap_id;
  {
    RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
    snap_id = m_image_ctx.snap_id;
  }

  // an image open at a snapshot is implicitly read-only
  bool read_only = m_image_ctx.read_only || snap_id != CEPH_NOSNAP;
  librados::ObjectReadOperation op;
  cls_client::get_size_start(&op, CEPH_NOSNAP);
  cls_client::get_features_start(&op, read_only);
  cls_client::get_flags_start(&op, CEPH_NOSNAP);
  cls_client::get_snapcontext_start(&op);
  rados::cls::lock::get_lock_info_start(&op, RBD_LOCK_NAME);

  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v2_get_mutable_metadata>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                         &m_out_bl);
  ceph_assert(r == 0);
  comp->release();
}
360
// Decode the compound mutable-metadata payload; the decode order must
// mirror the op order built in send_v2_get_mutable_metadata().
//
// @param result in/out rados/decode result
// @return completion context on failure, nullptr otherwise
template <typename I>
Context *RefreshRequest<I>::handle_v2_get_mutable_metadata(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": "
                 << "r=" << *result << dendl;

  auto it = m_out_bl.cbegin();
  if (*result >= 0) {
    // HEAD image order is fixed at creation; only the size is refreshed
    uint8_t order;
    *result = cls_client::get_size_finish(&it, &m_size, &order);
  }

  if (*result >= 0) {
    *result = cls_client::get_features_finish(&it, &m_features,
                                              &m_incompatible_features);
  }

  if (*result >= 0) {
    *result = cls_client::get_flags_finish(&it, &m_flags);
  }

  if (*result >= 0) {
    *result = cls_client::get_snapcontext_finish(&it, &m_snapc);
  }

  if (*result >= 0) {
    ClsLockType lock_type = LOCK_NONE;
    *result = rados::cls::lock::get_lock_info_finish(&it, &m_lockers,
                                                     &lock_type, &m_lock_tag);
    if (*result == 0) {
      m_exclusive_locked = (lock_type == LOCK_EXCLUSIVE);
    }
  }

  if (*result < 0) {
    lderr(cct) << "failed to retrieve mutable metadata: "
               << cpp_strerror(*result) << dendl;
    return m_on_finish;
  }

  // refuse to open an image that requires features this build doesn't have
  uint64_t unsupported = m_incompatible_features & ~RBD_FEATURES_ALL;
  if (unsupported != 0ULL) {
    lderr(cct) << "Image uses unsupported features: " << unsupported << dendl;
    *result = -ENOSYS;
    return m_on_finish;
  }

  if (!m_snapc.is_valid()) {
    lderr(cct) << "image snap context is invalid!" << dendl;
    *result = -EIO;
    return m_on_finish;
  }

  // mid-lock-acquisition refresh observed the exclusive-lock feature being
  // dynamically disabled; pretend it's still enabled for this pass and
  // flag the update as incomplete so a follow-up refresh occurs
  if (m_acquiring_lock && (m_features & RBD_FEATURE_EXCLUSIVE_LOCK) == 0) {
    ldout(cct, 5) << "ignoring dynamically disabled exclusive lock" << dendl;
    m_features |= RBD_FEATURE_EXCLUSIVE_LOCK;
    m_incomplete_update = true;
  }

  send_v2_get_parent();
  return nullptr;
}
423
424 template <typename I>
425 void RefreshRequest<I>::send_v2_get_parent() {
426 // NOTE: remove support when Mimic is EOLed
427 CephContext *cct = m_image_ctx.cct;
428 ldout(cct, 10) << this << " " << __func__ << ": legacy=" << m_legacy_parent
429 << dendl;
430
431 librados::ObjectReadOperation op;
432 if (!m_legacy_parent) {
433 cls_client::parent_get_start(&op);
434 cls_client::parent_overlap_get_start(&op, CEPH_NOSNAP);
435 } else {
436 cls_client::get_parent_start(&op, CEPH_NOSNAP);
437 }
438
439 auto aio_comp = create_rados_callback<
440 RefreshRequest<I>, &RefreshRequest<I>::handle_v2_get_parent>(this);
441 m_out_bl.clear();
442 m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, aio_comp, &op,
443 &m_out_bl);
444 aio_comp->release();
445 }
446
447 template <typename I>
448 Context *RefreshRequest<I>::handle_v2_get_parent(int *result) {
449 // NOTE: remove support when Mimic is EOLed
450 CephContext *cct = m_image_ctx.cct;
451 ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
452
453 auto it = m_out_bl.cbegin();
454 if (!m_legacy_parent) {
455 if (*result == 0) {
456 *result = cls_client::parent_get_finish(&it, &m_parent_md.spec);
457 }
458
459 std::optional<uint64_t> parent_overlap;
460 if (*result == 0) {
461 *result = cls_client::parent_overlap_get_finish(&it, &parent_overlap);
462 }
463
464 if (*result == 0 && parent_overlap) {
465 m_parent_md.overlap = *parent_overlap;
466 m_head_parent_overlap = true;
467 }
468 } else if (*result == 0) {
469 *result = cls_client::get_parent_finish(&it, &m_parent_md.spec,
470 &m_parent_md.overlap);
471 m_head_parent_overlap = true;
472 }
473
474 if (*result == -EOPNOTSUPP && !m_legacy_parent) {
475 ldout(cct, 10) << "retrying using legacy parent method" << dendl;
476 m_legacy_parent = true;
477 send_v2_get_parent();
478 return nullptr;
479 } if (*result < 0) {
480 lderr(cct) << "failed to retrieve parent: " << cpp_strerror(*result)
481 << dendl;
482 return m_on_finish;
483 }
484
485 if ((m_features & RBD_FEATURE_MIGRATING) != 0) {
486 ldout(cct, 1) << "migrating feature set" << dendl;
487 send_get_migration_header();
488 return nullptr;
489 }
490
491 send_v2_get_metadata();
492 return nullptr;
493 }
494
495 template <typename I>
496 void RefreshRequest<I>::send_v2_get_metadata() {
497 CephContext *cct = m_image_ctx.cct;
498 ldout(cct, 10) << this << " " << __func__ << ": "
499 << "start_key=" << m_last_metadata_key << dendl;
500
501 librados::ObjectReadOperation op;
502 cls_client::metadata_list_start(&op, m_last_metadata_key, MAX_METADATA_ITEMS);
503
504 using klass = RefreshRequest<I>;
505 librados::AioCompletion *comp =
506 create_rados_callback<klass, &klass::handle_v2_get_metadata>(this);
507 m_out_bl.clear();
508 m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
509 &m_out_bl);
510 comp->release();
511 }
512
513 template <typename I>
514 Context *RefreshRequest<I>::handle_v2_get_metadata(int *result) {
515 CephContext *cct = m_image_ctx.cct;
516 ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
517
518 std::map<std::string, bufferlist> metadata;
519 if (*result == 0) {
520 auto it = m_out_bl.cbegin();
521 *result = cls_client::metadata_list_finish(&it, &metadata);
522 }
523
524 if (*result < 0) {
525 lderr(cct) << "failed to retrieve metadata: " << cpp_strerror(*result)
526 << dendl;
527 return m_on_finish;
528 }
529
530 if (!metadata.empty()) {
531 m_metadata.insert(metadata.begin(), metadata.end());
532 m_last_metadata_key = metadata.rbegin()->first;
533 if (boost::starts_with(m_last_metadata_key,
534 ImageCtx::METADATA_CONF_PREFIX)) {
535 send_v2_get_metadata();
536 return nullptr;
537 }
538 }
539
540 m_last_metadata_key.clear();
541 send_v2_get_pool_metadata();
542 return nullptr;
543 }
544
545 template <typename I>
546 void RefreshRequest<I>::send_v2_get_pool_metadata() {
547 CephContext *cct = m_image_ctx.cct;
548 ldout(cct, 10) << this << " " << __func__ << ": "
549 << "start_key=" << m_last_metadata_key << dendl;
550
551 librados::ObjectReadOperation op;
552 cls_client::metadata_list_start(&op, m_last_metadata_key, MAX_METADATA_ITEMS);
553
554 using klass = RefreshRequest<I>;
555 librados::AioCompletion *comp =
556 create_rados_callback<klass, &klass::handle_v2_get_pool_metadata>(this);
557 m_out_bl.clear();
558 m_pool_metadata_io_ctx.aio_operate(RBD_INFO, comp, &op, &m_out_bl);
559 comp->release();
560 }
561
// Accumulate a page of pool-level metadata; tolerate OSDs/pools without
// pool metadata support, then apply all collected metadata overrides.
//
// @param result in/out rados/decode result
// @return completion context on failure, nullptr otherwise
template <typename I>
Context *RefreshRequest<I>::handle_v2_get_pool_metadata(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;

  std::map<std::string, bufferlist> metadata;
  if (*result == 0) {
    auto it = m_out_bl.cbegin();
    *result = cls_client::metadata_list_finish(&it, &metadata);
  }

  // missing RBD_INFO object or old OSD: pool metadata is optional
  if (*result == -EOPNOTSUPP || *result == -ENOENT) {
    ldout(cct, 10) << "pool metadata not supported by OSD" << dendl;
  } else if (*result < 0) {
    lderr(cct) << "failed to retrieve pool metadata: " << cpp_strerror(*result)
               << dendl;
    return m_on_finish;
  }

  if (!metadata.empty()) {
    m_metadata.insert(metadata.begin(), metadata.end());
    m_last_metadata_key = metadata.rbegin()->first;
    // keep paging while the last key still falls inside the config prefix
    if (boost::starts_with(m_last_metadata_key,
                           ImageCtx::METADATA_CONF_PREFIX)) {
      send_v2_get_pool_metadata();
      return nullptr;
    }
  }

  // config can only be applied synchronously before the watcher registers
  bool thread_safe = m_image_ctx.image_watcher->is_unregistered();
  m_image_ctx.apply_metadata(m_metadata, thread_safe);

  send_v2_get_op_features();
  return nullptr;
}
597
598 template <typename I>
599 void RefreshRequest<I>::send_v2_get_op_features() {
600 if ((m_features & RBD_FEATURE_OPERATIONS) == 0LL) {
601 send_v2_get_group();
602 return;
603 }
604
605 CephContext *cct = m_image_ctx.cct;
606 ldout(cct, 10) << this << " " << __func__ << dendl;
607
608 librados::ObjectReadOperation op;
609 cls_client::op_features_get_start(&op);
610
611 librados::AioCompletion *comp = create_rados_callback<
612 RefreshRequest<I>, &RefreshRequest<I>::handle_v2_get_op_features>(this);
613 m_out_bl.clear();
614 int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
615 &m_out_bl);
616 ceph_assert(r == 0);
617 comp->release();
618 }
619
620 template <typename I>
621 Context *RefreshRequest<I>::handle_v2_get_op_features(int *result) {
622 CephContext *cct = m_image_ctx.cct;
623 ldout(cct, 10) << this << " " << __func__ << ": "
624 << "r=" << *result << dendl;
625
626 // -EOPNOTSUPP handler not required since feature bit implies OSD
627 // supports the method
628 if (*result == 0) {
629 auto it = m_out_bl.cbegin();
630 cls_client::op_features_get_finish(&it, &m_op_features);
631 } else if (*result < 0) {
632 lderr(cct) << "failed to retrieve op features: " << cpp_strerror(*result)
633 << dendl;
634 return m_on_finish;
635 }
636
637 send_v2_get_group();
638 return nullptr;
639 }
640
641 template <typename I>
642 void RefreshRequest<I>::send_v2_get_group() {
643 CephContext *cct = m_image_ctx.cct;
644 ldout(cct, 10) << this << " " << __func__ << dendl;
645
646 librados::ObjectReadOperation op;
647 cls_client::image_group_get_start(&op);
648
649 using klass = RefreshRequest<I>;
650 librados::AioCompletion *comp = create_rados_callback<
651 klass, &klass::handle_v2_get_group>(this);
652 m_out_bl.clear();
653 int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
654 &m_out_bl);
655 ceph_assert(r == 0);
656 comp->release();
657 }
658
659 template <typename I>
660 Context *RefreshRequest<I>::handle_v2_get_group(int *result) {
661 CephContext *cct = m_image_ctx.cct;
662 ldout(cct, 10) << this << " " << __func__ << ": "
663 << "r=" << *result << dendl;
664
665 if (*result == 0) {
666 auto it = m_out_bl.cbegin();
667 cls_client::image_group_get_finish(&it, &m_group_spec);
668 }
669 if (*result < 0 && *result != -EOPNOTSUPP) {
670 lderr(cct) << "failed to retrieve group: " << cpp_strerror(*result)
671 << dendl;
672 return m_on_finish;
673 }
674
675 send_v2_get_snapshots();
676 return nullptr;
677 }
678
// Batch-read per-snapshot state for every snapshot in the snap context.
//
// NOTE: the per-snapshot op sequence built here must exactly match the
// decode sequence in handle_v2_get_snapshots(); the legacy flags select
// between modern and pre-Mimic/pre-Jewel cls methods.
template <typename I>
void RefreshRequest<I>::send_v2_get_snapshots() {
  m_snap_infos.resize(m_snapc.snaps.size());
  m_snap_flags.resize(m_snapc.snaps.size());
  m_snap_parents.resize(m_snapc.snaps.size());
  m_snap_protection.resize(m_snapc.snaps.size());

  // no snapshots: skip straight to refreshing the parent
  if (m_snapc.snaps.empty()) {
    send_v2_refresh_parent();
    return;
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  librados::ObjectReadOperation op;
  for (auto snap_id : m_snapc.snaps) {
    if (m_legacy_snapshot != LEGACY_SNAPSHOT_DISABLED) {
      /// NOTE: remove after Luminous is retired
      cls_client::get_snapshot_name_start(&op, snap_id);
      cls_client::get_size_start(&op, snap_id);
      if (m_legacy_snapshot != LEGACY_SNAPSHOT_ENABLED_NO_TIMESTAMP) {
        cls_client::get_snapshot_timestamp_start(&op, snap_id);
      }
    } else {
      // modern OSDs return all snapshot info in a single call
      cls_client::snapshot_get_start(&op, snap_id);
    }

    if (m_legacy_parent) {
      cls_client::get_parent_start(&op, snap_id);
    } else {
      cls_client::parent_overlap_get_start(&op, snap_id);
    }

    cls_client::get_flags_start(&op, snap_id);
    cls_client::get_protection_status_start(&op, snap_id);
  }

  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v2_get_snapshots>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                         &m_out_bl);
  ceph_assert(r == 0);
  comp->release();
}
726
// Decode per-snapshot state for every snapshot, handling legacy-OSD
// fallbacks and races with concurrent snapshot removal.
//
// @param result in/out rados/decode result
// @return completion context on failure, nullptr otherwise
template <typename I>
Context *RefreshRequest<I>::handle_v2_get_snapshots(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": " << "r=" << *result << dendl;

  // decode order below must mirror the op order in send_v2_get_snapshots()
  auto it = m_out_bl.cbegin();
  for (size_t i = 0; i < m_snapc.snaps.size(); ++i) {
    if (m_legacy_snapshot != LEGACY_SNAPSHOT_DISABLED) {
      /// NOTE: remove after Luminous is retired
      std::string snap_name;
      if (*result >= 0) {
        *result = cls_client::get_snapshot_name_finish(&it, &snap_name);
      }

      uint64_t snap_size;
      if (*result >= 0) {
        uint8_t order;
        *result = cls_client::get_size_finish(&it, &snap_size, &order);
      }

      utime_t snap_timestamp;
      if (*result >= 0 &&
          m_legacy_snapshot != LEGACY_SNAPSHOT_ENABLED_NO_TIMESTAMP) {
        /// NOTE: remove after Jewel is retired
        *result = cls_client::get_snapshot_timestamp_finish(&it,
                                                            &snap_timestamp);
      }

      if (*result >= 0) {
        // legacy paths only support user snapshots; no child count
        m_snap_infos[i] = {m_snapc.snaps[i],
                           {cls::rbd::UserSnapshotNamespace{}},
                           snap_name, snap_size, snap_timestamp, 0};
      }
    } else if (*result >= 0) {
      *result = cls_client::snapshot_get_finish(&it, &m_snap_infos[i]);
    }

    if (*result == 0) {
      if (m_legacy_parent) {
        *result = cls_client::get_parent_finish(&it, &m_snap_parents[i].spec,
                                                &m_snap_parents[i].overlap);
      } else {
        // modern method only returns the overlap; the spec is inherited
        // from the HEAD parent (when one exists)
        std::optional<uint64_t> parent_overlap;
        *result = cls_client::parent_overlap_get_finish(&it, &parent_overlap);
        if (*result == 0 && parent_overlap && m_parent_md.spec.pool_id > -1) {
          m_snap_parents[i].spec = m_parent_md.spec;
          m_snap_parents[i].overlap = *parent_overlap;
        }
      }
    }

    if (*result >= 0) {
      *result = cls_client::get_flags_finish(&it, &m_snap_flags[i]);
    }

    if (*result >= 0) {
      *result = cls_client::get_protection_status_finish(
        &it, &m_snap_protection[i]);
    }

    if (*result < 0) {
      break;
    }
  }

  if (*result == -ENOENT) {
    // a snapshot was deleted while we were reading: restart from the
    // mutable metadata to pick up the new snap context
    ldout(cct, 10) << "out-of-sync snapshot state detected" << dendl;
    send_v2_get_mutable_metadata();
    return nullptr;
  } else if (m_legacy_snapshot == LEGACY_SNAPSHOT_DISABLED &&
             *result == -EOPNOTSUPP) {
    // pre-Mimic OSD: retry with the multi-call snapshot methods
    ldout(cct, 10) << "retrying using legacy snapshot methods" << dendl;
    m_legacy_snapshot = LEGACY_SNAPSHOT_ENABLED;
    send_v2_get_snapshots();
    return nullptr;
  } else if (m_legacy_snapshot == LEGACY_SNAPSHOT_ENABLED &&
             *result == -EOPNOTSUPP) {
    // pre-Jewel OSD: additionally drop the timestamp query
    ldout(cct, 10) << "retrying using legacy snapshot methods (jewel)" << dendl;
    m_legacy_snapshot = LEGACY_SNAPSHOT_ENABLED_NO_TIMESTAMP;
    send_v2_get_snapshots();
    return nullptr;
  } else if (*result < 0) {
    lderr(cct) << "failed to retrieve snapshots: " << cpp_strerror(*result)
               << dendl;
    return m_on_finish;
  }

  send_v2_refresh_parent();
  return nullptr;
}
817
// Open/refresh the parent image if the parent linkage changed (or close it
// if the image was flattened). Only constructs the sub-request while the
// snap/parent locks are held; it is started outside the lock scope.
template <typename I>
void RefreshRequest<I>::send_v2_refresh_parent() {
  {
    RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
    RWLock::RLocker parent_locker(m_image_ctx.parent_lock);

    ParentImageInfo parent_md;
    MigrationInfo migration_info;
    int r = get_parent_info(m_image_ctx.snap_id, &parent_md, &migration_info);
    // a lookup error also forces a refresh of the parent state
    if (!m_skip_open_parent_image && (r < 0 ||
        RefreshParentRequest<I>::is_refresh_required(m_image_ctx, parent_md,
                                                     migration_info))) {
      CephContext *cct = m_image_ctx.cct;
      ldout(cct, 10) << this << " " << __func__ << dendl;

      using klass = RefreshRequest<I>;
      Context *ctx = create_context_callback<
        klass, &klass::handle_v2_refresh_parent>(this);
      m_refresh_parent = RefreshParentRequest<I>::create(
        m_image_ctx, parent_md, migration_info, ctx);
    }
  }

  // send outside the lock scope to avoid lock-ordering issues
  if (m_refresh_parent != nullptr) {
    m_refresh_parent->send();
  } else {
    send_v2_init_exclusive_lock();
  }
}
847
848 template <typename I>
849 Context *RefreshRequest<I>::handle_v2_refresh_parent(int *result) {
850 CephContext *cct = m_image_ctx.cct;
851 ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
852
853 if (*result < 0) {
854 lderr(cct) << "failed to refresh parent image: " << cpp_strerror(*result)
855 << dendl;
856 save_result(result);
857 send_v2_apply();
858 return nullptr;
859 }
860
861 send_v2_init_exclusive_lock();
862 return nullptr;
863 }
864
// Create and initialize the exclusive lock object when the feature is
// enabled and no lock object exists yet (i.e. the feature was dynamically
// enabled, or the image open is still in progress).
template <typename I>
void RefreshRequest<I>::send_v2_init_exclusive_lock() {
  if ((m_features & RBD_FEATURE_EXCLUSIVE_LOCK) == 0 ||
      m_image_ctx.read_only || !m_image_ctx.snap_name.empty() ||
      m_image_ctx.exclusive_lock != nullptr) {
    send_v2_open_object_map();
    return;
  }

  // implies exclusive lock dynamically enabled or image open in-progress
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  // TODO need safe shut down
  m_exclusive_lock = m_image_ctx.create_exclusive_lock();

  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v2_init_exclusive_lock>(this);

  // owner_lock must be held across ExclusiveLock::init()
  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
  m_exclusive_lock->init(m_features, ctx);
}
888
889 template <typename I>
890 Context *RefreshRequest<I>::handle_v2_init_exclusive_lock(int *result) {
891 CephContext *cct = m_image_ctx.cct;
892 ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
893
894 if (*result < 0) {
895 lderr(cct) << "failed to initialize exclusive lock: "
896 << cpp_strerror(*result) << dendl;
897 save_result(result);
898 }
899
900 // object map and journal will be opened when exclusive lock is
901 // acquired (if features are enabled)
902 send_v2_apply();
903 return nullptr;
904 }
905
// Open the journal when journaling was dynamically enabled while this
// client already owns the exclusive lock; otherwise fall through to the
// block-writes step (possibly re-arming the require-lock IO guard).
template <typename I>
void RefreshRequest<I>::send_v2_open_journal() {
  // the journal can only be opened here by the current lock owner
  bool journal_disabled = (
    (m_features & RBD_FEATURE_JOURNALING) == 0 ||
     m_image_ctx.read_only ||
     !m_image_ctx.snap_name.empty() ||
     m_image_ctx.journal != nullptr ||
     m_image_ctx.exclusive_lock == nullptr ||
     !m_image_ctx.exclusive_lock->is_lock_owner());
  bool journal_disabled_by_policy;
  {
    RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
    journal_disabled_by_policy = (
      !journal_disabled &&
      m_image_ctx.get_journal_policy()->journal_disabled());
  }

  if (journal_disabled || journal_disabled_by_policy) {
    // journal dynamically enabled -- doesn't own exclusive lock; force
    // IO to re-acquire the lock (and thus open the journal) on next use
    if ((m_features & RBD_FEATURE_JOURNALING) != 0 &&
        !journal_disabled_by_policy &&
        m_image_ctx.exclusive_lock != nullptr &&
        m_image_ctx.journal == nullptr) {
      m_image_ctx.io_work_queue->set_require_lock(librbd::io::DIRECTION_BOTH,
                                                  true);
    }
    send_v2_block_writes();
    return;
  }

  // implies journal dynamically enabled since ExclusiveLock will init
  // the journal upon acquiring the lock
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v2_open_journal>(this);

  // TODO need safe close
  m_journal = m_image_ctx.create_journal();
  m_journal->open(ctx);
}
949
950 template <typename I>
951 Context *RefreshRequest<I>::handle_v2_open_journal(int *result) {
952 CephContext *cct = m_image_ctx.cct;
953 ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
954
955 if (*result < 0) {
956 lderr(cct) << "failed to initialize journal: " << cpp_strerror(*result)
957 << dendl;
958 save_result(result);
959 }
960
961 send_v2_block_writes();
962 return nullptr;
963 }
964
// Temporarily block writes when journaling was dynamically disabled while
// a journal object is still open, so no in-flight journaled writes race
// with the journal teardown during apply().
template <typename I>
void RefreshRequest<I>::send_v2_block_writes() {
  bool disabled_journaling = false;
  {
    RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
    disabled_journaling = ((m_features & RBD_FEATURE_EXCLUSIVE_LOCK) != 0 &&
                           (m_features & RBD_FEATURE_JOURNALING) == 0 &&
                           m_image_ctx.journal != nullptr);
  }

  if (!disabled_journaling) {
    send_v2_apply();
    return;
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  // we need to block writes temporarily to avoid in-flight journal
  // writes
  // NOTE: m_blocked_writes must be cleared (writes unblocked) before this
  // request is destroyed -- see the destructor assertion
  m_blocked_writes = true;
  Context *ctx = create_context_callback<
    RefreshRequest<I>, &RefreshRequest<I>::handle_v2_block_writes>(this);

  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
  m_image_ctx.io_work_queue->block_writes(ctx);
}
992
993 template <typename I>
994 Context *RefreshRequest<I>::handle_v2_block_writes(int *result) {
995 CephContext *cct = m_image_ctx.cct;
996 ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
997
998 if (*result < 0) {
999 lderr(cct) << "failed to block writes: " << cpp_strerror(*result)
1000 << dendl;
1001 save_result(result);
1002 }
1003 send_v2_apply();
1004 return nullptr;
1005 }
1006
// Open the object map when the feature is enabled and none is loaded yet:
// either for HEAD (lock owner) or for the snapshot the image is open at.
template <typename I>
void RefreshRequest<I>::send_v2_open_object_map() {
  // skip when: feature disabled, already open, or HEAD image without
  // write access (read-only / not the exclusive lock owner)
  if ((m_features & RBD_FEATURE_OBJECT_MAP) == 0 ||
      m_image_ctx.object_map != nullptr ||
      (m_image_ctx.snap_name.empty() &&
       (m_image_ctx.read_only ||
        m_image_ctx.exclusive_lock == nullptr ||
        !m_image_ctx.exclusive_lock->is_lock_owner()))) {
    send_v2_open_journal();
    return;
  }

  // implies object map dynamically enabled or image open in-progress
  // since SetSnapRequest loads the object map for a snapshot and
  // ExclusiveLock loads the object map for HEAD
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  if (m_image_ctx.snap_name.empty()) {
    m_object_map = m_image_ctx.create_object_map(CEPH_NOSNAP);
  } else {
    // resolve the open snapshot name to its snap id
    for (size_t snap_idx = 0; snap_idx < m_snap_infos.size(); ++snap_idx) {
      if (m_snap_infos[snap_idx].name == m_image_ctx.snap_name) {
        m_object_map = m_image_ctx.create_object_map(
          m_snapc.snaps[snap_idx].val);
        break;
      }
    }

    // snapshot vanished during the refresh: continue without an object map
    if (m_object_map == nullptr) {
      lderr(cct) << "failed to locate snapshot: " << m_image_ctx.snap_name
                 << dendl;
      send_v2_open_journal();
      return;
    }
  }

  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v2_open_object_map>(this);
  m_object_map->open(ctx);
}
1049
1050 template <typename I>
1051 Context *RefreshRequest<I>::handle_v2_open_object_map(int *result) {
1052 CephContext *cct = m_image_ctx.cct;
1053 ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
1054
1055 if (*result < 0) {
1056 lderr(cct) << "failed to open object map: " << cpp_strerror(*result)
1057 << dendl;
1058 delete m_object_map;
1059 m_object_map = nullptr;
1060
1061 if (*result != -EFBIG) {
1062 save_result(result);
1063 }
1064 }
1065
1066 send_v2_open_journal();
1067 return nullptr;
1068 }
1069
1070 template <typename I>
1071 void RefreshRequest<I>::send_v2_apply() {
1072 CephContext *cct = m_image_ctx.cct;
1073 ldout(cct, 10) << this << " " << __func__ << dendl;
1074
1075 // ensure we are not in a rados callback when applying updates
1076 using klass = RefreshRequest<I>;
1077 Context *ctx = create_context_callback<
1078 klass, &klass::handle_v2_apply>(this);
1079 m_image_ctx.op_work_queue->queue(ctx, 0);
1080 }
1081
1082 template <typename I>
1083 Context *RefreshRequest<I>::handle_v2_apply(int *result) {
1084 CephContext *cct = m_image_ctx.cct;
1085 ldout(cct, 10) << this << " " << __func__ << dendl;
1086
1087 apply();
1088
1089 return send_v2_finalize_refresh_parent();
1090 }
1091
1092 template <typename I>
1093 Context *RefreshRequest<I>::send_v2_finalize_refresh_parent() {
1094 if (m_refresh_parent == nullptr) {
1095 return send_v2_shut_down_exclusive_lock();
1096 }
1097
1098 CephContext *cct = m_image_ctx.cct;
1099 ldout(cct, 10) << this << " " << __func__ << dendl;
1100
1101 using klass = RefreshRequest<I>;
1102 Context *ctx = create_context_callback<
1103 klass, &klass::handle_v2_finalize_refresh_parent>(this);
1104 m_refresh_parent->finalize(ctx);
1105 return nullptr;
1106 }
1107
1108 template <typename I>
1109 Context *RefreshRequest<I>::handle_v2_finalize_refresh_parent(int *result) {
1110 CephContext *cct = m_image_ctx.cct;
1111 ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
1112
1113 ceph_assert(m_refresh_parent != nullptr);
1114 delete m_refresh_parent;
1115 m_refresh_parent = nullptr;
1116
1117 return send_v2_shut_down_exclusive_lock();
1118 }
1119
1120 template <typename I>
1121 Context *RefreshRequest<I>::send_v2_shut_down_exclusive_lock() {
1122 if (m_exclusive_lock == nullptr) {
1123 return send_v2_close_journal();
1124 }
1125
1126 CephContext *cct = m_image_ctx.cct;
1127 ldout(cct, 10) << this << " " << __func__ << dendl;
1128
1129 // exclusive lock feature was dynamically disabled. in-flight IO will be
1130 // flushed and in-flight requests will be canceled before releasing lock
1131 using klass = RefreshRequest<I>;
1132 Context *ctx = create_context_callback<
1133 klass, &klass::handle_v2_shut_down_exclusive_lock>(this);
1134 m_exclusive_lock->shut_down(ctx);
1135 return nullptr;
1136 }
1137
1138 template <typename I>
1139 Context *RefreshRequest<I>::handle_v2_shut_down_exclusive_lock(int *result) {
1140 CephContext *cct = m_image_ctx.cct;
1141 ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
1142
1143 if (*result < 0) {
1144 lderr(cct) << "failed to shut down exclusive lock: "
1145 << cpp_strerror(*result) << dendl;
1146 save_result(result);
1147 }
1148
1149 {
1150 RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
1151 ceph_assert(m_image_ctx.exclusive_lock == nullptr);
1152 }
1153
1154 ceph_assert(m_exclusive_lock != nullptr);
1155 delete m_exclusive_lock;
1156 m_exclusive_lock = nullptr;
1157
1158 return send_v2_close_journal();
1159 }
1160
1161 template <typename I>
1162 Context *RefreshRequest<I>::send_v2_close_journal() {
1163 if (m_journal == nullptr) {
1164 return send_v2_close_object_map();
1165 }
1166
1167 CephContext *cct = m_image_ctx.cct;
1168 ldout(cct, 10) << this << " " << __func__ << dendl;
1169
1170 // journal feature was dynamically disabled
1171 using klass = RefreshRequest<I>;
1172 Context *ctx = create_context_callback<
1173 klass, &klass::handle_v2_close_journal>(this);
1174 m_journal->close(ctx);
1175 return nullptr;
1176 }
1177
1178 template <typename I>
1179 Context *RefreshRequest<I>::handle_v2_close_journal(int *result) {
1180 CephContext *cct = m_image_ctx.cct;
1181 ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
1182
1183 if (*result < 0) {
1184 save_result(result);
1185 lderr(cct) << "failed to close journal: " << cpp_strerror(*result)
1186 << dendl;
1187 }
1188
1189 ceph_assert(m_journal != nullptr);
1190 delete m_journal;
1191 m_journal = nullptr;
1192
1193 ceph_assert(m_blocked_writes);
1194 m_blocked_writes = false;
1195
1196 m_image_ctx.io_work_queue->unblock_writes();
1197 return send_v2_close_object_map();
1198 }
1199
1200 template <typename I>
1201 Context *RefreshRequest<I>::send_v2_close_object_map() {
1202 if (m_object_map == nullptr) {
1203 return send_flush_aio();
1204 }
1205
1206 CephContext *cct = m_image_ctx.cct;
1207 ldout(cct, 10) << this << " " << __func__ << dendl;
1208
1209 // object map was dynamically disabled
1210 using klass = RefreshRequest<I>;
1211 Context *ctx = create_context_callback<
1212 klass, &klass::handle_v2_close_object_map>(this);
1213 m_object_map->close(ctx);
1214 return nullptr;
1215 }
1216
1217 template <typename I>
1218 Context *RefreshRequest<I>::handle_v2_close_object_map(int *result) {
1219 CephContext *cct = m_image_ctx.cct;
1220 ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
1221
1222 if (*result < 0) {
1223 lderr(cct) << "failed to close object map: " << cpp_strerror(*result)
1224 << dendl;
1225 }
1226
1227 ceph_assert(m_object_map != nullptr);
1228 delete m_object_map;
1229 m_object_map = nullptr;
1230
1231 return send_flush_aio();
1232 }
1233
1234 template <typename I>
1235 Context *RefreshRequest<I>::send_flush_aio() {
1236 if (m_incomplete_update && m_error_result == 0) {
1237 // if this was a partial refresh, notify ImageState
1238 m_error_result = -ERESTART;
1239 }
1240
1241 if (m_flush_aio) {
1242 CephContext *cct = m_image_ctx.cct;
1243 ldout(cct, 10) << this << " " << __func__ << dendl;
1244
1245 RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
1246 auto ctx = create_context_callback<
1247 RefreshRequest<I>, &RefreshRequest<I>::handle_flush_aio>(this);
1248 auto aio_comp = io::AioCompletion::create_and_start(
1249 ctx, util::get_image_ctx(&m_image_ctx), io::AIO_TYPE_FLUSH);
1250 auto req = io::ImageDispatchSpec<I>::create_flush_request(
1251 m_image_ctx, aio_comp, io::FLUSH_SOURCE_INTERNAL, {});
1252 req->send();
1253 delete req;
1254 return nullptr;
1255 } else if (m_error_result < 0) {
1256 // propagate saved error back to caller
1257 Context *ctx = create_context_callback<
1258 RefreshRequest<I>, &RefreshRequest<I>::handle_error>(this);
1259 m_image_ctx.op_work_queue->queue(ctx, 0);
1260 return nullptr;
1261 }
1262
1263 return m_on_finish;
1264 }
1265
1266 template <typename I>
1267 Context *RefreshRequest<I>::handle_flush_aio(int *result) {
1268 CephContext *cct = m_image_ctx.cct;
1269 ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
1270
1271 if (*result < 0) {
1272 lderr(cct) << "failed to flush pending AIO: " << cpp_strerror(*result)
1273 << dendl;
1274 }
1275
1276 return handle_error(result);
1277 }
1278
1279 template <typename I>
1280 Context *RefreshRequest<I>::handle_error(int *result) {
1281 if (m_error_result < 0) {
1282 *result = m_error_result;
1283
1284 CephContext *cct = m_image_ctx.cct;
1285 ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
1286 }
1287 return m_on_finish;
1288 }
1289
// Apply the freshly fetched image state to the image context.  Runs on
// the op work queue (never from a rados callback) and takes the full
// owner/md/snap/parent write-lock stack so readers observe a consistent
// update.  Also detects dynamically enabled/disabled features and
// stashes instances (exclusive lock, journal, object map) into members
// for later teardown by the shut-down/close steps.
template <typename I>
void RefreshRequest<I>::apply() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << dendl;

  RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
  RWLock::WLocker md_locker(m_image_ctx.md_lock);

  {
    RWLock::WLocker snap_locker(m_image_ctx.snap_lock);
    RWLock::WLocker parent_locker(m_image_ctx.parent_lock);

    m_image_ctx.size = m_size;
    m_image_ctx.lockers = m_lockers;
    m_image_ctx.lock_tag = m_lock_tag;
    m_image_ctx.exclusive_locked = m_exclusive_locked;

    // maps a destination snap id back to its source (migration) snap id
    std::map<uint64_t, uint64_t> migration_reverse_snap_seq;

    if (m_image_ctx.old_format) {
      // v1 images have no features/flags/op-features
      m_image_ctx.order = m_order;
      m_image_ctx.features = 0;
      m_image_ctx.flags = 0;
      m_image_ctx.op_features = 0;
      m_image_ctx.operations_disabled = false;
      m_image_ctx.object_prefix = std::move(m_object_prefix);
      m_image_ctx.init_layout();
    } else {
      // HEAD revision doesn't have a defined overlap so it's only
      // applicable to snapshots
      if (!m_head_parent_overlap) {
        m_parent_md = {};
      }

      m_image_ctx.features = m_features;
      m_image_ctx.flags = m_flags;
      m_image_ctx.op_features = m_op_features;
      // unknown op-feature bits mean a newer client owns this image
      m_image_ctx.operations_disabled = (
        (m_op_features & ~RBD_OPERATION_FEATURES_ALL) != 0ULL);
      m_image_ctx.group_spec = m_group_spec;
      if (get_migration_info(&m_image_ctx.parent_md,
                             &m_image_ctx.migration_info)) {
        // invert the snap_map so snaps can be matched to migration
        // source snapshots below
        for (auto it : m_image_ctx.migration_info.snap_map) {
          migration_reverse_snap_seq[it.second.front()] = it.first;
        }
      } else {
        m_image_ctx.parent_md = m_parent_md;
        m_image_ctx.migration_info = {};
      }
    }

    // any snap id not previously known forces an AIO flush before the
    // refresh completes (see send_flush_aio)
    for (size_t i = 0; i < m_snapc.snaps.size(); ++i) {
      std::vector<librados::snap_t>::const_iterator it = std::find(
        m_image_ctx.snaps.begin(), m_image_ctx.snaps.end(),
        m_snapc.snaps[i].val);
      if (it == m_image_ctx.snaps.end()) {
        m_flush_aio = true;
        ldout(cct, 20) << "new snapshot id=" << m_snapc.snaps[i].val
                       << " name=" << m_snap_infos[i].name
                       << " size=" << m_snap_infos[i].image_size
                       << dendl;
      }
    }

    // rebuild the snapshot tables from scratch
    m_image_ctx.snaps.clear();
    m_image_ctx.snap_info.clear();
    m_image_ctx.snap_ids.clear();
    auto overlap = m_image_ctx.parent_md.overlap;
    for (size_t i = 0; i < m_snapc.snaps.size(); ++i) {
      uint64_t flags = m_image_ctx.old_format ? 0 : m_snap_flags[i];
      uint8_t protection_status = m_image_ctx.old_format ?
        static_cast<uint8_t>(RBD_PROTECTION_STATUS_UNPROTECTED) :
        m_snap_protection[i];
      ParentImageInfo parent;
      if (!m_image_ctx.old_format) {
        if (!m_image_ctx.migration_info.empty()) {
          // in-migration: derive each snap's parent from the migration
          // source; snaps created after migration started shrink the
          // running overlap instead
          parent = m_image_ctx.parent_md;
          auto it = migration_reverse_snap_seq.find(m_snapc.snaps[i].val);
          if (it != migration_reverse_snap_seq.end()) {
            parent.spec.snap_id = it->second;
            parent.overlap = m_snap_infos[i].image_size;
          } else {
            overlap = std::min(overlap, m_snap_infos[i].image_size);
            parent.overlap = overlap;
          }
        } else {
          parent = m_snap_parents[i];
        }
      }
      m_image_ctx.add_snap(m_snap_infos[i].snapshot_namespace,
                           m_snap_infos[i].name, m_snapc.snaps[i].val,
                           m_snap_infos[i].image_size, parent,
                           protection_status, flags,
                           m_snap_infos[i].timestamp);
    }
    // overlap can never exceed the current image size
    m_image_ctx.parent_md.overlap = std::min(overlap, m_image_ctx.size);
    m_image_ctx.snapc = m_snapc;

    // the snapshot we opened at may have been deleted concurrently
    if (m_image_ctx.snap_id != CEPH_NOSNAP &&
        m_image_ctx.get_snap_id(m_image_ctx.snap_namespace,
                                m_image_ctx.snap_name) != m_image_ctx.snap_id) {
      lderr(cct) << "tried to read from a snapshot that no longer exists: "
                 << m_image_ctx.snap_name << dendl;
      m_image_ctx.snap_exists = false;
    }

    if (m_refresh_parent != nullptr) {
      m_refresh_parent->apply();
    }
    m_image_ctx.data_ctx.selfmanaged_snap_set_write_ctx(m_image_ctx.snapc.seq,
                                                        m_image_ctx.snaps);

    // handle dynamically enabled / disabled features
    if (m_image_ctx.exclusive_lock != nullptr &&
        !m_image_ctx.test_features(RBD_FEATURE_EXCLUSIVE_LOCK,
                                   m_image_ctx.snap_lock)) {
      // disabling exclusive lock will automatically handle closing
      // object map and journaling
      ceph_assert(m_exclusive_lock == nullptr);
      m_exclusive_lock = m_image_ctx.exclusive_lock;
    } else {
      if (m_exclusive_lock != nullptr) {
        // lock instance created earlier in the refresh -- install it
        ceph_assert(m_image_ctx.exclusive_lock == nullptr);
        std::swap(m_exclusive_lock, m_image_ctx.exclusive_lock);
      }
      if (!m_image_ctx.test_features(RBD_FEATURE_JOURNALING,
                                     m_image_ctx.snap_lock)) {
        if (!m_image_ctx.clone_copy_on_read && m_image_ctx.journal != nullptr) {
          m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_READ,
                                                      false);
        }
        // journaling disabled: swap the (possibly null) new instance in
        // and capture the old one for teardown
        std::swap(m_journal, m_image_ctx.journal);
      } else if (m_journal != nullptr) {
        std::swap(m_journal, m_image_ctx.journal);
      }
      if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
                                     m_image_ctx.snap_lock) ||
          m_object_map != nullptr) {
        std::swap(m_object_map, m_image_ctx.object_map);
      }
    }
  }
}
1433
1434 template <typename I>
1435 int RefreshRequest<I>::get_parent_info(uint64_t snap_id,
1436 ParentImageInfo *parent_md,
1437 MigrationInfo *migration_info) {
1438 if (get_migration_info(parent_md, migration_info)) {
1439 return 0;
1440 } else if (snap_id == CEPH_NOSNAP) {
1441 *parent_md = m_parent_md;
1442 *migration_info = {};
1443 return 0;
1444 } else {
1445 for (size_t i = 0; i < m_snapc.snaps.size(); ++i) {
1446 if (m_snapc.snaps[i].val == snap_id) {
1447 *parent_md = m_snap_parents[i];
1448 *migration_info = {};
1449 return 0;
1450 }
1451 }
1452 }
1453 return -ENOENT;
1454 }
1455
// Synthesize parent image info and a MigrationInfo from the migration
// spec when this image is an in-progress migration destination.
// Returns false (leaving the outputs untouched) unless the header is
// DST-typed and the migration is in the PREPARED or EXECUTING state.
template <typename I>
bool RefreshRequest<I>::get_migration_info(ParentImageInfo *parent_md,
                                           MigrationInfo *migration_info) {
  if (m_migration_spec.header_type != cls::rbd::MIGRATION_HEADER_TYPE_DST ||
      (m_migration_spec.state != cls::rbd::MIGRATION_STATE_PREPARED &&
       m_migration_spec.state != cls::rbd::MIGRATION_STATE_EXECUTING)) {
    // sanity-check the only other states we expect to observe here
    ceph_assert(m_migration_spec.header_type ==
                  cls::rbd::MIGRATION_HEADER_TYPE_SRC ||
                m_migration_spec.pool_id == -1 ||
                m_migration_spec.state == cls::rbd::MIGRATION_STATE_EXECUTED);

    return false;
  }

  // the migration source is presented as a synthetic HEAD parent
  parent_md->spec.pool_id = m_migration_spec.pool_id;
  parent_md->spec.pool_namespace = m_migration_spec.pool_namespace;
  parent_md->spec.image_id = m_migration_spec.image_id;
  parent_md->spec.snap_id = CEPH_NOSNAP;
  parent_md->overlap = std::min(m_size, m_migration_spec.overlap);

  auto snap_seqs = m_migration_spec.snap_seqs;
  // If new snapshots have been created on destination image after
  // migration stared, map the source CEPH_NOSNAP to the earliest of
  // these snapshots.
  snapid_t snap_id = snap_seqs.empty() ? 0 : snap_seqs.rbegin()->second;
  // m_snapc.snaps is ordered newest-first, so the reverse search walks
  // oldest-to-newest for the first snap newer than snap_id
  auto it = std::upper_bound(m_snapc.snaps.rbegin(), m_snapc.snaps.rend(),
                             snap_id);
  if (it != m_snapc.snaps.rend()) {
    snap_seqs[CEPH_NOSNAP] = *it;
  } else {
    snap_seqs[CEPH_NOSNAP] = CEPH_NOSNAP;
  }

  // compute the overlap as the largest image size among the mapped
  // destination snapshots (or the HEAD overlap when HEAD is mapped)
  std::set<uint64_t> snap_ids;
  for (auto& it : snap_seqs) {
    snap_ids.insert(it.second);
  }
  uint64_t overlap = snap_ids.find(CEPH_NOSNAP) != snap_ids.end() ?
    parent_md->overlap : 0;
  for (size_t i = 0; i < m_snapc.snaps.size(); ++i) {
    if (snap_ids.find(m_snapc.snaps[i].val) != snap_ids.end()) {
      overlap = std::max(overlap, m_snap_infos[i].image_size);
    }
  }

  *migration_info = {m_migration_spec.pool_id, m_migration_spec.pool_namespace,
                     m_migration_spec.image_name, m_migration_spec.image_id, {},
                     overlap, m_migration_spec.flatten};

  // expand snap_seqs into the full source->destination snap map
  deep_copy::util::compute_snap_map(0, CEPH_NOSNAP, snap_seqs,
                                    &migration_info->snap_map);
  return true;
}
1509
1510 } // namespace image
1511 } // namespace librbd
1512
1513 template class librbd::image::RefreshRequest<librbd::ImageCtx>;