// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include <boost/algorithm/string/predicate.hpp>
#include "include/assert.h"

#include "librbd/image/RefreshRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/lock/cls_lock_client.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/Journal.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#include "librbd/image/RefreshParentRequest.h"
#include "librbd/io/ImageRequestWQ.h"
#include "librbd/journal/Policy.h"

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::image::RefreshRequest: "

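// RefreshRequest is an asynchronous state machine that re-reads an image's
// on-disk metadata and applies it to the in-memory ImageCtx. v1 (old-format)
// images walk: read_header -> get_snapshots -> get_locks -> apply -> flush.
// v2 images walk: get_mutable_metadata -> get_metadata -> get_flags ->
// get_group -> get_snapshots (+ timestamps, namespaces) -> refresh_parent ->
// init_exclusive_lock / open_object_map / open_journal -> apply, then tear
// down any features that were dynamically disabled before flushing I/O.
// Instances are typically created via the create() factory declared in
// RefreshRequest.h and started with send().
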
namespace librbd {
namespace image {

namespace {

const uint64_t MAX_METADATA_ITEMS = 128;

}

using util::create_rados_callback;
using util::create_async_context_callback;
using util::create_context_callback;

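// note: the user-supplied completion is wrapped in an async context callback
// so that it is always invoked via the op work queue rather than from
// whatever (possibly librados callback) thread finishes the state machine
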
template <typename I>
RefreshRequest<I>::RefreshRequest(I &image_ctx, bool acquiring_lock,
                                  bool skip_open_parent, Context *on_finish)
  : m_image_ctx(image_ctx), m_acquiring_lock(acquiring_lock),
    m_skip_open_parent_image(skip_open_parent),
    m_on_finish(create_async_context_callback(m_image_ctx, on_finish)),
    m_error_result(0), m_flush_aio(false), m_exclusive_lock(nullptr),
    m_object_map(nullptr), m_journal(nullptr), m_refresh_parent(nullptr) {
}

template <typename I>
RefreshRequest<I>::~RefreshRequest() {
  // the state machine must have closed and released these before destruction
  assert(m_exclusive_lock == nullptr);
  assert(m_object_map == nullptr);
  assert(m_journal == nullptr);
  assert(m_refresh_parent == nullptr);
  assert(!m_blocked_writes);
}

template <typename I>
void RefreshRequest<I>::send() {
  if (m_image_ctx.old_format) {
    send_v1_read_header();
  } else {
    send_v2_get_mutable_metadata();
  }
}

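// v1 images store their header as a plain RADOS object; a zero-length
// read returns the entire object contents
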
template <typename I>
void RefreshRequest<I>::send_v1_read_header() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  librados::ObjectReadOperation op;
  op.read(0, 0, nullptr, nullptr);

  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v1_read_header>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                         &m_out_bl);
  assert(r == 0);
  comp->release();
}

template <typename I>
Context *RefreshRequest<I>::handle_v1_read_header(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": " << "r=" << *result << dendl;

  rbd_obj_header_ondisk v1_header;
  if (*result < 0) {
    return m_on_finish;
  } else if (m_out_bl.length() < sizeof(v1_header)) {
    lderr(cct) << "v1 header too small" << dendl;
    *result = -EIO;
    return m_on_finish;
  } else if (memcmp(RBD_HEADER_TEXT, m_out_bl.c_str(),
                    sizeof(RBD_HEADER_TEXT)) != 0) {
    lderr(cct) << "unrecognized v1 header" << dendl;
    *result = -ENXIO;
    return m_on_finish;
  }

  memcpy(&v1_header, m_out_bl.c_str(), sizeof(v1_header));
  m_order = v1_header.options.order;
  m_size = v1_header.image_size;
  m_object_prefix = v1_header.block_name;
  send_v1_get_snapshots();
  return nullptr;
}

template <typename I>
void RefreshRequest<I>::send_v1_get_snapshots() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  librados::ObjectReadOperation op;
  cls_client::old_snapshot_list_start(&op);

  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v1_get_snapshots>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                         &m_out_bl);
  assert(r == 0);
  comp->release();
}

template <typename I>
Context *RefreshRequest<I>::handle_v1_get_snapshots(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": " << "r=" << *result << dendl;

  if (*result == 0) {
    bufferlist::iterator it = m_out_bl.begin();
    *result = cls_client::old_snapshot_list_finish(
      &it, &m_snap_names, &m_snap_sizes, &m_snapc);
  }

  if (*result < 0) {
    lderr(cct) << "failed to retrieve v1 snapshots: " << cpp_strerror(*result)
               << dendl;
    return m_on_finish;
  }

  if (!m_snapc.is_valid()) {
    lderr(cct) << "v1 image snap context is invalid" << dendl;
    *result = -EIO;
    return m_on_finish;
  }

  // v1 images only support user snapshots and do not record timestamps
  m_snap_namespaces = std::vector<cls::rbd::SnapshotNamespace>(
    m_snap_names.size(), cls::rbd::UserSnapshotNamespace());
  m_snap_timestamps = std::vector<utime_t>(m_snap_names.size(), utime_t());

  send_v1_get_locks();
  return nullptr;
}

template <typename I>
void RefreshRequest<I>::send_v1_get_locks() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  librados::ObjectReadOperation op;
  rados::cls::lock::get_lock_info_start(&op, RBD_LOCK_NAME);

  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v1_get_locks>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                         &m_out_bl);
  assert(r == 0);
  comp->release();
}

template <typename I>
Context *RefreshRequest<I>::handle_v1_get_locks(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": "
                 << "r=" << *result << dendl;

  // If EOPNOTSUPP, treat image as if there are no locks (we can't
  // query them).
  if (*result == -EOPNOTSUPP) {
    *result = 0;
  } else if (*result == 0) {
    bufferlist::iterator it = m_out_bl.begin();
    ClsLockType lock_type;
    *result = rados::cls::lock::get_lock_info_finish(&it, &m_lockers,
                                                     &lock_type, &m_lock_tag);
    if (*result == 0) {
      m_exclusive_locked = (lock_type == LOCK_EXCLUSIVE);
    }
  }
  if (*result < 0) {
    lderr(cct) << "failed to retrieve locks: " << cpp_strerror(*result)
               << dendl;
    return m_on_finish;
  }

  send_v1_apply();
  return nullptr;
}

template <typename I>
void RefreshRequest<I>::send_v1_apply() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  // ensure we are not in a rados callback when applying updates
  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v1_apply>(this);
  m_image_ctx.op_work_queue->queue(ctx, 0);
}

template <typename I>
Context *RefreshRequest<I>::handle_v1_apply(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  apply();
  return send_flush_aio();
}

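// an image opened at a snapshot is inherently read-only, so the read_only
// hint passed to the OSD accounts for both the explicit read-only flag and
// an active snapshot
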
template <typename I>
void RefreshRequest<I>::send_v2_get_mutable_metadata() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  uint64_t snap_id;
  {
    RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
    snap_id = m_image_ctx.snap_id;
  }

  bool read_only = m_image_ctx.read_only || snap_id != CEPH_NOSNAP;
  librados::ObjectReadOperation op;
  cls_client::get_mutable_metadata_start(&op, read_only);

  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v2_get_mutable_metadata>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                         &m_out_bl);
  assert(r == 0);
  comp->release();
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_get_mutable_metadata(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": "
                 << "r=" << *result << dendl;

  if (*result == 0) {
    bufferlist::iterator it = m_out_bl.begin();
    *result = cls_client::get_mutable_metadata_finish(&it, &m_size, &m_features,
                                                      &m_incompatible_features,
                                                      &m_lockers,
                                                      &m_exclusive_locked,
                                                      &m_lock_tag, &m_snapc,
                                                      &m_parent_md);
  }
  if (*result < 0) {
    lderr(cct) << "failed to retrieve mutable metadata: "
               << cpp_strerror(*result) << dendl;
    return m_on_finish;
  }

  uint64_t unsupported = m_incompatible_features & ~RBD_FEATURES_ALL;
  if (unsupported != 0ULL) {
    lderr(cct) << "Image uses unsupported features: " << unsupported << dendl;
    *result = -ENOSYS;
    return m_on_finish;
  }

  if (!m_snapc.is_valid()) {
    lderr(cct) << "image snap context is invalid!" << dendl;
    *result = -EIO;
    return m_on_finish;
  }

  if (m_acquiring_lock && (m_features & RBD_FEATURE_EXCLUSIVE_LOCK) == 0) {
    ldout(cct, 5) << "ignoring dynamically disabled exclusive lock" << dendl;
    m_features |= RBD_FEATURE_EXCLUSIVE_LOCK;
    m_incomplete_update = true;
  }

  send_v2_get_metadata();
  return nullptr;
}

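// image metadata is paginated: fetch up to MAX_METADATA_ITEMS keys per
// request, resuming from the last key seen, until the returned keys move
// past the ImageCtx::METADATA_CONF_PREFIX namespace
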
template <typename I>
void RefreshRequest<I>::send_v2_get_metadata() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": "
                 << "start_key=" << m_last_metadata_key << dendl;

  librados::ObjectReadOperation op;
  cls_client::metadata_list_start(&op, m_last_metadata_key, MAX_METADATA_ITEMS);

  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp =
    create_rados_callback<klass, &klass::handle_v2_get_metadata>(this);
  m_out_bl.clear();
  m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                 &m_out_bl);
  comp->release();
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_get_metadata(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;

  std::map<std::string, bufferlist> metadata;
  if (*result == 0) {
    bufferlist::iterator it = m_out_bl.begin();
    *result = cls_client::metadata_list_finish(&it, &metadata);
  }

  if (*result == -EOPNOTSUPP || *result == -EIO) {
    ldout(cct, 10) << "config metadata not supported by OSD" << dendl;
  } else if (*result < 0) {
    lderr(cct) << "failed to retrieve metadata: " << cpp_strerror(*result)
               << dendl;
    return m_on_finish;
  }

  if (!metadata.empty()) {
    m_metadata.insert(metadata.begin(), metadata.end());
    m_last_metadata_key = metadata.rbegin()->first;
    if (boost::starts_with(m_last_metadata_key,
                           ImageCtx::METADATA_CONF_PREFIX)) {
      send_v2_get_metadata();
      return nullptr;
    }
  }

  m_image_ctx.apply_metadata(m_metadata, false);

  send_v2_get_flags();
  return nullptr;
}

template <typename I>
void RefreshRequest<I>::send_v2_get_flags() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  librados::ObjectReadOperation op;
  cls_client::get_flags_start(&op, m_snapc.snaps);

  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v2_get_flags>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                         &m_out_bl);
  assert(r == 0);
  comp->release();
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_get_flags(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": "
                 << "r=" << *result << dendl;

  if (*result == 0) {
    bufferlist::iterator it = m_out_bl.begin();
    cls_client::get_flags_finish(&it, &m_flags, m_snapc.snaps, &m_snap_flags);
  }
  if (*result == -EOPNOTSUPP) {
    // Older OSD doesn't support RBD flags, need to assume the worst
    *result = 0;
    ldout(cct, 10) << "OSD does not support RBD flags, disabling object map "
                   << "optimizations" << dendl;
    m_flags = RBD_FLAG_OBJECT_MAP_INVALID;
    if ((m_features & RBD_FEATURE_FAST_DIFF) != 0) {
      m_flags |= RBD_FLAG_FAST_DIFF_INVALID;
    }

    std::vector<uint64_t> default_flags(m_snapc.snaps.size(), m_flags);
    m_snap_flags = std::move(default_flags);
  } else if (*result == -ENOENT) {
    ldout(cct, 10) << "out-of-sync snapshot state detected" << dendl;
    send_v2_get_mutable_metadata();
    return nullptr;
  } else if (*result < 0) {
    lderr(cct) << "failed to retrieve flags: " << cpp_strerror(*result)
               << dendl;
    return m_on_finish;
  }

  send_v2_get_group();
  return nullptr;
}

template <typename I>
void RefreshRequest<I>::send_v2_get_group() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  librados::ObjectReadOperation op;
  cls_client::image_get_group_start(&op);

  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v2_get_group>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                         &m_out_bl);
  assert(r == 0);
  comp->release();
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_get_group(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": "
                 << "r=" << *result << dendl;

  if (*result == 0) {
    bufferlist::iterator it = m_out_bl.begin();
    cls_client::image_get_group_finish(&it, &m_group_spec);
  }
  if (*result == -EOPNOTSUPP) {
    // Older OSD doesn't support RBD groups
    *result = 0;
    ldout(cct, 10) << "OSD does not support consistency groups" << dendl;
  } else if (*result < 0) {
    lderr(cct) << "failed to retrieve group: " << cpp_strerror(*result)
               << dendl;
    return m_on_finish;
  }

  send_v2_get_snapshots();
  return nullptr;
}

template <typename I>
void RefreshRequest<I>::send_v2_get_snapshots() {
  if (m_snapc.snaps.empty()) {
    m_snap_names.clear();
    m_snap_namespaces.clear();
    m_snap_sizes.clear();
    m_snap_parents.clear();
    m_snap_protection.clear();
    m_snap_timestamps.clear();
    send_v2_refresh_parent();
    return;
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  librados::ObjectReadOperation op;
  cls_client::snapshot_list_start(&op, m_snapc.snaps);

  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v2_get_snapshots>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                         &m_out_bl);
  assert(r == 0);
  comp->release();
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_get_snapshots(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": "
                 << "r=" << *result << dendl;

  if (*result == 0) {
    bufferlist::iterator it = m_out_bl.begin();
    *result = cls_client::snapshot_list_finish(&it, m_snapc.snaps,
                                               &m_snap_names,
                                               &m_snap_sizes,
                                               &m_snap_parents,
                                               &m_snap_protection);
  }
  if (*result == -ENOENT) {
    ldout(cct, 10) << "out-of-sync snapshot state detected" << dendl;
    send_v2_get_mutable_metadata();
    return nullptr;
  } else if (*result < 0) {
    lderr(cct) << "failed to retrieve snapshots: " << cpp_strerror(*result)
               << dendl;
    return m_on_finish;
  }

  send_v2_get_snap_timestamps();
  return nullptr;
}

template <typename I>
void RefreshRequest<I>::send_v2_get_snap_timestamps() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  librados::ObjectReadOperation op;
  cls_client::snapshot_timestamp_list_start(&op, m_snapc.snaps);

  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v2_get_snap_timestamps>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                         &m_out_bl);
  assert(r == 0);
  comp->release();
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_get_snap_timestamps(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": " << "r=" << *result << dendl;

  if (*result == 0) {
    bufferlist::iterator it = m_out_bl.begin();
    *result = cls_client::snapshot_timestamp_list_finish(&it, m_snapc.snaps,
                                                         &m_snap_timestamps);
  }
  if (*result == -ENOENT) {
    ldout(cct, 10) << "out-of-sync snapshot state detected" << dendl;
    send_v2_get_mutable_metadata();
    return nullptr;
  } else if (*result == -EOPNOTSUPP) {
    // ignore the error: the OSD is too old to provide snap timestamps
    m_snap_timestamps = std::vector<utime_t>(m_snap_names.size(), utime_t());
  } else if (*result < 0) {
    lderr(cct) << "failed to retrieve snapshot timestamps: "
               << cpp_strerror(*result) << dendl;
    return m_on_finish;
  }

  send_v2_get_snap_namespaces();
  return nullptr;
}

template <typename I>
void RefreshRequest<I>::send_v2_get_snap_namespaces() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  librados::ObjectReadOperation op;
  cls_client::snapshot_namespace_list_start(&op, m_snapc.snaps);

  using klass = RefreshRequest<I>;
  librados::AioCompletion *comp = create_rados_callback<
    klass, &klass::handle_v2_get_snap_namespaces>(this);
  m_out_bl.clear();
  int r = m_image_ctx.md_ctx.aio_operate(m_image_ctx.header_oid, comp, &op,
                                         &m_out_bl);
  assert(r == 0);
  comp->release();
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_get_snap_namespaces(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": "
                 << "r=" << *result << dendl;

  if (*result == 0) {
    bufferlist::iterator it = m_out_bl.begin();
    *result = cls_client::snapshot_namespace_list_finish(&it, m_snapc.snaps,
                                                         &m_snap_namespaces);
  }
  if (*result == -ENOENT) {
    ldout(cct, 10) << "out-of-sync snapshot state detected" << dendl;
    send_v2_get_mutable_metadata();
    return nullptr;
  } else if (*result == -EOPNOTSUPP) {
    // ignore the error: the OSD is too old to provide snap namespaces, so
    // default every snapshot to the user namespace
    m_snap_namespaces = std::vector<cls::rbd::SnapshotNamespace>(
      m_snap_names.size(), cls::rbd::UserSnapshotNamespace());
  } else if (*result < 0) {
    lderr(cct) << "failed to retrieve snapshot namespaces: "
               << cpp_strerror(*result) << dendl;
    return m_on_finish;
  }

  send_v2_refresh_parent();
  return nullptr;
}

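// (re)open the parent image only when the parent info for the active snap
// could not be resolved or RefreshParentRequest decides a refresh is
// actually required
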
template <typename I>
void RefreshRequest<I>::send_v2_refresh_parent() {
  {
    RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
    RWLock::RLocker parent_locker(m_image_ctx.parent_lock);

    ParentInfo parent_md;
    int r = get_parent_info(m_image_ctx.snap_id, &parent_md);
    if (!m_skip_open_parent_image && (r < 0 ||
        RefreshParentRequest<I>::is_refresh_required(m_image_ctx, parent_md))) {
      CephContext *cct = m_image_ctx.cct;
      ldout(cct, 10) << this << " " << __func__ << dendl;

      using klass = RefreshRequest<I>;
      Context *ctx = create_context_callback<
        klass, &klass::handle_v2_refresh_parent>(this);
      m_refresh_parent = RefreshParentRequest<I>::create(
        m_image_ctx, parent_md, ctx);
    }
  }

  if (m_refresh_parent != nullptr) {
    m_refresh_parent->send();
  } else {
    send_v2_init_exclusive_lock();
  }
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_refresh_parent(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to refresh parent image: " << cpp_strerror(*result)
               << dendl;
    save_result(result);
    send_v2_apply();
    return nullptr;
  }

  send_v2_init_exclusive_lock();
  return nullptr;
}

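// an ExclusiveLock instance is only created here when the feature is
// enabled, the image is writable (not read-only, not a snapshot), and no
// instance exists yet
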
template <typename I>
void RefreshRequest<I>::send_v2_init_exclusive_lock() {
  if ((m_features & RBD_FEATURE_EXCLUSIVE_LOCK) == 0 ||
      m_image_ctx.read_only || !m_image_ctx.snap_name.empty() ||
      m_image_ctx.exclusive_lock != nullptr) {
    send_v2_open_object_map();
    return;
  }

  // implies exclusive lock dynamically enabled or image open in-progress
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  // TODO need safe shut down
  m_exclusive_lock = m_image_ctx.create_exclusive_lock();

  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v2_init_exclusive_lock>(this);

  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
  m_exclusive_lock->init(m_features, ctx);
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_init_exclusive_lock(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to initialize exclusive lock: "
               << cpp_strerror(*result) << dendl;
    save_result(result);
  }

  // object map and journal will be opened when exclusive lock is
  // acquired (if features are enabled)
  send_v2_apply();
  return nullptr;
}

template <typename I>
void RefreshRequest<I>::send_v2_open_journal() {
  bool journal_disabled = (
    (m_features & RBD_FEATURE_JOURNALING) == 0 ||
     m_image_ctx.read_only ||
     !m_image_ctx.snap_name.empty() ||
     m_image_ctx.journal != nullptr ||
     m_image_ctx.exclusive_lock == nullptr ||
     !m_image_ctx.exclusive_lock->is_lock_owner());
  bool journal_disabled_by_policy;
  {
    RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
    journal_disabled_by_policy = (
      !journal_disabled &&
      m_image_ctx.get_journal_policy()->journal_disabled());
  }

  if (journal_disabled || journal_disabled_by_policy) {
    // journaling was dynamically enabled but we don't own the exclusive
    // lock: require the lock for I/O so the journal is opened on acquisition
    if ((m_features & RBD_FEATURE_JOURNALING) != 0 &&
        !journal_disabled_by_policy &&
        m_image_ctx.exclusive_lock != nullptr &&
        m_image_ctx.journal == nullptr) {
      m_image_ctx.io_work_queue->set_require_lock(librbd::io::DIRECTION_BOTH,
                                                  true);
    }
    send_v2_block_writes();
    return;
  }

  // implies journal dynamically enabled since ExclusiveLock will init
  // the journal upon acquiring the lock
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v2_open_journal>(this);

  // TODO need safe close
  m_journal = m_image_ctx.create_journal();
  m_journal->open(ctx);
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_open_journal(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to initialize journal: " << cpp_strerror(*result)
               << dendl;
    save_result(result);
  }

  send_v2_block_writes();
  return nullptr;
}

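// when journaling was dynamically disabled while a journal is still open,
// writes are blocked so that no new journal entries can be generated while
// the journal is torn down
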
template <typename I>
void RefreshRequest<I>::send_v2_block_writes() {
  bool disabled_journaling = false;
  {
    RWLock::RLocker snap_locker(m_image_ctx.snap_lock);
    disabled_journaling = ((m_features & RBD_FEATURE_EXCLUSIVE_LOCK) != 0 &&
                           (m_features & RBD_FEATURE_JOURNALING) == 0 &&
                           m_image_ctx.journal != nullptr);
  }

  if (!disabled_journaling) {
    send_v2_apply();
    return;
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  // we need to block writes temporarily to avoid in-flight journal
  // writes
  m_blocked_writes = true;
  Context *ctx = create_context_callback<
    RefreshRequest<I>, &RefreshRequest<I>::handle_v2_block_writes>(this);

  RWLock::RLocker owner_locker(m_image_ctx.owner_lock);
  m_image_ctx.io_work_queue->block_writes(ctx);
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_block_writes(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to block writes: " << cpp_strerror(*result)
               << dendl;
    save_result(result);
  }
  send_v2_apply();
  return nullptr;
}

template <typename I>
void RefreshRequest<I>::send_v2_open_object_map() {
  if ((m_features & RBD_FEATURE_OBJECT_MAP) == 0 ||
      m_image_ctx.object_map != nullptr ||
      (m_image_ctx.snap_name.empty() &&
       (m_image_ctx.read_only ||
        m_image_ctx.exclusive_lock == nullptr ||
        !m_image_ctx.exclusive_lock->is_lock_owner()))) {
    send_v2_open_journal();
    return;
  }

  // implies object map dynamically enabled or image open in-progress
  // since SetSnapRequest loads the object map for a snapshot and
  // ExclusiveLock loads the object map for HEAD
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  if (m_image_ctx.snap_name.empty()) {
    m_object_map = m_image_ctx.create_object_map(CEPH_NOSNAP);
  } else {
    for (size_t snap_idx = 0; snap_idx < m_snap_names.size(); ++snap_idx) {
      if (m_snap_names[snap_idx] == m_image_ctx.snap_name) {
        m_object_map = m_image_ctx.create_object_map(
          m_snapc.snaps[snap_idx].val);
        break;
      }
    }

    if (m_object_map == nullptr) {
      lderr(cct) << "failed to locate snapshot: " << m_image_ctx.snap_name
                 << dendl;
      send_v2_open_journal();
      return;
    }
  }

  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v2_open_object_map>(this);
  m_object_map->open(ctx);
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_open_object_map(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to open object map: " << cpp_strerror(*result)
               << dendl;
    delete m_object_map;
    m_object_map = nullptr;
  }

  send_v2_open_journal();
  return nullptr;
}

template <typename I>
void RefreshRequest<I>::send_v2_apply() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  // ensure we are not in a rados callback when applying updates
  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v2_apply>(this);
  m_image_ctx.op_work_queue->queue(ctx, 0);
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_apply(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  apply();

  return send_v2_finalize_refresh_parent();
}

template <typename I>
Context *RefreshRequest<I>::send_v2_finalize_refresh_parent() {
  if (m_refresh_parent == nullptr) {
    return send_v2_shut_down_exclusive_lock();
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v2_finalize_refresh_parent>(this);
  m_refresh_parent->finalize(ctx);
  return nullptr;
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_finalize_refresh_parent(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;

  assert(m_refresh_parent != nullptr);
  delete m_refresh_parent;
  m_refresh_parent = nullptr;

  return send_v2_shut_down_exclusive_lock();
}

template <typename I>
Context *RefreshRequest<I>::send_v2_shut_down_exclusive_lock() {
  if (m_exclusive_lock == nullptr) {
    return send_v2_close_journal();
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  // exclusive lock feature was dynamically disabled. in-flight IO will be
  // flushed and in-flight requests will be canceled before releasing lock
  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v2_shut_down_exclusive_lock>(this);
  m_exclusive_lock->shut_down(ctx);
  return nullptr;
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_shut_down_exclusive_lock(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to shut down exclusive lock: "
               << cpp_strerror(*result) << dendl;
    save_result(result);
  }

  {
    RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
    assert(m_image_ctx.exclusive_lock == nullptr);
  }

  assert(m_exclusive_lock != nullptr);
  delete m_exclusive_lock;
  m_exclusive_lock = nullptr;

  return send_v2_close_journal();
}

template <typename I>
Context *RefreshRequest<I>::send_v2_close_journal() {
  if (m_journal == nullptr) {
    return send_v2_close_object_map();
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  // journal feature was dynamically disabled
  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v2_close_journal>(this);
  m_journal->close(ctx);
  return nullptr;
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_close_journal(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    save_result(result);
    lderr(cct) << "failed to close journal: " << cpp_strerror(*result)
               << dendl;
  }

  assert(m_journal != nullptr);
  delete m_journal;
  m_journal = nullptr;

  assert(m_blocked_writes);
  m_blocked_writes = false;

  m_image_ctx.io_work_queue->unblock_writes();
  return send_v2_close_object_map();
}

template <typename I>
Context *RefreshRequest<I>::send_v2_close_object_map() {
  if (m_object_map == nullptr) {
    return send_flush_aio();
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << dendl;

  // object map was dynamically disabled
  using klass = RefreshRequest<I>;
  Context *ctx = create_context_callback<
    klass, &klass::handle_v2_close_object_map>(this);
  m_object_map->close(ctx);
  return nullptr;
}

template <typename I>
Context *RefreshRequest<I>::handle_v2_close_object_map(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;

  assert(*result == 0);
  assert(m_object_map != nullptr);
  delete m_object_map;
  m_object_map = nullptr;

  return send_flush_aio();
}

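// final step: flush outstanding AIO if new snapshots were applied, then
// complete with any saved error (or -ERESTART for a partial refresh so
// that ImageState can retry)
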
template <typename I>
Context *RefreshRequest<I>::send_flush_aio() {
  if (m_incomplete_update && m_error_result == 0) {
    // if this was a partial refresh, notify ImageState
    m_error_result = -ERESTART;
  }

  if (m_flush_aio) {
    CephContext *cct = m_image_ctx.cct;
    ldout(cct, 10) << this << " " << __func__ << dendl;

    RWLock::RLocker owner_lock(m_image_ctx.owner_lock);
    using klass = RefreshRequest<I>;
    Context *ctx = create_context_callback<
      klass, &klass::handle_flush_aio>(this);
    m_image_ctx.flush(ctx);
    return nullptr;
  } else if (m_error_result < 0) {
    // propagate saved error back to caller
    Context *ctx = create_context_callback<
      RefreshRequest<I>, &RefreshRequest<I>::handle_error>(this);
    m_image_ctx.op_work_queue->queue(ctx, 0);
    return nullptr;
  }

  return m_on_finish;
}

template <typename I>
Context *RefreshRequest<I>::handle_flush_aio(int *result) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;

  if (*result < 0) {
    lderr(cct) << "failed to flush pending AIO: " << cpp_strerror(*result)
               << dendl;
  }

  return handle_error(result);
}

template <typename I>
Context *RefreshRequest<I>::handle_error(int *result) {
  if (m_error_result < 0) {
    *result = m_error_result;

    CephContext *cct = m_image_ctx.cct;
    ldout(cct, 10) << this << " " << __func__ << ": r=" << *result << dendl;
  }
  return m_on_finish;
}

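// commit all of the state gathered by the preceding steps to the ImageCtx
// in one shot, under the full lock hierarchy (owner -> md -> cache ->
// snap -> parent), and swap in or out any feature objects that were
// dynamically enabled or disabled
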
template <typename I>
void RefreshRequest<I>::apply() {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << this << " " << __func__ << dendl;

  RWLock::WLocker owner_locker(m_image_ctx.owner_lock);
  RWLock::WLocker md_locker(m_image_ctx.md_lock);

  {
    Mutex::Locker cache_locker(m_image_ctx.cache_lock);
    RWLock::WLocker snap_locker(m_image_ctx.snap_lock);
    RWLock::WLocker parent_locker(m_image_ctx.parent_lock);

    m_image_ctx.size = m_size;
    m_image_ctx.lockers = m_lockers;
    m_image_ctx.lock_tag = m_lock_tag;
    m_image_ctx.exclusive_locked = m_exclusive_locked;

    if (m_image_ctx.old_format) {
      m_image_ctx.order = m_order;
      m_image_ctx.features = 0;
      m_image_ctx.flags = 0;
      m_image_ctx.object_prefix = std::move(m_object_prefix);
      m_image_ctx.init_layout();
    } else {
      m_image_ctx.features = m_features;
      m_image_ctx.flags = m_flags;
      m_image_ctx.group_spec = m_group_spec;
      m_image_ctx.parent_md = m_parent_md;
    }

    for (size_t i = 0; i < m_snapc.snaps.size(); ++i) {
      std::vector<librados::snap_t>::const_iterator it = std::find(
        m_image_ctx.snaps.begin(), m_image_ctx.snaps.end(),
        m_snapc.snaps[i].val);
      if (it == m_image_ctx.snaps.end()) {
        m_flush_aio = true;
        ldout(cct, 20) << "new snapshot id=" << m_snapc.snaps[i].val
                       << " name=" << m_snap_names[i]
                       << " size=" << m_snap_sizes[i]
                       << dendl;
      }
    }

    m_image_ctx.snaps.clear();
    m_image_ctx.snap_info.clear();
    m_image_ctx.snap_ids.clear();
    for (size_t i = 0; i < m_snapc.snaps.size(); ++i) {
      uint64_t flags = m_image_ctx.old_format ? 0 : m_snap_flags[i];
      uint8_t protection_status = m_image_ctx.old_format ?
        static_cast<uint8_t>(RBD_PROTECTION_STATUS_UNPROTECTED) :
        m_snap_protection[i];
      ParentInfo parent;
      if (!m_image_ctx.old_format) {
        parent = m_snap_parents[i];
      }

      m_image_ctx.add_snap(m_snap_namespaces[i], m_snap_names[i],
                           m_snapc.snaps[i].val, m_snap_sizes[i], parent,
                           protection_status, flags, m_snap_timestamps[i]);
    }
    m_image_ctx.snapc = m_snapc;

    if (m_image_ctx.snap_id != CEPH_NOSNAP &&
        m_image_ctx.get_snap_id(m_image_ctx.snap_namespace,
                                m_image_ctx.snap_name) != m_image_ctx.snap_id) {
      lderr(cct) << "tried to read from a snapshot that no longer exists: "
                 << m_image_ctx.snap_name << dendl;
      m_image_ctx.snap_exists = false;
    }

    if (m_refresh_parent != nullptr) {
      m_refresh_parent->apply();
    }
    m_image_ctx.data_ctx.selfmanaged_snap_set_write_ctx(m_image_ctx.snapc.seq,
                                                        m_image_ctx.snaps);

    // handle dynamically enabled / disabled features
    if (m_image_ctx.exclusive_lock != nullptr &&
        !m_image_ctx.test_features(RBD_FEATURE_EXCLUSIVE_LOCK,
                                   m_image_ctx.snap_lock)) {
      // disabling exclusive lock will automatically handle closing
      // object map and journaling
      assert(m_exclusive_lock == nullptr);
      m_exclusive_lock = m_image_ctx.exclusive_lock;
    } else {
      if (m_exclusive_lock != nullptr) {
        assert(m_image_ctx.exclusive_lock == nullptr);
        std::swap(m_exclusive_lock, m_image_ctx.exclusive_lock);
      }
      if (!m_image_ctx.test_features(RBD_FEATURE_JOURNALING,
                                     m_image_ctx.snap_lock)) {
        if (!m_image_ctx.clone_copy_on_read && m_image_ctx.journal != nullptr) {
          m_image_ctx.io_work_queue->set_require_lock(io::DIRECTION_READ,
                                                      false);
        }
        std::swap(m_journal, m_image_ctx.journal);
      } else if (m_journal != nullptr) {
        std::swap(m_journal, m_image_ctx.journal);
      }
      if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
                                     m_image_ctx.snap_lock) ||
          m_object_map != nullptr) {
        std::swap(m_object_map, m_image_ctx.object_map);
      }
    }
  }
}

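// look up the parent info for the given snap id: HEAD uses the mutable
// metadata result, while a snapshot uses its per-snap parent record
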
template <typename I>
int RefreshRequest<I>::get_parent_info(uint64_t snap_id,
                                       ParentInfo *parent_md) {
  if (snap_id == CEPH_NOSNAP) {
    *parent_md = m_parent_md;
    return 0;
  } else {
    for (size_t i = 0; i < m_snapc.snaps.size(); ++i) {
      if (m_snapc.snaps[i].val == snap_id) {
        *parent_md = m_snap_parents[i];
        return 0;
      }
    }
  }
  return -ENOENT;
}

} // namespace image
} // namespace librbd

template class librbd::image::RefreshRequest<librbd::ImageCtx>;