]> git.proxmox.com Git - ceph.git/blob - ceph/src/librbd/Operations.cc
d53d7637e3f130588e280046efb1e213fa996a67
[ceph.git] / ceph / src / librbd / Operations.cc
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3
4 #include "cls/rbd/cls_rbd_types.h"
5 #include "librbd/Operations.h"
6 #include "common/dout.h"
7 #include "common/errno.h"
8 #include "common/perf_counters.h"
9 #include "common/WorkQueue.h"
10 #include "osdc/Striper.h"
11
12 #include "librbd/ExclusiveLock.h"
13 #include "librbd/ImageCtx.h"
14 #include "librbd/ImageState.h"
15 #include "librbd/ImageWatcher.h"
16 #include "librbd/ObjectMap.h"
17 #include "librbd/Types.h"
18 #include "librbd/Utils.h"
19 #include "librbd/api/Config.h"
20 #include "librbd/journal/DisabledPolicy.h"
21 #include "librbd/journal/StandardPolicy.h"
22 #include "librbd/operation/DisableFeaturesRequest.h"
23 #include "librbd/operation/EnableFeaturesRequest.h"
24 #include "librbd/operation/FlattenRequest.h"
25 #include "librbd/operation/MetadataRemoveRequest.h"
26 #include "librbd/operation/MetadataSetRequest.h"
27 #include "librbd/operation/MigrateRequest.h"
28 #include "librbd/operation/ObjectMapIterate.h"
29 #include "librbd/operation/RebuildObjectMapRequest.h"
30 #include "librbd/operation/RenameRequest.h"
31 #include "librbd/operation/ResizeRequest.h"
32 #include "librbd/operation/SnapshotCreateRequest.h"
33 #include "librbd/operation/SnapshotProtectRequest.h"
34 #include "librbd/operation/SnapshotRemoveRequest.h"
35 #include "librbd/operation/SnapshotRenameRequest.h"
36 #include "librbd/operation/SnapshotRollbackRequest.h"
37 #include "librbd/operation/SnapshotUnprotectRequest.h"
38 #include "librbd/operation/SnapshotLimitRequest.h"
39 #include "librbd/operation/SparsifyRequest.h"
40 #include <set>
41 #include <boost/bind.hpp>
42 #include <boost/scope_exit.hpp>
43
44 #define dout_subsys ceph_subsys_rbd
45 #undef dout_prefix
46 #define dout_prefix *_dout << "librbd::Operations: "
47
48 namespace librbd {
49
50 namespace {
51
52 template <typename I>
53 struct C_NotifyUpdate : public Context {
54 I &image_ctx;
55 Context *on_finish;
56 bool notified = false;
57
58 C_NotifyUpdate(I &image_ctx, Context *on_finish)
59 : image_ctx(image_ctx), on_finish(on_finish) {
60 }
61
62 void complete(int r) override {
63 CephContext *cct = image_ctx.cct;
64 if (notified) {
65 if (r == -ETIMEDOUT) {
66 // don't fail the op if a peer fails to get the update notification
67 lderr(cct) << "update notification timed-out" << dendl;
68 r = 0;
69 } else if (r == -ENOENT) {
70 // don't fail if header is missing (e.g. v1 image rename)
71 ldout(cct, 5) << "update notification on missing header" << dendl;
72 r = 0;
73 } else if (r < 0) {
74 lderr(cct) << "update notification failed: " << cpp_strerror(r)
75 << dendl;
76 }
77 Context::complete(r);
78 return;
79 }
80
81 if (r < 0) {
82 // op failed -- no need to send update notification
83 Context::complete(r);
84 return;
85 }
86
87 notified = true;
88 image_ctx.notify_update(this);
89 }
90 void finish(int r) override {
91 on_finish->complete(r);
92 }
93 };
94
/**
 * State machine that runs an image operation either locally (after acquiring
 * the exclusive lock if needed) or by proxying it to the current remote lock
 * owner.  Retries via a fresh refresh/acquire pass on -ETIMEDOUT/-ERESTART,
 * and falls back to local execution when the remote owner reports
 * -EOPNOTSUPP.
 */
template <typename I>
struct C_InvokeAsyncRequest : public Context {
  /**
   * @verbatim
   *
   * <start>
   *    |
   * . . . . . . | . . . . . . . . . . . . . . . . . .
   * .           |                                   .
   * .           v                                   .
   * .       REFRESH_IMAGE (skip if not needed)      .
   * .           |                                   .
   * .           v                                   .
   * .       ACQUIRE_LOCK (skip if exclusive lock    .
   * .           |          disabled or has lock)    .
   * .           |                                   .
   * .   /--------/ \--------\ . . . . . . . . . . . .
   * .   |                   |   .
   * .   v                   v   .
   * LOCAL_REQUEST       REMOTE_REQUEST
   *     |                   |
   *     |                   |
   *     \--------\ /--------/
   *              |
   *              v
   *          <finish>
   *
   * @endverbatim
   */

  // image being operated on (not owned)
  I &image_ctx;
  // human-readable operation name used in log messages
  std::string name;
  exclusive_lock::OperationRequestType request_type;
  // whether the operation may run while the image is opened at a snapshot
  bool permit_snapshot;
  // callback that performs the operation locally
  boost::function<void(Context*)> local;
  // callback that proxies the operation to the remote lock owner
  boost::function<void(Context*)> remote;
  // error codes remapped to success in finish()
  std::set<int> filter_error_codes;
  Context *on_finish;
  // set after the remote owner returned -EOPNOTSUPP so the next pass does a
  // full acquire_lock() (not just try_acquire_lock) and runs locally
  bool request_lock = false;

  C_InvokeAsyncRequest(I &image_ctx, const std::string& name,
                       exclusive_lock::OperationRequestType request_type,
                       bool permit_snapshot,
                       const boost::function<void(Context*)>& local,
                       const boost::function<void(Context*)>& remote,
                       const std::set<int> &filter_error_codes,
                       Context *on_finish)
    : image_ctx(image_ctx), name(name), request_type(request_type),
      permit_snapshot(permit_snapshot), local(local), remote(remote),
      filter_error_codes(filter_error_codes), on_finish(on_finish) {
  }

  void send() {
    send_refresh_image();
  }

  // Refresh the image if a refresh is pending, otherwise go straight to the
  // lock-acquisition step.
  void send_refresh_image() {
    if (!image_ctx.state->is_refresh_required()) {
      send_acquire_exclusive_lock();
      return;
    }

    CephContext *cct = image_ctx.cct;
    ldout(cct, 20) << __func__ << dendl;

    Context *ctx = util::create_context_callback<
      C_InvokeAsyncRequest<I>,
      &C_InvokeAsyncRequest<I>::handle_refresh_image>(this);
    image_ctx.state->refresh(ctx);
  }

  void handle_refresh_image(int r) {
    CephContext *cct = image_ctx.cct;
    ldout(cct, 20) << __func__ << ": r=" << r << dendl;

    if (r < 0) {
      lderr(cct) << "failed to refresh image: " << cpp_strerror(r) << dendl;
      complete(r);
      return;
    }

    send_acquire_exclusive_lock();
  }

  // Decide whether the op can run locally right away, must wait for the
  // exclusive lock, or must be rejected (read-only / snapshot).
  void send_acquire_exclusive_lock() {
    // context can complete before owner_lock is unlocked
    ceph::shared_mutex &owner_lock(image_ctx.owner_lock);
    owner_lock.lock_shared();
    image_ctx.image_lock.lock_shared();
    if (image_ctx.read_only ||
        (!permit_snapshot && image_ctx.snap_id != CEPH_NOSNAP)) {
      image_ctx.image_lock.unlock_shared();
      owner_lock.unlock_shared();
      complete(-EROFS);
      return;
    }
    image_ctx.image_lock.unlock_shared();

    if (image_ctx.exclusive_lock == nullptr) {
      // exclusive locking disabled -- run locally without any lock handoff
      send_local_request();
      owner_lock.unlock_shared();
      return;
    } else if (image_ctx.image_watcher == nullptr) {
      // no watcher means the op cannot be proxied to a remote owner
      owner_lock.unlock_shared();
      complete(-EROFS);
      return;
    }

    if (image_ctx.exclusive_lock->is_lock_owner() &&
        image_ctx.exclusive_lock->accept_request(request_type, nullptr)) {
      send_local_request();
      owner_lock.unlock_shared();
      return;
    }

    CephContext *cct = image_ctx.cct;
    ldout(cct, 20) << __func__ << dendl;

    Context *ctx = util::create_async_context_callback(
      image_ctx, util::create_context_callback<
        C_InvokeAsyncRequest<I>,
        &C_InvokeAsyncRequest<I>::handle_acquire_exclusive_lock>(
          this, image_ctx.exclusive_lock));

    if (request_lock) {
      // current lock owner doesn't support op -- try to perform
      // the action locally
      request_lock = false;
      image_ctx.exclusive_lock->acquire_lock(ctx);
    } else {
      image_ctx.exclusive_lock->try_acquire_lock(ctx);
    }
    owner_lock.unlock_shared();
  }

  void handle_acquire_exclusive_lock(int r) {
    CephContext *cct = image_ctx.cct;
    ldout(cct, 20) << __func__ << ": r=" << r << dendl;

    if (r < 0) {
      // preserve blacklisting errors; everything else surfaces as read-only
      complete(r == -EBLACKLISTED ? -EBLACKLISTED : -EROFS);
      return;
    }

    // context can complete before owner_lock is unlocked
    ceph::shared_mutex &owner_lock(image_ctx.owner_lock);
    owner_lock.lock_shared();
    if (image_ctx.exclusive_lock == nullptr ||
        image_ctx.exclusive_lock->is_lock_owner()) {
      send_local_request();
      owner_lock.unlock_shared();
      return;
    }

    // another client holds the lock -- proxy the request to it
    send_remote_request();
    owner_lock.unlock_shared();
  }

  void send_remote_request() {
    ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));

    CephContext *cct = image_ctx.cct;
    ldout(cct, 20) << __func__ << dendl;

    Context *ctx = util::create_async_context_callback(
      image_ctx, util::create_context_callback<
        C_InvokeAsyncRequest<I>,
        &C_InvokeAsyncRequest<I>::handle_remote_request>(this));
    remote(ctx);
  }

  void handle_remote_request(int r) {
    CephContext *cct = image_ctx.cct;
    ldout(cct, 20) << __func__ << ": r=" << r << dendl;

    if (r == -EOPNOTSUPP) {
      // remote owner can't run this op -- restart and force local execution
      ldout(cct, 5) << name << " not supported by current lock owner" << dendl;
      request_lock = true;
      send_refresh_image();
      return;
    } else if (r != -ETIMEDOUT && r != -ERESTART) {
      // remote op finished (success or terminal failure); pick up any header
      // changes it made before completing
      image_ctx.state->handle_update_notification();

      complete(r);
      return;
    }

    // -ETIMEDOUT / -ERESTART: retry the whole refresh/acquire cycle
    ldout(cct, 5) << name << " timed out notifying lock owner" << dendl;
    send_refresh_image();
  }

  void send_local_request() {
    ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));

    CephContext *cct = image_ctx.cct;
    ldout(cct, 20) << __func__ << dendl;

    Context *ctx = util::create_async_context_callback(
      image_ctx, util::create_context_callback<
        C_InvokeAsyncRequest<I>,
        &C_InvokeAsyncRequest<I>::handle_local_request>(this));
    local(ctx);
  }

  void handle_local_request(int r) {
    CephContext *cct = image_ctx.cct;
    ldout(cct, 20) << __func__ << ": r=" << r << dendl;

    if (r == -ERESTART) {
      send_refresh_image();
      return;
    }
    complete(r);
  }

  // Remap caller-specified "benign" error codes to success before invoking
  // the user callback.
  void finish(int r) override {
    if (filter_error_codes.count(r) != 0) {
      r = 0;
    }
    on_finish->complete(r);
  }
};
317
318 template <typename I>
319 bool needs_invalidate(I& image_ctx, uint64_t object_no,
320 uint8_t current_state, uint8_t new_state) {
321 if ( (current_state == OBJECT_EXISTS ||
322 current_state == OBJECT_EXISTS_CLEAN) &&
323 (new_state == OBJECT_NONEXISTENT ||
324 new_state == OBJECT_PENDING)) {
325 return false;
326 }
327 return true;
328 }
329
330 } // anonymous namespace
331
// Construct the operations facade for an image; the async request sequence
// counter (used to tag proxied requests) starts at zero.
template <typename I>
Operations<I>::Operations(I &image_ctx)
  : m_image_ctx(image_ctx), m_async_request_seq(0) {
}
336
// Flatten a cloned image (copy all parent-backed data into the child and
// detach it from its parent).  Blocks until the operation completes.
// Returns 0 on success, negative errno on failure.
template <typename I>
int Operations<I>::flatten(ProgressContext &prog_ctx) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << "flatten" << dendl;

  int r = m_image_ctx.state->refresh_if_required();
  if (r < 0) {
    return r;
  }

  if (m_image_ctx.read_only) {
    return -EROFS;
  }

  {
    std::shared_lock image_locker{m_image_ctx.image_lock};
    // fast-path rejection: a pool_id of -1 means the image has no parent
    if (m_image_ctx.parent_md.spec.pool_id == -1) {
      lderr(cct) << "image has no parent" << dendl;
      return -EINVAL;
    }
  }

  uint64_t request_id = ++m_async_request_seq;
  r = invoke_async_request("flatten",
                           exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
                           false,
                           boost::bind(&Operations<I>::execute_flatten, this,
                                       boost::ref(prog_ctx), _1),
                           boost::bind(&ImageWatcher<I>::notify_flatten,
                                       m_image_ctx.image_watcher, request_id,
                                       boost::ref(prog_ctx), _1));

  // -EINVAL is filtered: execute_flatten() reports it when the image has no
  // parent, which at this point presumably means another client finished the
  // flatten first -- TODO confirm against notify_flatten semantics
  if (r < 0 && r != -EINVAL) {
    return r;
  }
  ldout(cct, 20) << "flatten finished" << dendl;
  return 0;
}
375
// Asynchronous body of flatten().  Caller must hold owner_lock and, when
// exclusive locking is enabled, own the lock.  Completes on_finish with a
// negative errno on validation failure, otherwise after the flatten request
// (and a header-update notification) finishes.
template <typename I>
void Operations<I>::execute_flatten(ProgressContext &prog_ctx,
                                    Context *on_finish) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
              m_image_ctx.exclusive_lock->is_lock_owner());

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << "flatten" << dendl;

  if (m_image_ctx.read_only || m_image_ctx.operations_disabled) {
    on_finish->complete(-EROFS);
    return;
  }

  m_image_ctx.image_lock.lock_shared();

  // can't flatten a non-clone
  if (m_image_ctx.parent_md.spec.pool_id == -1) {
    lderr(cct) << "image has no parent" << dendl;
    m_image_ctx.image_lock.unlock_shared();
    on_finish->complete(-EINVAL);
    return;
  }
  if (m_image_ctx.snap_id != CEPH_NOSNAP) {
    lderr(cct) << "snapshots cannot be flattened" << dendl;
    m_image_ctx.image_lock.unlock_shared();
    on_finish->complete(-EROFS);
    return;
  }

  // snapshot context and overlap are sampled under image_lock so the request
  // operates on a consistent view of the image
  ::SnapContext snapc = m_image_ctx.snapc;

  uint64_t overlap;
  int r = m_image_ctx.get_parent_overlap(CEPH_NOSNAP, &overlap);
  ceph_assert(r == 0);
  ceph_assert(overlap <= m_image_ctx.size);

  uint64_t overlap_objects = Striper::get_num_objects(m_image_ctx.layout,
                                                      overlap);

  m_image_ctx.image_lock.unlock_shared();

  operation::FlattenRequest<I> *req = new operation::FlattenRequest<I>(
    m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish), overlap_objects,
    snapc, prog_ctx);
  req->send();
}
424
425 template <typename I>
426 int Operations<I>::rebuild_object_map(ProgressContext &prog_ctx) {
427 CephContext *cct = m_image_ctx.cct;
428 ldout(cct, 10) << "rebuild_object_map" << dendl;
429
430 int r = m_image_ctx.state->refresh_if_required();
431 if (r < 0) {
432 return r;
433 }
434
435 uint64_t request_id = ++m_async_request_seq;
436 r = invoke_async_request("rebuild object map",
437 exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL, true,
438 boost::bind(&Operations<I>::execute_rebuild_object_map,
439 this, boost::ref(prog_ctx), _1),
440 boost::bind(&ImageWatcher<I>::notify_rebuild_object_map,
441 m_image_ctx.image_watcher, request_id,
442 boost::ref(prog_ctx), _1));
443
444 ldout(cct, 10) << "rebuild object map finished" << dendl;
445 if (r < 0) {
446 return r;
447 }
448 return 0;
449 }
450
451 template <typename I>
452 void Operations<I>::execute_rebuild_object_map(ProgressContext &prog_ctx,
453 Context *on_finish) {
454 ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
455 ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
456 m_image_ctx.exclusive_lock->is_lock_owner());
457
458 CephContext *cct = m_image_ctx.cct;
459 ldout(cct, 5) << this << " " << __func__ << dendl;
460
461 if (m_image_ctx.read_only || m_image_ctx.operations_disabled) {
462 on_finish->complete(-EROFS);
463 return;
464 }
465
466 if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP)) {
467 lderr(cct) << "image must support object-map feature" << dendl;
468 on_finish->complete(-EINVAL);
469 return;
470 }
471
472 operation::RebuildObjectMapRequest<I> *req =
473 new operation::RebuildObjectMapRequest<I>(
474 m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish), prog_ctx);
475 req->send();
476 }
477
// Validate the object map against actual object states.  The remote callback
// unconditionally queues -EOPNOTSUPP, which makes C_InvokeAsyncRequest fall
// back to acquiring the exclusive lock and running the check locally.
template <typename I>
int Operations<I>::check_object_map(ProgressContext &prog_ctx) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << dendl;
  int r = m_image_ctx.state->refresh_if_required();
  if (r < 0) {
    return r;
  }

  r = invoke_async_request("check object map",
                           exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL, true,
                           boost::bind(&Operations<I>::check_object_map, this,
                                       boost::ref(prog_ctx), _1),
                           [this](Context *c) {
                             m_image_ctx.op_work_queue->queue(c, -EOPNOTSUPP);
                           });

  return r;
}
497
498 template <typename I>
499 void Operations<I>::object_map_iterate(ProgressContext &prog_ctx,
500 operation::ObjectIterateWork<I> handle_mismatch,
501 Context *on_finish) {
502 ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
503 ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
504 m_image_ctx.exclusive_lock->is_lock_owner());
505
506 if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP)) {
507 on_finish->complete(-EINVAL);
508 return;
509 }
510
511 operation::ObjectMapIterateRequest<I> *req =
512 new operation::ObjectMapIterateRequest<I>(m_image_ctx, on_finish,
513 prog_ctx, handle_mismatch);
514 req->send();
515 }
516
// Asynchronous body of check_object_map(): iterate the object map using
// needs_invalidate() to decide which mismatches require invalidation.
template <typename I>
void Operations<I>::check_object_map(ProgressContext &prog_ctx,
                                     Context *on_finish) {
  object_map_iterate(prog_ctx, needs_invalidate, on_finish);
}
522
// Rename the image to dstname.  Blocks until the rename completes.  Returns
// 0 on success, -EEXIST if an image with that name already exists, or other
// negative errno on failure.
template <typename I>
int Operations<I>::rename(const char *dstname) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": dest_name=" << dstname
                << dendl;

  // probe for a name collision before doing any work; -ENOENT is the
  // expected "destination free" result
  int r = librbd::detect_format(m_image_ctx.md_ctx, dstname, NULL, NULL);
  if (r < 0 && r != -ENOENT) {
    lderr(cct) << "error checking for existing image called "
               << dstname << ":" << cpp_strerror(r) << dendl;
    return r;
  }
  if (r == 0) {
    lderr(cct) << "rbd image " << dstname << " already exists" << dendl;
    return -EEXIST;
  }

  if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
    // journaling requires the rename to be coordinated through the
    // exclusive-lock owner
    r = invoke_async_request("rename",
                             exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
                             true,
                             boost::bind(&Operations<I>::execute_rename, this,
                                         dstname, _1),
                             boost::bind(&ImageWatcher<I>::notify_rename,
                                         m_image_ctx.image_watcher, dstname,
                                         _1));
    // -EEXIST here is tolerated -- presumably the rename already happened
    // remotely; TODO confirm against notify_rename semantics
    if (r < 0 && r != -EEXIST) {
      return r;
    }
  } else {
    // no journaling: run locally under a shared owner lock
    C_SaferCond cond_ctx;
    {
      std::shared_lock owner_lock{m_image_ctx.owner_lock};
      execute_rename(dstname, &cond_ctx);
    }

    r = cond_ctx.wait();
    if (r < 0) {
      return r;
    }
  }

  // update the in-memory name only after the on-disk rename succeeded
  m_image_ctx.set_image_name(dstname);
  return 0;
}
568
// Asynchronous body of rename().  For v1 (old-format) images the header
// object itself is renamed, so the watch must be unregistered first and
// re-registered afterwards; the callback chain below runs, in order:
// unregister watch -> RenameRequest -> set_oid + register watch ->
// header-update notification -> caller's on_finish.
template <typename I>
void Operations<I>::execute_rename(const std::string &dest_name,
                                   Context *on_finish) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
    // only journaled images require lock ownership for rename
    ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
                m_image_ctx.exclusive_lock->is_lock_owner());
  }

  if (m_image_ctx.operations_disabled) {
    on_finish->complete(-EROFS);
    return;
  }

  m_image_ctx.image_lock.lock_shared();
  if (m_image_ctx.name == dest_name) {
    m_image_ctx.image_lock.unlock_shared();
    on_finish->complete(-EEXIST);
    return;
  }
  m_image_ctx.image_lock.unlock_shared();

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": dest_name=" << dest_name
                << dendl;

  if (m_image_ctx.old_format) {
    // unregister watch before and register back after rename
    // (each wrapper below runs AFTER the context it wraps is needed; the
    // chain is built innermost-first)
    on_finish = new C_NotifyUpdate<I>(m_image_ctx, on_finish);
    on_finish = new LambdaContext([this, on_finish](int r) {
	// rename completed: point the watcher at the (possibly new) header
	// object and re-establish the watch
	if (m_image_ctx.old_format) {
	  m_image_ctx.image_watcher->set_oid(m_image_ctx.header_oid);
	}
	m_image_ctx.image_watcher->register_watch(on_finish);
      });
    on_finish = new LambdaContext([this, dest_name, on_finish](int r) {
	// watch dropped: now safe to rename the header object
	std::shared_lock owner_locker{m_image_ctx.owner_lock};
	operation::RenameRequest<I> *req = new operation::RenameRequest<I>(
	  m_image_ctx, on_finish, dest_name);
	req->send();
      });
    m_image_ctx.image_watcher->unregister_watch(on_finish);
    return;
  }
  // v2 images: header object name is id-based, no watch juggling required
  operation::RenameRequest<I> *req = new operation::RenameRequest<I>(
    m_image_ctx, on_finish, dest_name);
  req->send();
}
617
// Resize the image to the given size.  Shrinking is only permitted when
// allow_shrink is true.  Blocks until the operation completes.  Returns 0 on
// success, negative errno on failure.
template <typename I>
int Operations<I>::resize(uint64_t size, bool allow_shrink, ProgressContext& prog_ctx) {
  CephContext *cct = m_image_ctx.cct;

  m_image_ctx.image_lock.lock_shared();
  ldout(cct, 5) << this << " " << __func__ << ": "
                << "size=" << m_image_ctx.size << ", "
                << "new_size=" << size << dendl;
  m_image_ctx.image_lock.unlock_shared();

  int r = m_image_ctx.state->refresh_if_required();
  if (r < 0) {
    return r;
  }

  // reject sizes the object map cannot represent before dispatching
  if (m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP) &&
      !ObjectMap<>::is_compatible(m_image_ctx.layout, size)) {
    lderr(cct) << "New size not compatible with object map" << dendl;
    return -EINVAL;
  }

  uint64_t request_id = ++m_async_request_seq;
  r = invoke_async_request("resize",
                           exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
                           false,
                           boost::bind(&Operations<I>::execute_resize, this,
                                       size, allow_shrink, boost::ref(prog_ctx), _1, 0),
                           boost::bind(&ImageWatcher<I>::notify_resize,
                                       m_image_ctx.image_watcher, request_id,
                                       size, allow_shrink, boost::ref(prog_ctx), _1));

  // NOTE(review): the perf counter and "finished" log fire regardless of r,
  // so failed resizes are counted as attempts here as well
  m_image_ctx.perfcounter->inc(l_librbd_resize);
  ldout(cct, 2) << "resize finished" << dendl;
  return r;
}
653
// Asynchronous body of resize().  Caller must hold owner_lock and, when
// exclusive locking is enabled, own the lock.  journal_op_tid is non-zero
// only when replaying a journaled resize.
template <typename I>
void Operations<I>::execute_resize(uint64_t size, bool allow_shrink, ProgressContext &prog_ctx,
                                   Context *on_finish,
                                   uint64_t journal_op_tid) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
              m_image_ctx.exclusive_lock->is_lock_owner());

  CephContext *cct = m_image_ctx.cct;
  m_image_ctx.image_lock.lock_shared();
  ldout(cct, 5) << this << " " << __func__ << ": "
                << "size=" << m_image_ctx.size << ", "
                << "new_size=" << size << dendl;

  // snapshots / read-only / disabled-op images cannot be resized; the
  // object-map compatibility check is repeated here in case the remote
  // caller skipped the front-end validation
  if (m_image_ctx.snap_id != CEPH_NOSNAP || m_image_ctx.read_only ||
      m_image_ctx.operations_disabled) {
    m_image_ctx.image_lock.unlock_shared();
    on_finish->complete(-EROFS);
    return;
  } else if (m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
                                       m_image_ctx.image_lock) &&
             !ObjectMap<>::is_compatible(m_image_ctx.layout, size)) {
    m_image_ctx.image_lock.unlock_shared();
    on_finish->complete(-EINVAL);
    return;
  }
  m_image_ctx.image_lock.unlock_shared();

  operation::ResizeRequest<I> *req = new operation::ResizeRequest<I>(
    m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish), size, allow_shrink,
    prog_ctx, journal_op_tid, false);
  req->send();
}
687
688 template <typename I>
689 int Operations<I>::snap_create(const cls::rbd::SnapshotNamespace &snap_namespace,
690 const std::string& snap_name) {
691 if (m_image_ctx.read_only) {
692 return -EROFS;
693 }
694
695 int r = m_image_ctx.state->refresh_if_required();
696 if (r < 0) {
697 return r;
698 }
699
700 C_SaferCond ctx;
701 snap_create(snap_namespace, snap_name, &ctx);
702 r = ctx.wait();
703
704 if (r < 0) {
705 return r;
706 }
707
708 m_image_ctx.perfcounter->inc(l_librbd_snap_create);
709 return r;
710 }
711
// Asynchronously create a snapshot.  Completes on_finish with 0 on success
// or a negative errno.  -EEXIST from the async machinery is filtered to
// success (snapshot already created, e.g. by a remote peer).
template <typename I>
void Operations<I>::snap_create(const cls::rbd::SnapshotNamespace &snap_namespace,
				const std::string& snap_name,
				Context *on_finish) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
                << dendl;

  if (m_image_ctx.read_only) {
    on_finish->complete(-EROFS);
    return;
  }

  // quick local duplicate check before dispatching the state machine
  m_image_ctx.image_lock.lock_shared();
  if (m_image_ctx.get_snap_id(snap_namespace, snap_name) != CEPH_NOSNAP) {
    m_image_ctx.image_lock.unlock_shared();
    on_finish->complete(-EEXIST);
    return;
  }
  m_image_ctx.image_lock.unlock_shared();

  C_InvokeAsyncRequest<I> *req = new C_InvokeAsyncRequest<I>(
    m_image_ctx, "snap_create", exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
    true,
    boost::bind(&Operations<I>::execute_snap_create, this, snap_namespace, snap_name,
                _1, 0, false),
    boost::bind(&ImageWatcher<I>::notify_snap_create, m_image_ctx.image_watcher,
                snap_namespace, snap_name, _1),
    {-EEXIST}, on_finish);
  req->send();
}
743
// Asynchronous body of snap_create().  Caller must hold owner_lock and, when
// exclusive locking is enabled, own the lock.  journal_op_tid is non-zero
// only during journal replay; skip_object_map suppresses object-map
// snapshotting (used by callers that manage it themselves).
template <typename I>
void Operations<I>::execute_snap_create(const cls::rbd::SnapshotNamespace &snap_namespace,
                                        const std::string &snap_name,
                                        Context *on_finish,
                                        uint64_t journal_op_tid,
                                        bool skip_object_map) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
              m_image_ctx.exclusive_lock->is_lock_owner());

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
                << dendl;

  if (m_image_ctx.operations_disabled) {
    on_finish->complete(-EROFS);
    return;
  }

  // re-check for duplicates: the snapshot may have appeared between the
  // front-end check and lock acquisition
  m_image_ctx.image_lock.lock_shared();
  if (m_image_ctx.get_snap_id(snap_namespace, snap_name) != CEPH_NOSNAP) {
    m_image_ctx.image_lock.unlock_shared();
    on_finish->complete(-EEXIST);
    return;
  }
  m_image_ctx.image_lock.unlock_shared();

  operation::SnapshotCreateRequest<I> *req =
    new operation::SnapshotCreateRequest<I>(
      m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish),
      snap_namespace, snap_name, journal_op_tid, skip_object_map);
  req->send();
}
777
// Roll the image back to the named snapshot.  Blocks until the rollback
// completes.  Rollback always runs locally, so the exclusive lock is
// acquired via prepare_image_update() rather than proxied to a remote owner.
template <typename I>
int Operations<I>::snap_rollback(const cls::rbd::SnapshotNamespace& snap_namespace,
                                 const std::string& snap_name,
				 ProgressContext& prog_ctx) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
                << dendl;

  int r = m_image_ctx.state->refresh_if_required();
  if (r < 0)
    return r;

  C_SaferCond cond_ctx;
  {
    std::shared_lock owner_locker{m_image_ctx.owner_lock};
    {
      // need to drop image_lock before invalidating cache
      std::shared_lock image_locker{m_image_ctx.image_lock};
      if (!m_image_ctx.snap_exists) {
        return -ENOENT;
      }

      if (m_image_ctx.snap_id != CEPH_NOSNAP || m_image_ctx.read_only) {
        return -EROFS;
      }

      uint64_t snap_id = m_image_ctx.get_snap_id(snap_namespace, snap_name);
      if (snap_id == CEPH_NOSNAP) {
        lderr(cct) << "No such snapshot found." << dendl;
        return -ENOENT;
      }
    }

    // acquire the exclusive lock (if enabled) while still holding owner_lock
    r = prepare_image_update(exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
                             false);
    if (r < 0) {
      return r;
    }

    execute_snap_rollback(snap_namespace, snap_name, prog_ctx, &cond_ctx);
  }

  // owner_lock is released before blocking on completion
  r = cond_ctx.wait();
  if (r < 0) {
    return r;
  }

  m_image_ctx.perfcounter->inc(l_librbd_snap_rollback);
  return r;
}
828
// Asynchronous body of snap_rollback().  Caller must hold owner_lock.  The
// image is resized to the snapshot's size as part of the rollback.
template <typename I>
void Operations<I>::execute_snap_rollback(const cls::rbd::SnapshotNamespace& snap_namespace,
					  const std::string &snap_name,
                                          ProgressContext& prog_ctx,
                                          Context *on_finish) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
                << dendl;

  if (m_image_ctx.operations_disabled) {
    on_finish->complete(-EROFS);
    return;
  }

  m_image_ctx.image_lock.lock_shared();
  uint64_t snap_id = m_image_ctx.get_snap_id(snap_namespace, snap_name);
  if (snap_id == CEPH_NOSNAP) {
    lderr(cct) << "No such snapshot found." << dendl;
    m_image_ctx.image_lock.unlock_shared();
    on_finish->complete(-ENOENT);
    return;
  }

  // target size is the image size as of the snapshot
  uint64_t new_size = m_image_ctx.get_image_size(snap_id);
  m_image_ctx.image_lock.unlock_shared();

  // async mode used for journal replay
  operation::SnapshotRollbackRequest<I> *request =
    new operation::SnapshotRollbackRequest<I>(
      m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish), snap_namespace, snap_name,
      snap_id, new_size, prog_ctx);
  request->send();
}
863
864 template <typename I>
865 int Operations<I>::snap_remove(const cls::rbd::SnapshotNamespace& snap_namespace,
866 const std::string& snap_name) {
867 if (m_image_ctx.read_only) {
868 return -EROFS;
869 }
870
871 int r = m_image_ctx.state->refresh_if_required();
872 if (r < 0) {
873 return r;
874 }
875
876 C_SaferCond ctx;
877 snap_remove(snap_namespace, snap_name, &ctx);
878 r = ctx.wait();
879
880 if (r < 0) {
881 return r;
882 }
883
884 m_image_ctx.perfcounter->inc(l_librbd_snap_remove);
885 return 0;
886 }
887
// Asynchronously remove a snapshot.  When fast-diff or journaling is
// enabled the removal must be coordinated through the exclusive-lock owner
// (proxy_op); otherwise it runs locally under a shared owner lock.  -ENOENT
// from the proxied path is filtered to success (already removed remotely).
template <typename I>
void Operations<I>::snap_remove(const cls::rbd::SnapshotNamespace& snap_namespace,
				const std::string& snap_name,
				Context *on_finish) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
                << dendl;

  if (m_image_ctx.read_only) {
    on_finish->complete(-EROFS);
    return;
  }

  // quickly filter out duplicate ops
  m_image_ctx.image_lock.lock_shared();
  if (m_image_ctx.get_snap_id(snap_namespace, snap_name) == CEPH_NOSNAP) {
    m_image_ctx.image_lock.unlock_shared();
    on_finish->complete(-ENOENT);
    return;
  }

  bool proxy_op = ((m_image_ctx.features & RBD_FEATURE_FAST_DIFF) != 0 ||
                   (m_image_ctx.features & RBD_FEATURE_JOURNALING) != 0);
  m_image_ctx.image_lock.unlock_shared();

  if (proxy_op) {
    // trash-namespace snapshots use a dedicated request type so the lock
    // policy can distinguish them from general operations
    auto request_type = exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL;
    if (cls::rbd::get_snap_namespace_type(snap_namespace) ==
        cls::rbd::SNAPSHOT_NAMESPACE_TYPE_TRASH) {
      request_type = exclusive_lock::OPERATION_REQUEST_TYPE_TRASH_SNAP_REMOVE;
    }
    C_InvokeAsyncRequest<I> *req = new C_InvokeAsyncRequest<I>(
      m_image_ctx, "snap_remove", request_type, true,
      boost::bind(&Operations<I>::execute_snap_remove, this, snap_namespace, snap_name, _1),
      boost::bind(&ImageWatcher<I>::notify_snap_remove, m_image_ctx.image_watcher,
                  snap_namespace, snap_name, _1),
      {-ENOENT}, on_finish);
    req->send();
  } else {
    std::shared_lock owner_lock{m_image_ctx.owner_lock};
    execute_snap_remove(snap_namespace, snap_name, on_finish);
  }
}
931
// Asynchronous body of snap_remove().  Caller must hold owner_lock; lock
// ownership is only asserted when fast-diff is enabled (the case that
// requires the proxied path).  Protected snapshots are refused with -EBUSY.
template <typename I>
void Operations<I>::execute_snap_remove(const cls::rbd::SnapshotNamespace& snap_namespace,
					const std::string &snap_name,
                                        Context *on_finish) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  {
    if ((m_image_ctx.features & RBD_FEATURE_FAST_DIFF) != 0) {
      ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
                  m_image_ctx.exclusive_lock->is_lock_owner());
    }
  }

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
                << dendl;

  if (m_image_ctx.operations_disabled) {
    on_finish->complete(-EROFS);
    return;
  }

  m_image_ctx.image_lock.lock_shared();
  uint64_t snap_id = m_image_ctx.get_snap_id(snap_namespace, snap_name);
  if (snap_id == CEPH_NOSNAP) {
    lderr(m_image_ctx.cct) << "No such snapshot found." << dendl;
    m_image_ctx.image_lock.unlock_shared();
    on_finish->complete(-ENOENT);
    return;
  }

  // protected snapshots may back clones and must be unprotected first
  bool is_protected;
  int r = m_image_ctx.is_snap_protected(snap_id, &is_protected);
  if (r < 0) {
    m_image_ctx.image_lock.unlock_shared();
    on_finish->complete(r);
    return;
  } else if (is_protected) {
    lderr(m_image_ctx.cct) << "snapshot is protected" << dendl;
    m_image_ctx.image_lock.unlock_shared();
    on_finish->complete(-EBUSY);
    return;
  }
  m_image_ctx.image_lock.unlock_shared();

  operation::SnapshotRemoveRequest<I> *req =
    new operation::SnapshotRemoveRequest<I>(
      m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish),
      snap_namespace, snap_name, snap_id);
  req->send();
}
982
// Rename a user-namespace snapshot from srcname to dstname.  Blocks until
// the rename completes.  Returns 0 on success, -ENOENT if the source is
// missing, -EEXIST if the destination exists, or other negative errno.
template <typename I>
int Operations<I>::snap_rename(const char *srcname, const char *dstname) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": "
                << "snap_name=" << srcname << ", "
                << "new_snap_name=" << dstname << dendl;

  snapid_t snap_id;
  if (m_image_ctx.read_only) {
    return -EROFS;
  }

  int r = m_image_ctx.state->refresh_if_required();
  if (r < 0)
    return r;

  {
    // resolve the source snapshot id and pre-check for a name collision;
    // only user-namespace snapshots are considered
    std::shared_lock l{m_image_ctx.image_lock};
    snap_id = m_image_ctx.get_snap_id(cls::rbd::UserSnapshotNamespace(), srcname);
    if (snap_id == CEPH_NOSNAP) {
      return -ENOENT;
    }
    if (m_image_ctx.get_snap_id(cls::rbd::UserSnapshotNamespace(), dstname) != CEPH_NOSNAP) {
      return -EEXIST;
    }
  }

  if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
    // journaling requires coordination through the exclusive-lock owner
    r = invoke_async_request("snap_rename",
                             exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
                             true,
                             boost::bind(&Operations<I>::execute_snap_rename,
                                         this, snap_id, dstname, _1),
                             boost::bind(&ImageWatcher<I>::notify_snap_rename,
                                         m_image_ctx.image_watcher, snap_id,
                                         dstname, _1));
    // -EEXIST is tolerated -- presumably the rename already happened
    // remotely; TODO confirm against notify_snap_rename semantics
    if (r < 0 && r != -EEXIST) {
      return r;
    }
  } else {
    C_SaferCond cond_ctx;
    {
      std::shared_lock owner_lock{m_image_ctx.owner_lock};
      execute_snap_rename(snap_id, dstname, &cond_ctx);
    }

    r = cond_ctx.wait();
    if (r < 0) {
      return r;
    }
  }

  m_image_ctx.perfcounter->inc(l_librbd_snap_rename);
  return 0;
}
1038
// Asynchronous body of snap_rename().  Caller must hold owner_lock; lock
// ownership is only asserted when journaling is enabled.
template <typename I>
void Operations<I>::execute_snap_rename(const uint64_t src_snap_id,
                                        const std::string &dest_snap_name,
                                        Context *on_finish) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  if ((m_image_ctx.features & RBD_FEATURE_JOURNALING) != 0) {
    ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
                m_image_ctx.exclusive_lock->is_lock_owner());
  }

  if (m_image_ctx.operations_disabled) {
    on_finish->complete(-EROFS);
    return;
  }

  // re-check for a destination collision under image_lock; the name may have
  // been taken since the front-end validation
  m_image_ctx.image_lock.lock_shared();
  if (m_image_ctx.get_snap_id(cls::rbd::UserSnapshotNamespace(),
                              dest_snap_name) != CEPH_NOSNAP) {
    // Renaming is supported for snapshots from user namespace only.
    m_image_ctx.image_lock.unlock_shared();
    on_finish->complete(-EEXIST);
    return;
  }
  m_image_ctx.image_lock.unlock_shared();

  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": "
                << "snap_id=" << src_snap_id << ", "
                << "new_snap_name=" << dest_snap_name << dendl;

  operation::SnapshotRenameRequest<I> *req =
    new operation::SnapshotRenameRequest<I>(
      m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish), src_snap_id,
      dest_snap_name);
  req->send();
}
1075
template <typename I>
// Protect a snapshot against removal (a prerequisite for cloning).
// Synchronous API entry point: validates preconditions, then either proxies
// the request to the exclusive-lock owner (journaling images) or executes it
// locally under the owner lock.  Returns 0 on success, negative errno on
// failure (-ENOSYS without layering, -EBUSY if already protected).
int Operations<I>::snap_protect(const cls::rbd::SnapshotNamespace& snap_namespace,
                                const std::string& snap_name) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
                << dendl;

  if (m_image_ctx.read_only) {
    return -EROFS;
  }

  // snapshot protection only has meaning for layered (clonable) images
  if (!m_image_ctx.test_features(RBD_FEATURE_LAYERING)) {
    lderr(cct) << "image must support layering" << dendl;
    return -ENOSYS;
  }

  int r = m_image_ctx.state->refresh_if_required();
  if (r < 0) {
    return r;
  }

  {
    // fast-path precondition check under the image lock; the authoritative
    // re-check happens again in execute_snap_protect on the lock owner
    std::shared_lock image_locker{m_image_ctx.image_lock};
    bool is_protected;
    r = m_image_ctx.is_snap_protected(m_image_ctx.get_snap_id(snap_namespace, snap_name),
                                      &is_protected);
    if (r < 0) {
      return r;
    }

    if (is_protected) {
      return -EBUSY;
    }
  }

  if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
    // journaling requires the operation to run on the exclusive lock owner
    r = invoke_async_request("snap_protect",
                             exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
                             true,
                             boost::bind(&Operations<I>::execute_snap_protect,
                                         this, snap_namespace, snap_name, _1),
                             boost::bind(&ImageWatcher<I>::notify_snap_protect,
                                         m_image_ctx.image_watcher,
                                         snap_namespace, snap_name, _1));
    // -EBUSY here means the snapshot is already protected: treat as success
    if (r < 0 && r != -EBUSY) {
      return r;
    }
  } else {
    C_SaferCond cond_ctx;
    {
      std::shared_lock owner_lock{m_image_ctx.owner_lock};
      execute_snap_protect(snap_namespace, snap_name, &cond_ctx);
    }

    r = cond_ctx.wait();
    if (r < 0) {
      return r;
    }
  }
  return 0;
}
1137
1138 template <typename I>
1139 void Operations<I>::execute_snap_protect(const cls::rbd::SnapshotNamespace& snap_namespace,
1140 const std::string &snap_name,
1141 Context *on_finish) {
1142 ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
1143 if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
1144 ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
1145 m_image_ctx.exclusive_lock->is_lock_owner());
1146 }
1147
1148 if (m_image_ctx.operations_disabled) {
1149 on_finish->complete(-EROFS);
1150 return;
1151 }
1152
1153 m_image_ctx.image_lock.lock_shared();
1154 bool is_protected;
1155 int r = m_image_ctx.is_snap_protected(m_image_ctx.get_snap_id(snap_namespace, snap_name),
1156 &is_protected);
1157 if (r < 0) {
1158 m_image_ctx.image_lock.unlock_shared();
1159 on_finish->complete(r);
1160 return;
1161 } else if (is_protected) {
1162 m_image_ctx.image_lock.unlock_shared();
1163 on_finish->complete(-EBUSY);
1164 return;
1165 }
1166 m_image_ctx.image_lock.unlock_shared();
1167
1168 CephContext *cct = m_image_ctx.cct;
1169 ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
1170 << dendl;
1171
1172 operation::SnapshotProtectRequest<I> *request =
1173 new operation::SnapshotProtectRequest<I>(
1174 m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish), snap_namespace, snap_name);
1175 request->send();
1176 }
1177
template <typename I>
// Remove protection from a snapshot so it may be deleted.
// Synchronous API entry point mirroring snap_protect(): validates
// preconditions, then proxies to the exclusive-lock owner (journaling
// images) or executes locally.  Returns 0 on success, negative errno on
// failure (-EINVAL if already unprotected).
int Operations<I>::snap_unprotect(const cls::rbd::SnapshotNamespace& snap_namespace,
                                  const std::string& snap_name) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
                << dendl;

  if (m_image_ctx.read_only) {
    return -EROFS;
  }

  int r = m_image_ctx.state->refresh_if_required();
  if (r < 0) {
    return r;
  }

  {
    // fast-path precondition check under the image lock; re-checked
    // authoritatively in execute_snap_unprotect on the lock owner
    std::shared_lock image_locker{m_image_ctx.image_lock};
    bool is_unprotected;
    r = m_image_ctx.is_snap_unprotected(m_image_ctx.get_snap_id(snap_namespace, snap_name),
                                        &is_unprotected);
    if (r < 0) {
      return r;
    }

    if (is_unprotected) {
      return -EINVAL;
    }
  }

  if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
    // journaling requires the operation to run on the exclusive lock owner
    r = invoke_async_request("snap_unprotect",
                             exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
                             true,
                             boost::bind(&Operations<I>::execute_snap_unprotect,
                                         this, snap_namespace, snap_name, _1),
                             boost::bind(&ImageWatcher<I>::notify_snap_unprotect,
                                         m_image_ctx.image_watcher,
                                         snap_namespace, snap_name, _1));
    // -EINVAL here means the snapshot is already unprotected: treat as success
    if (r < 0 && r != -EINVAL) {
      return r;
    }
  } else {
    C_SaferCond cond_ctx;
    {
      std::shared_lock owner_lock{m_image_ctx.owner_lock};
      execute_snap_unprotect(snap_namespace, snap_name, &cond_ctx);
    }

    r = cond_ctx.wait();
    if (r < 0) {
      return r;
    }
  }
  return 0;
}
1234
1235 template <typename I>
1236 void Operations<I>::execute_snap_unprotect(const cls::rbd::SnapshotNamespace& snap_namespace,
1237 const std::string &snap_name,
1238 Context *on_finish) {
1239 ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
1240 if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
1241 ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
1242 m_image_ctx.exclusive_lock->is_lock_owner());
1243 }
1244
1245 if (m_image_ctx.operations_disabled) {
1246 on_finish->complete(-EROFS);
1247 return;
1248 }
1249
1250 m_image_ctx.image_lock.lock_shared();
1251 bool is_unprotected;
1252 int r = m_image_ctx.is_snap_unprotected(m_image_ctx.get_snap_id(snap_namespace, snap_name),
1253 &is_unprotected);
1254 if (r < 0) {
1255 m_image_ctx.image_lock.unlock_shared();
1256 on_finish->complete(r);
1257 return;
1258 } else if (is_unprotected) {
1259 m_image_ctx.image_lock.unlock_shared();
1260 on_finish->complete(-EINVAL);
1261 return;
1262 }
1263 m_image_ctx.image_lock.unlock_shared();
1264
1265 CephContext *cct = m_image_ctx.cct;
1266 ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
1267 << dendl;
1268
1269 operation::SnapshotUnprotectRequest<I> *request =
1270 new operation::SnapshotUnprotectRequest<I>(
1271 m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish), snap_namespace, snap_name);
1272 request->send();
1273 }
1274
1275 template <typename I>
1276 int Operations<I>::snap_set_limit(uint64_t limit) {
1277 CephContext *cct = m_image_ctx.cct;
1278 ldout(cct, 5) << this << " " << __func__ << ": limit=" << limit << dendl;
1279
1280 if (m_image_ctx.read_only) {
1281 return -EROFS;
1282 }
1283
1284 int r = m_image_ctx.state->refresh_if_required();
1285 if (r < 0) {
1286 return r;
1287 }
1288
1289 C_SaferCond limit_ctx;
1290 {
1291 std::shared_lock owner_lock{m_image_ctx.owner_lock};
1292 r = prepare_image_update(exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
1293 true);
1294 if (r < 0) {
1295 return r;
1296 }
1297
1298 execute_snap_set_limit(limit, &limit_ctx);
1299 }
1300
1301 r = limit_ctx.wait();
1302 return r;
1303 }
1304
1305 template <typename I>
1306 void Operations<I>::execute_snap_set_limit(const uint64_t limit,
1307 Context *on_finish) {
1308 ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
1309
1310 CephContext *cct = m_image_ctx.cct;
1311 ldout(cct, 5) << this << " " << __func__ << ": limit=" << limit
1312 << dendl;
1313
1314 operation::SnapshotLimitRequest<I> *request =
1315 new operation::SnapshotLimitRequest<I>(m_image_ctx, on_finish, limit);
1316 request->send();
1317 }
1318
template <typename I>
// Enable or disable a set of image features at runtime.
// Validates that the requested features are mutable, implicitly couples
// object-map and fast-diff, then either runs the update locally (journaling
// enable, which needs local journal options) or proxies it via
// invoke_async_request.  Returns 0 on success, negative errno on failure.
int Operations<I>::update_features(uint64_t features, bool enabled) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": features=" << features
                << ", enabled=" << enabled << dendl;

  int r = m_image_ctx.state->refresh_if_required();
  if (r < 0) {
    return r;
  }

  if (m_image_ctx.read_only) {
    return -EROFS;
  } else if (m_image_ctx.old_format) {
    // format 1 images have no feature bits at all
    lderr(cct) << "old-format images do not support features" << dendl;
    return -EINVAL;
  }

  // some features (e.g. deep-flatten) may be disabled but never re-enabled
  uint64_t disable_mask = (RBD_FEATURES_MUTABLE |
                           RBD_FEATURES_DISABLE_ONLY);
  if ((enabled && (features & RBD_FEATURES_MUTABLE) != features) ||
      (!enabled && (features & disable_mask) != features) ||
      ((features & ~RBD_FEATURES_MUTABLE_INTERNAL) != features)) {
    lderr(cct) << "cannot update immutable features" << dendl;
    return -EINVAL;
  }

  // fast-diff requires object-map; keep the two features coupled by
  // widening the request when only one of the pair was specified
  bool set_object_map = (features & RBD_FEATURE_OBJECT_MAP) == RBD_FEATURE_OBJECT_MAP;
  bool set_fast_diff = (features & RBD_FEATURE_FAST_DIFF) == RBD_FEATURE_FAST_DIFF;
  bool exist_fast_diff = (m_image_ctx.features & RBD_FEATURE_FAST_DIFF) != 0;
  bool exist_object_map = (m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0;

  if ((enabled && ((set_object_map && !exist_fast_diff) || (set_fast_diff && !exist_object_map)))
      || (!enabled && (set_object_map && exist_fast_diff))) {
    features |= (RBD_FEATURE_OBJECT_MAP | RBD_FEATURE_FAST_DIFF);
  }

  if (features == 0) {
    lderr(cct) << "update requires at least one feature" << dendl;
    return -EINVAL;
  }
  {
    // reject no-op requests (feature already in the desired state)
    std::shared_lock image_locker{m_image_ctx.image_lock};
    if (enabled && (features & m_image_ctx.features) != 0) {
      lderr(cct) << "one or more requested features are already enabled"
                 << dendl;
      return -EINVAL;
    }
    if (!enabled && (features & ~m_image_ctx.features) != 0) {
      lderr(cct) << "one or more requested features are already disabled"
                 << dendl;
      return -EINVAL;
    }
  }

  // if disabling journaling, avoid attempting to open the journal
  // when acquiring the exclusive lock in case the journal is corrupt
  bool disabling_journal = false;
  if (!enabled && ((features & RBD_FEATURE_JOURNALING) != 0)) {
    std::unique_lock image_locker{m_image_ctx.image_lock};
    m_image_ctx.set_journal_policy(new journal::DisabledPolicy());
    disabling_journal = true;
  }
  // restore the standard journal policy on every exit path from this scope
  BOOST_SCOPE_EXIT_ALL( (this)(disabling_journal) ) {
    if (disabling_journal) {
      std::unique_lock image_locker{m_image_ctx.image_lock};
      m_image_ctx.set_journal_policy(
        new journal::StandardPolicy<I>(&m_image_ctx));
    }
  };

  // The journal options are not passed to the lock owner in the
  // update features request. Therefore, if journaling is being
  // enabled, the lock should be locally acquired instead of
  // attempting to send the request to the peer.
  if (enabled && (features & RBD_FEATURE_JOURNALING) != 0) {
    C_SaferCond cond_ctx;
    {
      std::shared_lock owner_lock{m_image_ctx.owner_lock};
      r = prepare_image_update(exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
                               true);
      if (r < 0) {
        return r;
      }

      execute_update_features(features, enabled, &cond_ctx, 0);
    }

    r = cond_ctx.wait();
  } else {
    r = invoke_async_request("update_features",
                             exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
                             false,
                             boost::bind(&Operations<I>::execute_update_features,
                                         this, features, enabled, _1, 0),
                             boost::bind(&ImageWatcher<I>::notify_update_features,
                                         m_image_ctx.image_watcher, features,
                                         enabled, _1));
  }
  ldout(cct, 2) << "update_features finished" << dendl;
  return r;
}
1421
1422 template <typename I>
1423 void Operations<I>::execute_update_features(uint64_t features, bool enabled,
1424 Context *on_finish,
1425 uint64_t journal_op_tid) {
1426 ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
1427 ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
1428 m_image_ctx.exclusive_lock->is_lock_owner());
1429
1430 CephContext *cct = m_image_ctx.cct;
1431 ldout(cct, 5) << this << " " << __func__ << ": features=" << features
1432 << ", enabled=" << enabled << dendl;
1433
1434 if (m_image_ctx.operations_disabled) {
1435 on_finish->complete(-EROFS);
1436 return;
1437 }
1438
1439 if (enabled) {
1440 operation::EnableFeaturesRequest<I> *req =
1441 new operation::EnableFeaturesRequest<I>(
1442 m_image_ctx, on_finish, journal_op_tid, features);
1443 req->send();
1444 } else {
1445 operation::DisableFeaturesRequest<I> *req =
1446 new operation::DisableFeaturesRequest<I>(
1447 m_image_ctx, on_finish, journal_op_tid, features, false);
1448 req->send();
1449 }
1450 }
1451
template <typename I>
// Set an image metadata key/value pair.  Keys with the config-override
// prefix are validated against the known rbd options before being stored,
// and trigger an image refresh afterwards so the new setting takes effect
// immediately.  Returns 0 on success, negative errno on failure.
int Operations<I>::metadata_set(const std::string &key,
                                const std::string &value) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 5) << this << " " << __func__ << ": key=" << key << ", value="
                << value << dendl;

  std::string config_key;
  bool config_override = util::is_metadata_config_override(key, &config_key);
  if (config_override) {
    // validate config setting
    if (!librbd::api::Config<I>::is_option_name(&m_image_ctx, config_key)) {
      lderr(cct) << "validation for " << key
                 << " failed: not allowed image level override" << dendl;
      return -EINVAL;
    }
    // dry-run the value against a throw-away proxy to catch parse errors
    // before persisting anything
    int r = ConfigProxy{false}.set_val(config_key.c_str(), value);
    if (r < 0) {
      return r;
    }
  }

  int r = m_image_ctx.state->refresh_if_required();
  if (r < 0) {
    return r;
  }

  if (m_image_ctx.read_only) {
    return -EROFS;
  }

  C_SaferCond metadata_ctx;
  {
    std::shared_lock owner_lock{m_image_ctx.owner_lock};
    // must be (or become) the exclusive lock owner before mutating metadata
    r = prepare_image_update(exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
                             true);
    if (r < 0) {
      return r;
    }

    execute_metadata_set(key, value, &metadata_ctx);
  }

  r = metadata_ctx.wait();
  if (config_override && r >= 0) {
    // apply new config key immediately
    r = m_image_ctx.state->refresh_if_required();
  }

  return r;
}
1503
1504 template <typename I>
1505 void Operations<I>::execute_metadata_set(const std::string &key,
1506 const std::string &value,
1507 Context *on_finish) {
1508 ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
1509
1510 CephContext *cct = m_image_ctx.cct;
1511 ldout(cct, 5) << this << " " << __func__ << ": key=" << key << ", value="
1512 << value << dendl;
1513
1514 if (m_image_ctx.operations_disabled) {
1515 on_finish->complete(-EROFS);
1516 return;
1517 }
1518
1519 operation::MetadataSetRequest<I> *request =
1520 new operation::MetadataSetRequest<I>(m_image_ctx,
1521 new C_NotifyUpdate<I>(m_image_ctx, on_finish),
1522 key, value);
1523 request->send();
1524 }
1525
1526 template <typename I>
1527 int Operations<I>::metadata_remove(const std::string &key) {
1528 CephContext *cct = m_image_ctx.cct;
1529 ldout(cct, 5) << this << " " << __func__ << ": key=" << key << dendl;
1530
1531 int r = m_image_ctx.state->refresh_if_required();
1532 if (r < 0) {
1533 return r;
1534 }
1535
1536 if (m_image_ctx.read_only) {
1537 return -EROFS;
1538 }
1539
1540 std::string value;
1541 r = cls_client::metadata_get(&m_image_ctx.md_ctx, m_image_ctx.header_oid, key, &value);
1542 if(r < 0)
1543 return r;
1544
1545 C_SaferCond metadata_ctx;
1546 {
1547 std::shared_lock owner_lock{m_image_ctx.owner_lock};
1548 r = prepare_image_update(exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
1549 true);
1550 if (r < 0) {
1551 return r;
1552 }
1553
1554 execute_metadata_remove(key, &metadata_ctx);
1555 }
1556
1557 r = metadata_ctx.wait();
1558
1559 std::string config_key;
1560 if (util::is_metadata_config_override(key, &config_key) && r >= 0) {
1561 // apply new config key immediately
1562 r = m_image_ctx.state->refresh_if_required();
1563 }
1564
1565 return r;
1566 }
1567
1568 template <typename I>
1569 void Operations<I>::execute_metadata_remove(const std::string &key,
1570 Context *on_finish) {
1571 ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
1572
1573 CephContext *cct = m_image_ctx.cct;
1574 ldout(cct, 5) << this << " " << __func__ << ": key=" << key << dendl;
1575
1576 if (m_image_ctx.operations_disabled) {
1577 on_finish->complete(-EROFS);
1578 return;
1579 }
1580
1581 operation::MetadataRemoveRequest<I> *request =
1582 new operation::MetadataRemoveRequest<I>(
1583 m_image_ctx,
1584 new C_NotifyUpdate<I>(m_image_ctx, on_finish), key);
1585 request->send();
1586 }
1587
template <typename I>
// Execute the data-copy phase of an in-progress image migration.
// Requires that migration was previously prepared (migration_info present).
// Proxies the operation through invoke_async_request so it runs on the
// exclusive lock owner; progress is reported through prog_ctx.
// Returns 0 on success, negative errno on failure.
int Operations<I>::migrate(ProgressContext &prog_ctx) {
  CephContext *cct = m_image_ctx.cct;
  ldout(cct, 20) << "migrate" << dendl;

  int r = m_image_ctx.state->refresh_if_required();
  if (r < 0) {
    return r;
  }

  if (m_image_ctx.read_only) {
    return -EROFS;
  }

  {
    // fast-path precondition check; re-checked in execute_migrate on the
    // lock owner
    std::shared_lock image_locker{m_image_ctx.image_lock};
    if (m_image_ctx.migration_info.empty()) {
      lderr(cct) << "image has no migrating parent" << dendl;
      return -EINVAL;
    }
  }

  // request id lets the remote peer de-duplicate resent async requests
  uint64_t request_id = ++m_async_request_seq;
  r = invoke_async_request("migrate",
                           exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
                           false,
                           boost::bind(&Operations<I>::execute_migrate, this,
                                       boost::ref(prog_ctx), _1),
                           boost::bind(&ImageWatcher<I>::notify_migrate,
                                       m_image_ctx.image_watcher, request_id,
                                       boost::ref(prog_ctx), _1));

  // -EINVAL can indicate the migration already completed elsewhere
  if (r < 0 && r != -EINVAL) {
    return r;
  }
  ldout(cct, 20) << "migrate finished" << dendl;
  return 0;
}
1626
1627 template <typename I>
1628 void Operations<I>::execute_migrate(ProgressContext &prog_ctx,
1629 Context *on_finish) {
1630 ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
1631 ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
1632 m_image_ctx.exclusive_lock->is_lock_owner());
1633
1634 CephContext *cct = m_image_ctx.cct;
1635 ldout(cct, 20) << "migrate" << dendl;
1636
1637 if (m_image_ctx.read_only || m_image_ctx.operations_disabled) {
1638 on_finish->complete(-EROFS);
1639 return;
1640 }
1641
1642 m_image_ctx.image_lock.lock_shared();
1643
1644 if (m_image_ctx.migration_info.empty()) {
1645 lderr(cct) << "image has no migrating parent" << dendl;
1646 m_image_ctx.image_lock.unlock_shared();
1647 on_finish->complete(-EINVAL);
1648 return;
1649 }
1650 if (m_image_ctx.snap_id != CEPH_NOSNAP) {
1651 lderr(cct) << "snapshots cannot be migrated" << dendl;
1652 m_image_ctx.image_lock.unlock_shared();
1653 on_finish->complete(-EROFS);
1654 return;
1655 }
1656
1657 m_image_ctx.image_lock.unlock_shared();
1658
1659 operation::MigrateRequest<I> *req = new operation::MigrateRequest<I>(
1660 m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish), prog_ctx);
1661 req->send();
1662 }
1663
1664 template <typename I>
1665 int Operations<I>::sparsify(size_t sparse_size, ProgressContext &prog_ctx) {
1666 CephContext *cct = m_image_ctx.cct;
1667 ldout(cct, 20) << "sparsify" << dendl;
1668
1669 if (sparse_size < 4096 || sparse_size > m_image_ctx.get_object_size() ||
1670 (sparse_size & (sparse_size - 1)) != 0) {
1671 lderr(cct) << "sparse size should be power of two not less than 4096"
1672 << " and not larger image object size" << dendl;
1673 return -EINVAL;
1674 }
1675
1676 uint64_t request_id = ++m_async_request_seq;
1677 int r = invoke_async_request("sparsify",
1678 exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
1679 false,
1680 boost::bind(&Operations<I>::execute_sparsify,
1681 this, sparse_size,
1682 boost::ref(prog_ctx), _1),
1683 boost::bind(&ImageWatcher<I>::notify_sparsify,
1684 m_image_ctx.image_watcher,
1685 request_id, sparse_size,
1686 boost::ref(prog_ctx), _1));
1687 if (r < 0 && r != -EINVAL) {
1688 return r;
1689 }
1690 ldout(cct, 20) << "resparsify finished" << dendl;
1691 return 0;
1692 }
1693
1694 template <typename I>
1695 void Operations<I>::execute_sparsify(size_t sparse_size,
1696 ProgressContext &prog_ctx,
1697 Context *on_finish) {
1698 ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
1699 ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
1700 m_image_ctx.exclusive_lock->is_lock_owner());
1701
1702 CephContext *cct = m_image_ctx.cct;
1703 ldout(cct, 20) << "sparsify" << dendl;
1704
1705 if (m_image_ctx.operations_disabled) {
1706 on_finish->complete(-EROFS);
1707 return;
1708 }
1709
1710 auto req = new operation::SparsifyRequest<I>(
1711 m_image_ctx, sparse_size, new C_NotifyUpdate<I>(m_image_ctx, on_finish),
1712 prog_ctx);
1713 req->send();
1714 }
1715
template <typename I>
// Ensure this client is allowed to perform a local maintenance operation.
// Called with owner_lock held shared and returns with it held shared again;
// internally the lock is dropped and re-taken exclusively so the exclusive
// lock can be (try-)acquired when we are not currently an accepting owner.
// Returns 0 when the update may proceed (including lock-less images),
// negative errno otherwise.
int Operations<I>::prepare_image_update(
    exclusive_lock::OperationRequestType request_type, bool request_lock) {
  ceph_assert(ceph_mutex_is_rlocked(m_image_ctx.owner_lock));
  if (m_image_ctx.image_watcher == nullptr) {
    return -EROFS;
  }

  // need to upgrade to a write lock
  C_SaferCond ctx;
  m_image_ctx.owner_lock.unlock_shared();
  bool attempting_lock = false;
  {
    std::unique_lock owner_locker{m_image_ctx.owner_lock};
    if (m_image_ctx.exclusive_lock != nullptr &&
        (!m_image_ctx.exclusive_lock->is_lock_owner() ||
         !m_image_ctx.exclusive_lock->accept_request(request_type, nullptr))) {

      attempting_lock = true;
      // block incoming peer requests while the acquisition is in flight
      m_image_ctx.exclusive_lock->block_requests(0);

      if (request_lock) {
        // forcibly request the lock from the current owner
        m_image_ctx.exclusive_lock->acquire_lock(&ctx);
      } else {
        // opportunistic: only take the lock if it is not otherwise owned
        m_image_ctx.exclusive_lock->try_acquire_lock(&ctx);
      }
    }
  }

  int r = 0;
  if (attempting_lock) {
    // wait outside the owner_lock: acquisition completes asynchronously
    r = ctx.wait();
  }

  m_image_ctx.owner_lock.lock_shared();
  if (attempting_lock && m_image_ctx.exclusive_lock != nullptr) {
    m_image_ctx.exclusive_lock->unblock_requests();
  }

  // failing to win the lock is not fatal here; the unlocked-op check below
  // decides whether the caller may proceed without ownership
  if (r == -EAGAIN || r == -EBUSY) {
    r = 0;
  }
  if (r < 0) {
    return r;
  } else if (m_image_ctx.exclusive_lock != nullptr &&
             !m_image_ctx.exclusive_lock->is_lock_owner()) {
    return m_image_ctx.exclusive_lock->get_unlocked_op_error();
  }

  return 0;
}
1767
1768 template <typename I>
1769 int Operations<I>::invoke_async_request(
1770 const std::string& name, exclusive_lock::OperationRequestType request_type,
1771 bool permit_snapshot, const boost::function<void(Context*)>& local_request,
1772 const boost::function<void(Context*)>& remote_request) {
1773 C_SaferCond ctx;
1774 C_InvokeAsyncRequest<I> *req = new C_InvokeAsyncRequest<I>(m_image_ctx, name,
1775 request_type,
1776 permit_snapshot,
1777 local_request,
1778 remote_request,
1779 {}, &ctx);
1780 req->send();
1781 return ctx.wait();
1782 }
1783
1784 } // namespace librbd
1785
1786 template class librbd::Operations<librbd::ImageCtx>;