// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "librbd/operation/MigrateRequest.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/AsyncObjectThrottle.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/deep_copy/ObjectCopyRequest.h"
#include "librbd/io/AsyncOperation.h"
#include "librbd/io/ImageDispatcherInterface.h"
#include "librbd/io/ObjectRequest.h"
#include "osdc/Striper.h"
#include <boost/lambda/bind.hpp>
#include <boost/lambda/construct.hpp>

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::MigrateRequest: " << this << " " \
                           << __func__ << ": "

namespace librbd {
namespace operation {

using util::create_context_callback;
using util::create_async_context_callback;

namespace {

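// Per-object throttle unit: migrates a single backing object, either via a
// zero-length copy-up write (for objects within the migration overlap) or via
// a deep copy from the parent image (for objects beyond it).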
template <typename I>
class C_MigrateObject : public C_AsyncObjectThrottle<I> {
public:
  C_MigrateObject(AsyncObjectThrottle<I> &throttle, I *image_ctx,
                  IOContext io_context, uint64_t object_no)
    : C_AsyncObjectThrottle<I>(throttle, *image_ctx), m_io_context(io_context),
      m_object_no(object_no) {
  }

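  // Entry point invoked by the throttle: aborts with -ERESTART if the
  // exclusive lock was lost, otherwise starts tracking an async op for this
  // object.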
  int send() override {
    I &image_ctx = this->m_image_ctx;
    ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
    CephContext *cct = image_ctx.cct;

    if (image_ctx.exclusive_lock != nullptr &&
        !image_ctx.exclusive_lock->is_lock_owner()) {
      ldout(cct, 1) << "lost exclusive lock during migrate" << dendl;
      return -ERESTART;
    }

    start_async_op();
    return 0;
  }

private:
  IOContext m_io_context;
  uint64_t m_object_no;

  io::AsyncOperation *m_async_op = nullptr;

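  // Registers an in-flight AsyncOperation and migrates the object; if image
  // writes are currently blocked, the op is released and retried once writes
  // are unblocked.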
  void start_async_op() {
    I &image_ctx = this->m_image_ctx;
    ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
    CephContext *cct = image_ctx.cct;
    ldout(cct, 10) << dendl;

    ceph_assert(m_async_op == nullptr);
    m_async_op = new io::AsyncOperation();
    m_async_op->start_op(image_ctx);

    if (!image_ctx.io_image_dispatcher->writes_blocked()) {
      migrate_object();
      return;
    }

    auto ctx = create_async_context_callback(
      image_ctx, create_context_callback<
        C_MigrateObject<I>, &C_MigrateObject<I>::handle_start_async_op>(this));
    m_async_op->finish_op();
    delete m_async_op;
    m_async_op = nullptr;
    image_ctx.io_image_dispatcher->wait_on_writes_unblocked(ctx);
  }

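  // Called once writes are unblocked: re-acquires the owner lock and retries
  // start_async_op().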
  void handle_start_async_op(int r) {
    I &image_ctx = this->m_image_ctx;
    CephContext *cct = image_ctx.cct;
    ldout(cct, 10) << "r=" << r << dendl;

    if (r < 0) {
      lderr(cct) << "failed to start async op: " << cpp_strerror(r) << dendl;
      this->complete(r);
      return;
    }

    std::shared_lock owner_locker{image_ctx.owner_lock};
    start_async_op();
  }

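  // Returns true if this object number lies within the migration overlap
  // (clamped to the current image size).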
  bool is_within_overlap_bounds() {
    I &image_ctx = this->m_image_ctx;
    std::shared_lock image_locker{image_ctx.image_lock};

    auto overlap = std::min(image_ctx.size, image_ctx.migration_info.overlap);
    return overlap > 0 &&
      Striper::get_num_objects(image_ctx.layout, overlap) > m_object_no;
  }

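  // Issues the per-object request: a zero-length write (copy-up path) for
  // objects within the overlap, otherwise a deep copy from the parent image,
  // flattening if requested by the migration info.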
  void migrate_object() {
    I &image_ctx = this->m_image_ctx;
    ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
    CephContext *cct = image_ctx.cct;

    auto ctx = create_context_callback<
      C_MigrateObject<I>, &C_MigrateObject<I>::handle_migrate_object>(this);

    if (is_within_overlap_bounds()) {
      bufferlist bl;
      auto req = new io::ObjectWriteRequest<I>(&image_ctx, m_object_no, 0,
                                               std::move(bl), m_io_context, 0,
                                               0, std::nullopt, {}, ctx);

      ldout(cct, 20) << "copyup object req " << req << ", object_no "
                     << m_object_no << dendl;

      req->send();
    } else {
      ceph_assert(image_ctx.parent != nullptr);

      uint32_t flags = deep_copy::OBJECT_COPY_REQUEST_FLAG_MIGRATION;
      if (image_ctx.migration_info.flatten) {
        flags |= deep_copy::OBJECT_COPY_REQUEST_FLAG_FLATTEN;
      }

      auto req = deep_copy::ObjectCopyRequest<I>::create(
        image_ctx.parent, &image_ctx, 0, 0, image_ctx.migration_info.snap_map,
        m_object_no, flags, nullptr, ctx);

      ldout(cct, 20) << "deep copy object req " << req << ", object_no "
                     << m_object_no << dendl;
      req->send();
    }
  }

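  // Completion for either request type; a missing object (-ENOENT) is treated
  // as success.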
  void handle_migrate_object(int r) {
    CephContext *cct = this->m_image_ctx.cct;
    ldout(cct, 10) << "r=" << r << dendl;

    if (r == -ENOENT) {
      r = 0;
    }

    m_async_op->finish_op();
    delete m_async_op;
    this->complete(r);
  }
};

} // anonymous namespace

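// Entry point of the operation: fan out over all objects covered by the
// migration overlap.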
template <typename I>
void MigrateRequest<I>::send_op() {
  I &image_ctx = this->m_image_ctx;
  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
  CephContext *cct = image_ctx.cct;
  ldout(cct, 10) << dendl;

  migrate_objects();
}

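// The throttle drives a single pass over the objects, so the request
// completes as soon as it finishes; errors are only logged here.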
template <typename I>
bool MigrateRequest<I>::should_complete(int r) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 10) << "r=" << r << dendl;

  if (r < 0) {
    lderr(cct) << "encountered error: " << cpp_strerror(r) << dendl;
  }

  return true;
}

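// Spreads C_MigrateObject contexts over objects [0, overlap_objects) using an
// AsyncObjectThrottle bounded by rbd_concurrent_management_ops.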
template <typename I>
void MigrateRequest<I>::migrate_objects() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));

  uint64_t overlap_objects = get_num_overlap_objects();

  ldout(cct, 10) << "from 0 to " << overlap_objects << dendl;

  auto ctx = create_context_callback<
    MigrateRequest<I>, &MigrateRequest<I>::handle_migrate_objects>(this);

  typename AsyncObjectThrottle<I>::ContextFactory context_factory(
    boost::lambda::bind(boost::lambda::new_ptr<C_MigrateObject<I> >(),
      boost::lambda::_1, &image_ctx, image_ctx.get_data_io_context(),
      boost::lambda::_2));
  AsyncObjectThrottle<I> *throttle = new AsyncObjectThrottle<I>(
    this, image_ctx, context_factory, ctx, &m_prog_ctx, 0, overlap_objects);
  throttle->start_ops(
    image_ctx.config.template get_val<uint64_t>("rbd_concurrent_management_ops"));
}

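// Completion of the object throttle; the result is passed through to the
// caller.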
template <typename I>
void MigrateRequest<I>::handle_migrate_objects(int r) {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 5) << "r=" << r << dendl;

  if (r < 0) {
    lderr(cct) << "failed to migrate objects: " << cpp_strerror(r) << dendl;
  }

  this->complete(r);
}

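// Number of backing objects covered by the migration overlap.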
template <typename I>
uint64_t MigrateRequest<I>::get_num_overlap_objects() {
  I &image_ctx = this->m_image_ctx;
  CephContext *cct = image_ctx.cct;
  ldout(cct, 10) << dendl;

  std::shared_lock image_locker{image_ctx.image_lock};

  auto overlap = image_ctx.migration_info.overlap;

  return overlap > 0 ?
    Striper::get_num_objects(image_ctx.layout, overlap) : 0;
}

} // namespace operation
} // namespace librbd

template class librbd::operation::MigrateRequest<librbd::ImageCtx>;