// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "librbd/crypto/CryptoObjectDispatch.h"
#include "include/ceph_assert.h"
#include "include/neorados/RADOS.hpp"
#include "common/dout.h"
#include "osdc/Striper.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/crypto/CryptoInterface.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#include "librbd/io/ObjectDispatchSpec.h"
#include "librbd/io/ReadResult.h"
#include "librbd/io/Utils.h"

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::crypto::CryptoObjectDispatch: " \
                           << this << " " << __func__ << ": "

namespace librbd {
namespace crypto {

using librbd::util::create_context_callback;
using librbd::util::data_object_name;

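// Map an (object_no, object_off) pair back to its offset within the image's
// data area; this is the offset the crypto routines take when encrypting or
// decrypting the corresponding blocks.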
template <typename I>
uint64_t get_file_offset(I* image_ctx, uint64_t object_no,
                         uint64_t object_off) {
  auto off = io::util::raw_to_area_offset(
      *image_ctx, Striper::get_file_offset(image_ctx->cct, &image_ctx->layout,
                                           object_no, object_off));
  ceph_assert(off.second == io::ImageArea::DATA);
  return off.first;
}

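// Reads crypto-block-aligned extents at this layer and decrypts them in
// place. Parent reads are disabled on the dispatched read so that a missing
// object can be detected here; if the caller allowed reading from the
// parent, the request then falls back to io::util::read_parent().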
template <typename I>
struct C_AlignedObjectReadRequest : public Context {
  I* image_ctx;
  CryptoInterface* crypto;
  uint64_t object_no;
  io::ReadExtents* extents;
  IOContext io_context;
  const ZTracer::Trace parent_trace;
  uint64_t* version;
  Context* on_finish;
  io::ObjectDispatchSpec* req;
  bool disable_read_from_parent;

  C_AlignedObjectReadRequest(
          I* image_ctx, CryptoInterface* crypto,
          uint64_t object_no, io::ReadExtents* extents, IOContext io_context,
          int op_flags, int read_flags, const ZTracer::Trace &parent_trace,
          uint64_t* version, int* object_dispatch_flags,
          Context* on_dispatched
          ) : image_ctx(image_ctx), crypto(crypto), object_no(object_no),
              extents(extents), io_context(io_context),
              parent_trace(parent_trace), version(version),
              on_finish(on_dispatched) {
    disable_read_from_parent =
            ((read_flags & io::READ_FLAG_DISABLE_READ_FROM_PARENT) != 0);
    read_flags |= io::READ_FLAG_DISABLE_READ_FROM_PARENT;

    auto ctx = create_context_callback<
            C_AlignedObjectReadRequest<I>,
            &C_AlignedObjectReadRequest<I>::handle_read>(this);

    req = io::ObjectDispatchSpec::create_read(
            image_ctx, io::OBJECT_DISPATCH_LAYER_CRYPTO, object_no,
            extents, io_context, op_flags, read_flags, parent_trace,
            version, ctx);
  }

  void send() {
    req->send();
  }

  void finish(int r) override {
    ldout(image_ctx->cct, 20) << "aligned read r=" << r << dendl;
    on_finish->complete(r);
  }

  void handle_read(int r) {
    auto cct = image_ctx->cct;
    ldout(cct, 20) << "aligned read r=" << r << dendl;
    if (r >= 0) {
      r = 0;
      for (auto& extent: *extents) {
        auto crypto_ret = crypto->decrypt_aligned_extent(
                extent, get_file_offset(image_ctx, object_no, extent.offset));
        if (crypto_ret != 0) {
          ceph_assert(crypto_ret < 0);
          r = crypto_ret;
          break;
        }
        r += extent.length;
      }
    }

    if (r == -ENOENT && !disable_read_from_parent) {
      io::util::read_parent<I>(
              image_ctx, object_no, extents,
              io_context->read_snap().value_or(CEPH_NOSNAP),
              parent_trace, this);
    } else {
      complete(r);
    }
  }
};

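// Handles reads that are not crypto-block aligned: each extent is expanded
// to aligned boundaries, the aligned read is re-dispatched so it passes back
// through this layer and gets decrypted, and remove_alignment_data() then
// strips the padding so the caller sees exactly the requested ranges.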
template <typename I>
struct C_UnalignedObjectReadRequest : public Context {
  CephContext* cct;
  io::ReadExtents* extents;
  Context* on_finish;
  io::ReadExtents aligned_extents;
  io::ObjectDispatchSpec* req;

  C_UnalignedObjectReadRequest(
          I* image_ctx, CryptoInterface* crypto,
          uint64_t object_no, io::ReadExtents* extents, IOContext io_context,
          int op_flags, int read_flags, const ZTracer::Trace &parent_trace,
          uint64_t* version, int* object_dispatch_flags,
          Context* on_dispatched) : cct(image_ctx->cct), extents(extents),
                                    on_finish(on_dispatched) {
    crypto->align_extents(*extents, &aligned_extents);

    // send the aligned read back to get decrypted
    req = io::ObjectDispatchSpec::create_read(
            image_ctx,
            io::util::get_previous_layer(io::OBJECT_DISPATCH_LAYER_CRYPTO),
            object_no, &aligned_extents, io_context, op_flags, read_flags,
            parent_trace, version, this);
  }

  void send() {
    req->send();
  }

  void remove_alignment_data() {
    for (uint64_t i = 0; i < extents->size(); ++i) {
      auto& extent = (*extents)[i];
      auto& aligned_extent = aligned_extents[i];
      if (aligned_extent.extent_map.empty()) {
        uint64_t cut_offset = extent.offset - aligned_extent.offset;
        int64_t padding_count =
                cut_offset + extent.length - aligned_extent.bl.length();
        if (padding_count > 0) {
          aligned_extent.bl.append_zero(padding_count);
        }
        aligned_extent.bl.splice(cut_offset, extent.length, &extent.bl);
      } else {
        for (auto [off, len]: aligned_extent.extent_map) {
          ceph::bufferlist tmp;
          aligned_extent.bl.splice(0, len, &tmp);

          uint64_t bytes_to_skip = 0;
          if (off < extent.offset) {
            bytes_to_skip = extent.offset - off;
            if (len <= bytes_to_skip) {
              continue;
            }
            off += bytes_to_skip;
            len -= bytes_to_skip;
          }

          len = std::min(len, extent.offset + extent.length - off);
          if (len == 0) {
            continue;
          }

          tmp.splice(bytes_to_skip, len, &extent.bl);
          extent.extent_map.emplace_back(off, len);
        }
      }
    }
  }

  void finish(int r) override {
    ldout(cct, 20) << "unaligned read r=" << r << dendl;
    if (r >= 0) {
      remove_alignment_data();

      r = 0;
      for (auto& extent: *extents) {
        r += extent.length;
      }
    }
    on_finish->complete(r);
  }
};

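// Read-modify-write cycle for unaligned writes and compare-and-write: read
// and decrypt the surrounding crypto blocks, apply create-exclusive,
// assert-version and compare checks against the plaintext, then send the
// realigned buffer back through this layer to be encrypted and written. An
// internal version assertion detects racing writes to the same blocks and
// restarts the request when one occurs.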
template <typename I>
struct C_UnalignedObjectWriteRequest : public Context {
  I* image_ctx;
  CryptoInterface* crypto;
  uint64_t object_no;
  uint64_t object_off;
  ceph::bufferlist data;
  ceph::bufferlist cmp_data;
  uint64_t* mismatch_offset;
  IOContext io_context;
  int op_flags;
  int write_flags;
  std::optional<uint64_t> assert_version;
  const ZTracer::Trace parent_trace;
  int* object_dispatch_flags;
  uint64_t* journal_tid;
  Context* on_finish;
  bool may_copyup;
  ceph::bufferlist aligned_data;
  io::ReadExtents extents;
  uint64_t version;
  C_UnalignedObjectReadRequest<I>* read_req;
  bool object_exists;

  C_UnalignedObjectWriteRequest(
          I* image_ctx, CryptoInterface* crypto,
          uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
          ceph::bufferlist&& cmp_data, uint64_t* mismatch_offset,
          IOContext io_context, int op_flags, int write_flags,
          std::optional<uint64_t> assert_version,
          const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
          uint64_t* journal_tid, Context* on_dispatched, bool may_copyup
          ) : image_ctx(image_ctx), crypto(crypto), object_no(object_no),
              object_off(object_off), data(data), cmp_data(cmp_data),
              mismatch_offset(mismatch_offset), io_context(io_context),
              op_flags(op_flags), write_flags(write_flags),
              assert_version(assert_version), parent_trace(parent_trace),
              object_dispatch_flags(object_dispatch_flags),
              journal_tid(journal_tid), on_finish(on_dispatched),
              may_copyup(may_copyup) {
    // build read extents
    auto [pre_align, post_align] = crypto->get_pre_and_post_align(
            object_off, data.length());
    if (pre_align != 0) {
      extents.emplace_back(object_off - pre_align, pre_align);
    }
    if (post_align != 0) {
      extents.emplace_back(object_off + data.length(), post_align);
    }
    if (cmp_data.length() != 0) {
      extents.emplace_back(object_off, cmp_data.length());
    }

    auto ctx = create_context_callback<
            C_UnalignedObjectWriteRequest<I>,
            &C_UnalignedObjectWriteRequest<I>::handle_read>(this);

    read_req = new C_UnalignedObjectReadRequest<I>(
            image_ctx, crypto, object_no, &extents, io_context,
            0, io::READ_FLAG_DISABLE_READ_FROM_PARENT, parent_trace,
            &version, 0, ctx);
  }

  void send() {
    read_req->send();
  }

  bool check_cmp_data() {
    if (cmp_data.length() == 0) {
      return true;
    }

    auto& cmp_extent = extents.back();
    io::util::unsparsify(image_ctx->cct, &cmp_extent.bl,
                         cmp_extent.extent_map, cmp_extent.offset,
                         cmp_extent.length);

    std::optional<uint64_t> found_mismatch = std::nullopt;

    auto it1 = cmp_data.cbegin();
    auto it2 = cmp_extent.bl.cbegin();
    for (uint64_t idx = 0; idx < cmp_data.length(); ++idx) {
      if (*it1 != *it2) {
        found_mismatch = std::make_optional(idx);
        break;
      }
      ++it1;
      ++it2;
    }

    extents.pop_back();

    if (found_mismatch.has_value()) {
      if (mismatch_offset != nullptr) {
        *mismatch_offset = found_mismatch.value();
      }
      complete(-EILSEQ);
      return false;
    }

    return true;
  }

  bool check_create_exclusive() {
    bool exclusive =
            ((write_flags & io::OBJECT_WRITE_FLAG_CREATE_EXCLUSIVE) != 0);
    if (exclusive && object_exists) {
      complete(-EEXIST);
      return false;
    }
    return true;
  }

  bool check_version() {
    int r = 0;
    if (assert_version.has_value()) {
      if (!object_exists) {
        r = -ENOENT;
      } else if (assert_version.value() < version) {
        r = -ERANGE;
      } else if (assert_version.value() > version) {
        r = -EOVERFLOW;
      }
    }

    if (r != 0) {
      complete(r);
      return false;
    }
    return true;
  }

  void build_aligned_data() {
    auto [pre_align, post_align] = crypto->get_pre_and_post_align(
            object_off, data.length());
    if (pre_align != 0) {
      auto &extent = extents.front();
      io::util::unsparsify(image_ctx->cct, &extent.bl, extent.extent_map,
                           extent.offset, extent.length);
      extent.bl.splice(0, pre_align, &aligned_data);
    }
    aligned_data.append(data);
    if (post_align != 0) {
      auto &extent = extents.back();
      io::util::unsparsify(image_ctx->cct, &extent.bl, extent.extent_map,
                           extent.offset, extent.length);
      extent.bl.splice(0, post_align, &aligned_data);
    }
  }

  void handle_copyup(int r) {
    ldout(image_ctx->cct, 20) << "r=" << r << dendl;
    if (r < 0) {
      complete(r);
    } else {
      restart_request(false);
    }
  }

  void handle_read(int r) {
    ldout(image_ctx->cct, 20) << "unaligned write r=" << r << dendl;

    if (r == -ENOENT) {
      if (may_copyup) {
        auto ctx = create_context_callback<
                C_UnalignedObjectWriteRequest<I>,
                &C_UnalignedObjectWriteRequest<I>::handle_copyup>(this);
        if (io::util::trigger_copyup(
                image_ctx, object_no, io_context, ctx)) {
          return;
        }
        delete ctx;
      }
      object_exists = false;
    } else if (r < 0) {
      complete(r);
      return;
    } else {
      object_exists = true;
    }

    if (!check_create_exclusive() || !check_version() || !check_cmp_data()) {
      return;
    }

    build_aligned_data();

    auto aligned_off = crypto->align(object_off, data.length()).first;
    auto new_write_flags = write_flags;
    auto new_assert_version = std::make_optional(version);
    if (!object_exists) {
      new_write_flags |= io::OBJECT_WRITE_FLAG_CREATE_EXCLUSIVE;
      new_assert_version = std::nullopt;
    }

    auto ctx = create_context_callback<
            C_UnalignedObjectWriteRequest<I>,
            &C_UnalignedObjectWriteRequest<I>::handle_write>(this);

    // send the aligned write back to get encrypted and committed
    auto write_req = io::ObjectDispatchSpec::create_write(
            image_ctx,
            io::util::get_previous_layer(io::OBJECT_DISPATCH_LAYER_CRYPTO),
            object_no, aligned_off, std::move(aligned_data), io_context,
            op_flags, new_write_flags, new_assert_version,
            journal_tid == nullptr ? 0 : *journal_tid, parent_trace, ctx);
    write_req->send();
  }

  void restart_request(bool may_copyup) {
    auto req = new C_UnalignedObjectWriteRequest<I>(
            image_ctx, crypto, object_no, object_off,
            std::move(data), std::move(cmp_data),
            mismatch_offset, io_context, op_flags, write_flags,
            assert_version, parent_trace,
            object_dispatch_flags, journal_tid, this, may_copyup);
    req->send();
  }

  void handle_write(int r) {
    ldout(image_ctx->cct, 20) << "r=" << r << dendl;
    bool exclusive = write_flags & io::OBJECT_WRITE_FLAG_CREATE_EXCLUSIVE;
    bool restart = false;
    if (r == -ERANGE && !assert_version.has_value()) {
      restart = true;
    } else if (r == -EEXIST && !exclusive) {
      restart = true;
    }

    if (restart) {
      restart_request(may_copyup);
    } else {
      complete(r);
    }
  }

  void finish(int r) override {
    ldout(image_ctx->cct, 20) << "unaligned write r=" << r << dendl;
    on_finish->complete(r);
  }
};

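// Objects that precede the data offset belong to the encryption header, not
// to image data, so every hook below passes them through to the next layer
// untouched.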
template <typename I>
CryptoObjectDispatch<I>::CryptoObjectDispatch(
    I* image_ctx, CryptoInterface* crypto)
  : m_image_ctx(image_ctx), m_crypto(crypto) {
  m_data_offset_object_no = Striper::get_num_objects(image_ctx->layout,
                                                     crypto->get_data_offset());
}

template <typename I>
void CryptoObjectDispatch<I>::shut_down(Context* on_finish) {
  on_finish->complete(0);
}

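// Aligned reads can be decrypted directly; unaligned reads take the
// align-read-trim path.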
template <typename I>
bool CryptoObjectDispatch<I>::read(
    uint64_t object_no, io::ReadExtents* extents, IOContext io_context,
    int op_flags, int read_flags, const ZTracer::Trace &parent_trace,
    uint64_t* version, int* object_dispatch_flags,
    io::DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  if (object_no < m_data_offset_object_no) {
    return false;
  }

  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
                 << *extents << dendl;
  ceph_assert(m_crypto != nullptr);

  *dispatch_result = io::DISPATCH_RESULT_COMPLETE;
  if (m_crypto->is_aligned(*extents)) {
    auto req = new C_AlignedObjectReadRequest<I>(
        m_image_ctx, m_crypto, object_no, extents, io_context,
        op_flags, read_flags, parent_trace, version, object_dispatch_flags,
        on_dispatched);
    req->send();
  } else {
    auto req = new C_UnalignedObjectReadRequest<I>(
        m_image_ctx, m_crypto, object_no, extents, io_context,
        op_flags, read_flags, parent_trace, version, object_dispatch_flags,
        on_dispatched);
    req->send();
  }

  return true;
}

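// An aligned write is encrypted in place and continues down the dispatcher;
// anything unaligned requires the read-modify-write request above.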
template <typename I>
bool CryptoObjectDispatch<I>::write(
    uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
    IOContext io_context, int op_flags, int write_flags,
    std::optional<uint64_t> assert_version,
    const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
    uint64_t* journal_tid, io::DispatchResult* dispatch_result,
    Context** on_finish, Context* on_dispatched) {
  if (object_no < m_data_offset_object_no) {
    return false;
  }

  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
                 << object_off << "~" << data.length() << dendl;
  ceph_assert(m_crypto != nullptr);

  if (m_crypto->is_aligned(object_off, data.length())) {
    auto r = m_crypto->encrypt(
        &data, get_file_offset(m_image_ctx, object_no, object_off));
    *dispatch_result = r == 0 ? io::DISPATCH_RESULT_CONTINUE
                              : io::DISPATCH_RESULT_COMPLETE;
    on_dispatched->complete(r);
  } else {
    *dispatch_result = io::DISPATCH_RESULT_COMPLETE;
    auto req = new C_UnalignedObjectWriteRequest<I>(
        m_image_ctx, m_crypto, object_no, object_off, std::move(data), {},
        nullptr, io_context, op_flags, write_flags, assert_version,
        parent_trace, object_dispatch_flags, journal_tid, on_dispatched,
        true);
    req->send();
  }

  return true;
}

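// A write-same pattern would no longer repeat once encrypted, so expand it
// into a regular write of the fully assembled buffer instead of passing the
// write-same down.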
template <typename I>
bool CryptoObjectDispatch<I>::write_same(
    uint64_t object_no, uint64_t object_off, uint64_t object_len,
    io::LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data,
    IOContext io_context, int op_flags,
    const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
    uint64_t* journal_tid, io::DispatchResult* dispatch_result,
    Context** on_finish, Context* on_dispatched) {
  if (object_no < m_data_offset_object_no) {
    return false;
  }

  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
                 << object_off << "~" << object_len << dendl;
  ceph_assert(m_crypto != nullptr);

  // convert to regular write
  io::LightweightObjectExtent extent(object_no, object_off, object_len, 0);
  extent.buffer_extents = std::move(buffer_extents);

  bufferlist ws_data;
  io::util::assemble_write_same_extent(extent, data, &ws_data, true);

  auto ctx = new LambdaContext(
      [on_finish_ctx=on_dispatched](int r) {
          on_finish_ctx->complete(r);
      });

  *dispatch_result = io::DISPATCH_RESULT_COMPLETE;
  auto req = io::ObjectDispatchSpec::create_write(
          m_image_ctx,
          io::util::get_previous_layer(io::OBJECT_DISPATCH_LAYER_CRYPTO),
          object_no, object_off, std::move(ws_data), io_context, op_flags, 0,
          std::nullopt, 0, parent_trace, ctx);
  req->send();
  return true;
}

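// compare_and_write must compare against plaintext, so it is always routed
// through the unaligned write request, which reads and decrypts the target
// range before comparing.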
template <typename I>
bool CryptoObjectDispatch<I>::compare_and_write(
    uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data,
    ceph::bufferlist&& write_data, IOContext io_context, int op_flags,
    const ZTracer::Trace &parent_trace, uint64_t* mismatch_offset,
    int* object_dispatch_flags, uint64_t* journal_tid,
    io::DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  if (object_no < m_data_offset_object_no) {
    return false;
  }

  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
                 << object_off << "~" << write_data.length()
                 << dendl;
  ceph_assert(m_crypto != nullptr);

  *dispatch_result = io::DISPATCH_RESULT_COMPLETE;
  auto req = new C_UnalignedObjectWriteRequest<I>(
      m_image_ctx, m_crypto, object_no, object_off, std::move(write_data),
      std::move(cmp_data), mismatch_offset, io_context, op_flags, 0,
      std::nullopt, parent_trace, object_dispatch_flags, journal_tid,
      on_dispatched, true);
  req->send();

  return true;
}

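// A discarded range would decrypt to garbage on a later read, so discards
// are rewritten as a write-same of zeros, which the write-same hook above
// turns into an encrypted write.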
template <typename I>
bool CryptoObjectDispatch<I>::discard(
    uint64_t object_no, uint64_t object_off, uint64_t object_len,
    IOContext io_context, int discard_flags,
    const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
    uint64_t* journal_tid, io::DispatchResult* dispatch_result,
    Context** on_finish, Context* on_dispatched) {
  if (object_no < m_data_offset_object_no) {
    return false;
  }

  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << data_object_name(m_image_ctx, object_no) << " "
                 << object_off << "~" << object_len << dendl;
  ceph_assert(m_crypto != nullptr);

  // convert to write-same
  auto ctx = new LambdaContext(
      [on_finish_ctx=on_dispatched](int r) {
          on_finish_ctx->complete(r);
      });

  bufferlist bl;
  const int buffer_size = 4096;
  bl.append_zero(buffer_size);

  *dispatch_result = io::DISPATCH_RESULT_COMPLETE;
  auto req = io::ObjectDispatchSpec::create_write_same(
          m_image_ctx,
          io::util::get_previous_layer(io::OBJECT_DISPATCH_LAYER_CRYPTO),
          object_no, object_off, object_len, {{0, object_len}}, std::move(bl),
          io_context, *object_dispatch_flags, 0, parent_trace, ctx);
  req->send();
  return true;
}

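// Copy-up data arrives as plaintext read from the parent image; before it
// can be written to the clone's object it must be realigned to crypto block
// boundaries and encrypted with this image's key, one snapshot at a time.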
template <typename I>
int CryptoObjectDispatch<I>::prepare_copyup(
    uint64_t object_no,
    io::SnapshotSparseBufferlist* snapshot_sparse_bufferlist) {
  if (object_no < m_data_offset_object_no) {
    return 0;
  }

  ceph::bufferlist current_bl;
  current_bl.append_zero(m_image_ctx->get_object_size());

  for (auto& [key, extent_map]: *snapshot_sparse_bufferlist) {
    // update current_bl with data from extent_map
    for (auto& extent : extent_map) {
      auto &sbe = extent.get_val();
      if (sbe.state == io::SPARSE_EXTENT_STATE_DATA) {
        current_bl.begin(extent.get_off()).copy_in(extent.get_len(), sbe.bl);
      } else if (sbe.state == io::SPARSE_EXTENT_STATE_ZEROED) {
        ceph::bufferlist zeros;
        zeros.append_zero(extent.get_len());
        current_bl.begin(extent.get_off()).copy_in(extent.get_len(), zeros);
      }
    }

    // encrypt
    io::SparseBufferlist encrypted_sparse_bufferlist;
    for (auto& extent : extent_map) {
      auto [aligned_off, aligned_len] = m_crypto->align(
          extent.get_off(), extent.get_len());

      auto [image_extents, _] = io::util::object_to_area_extents(
          m_image_ctx, object_no, {{aligned_off, aligned_len}});

      ceph::bufferlist encrypted_bl;
      uint64_t position = 0;
      for (auto [image_offset, image_length]: image_extents) {
        ceph::bufferlist aligned_bl;
        aligned_bl.substr_of(current_bl, aligned_off + position, image_length);
        aligned_bl.rebuild(); // deep-copy so encrypting in place cannot modify current_bl
        position += image_length;

        auto r = m_crypto->encrypt(&aligned_bl, image_offset);
        if (r != 0) {
          return r;
        }

        encrypted_bl.append(aligned_bl);
      }

      encrypted_sparse_bufferlist.insert(
          aligned_off, aligned_len,
          {io::SPARSE_EXTENT_STATE_DATA, aligned_len,
           std::move(encrypted_bl)});
    }

    // replace original plaintext sparse bufferlist with encrypted one
    extent_map.clear();
    extent_map.insert(std::move(encrypted_sparse_bufferlist));
  }

  return 0;
}

} // namespace crypto
} // namespace librbd

template class librbd::crypto::CryptoObjectDispatch<librbd::ImageCtx>;