// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "librbd/io/CopyupRequest.h"
#include "include/neorados/RADOS.hpp"
#include "cls/rbd/cls_rbd_client.h"  // for cls_client::copyup()/sparse_copyup()
#include "common/ceph_context.h"
#include "common/ceph_mutex.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/AsioEngine.h"
#include "librbd/AsyncObjectThrottle.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/asio/Utils.h"
#include "librbd/deep_copy/ObjectCopyRequest.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#include "librbd/io/ObjectRequest.h"
#include "librbd/io/ReadResult.h"
#include "librbd/io/Utils.h"
#include "osdc/Striper.h"  // for Striper::StripedReadResult

#include <boost/lambda/bind.hpp>
#include <boost/lambda/construct.hpp>

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::io::CopyupRequest: " << this \
                           << " " << __func__ << ": " \
                           << data_object_name(m_image_ctx, m_object_no) << " "

namespace librbd {
namespace io {

using librbd::util::data_object_name;

namespace {

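// Helper for AsyncObjectThrottle: each instance updates the object map
// entry of m_object_no for a single snapshot (selected by m_snap_id_idx),
// with CEPH_NOSNAP denoting the HEAD revision.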
template <typename I>
class C_UpdateObjectMap : public C_AsyncObjectThrottle<I> {
public:
  C_UpdateObjectMap(AsyncObjectThrottle<I> &throttle, I *image_ctx,
                    uint64_t object_no, uint8_t head_object_map_state,
                    const std::vector<uint64_t> *snap_ids,
                    bool first_snap_is_clean, const ZTracer::Trace &trace,
                    size_t snap_id_idx)
    : C_AsyncObjectThrottle<I>(throttle, *image_ctx), m_object_no(object_no),
      m_head_object_map_state(head_object_map_state), m_snap_ids(*snap_ids),
      m_first_snap_is_clean(first_snap_is_clean), m_trace(trace),
      m_snap_id_idx(snap_id_idx)
  {
  }

  int send() override {
    auto& image_ctx = this->m_image_ctx;
    ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
    if (image_ctx.exclusive_lock == nullptr) {
      return 1;
    }
    ceph_assert(image_ctx.exclusive_lock->is_lock_owner());

    std::shared_lock image_locker{image_ctx.image_lock};
    if (image_ctx.object_map == nullptr) {
      return 1;
    }

    uint64_t snap_id = m_snap_ids[m_snap_id_idx];
    if (snap_id == CEPH_NOSNAP) {
      return update_head();
    } else {
      return update_snapshot(snap_id);
    }
  }

  int update_head() {
    auto& image_ctx = this->m_image_ctx;
    ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));

    bool sent = image_ctx.object_map->template aio_update<Context>(
      CEPH_NOSNAP, m_object_no, m_head_object_map_state, {}, m_trace, false,
      this);
    return (sent ? 0 : 1);
  }

  int update_snapshot(uint64_t snap_id) {
    auto& image_ctx = this->m_image_ctx;
    ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));

    uint8_t state = OBJECT_EXISTS;
    if (image_ctx.test_features(RBD_FEATURE_FAST_DIFF, image_ctx.image_lock) &&
        (m_snap_id_idx > 0 || m_first_snap_is_clean)) {
      // first snapshot should be exists+dirty since it contains
      // the copyup data -- later snapshots inherit the data.
      state = OBJECT_EXISTS_CLEAN;
    }

    bool sent = image_ctx.object_map->template aio_update<Context>(
      snap_id, m_object_no, state, {}, m_trace, true, this);
    ceph_assert(sent);
    return 0;
  }

private:
  uint64_t m_object_no;
  uint8_t m_head_object_map_state;
  const std::vector<uint64_t> &m_snap_ids;
  bool m_first_snap_is_clean;
  const ZTracer::Trace &m_trace;
  size_t m_snap_id_idx;
};

} // anonymous namespace

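// A CopyupRequest is instantiated by the first object request that needs
// parent data and is shared through ImageCtx::copyup_list; additional
// overlapping requests attach themselves via append_request().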
template <typename I>
CopyupRequest<I>::CopyupRequest(I *ictx, uint64_t objectno,
                                Extents &&image_extents, ImageArea area,
                                const ZTracer::Trace &parent_trace)
  : m_image_ctx(ictx), m_object_no(objectno),
    m_image_extents(std::move(image_extents)), m_image_area(area),
    m_trace(librbd::util::create_trace(*m_image_ctx, "copy-up", parent_trace))
{
  ceph_assert(m_image_ctx->data_ctx.is_valid());
  m_async_op.start_op(*librbd::util::get_image_ctx(m_image_ctx));
}

template <typename I>
CopyupRequest<I>::~CopyupRequest() {
  ceph_assert(m_pending_requests.empty());
  m_async_op.finish_op();
}

template <typename I>
void CopyupRequest<I>::append_request(AbstractObjectWriteRequest<I> *req,
                                      const Extents& object_extents) {
  std::lock_guard locker{m_lock};

  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "object_request=" << req << ", "
                 << "append=" << m_append_request_permitted << dendl;
  if (m_append_request_permitted) {
    m_pending_requests.push_back(req);

    for (auto [offset, length] : object_extents) {
      if (length > 0) {
        m_write_object_extents.union_insert(offset, length);
      }
    }
  } else {
    m_restart_requests.push_back(req);
  }
}

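// Entry point: read the parent data backing this object, or deep-copy it
// when an image migration is in progress.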
template <typename I>
void CopyupRequest<I>::send() {
  read_from_parent();
}

template <typename I>
void CopyupRequest<I>::read_from_parent() {
  auto cct = m_image_ctx->cct;
  std::shared_lock image_locker{m_image_ctx->image_lock};

  if (m_image_ctx->parent == nullptr) {
    ldout(cct, 5) << "parent detached" << dendl;

    m_image_ctx->asio_engine->post(
      [this]() { handle_read_from_parent(-ENOENT); });
    return;
  } else if (is_deep_copy()) {
    deep_copy();
    return;
  }

  auto comp = AioCompletion::create_and_start<
    CopyupRequest<I>,
    &CopyupRequest<I>::handle_read_from_parent>(
      this, librbd::util::get_image_ctx(m_image_ctx->parent), AIO_TYPE_READ);

  ldout(cct, 20) << "completion=" << comp
                 << " image_extents=" << m_image_extents
                 << " area=" << m_image_area << dendl;
  auto req = io::ImageDispatchSpec::create_read(
    *m_image_ctx->parent, io::IMAGE_DISPATCH_LAYER_INTERNAL_START, comp,
    std::move(m_image_extents), m_image_area,
    ReadResult{&m_copyup_extent_map, &m_copyup_data},
    m_image_ctx->parent->get_data_io_context(), 0, 0, m_trace);
  req->send();
}

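// Parent read completed: convert the extent map, prepare the copyup data
// and skip straight to completion if no copyup turns out to be necessary.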
template <typename I>
void CopyupRequest<I>::handle_read_from_parent(int r) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "r=" << r << dendl;

  if (r < 0 && r != -ENOENT) {
    m_lock.lock();
    disable_append_requests();
    m_lock.unlock();

    lderr(cct) << "error reading from parent: " << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }

  convert_copyup_extent_map();

  m_image_ctx->image_lock.lock_shared();
  m_lock.lock();
  disable_append_requests();

  r = prepare_copyup_data();
  if (r < 0) {
    m_lock.unlock();
    m_image_ctx->image_lock.unlock_shared();

    lderr(m_image_ctx->cct) << "failed to prepare copyup data: "
                            << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }

  m_copyup_is_zero = m_copyup_data.is_zero();
  m_copyup_required = is_copyup_required();
  if (!m_copyup_required) {
    m_lock.unlock();
    m_image_ctx->image_lock.unlock_shared();

    ldout(cct, 20) << "no-op, skipping" << dendl;
    finish(0);
    return;
  }

  // copyup() will affect snapshots only if parent data is not all
  // zeroes
  if (!m_copyup_is_zero) {
    m_snap_ids.insert(m_snap_ids.end(), m_image_ctx->snaps.rbegin(),
                      m_image_ctx->snaps.rend());
  }

  m_lock.unlock();
  m_image_ctx->image_lock.unlock_shared();

  update_object_maps();
}

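// Migration path: delegate to deep_copy::ObjectCopyRequest so the object's
// snapshot history is copied along with its data.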
template <typename I>
void CopyupRequest<I>::deep_copy() {
  auto cct = m_image_ctx->cct;
  ceph_assert(ceph_mutex_is_locked(m_image_ctx->image_lock));
  ceph_assert(m_image_ctx->parent != nullptr);

  m_lock.lock();
  m_deep_copied = true;
  m_flatten = is_copyup_required() ? true : m_image_ctx->migration_info.flatten;
  m_lock.unlock();

  ldout(cct, 20) << "flatten=" << m_flatten << dendl;

  uint32_t flags = deep_copy::OBJECT_COPY_REQUEST_FLAG_MIGRATION;
  if (m_flatten) {
    flags |= deep_copy::OBJECT_COPY_REQUEST_FLAG_FLATTEN;
  }

  auto ctx = librbd::util::create_context_callback<
    CopyupRequest<I>, &CopyupRequest<I>::handle_deep_copy>(this);
  auto req = deep_copy::ObjectCopyRequest<I>::create(
    m_image_ctx->parent, m_image_ctx, 0, 0,
    m_image_ctx->migration_info.snap_map, m_object_no, flags, nullptr, ctx);

  req->send();
}

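// Deep-copy completed: on -ENOENT, retry once with flatten enabled if a
// copyup is still required; otherwise continue to the object map update.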
template <typename I>
void CopyupRequest<I>::handle_deep_copy(int r) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "r=" << r << dendl;

  m_image_ctx->image_lock.lock_shared();
  m_lock.lock();
  m_copyup_required = is_copyup_required();
  if (r == -ENOENT && !m_flatten && m_copyup_required) {
    m_lock.unlock();
    m_image_ctx->image_lock.unlock_shared();

    ldout(cct, 10) << "restart deep-copy with flatten" << dendl;
    send();
    return;
  }

  disable_append_requests();

  if (r < 0 && r != -ENOENT) {
    m_lock.unlock();
    m_image_ctx->image_lock.unlock_shared();

    lderr(cct) << "error encountered during deep-copy: " << cpp_strerror(r)
               << dendl;
    finish(r);
    return;
  }

  if (!m_copyup_required && !is_update_object_map_required(r)) {
    m_lock.unlock();
    m_image_ctx->image_lock.unlock_shared();

    if (r == -ENOENT) {
      r = 0;
    }

    ldout(cct, 20) << "skipping" << dendl;
    finish(r);
    return;
  }

  // For deep-copy, copyup() will never affect snapshots. However,
  // this state machine is responsible for updating object maps for
  // snapshots that have been created on destination image after
  // migration started.
  compute_deep_copy_snap_ids();

  m_lock.unlock();
  m_image_ctx->image_lock.unlock_shared();

  update_object_maps();
}

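// Update the object map for the HEAD revision and any snapshots recorded
// in m_snap_ids, throttled by rbd_concurrent_management_ops.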
template <typename I>
void CopyupRequest<I>::update_object_maps() {
  std::shared_lock owner_locker{m_image_ctx->owner_lock};
  std::shared_lock image_locker{m_image_ctx->image_lock};
  if (m_image_ctx->object_map == nullptr) {
    image_locker.unlock();
    owner_locker.unlock();

    copyup();
    return;
  }

  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << dendl;

  bool copy_on_read = m_pending_requests.empty();
  uint8_t head_object_map_state = OBJECT_EXISTS;
  if (copy_on_read && !m_snap_ids.empty() &&
      m_image_ctx->test_features(RBD_FEATURE_FAST_DIFF,
                                 m_image_ctx->image_lock)) {
    // HEAD is non-dirty since data is tied to first snapshot
    head_object_map_state = OBJECT_EXISTS_CLEAN;
  }

  auto r_it = m_pending_requests.rbegin();
  if (r_it != m_pending_requests.rend()) {
    // last write-op determines the final object map state
    head_object_map_state = (*r_it)->get_pre_write_object_map_state();
  }

  if ((*m_image_ctx->object_map)[m_object_no] != head_object_map_state) {
    // (maybe) need to update the HEAD object map state
    m_snap_ids.push_back(CEPH_NOSNAP);
  }
  image_locker.unlock();

  ceph_assert(m_image_ctx->exclusive_lock->is_lock_owner());
  typename AsyncObjectThrottle<I>::ContextFactory context_factory(
    boost::lambda::bind(boost::lambda::new_ptr<C_UpdateObjectMap<I>>(),
      boost::lambda::_1, m_image_ctx, m_object_no, head_object_map_state,
      &m_snap_ids, m_first_snap_is_clean, m_trace, boost::lambda::_2));
  auto ctx = librbd::util::create_context_callback<
    CopyupRequest<I>, &CopyupRequest<I>::handle_update_object_maps>(this);
  auto throttle = new AsyncObjectThrottle<I>(
    nullptr, *m_image_ctx, context_factory, ctx, nullptr, 0, m_snap_ids.size());
  throttle->start_ops(
    m_image_ctx->config.template get_val<uint64_t>("rbd_concurrent_management_ops"));
}

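// Object map updates completed: proceed to issuing the copyup itself.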
template <typename I>
void CopyupRequest<I>::handle_update_object_maps(int r) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "r=" << r << dendl;

  if (r < 0) {
    lderr(m_image_ctx->cct) << "failed to update object map: "
                            << cpp_strerror(r) << dendl;

    finish(r);
    return;
  }

  copyup();
}

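// Issue the copyup to RADOS: a stand-alone copyup op with an empty snapshot
// context (for CoR and deep-copyup) and/or a write op that combines the
// copyup with the pending write requests.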
template <typename I>
void CopyupRequest<I>::copyup() {
  auto cct = m_image_ctx->cct;
  m_image_ctx->image_lock.lock_shared();
  auto snapc = m_image_ctx->snapc;
  auto io_context = m_image_ctx->get_data_io_context();
  m_image_ctx->image_lock.unlock_shared();

  std::unique_lock locker{m_lock};

  if (!m_copyup_required) {
    locker.unlock();

    ldout(cct, 20) << "skipping copyup" << dendl;
    finish(0);
    return;
  }

  ldout(cct, 20) << dendl;

  bool copy_on_read = m_pending_requests.empty() && !m_deep_copied;
  bool deep_copyup = !snapc.snaps.empty() && !m_copyup_is_zero;
  if (m_copyup_is_zero) {
    m_copyup_data.clear();
    m_copyup_extent_map.clear();
  }

  neorados::WriteOp copyup_op;
  neorados::WriteOp write_op;
  neorados::WriteOp* op;
  if (copy_on_read || deep_copyup) {
    // copyup-op will use its own request issued to the initial object revision
    op = &copyup_op;
  } else {
    // copyup-op can be combined with the write-ops (if any)
    op = &write_op;
  }

  if (m_image_ctx->enable_sparse_copyup) {
    cls_client::sparse_copyup(op, m_copyup_extent_map, m_copyup_data);
  } else {
    // convert the sparse read back into a standard (thick) read
    Striper::StripedReadResult destriper;
    destriper.add_partial_sparse_result(
      cct, std::move(m_copyup_data), m_copyup_extent_map, 0,
      {{0, m_image_ctx->layout.object_size}});

    bufferlist thick_bl;
    destriper.assemble_result(cct, thick_bl, false);
    cls_client::copyup(op, thick_bl);
  }
  ObjectRequest<I>::add_write_hint(*m_image_ctx, op);

  if (!copy_on_read) {
    // merge all pending write ops into this single RADOS op
    for (auto req : m_pending_requests) {
      ldout(cct, 20) << "add_copyup_ops " << req << dendl;
      req->add_copyup_ops(&write_op);
    }
  }

  if (copyup_op.size() > 0) {
    ++m_pending_copyups;
  }
  if (write_op.size() > 0) {
    ++m_pending_copyups;
  }
  locker.unlock();

  // issue librados ops at the end to simplify test cases
  auto object = neorados::Object{data_object_name(m_image_ctx, m_object_no)};
  if (copyup_op.size() > 0) {
    // send only the copyup request with a blank snapshot context so that
    // all snapshots are detected from the parent for this object. If
    // this is a CoW request, a second request will be created for the
    // actual modification.
    ldout(cct, 20) << "copyup with empty snapshot context" << dendl;

    auto copyup_io_context = *io_context;
    copyup_io_context.write_snap_context({});

    m_image_ctx->rados_api.execute(
      object, copyup_io_context, std::move(copyup_op),
      librbd::asio::util::get_callback_adapter(
        [this](int r) { handle_copyup(r); }), nullptr,
      (this->m_trace.valid() ? this->m_trace.get_info() : nullptr));
  }

  if (write_op.size() > 0) {
    // compare-and-write doesn't add any write ops (copyup+cmpext+write
    // can't be executed in the same RADOS op because, unless the object
    // was already present in the clone, cmpext wouldn't see it)
    ldout(cct, 20) << (!deep_copyup && write_op.size() > 2 ?
                         "copyup + ops" : !deep_copyup ? "copyup" : "ops")
                   << " with current snapshot context" << dendl;

    m_image_ctx->rados_api.execute(
      object, *io_context, std::move(write_op),
      librbd::asio::util::get_callback_adapter(
        [this](int r) { handle_copyup(r); }), nullptr,
      (this->m_trace.valid() ? this->m_trace.get_info() : nullptr));
  }
}

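// Completion for each issued RADOS op: the last one to finish completes the
// request; -ENOENT is not treated as a copyup failure.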
template <typename I>
void CopyupRequest<I>::handle_copyup(int r) {
  auto cct = m_image_ctx->cct;
  unsigned pending_copyups;
  int copyup_ret_val = r;
  {
    std::lock_guard locker{m_lock};
    ceph_assert(m_pending_copyups > 0);
    pending_copyups = --m_pending_copyups;
    if (m_copyup_ret_val < 0) {
      copyup_ret_val = m_copyup_ret_val;
    } else if (r < 0) {
      m_copyup_ret_val = r;
    }
  }

  ldout(cct, 20) << "r=" << r << ", "
                 << "pending=" << pending_copyups << dendl;

  if (pending_copyups == 0) {
    if (copyup_ret_val < 0 && copyup_ret_val != -ENOENT) {
      lderr(cct) << "failed to copyup object: " << cpp_strerror(copyup_ret_val)
                 << dendl;
      complete_requests(false, copyup_ret_val);
    }

    finish(0);
  }
}

template <typename I>
void CopyupRequest<I>::finish(int r) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "r=" << r << dendl;

  complete_requests(true, r);
  delete this;
}

template <typename I>
void CopyupRequest<I>::complete_requests(bool override_restart_retval, int r) {
  auto cct = m_image_ctx->cct;
  remove_from_list();

  while (!m_pending_requests.empty()) {
    auto it = m_pending_requests.begin();
    auto req = *it;
    ldout(cct, 20) << "completing request " << req << dendl;
    req->handle_copyup(r);
    m_pending_requests.erase(it);
  }

  if (override_restart_retval) {
    r = -ERESTART;
  }

  while (!m_restart_requests.empty()) {
    auto it = m_restart_requests.begin();
    auto req = *it;
    ldout(cct, 20) << "restarting request " << req << dendl;
    req->handle_copyup(r);
    m_restart_requests.erase(it);
  }
}

template <typename I>
void CopyupRequest<I>::disable_append_requests() {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  m_append_request_permitted = false;
}

template <typename I>
void CopyupRequest<I>::remove_from_list() {
  std::lock_guard copyup_list_locker{m_image_ctx->copyup_list_lock};

  auto it = m_image_ctx->copyup_list.find(m_object_no);
  if (it != m_image_ctx->copyup_list.end()) {
    m_image_ctx->copyup_list.erase(it);
  }
}

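// A copyup is required for copy-on-read, whenever the parent data is
// non-zero, and whenever any pending write-op is a non-empty write.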
template <typename I>
bool CopyupRequest<I>::is_copyup_required() {
  ceph_assert(ceph_mutex_is_locked(m_lock));

  bool copy_on_read = m_pending_requests.empty();
  if (copy_on_read) {
    // always force a copyup if CoR enabled
    return true;
  }

  if (!m_copyup_is_zero) {
    return true;
  }

  for (auto req : m_pending_requests) {
    if (!req->is_empty_write_op()) {
      return true;
    }
  }

  return false;
}

template <typename I>
bool CopyupRequest<I>::is_deep_copy() const {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx->image_lock));
  return !m_image_ctx->migration_info.empty();
}

template <typename I>
bool CopyupRequest<I>::is_update_object_map_required(int r) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx->image_lock));

  if (r < 0) {
    return false;
  }

  if (m_image_ctx->object_map == nullptr) {
    return false;
  }

  if (m_image_ctx->migration_info.empty()) {
    // migration might have completed while IO was in-flight,
    // assume worst-case and perform an object map update
    return true;
  }

  auto it = m_image_ctx->migration_info.snap_map.find(CEPH_NOSNAP);
  ceph_assert(it != m_image_ctx->migration_info.snap_map.end());
  return it->second[0] != CEPH_NOSNAP;
}

template <typename I>
void CopyupRequest<I>::compute_deep_copy_snap_ids() {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx->image_lock));

  // don't copy ids for the snaps updated by object deep copy or
  // that don't overlap
  std::set<uint64_t> deep_copied;
  for (auto &it : m_image_ctx->migration_info.snap_map) {
    if (it.first != CEPH_NOSNAP) {
      deep_copied.insert(it.second.front());
    }
  }
  ldout(m_image_ctx->cct, 15) << "deep_copied=" << deep_copied << dendl;

  std::copy_if(m_image_ctx->snaps.rbegin(), m_image_ctx->snaps.rend(),
               std::back_inserter(m_snap_ids),
               [this, cct=m_image_ctx->cct, &deep_copied](uint64_t snap_id) {
      if (deep_copied.count(snap_id)) {
        m_first_snap_is_clean = true;
        return false;
      }

      uint64_t raw_overlap = 0;
      uint64_t object_overlap = 0;
      int r = m_image_ctx->get_parent_overlap(snap_id, &raw_overlap);
      if (r < 0) {
        ldout(cct, 5) << "failed getting parent overlap for snap_id: "
                      << snap_id << ": " << cpp_strerror(r) << dendl;
      } else if (raw_overlap > 0) {
        auto [parent_extents, area] = util::object_to_area_extents(
          m_image_ctx, m_object_no, {{0, m_image_ctx->layout.object_size}});
        object_overlap = m_image_ctx->prune_parent_extents(parent_extents, area,
                                                           raw_overlap, false);
      }
      return object_overlap > 0;
    });
}

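// The parent read produces an extent map in image coordinates; translate
// it into object coordinates for the copyup op.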
template <typename I>
void CopyupRequest<I>::convert_copyup_extent_map() {
  auto cct = m_image_ctx->cct;

  Extents image_extent_map;
  image_extent_map.swap(m_copyup_extent_map);
  m_copyup_extent_map.reserve(image_extent_map.size());

  // convert the image-extent extent map to object-extents
  for (auto [image_offset, image_length] : image_extent_map) {
    striper::LightweightObjectExtents object_extents;
    util::area_to_object_extents(m_image_ctx, image_offset, image_length,
                                 m_image_area, 0, &object_extents);
    for (auto& object_extent : object_extents) {
      m_copyup_extent_map.emplace_back(
        object_extent.offset, object_extent.length);
    }
  }

  ldout(cct, 20) << "image_extents=" << image_extent_map << ", "
                 << "object_extents=" << m_copyup_extent_map << dendl;
}

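// Assemble the copyup payload as a sparse bufferlist, dropping any ranges
// already overwritten by pending write-ops, and let the object dispatch
// layers transform it via prepare_copyup().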
template <typename I>
int CopyupRequest<I>::prepare_copyup_data() {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx->image_lock));
  auto cct = m_image_ctx->cct;

  SnapshotSparseBufferlist snapshot_sparse_bufferlist;
  auto& sparse_bufferlist = snapshot_sparse_bufferlist[0];

  bool copy_on_read = m_pending_requests.empty();
  bool maybe_deep_copyup = !m_image_ctx->snapc.snaps.empty();
  if (copy_on_read || maybe_deep_copyup) {
    // stand-alone copyup that will not be overwritten until HEAD revision
    ldout(cct, 20) << "processing full copy-up" << dendl;

    uint64_t buffer_offset = 0;
    for (auto [object_offset, object_length] : m_copyup_extent_map) {
      bufferlist sub_bl;
      sub_bl.substr_of(m_copyup_data, buffer_offset, object_length);
      buffer_offset += object_length;

      sparse_bufferlist.insert(
        object_offset, object_length,
        {SPARSE_EXTENT_STATE_DATA, object_length, std::move(sub_bl)});
    }
  } else {
    // copyup that will be concurrently written to the HEAD revision with the
    // associated write-ops so only process partial extents
    uint64_t buffer_offset = 0;
    for (auto [object_offset, object_length] : m_copyup_extent_map) {
      interval_set<uint64_t> copyup_object_extents;
      copyup_object_extents.insert(object_offset, object_length);

      interval_set<uint64_t> intersection;
      intersection.intersection_of(copyup_object_extents,
                                   m_write_object_extents);

      // extract only portions of the parent copyup data that have not
      // been overwritten by write-ops
      copyup_object_extents.subtract(intersection);
      for (auto [copyup_offset, copyup_length] : copyup_object_extents) {
        bufferlist sub_bl;
        sub_bl.substr_of(
          m_copyup_data, buffer_offset + (copyup_offset - object_offset),
          copyup_length);
        ceph_assert(sub_bl.length() == copyup_length);

        sparse_bufferlist.insert(
          copyup_offset, copyup_length,
          {SPARSE_EXTENT_STATE_DATA, copyup_length, std::move(sub_bl)});
      }

      buffer_offset += object_length;
    }

    ldout(cct, 20) << "processing partial copy-up: " << sparse_bufferlist
                   << dendl;
  }

  // Let dispatch layers have a chance to process the data
  auto r = m_image_ctx->io_object_dispatcher->prepare_copyup(
    m_object_no, &snapshot_sparse_bufferlist);
  if (r < 0) {
    return r;
  }

  // Convert sparse extents back to extent map
  m_copyup_data.clear();
  m_copyup_extent_map.clear();
  m_copyup_extent_map.reserve(sparse_bufferlist.ext_count());
  for (auto& extent : sparse_bufferlist) {
    auto& sbe = extent.get_val();
    if (sbe.state == SPARSE_EXTENT_STATE_DATA) {
      m_copyup_extent_map.emplace_back(extent.get_off(), extent.get_len());
      m_copyup_data.append(sbe.bl);
    }
  }

  return 0;
}

} // namespace io
} // namespace librbd

template class librbd::io::CopyupRequest<librbd::ImageCtx>;