// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#pragma once

#include <seastar/core/shared_mutex.hh>

#include "crimson/common/operation.h"
#include "crimson/osd/osd_operation.h"

namespace crimson::os::seastore {

struct WritePipeline {
  struct ReserveProjectedUsage : OrderedExclusivePhaseT<ReserveProjectedUsage> {
    constexpr static auto type_name = "WritePipeline::reserve_projected_usage";
  } reserve_projected_usage;
  struct OolWrites : UnorderedStageT<OolWrites> {
    constexpr static auto type_name = "UnorderedStage::ool_writes_stage";
  } ool_writes;
  struct Prepare : OrderedExclusivePhaseT<Prepare> {
    constexpr static auto type_name = "WritePipeline::prepare_phase";
  } prepare;
  struct DeviceSubmission : OrderedConcurrentPhaseT<DeviceSubmission> {
    constexpr static auto type_name = "WritePipeline::device_submission_phase";
  } device_submission;
  struct Finalize : OrderedExclusivePhaseT<Finalize> {
    constexpr static auto type_name = "WritePipeline::finalize_phase";
  } finalize;

  using BlockingEvents = std::tuple<
    ReserveProjectedUsage::BlockingEvent,
    OolWrites::BlockingEvent,
    Prepare::BlockingEvent,
    DeviceSubmission::BlockingEvent,
    Finalize::BlockingEvent
  >;
};

/**
 * PlaceholderOperation
 *
 * Once seastore is more complete, I expect to update the externally
 * facing interfaces to permit passing the osd level operation through.
 * Until then (and for tests likely permanently) we'll use this unregistered
 * placeholder for the pipeline phases necessary for journal correctness.
 */
class PlaceholderOperation :
  public crimson::osd::PhasedOperationT<PlaceholderOperation> {
public:
  constexpr static auto type = 0U;
  constexpr static auto type_name =
    "crimson::os::seastore::PlaceholderOperation";

  static PlaceholderOperation::IRef create() {
    return IRef{new PlaceholderOperation()};
  }

  PipelineHandle handle;
  WritePipeline::BlockingEvents tracking_events;

  PipelineHandle& get_handle() {
    return handle;
  }
private:
  void dump_detail(ceph::Formatter *f) const final {}
  void print(std::ostream &) const final {}
};
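
// Illustrative sketch, not part of the interface: the placeholder is normally
// created via create() and driven through the OperationProxy / OrderingHandle
// machinery defined below rather than entered into stages directly, e.g.
//
//   auto op = PlaceholderOperation::create();
//   auto proxy =
//     std::make_unique<OperationProxyT<PlaceholderOperation>>(std::move(op));
//   // proxy->enter(stage) forwards to the operation's enter_stage(stage),
//   // as implemented by OperationProxyT below.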

struct OperationProxy {
  OperationRef op;
  OperationProxy(OperationRef op) : op(std::move(op)) {}

  virtual seastar::future<> enter(WritePipeline::ReserveProjectedUsage&) = 0;
  virtual seastar::future<> enter(WritePipeline::OolWrites&) = 0;
  virtual seastar::future<> enter(WritePipeline::Prepare&) = 0;
  virtual seastar::future<> enter(WritePipeline::DeviceSubmission&) = 0;
  virtual seastar::future<> enter(WritePipeline::Finalize&) = 0;

  virtual void exit() = 0;
  virtual seastar::future<> complete() = 0;

  virtual ~OperationProxy() = default;
};

template <typename OpT>
struct OperationProxyT : OperationProxy {
  OperationProxyT(typename OpT::IRef op) : OperationProxy(op) {}

  OpT* that() {
    return static_cast<OpT*>(op.get());
  }
  const OpT* that() const {
    return static_cast<const OpT*>(op.get());
  }

  seastar::future<> enter(WritePipeline::ReserveProjectedUsage& s) final {
    return that()->enter_stage(s);
  }
  seastar::future<> enter(WritePipeline::OolWrites& s) final {
    return that()->enter_stage(s);
  }
  seastar::future<> enter(WritePipeline::Prepare& s) final {
    return that()->enter_stage(s);
  }
  seastar::future<> enter(WritePipeline::DeviceSubmission& s) final {
    return that()->enter_stage(s);
  }
  seastar::future<> enter(WritePipeline::Finalize& s) final {
    return that()->enter_stage(s);
  }

  void exit() final {
    return that()->handle.exit();
  }
  seastar::future<> complete() final {
    return that()->handle.complete();
  }
};

struct OrderingHandle {
  // This dynamic allocation could easily be optimized out, since all
  // concrete proxies are supposed to have exactly the same size.
  std::unique_ptr<OperationProxy> op;
  seastar::shared_mutex *collection_ordering_lock = nullptr;

  // In the future we might add further constructors / templates for type
  // erasure while extracting the location of tracking events.
  OrderingHandle(std::unique_ptr<OperationProxy> op) : op(std::move(op)) {}
  OrderingHandle(OrderingHandle &&other)
    : op(std::move(other.op)),
      collection_ordering_lock(other.collection_ordering_lock) {
    other.collection_ordering_lock = nullptr;
  }

  seastar::future<> take_collection_lock(seastar::shared_mutex &mutex) {
    ceph_assert(!collection_ordering_lock);
    collection_ordering_lock = &mutex;
    return collection_ordering_lock->lock();
  }

  void maybe_release_collection_lock() {
    if (collection_ordering_lock) {
      collection_ordering_lock->unlock();
      collection_ordering_lock = nullptr;
    }
  }

  template <typename T>
  seastar::future<> enter(T &t) {
    return op->enter(t);
  }

  seastar::future<> complete() {
    return op->complete();
  }

  ~OrderingHandle() {
    maybe_release_collection_lock();
  }
};

inline OrderingHandle get_dummy_ordering_handle() {
  using PlaceholderOpProxy = OperationProxyT<PlaceholderOperation>;
  return OrderingHandle{
    std::make_unique<PlaceholderOpProxy>(PlaceholderOperation::create())};
}
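
// A minimal usage sketch, assuming a WritePipeline instance named
// `write_pipeline` owned by the caller and a seastar coroutine context
// (both are illustrative assumptions, not part of this header):
//
//   auto handle = get_dummy_ordering_handle();
//   co_await handle.enter(write_pipeline.reserve_projected_usage);
//   co_await handle.enter(write_pipeline.ool_writes);
//   co_await handle.enter(write_pipeline.prepare);
//   co_await handle.enter(write_pipeline.device_submission);
//   co_await handle.enter(write_pipeline.finalize);
//   co_await handle.complete();
//
// The stages are entered in the order listed in WritePipeline::BlockingEvents;
// the exact subset a real caller traverses depends on the write path.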

} // namespace crimson::os::seastore

namespace crimson {

template <>
struct EventBackendRegistry<os::seastore::PlaceholderOperation> {
  static std::tuple<> get_backends() {
    return {};
  }
};

} // namespace crimson