// ceph/src/librbd/cache/rwl/Request.h (ceph.git, v15.2.11)
1 // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
2 // vim: ts=8 sw=2 smarttab
3
4 #ifndef CEPH_LIBRBD_CACHE_RWL_REQUEST_H
5 #define CEPH_LIBRBD_CACHE_RWL_REQUEST_H
6
#include <atomic>
#include <memory>
#include <ostream>
#include <vector>

#include "include/Context.h"
#include "librbd/cache/ImageCache.h"
#include "librbd/cache/rwl/Types.h"
#include "librbd/cache/rwl/LogOperation.h"
11
12 namespace librbd {
13 class BlockGuardCell;
14
15 namespace cache {
16 namespace rwl {
17
18 class GuardedRequestFunctionContext;
19
/* Buffer resources reserved for a write request.
 * 'allocated' is set through C_BlockIORequest::set_allocated(true) once the
 * reservation succeeds; on a failed allocation the partially reserved
 * 'buffers' are cleared instead (see set_allocated(false) below). */
struct WriteRequestResources {
  bool allocated = false;
  std::vector<WriteBufferAllocation> buffers;
};
24
25 /**
26 * A request that can be deferred in a BlockGuard to sequence
27 * overlapping operations.
28 * This is the custodian of the BlockGuard cell for this IO, and the
29 * state information about the progress of this IO. This object lives
30 * until the IO is persisted in all (live) log replicas. User request
31 * may be completed from here before the IO persists.
32 */
33 template <typename T>
34 class C_BlockIORequest : public Context {
35 public:
36 T &rwl;
37 io::Extents image_extents;
38 bufferlist bl;
39 int fadvise_flags;
40 Context *user_req; /* User write request */
41 ExtentsSummary<io::Extents> image_extents_summary;
42 bool detained = false; /* Detained in blockguard (overlapped with a prior IO) */
43 utime_t allocated_time; /* When allocation began */
44 bool waited_lanes = false; /* This IO waited for free persist/replicate lanes */
45 bool waited_entries = false; /* This IO waited for free log entries */
46 bool waited_buffers = false; /* This IO waited for data buffers (pmemobj_reserve() failed) */
47
48 C_BlockIORequest(T &rwl, const utime_t arrived, io::Extents &&extents,
49 bufferlist&& bl, const int fadvise_flags, Context *user_req);
50 ~C_BlockIORequest() override;
51 C_BlockIORequest(const C_BlockIORequest&) = delete;
52 C_BlockIORequest &operator=(const C_BlockIORequest&) = delete;
53
54 void set_cell(BlockGuardCell *cell);
55 BlockGuardCell *get_cell(void);
56 void release_cell();
57
58 void complete_user_request(int r);
59 void finish(int r);
60 virtual void finish_req(int r) = 0;
61
62 virtual bool alloc_resources() = 0;
63
64 void deferred();
65
66 virtual void deferred_handler() = 0;
67
68 virtual void dispatch() = 0;
69
70 virtual const char *get_name() const {
71 return "C_BlockIORequest";
72 }
73 uint64_t get_image_extents_size() {
74 return image_extents.size();
75 }
76 void set_io_waited_for_lanes(bool waited) {
77 waited_lanes = waited;
78 }
79 void set_io_waited_for_entries(bool waited) {
80 waited_entries = waited;
81 }
82 void set_io_waited_for_buffers(bool waited) {
83 waited_buffers = waited;
84 }
85 bool has_io_waited_for_buffers() {
86 return waited_buffers;
87 }
88 std::vector<WriteBufferAllocation>& get_resources_buffers() {
89 return m_resources.buffers;
90 }
91
92 void set_allocated(bool allocated) {
93 if (allocated) {
94 m_resources.allocated = true;
95 } else {
96 m_resources.buffers.clear();
97 }
98 }
99
100 virtual void setup_buffer_resources(
101 uint64_t &bytes_cached, uint64_t &bytes_dirtied, uint64_t &bytes_allocated,
102 uint64_t &number_lanes, uint64_t &number_log_entries,
103 uint64_t &number_unpublished_reserves) {};
104
105 protected:
106 utime_t m_arrived_time;
107 utime_t m_dispatched_time; /* When dispatch began */
108 utime_t m_user_req_completed_time;
109 std::atomic<bool> m_deferred = {false}; /* Deferred because this or a prior IO had to wait for write resources */
110 WriteRequestResources m_resources;
111
112 private:
113 std::atomic<bool> m_user_req_completed = {false};
114 std::atomic<bool> m_finish_called = {false};
115 std::atomic<bool> m_cell_released = {false};
116 BlockGuardCell* m_cell = nullptr;
117
118 template <typename U>
119 friend std::ostream &operator<<(std::ostream &os,
120 const C_BlockIORequest<U> &req);
121 };
122
123 /**
124 * This is the custodian of the BlockGuard cell for this write. Block
125 * guard is not released until the write persists everywhere (this is
126 * how we guarantee to each log replica that they will never see
127 * overlapping writes).
128 */
129 template <typename T>
130 class C_WriteRequest : public C_BlockIORequest<T> {
131 public:
132 using C_BlockIORequest<T>::rwl;
133 unique_ptr<WriteLogOperationSet> op_set = nullptr;
134
135 C_WriteRequest(T &rwl, const utime_t arrived, io::Extents &&image_extents,
136 bufferlist&& bl, const int fadvise_flags, ceph::mutex &lock,
137 PerfCounters *perfcounter, Context *user_req);
138
139 ~C_WriteRequest() override;
140
141 void blockguard_acquired(GuardedRequestFunctionContext &guard_ctx);
142
143 /* Common finish to plain write and compare-and-write (if it writes) */
144 void finish_req(int r) override;
145
146 /* Compare and write will override this */
147 virtual void update_req_stats(utime_t &now) {
148 // TODO: Add in later PRs
149 }
150 bool alloc_resources() override;
151
152 void deferred_handler() override { }
153
154 void dispatch() override;
155
156 virtual void setup_log_operations();
157
158 bool append_write_request(std::shared_ptr<SyncPoint> sync_point);
159
160 virtual void schedule_append();
161
162 const char *get_name() const override {
163 return "C_WriteRequest";
164 }
165
166 protected:
167 using C_BlockIORequest<T>::m_resources;
168 /* Plain writes will allocate one buffer per request extent */
169 void setup_buffer_resources(
170 uint64_t &bytes_cached, uint64_t &bytes_dirtied, uint64_t &bytes_allocated,
171 uint64_t &number_lanes, uint64_t &number_log_entries,
172 uint64_t &number_unpublished_reserves) override;
173
174 private:
175 bool m_do_early_flush = false;
176 std::atomic<int> m_appended = {0};
177 bool m_queued = false;
178 ceph::mutex &m_lock;
179 PerfCounters *m_perfcounter = nullptr;
180 template <typename U>
181 friend std::ostream &operator<<(std::ostream &os,
182 const C_WriteRequest<U> &req);
183 };
184
/* Per-request state while it is held by the block guard; streamed via
 * operator<< for debug logging. */
struct BlockGuardReqState {
  bool barrier = false; /* This is a barrier request */
  bool current_barrier = false; /* This is the currently active barrier */
  bool detained = false; /* Overlapped with a prior IO (cf. C_BlockIORequest::detained) */
  bool queued = false; /* Queued for barrier */
  friend std::ostream &operator<<(std::ostream &os,
                                  const BlockGuardReqState &r);
};
193
/* Context run when the block guard for a request is acquired. Carries the
 * acquired cell and the request's guard state to the wrapped callback.
 * Non-copyable: it owns a one-shot callback. */
class GuardedRequestFunctionContext : public Context {
public:
  BlockGuardCell *cell = nullptr; /* Cell granted by the block guard; nullptr until acquired */
  BlockGuardReqState state;
  GuardedRequestFunctionContext(boost::function<void(GuardedRequestFunctionContext&)> &&callback);
  ~GuardedRequestFunctionContext(void) override;
  GuardedRequestFunctionContext(const GuardedRequestFunctionContext&) = delete;
  GuardedRequestFunctionContext &operator=(const GuardedRequestFunctionContext&) = delete;

private:
  /* Invoked from finish() -- presumably with *this; confirm in Request.cc. */
  boost::function<void(GuardedRequestFunctionContext&)> m_callback;
  void finish(int r) override;
};
207
/* A request waiting on (or holding) the block guard over block_extent.
 * guard_ctx runs once the guard on the range is obtained. */
class GuardedRequest {
public:
  const BlockExtent block_extent; /* Block range this request guards */
  GuardedRequestFunctionContext *guard_ctx; /* Work to do when guard on range obtained */

  GuardedRequest(const BlockExtent block_extent,
                 GuardedRequestFunctionContext *on_guard_acquire, bool barrier = false)
    : block_extent(block_extent), guard_ctx(on_guard_acquire) {
    /* Record up front whether this request acts as a barrier. */
    guard_ctx->state.barrier = barrier;
  }
  friend std::ostream &operator<<(std::ostream &os,
                                  const GuardedRequest &r);
};
221
222 } // namespace rwl
223 } // namespace cache
224 } // namespace librbd
225
226 #endif // CEPH_LIBRBD_CACHE_RWL_REQUEST_H