ceph/src/librbd/exclusive_lock/ImageDispatch.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "librbd/exclusive_lock/ImageDispatch.h"
#include "include/Context.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/exclusive_lock/Policy.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/io/ImageDispatcherInterface.h"

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::exclusive_lock::ImageDispatch: " \
                           << this << " " << __func__ << ": "

namespace librbd {
namespace exclusive_lock {

using util::create_context_callback;
using util::create_async_context_callback;

template <typename I>
ImageDispatch<I>::ImageDispatch(I* image_ctx)
  : m_image_ctx(image_ctx),
    m_lock(ceph::make_shared_mutex(
      util::unique_lock_name("librbd::exclusive_lock::ImageDispatch::m_lock",
                             this))) {
}

template <typename I>
void ImageDispatch<I>::shut_down(Context* on_finish) {
  // release any IO waiting on exclusive lock
  Contexts on_dispatches;
  {
    std::unique_lock locker{m_lock};
    std::swap(on_dispatches, m_on_dispatches);
  }

  for (auto ctx : on_dispatches) {
    ctx->complete(0);
  }

  on_finish->complete(0);
}

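// Marks the given IO direction(s) as requiring the exclusive lock. For
// writes, a flush is pushed through the lower dispatch layers so that no
// write remains in flight below this layer; init_shutdown selects the flush
// source that skips an image refresh during lock init/shutdown.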
template <typename I>
void ImageDispatch<I>::set_require_lock(bool init_shutdown,
                                        io::Direction direction,
                                        Context* on_finish) {
  // pause any matching IO from proceeding past this layer
  set_require_lock(direction, true);

  if (direction == io::DIRECTION_READ) {
    on_finish->complete(0);
    return;
  }

  // push through a flush for any in-flight writes at lower levels
  auto aio_comp = io::AioCompletion::create_and_start(
    on_finish, util::get_image_ctx(m_image_ctx), io::AIO_TYPE_FLUSH);
  auto req = io::ImageDispatchSpec::create_flush(
    *m_image_ctx, io::IMAGE_DISPATCH_LAYER_EXCLUSIVE_LOCK, aio_comp,
    (init_shutdown ?
      io::FLUSH_SOURCE_EXCLUSIVE_LOCK_SKIP_REFRESH :
      io::FLUSH_SOURCE_EXCLUSIVE_LOCK), {});
  req->send();
}

template <typename I>
void ImageDispatch<I>::unset_require_lock(io::Direction direction) {
  set_require_lock(direction, false);
}

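// Updates the per-direction require-lock flags. Returns true only when the
// aggregate state toggles: enabling a flag when none was previously set, or
// clearing the last remaining flag.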
template <typename I>
bool ImageDispatch<I>::set_require_lock(io::Direction direction, bool enabled) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "direction=" << direction << ", enabled=" << enabled
                 << dendl;

  std::unique_lock locker{m_lock};
  auto prev_require_lock = (m_require_lock_on_read || m_require_lock_on_write);

  switch (direction) {
  case io::DIRECTION_READ:
    m_require_lock_on_read = enabled;
    break;
  case io::DIRECTION_WRITE:
    m_require_lock_on_write = enabled;
    break;
  case io::DIRECTION_BOTH:
    m_require_lock_on_read = enabled;
    m_require_lock_on_write = enabled;
    break;
  }

  bool require_lock = (m_require_lock_on_read || m_require_lock_on_write);
  return ((enabled && !prev_require_lock && require_lock) ||
          (!enabled && prev_require_lock && !require_lock));
}

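// Each IO entry point below defers to needs_exclusive_lock(): the IO either
// proceeds down the dispatch stack or is queued until the lock is acquired.
// Only reads pass read_op=true, so they are gated solely by
// m_require_lock_on_read.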
template <typename I>
bool ImageDispatch<I>::read(
    io::AioCompletion* aio_comp, io::Extents &&image_extents,
    io::ReadResult &&read_result, IOContext io_context, int op_flags,
    int read_flags, const ZTracer::Trace &parent_trace, uint64_t tid,
    std::atomic<uint32_t>* image_dispatch_flags,
    io::DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "image_extents=" << image_extents << dendl;

  if (needs_exclusive_lock(true, tid, dispatch_result, on_dispatched)) {
    return true;
  }

  return false;
}

template <typename I>
bool ImageDispatch<I>::write(
    io::AioCompletion* aio_comp, io::Extents &&image_extents, bufferlist &&bl,
    int op_flags, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    io::DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents
                 << dendl;

  if (needs_exclusive_lock(false, tid, dispatch_result, on_dispatched)) {
    return true;
  }

  return false;
}

template <typename I>
bool ImageDispatch<I>::discard(
    io::AioCompletion* aio_comp, io::Extents &&image_extents,
    uint32_t discard_granularity_bytes, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    io::DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents
                 << dendl;

  if (needs_exclusive_lock(false, tid, dispatch_result, on_dispatched)) {
    return true;
  }

  return false;
}

template <typename I>
bool ImageDispatch<I>::write_same(
    io::AioCompletion* aio_comp, io::Extents &&image_extents, bufferlist &&bl,
    int op_flags, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    io::DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents
                 << dendl;

  if (needs_exclusive_lock(false, tid, dispatch_result, on_dispatched)) {
    return true;
  }

  return false;
}

template <typename I>
bool ImageDispatch<I>::compare_and_write(
    io::AioCompletion* aio_comp, io::Extents &&image_extents,
    bufferlist &&cmp_bl, bufferlist &&bl, uint64_t *mismatch_offset,
    int op_flags, const ZTracer::Trace &parent_trace,
    uint64_t tid, std::atomic<uint32_t>* image_dispatch_flags,
    io::DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << ", image_extents=" << image_extents
                 << dendl;

  if (needs_exclusive_lock(false, tid, dispatch_result, on_dispatched)) {
    return true;
  }

  return false;
}

template <typename I>
bool ImageDispatch<I>::flush(
    io::AioCompletion* aio_comp, io::FlushSource flush_source,
    const ZTracer::Trace &parent_trace, uint64_t tid,
    std::atomic<uint32_t>* image_dispatch_flags,
    io::DispatchResult* dispatch_result, Context** on_finish,
    Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 20) << "tid=" << tid << dendl;

  // don't attempt to grab the exclusive lock if we are just internally
  // clearing out our in-flight IO queue
  if (flush_source != io::FLUSH_SOURCE_USER) {
    return false;
  }

  if (needs_exclusive_lock(false, tid, dispatch_result, on_dispatched)) {
    return true;
  }

  return false;
}

template <typename I>
bool ImageDispatch<I>::is_lock_required(bool read_op) const {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  return ((read_op && m_require_lock_on_read) ||
          (!read_op && m_require_lock_on_write));
}

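// Determines whether the IO must wait for the exclusive lock. If so, the
// on_dispatched context is queued and the dispatch is flagged to restart
// from this layer once lock acquisition completes. Returns true when the IO
// has been handled here (queued or completed with an error), false when it
// may proceed.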
template <typename I>
bool ImageDispatch<I>::needs_exclusive_lock(bool read_op, uint64_t tid,
                                            io::DispatchResult* dispatch_result,
                                            Context* on_dispatched) {
  auto cct = m_image_ctx->cct;
  bool lock_required = false;
  {
    std::shared_lock locker{m_lock};
    lock_required = is_lock_required(read_op);
  }

  if (lock_required) {
    std::shared_lock owner_locker{m_image_ctx->owner_lock};
    if (m_image_ctx->exclusive_lock == nullptr) {
      // raced with the exclusive lock being disabled
      return false;
    }

    ldout(cct, 5) << "exclusive lock required: delaying IO" << dendl;
    if (!m_image_ctx->get_exclusive_lock_policy()->may_auto_request_lock()) {
      lderr(cct) << "op requires exclusive lock" << dendl;

      *dispatch_result = io::DISPATCH_RESULT_CONTINUE;
      on_dispatched->complete(
        m_image_ctx->exclusive_lock->get_unlocked_op_error());
      return true;
    }

    // block potential races with other incoming IOs
    std::unique_lock locker{m_lock};
    bool retesting_lock = (
      !m_on_dispatches.empty() && m_on_dispatches.front() == on_dispatched);
    if (!m_on_dispatches.empty() && !retesting_lock) {
      *dispatch_result = io::DISPATCH_RESULT_RESTART;
      m_on_dispatches.push_back(on_dispatched);
      return true;
    }

    if (!is_lock_required(read_op)) {
      return false;
    }

    ceph_assert(m_on_dispatches.empty() || retesting_lock);
    m_on_dispatches.push_back(on_dispatched);
    locker.unlock();

    *dispatch_result = io::DISPATCH_RESULT_RESTART;
    auto ctx = create_async_context_callback(
      *m_image_ctx, create_context_callback<
        ImageDispatch<I>, &ImageDispatch<I>::handle_acquire_lock>(this));
    m_image_ctx->exclusive_lock->acquire_lock(ctx);
    return true;
  }

  return false;
}

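// Completion for the asynchronous lock acquisition. On failure (other than a
// shutdown race) the IO that initiated the acquisition is completed with the
// error; all remaining queued IOs are restarted so they re-test whether the
// lock is still required.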
template <typename I>
void ImageDispatch<I>::handle_acquire_lock(int r) {
  auto cct = m_image_ctx->cct;
  ldout(cct, 5) << "r=" << r << dendl;

  std::unique_lock locker{m_lock};
  ceph_assert(!m_on_dispatches.empty());

  Context* failed_dispatch = nullptr;
  Contexts on_dispatches;
  if (r == -ERESTART) {
    ldout(cct, 5) << "IO raced with exclusive lock shutdown" << dendl;
  } else if (r < 0) {
    lderr(cct) << "failed to acquire exclusive lock: " << cpp_strerror(r)
               << dendl;
    failed_dispatch = m_on_dispatches.front();
    m_on_dispatches.pop_front();
  }

  // re-test if lock is still required (i.e. it wasn't acquired/lost) via a
  // restart dispatch
  std::swap(on_dispatches, m_on_dispatches);
  locker.unlock();

  if (failed_dispatch != nullptr) {
    failed_dispatch->complete(r);
  }
  for (auto ctx : on_dispatches) {
    ctx->complete(0);
  }
}

} // namespace exclusive_lock
} // namespace librbd

template class librbd::exclusive_lock::ImageDispatch<librbd::ImageCtx>;