// ceph/src/librbd/io/AioCompletion.cc (from the ceph nautilus 14.2.2 import)
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "librbd/io/AioCompletion.h"
#include <errno.h>

#include "common/ceph_context.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/perf_counters.h"
#include "common/WorkQueue.h"

#include "librbd/ImageCtx.h"
#include "librbd/internal.h"
#include "librbd/Journal.h"
#include "librbd/Types.h"

#ifdef WITH_LTTNG
#include "tracing/librbd.h"
#else
#define tracepoint(...)
#endif
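
// Usage sketch (illustrative only, not part of this translation unit): an
// AioCompletion is normally driven through the public librbd C API, roughly:
//
//   rbd_completion_t c;
//   rbd_aio_create_completion(/*cb_arg=*/nullptr, /*complete_cb=*/nullptr, &c);
//   rbd_aio_read(image, off, len, buf, c);
//   rbd_aio_wait_for_complete(c);            // blocks in wait_for_complete()
//   ssize_t r = rbd_aio_get_return_value(c); // bytes read or negative errno
//   rbd_aio_release(c);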

#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::io::AioCompletion: " << this \
                           << " " << __func__ << ": "

namespace librbd {
namespace io {

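// Block the caller until the completion transitions to AIO_STATE_COMPLETE,
// i.e. until complete() has fired the user callback and signalled waiters.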
int AioCompletion::wait_for_complete() {
  tracepoint(librbd, aio_wait_for_complete_enter, this);
  lock.Lock();
  while (state != AIO_STATE_COMPLETE)
    cond.Wait(lock);
  lock.Unlock();
  tracepoint(librbd, aio_wait_for_complete_exit, 0);
  return 0;
}

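// Called with 'lock' held once all sub-requests have finished: for reads,
// stitch the per-object results into the user-supplied destination.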
void AioCompletion::finalize(ssize_t rval)
{
  ceph_assert(lock.is_locked());
  ceph_assert(ictx != nullptr);
  CephContext *cct = ictx->cct;

  ldout(cct, 20) << "r=" << rval << dendl;
  if (rval >= 0 && aio_type == AIO_TYPE_READ) {
    read_result.assemble_result(cct);
  }
}

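// Called with 'lock' held: record per-op latency, invoke the user callback
// (with the lock dropped), optionally notify the event socket, and wake any
// threads blocked in wait_for_complete().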
void AioCompletion::complete() {
  ceph_assert(lock.is_locked());
  ceph_assert(ictx != nullptr);
  CephContext *cct = ictx->cct;

  tracepoint(librbd, aio_complete_enter, this, rval);
  if (ictx->perfcounter != nullptr) {
    ceph::timespan elapsed = coarse_mono_clock::now() - start_time;
    switch (aio_type) {
    case AIO_TYPE_GENERIC:
    case AIO_TYPE_OPEN:
    case AIO_TYPE_CLOSE:
      break;
    case AIO_TYPE_READ:
      ictx->perfcounter->tinc(l_librbd_rd_latency, elapsed); break;
    case AIO_TYPE_WRITE:
      ictx->perfcounter->tinc(l_librbd_wr_latency, elapsed); break;
    case AIO_TYPE_DISCARD:
      ictx->perfcounter->tinc(l_librbd_discard_latency, elapsed); break;
    case AIO_TYPE_FLUSH:
      ictx->perfcounter->tinc(l_librbd_flush_latency, elapsed); break;
    case AIO_TYPE_WRITESAME:
      ictx->perfcounter->tinc(l_librbd_ws_latency, elapsed); break;
    case AIO_TYPE_COMPARE_AND_WRITE:
      ictx->perfcounter->tinc(l_librbd_cmp_latency, elapsed); break;
    default:
      lderr(cct) << "completed invalid aio_type: " << aio_type << dendl;
      break;
    }
  }

  if ((aio_type == AIO_TYPE_CLOSE) ||
      (aio_type == AIO_TYPE_OPEN && rval < 0)) {
    // must destroy ImageCtx prior to invoking callback
    delete ictx;
    ictx = nullptr;
  }

  state = AIO_STATE_CALLBACK;
  if (complete_cb) {
    lock.Unlock();
    complete_cb(rbd_comp, complete_arg);
    lock.Lock();
  }

  if (ictx != nullptr && event_notify && ictx->event_socket.is_valid()) {
    ictx->completed_reqs_lock.Lock();
    ictx->completed_reqs.push_back(&m_xlist_item);
    ictx->completed_reqs_lock.Unlock();
    ictx->event_socket.notify();
  }

  state = AIO_STATE_COMPLETE;
  cond.Signal();

  // note: possible for image to be closed after op marked finished
  if (async_op.started()) {
    async_op.finish_op();
  }
  tracepoint(librbd, aio_complete_exit);
}

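// Bind this completion to an image and op type and stamp the start time;
// only the first call takes effect.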
void AioCompletion::init_time(ImageCtx *i, aio_type_t t) {
  Mutex::Locker locker(lock);
  if (ictx == nullptr) {
    ictx = i;
    aio_type = t;
    start_time = coarse_mono_clock::now();
  }
}

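// Register the op as in-flight with the image so it can be tracked and
// drained; open/close ops are never tracked, and flush ops are tracked only
// when ignore_type is set.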
void AioCompletion::start_op(bool ignore_type) {
  Mutex::Locker locker(lock);
  ceph_assert(ictx != nullptr);
  ceph_assert(!async_op.started());

  if (aio_type == AIO_TYPE_OPEN || aio_type == AIO_TYPE_CLOSE) {
    // no need to track async open/close operations
    return;
  }

  if (state == AIO_STATE_PENDING &&
      (ignore_type || aio_type != AIO_TYPE_FLUSH)) {
    async_op.start_op(*ictx);
  }
}

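// Fail the completion immediately with error code 'r'; valid only while no
// sub-requests are outstanding. Drops a reference via put_unlock().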
void AioCompletion::fail(int r)
{
  lock.Lock();
  ceph_assert(ictx != nullptr);
  CephContext *cct = ictx->cct;

  lderr(cct) << cpp_strerror(r) << dendl;
  ceph_assert(pending_count == 0);
  rval = r;
  complete();
  put_unlock();
}

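// Record how many sub-requests must call complete_request() before the
// completion fires. A zero count still queues a single placeholder request
// so that the callback always runs from the work queue's clean lock context.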
void AioCompletion::set_request_count(uint32_t count) {
  lock.Lock();
  ceph_assert(ictx != nullptr);
  CephContext *cct = ictx->cct;

  ldout(cct, 20) << "pending=" << count << dendl;
  ceph_assert(pending_count == 0);

  if (count > 0) {
    pending_count = count;
    lock.Unlock();
  } else {
    pending_count = 1;
    lock.Unlock();

    // ensure completion fires in clean lock context
    ictx->op_work_queue->queue(new C_AioRequest(this), 0);
  }
}

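// Fold one sub-request's result into the aggregate return value (the first
// error wins, except -EEXIST which is ignored; positive byte counts
// accumulate) and fire the completion once the pending count reaches zero.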
void AioCompletion::complete_request(ssize_t r)
{
  lock.Lock();
  ceph_assert(ictx != nullptr);
  CephContext *cct = ictx->cct;

  if (rval >= 0) {
    if (r < 0 && r != -EEXIST)
      rval = r;
    else if (r > 0)
      rval += r;
  }
  ceph_assert(pending_count);
  int count = --pending_count;

  ldout(cct, 20) << "cb=" << complete_cb << ", "
                 << "pending=" << pending_count << dendl;
  if (!count) {
    finalize(rval);
    complete();
  }
  put_unlock();
}

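// Thread-safe accessors: poll the completion state and fetch the aggregate
// return value (bytes transferred on success, negative errno on failure).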
bool AioCompletion::is_complete() {
  tracepoint(librbd, aio_is_complete_enter, this);
  bool done;
  {
    Mutex::Locker l(lock);
    done = this->state == AIO_STATE_COMPLETE;
  }
  tracepoint(librbd, aio_is_complete_exit, done);
  return done;
}

ssize_t AioCompletion::get_return_value() {
  tracepoint(librbd, aio_get_return_value_enter, this);
  lock.Lock();
  ssize_t r = rval;
  lock.Unlock();
  tracepoint(librbd, aio_get_return_value_exit, r);
  return r;
}

} // namespace io
} // namespace librbd