]> git.proxmox.com Git - ceph.git/blob - ceph/src/spdk/ocf/src/utils/utils_io.c
import 15.2.0 Octopus source
[ceph.git] / ceph / src / spdk / ocf / src / utils / utils_io.c
1 /*
2 * Copyright(c) 2012-2018 Intel Corporation
3 * SPDX-License-Identifier: BSD-3-Clause-Clear
4 */
5
6 #include "ocf/ocf.h"
7 #include "../ocf_priv.h"
8 #include "../ocf_cache_priv.h"
9 #include "../ocf_volume_priv.h"
10 #include "../ocf_request.h"
11 #include "utils_io.h"
12 #include "utils_cache_line.h"
13
/* Completion-tracking context shared by all sub-IOs of one chunked
 * volume submission (discard, write-zeroes). Allocated per call and
 * freed by whichever side drops req_remaining to zero last. */
struct ocf_submit_volume_context {
	env_atomic req_remaining;	/* outstanding sub-IOs + 1 submitter ref */
	int error;			/* last error reported by any sub-IO, 0 if none */
	ocf_submit_end_t cmpl;		/* user completion, invoked exactly once */
	void *priv;			/* opaque argument passed back to cmpl */
};
20
21 static void _ocf_volume_flush_end(struct ocf_io *io, int error)
22 {
23 ocf_submit_end_t cmpl = io->priv1;
24
25 cmpl(io->priv2, error);
26 ocf_io_put(io);
27 }
28
29 void ocf_submit_volume_flush(ocf_volume_t volume,
30 ocf_submit_end_t cmpl, void *priv)
31 {
32 struct ocf_io *io;
33
34 io = ocf_volume_new_io(volume);
35 if (!io) {
36 cmpl(priv, -OCF_ERR_NO_MEM);
37 return;
38 }
39
40 ocf_io_configure(io, 0, 0, OCF_WRITE, 0, 0);
41 ocf_io_set_cmpl(io, cmpl, priv, _ocf_volume_flush_end);
42
43 ocf_volume_submit_flush(io);
44 }
45
46 static void ocf_submit_volume_end(struct ocf_io *io, int error)
47 {
48 struct ocf_submit_volume_context *context = io->priv1;
49
50 if (error)
51 context->error = error;
52
53 ocf_io_put(io);
54
55 if (env_atomic_dec_return(&context->req_remaining))
56 return;
57
58 context->cmpl(context->priv, context->error);
59 env_vfree(context);
60 }
61
62 void ocf_submit_volume_discard(ocf_volume_t volume, uint64_t addr,
63 uint64_t length, ocf_submit_end_t cmpl, void *priv)
64 {
65 struct ocf_submit_volume_context *context;
66 uint64_t bytes;
67 uint64_t max_length = (uint32_t)~0;
68 struct ocf_io *io;
69
70 context = env_vzalloc(sizeof(*context));
71 if (!context) {
72 cmpl(priv, -OCF_ERR_NO_MEM);
73 return;
74 }
75
76 env_atomic_set(&context->req_remaining, 1);
77 context->cmpl = cmpl;
78 context->priv = priv;
79
80 while (length) {
81 io = ocf_volume_new_io(volume);
82 if (!io) {
83 context->error = -OCF_ERR_NO_MEM;
84 break;
85 }
86
87 env_atomic_inc(&context->req_remaining);
88
89 bytes = OCF_MIN(length, max_length);
90
91 ocf_io_configure(io, addr, bytes, OCF_WRITE, 0, 0);
92 ocf_io_set_cmpl(io, context, NULL, ocf_submit_volume_end);
93 ocf_volume_submit_discard(io);
94
95 addr += bytes;
96 length -= bytes;
97 }
98
99 if (env_atomic_dec_return(&context->req_remaining))
100 return;
101
102 cmpl(priv, context->error);
103 env_vfree(context);
104 }
105
/*
 * Zero the byte range [@addr, @addr + @length) on @volume using
 * write-zeroes IOs. The range is split into chunks tracked through a
 * shared ocf_submit_volume_context; @cmpl fires exactly once -- after the
 * last outstanding chunk completes, or immediately on allocation failure
 * -- with the last recorded error (0 on full success).
 */
void ocf_submit_write_zeros(ocf_volume_t volume, uint64_t addr,
		uint64_t length, ocf_submit_end_t cmpl, void *priv)
{
	struct ocf_submit_volume_context *context;
	uint32_t bytes;
	/* Largest multiple of PAGE_SIZE representable in 32 bits, so every
	 * chunk except possibly the last stays page-aligned */
	uint32_t max_length = ~((uint32_t)PAGE_SIZE - 1);
	struct ocf_io *io;

	context = env_vzalloc(sizeof(*context));
	if (!context) {
		cmpl(priv, -OCF_ERR_NO_MEM);
		return;
	}

	/* Submitter's own reference; dropped after the loop so the final
	 * completion cannot fire while chunks are still being issued */
	env_atomic_set(&context->req_remaining, 1);
	context->cmpl = cmpl;
	context->priv = priv;

	while (length) {
		io = ocf_volume_new_io(volume);
		if (!io) {
			context->error = -OCF_ERR_NO_MEM;
			break;
		}

		env_atomic_inc(&context->req_remaining);

		bytes = OCF_MIN(length, max_length);

		ocf_io_configure(io, addr, bytes, OCF_WRITE, 0, 0);
		ocf_io_set_cmpl(io, context, NULL, ocf_submit_volume_end);
		ocf_volume_submit_write_zeroes(io);

		addr += bytes;
		length -= bytes;
	}

	/* Drop submitter reference; the last holder completes and frees */
	if (env_atomic_dec_return(&context->req_remaining))
		return;

	cmpl(priv, context->error);
	env_vfree(context);
}
149
/* Per-call context for a single-page cache device IO
 * (see ocf_submit_cache_page / ocf_submit_cache_page_end). */
struct ocf_submit_cache_page_context {
	ocf_cache_t cache;	/* cache owning the target device volume */
	void *buffer;		/* caller's page buffer: write src, read dst */
	ocf_submit_end_t cmpl;	/* user completion callback */
	void *priv;		/* opaque argument passed back to cmpl */
};
156
157 static void ocf_submit_cache_page_end(struct ocf_io *io, int error)
158 {
159 struct ocf_submit_cache_page_context *context = io->priv1;
160 ctx_data_t *data = ocf_io_get_data(io);
161
162 if (io->dir == OCF_READ) {
163 ctx_data_rd_check(context->cache->owner, context->buffer,
164 data, PAGE_SIZE);
165 }
166
167 context->cmpl(context->priv, error);
168 ctx_data_free(context->cache->owner, data);
169 env_vfree(context);
170 ocf_io_put(io);
171 }
172
/*
 * Read or write one page (PAGE_SIZE bytes) of the cache device.
 *
 * @param cache  cache whose device volume is targeted
 * @param addr   byte address on the cache volume (presumably page-aligned
 *               -- not verified here)
 * @param dir    OCF_READ or OCF_WRITE
 * @param buffer caller-owned page buffer; source for writes, destination
 *               for reads (filled in ocf_submit_cache_page_end)
 * @param cmpl   completion, invoked exactly once with 0 or a negative
 *               error code (including on setup failure)
 * @param priv   opaque argument passed back to @cmpl
 */
void ocf_submit_cache_page(ocf_cache_t cache, uint64_t addr, int dir,
		void *buffer, ocf_submit_end_t cmpl, void *priv)
{
	struct ocf_submit_cache_page_context *context;
	ctx_data_t *data;
	struct ocf_io *io;
	int result = 0;

	context = env_vmalloc(sizeof(*context));
	if (!context) {
		cmpl(priv, -OCF_ERR_NO_MEM);
		return;
	}

	context->cache = cache;
	context->buffer = buffer;
	context->cmpl = cmpl;
	context->priv = priv;

	io = ocf_volume_new_io(&cache->device->volume);
	if (!io) {
		result = -OCF_ERR_NO_MEM;
		goto err_io;
	}

	/* Allocate ctx data for the transfer -- the count of 1 presumably
	 * means one page, matching the PAGE_SIZE IO below; TODO confirm */
	data = ctx_data_alloc(cache->owner, 1);
	if (!data) {
		result = -OCF_ERR_NO_MEM;
		goto err_data;
	}

	/* Writes stage the caller's buffer into the data object up front;
	 * reads copy back in the completion handler */
	if (dir == OCF_WRITE)
		ctx_data_wr_check(cache->owner, data, buffer, PAGE_SIZE);

	result = ocf_io_set_data(io, data, 0);
	if (result)
		goto err_set_data;

	ocf_io_configure(io, addr, PAGE_SIZE, dir, 0, 0);
	ocf_io_set_cmpl(io, context, NULL, ocf_submit_cache_page_end);

	ocf_volume_submit_io(io);
	return;

	/* Unwind in reverse acquisition order; @cmpl still fires once */
err_set_data:
	ctx_data_free(cache->owner, data);
err_data:
	ocf_io_put(io);
err_io:
	env_vfree(context);
	cmpl(priv, result);
}
225
226 static void ocf_submit_volume_req_cmpl(struct ocf_io *io, int error)
227 {
228 struct ocf_request *req = io->priv1;
229 ocf_req_end_t callback = io->priv2;
230
231 callback(req, error);
232
233 ocf_io_put(io);
234 }
235
/*
 * Submit @req to the cache volume as @reqs IOs.
 *
 * @map_info supplies the collision (cache line) index for each chunk.
 * With reqs == 1 a single IO covering req->byte_length is issued;
 * otherwise one IO per cache line is issued, with the first and last
 * trimmed to the request's byte range. @callback is invoked once per
 * chunk -- for chunks that fail setup it is invoked synchronously with
 * the error. Cache block statistics are updated with the bytes actually
 * handed to the volume.
 */
void ocf_submit_cache_reqs(struct ocf_cache *cache,
		struct ocf_map_info *map_info, struct ocf_request *req, int dir,
		unsigned int reqs, ocf_req_end_t callback)
{
	struct ocf_counters_block *cache_stats;
	/* Propagate flags/io class from the originating front-end IO, if any */
	uint64_t flags = req->io ? req->io->flags : 0;
	uint32_t class = req->io ? req->io->io_class : 0;
	uint64_t addr, bytes, total_bytes = 0;
	struct ocf_io *io;
	uint32_t i;
	int err;

	cache_stats = &cache->core[req->core_id].
			counters->cache_blocks;

	/* Fast path: the whole request fits in one IO */
	if (reqs == 1) {
		io = ocf_new_cache_io(cache);
		if (!io) {
			/* NOTE(review): reports -ENOMEM while the rest of this
			 * file uses -OCF_ERR_NO_MEM -- confirm callers accept both */
			callback(req, -ENOMEM);
			goto update_stats;
		}

		/* Logical cache line -> physical byte address, plus the
		 * request's offset within its first line */
		addr = ocf_metadata_map_lg2phy(cache,
				map_info[0].coll_idx);
		addr *= ocf_line_size(cache);
		addr += cache->device->metadata_offset;
		addr += (req->byte_position % ocf_line_size(cache));
		bytes = req->byte_length;

		ocf_io_configure(io, addr, bytes, dir, class, flags);
		ocf_io_set_queue(io, req->io_queue);
		ocf_io_set_cmpl(io, req, callback, ocf_submit_volume_req_cmpl);

		err = ocf_io_set_data(io, req->data, 0);
		if (err) {
			ocf_io_put(io);
			callback(req, err);
			goto update_stats;
		}

		ocf_volume_submit_io(io);
		total_bytes = req->byte_length;

		goto update_stats;
	}

	/* Issue requests to cache. */
	for (i = 0; i < reqs; i++) {
		io = ocf_new_cache_io(cache);

		if (!io) {
			/* Finish all IOs which left with ERROR */
			for (; i < reqs; i++)
				callback(req, -ENOMEM);
			goto update_stats;
		}

		addr = ocf_metadata_map_lg2phy(cache,
				map_info[i].coll_idx);
		addr *= ocf_line_size(cache);
		addr += cache->device->metadata_offset;
		bytes = ocf_line_size(cache);

		if (i == 0) {
			/* First chunk: skip bytes before the request start */
			uint64_t seek = (req->byte_position %
					ocf_line_size(cache));

			addr += seek;
			bytes -= seek;
		} else if (i == (reqs - 1)) {
			/* Last chunk: trim bytes past the request end (outer
			 * % handles an exactly line-aligned end -> skip 0) */
			uint64_t skip = (ocf_line_size(cache) -
					((req->byte_position + req->byte_length) %
					ocf_line_size(cache))) % ocf_line_size(cache);

			bytes -= skip;
		}

		ocf_io_configure(io, addr, bytes, dir, class, flags);
		ocf_io_set_queue(io, req->io_queue);
		ocf_io_set_cmpl(io, req, callback, ocf_submit_volume_req_cmpl);

		/* total_bytes doubles as the running offset into req->data */
		err = ocf_io_set_data(io, req->data, total_bytes);
		if (err) {
			ocf_io_put(io);
			/* Finish all IOs which left with ERROR */
			for (; i < reqs; i++)
				callback(req, err);
			goto update_stats;
		}
		ocf_volume_submit_io(io);
		total_bytes += bytes;
	}

update_stats:
	/* Account only bytes actually handed to the volume */
	if (dir == OCF_WRITE)
		env_atomic64_add(total_bytes, &cache_stats->write_bytes);
	else if (dir == OCF_READ)
		env_atomic64_add(total_bytes, &cache_stats->read_bytes);
}
335
336 void ocf_submit_volume_req(ocf_volume_t volume, struct ocf_request *req,
337 ocf_req_end_t callback)
338 {
339 struct ocf_cache *cache = req->cache;
340 struct ocf_counters_block *core_stats;
341 uint64_t flags = req->io ? req->io->flags : 0;
342 uint32_t class = req->io ? req->io->io_class : 0;
343 int dir = req->rw;
344 struct ocf_io *io;
345 int err;
346
347 core_stats = &cache->core[req->core_id].
348 counters->core_blocks;
349 if (dir == OCF_WRITE)
350 env_atomic64_add(req->byte_length, &core_stats->write_bytes);
351 else if (dir == OCF_READ)
352 env_atomic64_add(req->byte_length, &core_stats->read_bytes);
353
354 io = ocf_volume_new_io(volume);
355 if (!io) {
356 callback(req, -ENOMEM);
357 return;
358 }
359
360 ocf_io_configure(io, req->byte_position, req->byte_length, dir,
361 class, flags);
362 ocf_io_set_queue(io, req->io_queue);
363 ocf_io_set_cmpl(io, req, callback, ocf_submit_volume_req_cmpl);
364 err = ocf_io_set_data(io, req->data, 0);
365 if (err) {
366 ocf_io_put(io);
367 callback(req, err);
368 return;
369 }
370 ocf_volume_submit_io(io);
371 }