ceph/src/spdk/ocf/src/metadata/metadata_io.c
/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */
#include "metadata.h"
#include "metadata_io.h"
#include "../ocf_priv.h"
#include "../engine/cache_engine.h"
#include "../engine/engine_common.h"
#include "../engine/engine_bf.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_allocator.h"
#include "../utils/utils_io.h"
#include "../ocf_def_priv.h"

#define OCF_METADATA_IO_DEBUG 0
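/* Set OCF_METADATA_IO_DEBUG to 1 to route the trace macros below to the cache log. */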

#if 1 == OCF_METADATA_IO_DEBUG
#define OCF_DEBUG_TRACE(cache) \
	ocf_cache_log(cache, log_info, "[Metadata][IO] %s\n", __func__)

#define OCF_DEBUG_MSG(cache, msg) \
	ocf_cache_log(cache, log_info, "[Metadata][IO] %s - %s\n", \
			__func__, msg)

#define OCF_DEBUG_PARAM(cache, format, ...) \
	ocf_cache_log(cache, log_info, "[Metadata][IO] %s - "format"\n", \
			__func__, ##__VA_ARGS__)
#else
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_MSG(cache, msg)
#define OCF_DEBUG_PARAM(cache, format, ...)
#endif

static void metadata_io_i_asynch_end(struct metadata_io_request *request,
		int error);
static int ocf_restart_meta_io(struct ocf_request *req);

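/*
 * IO interface for restarting a metadata request; both the read and write
 * entry points are served by ocf_restart_meta_io(). It is attached to
 * requests that may be deferred because they overlap another in-flight
 * metadata IO (see metadata_updater_check_overlaps() below).
 */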
static struct ocf_io_if meta_restart_if = {
	.read = ocf_restart_meta_io,
	.write = ocf_restart_meta_io
};

/*
 * Get max pages for IO
 */
static uint32_t metadata_io_max_page(ocf_cache_t cache)
{
	return ocf_volume_get_max_io_size(&cache->device->volume) / PAGE_SIZE;
}
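
/*
 * For example, assuming a cache volume that reports a 128 KiB maximum IO size
 * and 4 KiB pages, the helper above would allow 32 metadata pages per IO
 * (illustrative numbers only; the real limit comes from the volume).
 */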

/*
 * Iterative read end callback
 */
static void metadata_io_read_i_atomic_end(struct ocf_io *io, int error)
{
	struct metadata_io_request_atomic *meta_atom_req = io->priv1;

	OCF_DEBUG_TRACE(ocf_volume_get_cache(io->volume));

	meta_atom_req->error |= error;
	env_completion_complete(&meta_atom_req->complete);
}

/*
 * Iterative read request
 * TODO: Make this function asynchronous to enable async recovery
 * in atomic mode.
 */
int metadata_io_read_i_atomic(ocf_cache_t cache, ocf_queue_t queue,
		void *context, ocf_metadata_atomic_io_event_t drain_hndl,
		ocf_metadata_io_end_t compl_hndl)
{
	uint64_t i;
	uint64_t max_sectors_count = PAGE_SIZE / OCF_ATOMIC_METADATA_SIZE;
	uint64_t io_sectors_count = cache->device->collision_table_entries *
			ocf_line_sectors(cache);
	uint64_t count, curr_count;
	int result = 0;
	struct ocf_io *io;
	ctx_data_t *data;
	struct metadata_io_request_atomic meta_atom_req;
	unsigned char step = 0;

	OCF_DEBUG_TRACE(cache);

	/* Allocate one 4k page for metadata */
	data = ctx_data_alloc(cache->owner, 1);
	if (!data)
		return -OCF_ERR_NO_MEM;

	count = io_sectors_count;
	for (i = 0; i < io_sectors_count; i += curr_count) {
		/* Get sectors count of this IO iteration */
		curr_count = OCF_MIN(max_sectors_count, count);

		env_completion_init(&meta_atom_req.complete);
		meta_atom_req.error = 0;

		/* Reset position in data buffer */
		ctx_data_seek(cache->owner, data, ctx_data_seek_begin, 0);

		/* Allocate new IO */
		io = ocf_new_cache_io(cache);
		if (!io) {
			result = -OCF_ERR_NO_MEM;
			break;
		}

		/* Setup IO */
		ocf_io_configure(io,
				cache->device->metadata_offset +
					SECTORS_TO_BYTES(i),
				SECTORS_TO_BYTES(curr_count),
				OCF_READ, 0, 0);
		ocf_io_set_cmpl(io, &meta_atom_req, NULL,
				metadata_io_read_i_atomic_end);
		result = ocf_io_set_data(io, data, 0);
		if (result) {
			ocf_io_put(io);
			break;
		}

		/* Submit IO */
		ocf_volume_submit_metadata(io);
		ocf_io_put(io);

		/* Wait for completion of IO */
		env_completion_wait(&meta_atom_req.complete);

		/* Check for error */
		if (meta_atom_req.error) {
			result = meta_atom_req.error;
			break;
		}

		result |= drain_hndl(cache, i, curr_count, data);
		if (result)
			break;

		count -= curr_count;

		OCF_COND_RESCHED(step, 128);
	}

	/* Memory free */
	ctx_data_free(cache->owner, data);

	compl_hndl(cache, context, result);

	return 0;
}

static void metadata_io_i_asynch_cmpl(struct ocf_io *io, int error)
{
	struct metadata_io_request *request = io->priv1;

	metadata_io_i_asynch_end(request, error);

	ocf_io_put(io);
}

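/*
 * Copy the current content of the metadata pages covered by the request into
 * its data buffer, one page at a time, via the caller-provided on_meta_fill
 * handler (done before a write is submitted).
 */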
static void metadata_io_req_fill(struct metadata_io_request *meta_io_req)
{
	ocf_cache_t cache = meta_io_req->cache;
	int i;

	for (i = 0; i < meta_io_req->count; i++) {
		meta_io_req->on_meta_fill(cache, meta_io_req->data,
			meta_io_req->page + i, meta_io_req->context);
	}
}

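/*
 * Hand the data read from the cache volume back to the caller, one page at a
 * time, via the on_meta_drain handler (done after a read completes).
 */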
static void metadata_io_req_drain(struct metadata_io_request *meta_io_req)
{
	ocf_cache_t cache = meta_io_req->cache;
	int i;

	for (i = 0; i < meta_io_req->count; i++) {
		meta_io_req->on_meta_drain(cache, meta_io_req->data,
			meta_io_req->page + i, meta_io_req->context);
	}
}

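/*
 * Restart handler behind meta_restart_if: refresh the request's data buffer
 * with the latest metadata under the metadata read lock and submit the write.
 */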
static int ocf_restart_meta_io(struct ocf_request *req)
{
	struct metadata_io_request *meta_io_req = req->priv;
	ocf_cache_t cache = req->cache;
	struct ocf_io *io;
	int ret;

	/* Fill with the latest metadata. */
	OCF_METADATA_LOCK_RD();
	metadata_io_req_fill(meta_io_req);
	OCF_METADATA_UNLOCK_RD();

	io = ocf_new_cache_io(cache);
	if (!io) {
		metadata_io_i_asynch_end(meta_io_req, -OCF_ERR_NO_MEM);
		return 0;
	}

	/* Setup IO */
	ocf_io_configure(io,
			PAGES_TO_BYTES(meta_io_req->page),
			PAGES_TO_BYTES(meta_io_req->count),
			OCF_WRITE, 0, 0);

	ocf_io_set_cmpl(io, meta_io_req, NULL, metadata_io_i_asynch_cmpl);
	ret = ocf_io_set_data(io, meta_io_req->data, 0);
	if (ret) {
		ocf_io_put(io);
		metadata_io_i_asynch_end(meta_io_req, ret);
		return ret;
	}
	ocf_volume_submit_io(io);
	return 0;
}

/*
 * Iterative asynchronous IO completion callback (used for both reads
 * and writes)
 */
static void metadata_io_i_asynch_end(struct metadata_io_request *request,
		int error)
{
	struct metadata_io_request_asynch *a_req;
	ocf_cache_t cache;

	OCF_CHECK_NULL(request);

	cache = request->cache;

	a_req = request->asynch;
	OCF_CHECK_NULL(a_req);
	OCF_CHECK_NULL(a_req->on_complete);

	if (error) {
		request->error |= error;
		request->asynch->error |= error;
	} else {
		if (request->fl_req.rw == OCF_READ)
			metadata_io_req_drain(request);
	}

	if (env_atomic_dec_return(&request->req_remaining))
		return;

	OCF_DEBUG_PARAM(cache, "Page = %u", request->page);

	ctx_data_free(cache->owner, request->data);
	request->data = NULL;

	if (env_atomic_dec_return(&a_req->req_remaining)) {
		env_atomic_set(&request->finished, 1);
		ocf_metadata_updater_kick(cache);
		return;
	}

	OCF_DEBUG_MSG(cache, "Asynchronous IO completed");

	/* All IOs have been finished, call IO end callback */
	a_req->on_complete(request->cache, a_req->context, request->error);

	/*
	 * If this is the last request, we mark it as finished
	 * after calling the IO end callback
	 */
	env_atomic_set(&request->finished, 1);
	ocf_metadata_updater_kick(cache);
}

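/*
 * Record an error on the i-th sub-request, zero its page count and release
 * its data buffer.
 */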
static void metadata_io_req_error(ocf_cache_t cache,
		struct metadata_io_request_asynch *a_req,
		uint32_t i, int error)
{
	a_req->error |= error;
	a_req->reqs[i].error |= error;
	a_req->reqs[i].count = 0;
	if (a_req->reqs[i].data)
		ctx_data_free(cache->owner, a_req->reqs[i].data);
	a_req->reqs[i].data = NULL;
}

/*
 * Perform an iterative metadata IO request (read or write) asynchronously
 */
static int metadata_io_i_asynch(ocf_cache_t cache, ocf_queue_t queue, int dir,
		void *context, uint32_t page, uint32_t count,
		ocf_metadata_io_event_t io_hndl,
		ocf_metadata_io_end_t compl_hndl)
{
	uint32_t curr_count, written;
	uint32_t max_count = metadata_io_max_page(cache);
	uint32_t io_count = OCF_DIV_ROUND_UP(count, max_count);
	uint32_t i;
	int error = 0, ret;
	struct ocf_io *io;

	/* Allocation and initialization of asynchronous metadata IO request */
	struct metadata_io_request_asynch *a_req;

	if (count == 0)
		return 0;

	a_req = env_zalloc(sizeof(*a_req), ENV_MEM_NOIO);
	if (!a_req)
		return -OCF_ERR_NO_MEM;

	env_atomic_set(&a_req->req_remaining, io_count);
	env_atomic_set(&a_req->req_active, io_count);
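	/*
	 * req_remaining counts sub-IOs that must still complete before the
	 * completion callback is called; req_active is consulted before the
	 * request array is freed (see the error handling at the end of this
	 * function).
	 */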
	a_req->on_complete = compl_hndl;
	a_req->context = context;
	a_req->page = page;

	/* Allocate particular requests and initialize them */
	OCF_REALLOC_CP(&a_req->reqs, sizeof(a_req->reqs[0]),
			io_count, &a_req->reqs_limit);
	if (!a_req->reqs) {
		env_free(a_req);
		ocf_cache_log(cache, log_warn,
				"No memory during metadata IO\n");
		return -OCF_ERR_NO_MEM;
	}
	/* IO Requests initialization */
	for (i = 0; i < io_count; i++) {
		env_atomic_set(&(a_req->reqs[i].req_remaining), 1);
		env_atomic_set(&(a_req->reqs[i].finished), 0);
		a_req->reqs[i].asynch = a_req;
	}

	OCF_DEBUG_PARAM(cache, "IO count = %u", io_count);

	i = 0;
	written = 0;
	while (count) {
		/* Get pages count of this IO iteration */
		if (count > max_count)
			curr_count = max_count;
		else
			curr_count = count;

		/* Fill request */
		a_req->reqs[i].cache = cache;
		a_req->reqs[i].context = context;
		a_req->reqs[i].page = page + written;
		a_req->reqs[i].count = curr_count;
		a_req->reqs[i].on_meta_fill = io_hndl;
		a_req->reqs[i].on_meta_drain = io_hndl;
		a_req->reqs[i].fl_req.io_if = &meta_restart_if;
		a_req->reqs[i].fl_req.io_queue = queue;
		a_req->reqs[i].fl_req.cache = cache;
		a_req->reqs[i].fl_req.priv = &a_req->reqs[i];
		a_req->reqs[i].fl_req.info.internal = true;
		a_req->reqs[i].fl_req.rw = dir;

		/*
		 * We don't want to allocate a map for this request in
		 * threads.
		 */
		a_req->reqs[i].fl_req.map = LIST_POISON1;

		INIT_LIST_HEAD(&a_req->reqs[i].list);

		a_req->reqs[i].data = ctx_data_alloc(cache->owner, curr_count);
		if (!a_req->reqs[i].data) {
			error = -OCF_ERR_NO_MEM;
			metadata_io_req_error(cache, a_req, i, error);
			break;
		}

		/* Issue IO if it is not overlapping with anything else */
		ret = metadata_updater_check_overlaps(cache, &a_req->reqs[i]);
		if (ret == 0) {
			/* Allocate new IO */
			io = ocf_new_cache_io(cache);
			if (!io) {
				error = -OCF_ERR_NO_MEM;
				metadata_io_req_error(cache, a_req, i, error);
				break;
			}

			if (dir == OCF_WRITE)
				metadata_io_req_fill(&a_req->reqs[i]);

			/* Setup IO */
			ocf_io_configure(io,
					PAGES_TO_BYTES(a_req->reqs[i].page),
					PAGES_TO_BYTES(a_req->reqs[i].count),
					dir, 0, 0);

			ocf_io_set_cmpl(io, &a_req->reqs[i], NULL,
					metadata_io_i_asynch_cmpl);
			error = ocf_io_set_data(io, a_req->reqs[i].data, 0);
			if (error) {
				ocf_io_put(io);
				metadata_io_req_error(cache, a_req, i, error);
				break;
			}

			ocf_volume_submit_io(io);
		}

		count -= curr_count;
		written += curr_count;
		i++;
	}

	if (error == 0) {
		/* No error, return 0 to indicate the operation succeeded */
		return 0;
	}

	OCF_DEBUG_MSG(cache, "ERROR");

	if (i == 0) {
		/*
		 * If no requests were submitted, just call the completion
		 * callback, free the memory and return the error.
		 */
		compl_hndl(cache, context, error);

		OCF_REALLOC_DEINIT(&a_req->reqs, &a_req->reqs_limit);
		env_free(a_req);

		return error;
	}

	/*
	 * Decrement total remaining requests by the IOs that were not
	 * triggered. If we reached zero, we need to call the completion
	 * callback.
	 */
	if (env_atomic_sub_return(io_count - i, &a_req->req_remaining) == 0)
		compl_hndl(cache, context, error);

	/*
	 * Decrement total active requests by the IOs that were not triggered.
	 * If we reached zero, we need to free memory.
	 */
	if (env_atomic_sub_return(io_count - i, &a_req->req_active) == 0) {
		OCF_REALLOC_DEINIT(&a_req->reqs, &a_req->reqs_limit);
		env_free(a_req);
	}

	return error;
}

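/*
 * Illustrative usage sketch (not part of OCF): a caller provides a handler
 * that fills (for writes) or drains (for reads) one metadata page at a time,
 * plus a completion handler. As seen at the call sites in
 * metadata_io_req_fill()/metadata_io_req_drain() above, the page handler
 * receives (cache, data, page, context) and the completion receives
 * (cache, context, error). The names example_fill and example_complete below
 * are hypothetical.
 *
 *	metadata_io_write_i_asynch(cache, queue, ctx, first_page, nr_pages,
 *			example_fill, example_complete);
 */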
int metadata_io_write_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
		void *context, uint32_t page, uint32_t count,
		ocf_metadata_io_event_t fill_hndl,
		ocf_metadata_io_end_t compl_hndl)
{
	return metadata_io_i_asynch(cache, queue, OCF_WRITE, context,
			page, count, fill_hndl, compl_hndl);
}

int metadata_io_read_i_asynch(ocf_cache_t cache, ocf_queue_t queue,
		void *context, uint32_t page, uint32_t count,
		ocf_metadata_io_event_t drain_hndl,
		ocf_metadata_io_end_t compl_hndl)
{
	return metadata_io_i_asynch(cache, queue, OCF_READ, context,
			page, count, drain_hndl, compl_hndl);
}

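/*
 * Metadata IO delegates its init/deinit to the metadata updater, which is
 * used above to defer overlapping metadata requests.
 */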
int ocf_metadata_io_init(ocf_cache_t cache)
{
	return ocf_metadata_updater_init(cache);
}

void ocf_metadata_io_deinit(ocf_cache_t cache)
{
	ocf_metadata_updater_stop(cache);
}