]> git.proxmox.com Git - ceph.git/blob - ceph/src/spdk/ocf/src/metadata/metadata_raw_dynamic.c
import 15.2.0 Octopus source
[ceph.git] / ceph / src / spdk / ocf / src / metadata / metadata_raw_dynamic.c
1 /*
2 * Copyright(c) 2012-2018 Intel Corporation
3 * SPDX-License-Identifier: BSD-3-Clause-Clear
4 */
5
6 #include "metadata.h"
7 #include "metadata_hash.h"
8 #include "metadata_raw.h"
9 #include "metadata_raw_dynamic.h"
10 #include "metadata_io.h"
11 #include "../engine/cache_engine.h"
12 #include "../engine/engine_common.h"
13 #include "../utils/utils_io.h"
14 #include "../utils/utils_req.h"
15 #include "../ocf_def_priv.h"
16
/* Compile-time switch for verbose tracing of this container (0 = off) */
#define OCF_METADATA_RAW_DEBUG 0

#if 1 == OCF_METADATA_RAW_DEBUG
/* Log entry into a function of this module */
#define OCF_DEBUG_TRACE(cache) \
	ocf_cache_log(cache, log_info, "[Metadata][Volatile] %s\n", __func__)

/* Log a formatted parameter trace, prefixed with the function name */
#define OCF_DEBUG_PARAM(cache, format, ...) \
	ocf_cache_log(cache, log_info, "[Metadata][Volatile] %s - "format"\n", \
			__func__, ##__VA_ARGS__)
#else
/* Debug disabled: both macros expand to nothing */
#define OCF_DEBUG_TRACE(cache)
#define OCF_DEBUG_PARAM(cache, format, ...)
#endif
30
31 /*******************************************************************************
32 * Common RAW Implementation
33 ******************************************************************************/
34
35 /*
36 * Check if page is valid for specified RAW descriptor
37 */
38 static bool _raw_ssd_page_is_valid(struct ocf_metadata_raw *raw, uint32_t page)
39 {
40 ENV_BUG_ON(page < raw->ssd_pages_offset);
41 ENV_BUG_ON(page >= (raw->ssd_pages_offset + raw->ssd_pages));
42
43 return true;
44 }
45
46 /*******************************************************************************
47 * RAW dynamic Implementation
48 ******************************************************************************/
49
/*
 * Index of the page that stores the metadata entry for cache line @line.
 * Every macro argument is fully parenthesized so callers may safely pass
 * compound expressions (e.g. "base + i").
 */
#define _RAW_DYNAMIC_PAGE(raw, line) \
		((line) / (raw)->entries_in_page)

/*
 * Byte offset of cache line @line's entry within its page. Parenthesizing
 * (line) here is a real fix: the old expansion applied '%' before a
 * caller-supplied '+' due to operator precedence.
 */
#define _RAW_DYNAMIC_PAGE_OFFSET(raw, line) \
		(((line) % (raw)->entries_in_page) * (raw)->entry_size)
55
/*
 * RAW DYNAMIC control structure, stored in raw->priv.
 */
struct _raw_ctrl {
	/* Serializes on-demand page allocation in _raw_dynamic_get_item() */
	env_mutex lock;
	/* Number of pages currently materialized (used for size accounting) */
	env_atomic count;
	/* One slot per SSD page; NULL means the page was never allocated */
	void *pages[];
};
64
/*
 * Return a pointer to the metadata entry for @line, materializing its
 * backing page on first access. Returns NULL when page allocation fails.
 * The allocation path must not run in atomic/interrupt context.
 */
static void *_raw_dynamic_get_item(ocf_cache_t cache,
		struct ocf_metadata_raw *raw, ocf_cache_line_t line, uint32_t size)
{
	void *new = NULL;
	struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
	uint32_t page = _RAW_DYNAMIC_PAGE(raw, line);

	ENV_BUG_ON(!_raw_is_valid(raw, line, size));

	OCF_DEBUG_PARAM(cache, "Accessing item %u on page %u", line, page);

	if (!ctrl->pages[page]) {
		/* No page, allocate one, and set*/

		/* This RAW container has some restrictions and need to check
		 * this limitation:
		 * 1. no atomic context when allocation
		 * 2. Only one allocator in time
		 */

		ENV_BUG_ON(env_in_interrupt());

		env_mutex_lock(&ctrl->lock);

		/* Re-check under the lock: another caller may have won the
		 * race and installed the page already (double-checked
		 * locking pattern). */
		if (ctrl->pages[page]) {
			/* Page has been already allocated, skip allocation */
			goto _raw_dynamic_get_item_SKIP;
		}

		OCF_DEBUG_PARAM(cache, "New page allocation - %u", page);

		new = env_secure_alloc(PAGE_SIZE);
		if (new) {
			/* Zero-fill before publishing the page pointer */
			ENV_BUG_ON(env_memset(new, PAGE_SIZE, 0));
			ctrl->pages[page] = new;
			env_atomic_inc(&ctrl->count);
		}

	_raw_dynamic_get_item_SKIP:

		env_mutex_unlock(&ctrl->lock);
	}

	/* Allocation may have failed above; fall through to NULL in that case */
	if (ctrl->pages[page])
		return ctrl->pages[page] + _RAW_DYNAMIC_PAGE_OFFSET(raw, line);

	return NULL;
}
113
114 /*
115 * RAM DYNAMIC Implementation - De-Initialize
116 */
117 int raw_dynamic_deinit(ocf_cache_t cache,
118 struct ocf_metadata_raw *raw)
119 {
120 uint32_t i;
121 struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
122
123 if (!ctrl)
124 return 0;
125
126 OCF_DEBUG_TRACE(cache);
127
128 for (i = 0; i < raw->ssd_pages; i++)
129 env_secure_free(ctrl->pages[i], PAGE_SIZE);
130
131 env_vfree(ctrl);
132 raw->priv = NULL;
133
134 return 0;
135 }
136
137 /*
138 * RAM DYNAMIC Implementation - Initialize
139 */
140 int raw_dynamic_init(ocf_cache_t cache,
141 struct ocf_metadata_raw *raw)
142 {
143 struct _raw_ctrl *ctrl;
144 size_t size = sizeof(*ctrl) + (sizeof(ctrl->pages[0]) * raw->ssd_pages);
145
146 OCF_DEBUG_TRACE(cache);
147
148 if (raw->entry_size > PAGE_SIZE)
149 return -1;
150
151 ctrl = env_vmalloc(size);
152 if (!ctrl)
153 return -1;
154
155 ENV_BUG_ON(env_memset(ctrl, size, 0));
156
157 if (env_mutex_init(&ctrl->lock)) {
158 env_vfree(ctrl);
159 return -1;
160 }
161
162 raw->priv = ctrl;
163
164 return 0;
165 }
166
167 /*
168 * RAW DYNAMIC Implementation - Size of
169 */
170 size_t raw_dynamic_size_of(ocf_cache_t cache,
171 struct ocf_metadata_raw *raw)
172 {
173 struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
174 size_t size;
175
176 /* Size of allocated items */
177 size = env_atomic_read(&ctrl->count);
178 size *= PAGE_SIZE;
179
180 /* Size of control structure */
181 size += sizeof(*ctrl) + (sizeof(ctrl->pages[0]) * raw->ssd_pages);
182
183 OCF_DEBUG_PARAM(cache, "Count = %d, Size = %lu",
184 env_atomic_read(&ctrl->count), size);
185
186 return size;
187 }
188
189 /*
190 * RAW DYNAMIC Implementation - Size on SSD
191 */
192 uint32_t raw_dynamic_size_on_ssd(struct ocf_metadata_raw *raw)
193 {
194 const size_t alignment = 128 * KiB / PAGE_SIZE;
195
196 return OCF_DIV_ROUND_UP(raw->ssd_pages, alignment) * alignment;
197 }
198
199 /*
200 * RAM DYNAMIC Implementation - Checksum
201 */
202 uint32_t raw_dynamic_checksum(ocf_cache_t cache,
203 struct ocf_metadata_raw *raw)
204 {
205 struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
206 uint64_t i;
207 uint32_t step = 0;
208 uint32_t crc = 0;
209
210 for (i = 0; i < raw->ssd_pages; i++) {
211 if (ctrl->pages[i])
212 crc = env_crc32(crc, ctrl->pages[i], PAGE_SIZE);
213 OCF_COND_RESCHED(step, 10000);
214 }
215
216 return crc;
217 }
218
219 /*
220 * RAM DYNAMIC Implementation - Get
221 */
222 int raw_dynamic_get(ocf_cache_t cache,
223 struct ocf_metadata_raw *raw, ocf_cache_line_t line,
224 void *data, uint32_t size)
225 {
226 void *item = _raw_dynamic_get_item(cache, raw, line, size);
227
228 if (!item) {
229 ENV_BUG_ON(env_memset(data, size, 0));
230 ocf_metadata_error(cache);
231 return -1;
232 }
233
234 return env_memcpy(data, size, item, size);
235 }
236
237 /*
238 * RAM DYNAMIC Implementation - Set
239 */
240 int raw_dynamic_set(ocf_cache_t cache,
241 struct ocf_metadata_raw *raw, ocf_cache_line_t line,
242 void *data, uint32_t size)
243 {
244 void *item = _raw_dynamic_get_item(cache, raw, line, size);
245
246 if (!item) {
247 ocf_metadata_error(cache);
248 return -1;
249 }
250
251 return env_memcpy(item, size, data, size);
252 }
253
254 /*
255 * RAM DYNAMIC Implementation - access
256 */
257 const void *raw_dynamic_rd_access(ocf_cache_t cache,
258 struct ocf_metadata_raw *raw, ocf_cache_line_t line,
259 uint32_t size)
260 {
261 return _raw_dynamic_get_item(cache, raw, line, size);
262 }
263
264 /*
265 * RAM DYNAMIC Implementation - access
266 */
267 void *raw_dynamic_wr_access(ocf_cache_t cache,
268 struct ocf_metadata_raw *raw, ocf_cache_line_t line,
269 uint32_t size)
270 {
271 return _raw_dynamic_get_item(cache, raw, line, size);
272 }
273
/*
 * RAW DYNAMIC Implementation - Load all
 */

/* Number of metadata pages read from the cache volume per IO batch */
#define RAW_DYNAMIC_LOAD_PAGES 128

/* State carried across the read/update iterations of the load-all flow */
struct raw_dynamic_load_all_context {
	struct ocf_metadata_raw *raw;	/* RAW container being loaded */
	struct ocf_request *req;	/* request driving the state machine */
	ocf_cache_t cache;
	struct ocf_io *io;		/* IO of the current batch */
	ctx_data_t *data;		/* shared batch data buffer */
	uint8_t *zpage;			/* all-zero reference page for memcmp */
	uint8_t *page;			/* spare page buffer, reused until handed off */
	uint64_t i;			/* index of the next SSD page to consume */
	int error;

	ocf_metadata_end_t cmpl;	/* user completion callback */
	void *priv;			/* user callback context */
};
293
/*
 * Finish the load-all flow: report @error to the user, then release every
 * resource owned by the context (request, spare page buffer, zero page,
 * batch data buffer, and finally the context itself).
 */
static void raw_dynamic_load_all_complete(
		struct raw_dynamic_load_all_context *context, int error)
{
	context->cmpl(context->priv, error);

	ocf_req_put(context->req);
	/* context->page is NULL here if its ownership was transferred to
	 * ctrl->pages[] during the update step */
	env_secure_free(context->page, PAGE_SIZE);
	env_free(context->zpage);
	ctx_data_free(context->cache->owner, context->data);
	env_vfree(context);
}
305
static int raw_dynamic_load_all_update(struct ocf_request *req);

/* IO interface that routes the request into the "update" (parse) step */
static const struct ocf_io_if _io_if_raw_dynamic_load_all_update = {
	.read = raw_dynamic_load_all_update,
	.write = raw_dynamic_load_all_update,
};
312
/*
 * Completion callback of one batch read. On success the request is
 * re-armed with the "update" interface so the freshly read pages get
 * parsed; on error the whole flow is torn down.
 */
static void raw_dynamic_load_all_read_end(struct ocf_io *io, int error)
{
	struct raw_dynamic_load_all_context *context = io->priv1;

	/* Drop the reference taken when the IO was allocated */
	ocf_io_put(io);

	if (error) {
		raw_dynamic_load_all_complete(context, error);
		return;
	}

	context->req->io_if = &_io_if_raw_dynamic_load_all_update;
	ocf_engine_push_req_front(context->req, true);
}
327
/*
 * Issue one batch read of up to RAW_DYNAMIC_LOAD_PAGES metadata pages,
 * starting at the next unconsumed SSD page (context->i). Any setup
 * failure completes the whole flow immediately.
 */
static int raw_dynamic_load_all_read(struct ocf_request *req)
{
	struct raw_dynamic_load_all_context *context = req->priv;
	struct ocf_metadata_raw *raw = context->raw;
	uint64_t count;
	int result;

	/* The final batch may be shorter than the full window */
	count = OCF_MIN(RAW_DYNAMIC_LOAD_PAGES, raw->ssd_pages - context->i);

	/* Allocate IO */
	context->io = ocf_new_cache_io(context->cache);
	if (!context->io) {
		raw_dynamic_load_all_complete(context, -OCF_ERR_NO_MEM);
		return 0;
	}

	/* Setup IO */
	result = ocf_io_set_data(context->io, context->data, 0);
	if (result) {
		ocf_io_put(context->io);
		raw_dynamic_load_all_complete(context, result);
		return 0;
	}
	ocf_io_configure(context->io,
			PAGES_TO_BYTES(raw->ssd_pages_offset + context->i),
			PAGES_TO_BYTES(count), OCF_READ, 0, 0);

	ocf_io_set_queue(context->io, req->io_queue);
	ocf_io_set_cmpl(context->io, context, NULL,
			raw_dynamic_load_all_read_end);

	/* Submit IO */
	ocf_volume_submit_io(context->io);

	return 0;
}
364
/* IO interface that routes the request into the "read" (fetch) step */
static const struct ocf_io_if _io_if_raw_dynamic_load_all_read = {
	.read = raw_dynamic_load_all_read,
	.write = raw_dynamic_load_all_read,
};
369
370 static int raw_dynamic_load_all_update(struct ocf_request *req)
371 {
372 struct raw_dynamic_load_all_context *context = req->priv;
373 struct ocf_metadata_raw *raw = context->raw;
374 struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
375 ocf_cache_t cache = context->cache;
376 uint64_t count = BYTES_TO_PAGES(context->io->bytes);
377 uint64_t i_page;
378 int result = 0;
379 int cmp;
380
381 /* Reset head of data buffer */
382 ctx_data_seek_check(context->cache->owner, context->data,
383 ctx_data_seek_begin, 0);
384
385 for (i_page = 0; i_page < count; i_page++, context->i++) {
386 if (!context->page) {
387 context->page = env_secure_alloc(PAGE_SIZE);
388 if (!context->page) {
389 /* Allocation error */
390 result = -OCF_ERR_NO_MEM;
391 break;
392 }
393 }
394
395 ctx_data_rd_check(cache->owner, context->page,
396 context->data, PAGE_SIZE);
397
398 result = env_memcmp(context->zpage, PAGE_SIZE, context->page,
399 PAGE_SIZE, &cmp);
400 if (result)
401 break;
402
403 /* When page is zero set, no need to allocate space for it */
404 if (cmp == 0) {
405 OCF_DEBUG_PARAM(cache, "Zero loaded %llu", i);
406 continue;
407 }
408
409 OCF_DEBUG_PARAM(cache, "Non-zero loaded %llu", i);
410
411 ctrl->pages[context->i] = context->page;
412 context->page = NULL;
413
414 env_atomic_inc(&ctrl->count);
415 }
416
417 if (result || context->i >= raw->ssd_pages) {
418 raw_dynamic_load_all_complete(context, result);
419 return 0;
420 }
421
422 context->req->io_if = &_io_if_raw_dynamic_load_all_read;
423 ocf_engine_push_req_front(context->req, true);
424
425 return 0;
426 }
427
/*
 * Kick off the asynchronous load of the whole RAW container from the
 * cache volume. Allocates the shared context and its resources, then
 * pushes the first "read" step onto the management queue. @cmpl is
 * invoked exactly once with the final status.
 */
void raw_dynamic_load_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
		ocf_metadata_end_t cmpl, void *priv)
{
	struct raw_dynamic_load_all_context *context;
	int result;

	OCF_DEBUG_TRACE(cache);

	context = env_vzalloc(sizeof(*context));
	if (!context) {
		cmpl(priv, -OCF_ERR_NO_MEM);
		return;
	}

	context->raw = raw;
	context->cache = cache;
	context->cmpl = cmpl;
	context->priv = priv;

	/* Batch buffer reused by every read iteration */
	context->data = ctx_data_alloc(cache->owner, RAW_DYNAMIC_LOAD_PAGES);
	if (!context->data) {
		result = -OCF_ERR_NO_MEM;
		goto err_data;
	}

	/* All-zero reference page used to detect pages needing no RAM */
	context->zpage = env_zalloc(PAGE_SIZE, ENV_MEM_NORMAL);
	if (!context->zpage) {
		result = -OCF_ERR_NO_MEM;
		goto err_zpage;
	}

	context->req = ocf_req_new(cache->mngt_queue, NULL, 0, 0, 0);
	if (!context->req) {
		result = -OCF_ERR_NO_MEM;
		goto err_req;
	}

	context->req->info.internal = true;
	context->req->priv = context;
	context->req->io_if = &_io_if_raw_dynamic_load_all_read;

	ocf_engine_push_req_front(context->req, true);
	return;

	/* Unwind in reverse order of acquisition */
err_req:
	env_free(context->zpage);
err_zpage:
	ctx_data_free(cache->owner, context->data);
err_data:
	env_vfree(context);
	cmpl(priv, result);
}
480
/*
 * RAW DYNAMIC Implementation - Flush all
 */

/* Completion plumbing for the asynchronous flush-all operation */
struct raw_dynamic_flush_all_context {
	struct ocf_metadata_raw *raw;	/* RAW container being flushed */
	ocf_metadata_end_t cmpl;	/* user completion callback */
	void *priv;			/* user callback context */
};
490
491 /*
492 * RAM Implementation - Flush IO callback - Fill page
493 */
494 static int raw_dynamic_flush_all_fill(ocf_cache_t cache,
495 ctx_data_t *data, uint32_t page, void *priv)
496 {
497 struct raw_dynamic_flush_all_context *context = priv;
498 struct ocf_metadata_raw *raw = context->raw;
499 struct _raw_ctrl *ctrl = (struct _raw_ctrl *)raw->priv;
500 uint32_t raw_page;
501
502 ENV_BUG_ON(!_raw_ssd_page_is_valid(raw, page));
503
504 raw_page = page - raw->ssd_pages_offset;
505
506 if (ctrl->pages[raw_page]) {
507 OCF_DEBUG_PARAM(cache, "Page = %u", raw_page);
508 ctx_data_wr_check(cache->owner, data, ctrl->pages[raw_page],
509 PAGE_SIZE);
510 } else {
511 OCF_DEBUG_PARAM(cache, "Zero fill, Page = %u", raw_page);
512 /* Page was not allocated before set only zeros */
513 ctx_data_zero_check(cache->owner, data, PAGE_SIZE);
514 }
515
516 return 0;
517 }
518
/* Forward the flush result to the user callback and drop the context */
static void raw_dynamic_flush_all_complete(ocf_cache_t cache,
		void *priv, int error)
{
	struct raw_dynamic_flush_all_context *context = priv;

	context->cmpl(context->priv, error);
	env_vfree(context);
}
527
528 void raw_dynamic_flush_all(ocf_cache_t cache, struct ocf_metadata_raw *raw,
529 ocf_metadata_end_t cmpl, void *priv)
530 {
531 struct raw_dynamic_flush_all_context *context;
532 int result;
533
534 OCF_DEBUG_TRACE(cache);
535
536 context = env_vmalloc(sizeof(*context));
537 if (!context) {
538 cmpl(priv, -OCF_ERR_NO_MEM);
539 return;
540 }
541
542 context->raw = raw;
543 context->cmpl = cmpl;
544 context->priv = priv;
545
546 result = metadata_io_write_i_asynch(cache, cache->mngt_queue, context,
547 raw->ssd_pages_offset, raw->ssd_pages,
548 raw_dynamic_flush_all_fill,
549 raw_dynamic_flush_all_complete);
550 if (result)
551 cmpl(priv, result);
552 }
553
/*
 * RAM DYNAMIC Implementation - Mark to Flush
 *
 * Not supported for the dynamic (volatile) container — reaching this
 * path is a programming error, hence the unconditional bug-out.
 */
void raw_dynamic_flush_mark(ocf_cache_t cache, struct ocf_request *req,
		uint32_t map_idx, int to_state, uint8_t start, uint8_t stop)
{
	ENV_BUG();
}
562
/*
 * RAM DYNAMIC Implementation - Do flushing asynchronously
 *
 * Not supported for the dynamic (volatile) container — bugs out
 * unconditionally; the -ENOSYS return only satisfies the signature.
 */
int raw_dynamic_flush_do_asynch(ocf_cache_t cache, struct ocf_request *req,
		struct ocf_metadata_raw *raw, ocf_req_end_t complete)
{
	ENV_BUG();
	return -ENOSYS;
}