/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#include "ocf/ocf.h"
#include "ocf_mngt_common.h"
#include "ocf_mngt_core_priv.h"
#include "../ocf_priv.h"
#include "../ocf_core_priv.h"
#include "../ocf_queue_priv.h"
#include "../metadata/metadata.h"
#include "../engine/cache_engine.h"
#include "../utils/utils_part.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_io.h"
#include "../utils/utils_pipeline.h"
#include "../utils/utils_refcnt.h"
#include "../utils/utils_async_lock.h"
#include "../concurrency/ocf_concurrency.h"
#include "../eviction/ops.h"
#include "../ocf_ctx_priv.h"
#include "../ocf_freelist.h"
#include "../cleaning/cleaning.h"
#include "../promotion/ops.h"
#define OCF_ASSERT_PLUGGED(cache) ENV_BUG_ON(!(cache)->device)

#define DIRTY_SHUTDOWN_ERROR_MSG "Please use --load option to restore " \
	"previous cache state (Warning: data corruption may happen)" \
	"\nOr initialize your cache using --force option. " \
	"Warning: All dirty data will be lost!\n"

#define DIRTY_NOT_FLUSHED_ERROR_MSG "Cache closed w/ no data flushing\n" \
	"Restart with --load or --force option\n"
/**
 * @brief Helper structure carrying cache start parameters
 */
struct ocf_cache_mngt_init_params {
	bool metadata_volatile;

	ocf_ctx_t ctx;
	/*!< OCF context */

	ocf_cache_t cache;
	/*!< cache that is being initialized */

	uint8_t locked;
	/*!< Keep cache locked */

	/**
	 * @brief initialization state (in case of error, it is used to know
	 * which assets have to be deallocated on premature exit from function)
	 */
	struct {
		bool cache_alloc : 1;
		/*!< cache is allocated and added to list */

		bool metadata_inited : 1;
		/*!< Metadata is inited to valid state */

		bool added_to_list : 1;
		/*!< Cache is added to context list */

		bool cache_locked : 1;
		/*!< Cache has been locked */
	} flags;

	struct ocf_metadata_init_params {
		ocf_cache_line_size_t line_size;
		/*!< Metadata cache line size */

		ocf_metadata_layout_t layout;
		/*!< Metadata layout (striping/sequential) */

		ocf_cache_mode_t cache_mode;
		/*!< cache mode */

		ocf_promotion_t promotion_policy;
	} metadata;
};

typedef void (*_ocf_mngt_cache_attach_end_t)(ocf_cache_t, void *priv1,
		void *priv2, int error);

struct ocf_cache_attach_context {
	ocf_cache_t cache;
	/*!< cache that is being initialized */

	struct ocf_mngt_cache_device_config cfg;

	uint64_t volume_size;
	/*!< size of the device in cache lines */

	enum ocf_mngt_cache_init_mode init_mode;
	/*!< cache init mode */

	/**
	 * @brief initialization state (in case of error, it is used to know
	 * which assets have to be deallocated on premature exit from function)
	 */
	struct {
		bool device_alloc : 1;
		/*!< data structure allocated */

		bool volume_inited : 1;
		/*!< uuid for cache device is allocated */

		bool attached_metadata_inited : 1;
		/*!< attached metadata sections initialized */

		bool device_opened : 1;
		/*!< underlying device volume is open */

		bool cleaner_started : 1;
		/*!< Cleaner has been started */

		bool promotion_initialized : 1;
		/*!< Promotion policy has been started */

		bool cores_opened : 1;
		/*!< underlying cores are opened (happens only during
		 * load or recovery)
		 */

		bool freelist_inited : 1;

		bool concurrency_inited : 1;
	} flags;

	struct {
		ocf_cache_line_size_t line_size;
		/*!< Metadata cache line size */

		ocf_metadata_layout_t layout;
		/*!< Metadata layout (striping/sequential) */

		ocf_cache_mode_t cache_mode;
		/*!< cache mode */

		enum ocf_metadata_shutdown_status shutdown_status;
		/*!< dirty or clean */

		uint8_t dirty_flushed;
		/*!< is dirty data fully flushed */

		int status;
		/*!< metadata retrieval status (nonzero is a sign of an error
		 * during recovery/load, but is a non-issue in case of clean init)
		 */
	} metadata;

	struct {
		void *rw_buffer;
		void *cmp_buffer;
		unsigned long reserved_lba_addr;
		ocf_pipeline_t pipeline;
	} test;

	_ocf_mngt_cache_attach_end_t cmpl;
	void *priv1;
	void *priv2;

	ocf_pipeline_t pipeline;
};

static void __init_partitions(ocf_cache_t cache)
{
	ocf_part_id_t i_part;

	/* Init default partition */
	ENV_BUG_ON(ocf_mngt_add_partition_to_cache(cache, PARTITION_DEFAULT,
			"unclassified", 0, PARTITION_SIZE_MAX,
			OCF_IO_CLASS_PRIO_LOWEST, true));

	/* Add the remaining partitions to the cache as dummy (inactive) ones */
	for (i_part = 0; i_part < OCF_IO_CLASS_MAX; i_part++) {
		ocf_refcnt_freeze(&cache->refcnt.cleaning[i_part]);

		if (i_part == PARTITION_DEFAULT)
			continue;

		/* Init non-default partition as inactive */
		ENV_BUG_ON(ocf_mngt_add_partition_to_cache(cache, i_part,
				"Inactive", 0, PARTITION_SIZE_MAX,
				OCF_IO_CLASS_PRIO_LOWEST, false));
	}
}

static void __init_partitions_attached(ocf_cache_t cache)
{
	ocf_part_id_t part_id;

	for (part_id = 0; part_id < OCF_IO_CLASS_MAX; part_id++) {
		cache->user_parts[part_id].runtime->head =
				cache->device->collision_table_entries;
		cache->user_parts[part_id].runtime->curr_size = 0;

		ocf_eviction_initialize(cache, part_id);
	}
}

static void __init_freelist(ocf_cache_t cache)
{
	uint64_t free_clines = ocf_metadata_collision_table_entries(cache) -
			ocf_get_cache_occupancy(cache);

	ocf_freelist_populate(cache->freelist, free_clines);
}

static ocf_error_t __init_cleaning_policy(ocf_cache_t cache)
{
	ocf_cleaning_t cleaning_policy = ocf_cleaning_default;
	int i;
	ocf_error_t result = 0;

	OCF_ASSERT_PLUGGED(cache);

	for (i = 0; i < ocf_cleaning_max; i++) {
		if (cleaning_policy_ops[i].setup)
			cleaning_policy_ops[i].setup(cache);
	}

	cache->conf_meta->cleaning_policy_type = ocf_cleaning_default;
	if (cleaning_policy_ops[cleaning_policy].initialize)
		result = cleaning_policy_ops[cleaning_policy].initialize(cache, 1);

	return result;
}

static void __deinit_cleaning_policy(ocf_cache_t cache)
{
	ocf_cleaning_t cleaning_policy;

	cleaning_policy = cache->conf_meta->cleaning_policy_type;
	if (cleaning_policy_ops[cleaning_policy].deinitialize)
		cleaning_policy_ops[cleaning_policy].deinitialize(cache);
}

static void __init_eviction_policy(ocf_cache_t cache,
		ocf_eviction_t eviction)
{
	ENV_BUG_ON(eviction < 0 || eviction >= ocf_eviction_max);

	cache->conf_meta->eviction_policy_type = eviction;
}

static void __setup_promotion_policy(ocf_cache_t cache)
{
	int i;

	OCF_CHECK_NULL(cache);

	for (i = 0; i < ocf_promotion_max; i++) {
		if (ocf_promotion_policies[i].setup)
			ocf_promotion_policies[i].setup(cache);
	}
}

static void __deinit_promotion_policy(ocf_cache_t cache)
{
	ocf_promotion_deinit(cache->promotion_policy);
	cache->promotion_policy = NULL;
}

static void __init_cores(ocf_cache_t cache)
{
	/* No core devices yet */
	cache->conf_meta->core_count = 0;
	ENV_BUG_ON(env_memset(cache->conf_meta->valid_core_bitmap,
			sizeof(cache->conf_meta->valid_core_bitmap), 0));
}

static void __init_metadata_version(ocf_cache_t cache)
{
	cache->conf_meta->metadata_version = METADATA_VERSION();
}

static void __reset_stats(ocf_cache_t cache)
{
	ocf_core_t core;
	ocf_core_id_t core_id;
	ocf_part_id_t i;

	for_each_core_all(cache, core, core_id) {
		env_atomic_set(&core->runtime_meta->cached_clines, 0);
		env_atomic_set(&core->runtime_meta->dirty_clines, 0);
		env_atomic64_set(&core->runtime_meta->dirty_since, 0);

		for (i = 0; i != OCF_IO_CLASS_MAX; i++) {
			env_atomic_set(&core->runtime_meta->
					part_counters[i].cached_clines, 0);
			env_atomic_set(&core->runtime_meta->
					part_counters[i].dirty_clines, 0);
		}
	}
}

static ocf_error_t init_attached_data_structures(ocf_cache_t cache,
		ocf_eviction_t eviction_policy)
{
	ocf_error_t result;

	/* Lock to ensure consistency */

	ocf_metadata_init_hash_table(cache);
	ocf_metadata_init_collision(cache);
	__init_partitions_attached(cache);
	__init_freelist(cache);

	result = __init_cleaning_policy(cache);
	if (result) {
		ocf_cache_log(cache, log_err,
				"Cannot initialize cleaning policy\n");
		return result;
	}

	__init_eviction_policy(cache, eviction_policy);
	__setup_promotion_policy(cache);

	return 0;
}

static void init_attached_data_structures_recovery(ocf_cache_t cache)
{
	ocf_metadata_init_hash_table(cache);
	ocf_metadata_init_collision(cache);
	__init_partitions_attached(cache);
	__reset_stats(cache);
	__init_metadata_version(cache);
}

/****************************************************************
 * Function for removing all uninitialized core objects         *
 * from the cache instance.                                     *
 * Used in case of cache initialization errors.                 *
 ****************************************************************/
static void _ocf_mngt_close_all_uninitialized_cores(
		ocf_cache_t cache)
{
	ocf_volume_t volume;
	int j, i;

	for (j = cache->conf_meta->core_count, i = 0; j > 0; ++i) {
		if (!env_bit_test(i, cache->conf_meta->valid_core_bitmap))
			continue;

		volume = &(cache->core[i].volume);
		ocf_volume_close(volume);

		--j;

		env_free(cache->core[i].counters);
		cache->core[i].counters = NULL;

		env_bit_clear(i, cache->conf_meta->valid_core_bitmap);
	}

	cache->conf_meta->core_count = 0;
}

/**
 * @brief routine loading metadata from cache device
 *  - attempts to open all the underlying cores
 */
static int _ocf_mngt_init_instance_add_cores(
		struct ocf_cache_attach_context *context)
{
	ocf_cache_t cache = context->cache;
	ocf_core_t core;
	ocf_core_id_t core_id;
	int ret = -1;
	uint64_t hd_lines = 0;

	OCF_ASSERT_PLUGGED(cache);

	/* Count value will be re-calculated on the basis of 'valid' flag */
	cache->conf_meta->core_count = 0;

	/* Check in metadata which cores were saved in cache metadata */
	for_each_core_metadata(cache, core, core_id) {
		ocf_volume_t tvolume = NULL;

		if (!core->volume.type)
			goto err;

		tvolume = ocf_mngt_core_pool_lookup(ocf_cache_get_ctx(cache),
				&core->volume.uuid, core->volume.type);
		if (tvolume) {
			/*
			 * Attach bottom device to core structure
			 * in cache
			 */
			ocf_volume_move(&core->volume, tvolume);
			ocf_mngt_core_pool_remove(cache->owner, tvolume);

			core->opened = true;
			ocf_cache_log(cache, log_info,
					"Attached core %u from pool\n",
					core_id);
		} else if (context->cfg.open_cores) {
			ret = ocf_volume_open(&core->volume, NULL);
			if (ret == -OCF_ERR_NOT_OPEN_EXC) {
				ocf_cache_log(cache, log_warn,
						"Cannot open core %u. "
						"Cache is busy", core_id);
			} else if (ret) {
				ocf_cache_log(cache, log_warn,
						"Cannot open core %u", core_id);
			} else {
				core->opened = true;
			}
		}

		env_bit_set(core_id, cache->conf_meta->valid_core_bitmap);
		core->added = true;
		cache->conf_meta->core_count++;
		core->volume.cache = cache;

		if (ocf_mngt_core_init_front_volume(core))
			goto err;

		core->counters =
			env_zalloc(sizeof(*core->counters), ENV_MEM_NORMAL);
		if (!core->counters)
			goto err;

		if (!core->opened) {
			env_bit_set(ocf_cache_state_incomplete,
					&cache->cache_state);
			cache->ocf_core_inactive_count++;
			ocf_cache_log(cache, log_warn,
					"Cannot find core %u in pool"
					", core added as inactive\n", core_id);
			continue;
		}

		hd_lines = ocf_bytes_2_lines(cache,
				ocf_volume_get_length(&core->volume));

		if (hd_lines) {
			ocf_cache_log(cache, log_info,
					"Disk lines = %" ENV_PRIu64 "\n", hd_lines);
		}
	}

	context->flags.cores_opened = true;
	return 0;

err:
	_ocf_mngt_close_all_uninitialized_cores(cache);

	return -OCF_ERR_START_CACHE_FAIL;
}

void _ocf_mngt_init_instance_load_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	ocf_cleaning_t cleaning_policy;
	ocf_error_t result;

	if (error) {
		ocf_cache_log(cache, log_err,
				"Cannot read cache metadata\n");
		OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_START_CACHE_FAIL);
	}

	__init_freelist(cache);

	cleaning_policy = cache->conf_meta->cleaning_policy_type;
	if (!cleaning_policy_ops[cleaning_policy].initialize)
		goto out;

	if (context->metadata.shutdown_status == ocf_metadata_clean_shutdown)
		result = cleaning_policy_ops[cleaning_policy].initialize(cache, 0);
	else
		result = cleaning_policy_ops[cleaning_policy].initialize(cache, 1);

	if (result) {
		ocf_cache_log(cache, log_err,
				"Cannot initialize cleaning policy\n");
		OCF_PL_FINISH_RET(context->pipeline, result);
	}

out:
	ocf_pipeline_next(context->pipeline);
}

/**
 * handle load variant
 */
static void _ocf_mngt_init_instance_clean_load(
		struct ocf_cache_attach_context *context)
{
	ocf_cache_t cache = context->cache;

	ocf_metadata_load_all(cache,
			_ocf_mngt_init_instance_load_complete, context);
}

/**
 * handle recovery variant
 */
static void _ocf_mngt_init_instance_recovery(
		struct ocf_cache_attach_context *context)
{
	ocf_cache_t cache = context->cache;

	init_attached_data_structures_recovery(cache);

	ocf_cache_log(cache, log_warn,
			"ERROR: Cache device did not shut down properly!\n");

	ocf_cache_log(cache, log_info, "Initiating recovery sequence...\n");

	ocf_metadata_load_recovery(cache,
			_ocf_mngt_init_instance_load_complete, context);
}

static void _ocf_mngt_init_instance_load(
		struct ocf_cache_attach_context *context)
{
	ocf_cache_t cache = context->cache;
	int ret;

	OCF_ASSERT_PLUGGED(cache);

	ret = _ocf_mngt_init_instance_add_cores(context);
	if (ret)
		OCF_PL_FINISH_RET(context->pipeline, ret);

	if (context->metadata.shutdown_status == ocf_metadata_clean_shutdown)
		_ocf_mngt_init_instance_clean_load(context);
	else
		_ocf_mngt_init_instance_recovery(context);
}

/**
 * @brief allocate memory for new cache, add it to cache queue, set initial
 * values and running state
 */
static int _ocf_mngt_init_new_cache(struct ocf_cache_mngt_init_params *params)
{
	ocf_cache_t cache = env_vzalloc(sizeof(*cache));
	int result;

	if (!cache)
		return -OCF_ERR_NO_MEM;

	if (ocf_mngt_cache_lock_init(cache)) {
		result = -OCF_ERR_NO_MEM;
		goto alloc_err;
	}

	/* Lock cache during setup - this trylock should always succeed */
	ENV_BUG_ON(ocf_mngt_cache_trylock(cache));

	if (env_mutex_init(&cache->flush_mutex)) {
		result = -OCF_ERR_NO_MEM;
		goto lock_err;
	}

	ENV_BUG_ON(!ocf_refcnt_inc(&cache->refcnt.cache));

	/* start with frozen metadata ref counter to indicate detached device */
	ocf_refcnt_freeze(&cache->refcnt.metadata);

	env_atomic_set(&(cache->last_access_ms),
			env_ticks_to_msecs(env_get_tick_count()));

	env_bit_set(ocf_cache_state_initializing, &cache->cache_state);

	params->cache = cache;
	params->flags.cache_alloc = true;

	return 0;

lock_err:
	ocf_mngt_cache_lock_deinit(cache);
alloc_err:
	env_vfree(cache);

	return result;
}

static void _ocf_mngt_attach_cache_device(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	ocf_volume_type_t type;
	int ret;

	cache->device = env_vzalloc(sizeof(*cache->device));
	if (!cache->device)
		OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_NO_MEM);

	context->flags.device_alloc = true;

	cache->device->init_mode = context->init_mode;

	/* Prepare UUID of cache volume */
	type = ocf_ctx_get_volume_type(cache->owner, context->cfg.volume_type);
	if (!type) {
		OCF_PL_FINISH_RET(context->pipeline,
				-OCF_ERR_INVAL_VOLUME_TYPE);
	}

	ret = ocf_volume_init(&cache->device->volume, type,
			&context->cfg.uuid, true);
	if (ret)
		OCF_PL_FINISH_RET(context->pipeline, ret);

	cache->device->volume.cache = cache;
	context->flags.volume_inited = true;

	/*
	 * Open the cache device. It has to be done first because the metadata
	 * service needs to know the size of the cache device.
	 */
	ret = ocf_volume_open(&cache->device->volume,
			context->cfg.volume_params);
	if (ret) {
		ocf_cache_log(cache, log_err, "ERROR: Cache not available\n");
		OCF_PL_FINISH_RET(context->pipeline, ret);
	}
	context->flags.device_opened = true;

	context->volume_size = ocf_volume_get_length(&cache->device->volume);

	/* Check minimum size of cache device */
	if (context->volume_size < OCF_CACHE_SIZE_MIN) {
		ocf_cache_log(cache, log_err, "ERROR: Cache device size must "
				"be at least %llu [MiB]\n", OCF_CACHE_SIZE_MIN / MiB);
		OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_INVAL_CACHE_DEV);
	}

	ocf_pipeline_next(pipeline);
}

/**
 * @brief prepare cache for init. This is the first step towards initializing
 * the cache
 */
static int _ocf_mngt_init_prepare_cache(struct ocf_cache_mngt_init_params *param,
		struct ocf_mngt_cache_config *cfg)
{
	ocf_cache_t cache;
	int ret = 0;

	/* Check if a cache with the specified name already exists */
	ret = ocf_mngt_cache_get_by_name(param->ctx, cfg->name,
			OCF_CACHE_NAME_SIZE, &cache);
	if (!ret) {
		ocf_mngt_cache_put(cache);
		/* Cache already exists */
		ret = -OCF_ERR_CACHE_EXIST;
		goto out;
	}

	ocf_log(param->ctx, log_info, "Inserting cache %s\n", cfg->name);

	ret = _ocf_mngt_init_new_cache(param);
	if (ret)
		goto out;

	cache = param->cache;

	cache->backfill.max_queue_size = cfg->backfill.max_queue_size;
	cache->backfill.queue_unblock_size = cfg->backfill.queue_unblock_size;

	param->flags.cache_locked = true;

	cache->pt_unaligned_io = cfg->pt_unaligned_io;
	cache->use_submit_io_fast = cfg->use_submit_io_fast;

	cache->eviction_policy_init = cfg->eviction_policy;
	cache->metadata.is_volatile = cfg->metadata_volatile;

out:
	return ret;
}

static void _ocf_mngt_test_volume_initial_write_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;

	OCF_PL_NEXT_ON_SUCCESS_RET(context->test.pipeline, error);
}

static void _ocf_mngt_test_volume_initial_write(
		ocf_pipeline_t test_pipeline, void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	/*
	 * Write buffer filled with "1"
	 */

	ENV_BUG_ON(env_memset(context->test.rw_buffer, PAGE_SIZE, 1));

	ocf_submit_cache_page(cache, context->test.reserved_lba_addr,
			OCF_WRITE, context->test.rw_buffer,
			_ocf_mngt_test_volume_initial_write_complete, context);
}

static void _ocf_mngt_test_volume_first_read_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	int ret, diff;

	if (error)
		OCF_PL_FINISH_RET(context->test.pipeline, error);

	ret = env_memcmp(context->test.rw_buffer, PAGE_SIZE,
			context->test.cmp_buffer, PAGE_SIZE, &diff);
	if (ret)
		OCF_PL_FINISH_RET(context->test.pipeline, ret);

	if (diff) {
		/* we read back different data than what we had just
		   written - this is a fatal error */
		OCF_PL_FINISH_RET(context->test.pipeline, -OCF_ERR_IO);
	}

	if (!ocf_volume_is_atomic(&cache->device->volume)) {
		/* If not atomic, stop testing here */
		OCF_PL_FINISH_RET(context->test.pipeline, 0);
	}

	ocf_pipeline_next(context->test.pipeline);
}

static void _ocf_mngt_test_volume_first_read(
		ocf_pipeline_t test_pipeline, void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	/*
	 * First read
	 */

	ENV_BUG_ON(env_memset(context->test.rw_buffer, PAGE_SIZE, 0));
	ENV_BUG_ON(env_memset(context->test.cmp_buffer, PAGE_SIZE, 1));

	ocf_submit_cache_page(cache, context->test.reserved_lba_addr,
			OCF_READ, context->test.rw_buffer,
			_ocf_mngt_test_volume_first_read_complete, context);
}

static void _ocf_mngt_test_volume_discard_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;

	OCF_PL_NEXT_ON_SUCCESS_RET(context->test.pipeline, error);
}

static void _ocf_mngt_test_volume_discard(
		ocf_pipeline_t test_pipeline, void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	/*
	 * Submit discard request
	 */

	ocf_submit_volume_discard(&cache->device->volume,
			context->test.reserved_lba_addr, PAGE_SIZE,
			_ocf_mngt_test_volume_discard_complete, context);
}

static void _ocf_mngt_test_volume_second_read_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	int ret, diff;

	if (error)
		OCF_PL_FINISH_RET(context->test.pipeline, error);

	ret = env_memcmp(context->test.rw_buffer, PAGE_SIZE,
			context->test.cmp_buffer, PAGE_SIZE, &diff);
	if (ret)
		OCF_PL_FINISH_RET(context->test.pipeline, ret);

	if (diff) {
		/* discard does not cause target addresses to return 0 on
		   subsequent reads */
		cache->device->volume.features.discard_zeroes = 0;
	}

	ocf_pipeline_next(context->test.pipeline);
}

static void _ocf_mngt_test_volume_second_read(
		ocf_pipeline_t test_pipeline, void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	/*
	 * Second read
	 */

	ENV_BUG_ON(env_memset(context->test.rw_buffer, PAGE_SIZE, 1));
	ENV_BUG_ON(env_memset(context->test.cmp_buffer, PAGE_SIZE, 0));

	ocf_submit_cache_page(cache, context->test.reserved_lba_addr,
			OCF_READ, context->test.rw_buffer,
			_ocf_mngt_test_volume_second_read_complete, context);
}

static void _ocf_mngt_test_volume_finish(ocf_pipeline_t pipeline,
		void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;

	env_free(context->test.rw_buffer);
	env_free(context->test.cmp_buffer);

	ocf_pipeline_destroy(context->test.pipeline);

	OCF_PL_NEXT_ON_SUCCESS_RET(context->pipeline, error);
}

struct ocf_pipeline_properties _ocf_mngt_test_volume_pipeline_properties = {
	.priv_size = 0,
	.finish = _ocf_mngt_test_volume_finish,
	.steps = {
		OCF_PL_STEP(_ocf_mngt_test_volume_initial_write),
		OCF_PL_STEP(_ocf_mngt_test_volume_first_read),
		OCF_PL_STEP(_ocf_mngt_test_volume_discard),
		OCF_PL_STEP(_ocf_mngt_test_volume_second_read),
		OCF_PL_STEP_TERMINATOR(),
	},
};
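
/*
 * The four test steps above probe two properties of the cache device using a
 * reserved LBA: that written data reads back intact (any mismatch is fatal)
 * and, for atomic volumes, whether a discarded region reads back as zeros
 * (which determines features.discard_zeroes).
 *
 * Below is a minimal sketch of the pipeline idiom used throughout this file.
 * It is illustrative only and not part of the build; "my_step", "my_finish"
 * and "my_props" are hypothetical names.
 */
#if 0
static void my_step(ocf_pipeline_t pipeline, void *priv, ocf_pipeline_arg_t arg)
{
	/* do some (possibly asynchronous) work, then either advance... */
	ocf_pipeline_next(pipeline);
	/* ...or abort the whole pipeline with an error code:
	 * OCF_PL_FINISH_RET(pipeline, -OCF_ERR_NO_MEM);
	 */
}

static void my_finish(ocf_pipeline_t pipeline, void *priv, int error)
{
	/* single completion point: runs on success and on early abort alike */
	ocf_pipeline_destroy(pipeline);
}

struct ocf_pipeline_properties my_props = {
	.priv_size = 0,
	.finish = my_finish,
	.steps = {
		OCF_PL_STEP(my_step),
		OCF_PL_STEP_TERMINATOR(),
	},
};
#endif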

static void _ocf_mngt_test_volume(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	ocf_pipeline_t test_pipeline;
	int result;

	cache->device->volume.features.discard_zeroes = 1;

	if (!context->cfg.perform_test)
		OCF_PL_NEXT_RET(pipeline);

	context->test.reserved_lba_addr = ocf_metadata_get_reserved_lba(cache);

	context->test.rw_buffer = env_malloc(PAGE_SIZE, ENV_MEM_NORMAL);
	if (!context->test.rw_buffer)
		OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_NO_MEM);

	context->test.cmp_buffer = env_malloc(PAGE_SIZE, ENV_MEM_NORMAL);
	if (!context->test.cmp_buffer)
		goto err_buffer;

	result = ocf_pipeline_create(&test_pipeline, cache,
			&_ocf_mngt_test_volume_pipeline_properties);
	if (result)
		goto err_pipeline;

	ocf_pipeline_set_priv(test_pipeline, context);

	context->test.pipeline = test_pipeline;

	OCF_PL_NEXT_RET(test_pipeline);

err_pipeline:
	/* free in reverse allocation order so that a failed cmp_buffer
	 * allocation does not leak rw_buffer */
	env_free(context->test.cmp_buffer);
err_buffer:
	env_free(context->test.rw_buffer);
	OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_NO_MEM);
}

/**
 * Prepare metadata according to the init mode (for load/recovery read from disk)
 */
static void _ocf_mngt_attach_load_properties_end(void *priv, int error,
		struct ocf_metadata_load_properties *properties)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	context->metadata.status = error;

	if (error) {
		/*
		 * If the --load option wasn't used and old metadata doesn't
		 * exist on the device, dismiss the error.
		 */
		if (error == -OCF_ERR_NO_METADATA &&
				cache->device->init_mode != ocf_init_mode_load)
			OCF_PL_NEXT_RET(context->pipeline);
		else
			OCF_PL_FINISH_RET(context->pipeline, error);
	} else if (cache->device->init_mode != ocf_init_mode_load) {
		/*
		 * To prevent silently overriding metadata, return an error if
		 * old metadata was detected but the --load flag wasn't used.
		 */
		OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_METADATA_FOUND);
	}

	/*
	 * Check if the name loaded from disk is the same as the present one.
	 */
	if (env_strncmp(cache->conf_meta->name, OCF_CACHE_NAME_SIZE,
			properties->cache_name, OCF_CACHE_NAME_SIZE)) {
		OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_CACHE_NAME_MISMATCH);
	}

	context->metadata.shutdown_status = properties->shutdown_status;
	context->metadata.dirty_flushed = properties->dirty_flushed;

	if (cache->device->init_mode == ocf_init_mode_load) {
		context->metadata.line_size = properties->line_size;
		cache->conf_meta->metadata_layout = properties->layout;
		cache->conf_meta->cache_mode = properties->cache_mode;
	}

	ocf_pipeline_next(context->pipeline);
}

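/*
 * Outcome summary for the metadata probe above (an illustrative recap
 * derived from the code, not an authoritative spec):
 *
 *   error == -OCF_ERR_NO_METADATA, init_mode != load -> proceed (fresh init)
 *   any other error                                  -> fail the pipeline
 *   no error, init_mode != load                      -> -OCF_ERR_METADATA_FOUND
 *   no error, cache name mismatch                    -> -OCF_ERR_CACHE_NAME_MISMATCH
 *   no error, init_mode == load                      -> adopt on-disk properties
 */
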
static void _ocf_mngt_attach_load_properties(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	OCF_ASSERT_PLUGGED(cache);

	context->metadata.shutdown_status = ocf_metadata_clean_shutdown;
	context->metadata.dirty_flushed = DIRTY_FLUSHED;
	context->metadata.line_size = context->cfg.cache_line_size;

	if (context->cfg.force)
		OCF_PL_NEXT_RET(context->pipeline);

	if (cache->device->init_mode == ocf_init_mode_metadata_volatile)
		OCF_PL_NEXT_RET(context->pipeline);

	ocf_metadata_load_properties(&cache->device->volume,
			_ocf_mngt_attach_load_properties_end, context);
}

static void _ocf_mngt_attach_prepare_metadata(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	int ret;

	if (context->init_mode == ocf_init_mode_load &&
			context->metadata.status) {
		OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_START_CACHE_FAIL);
	}

	context->metadata.line_size = context->metadata.line_size ?:
			cache->metadata.settings.size;

	/*
	 * Initialize variable size metadata segments
	 */
	if (ocf_metadata_init_variable_size(cache, context->volume_size,
			context->metadata.line_size,
			cache->conf_meta->metadata_layout)) {
		OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_START_CACHE_FAIL);
	}
	context->flags.attached_metadata_inited = true;

	cache->freelist = ocf_freelist_init(cache);
	if (!cache->freelist)
		OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_START_CACHE_FAIL);
	context->flags.freelist_inited = true;

	ret = ocf_concurrency_init(cache);
	if (ret)
		OCF_PL_FINISH_RET(context->pipeline, ret);

	context->flags.concurrency_inited = 1;

	ocf_pipeline_next(context->pipeline);
}

/**
 * @brief initializing cache anew (not loading or recovering)
 */
static void _ocf_mngt_init_instance_init(struct ocf_cache_attach_context *context)
{
	ocf_cache_t cache = context->cache;
	ocf_error_t result;

	if (!context->metadata.status && !context->cfg.force &&
			context->metadata.shutdown_status !=
			ocf_metadata_detached) {

		if (context->metadata.shutdown_status !=
				ocf_metadata_clean_shutdown) {
			ocf_cache_log(cache, log_err, DIRTY_SHUTDOWN_ERROR_MSG);
			OCF_PL_FINISH_RET(context->pipeline,
					-OCF_ERR_DIRTY_SHUTDOWN);
		}

		if (context->metadata.dirty_flushed == DIRTY_NOT_FLUSHED) {
			ocf_cache_log(cache, log_err,
					DIRTY_NOT_FLUSHED_ERROR_MSG);
			OCF_PL_FINISH_RET(context->pipeline,
					-OCF_ERR_DIRTY_EXISTS);
		}
	}

	result = init_attached_data_structures(cache, cache->eviction_policy_init);
	if (result)
		OCF_PL_FINISH_RET(context->pipeline, result);

	/* In the initial cache state there is no dirty data, so all dirty data
	   is considered to be flushed
	 */
	cache->conf_meta->dirty_flushed = true;

	ocf_pipeline_next(context->pipeline);
}

uint64_t _ocf_mngt_calculate_ram_needed(ocf_cache_t cache,
		ocf_volume_t cache_volume)
{
	ocf_cache_line_size_t line_size = ocf_line_size(cache);
	uint64_t volume_size = ocf_volume_get_length(cache_volume);
	uint64_t const_data_size;
	uint64_t cache_line_no;
	uint64_t data_per_line;
	uint64_t min_free_ram;

	/* Superblock + per core metadata */
	const_data_size = 100 * MiB;

	/* Cache metadata */
	cache_line_no = volume_size / line_size;
	data_per_line = (68 + (2 * (line_size / KiB / 4)));

	min_free_ram = const_data_size + cache_line_no * data_per_line;

	/* 110% of calculated value */
	min_free_ram = (11 * min_free_ram) / 10;

	return min_free_ram;
}
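
/*
 * Worked example (illustrative): for a 1 TiB cache volume with 4 KiB cache
 * lines, cache_line_no = 2^40 / 2^12 = 268,435,456 lines and data_per_line =
 * 68 + 2 * (4/4) = 70 B, so the estimate is 100 MiB + ~17.5 GiB of metadata;
 * with the 10% safety margin, roughly 19.4 GiB of free RAM is required.
 */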

int ocf_mngt_get_ram_needed(ocf_cache_t cache,
		struct ocf_mngt_cache_device_config *cfg, uint64_t *ram_needed)
{
	ocf_volume_t volume;
	ocf_volume_type_t type;
	int result;

	OCF_CHECK_NULL(cache);
	OCF_CHECK_NULL(cfg);
	OCF_CHECK_NULL(ram_needed);

	type = ocf_ctx_get_volume_type(cache->owner, cfg->volume_type);
	if (!type)
		return -OCF_ERR_INVAL_VOLUME_TYPE;

	result = ocf_volume_create(&volume, type,
			&cfg->uuid);
	if (result)
		return result;

	result = ocf_volume_open(volume, cfg->volume_params);
	if (result) {
		ocf_volume_destroy(volume);
		return result;
	}

	*ram_needed = _ocf_mngt_calculate_ram_needed(cache, volume);

	ocf_volume_close(volume);
	ocf_volume_destroy(volume);

	return 0;
}
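
/*
 * Minimal usage sketch for the helper above (illustrative only, not part of
 * the build; "example_query_ram" is a hypothetical caller and assumes a valid
 * cache and a filled-in device config):
 */
#if 0
static void example_query_ram(ocf_cache_t cache,
		struct ocf_mngt_cache_device_config *device_cfg)
{
	uint64_t ram_needed;

	if (!ocf_mngt_get_ram_needed(cache, device_cfg, &ram_needed)) {
		ocf_cache_log(cache, log_info,
				"Cache metadata needs ~%" ENV_PRIu64 " B of RAM\n",
				ram_needed);
	}
}
#endif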

/**
 * @brief error-handling helper: partially clean up data structures upon
 * premature function exit.
 *
 * @param ctx OCF context
 * @param params - startup params containing initialization status flags.
 */
static void _ocf_mngt_init_handle_error(ocf_ctx_t ctx,
		struct ocf_cache_mngt_init_params *params)
{
	ocf_cache_t cache = params->cache;

	if (!params->flags.cache_alloc)
		return;

	if (params->flags.metadata_inited)
		ocf_metadata_deinit(cache);

	if (!params->flags.added_to_list)
		return;

	env_rmutex_lock(&ctx->lock);

	list_del(&cache->list);
	env_vfree(cache);

	env_rmutex_unlock(&ctx->lock);
}

static void _ocf_mngt_attach_handle_error(
		struct ocf_cache_attach_context *context)
{
	ocf_cache_t cache = context->cache;

	if (context->flags.cleaner_started)
		ocf_stop_cleaner(cache);

	if (context->flags.promotion_initialized)
		__deinit_promotion_policy(cache);

	if (context->flags.cores_opened)
		_ocf_mngt_close_all_uninitialized_cores(cache);

	if (context->flags.attached_metadata_inited)
		ocf_metadata_deinit_variable_size(cache);

	if (context->flags.device_opened)
		ocf_volume_close(&cache->device->volume);

	if (context->flags.concurrency_inited)
		ocf_concurrency_deinit(cache);

	if (context->flags.freelist_inited)
		ocf_freelist_deinit(cache->freelist);

	if (context->flags.volume_inited)
		ocf_volume_deinit(&cache->device->volume);

	if (context->flags.device_alloc)
		env_vfree(cache->device);

	ocf_pipeline_destroy(cache->stop_pipeline);
}

static void _ocf_mngt_cache_init(ocf_cache_t cache,
		struct ocf_cache_mngt_init_params *params)
{
	/*
	 * Super block elements initialization
	 */
	cache->conf_meta->cache_mode = params->metadata.cache_mode;
	cache->conf_meta->metadata_layout = params->metadata.layout;
	cache->conf_meta->promotion_policy_type = params->metadata.promotion_policy;

	INIT_LIST_HEAD(&cache->io_queues);

	/* Init partitions */
	ocf_part_init(cache);

	__init_cores(cache);
	__init_metadata_version(cache);
	__init_partitions(cache);
}

static int _ocf_mngt_cache_start(ocf_ctx_t ctx, ocf_cache_t *cache,
		struct ocf_mngt_cache_config *cfg)
{
	struct ocf_cache_mngt_init_params params;
	ocf_cache_t tmp_cache;
	int result;

	ENV_BUG_ON(env_memset(&params, sizeof(params), 0));

	params.ctx = ctx;
	params.metadata.cache_mode = cfg->cache_mode;
	params.metadata.layout = cfg->metadata_layout;
	params.metadata.line_size = cfg->cache_line_size;
	params.metadata_volatile = cfg->metadata_volatile;
	params.metadata.promotion_policy = cfg->promotion_policy;
	params.locked = cfg->locked;

	result = env_rmutex_lock_interruptible(&ctx->lock);
	if (result)
		goto _cache_mngt_init_instance_ERROR;

	/* Prepare cache */
	result = _ocf_mngt_init_prepare_cache(&params, cfg);
	if (result) {
		env_rmutex_unlock(&ctx->lock);
		goto _cache_mngt_init_instance_ERROR;
	}

	tmp_cache = params.cache;
	tmp_cache->owner = ctx;

	/*
	 * Initialize selected segments of metadata in memory
	 */
	result = ocf_metadata_init(tmp_cache, params.metadata.line_size);
	if (result) {
		env_rmutex_unlock(&ctx->lock);
		result = -OCF_ERR_NO_MEM;
		goto _cache_mngt_init_instance_ERROR;
	}
	params.flags.metadata_inited = true;

	result = ocf_cache_set_name(tmp_cache, cfg->name, OCF_CACHE_NAME_SIZE);
	if (result) {
		env_rmutex_unlock(&ctx->lock);
		goto _cache_mngt_init_instance_ERROR;
	}

	list_add_tail(&tmp_cache->list, &ctx->caches);
	params.flags.added_to_list = true;
	env_rmutex_unlock(&ctx->lock);

	result = ocf_metadata_io_init(tmp_cache);
	if (result)
		goto _cache_mngt_init_instance_ERROR;

	ocf_cache_log(tmp_cache, log_debug, "Metadata initialized\n");

	_ocf_mngt_cache_init(tmp_cache, &params);

	ocf_ctx_get(ctx);

	if (!params.locked) {
		/* User did not request to lock cache instance after creation -
		   unlock it here since we have acquired the lock to
		   perform management operations. */
		ocf_mngt_cache_unlock(tmp_cache);
		params.flags.cache_locked = false;
	}

	*cache = tmp_cache;

	return 0;

_cache_mngt_init_instance_ERROR:
	_ocf_mngt_init_handle_error(ctx, &params);
	*cache = NULL;
	return result;
}

static void _ocf_mngt_cache_set_valid(ocf_cache_t cache)
{
	/*
	 * Clear initialization state and set the valid bit so we know
	 * it's in use.
	 */
	env_bit_clear(ocf_cache_state_initializing, &cache->cache_state);
	env_bit_set(ocf_cache_state_running, &cache->cache_state);
}

static void _ocf_mngt_init_attached_nonpersistent(ocf_cache_t cache)
{
	env_atomic_set(&cache->fallback_pt_error_counter, 0);
}

static void _ocf_mngt_attach_check_ram(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	uint64_t min_free_ram;
	uint64_t free_ram;

	min_free_ram = _ocf_mngt_calculate_ram_needed(cache,
			&cache->device->volume);

	free_ram = env_get_free_memory();

	if (free_ram < min_free_ram) {
		ocf_cache_log(cache, log_err, "Not enough free RAM for cache "
				"metadata to start cache\n");
		ocf_cache_log(cache, log_err,
				"Available RAM: %" ENV_PRIu64 " B\n", free_ram);
		ocf_cache_log(cache, log_err, "Needed RAM: %" ENV_PRIu64 " B\n",
				min_free_ram);
		OCF_PL_FINISH_RET(pipeline, -OCF_ERR_NO_FREE_RAM);
	}

	ocf_pipeline_next(pipeline);
}

static void _ocf_mngt_attach_load_superblock_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	if (error) {
		ocf_cache_log(cache, log_err,
				"ERROR: Cannot load cache state\n");
		OCF_PL_FINISH_RET(context->pipeline,
				-OCF_ERR_START_CACHE_FAIL);
	}

	/* Check the loaded state only after the load itself succeeded */
	if (cache->conf_meta->cachelines !=
			ocf_metadata_get_cachelines_count(cache)) {
		ocf_cache_log(cache, log_err,
				"ERROR: Cache device size mismatch!\n");
		OCF_PL_FINISH_RET(context->pipeline,
				-OCF_ERR_START_CACHE_FAIL);
	}

	ocf_pipeline_next(context->pipeline);
}

static void _ocf_mngt_attach_load_superblock(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	if (cache->device->init_mode != ocf_init_mode_load)
		OCF_PL_NEXT_RET(context->pipeline);

	ocf_cache_log(cache, log_info, "Loading cache state...\n");
	ocf_metadata_load_superblock(cache,
			_ocf_mngt_attach_load_superblock_complete, context);
}

static void _ocf_mngt_attach_init_instance(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	int result;

	result = ocf_start_cleaner(cache);
	if (result) {
		ocf_cache_log(cache, log_err,
				"Error while starting cleaner\n");
		OCF_PL_FINISH_RET(context->pipeline, result);
	}
	context->flags.cleaner_started = true;

	result = ocf_promotion_init(cache, cache->conf_meta->promotion_policy_type);
	if (result) {
		ocf_cache_log(cache, log_err,
				"Cannot initialize promotion policy\n");
		OCF_PL_FINISH_RET(context->pipeline, result);
	}
	context->flags.promotion_initialized = true;

	switch (cache->device->init_mode) {
	case ocf_init_mode_init:
	case ocf_init_mode_metadata_volatile:
		_ocf_mngt_init_instance_init(context);
		return;
	case ocf_init_mode_load:
		_ocf_mngt_init_instance_load(context);
		return;
	default:
		OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_INVAL);
	}
}

static void _ocf_mngt_attach_flush_metadata_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	if (error) {
		ocf_cache_log(cache, log_err,
				"ERROR: Cannot save cache state\n");
		OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_WRITE_CACHE);
	}

	ocf_pipeline_next(context->pipeline);
}

static void _ocf_mngt_attach_flush_metadata(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	ocf_metadata_flush_all(cache,
			_ocf_mngt_attach_flush_metadata_complete, context);
}

static void _ocf_mngt_attach_discard_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	bool discard = cache->device->volume.features.discard_zeroes;

	if (error) {
		ocf_cache_log(cache, log_warn, "%s failed\n",
				discard ? "Discarding whole cache device" :
					"Overwriting cache with zeroes");

		if (ocf_volume_is_atomic(&cache->device->volume)) {
			ocf_cache_log(cache, log_err, "This step is required"
					" for atomic mode!\n");
			OCF_PL_FINISH_RET(context->pipeline, error);
		}

		ocf_cache_log(cache, log_warn, "This may impact cache"
				" performance!\n");
	}

	ocf_pipeline_next(context->pipeline);
}

static void _ocf_mngt_attach_discard(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	uint64_t addr = cache->device->metadata_offset;
	uint64_t length = ocf_volume_get_length(&cache->device->volume) - addr;
	bool discard = cache->device->volume.features.discard_zeroes;

	if (cache->device->init_mode == ocf_init_mode_load)
		OCF_PL_NEXT_RET(context->pipeline);

	if (!context->cfg.discard_on_start)
		OCF_PL_NEXT_RET(context->pipeline);

	if (!discard && ocf_volume_is_atomic(&cache->device->volume)) {
		/* discard doesn't zero data - need to explicitly write zeros */
		ocf_submit_write_zeros(&cache->device->volume, addr, length,
				_ocf_mngt_attach_discard_complete, context);
	} else {
		/* Discard volume after metadata */
		ocf_submit_volume_discard(&cache->device->volume, addr, length,
				_ocf_mngt_attach_discard_complete, context);
	}
}

static void _ocf_mngt_attach_flush_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;

	OCF_PL_NEXT_ON_SUCCESS_RET(context->pipeline, error);
}

static void _ocf_mngt_attach_flush(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	bool discard = cache->device->volume.features.discard_zeroes;

	if (!discard && ocf_volume_is_atomic(&cache->device->volume)) {
		ocf_submit_volume_flush(&cache->device->volume,
				_ocf_mngt_attach_flush_complete, context);
	} else {
		ocf_pipeline_next(context->pipeline);
	}
}

static void _ocf_mngt_attach_shutdown_status_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	if (error) {
		ocf_cache_log(cache, log_err, "Cannot flush shutdown status\n");
		OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_WRITE_CACHE);
	}

	ocf_pipeline_next(context->pipeline);
}

static void _ocf_mngt_attach_shutdown_status(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	/* clear clean shutdown status */
	ocf_metadata_set_shutdown_status(cache, ocf_metadata_dirty_shutdown,
			_ocf_mngt_attach_shutdown_status_complete, context);
}

static void _ocf_mngt_attach_post_init(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	ocf_cleaner_refcnt_unfreeze(cache);
	ocf_refcnt_unfreeze(&cache->refcnt.metadata);

	ocf_cache_log(cache, log_debug, "Cache attached\n");

	ocf_pipeline_next(context->pipeline);
}

static void _ocf_mngt_cache_attach_finish(ocf_pipeline_t pipeline,
		void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;

	if (error)
		_ocf_mngt_attach_handle_error(context);

	context->cmpl(context->cache, context->priv1, context->priv2, error);

	env_vfree(context->cfg.uuid.data);
	ocf_pipeline_destroy(context->pipeline);
}

struct ocf_pipeline_properties _ocf_mngt_cache_attach_pipeline_properties = {
	.priv_size = sizeof(struct ocf_cache_attach_context),
	.finish = _ocf_mngt_cache_attach_finish,
	.steps = {
		OCF_PL_STEP(_ocf_mngt_attach_cache_device),
		OCF_PL_STEP(_ocf_mngt_attach_check_ram),
		OCF_PL_STEP(_ocf_mngt_attach_load_properties),
		OCF_PL_STEP(_ocf_mngt_attach_prepare_metadata),
		OCF_PL_STEP(_ocf_mngt_test_volume),
		OCF_PL_STEP(_ocf_mngt_attach_load_superblock),
		OCF_PL_STEP(_ocf_mngt_attach_init_instance),
		OCF_PL_STEP(_ocf_mngt_attach_flush_metadata),
		OCF_PL_STEP(_ocf_mngt_attach_discard),
		OCF_PL_STEP(_ocf_mngt_attach_flush),
		OCF_PL_STEP(_ocf_mngt_attach_shutdown_status),
		OCF_PL_STEP(_ocf_mngt_attach_post_init),
		OCF_PL_STEP_TERMINATOR(),
	},
};
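
/*
 * Note on error handling (a summary derived from the code above): each attach
 * step records what it managed to set up in context->flags. If any step
 * finishes the pipeline with an error, _ocf_mngt_cache_attach_finish() calls
 * _ocf_mngt_attach_handle_error(), which tears down exactly the assets whose
 * flags are set, in reverse order of initialization.
 */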

typedef void (*_ocf_mngt_cache_unplug_end_t)(void *context, int error);

struct _ocf_mngt_cache_unplug_context {
	_ocf_mngt_cache_unplug_end_t cmpl;
	void *priv;
	ocf_cache_t cache;
};

struct ocf_mngt_cache_stop_context {
	/* unplug context - this is a private structure of _ocf_mngt_cache_unplug;
	 * it is a member of the stop context only to reserve memory in advance
	 * for _ocf_mngt_cache_unplug, eliminating the possibility of an ENOMEM
	 * error at the point where we are effectively unable to handle it */
	struct _ocf_mngt_cache_unplug_context unplug_context;

	ocf_mngt_cache_stop_end_t cmpl;
	void *priv;
	ocf_pipeline_t pipeline;
	ocf_cache_t cache;
	ocf_ctx_t ctx;
	char cache_name[OCF_CACHE_NAME_SIZE];
	int cache_write_error;
};

static void ocf_mngt_cache_stop_wait_metadata_io_finish(void *priv)
{
	struct ocf_mngt_cache_stop_context *context = priv;

	ocf_pipeline_next(context->pipeline);
}

static void ocf_mngt_cache_stop_wait_metadata_io(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_mngt_cache_stop_context *context = priv;
	ocf_cache_t cache = context->cache;

	ocf_refcnt_freeze(&cache->refcnt.metadata);
	ocf_refcnt_register_zero_cb(&cache->refcnt.metadata,
			ocf_mngt_cache_stop_wait_metadata_io_finish, context);
}

static void _ocf_mngt_cache_stop_remove_cores(ocf_cache_t cache, bool attached)
{
	ocf_core_t core;
	ocf_core_id_t core_id;
	int no = cache->conf_meta->core_count;

	/* All exported objects removed, cleaning up rest. */
	for_each_core_all(cache, core, core_id) {
		if (!env_bit_test(core_id, cache->conf_meta->valid_core_bitmap))
			continue;

		cache_mngt_core_remove_from_cache(core);
		if (attached)
			cache_mngt_core_remove_from_cleaning_pol(core);
		cache_mngt_core_close(core);
		if (--no == 0)
			break;
	}
	ENV_BUG_ON(cache->conf_meta->core_count != 0);
}

static void ocf_mngt_cache_stop_remove_cores(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_mngt_cache_stop_context *context = priv;
	ocf_cache_t cache = context->cache;

	_ocf_mngt_cache_stop_remove_cores(cache, true);

	ocf_pipeline_next(pipeline);
}

static void ocf_mngt_cache_stop_unplug_complete(void *priv, int error)
{
	struct ocf_mngt_cache_stop_context *context = priv;

	if (error) {
		ENV_BUG_ON(error != -OCF_ERR_WRITE_CACHE);
		context->cache_write_error = error;
	}

	ocf_pipeline_next(context->pipeline);
}

static void _ocf_mngt_cache_unplug(ocf_cache_t cache, bool stop,
		struct _ocf_mngt_cache_unplug_context *context,
		_ocf_mngt_cache_unplug_end_t cmpl, void *priv);

static void ocf_mngt_cache_stop_unplug(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_mngt_cache_stop_context *context = priv;
	ocf_cache_t cache = context->cache;

	_ocf_mngt_cache_unplug(cache, true, &context->unplug_context,
			ocf_mngt_cache_stop_unplug_complete, context);
}

static void _ocf_mngt_cache_put_io_queues(ocf_cache_t cache)
{
	ocf_queue_t queue, tmp_queue;

	list_for_each_entry_safe(queue, tmp_queue, &cache->io_queues, list)
		ocf_queue_put(queue);
}

static void ocf_mngt_cache_stop_put_io_queues(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_mngt_cache_stop_context *context = priv;
	ocf_cache_t cache = context->cache;

	_ocf_mngt_cache_put_io_queues(cache);

	ocf_pipeline_next(pipeline);
}

static void ocf_mngt_cache_remove(ocf_ctx_t ctx, ocf_cache_t cache)
{
	/* Mark device uninitialized */
	ocf_refcnt_freeze(&cache->refcnt.cache);

	/* Deinitialize locks */
	ocf_mngt_cache_lock_deinit(cache);
	env_mutex_destroy(&cache->flush_mutex);

	/* Remove cache from the list */
	env_rmutex_lock(&ctx->lock);
	list_del(&cache->list);
	env_rmutex_unlock(&ctx->lock);
}

static void ocf_mngt_cache_stop_finish(ocf_pipeline_t pipeline,
		void *priv, int error)
{
	struct ocf_mngt_cache_stop_context *context = priv;
	ocf_cache_t cache = context->cache;
	ocf_ctx_t ctx = context->ctx;
	int pipeline_error;
	ocf_mngt_cache_stop_end_t pipeline_cmpl;
	void *completion_priv;

	if (!error) {
		ocf_mngt_cache_remove(context->ctx, cache);
	} else {
		/* undo metadata counter freeze */
		ocf_refcnt_unfreeze(&cache->refcnt.metadata);

		env_bit_clear(ocf_cache_state_stopping, &cache->cache_state);
		env_bit_set(ocf_cache_state_running, &cache->cache_state);
	}

	if (!error) {
		if (!context->cache_write_error) {
			ocf_log(ctx, log_info,
					"Cache %s successfully stopped\n",
					context->cache_name);
		} else {
			ocf_log(ctx, log_warn, "Stopped cache %s with errors\n",
					context->cache_name);
		}
	} else {
		ocf_log(ctx, log_err, "Stopping cache %s failed\n",
				context->cache_name);
	}

	/*
	 * FIXME: Destroying the pipeline before completing the management
	 * operation is a temporary workaround for insufficient object lifetime
	 * management in pyocf. The context must not be referenced after
	 * destroying the pipeline, as it is typically freed upon pipeline
	 * destroy.
	 */
	pipeline_error = error ?: context->cache_write_error;
	pipeline_cmpl = context->cmpl;
	completion_priv = context->priv;

	ocf_pipeline_destroy(context->pipeline);

	pipeline_cmpl(cache, completion_priv, pipeline_error);

	if (!error) {
		/* Finally release cache instance */
		ocf_mngt_cache_put(cache);
	}
}

struct ocf_pipeline_properties ocf_mngt_cache_stop_pipeline_properties = {
	.priv_size = sizeof(struct ocf_mngt_cache_stop_context),
	.finish = ocf_mngt_cache_stop_finish,
	.steps = {
		OCF_PL_STEP(ocf_mngt_cache_stop_wait_metadata_io),
		OCF_PL_STEP(ocf_mngt_cache_stop_remove_cores),
		OCF_PL_STEP(ocf_mngt_cache_stop_unplug),
		OCF_PL_STEP(ocf_mngt_cache_stop_put_io_queues),
		OCF_PL_STEP_TERMINATOR(),
	},
};

static void _ocf_mngt_cache_attach(ocf_cache_t cache,
		struct ocf_mngt_cache_device_config *cfg, bool load,
		_ocf_mngt_cache_attach_end_t cmpl, void *priv1, void *priv2)
{
	struct ocf_cache_attach_context *context;
	ocf_pipeline_t pipeline;
	void *data;
	int result;

	result = ocf_pipeline_create(&pipeline, cache,
			&_ocf_mngt_cache_attach_pipeline_properties);
	if (result)
		OCF_CMPL_RET(cache, priv1, priv2, -OCF_ERR_NO_MEM);

	result = ocf_pipeline_create(&cache->stop_pipeline, cache,
			&ocf_mngt_cache_stop_pipeline_properties);
	if (result) {
		ocf_pipeline_destroy(pipeline);
		OCF_CMPL_RET(cache, priv1, priv2, -OCF_ERR_NO_MEM);
	}

	context = ocf_pipeline_get_priv(pipeline);

	context->cmpl = cmpl;
	context->priv1 = priv1;
	context->priv2 = priv2;
	context->pipeline = pipeline;

	context->cache = cache;
	context->cfg = *cfg;

	data = env_vmalloc(cfg->uuid.size);
	if (!data) {
		result = -OCF_ERR_NO_MEM;
		goto err_pipeline;
	}

	result = env_memcpy(data, cfg->uuid.size, cfg->uuid.data,
			cfg->uuid.size);
	if (result)
		goto err_uuid;

	context->cfg.uuid.data = data;

	if (cache->metadata.is_volatile) {
		context->init_mode = ocf_init_mode_metadata_volatile;
	} else {
		context->init_mode = load ?
				ocf_init_mode_load : ocf_init_mode_init;
	}

	_ocf_mngt_init_attached_nonpersistent(cache);

	OCF_PL_NEXT_RET(pipeline);

err_uuid:
	env_vfree(data);
err_pipeline:
	ocf_pipeline_destroy(pipeline);
	ocf_pipeline_destroy(cache->stop_pipeline);
	OCF_CMPL_RET(cache, priv1, priv2, result);
}

static int _ocf_mngt_cache_validate_cfg(struct ocf_mngt_cache_config *cfg)
{
	if (!strnlen(cfg->name, OCF_CACHE_NAME_SIZE))
		return -OCF_ERR_INVAL;

	if (!ocf_cache_mode_is_valid(cfg->cache_mode))
		return -OCF_ERR_INVALID_CACHE_MODE;

	if (cfg->eviction_policy >= ocf_eviction_max ||
			cfg->eviction_policy < 0) {
		return -OCF_ERR_INVAL;
	}

	if (cfg->promotion_policy >= ocf_promotion_max ||
			cfg->promotion_policy < 0) {
		return -OCF_ERR_INVAL;
	}

	if (!ocf_cache_line_size_is_valid(cfg->cache_line_size))
		return -OCF_ERR_INVALID_CACHE_LINE_SIZE;

	if (cfg->metadata_layout >= ocf_metadata_layout_max ||
			cfg->metadata_layout < 0) {
		return -OCF_ERR_INVAL;
	}

	if (cfg->backfill.queue_unblock_size > cfg->backfill.max_queue_size)
		return -OCF_ERR_INVAL;

	return 0;
}

static int _ocf_mngt_cache_validate_device_cfg(
		struct ocf_mngt_cache_device_config *device_cfg)
{
	if (!device_cfg->uuid.data)
		return -OCF_ERR_INVAL;

	if (device_cfg->uuid.size > OCF_VOLUME_UUID_MAX_SIZE)
		return -OCF_ERR_INVAL;

	if (device_cfg->cache_line_size != ocf_cache_line_size_none &&
			!ocf_cache_line_size_is_valid(device_cfg->cache_line_size))
		return -OCF_ERR_INVALID_CACHE_LINE_SIZE;

	return 0;
}

static const char *_ocf_cache_mode_names[ocf_cache_mode_max] = {
	[ocf_cache_mode_wt] = "wt",
	[ocf_cache_mode_wb] = "wb",
	[ocf_cache_mode_wa] = "wa",
	[ocf_cache_mode_pt] = "pt",
	[ocf_cache_mode_wi] = "wi",
	[ocf_cache_mode_wo] = "wo",
};

static const char *_ocf_cache_mode_get_name(ocf_cache_mode_t cache_mode)
{
	if (!ocf_cache_mode_is_valid(cache_mode))
		return NULL;

	return _ocf_cache_mode_names[cache_mode];
}

int ocf_mngt_cache_start(ocf_ctx_t ctx, ocf_cache_t *cache,
		struct ocf_mngt_cache_config *cfg)
{
	int result;

	if (!ctx || !cache || !cfg)
		return -OCF_ERR_INVAL;

	result = _ocf_mngt_cache_validate_cfg(cfg);
	if (result)
		return result;

	result = _ocf_mngt_cache_start(ctx, cache, cfg);
	if (!result) {
		_ocf_mngt_cache_set_valid(*cache);

		ocf_cache_log(*cache, log_info, "Successfully added\n");
		ocf_cache_log(*cache, log_info, "Cache mode : %s\n",
				_ocf_cache_mode_get_name(ocf_cache_get_mode(*cache)));
	} else
		ocf_log(ctx, log_err, "%s: Inserting cache failed\n", cfg->name);

	return result;
}
1896
1897 int ocf_mngt_cache_set_mngt_queue(ocf_cache_t cache, ocf_queue_t queue)
1898 {
1899 OCF_CHECK_NULL(cache);
1900 OCF_CHECK_NULL(queue);
1901
1902 if (cache->mngt_queue)
1903 return -OCF_ERR_INVAL;
1904
1905 ocf_queue_get(queue);
1906 cache->mngt_queue = queue;
1907
1908 return 0;
1909 }
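
/*
 * A minimal usage sketch for the two calls above: start a cache instance,
 * then assign its management queue. Assumes <string.h>, an adapter-defined
 * `my_queue_ops` (struct ocf_queue_ops), and that this OCF version provides
 * the ocf_mngt_cache_config_set_default() helper declared in ocf_mngt.h.
 * Error-path cleanup is abbreviated.
 */
extern const struct ocf_queue_ops my_queue_ops;

static int example_start_cache(ocf_ctx_t ctx, ocf_cache_t *cache)
{
	struct ocf_mngt_cache_config cfg;
	ocf_queue_t mngt_queue;
	int result;

	memset(&cfg, 0, sizeof(cfg));
	ocf_mngt_cache_config_set_default(&cfg);
	strncpy(cfg.name, "cache1", sizeof(cfg.name) - 1);
	cfg.cache_mode = ocf_cache_mode_wt;
	cfg.cache_line_size = ocf_cache_line_size_4;

	/* rejected by _ocf_mngt_cache_validate_cfg() above if invalid */
	result = ocf_mngt_cache_start(ctx, cache, &cfg);
	if (result)
		return result;

	/* the management queue must be set exactly once, before attach/load */
	result = ocf_queue_create(*cache, &mngt_queue, &my_queue_ops);
	if (result)
		return result;

	return ocf_mngt_cache_set_mngt_queue(*cache, mngt_queue);
}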
1910
1911 static void _ocf_mngt_cache_attach_complete(ocf_cache_t cache, void *priv1,
1912 void *priv2, int error)
1913 {
1914 ocf_mngt_cache_attach_end_t cmpl = priv1;
1915
1916 if (!error) {
1917 ocf_cache_log(cache, log_info, "Successfully attached\n");
1918 } else {
1919 ocf_cache_log(cache, log_err, "Attaching cache device "
1920 "failed\n");
1921 }
1922
1923 OCF_CMPL_RET(cache, priv2, error);
1924 }
1925
1926 void ocf_mngt_cache_attach(ocf_cache_t cache,
1927 struct ocf_mngt_cache_device_config *cfg,
1928 ocf_mngt_cache_attach_end_t cmpl, void *priv)
1929 {
1930 int result;
1931
1932 OCF_CHECK_NULL(cache);
1933 OCF_CHECK_NULL(cfg);
1934
1935 if (!cache->mngt_queue)
1936 OCF_CMPL_RET(cache, priv, -OCF_ERR_INVAL);
1937
1938 result = _ocf_mngt_cache_validate_device_cfg(cfg);
1939 if (result)
1940 OCF_CMPL_RET(cache, priv, result);
1941
1942 _ocf_mngt_cache_attach(cache, cfg, false,
1943 _ocf_mngt_cache_attach_complete, cmpl, priv);
1944 }
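
/*
 * A minimal usage sketch for ocf_mngt_cache_attach(): the call returns
 * immediately and the result arrives in the completion callback, run from
 * management queue context. `my_volume_type` stands for an adapter-registered
 * volume type id; a zeroed device config leaves cache_line_size as
 * ocf_cache_line_size_none, which the validation above accepts.
 */
static void example_attach_end(ocf_cache_t cache, void *priv, int error)
{
	if (error)
		ocf_cache_log(cache, log_err, "attach failed: %d\n", error);
}

static void example_attach(ocf_cache_t cache, uint8_t my_volume_type)
{
	struct ocf_mngt_cache_device_config device_cfg;
	static const char path[] = "/dev/example";

	memset(&device_cfg, 0, sizeof(device_cfg));
	/* uuid data is copied by OCF (see the env_memcpy near the top
	 * of this section), so a stack buffer would also do */
	device_cfg.uuid.data = (void *)path;
	device_cfg.uuid.size = sizeof(path);
	device_cfg.volume_type = my_volume_type;

	ocf_mngt_cache_attach(cache, &device_cfg, example_attach_end, NULL);
}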
1945
1946 static void _ocf_mngt_cache_unplug_complete(void *priv, int error)
1947 {
1948 struct _ocf_mngt_cache_unplug_context *context = priv;
1949 ocf_cache_t cache = context->cache;
1950
1951 ocf_volume_close(&cache->device->volume);
1952
1953 ocf_metadata_deinit_variable_size(cache);
1954 ocf_concurrency_deinit(cache);
1955 ocf_freelist_deinit(cache->freelist);
1956
1957 ocf_volume_deinit(&cache->device->volume);
1958
1959 env_vfree(cache->device);
1960 cache->device = NULL;
1961
1962 /* TODO: remove this from detach once 'attached' stats are better
1963 separated in the statistics code */
1964 _ocf_mngt_init_attached_nonpersistent(cache);
1965
1966 context->cmpl(context->priv, error ? -OCF_ERR_WRITE_CACHE : 0);
1967 }
1968
1969 /**
1970 * @brief Unplug caching device from cache instance. Variable size metadata
1971 * containers are deinitialized, as are other cacheline-related
1972 * structures. The cache volume is closed.
1973 *
1974 * @param cache OCF cache instance
1975 * @param stop - true if unplugging during stop - in this case we mark
1976 * clean shutdown in metadata and flush all containers.
1977 * - false if the device is to be detached from cache - loading
1978 * metadata from this device will not be possible.
1979 * @param context - context for this call, must be zeroed
1980 * @param cmpl Completion callback
1981 * @param priv Completion context
1982 */
1983 static void _ocf_mngt_cache_unplug(ocf_cache_t cache, bool stop,
1984 struct _ocf_mngt_cache_unplug_context *context,
1985 _ocf_mngt_cache_unplug_end_t cmpl, void *priv)
1986 {
1987 ENV_BUG_ON(stop && cache->conf_meta->core_count != 0);
1988
1989 context->cmpl = cmpl;
1990 context->priv = priv;
1991 context->cache = cache;
1992
1993 ocf_stop_cleaner(cache);
1994
1995 __deinit_cleaning_policy(cache);
1996 __deinit_promotion_policy(cache);
1997
1998 if (ocf_mngt_cache_is_dirty(cache)) {
1999 ENV_BUG_ON(!stop);
2000
2001 cache->conf_meta->dirty_flushed = DIRTY_NOT_FLUSHED;
2002
2003 ocf_cache_log(cache, log_warn, "Cache is still dirty. "
2004 "DO NOT USE your core devices until flushing "
2005 "dirty data!\n");
2006 } else {
2007 cache->conf_meta->dirty_flushed = DIRTY_FLUSHED;
2008 }
2009
2010 if (!stop) {
2011 /* Just set correct shutdown status */
2012 ocf_metadata_set_shutdown_status(cache, ocf_metadata_detached,
2013 _ocf_mngt_cache_unplug_complete, context);
2014 } else {
2015 /* Flush metadata */
2016 ocf_metadata_flush_all(cache,
2017 _ocf_mngt_cache_unplug_complete, context);
2018 }
2019 }
2020
2021 static int _ocf_mngt_cache_load_core_log(ocf_core_t core, void *cntx)
2022 {
2023 ocf_core_log(core, log_info, "Successfully added\n");
2024
2025 return 0;
2026 }
2027
2028 static void _ocf_mngt_cache_load_log(ocf_cache_t cache)
2029 {
2030 ocf_cache_mode_t cache_mode = ocf_cache_get_mode(cache);
2031 ocf_eviction_t eviction_type = cache->conf_meta->eviction_policy_type;
2032 ocf_cleaning_t cleaning_type = cache->conf_meta->cleaning_policy_type;
2033 ocf_promotion_t promotion_type = cache->conf_meta->promotion_policy_type;
2034
2035 ocf_cache_log(cache, log_info, "Successfully loaded\n");
2036 ocf_cache_log(cache, log_info, "Cache mode : %s\n",
2037 _ocf_cache_mode_get_name(cache_mode));
2038 ocf_cache_log(cache, log_info, "Eviction policy : %s\n",
2039 evict_policy_ops[eviction_type].name);
2040 ocf_cache_log(cache, log_info, "Cleaning policy : %s\n",
2041 cleaning_policy_ops[cleaning_type].name);
2042 ocf_cache_log(cache, log_info, "Promotion policy : %s\n",
2043 ocf_promotion_policies[promotion_type].name);
2044 ocf_core_visit(cache, _ocf_mngt_cache_load_core_log,
2045 cache, false);
2046 }
2047
2048 static void _ocf_mngt_cache_load_complete(ocf_cache_t cache, void *priv1,
2049 void *priv2, int error)
2050 {
2051 ocf_mngt_cache_load_end_t cmpl = priv1;
2052
2053 if (error)
2054 OCF_CMPL_RET(cache, priv2, error);
2055
2056 _ocf_mngt_cache_set_valid(cache);
2057 _ocf_mngt_cache_load_log(cache);
2058
2059 OCF_CMPL_RET(cache, priv2, 0);
2060 }
2061
2062 void ocf_mngt_cache_load(ocf_cache_t cache,
2063 struct ocf_mngt_cache_device_config *cfg,
2064 ocf_mngt_cache_load_end_t cmpl, void *priv)
2065 {
2066 int result;
2067
2068 OCF_CHECK_NULL(cache);
2069 OCF_CHECK_NULL(cfg);
2070
2071 if (!cache->mngt_queue)
2072 OCF_CMPL_RET(cache, priv, -OCF_ERR_INVAL);
2073
2074 /* Load is not allowed in volatile metadata mode */
2075 if (cache->metadata.is_volatile)
2076 OCF_CMPL_RET(cache, priv, -OCF_ERR_INVAL);
2077
2078 result = _ocf_mngt_cache_validate_device_cfg(cfg);
2079 if (result)
2080 OCF_CMPL_RET(cache, priv, result);
2081
2082 _ocf_mngt_cache_attach(cache, cfg, true,
2083 _ocf_mngt_cache_load_complete, cmpl, priv);
2084 }
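
/*
 * A minimal usage sketch for ocf_mngt_cache_load(): it follows the same
 * calling convention as attach, but restores metadata from the device
 * instead of initializing it, and is rejected up front for
 * volatile-metadata caches.
 */
static void example_load_end(ocf_cache_t cache, void *priv, int error)
{
	if (error)
		ocf_cache_log(cache, log_err, "load failed: %d\n", error);
}

static void example_load(ocf_cache_t cache,
		struct ocf_mngt_cache_device_config *device_cfg)
{
	ocf_mngt_cache_load(cache, device_cfg, example_load_end, NULL);
}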
2085
2086 static void ocf_mngt_cache_stop_detached(ocf_cache_t cache,
2087 ocf_mngt_cache_stop_end_t cmpl, void *priv)
2088 {
2089 _ocf_mngt_cache_stop_remove_cores(cache, false);
2090 _ocf_mngt_cache_put_io_queues(cache);
2091 ocf_mngt_cache_remove(cache->owner, cache);
2092 ocf_cache_log(cache, log_info, "Cache %s successfully stopped\n",
2093 ocf_cache_get_name(cache));
2094 cmpl(cache, priv, 0);
2095 ocf_mngt_cache_put(cache);
2096 }
2097
2098 void ocf_mngt_cache_stop(ocf_cache_t cache,
2099 ocf_mngt_cache_stop_end_t cmpl, void *priv)
2100 {
2101 struct ocf_mngt_cache_stop_context *context;
2102 ocf_pipeline_t pipeline;
2103
2104 OCF_CHECK_NULL(cache);
2105
2106 if (!ocf_cache_is_device_attached(cache)) {
2107 ocf_mngt_cache_stop_detached(cache, cmpl, priv);
2108 return;
2109 }
2110
2111 ENV_BUG_ON(!cache->mngt_queue);
2112
2113 pipeline = cache->stop_pipeline;
2114 context = ocf_pipeline_get_priv(pipeline);
2115
2116 context->cmpl = cmpl;
2117 context->priv = priv;
2118 context->pipeline = pipeline;
2119 context->cache = cache;
2120 context->ctx = cache->owner;
2121
2122 ENV_BUG_ON(env_strncpy(context->cache_name, sizeof(context->cache_name),
2123 ocf_cache_get_name(cache), sizeof(context->cache_name)));
2124
2125 ocf_cache_log(cache, log_info, "Stopping cache\n");
2126
2127 env_bit_set(ocf_cache_state_stopping, &cache->cache_state);
2128 env_bit_clear(ocf_cache_state_running, &cache->cache_state);
2129
2130 ocf_pipeline_next(pipeline);
2131 }
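
/*
 * A minimal usage sketch for ocf_mngt_cache_stop(), waiting synchronously
 * for completion. Stop reuses the pre-allocated cache->stop_pipeline, so it
 * cannot fail with an allocation error at this point; any error is delivered
 * to the callback. Assumes POSIX <semaphore.h> and that the management queue
 * is serviced by a different thread than the one waiting here.
 */
static void example_stop_end(ocf_cache_t cache, void *priv, int error)
{
	sem_t *sem = priv;

	if (error)
		ocf_cache_log(cache, log_err, "stop failed: %d\n", error);
	sem_post(sem);
}

static void example_stop_and_wait(ocf_cache_t cache)
{
	sem_t sem;

	sem_init(&sem, 0, 0);
	ocf_mngt_cache_stop(cache, example_stop_end, &sem);
	sem_wait(&sem);		/* released by example_stop_end() */
	sem_destroy(&sem);
}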
2132
2133 struct ocf_mngt_cache_save_context {
2134 ocf_mngt_cache_save_end_t cmpl;
2135 void *priv;
2136 ocf_pipeline_t pipeline;
2137 ocf_cache_t cache;
2138 };
2139
2140 static void ocf_mngt_cache_save_finish(ocf_pipeline_t pipeline,
2141 void *priv, int error)
2142 {
2143 struct ocf_mngt_cache_save_context *context = priv;
2144
2145 context->cmpl(context->cache, context->priv, error);
2146
2147 ocf_pipeline_destroy(context->pipeline);
2148 }
2149
2150 struct ocf_pipeline_properties ocf_mngt_cache_save_pipeline_properties = {
2151 .priv_size = sizeof(struct ocf_mngt_cache_save_context),
2152 .finish = ocf_mngt_cache_save_finish,
2153 .steps = {
2154 OCF_PL_STEP_TERMINATOR(),
2155 },
2156 };
2157
2158 static void ocf_mngt_cache_save_flush_sb_complete(void *priv, int error)
2159 {
2160 struct ocf_mngt_cache_save_context *context = priv;
2161 ocf_cache_t cache = context->cache;
2162
2163 if (error) {
2164 ocf_cache_log(cache, log_err,
2165 "Failed to flush superblock! Changes "
2166 "in cache config are not persistent!\n");
2167 OCF_PL_FINISH_RET(context->pipeline, -OCF_ERR_WRITE_CACHE);
2168 }
2169
2170 ocf_pipeline_next(context->pipeline);
2171 }
2172
2173 void ocf_mngt_cache_save(ocf_cache_t cache,
2174 ocf_mngt_cache_save_end_t cmpl, void *priv)
2175 {
2176 struct ocf_mngt_cache_save_context *context;
2177 ocf_pipeline_t pipeline;
2178 int result;
2179
2180 OCF_CHECK_NULL(cache);
2181
2182 if (!cache->mngt_queue)
2183 OCF_CMPL_RET(cache, priv, -OCF_ERR_INVAL);
2184
2185 result = ocf_pipeline_create(&pipeline, cache,
2186 &ocf_mngt_cache_save_pipeline_properties);
2187 if (result)
2188 OCF_CMPL_RET(cache, priv, result);
2189
2190 context = ocf_pipeline_get_priv(pipeline);
2191
2192 context->cmpl = cmpl;
2193 context->priv = priv;
2194 context->pipeline = pipeline;
2195 context->cache = cache;
2196
2197 ocf_metadata_flush_superblock(cache,
2198 ocf_mngt_cache_save_flush_sb_complete, context);
2199 }
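
/*
 * A minimal usage sketch for ocf_mngt_cache_save(): since the pipeline above
 * contains only a terminator step, the real work is the superblock flush
 * kicked just before the pipeline runs; on write failure the callback
 * receives -OCF_ERR_WRITE_CACHE and the config change is not persistent.
 */
static void example_save_end(ocf_cache_t cache, void *priv, int error)
{
	if (error)
		ocf_cache_log(cache, log_err, "config not persisted: %d\n",
				error);
}

static void example_save(ocf_cache_t cache)
{
	ocf_mngt_cache_save(cache, example_save_end, NULL);
}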
2200
2201 static void _cache_mngt_update_initial_dirty_clines(ocf_cache_t cache)
2202 {
2203 ocf_core_t core;
2204 ocf_core_id_t core_id;
2205
2206 for_each_core(cache, core, core_id) {
2207 env_atomic_set(&core->runtime_meta->initial_dirty_clines,
2208 env_atomic_read(&core->runtime_meta->
2209 dirty_clines));
2210 }
2211
2212 }
2213
2214 static int _cache_mngt_set_cache_mode(ocf_cache_t cache, ocf_cache_mode_t mode)
2215 {
2216 ocf_cache_mode_t mode_old = cache->conf_meta->cache_mode;
2217
2218 /* Check if IO interface type is valid */
2219 if (!ocf_cache_mode_is_valid(mode))
2220 return -OCF_ERR_INVAL;
2221
2222 if (mode == mode_old) {
2223 ocf_cache_log(cache, log_info, "Cache mode '%s' is already set\n",
2224 ocf_get_io_iface_name(mode));
2225 return 0;
2226 }
2227
2228 cache->conf_meta->cache_mode = mode;
2229
2230 if (ocf_mngt_cache_mode_has_lazy_write(mode_old) &&
2231 !ocf_mngt_cache_mode_has_lazy_write(mode)) {
2232 _cache_mngt_update_initial_dirty_clines(cache);
2233 }
2234
2235 ocf_cache_log(cache, log_info, "Changing cache mode from '%s' to '%s' "
2236 "successful\n", ocf_get_io_iface_name(mode_old),
2237 ocf_get_io_iface_name(mode));
2238
2239 return 0;
2240 }
2241
2242 int ocf_mngt_cache_set_mode(ocf_cache_t cache, ocf_cache_mode_t mode)
2243 {
2244 int result;
2245
2246 OCF_CHECK_NULL(cache);
2247
2248 if (!ocf_cache_mode_is_valid(mode)) {
2249 ocf_cache_log(cache, log_err, "Cache mode %u is invalid\n",
2250 mode);
2251 return -OCF_ERR_INVAL;
2252 }
2253
2254 result = _cache_mngt_set_cache_mode(cache, mode);
2255
2256 if (result) {
2257 const char *name = ocf_get_io_iface_name(mode);
2258
2259 ocf_cache_log(cache, log_err, "Setting cache mode '%s' "
2260 "failed\n", name);
2261 }
2262
2263 return result;
2264 }
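
/*
 * A minimal usage sketch for ocf_mngt_cache_set_mode(): the switch is
 * synchronous and affects only the in-memory config; note from the code
 * above that leaving a lazy-write mode (WB/WO) snapshots the per-core
 * dirty-cline counters. Calling ocf_mngt_cache_save() afterwards (see the
 * sketch above) makes the new mode persistent.
 */
static int example_switch_to_write_back(ocf_cache_t cache)
{
	int result = ocf_mngt_cache_set_mode(cache, ocf_cache_mode_wb);

	if (!result)
		example_save(cache);

	return result;
}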
2265
2266 int ocf_mngt_cache_promotion_set_policy(ocf_cache_t cache, ocf_promotion_t type)
2267 {
2268 int result;
2269
2270 ocf_metadata_start_exclusive_access(&cache->metadata.lock);
2271
2272 result = ocf_promotion_set_policy(cache->promotion_policy, type);
2273
2274 ocf_metadata_end_exclusive_access(&cache->metadata.lock);
2275
2276 return result;
2277 }
2278
2279 ocf_promotion_t ocf_mngt_cache_promotion_get_policy(ocf_cache_t cache)
2280 {
2281 ocf_promotion_t result;
2282
2283 ocf_metadata_start_shared_access(&cache->metadata.lock);
2284
2285 result = cache->conf_meta->promotion_policy_type;
2286
2287 ocf_metadata_end_shared_access(&cache->metadata.lock);
2288
2289 return result;
2290 }
2291
2292 int ocf_mngt_cache_promotion_get_param(ocf_cache_t cache, ocf_promotion_t type,
2293 uint8_t param_id, uint32_t *param_value)
2294 {
2295 int result;
2296
2297 ocf_metadata_start_shared_access(&cache->metadata.lock);
2298
2299 result = ocf_promotion_get_param(cache, type, param_id, param_value);
2300
2301 ocf_metadata_end_shared_access(&cache->metadata.lock);
2302
2303 return result;
2304 }
2305
2306 int ocf_mngt_cache_promotion_set_param(ocf_cache_t cache, ocf_promotion_t type,
2307 uint8_t param_id, uint32_t param_value)
2308 {
2309 int result;
2310
2311 ocf_metadata_start_exclusive_access(&cache->metadata.lock);
2312
2313 result = ocf_promotion_set_param(cache, type, param_id, param_value);
2314
2315 ocf_metadata_end_exclusive_access(&cache->metadata.lock);
2316
2317 return result;
2318 }
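
/*
 * A minimal usage sketch for the promotion get/set pairs above, which share
 * one locking pattern: reads take the metadata lock shared, writes take it
 * exclusive, and both return synchronously. `param_id` is a placeholder for
 * a policy-specific parameter id (the valid ids depend on the promotion
 * policy, e.g. nhit).
 */
static int example_double_promotion_param(ocf_cache_t cache, uint8_t param_id)
{
	uint32_t value;
	int result;

	result = ocf_mngt_cache_promotion_get_param(cache, ocf_promotion_nhit,
			param_id, &value);
	if (result)
		return result;

	return ocf_mngt_cache_promotion_set_param(cache, ocf_promotion_nhit,
			param_id, value * 2);
}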
2319
2320 int ocf_mngt_cache_reset_fallback_pt_error_counter(ocf_cache_t cache)
2321 {
2322 OCF_CHECK_NULL(cache);
2323
2324 if (ocf_fallback_pt_is_on(cache)) {
2325 ocf_cache_log(cache, log_info,
2326 "Fallback Pass Through inactive\n");
2327 }
2328
2329 env_atomic_set(&cache->fallback_pt_error_counter, 0);
2330
2331 return 0;
2332 }
2333
2334 int ocf_mngt_cache_set_fallback_pt_error_threshold(ocf_cache_t cache,
2335 uint32_t new_threshold)
2336 {
2337 bool old_fallback_pt_state, new_fallback_pt_state;
2338
2339 OCF_CHECK_NULL(cache);
2340
2341 if (new_threshold > OCF_CACHE_FALLBACK_PT_MAX_ERROR_THRESHOLD)
2342 return -OCF_ERR_INVAL;
2343
2344 old_fallback_pt_state = ocf_fallback_pt_is_on(cache);
2345
2346 cache->fallback_pt_error_threshold = new_threshold;
2347
2348 new_fallback_pt_state = ocf_fallback_pt_is_on(cache);
2349
2350 if (old_fallback_pt_state != new_fallback_pt_state) {
2351 if (new_fallback_pt_state) {
2352 ocf_cache_log(cache, log_info, "Error threshold reached. "
2353 "Fallback Pass Through activated\n");
2354 } else {
2355 ocf_cache_log(cache, log_info, "Fallback Pass Through "
2356 "inactive\n");
2357 }
2358 }
2359
2360 return 0;
2361 }
2362
2363 int ocf_mngt_cache_get_fallback_pt_error_threshold(ocf_cache_t cache,
2364 uint32_t *threshold)
2365 {
2366 OCF_CHECK_NULL(cache);
2367 OCF_CHECK_NULL(threshold);
2368
2369 *threshold = cache->fallback_pt_error_threshold;
2370
2371 return 0;
2372 }
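
/*
 * A minimal usage sketch for the fallback pass-through threshold accessors
 * above: only crossings of the threshold (in either direction) are logged,
 * and values above OCF_CACHE_FALLBACK_PT_MAX_ERROR_THRESHOLD are rejected
 * with -OCF_ERR_INVAL.
 */
static int example_set_fallback_pt(ocf_cache_t cache, uint32_t threshold)
{
	uint32_t old;

	if (!ocf_mngt_cache_get_fallback_pt_error_threshold(cache, &old))
		ocf_cache_log(cache, log_info, "old threshold: %u\n", old);

	return ocf_mngt_cache_set_fallback_pt_error_threshold(cache,
			threshold);
}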
2373
2374 struct ocf_mngt_cache_detach_context {
2375 /* unplug context - this is the private structure of _ocf_mngt_cache_unplug;
2376 * it is a member of the detach context only to reserve memory in advance for
2377 * _ocf_mngt_cache_unplug, eliminating the possibility of an ENOMEM error
2378 * at a point where we are effectively unable to handle it */
2379 struct _ocf_mngt_cache_unplug_context unplug_context;
2380
2381 ocf_mngt_cache_detach_end_t cmpl;
2382 void *priv;
2383 ocf_pipeline_t pipeline;
2384 ocf_cache_t cache;
2385 int cache_write_error;
2386 struct ocf_cleaner_wait_context cleaner_wait;
2387 };
2388
2389 static void ocf_mngt_cache_detach_flush_cmpl(ocf_cache_t cache,
2390 void *priv, int error)
2391 {
2392 struct ocf_mngt_cache_detach_context *context = priv;
2393
2394 OCF_PL_NEXT_ON_SUCCESS_RET(context->pipeline, error);
2395 }
2396
2397 static void ocf_mngt_cache_detach_flush(ocf_pipeline_t pipeline,
2398 void *priv, ocf_pipeline_arg_t arg)
2399 {
2400 struct ocf_mngt_cache_detach_context *context = priv;
2401 ocf_cache_t cache = context->cache;
2402
2403 ocf_mngt_cache_flush(cache, ocf_mngt_cache_detach_flush_cmpl, context);
2404 }
2405
2406 static void ocf_mngt_cache_detach_stop_cache_io_finish(void *priv)
2407 {
2408 struct ocf_mngt_cache_detach_context *context = priv;
2409 ocf_pipeline_next(context->pipeline);
2410 }
2411
2412 static void ocf_mngt_cache_detach_stop_cache_io(ocf_pipeline_t pipeline,
2413 void *priv, ocf_pipeline_arg_t arg)
2414 {
2415 struct ocf_mngt_cache_detach_context *context = priv;
2416 ocf_cache_t cache = context->cache;
2417
2418 ocf_refcnt_freeze(&cache->refcnt.metadata);
2419 ocf_refcnt_register_zero_cb(&cache->refcnt.metadata,
2420 ocf_mngt_cache_detach_stop_cache_io_finish, context);
2421 }
2422
2423 static void ocf_mngt_cache_detach_stop_cleaner_io_finish(void *priv)
2424 {
2425 ocf_pipeline_t pipeline = priv;
2426 ocf_pipeline_next(pipeline);
2427 }
2428
2429 static void ocf_mngt_cache_detach_stop_cleaner_io(ocf_pipeline_t pipeline,
2430 void *priv, ocf_pipeline_arg_t arg)
2431 {
2432 struct ocf_mngt_cache_detach_context *context = priv;
2433 ocf_cache_t cache = context->cache;
2434
2435 ocf_cleaner_refcnt_freeze(cache);
2436 ocf_cleaner_refcnt_register_zero_cb(cache, &context->cleaner_wait,
2437 ocf_mngt_cache_detach_stop_cleaner_io_finish,
2438 pipeline);
2439 }
2440
2441 static void ocf_mngt_cache_detach_update_metadata(ocf_pipeline_t pipeline,
2442 void *priv, ocf_pipeline_arg_t arg)
2443 {
2444 struct ocf_mngt_cache_detach_context *context = priv;
2445 ocf_cache_t cache = context->cache;
2446 ocf_core_t core;
2447 ocf_core_id_t core_id;
2448 int no = cache->conf_meta->core_count;
2449
2450 /* remove cacheline metadata and cleaning policy meta for all cores */
2451 for_each_core_metadata(cache, core, core_id) {
2452 cache_mngt_core_deinit_attached_meta(core);
2453 cache_mngt_core_remove_from_cleaning_pol(core);
2454 if (--no == 0)
2455 break;
2456 }
2457
2458 ocf_pipeline_next(context->pipeline);
2459 }
2460
2461 static void ocf_mngt_cache_detach_unplug_complete(void *priv, int error)
2462 {
2463 struct ocf_mngt_cache_detach_context *context = priv;
2464
2465 if (error) {
2466 ENV_BUG_ON(error != -OCF_ERR_WRITE_CACHE);
2467 context->cache_write_error = error;
2468 }
2469
2470 ocf_pipeline_next(context->pipeline);
2471 }
2472
2473 static void ocf_mngt_cache_detach_unplug(ocf_pipeline_t pipeline,
2474 void *priv, ocf_pipeline_arg_t arg)
2475 {
2476 struct ocf_mngt_cache_detach_context *context = priv;
2477 ocf_cache_t cache = context->cache;
2478
2479 /* Do the actual detach - deinit cacheline metadata,
2480 * stop the cleaner thread and close the cache bottom device */
2481 _ocf_mngt_cache_unplug(cache, false, &context->unplug_context,
2482 ocf_mngt_cache_detach_unplug_complete, context);
2483 }
2484
2485 static void ocf_mngt_cache_detach_finish(ocf_pipeline_t pipeline,
2486 void *priv, int error)
2487 {
2488 struct ocf_mngt_cache_detach_context *context = priv;
2489 ocf_cache_t cache = context->cache;
2490
2491 ocf_refcnt_unfreeze(&cache->refcnt.dirty);
2492
2493 if (!error) {
2494 if (!context->cache_write_error) {
2495 ocf_cache_log(cache, log_info,
2496 "Device successfully detached\n");
2497 } else {
2498 ocf_cache_log(cache, log_warn,
2499 "Device detached with errors\n");
2500 }
2501 } else {
2502 ocf_cache_log(cache, log_err,
2503 "Detaching device failed\n");
2504 }
2505
2506 context->cmpl(cache, context->priv,
2507 error ?: context->cache_write_error);
2508
2509 ocf_pipeline_destroy(context->pipeline);
2510 ocf_pipeline_destroy(cache->stop_pipeline);
2511 }
2512
2513 struct ocf_pipeline_properties ocf_mngt_cache_detach_pipeline_properties = {
2514 .priv_size = sizeof(struct ocf_mngt_cache_detach_context),
2515 .finish = ocf_mngt_cache_detach_finish,
2516 .steps = {
2517 OCF_PL_STEP(ocf_mngt_cache_detach_flush),
2518 OCF_PL_STEP(ocf_mngt_cache_detach_stop_cache_io),
2519 OCF_PL_STEP(ocf_mngt_cache_detach_stop_cleaner_io),
2520 OCF_PL_STEP(ocf_mngt_cache_detach_update_metadata),
2521 OCF_PL_STEP(ocf_mngt_cache_detach_unplug),
2522 OCF_PL_STEP_TERMINATOR(),
2523 },
2524 };
2525
2526 void ocf_mngt_cache_detach(ocf_cache_t cache,
2527 ocf_mngt_cache_detach_end_t cmpl, void *priv)
2528 {
2529 struct ocf_mngt_cache_detach_context *context;
2530 ocf_pipeline_t pipeline;
2531 int result;
2532
2533 OCF_CHECK_NULL(cache);
2534
2535 if (!cache->mngt_queue)
2536 OCF_CMPL_RET(cache, priv, -OCF_ERR_INVAL);
2537
2538 if (!ocf_cache_is_device_attached(cache))
2539 OCF_CMPL_RET(cache, priv, -OCF_ERR_INVAL);
2540
2541 result = ocf_pipeline_create(&pipeline, cache,
2542 &ocf_mngt_cache_detach_pipeline_properties);
2543 if (result)
2544 OCF_CMPL_RET(cache, priv, -OCF_ERR_NO_MEM);
2545
2546 context = ocf_pipeline_get_priv(pipeline);
2547
2548 context->cmpl = cmpl;
2549 context->priv = priv;
2550 context->pipeline = pipeline;
2551 context->cache = cache;
2552
2553 /* prevent dirty io */
2554 ocf_refcnt_freeze(&cache->refcnt.dirty);
2555
2556 ocf_pipeline_next(pipeline);
2557 }
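
/*
 * A minimal usage sketch for ocf_mngt_cache_detach(): the cache instance and
 * its cores stay up while the device is removed. Per the pipeline above,
 * dirty data is flushed first and the shutdown status is set to "detached",
 * so - as the _ocf_mngt_cache_unplug() comment notes - metadata can no
 * longer be loaded from this device.
 */
static void example_detach_end(ocf_cache_t cache, void *priv, int error)
{
	if (error == -OCF_ERR_WRITE_CACHE)
		ocf_cache_log(cache, log_warn,
				"detached, but metadata write failed\n");
	else if (error)
		ocf_cache_log(cache, log_err, "detach failed: %d\n", error);
}

static void example_detach(ocf_cache_t cache)
{
	ocf_mngt_cache_detach(cache, example_detach_end, NULL);
}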