/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#include "ocf/ocf.h"
#include "ocf_mngt_common.h"
#include "ocf_mngt_core_priv.h"
#include "../ocf_priv.h"
#include "../ocf_core_priv.h"
#include "../ocf_queue_priv.h"
#include "../metadata/metadata.h"
#include "../engine/cache_engine.h"
#include "../utils/utils_part.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_device.h"
#include "../utils/utils_io.h"
#include "../utils/utils_pipeline.h"
#include "../utils/utils_refcnt.h"
#include "../ocf_utils.h"
#include "../concurrency/ocf_concurrency.h"
#include "../eviction/ops.h"
#include "../ocf_ctx_priv.h"
#include "../cleaning/cleaning.h"

#define OCF_ASSERT_PLUGGED(cache) ENV_BUG_ON(!(cache)->device)

static ocf_cache_t _ocf_mngt_get_cache(ocf_ctx_t owner,
		ocf_cache_id_t cache_id)
{
	ocf_cache_t iter = NULL;
	ocf_cache_t cache = NULL;

	list_for_each_entry(iter, &owner->caches, list) {
		if (iter->cache_id == cache_id) {
			cache = iter;
			break;
		}
	}

	return cache;
}

#define DIRTY_SHUTDOWN_ERROR_MSG "Please use --load option to restore " \
	"previous cache state (Warning: data corruption may happen)" \
	"\nOr initialize your cache using --force option. " \
	"Warning: All dirty data will be lost!\n"

#define DIRTY_NOT_FLUSHED_ERROR_MSG "Cache closed w/ no data flushing\n" \
	"Restart with --load or --force option\n"

/**
 * @brief Helper structure holding parameters for starting a cache
 */
struct ocf_cachemng_init_params {
	bool metadata_volatile;

	ocf_cache_id_t id;
		/*!< cache id */

	ocf_ctx_t ctx;
		/*!< OCF context */

	ocf_cache_t cache;
		/*!< cache that is being initialized */

	uint8_t locked;
		/*!< Keep cache locked */

	/**
	 * @brief initialization state (in case of error, it is used to know
	 * which assets have to be deallocated on premature exit from function)
	 */
	struct {
		bool cache_alloc : 1;
			/*!< cache is allocated and added to list */

		bool metadata_inited : 1;
			/*!< Metadata is inited to valid state */

		bool cache_locked : 1;
			/*!< Cache has been locked */
	} flags;

	struct ocf_metadata_init_params {
		ocf_cache_line_size_t line_size;
		/*!< Metadata cache line size */

		ocf_metadata_layout_t layout;
		/*!< Metadata layout (striping/sequential) */

		ocf_cache_mode_t cache_mode;
		/*!< cache mode */
	} metadata;
};

typedef void (*_ocf_mngt_cache_attach_end_t)(ocf_cache_t, void *priv1,
		void *priv2, int error);

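/*
 * Note: the attach path below is built as an ocf_pipeline. Each step either
 * calls ocf_pipeline_next() to advance to the following step or
 * ocf_pipeline_finish() to abort with an error; the pipeline's .finish
 * callback then performs cleanup based on the flags recorded in this
 * context.
 */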
struct ocf_cache_attach_context {
	ocf_cache_t cache;
		/*!< cache that is being initialized */

	struct ocf_mngt_cache_device_config cfg;

	uint64_t volume_size;
		/*!< size of the cache device in bytes */

	enum ocf_mngt_cache_init_mode init_mode;
		/*!< cache init mode */

	/**
	 * @brief initialization state (in case of error, it is used to know
	 * which assets have to be deallocated on premature exit from function)
	 */
	struct {
		bool device_alloc : 1;
			/*!< data structure allocated */

		bool volume_inited : 1;
			/*!< uuid for cache device is allocated */

		bool attached_metadata_inited : 1;
			/*!< attached metadata sections initialized */

		bool device_opened : 1;
			/*!< underlying device volume is open */

		bool cleaner_started : 1;
			/*!< Cleaner has been started */

		bool cores_opened : 1;
			/*!< underlying cores are opened (happens only during
			 * load or recovery)
			 */

		bool concurrency_inited : 1;
	} flags;

	struct {
		ocf_cache_line_size_t line_size;
			/*!< Metadata cache line size */

		ocf_metadata_layout_t layout;
			/*!< Metadata layout (striping/sequential) */

		ocf_cache_mode_t cache_mode;
			/*!< cache mode */

		enum ocf_metadata_shutdown_status shutdown_status;
			/*!< dirty or clean */

		uint8_t dirty_flushed;
			/*!< is dirty data fully flushed */

		int status;
			/*!< metadata retrieval status (nonzero is a sign of an
			 * error during recovery/load but is a non-issue in
			 * case of clean init)
			 */
	} metadata;

	struct {
		void *rw_buffer;
		void *cmp_buffer;
		unsigned long reserved_lba_addr;
		ocf_pipeline_t pipeline;
	} test;

	_ocf_mngt_cache_attach_end_t cmpl;
	void *priv1;
	void *priv2;

	ocf_pipeline_t pipeline;
};

static ocf_cache_id_t _ocf_mngt_cache_find_free_id(ocf_ctx_t owner)
{
	ocf_cache_id_t id = OCF_CACHE_ID_INVALID;

	for (id = OCF_CACHE_ID_MIN; id <= OCF_CACHE_ID_MAX; id++) {
		if (!_ocf_mngt_get_cache(owner, id))
			return id;
	}

	return OCF_CACHE_ID_INVALID;
}

static void __init_hash_table(ocf_cache_t cache)
{
	/* Initialize hash table */
	ocf_metadata_init_hash_table(cache);
}

static void __init_freelist(ocf_cache_t cache)
{
	/* Initialize free list partition */
	ocf_metadata_init_freelist_partition(cache);
}

static void __init_partitions(ocf_cache_t cache)
{
	ocf_part_id_t i_part;

	/* Init default partition */
	ENV_BUG_ON(ocf_mngt_add_partition_to_cache(cache, PARTITION_DEFAULT,
			"unclassified", 0, PARTITION_SIZE_MAX,
			OCF_IO_CLASS_PRIO_LOWEST, true));

	/* Add remaining partitions to the cache and mark them as dummy */
	for (i_part = 0; i_part < OCF_IO_CLASS_MAX; i_part++) {
		if (i_part == PARTITION_DEFAULT)
			continue;

		ENV_BUG_ON(ocf_mngt_add_partition_to_cache(cache, i_part,
				"Inactive", 0, PARTITION_SIZE_MAX,
				OCF_IO_CLASS_PRIO_LOWEST, false));
	}
}

static void __init_partitions_attached(ocf_cache_t cache)
{
	ocf_part_id_t part_id;

	for (part_id = 0; part_id < OCF_IO_CLASS_MAX; part_id++) {
		cache->user_parts[part_id].runtime->head =
				cache->device->collision_table_entries;
		cache->user_parts[part_id].runtime->curr_size = 0;

		ocf_eviction_initialize(cache, part_id);
	}
}

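/*
 * Run setup for every cleaning policy (each policy keeps its own metadata
 * section, which must be brought to a consistent state), then initialize
 * only the default policy, which becomes the active one.
 */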
static void __init_cleaning_policy(ocf_cache_t cache)
{
	ocf_cleaning_t cleaning_policy = ocf_cleaning_default;
	int i;

	OCF_ASSERT_PLUGGED(cache);

	for (i = 0; i < ocf_cleaning_max; i++) {
		if (cleaning_policy_ops[i].setup)
			cleaning_policy_ops[i].setup(cache);
	}

	cache->conf_meta->cleaning_policy_type = ocf_cleaning_default;
	if (cleaning_policy_ops[cleaning_policy].initialize)
		cleaning_policy_ops[cleaning_policy].initialize(cache, 1);
}

static void __deinit_cleaning_policy(ocf_cache_t cache)
{
	ocf_cleaning_t cleaning_policy;

	cleaning_policy = cache->conf_meta->cleaning_policy_type;
	if (cleaning_policy_ops[cleaning_policy].deinitialize)
		cleaning_policy_ops[cleaning_policy].deinitialize(cache);
}

static void __init_eviction_policy(ocf_cache_t cache,
		ocf_eviction_t eviction)
{
	ENV_BUG_ON(eviction < 0 || eviction >= ocf_eviction_max);

	cache->conf_meta->eviction_policy_type = eviction;
}

static void __init_cores(ocf_cache_t cache)
{
	/* No core devices yet */
	cache->conf_meta->core_count = 0;
	ENV_BUG_ON(env_memset(cache->conf_meta->valid_core_bitmap,
			sizeof(cache->conf_meta->valid_core_bitmap), 0));
}

static void __init_metadata_version(ocf_cache_t cache)
{
	cache->conf_meta->metadata_version = METADATA_VERSION();
}

static void __reset_stats(ocf_cache_t cache)
{
	int core_id;
	ocf_part_id_t i;

	for (core_id = 0; core_id < OCF_CORE_MAX; core_id++) {
		env_atomic_set(&cache->core_runtime_meta[core_id].
				cached_clines, 0);
		env_atomic_set(&cache->core_runtime_meta[core_id].
				dirty_clines, 0);
		env_atomic64_set(&cache->core_runtime_meta[core_id].
				dirty_since, 0);

		for (i = 0; i != OCF_IO_CLASS_MAX; i++) {
			env_atomic_set(&cache->core_runtime_meta[core_id].
					part_counters[i].cached_clines, 0);
			env_atomic_set(&cache->core_runtime_meta[core_id].
					part_counters[i].dirty_clines, 0);
		}
	}
}

static void init_attached_data_structures(ocf_cache_t cache,
		ocf_eviction_t eviction_policy)
{
	/* Lock to ensure consistency */
	OCF_METADATA_LOCK_WR();
	__init_hash_table(cache);
	__init_freelist(cache);
	__init_partitions_attached(cache);
	__init_cleaning_policy(cache);
	__init_eviction_policy(cache, eviction_policy);
	OCF_METADATA_UNLOCK_WR();
}

static void init_attached_data_structures_recovery(ocf_cache_t cache)
{
	OCF_METADATA_LOCK_WR();
	__init_hash_table(cache);
	__init_freelist(cache);
	__init_partitions_attached(cache);
	__reset_stats(cache);
	__init_metadata_version(cache);
	OCF_METADATA_UNLOCK_WR();
}

/****************************************************************
 * Function for removing all uninitialized core objects	*
 * from the cache instance.					*
 * Used in case of cache initialization errors.			*
 ****************************************************************/
static void _ocf_mngt_close_all_uninitialized_cores(
		ocf_cache_t cache)
{
	ocf_volume_t volume;
	int j, i;

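	/* 'j' counts the valid cores still to be closed, while 'i' scans
	 * the core bitmap */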
	for (j = cache->conf_meta->core_count, i = 0; j > 0; ++i) {
		if (!env_bit_test(i, cache->conf_meta->valid_core_bitmap))
			continue;

		volume = &(cache->core[i].volume);
		ocf_volume_close(volume);

		--j;

		env_free(cache->core[i].counters);
		cache->core[i].counters = NULL;

		env_bit_clear(i, cache->conf_meta->valid_core_bitmap);
	}

	cache->conf_meta->core_count = 0;
}

/**
 * @brief routine loading metadata from cache device
 *  - attempts to open all the underlying cores
 */
static int _ocf_mngt_init_instance_add_cores(
		struct ocf_cache_attach_context *context)
{
	ocf_cache_t cache = context->cache;
	/* FIXME: This is a temporary hack. Remove after storing name in meta. */
	char core_name[OCF_CORE_NAME_SIZE];
	int ret = -1, i;
	uint64_t hd_lines = 0;

	OCF_ASSERT_PLUGGED(cache);

	if (cache->conf_meta->cachelines !=
			ocf_metadata_get_cachelines_count(cache)) {
		ocf_cache_log(cache, log_err,
				"ERROR: Cache device size mismatch!\n");
		return -OCF_ERR_START_CACHE_FAIL;
	}

	/* Count value will be re-calculated on the basis of 'added' flag */
	cache->conf_meta->core_count = 0;

	/* Check in metadata which cores were added into cache */
	for (i = 0; i < OCF_CORE_MAX; i++) {
		ocf_volume_t tvolume = NULL;
		ocf_core_t core = &cache->core[i];

		if (!cache->core_conf_meta[i].added)
			continue;

		if (!cache->core[i].volume.type)
			goto err;

		ret = snprintf(core_name, sizeof(core_name), "core%d", i);
		if (ret < 0 || ret >= sizeof(core_name))
			goto err;

		ret = ocf_core_set_name(core, core_name, sizeof(core_name));
		if (ret)
			goto err;

		tvolume = ocf_mngt_core_pool_lookup(ocf_cache_get_ctx(cache),
				&core->volume.uuid, core->volume.type);
		if (tvolume) {
			/*
			 * Attach bottom device to core structure
			 * in cache
			 */
			ocf_volume_move(&core->volume, tvolume);
			ocf_mngt_core_pool_remove(cache->owner, tvolume);

			core->opened = true;
			ocf_cache_log(cache, log_info,
					"Attached core %u from pool\n", i);
		} else if (context->cfg.open_cores) {
			ret = ocf_volume_open(&core->volume, NULL);
			if (ret == -OCF_ERR_NOT_OPEN_EXC) {
				ocf_cache_log(cache, log_warn,
						"Cannot open core %u. "
						"Cache is busy\n", i);
			} else if (ret) {
				ocf_cache_log(cache, log_warn,
						"Cannot open core %u\n", i);
			} else {
				core->opened = true;
			}
		}

		env_bit_set(i, cache->conf_meta->valid_core_bitmap);
		cache->conf_meta->core_count++;
		core->volume.cache = cache;

		if (ocf_mngt_core_init_front_volume(core))
			goto err;

		core->counters =
			env_zalloc(sizeof(*core->counters), ENV_MEM_NORMAL);
		if (!core->counters)
			goto err;

		if (!core->opened) {
			env_bit_set(ocf_cache_state_incomplete,
					&cache->cache_state);
			cache->ocf_core_inactive_count++;
			ocf_cache_log(cache, log_warn,
					"Cannot find core %u in pool"
					", core added as inactive\n", i);
			continue;
		}

		hd_lines = ocf_bytes_2_lines(cache,
				ocf_volume_get_length(
					&cache->core[i].volume));

		if (hd_lines) {
			ocf_cache_log(cache, log_info,
					"Disk lines = %" ENV_PRIu64 "\n",
					hd_lines);
		}
	}

	context->flags.cores_opened = true;
	return 0;

err:
	_ocf_mngt_close_all_uninitialized_cores(cache);

	return -OCF_ERR_START_CACHE_FAIL;
}

void _ocf_mngt_init_instance_load_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	ocf_cleaning_t cleaning_policy;

	if (error) {
		ocf_cache_log(cache, log_err,
				"Cannot read cache metadata\n");
		ocf_pipeline_finish(context->pipeline,
				-OCF_ERR_START_CACHE_FAIL);
		return;
	}

	cleaning_policy = cache->conf_meta->cleaning_policy_type;
	if (!cleaning_policy_ops[cleaning_policy].initialize)
		goto out;

	if (context->metadata.shutdown_status == ocf_metadata_clean_shutdown)
		cleaning_policy_ops[cleaning_policy].initialize(cache, 0);
	else
		cleaning_policy_ops[cleaning_policy].initialize(cache, 1);

out:
	ocf_pipeline_next(context->pipeline);
}

/**
 * handle load variant
 */
static void _ocf_mngt_init_instance_clean_load(
		struct ocf_cache_attach_context *context)
{
	ocf_cache_t cache = context->cache;

	ocf_metadata_load_all(cache,
			_ocf_mngt_init_instance_load_complete, context);
}

/**
 * handle recovery variant
 */
static void _ocf_mngt_init_instance_recovery(
		struct ocf_cache_attach_context *context)
{
	ocf_cache_t cache = context->cache;

	init_attached_data_structures_recovery(cache);

	ocf_cache_log(cache, log_warn,
			"ERROR: Cache device did not shut down properly!\n");

	ocf_cache_log(cache, log_info, "Initiating recovery sequence...\n");

	ocf_metadata_load_recovery(cache,
			_ocf_mngt_init_instance_load_complete, context);
}

static void _ocf_mngt_init_instance_load(
		struct ocf_cache_attach_context *context)
{
	ocf_cache_t cache = context->cache;
	int ret;

	OCF_ASSERT_PLUGGED(cache);

	ret = _ocf_mngt_init_instance_add_cores(context);
	if (ret) {
		ocf_pipeline_finish(context->pipeline, ret);
		return;
	}

	if (context->metadata.shutdown_status == ocf_metadata_clean_shutdown)
		_ocf_mngt_init_instance_clean_load(context);
	else
		_ocf_mngt_init_instance_recovery(context);
}

/**
 * @brief allocate memory for new cache, add it to cache queue, set initial
 * values and running state
 */
static int _ocf_mngt_init_new_cache(struct ocf_cachemng_init_params *params)
{
	ocf_cache_t cache = env_vzalloc(sizeof(*cache));

	if (!cache)
		return -OCF_ERR_NO_MEM;

	if (env_rwsem_init(&cache->lock) ||
			env_mutex_init(&cache->flush_mutex)) {
		env_vfree(cache);
		return -OCF_ERR_NO_MEM;
	}

	INIT_LIST_HEAD(&cache->list);
	list_add_tail(&cache->list, &params->ctx->caches);
	env_atomic_set(&cache->ref_count, 1);
	cache->owner = params->ctx;

	/* Copy all required initialization parameters */
	cache->cache_id = params->id;

	env_atomic_set(&(cache->last_access_ms),
			env_ticks_to_msecs(env_get_tick_count()));

	env_bit_set(ocf_cache_state_initializing, &cache->cache_state);

	params->cache = cache;
	params->flags.cache_alloc = true;

	return 0;
}

static void _ocf_mngt_attach_cache_device(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	ocf_volume_type_t type;
	int ret;

	cache->device = env_vzalloc(sizeof(*cache->device));
	if (!cache->device) {
		ret = -OCF_ERR_NO_MEM;
		goto err;
	}
	context->flags.device_alloc = true;

	cache->device->init_mode = context->init_mode;

	/* Prepare UUID of cache volume */
	type = ocf_ctx_get_volume_type(cache->owner, context->cfg.volume_type);
	if (!type) {
		ret = -OCF_ERR_INVAL_VOLUME_TYPE;
		goto err;
	}

	ret = ocf_volume_init(&cache->device->volume, type,
			&context->cfg.uuid, true);
	if (ret)
		goto err;

	cache->device->volume.cache = cache;
	context->flags.volume_inited = true;

	/*
	 * Open the cache device. It has to be done first because the metadata
	 * service needs to know the size of the cache device.
	 */
	ret = ocf_volume_open(&cache->device->volume,
			context->cfg.volume_params);
	if (ret) {
		ocf_cache_log(cache, log_err, "ERROR: Cache not available\n");
		goto err;
	}
	context->flags.device_opened = true;

	context->volume_size = ocf_volume_get_length(&cache->device->volume);

	/* Check minimum size of cache device */
	if (context->volume_size < OCF_CACHE_SIZE_MIN) {
		ocf_cache_log(cache, log_err, "ERROR: Cache size must "
				"be at least %llu [MiB]\n",
				OCF_CACHE_SIZE_MIN / MiB);
		ret = -OCF_ERR_START_CACHE_FAIL;
		goto err;
	}

	ocf_pipeline_next(pipeline);
	return;

err:
	ocf_pipeline_finish(context->pipeline, ret);
}

/**
 * @brief prepare cache for init. This is the first step towards initializing
 *		the cache
 */
static int _ocf_mngt_init_prepare_cache(struct ocf_cachemng_init_params *param,
		struct ocf_mngt_cache_config *cfg)
{
	ocf_cache_t cache;
	char cache_name[OCF_CACHE_NAME_SIZE];
	int ret = 0;

	ret = env_mutex_lock_interruptible(&param->ctx->lock);
	if (ret)
		return ret;

	if (param->id == OCF_CACHE_ID_INVALID) {
		/* ID was not specified, take the first free id */
		param->id = _ocf_mngt_cache_find_free_id(param->ctx);
		if (param->id == OCF_CACHE_ID_INVALID) {
			ret = -OCF_ERR_TOO_MANY_CACHES;
			goto out;
		}
		cfg->id = param->id;
	} else {
		/* ID was set, check if a cache with the specified ID exists */
		cache = _ocf_mngt_get_cache(param->ctx, param->id);
		if (cache) {
			/* Cache already exists */
			ret = -OCF_ERR_CACHE_EXIST;
			goto out;
		}
	}

	if (cfg->name) {
		ret = env_strncpy(cache_name, sizeof(cache_name),
				cfg->name, sizeof(cache_name));
		if (ret)
			goto out;
	} else {
		ret = snprintf(cache_name, sizeof(cache_name),
				"cache%hu", param->id);
		if (ret < 0)
			goto out;
	}

	ocf_log(param->ctx, log_info, "Inserting cache %s\n", cache_name);

	ret = _ocf_mngt_init_new_cache(param);
	if (ret)
		goto out;

	cache = param->cache;

	ret = ocf_cache_set_name(cache, cache_name, sizeof(cache_name));
	if (ret)
		goto out;

	cache->backfill.max_queue_size = cfg->backfill.max_queue_size;
	cache->backfill.queue_unblock_size = cfg->backfill.queue_unblock_size;

	env_rwsem_down_write(&cache->lock); /* Lock cache during setup */
	param->flags.cache_locked = true;

	cache->pt_unaligned_io = cfg->pt_unaligned_io;
	cache->use_submit_io_fast = cfg->use_submit_io_fast;

	cache->eviction_policy_init = cfg->eviction_policy;
	cache->metadata.is_volatile = cfg->metadata_volatile;

out:
	env_mutex_unlock(&param->ctx->lock);
	return ret;
}

static void _ocf_mngt_test_volume_initial_write_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;

	if (error) {
		ocf_pipeline_finish(context->test.pipeline, error);
		return;
	}

	ocf_pipeline_next(context->test.pipeline);
}

static void _ocf_mngt_test_volume_initial_write(
		ocf_pipeline_t test_pipeline, void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	/*
	 * Write buffer filled with "1"
	 */

	ENV_BUG_ON(env_memset(context->test.rw_buffer, PAGE_SIZE, 1));

	ocf_submit_cache_page(cache, context->test.reserved_lba_addr,
			OCF_WRITE, context->test.rw_buffer,
			_ocf_mngt_test_volume_initial_write_complete, context);
}

static void _ocf_mngt_test_volume_first_read_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	int ret, diff;

	if (error) {
		ocf_pipeline_finish(context->test.pipeline, error);
		return;
	}

	ret = env_memcmp(context->test.rw_buffer, PAGE_SIZE,
			context->test.cmp_buffer, PAGE_SIZE, &diff);
	if (ret) {
		ocf_pipeline_finish(context->test.pipeline, ret);
		return;
	}

	if (diff) {
		/* we read back different data than what we had just
		 * written - this is a fatal error */
		ocf_pipeline_finish(context->test.pipeline, -EIO);
		return;
	}

	if (!ocf_volume_is_atomic(&cache->device->volume)) {
		/* If not atomic, stop testing here */
		ocf_pipeline_finish(context->test.pipeline, 0);
		return;
	}

	ocf_pipeline_next(context->test.pipeline);
}

static void _ocf_mngt_test_volume_first_read(
		ocf_pipeline_t test_pipeline, void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	/*
	 * First read
	 */

	ENV_BUG_ON(env_memset(context->test.rw_buffer, PAGE_SIZE, 0));
	ENV_BUG_ON(env_memset(context->test.cmp_buffer, PAGE_SIZE, 1));

	ocf_submit_cache_page(cache, context->test.reserved_lba_addr,
			OCF_READ, context->test.rw_buffer,
			_ocf_mngt_test_volume_first_read_complete, context);
}

static void _ocf_mngt_test_volume_discard_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;

	if (error) {
		ocf_pipeline_finish(context->test.pipeline, error);
		return;
	}

	ocf_pipeline_next(context->test.pipeline);
}

static void _ocf_mngt_test_volume_discard(
		ocf_pipeline_t test_pipeline, void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	/*
	 * Submit discard request
	 */

	ocf_submit_volume_discard(&cache->device->volume,
			context->test.reserved_lba_addr, PAGE_SIZE,
			_ocf_mngt_test_volume_discard_complete, context);
}

static void _ocf_mngt_test_volume_second_read_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	int ret, diff;

	if (error) {
		ocf_pipeline_finish(context->test.pipeline, error);
		return;
	}

	ret = env_memcmp(context->test.rw_buffer, PAGE_SIZE,
			context->test.cmp_buffer, PAGE_SIZE, &diff);
	if (ret) {
		ocf_pipeline_finish(context->test.pipeline, ret);
		return;
	}

	if (diff) {
		/* discard does not cause target addresses to return 0 on
		 * subsequent read */
		cache->device->volume.features.discard_zeroes = 0;
	}

	ocf_pipeline_next(context->test.pipeline);
}

static void _ocf_mngt_test_volume_second_read(
		ocf_pipeline_t test_pipeline, void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	/*
	 * Second read
	 */

	ENV_BUG_ON(env_memset(context->test.rw_buffer, PAGE_SIZE, 1));
	ENV_BUG_ON(env_memset(context->test.cmp_buffer, PAGE_SIZE, 0));

	ocf_submit_cache_page(cache, context->test.reserved_lba_addr,
			OCF_READ, context->test.rw_buffer,
			_ocf_mngt_test_volume_second_read_complete, context);
}

static void _ocf_mngt_test_volume_finish(ocf_pipeline_t pipeline,
		void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;

	env_free(context->test.rw_buffer);
	env_free(context->test.cmp_buffer);

	if (error)
		ocf_pipeline_finish(context->pipeline, error);
	else
		ocf_pipeline_next(context->pipeline);

	ocf_pipeline_destroy(context->test.pipeline);
}

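/*
 * The device test runs as a nested pipeline: write a known pattern to a
 * reserved LBA, read it back to verify the medium, then discard the page and
 * read it again. If the data does not come back as zeroes after the discard,
 * the volume's discard_zeroes feature flag is cleared.
 */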
struct ocf_pipeline_properties _ocf_mngt_test_volume_pipeline_properties = {
	.priv_size = 0,
	.finish = _ocf_mngt_test_volume_finish,
	.steps = {
		OCF_PL_STEP(_ocf_mngt_test_volume_initial_write),
		OCF_PL_STEP(_ocf_mngt_test_volume_first_read),
		OCF_PL_STEP(_ocf_mngt_test_volume_discard),
		OCF_PL_STEP(_ocf_mngt_test_volume_second_read),
		OCF_PL_STEP_TERMINATOR(),
	},
};

static void _ocf_mngt_test_volume(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	ocf_pipeline_t test_pipeline;
	int result;

	cache->device->volume.features.discard_zeroes = 1;

	if (!context->cfg.perform_test) {
		ocf_pipeline_next(pipeline);
		return;
	}

	context->test.reserved_lba_addr = ocf_metadata_get_reserved_lba(cache);

	context->test.rw_buffer = env_malloc(PAGE_SIZE, ENV_MEM_NORMAL);
	if (!context->test.rw_buffer) {
		ocf_pipeline_finish(context->pipeline, -OCF_ERR_NO_MEM);
		return;
	}

	context->test.cmp_buffer = env_malloc(PAGE_SIZE, ENV_MEM_NORMAL);
	if (!context->test.cmp_buffer)
		goto err_buffer;

	result = ocf_pipeline_create(&test_pipeline, cache,
			&_ocf_mngt_test_volume_pipeline_properties);
	if (result)
		goto err_pipeline;

	ocf_pipeline_set_priv(test_pipeline, context);

	context->test.pipeline = test_pipeline;

	ocf_pipeline_next(test_pipeline);
	return;

err_pipeline:
	env_free(context->test.cmp_buffer);
err_buffer:
	/* rw_buffer is freed on both paths, cmp_buffer only when the
	 * pipeline creation failed after it had been allocated */
	env_free(context->test.rw_buffer);
	ocf_pipeline_finish(context->pipeline, -OCF_ERR_NO_MEM);
}

/**
 * Prepare metadata according to mode (for load/recovery read from disk)
 */

static void _ocf_mngt_attach_load_properties_end(void *priv, int error,
		struct ocf_metadata_load_properties *properties)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	context->metadata.status = error;

	if (error) {
		ocf_pipeline_next(context->pipeline);
		return;
	}

	context->metadata.shutdown_status = properties->shutdown_status;
	context->metadata.dirty_flushed = properties->dirty_flushed;

	if (cache->device->init_mode == ocf_init_mode_load) {
		context->metadata.line_size = properties->line_size;
		cache->conf_meta->metadata_layout = properties->layout;
		cache->conf_meta->cache_mode = properties->cache_mode;
	}

	ocf_pipeline_next(context->pipeline);
}

static void _ocf_mngt_attach_load_properties(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	OCF_ASSERT_PLUGGED(cache);

	context->metadata.shutdown_status = ocf_metadata_clean_shutdown;
	context->metadata.dirty_flushed = DIRTY_FLUSHED;
	context->metadata.line_size = context->cfg.cache_line_size;

	if (cache->device->init_mode == ocf_init_mode_metadata_volatile) {
		ocf_pipeline_next(context->pipeline);
		return;
	}

	ocf_metadata_load_properties(&cache->device->volume,
			_ocf_mngt_attach_load_properties_end, context);
}

static void _ocf_mngt_attach_prepare_metadata(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	int ret, i;

	if (context->init_mode == ocf_init_mode_load &&
			context->metadata.status) {
		ocf_pipeline_finish(context->pipeline,
				-OCF_ERR_START_CACHE_FAIL);
		return;
	}

	context->metadata.line_size = context->metadata.line_size ?:
			cache->metadata.settings.size;

	/*
	 * Initialize variable size metadata segments
	 */
	if (ocf_metadata_init_variable_size(cache, context->volume_size,
			context->metadata.line_size,
			cache->conf_meta->metadata_layout)) {
		ocf_pipeline_finish(context->pipeline,
				-OCF_ERR_START_CACHE_FAIL);
		return;
	}

	ocf_cache_log(cache, log_debug, "Cache attached\n");
	context->flags.attached_metadata_inited = true;

	for (i = 0; i < OCF_IO_CLASS_MAX + 1; ++i) {
		cache->user_parts[i].runtime =
				&cache->device->runtime_meta->user_parts[i];
	}

	cache->device->freelist_part = &cache->device->runtime_meta->freelist_part;

	ret = ocf_concurrency_init(cache);
	if (ret) {
		ocf_pipeline_finish(context->pipeline, ret);
		return;
	}

	context->flags.concurrency_inited = 1;

	ocf_pipeline_next(context->pipeline);
}

/**
 * @brief initializing cache anew (not loading or recovering)
 */
static void _ocf_mngt_init_instance_init(struct ocf_cache_attach_context *context)
{
	ocf_cache_t cache = context->cache;

	if (!context->metadata.status && !context->cfg.force &&
			context->metadata.shutdown_status !=
				ocf_metadata_detached) {

		if (context->metadata.shutdown_status !=
				ocf_metadata_clean_shutdown) {
			ocf_cache_log(cache, log_err, DIRTY_SHUTDOWN_ERROR_MSG);
			ocf_pipeline_finish(context->pipeline,
					-OCF_ERR_DIRTY_SHUTDOWN);
			return;
		}

		if (context->metadata.dirty_flushed == DIRTY_NOT_FLUSHED) {
			ocf_cache_log(cache, log_err,
					DIRTY_NOT_FLUSHED_ERROR_MSG);
			ocf_pipeline_finish(context->pipeline,
					-OCF_ERR_DIRTY_EXISTS);
			return;
		}
	}

	init_attached_data_structures(cache, cache->eviction_policy_init);

	/* In initial cache state there is no dirty data, so all dirty data is
	 * considered to be flushed
	 */
	cache->conf_meta->dirty_flushed = true;

	ocf_pipeline_next(context->pipeline);
}

uint64_t _ocf_mngt_calculate_ram_needed(ocf_cache_t cache,
		ocf_volume_t cache_volume)
{
	ocf_cache_line_size_t line_size = ocf_line_size(cache);
	uint64_t volume_size = ocf_volume_get_length(cache_volume);
	uint64_t const_data_size;
	uint64_t cache_line_no;
	uint64_t data_per_line;
	uint64_t min_free_ram;

	/* Superblock + per core metadata */
	const_data_size = 50 * MiB;

	/* Cache metadata */
	cache_line_no = volume_size / line_size;
	data_per_line = (52 + (2 * (line_size / KiB / 4)));
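
	/*
	 * Worked example: with 4 KiB cache lines, data_per_line is
	 * 52 + 2 * (4096 / KiB / 4) = 54 B per line. A 1 TiB cache volume
	 * holds 2^28 such lines, so the estimate comes out to roughly
	 * 1.1 * (50 MiB + 2^28 * 54 B), i.e. about 15 GiB of RAM.
	 */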

	min_free_ram = const_data_size + cache_line_no * data_per_line;

	/* 110% of calculated value */
	min_free_ram = (11 * min_free_ram) / 10;

	return min_free_ram;
}

int ocf_mngt_get_ram_needed(ocf_cache_t cache,
		struct ocf_mngt_cache_device_config *cfg, uint64_t *ram_needed)
{
	struct ocf_volume volume;
	ocf_volume_type_t type;
	int result;

	OCF_CHECK_NULL(cache);
	OCF_CHECK_NULL(cfg);
	OCF_CHECK_NULL(ram_needed);

	type = ocf_ctx_get_volume_type(cache->owner, cfg->volume_type);
	if (!type)
		return -OCF_ERR_INVAL_VOLUME_TYPE;

	/* Initialize the local volume (not the cache device volume) - it is
	 * used only to query the device length */
	result = ocf_volume_init(&volume, type, &cfg->uuid, false);
	if (result)
		return result;

	result = ocf_volume_open(&volume, cfg->volume_params);
	if (result) {
		ocf_volume_deinit(&volume);
		return result;
	}

	*ram_needed = _ocf_mngt_calculate_ram_needed(cache, &volume);

	ocf_volume_close(&volume);
	ocf_volume_deinit(&volume);

	return 0;
}

/**
 * @brief for error handling, do partial cleanup of data structures upon
 * premature function exit.
 *
 * @param ctx OCF context
 * @param params startup params containing initialization status flags.
 *
 */
static void _ocf_mngt_init_handle_error(ocf_ctx_t ctx,
		struct ocf_cachemng_init_params *params)
{
	ocf_cache_t cache = params->cache;

	if (!params->flags.cache_alloc)
		return;

	if (params->flags.metadata_inited)
		ocf_metadata_deinit(cache);

	env_mutex_lock(&ctx->lock);

	list_del(&cache->list);
	env_vfree(cache);

	env_mutex_unlock(&ctx->lock);
}

static void _ocf_mngt_attach_handle_error(
		struct ocf_cache_attach_context *context)
{
	ocf_cache_t cache = context->cache;

	if (context->flags.cleaner_started)
		ocf_stop_cleaner(cache);

	if (context->flags.cores_opened)
		_ocf_mngt_close_all_uninitialized_cores(cache);

	if (context->flags.attached_metadata_inited)
		ocf_metadata_deinit_variable_size(cache);

	if (context->flags.device_opened)
		ocf_volume_close(&cache->device->volume);

	if (context->flags.concurrency_inited)
		ocf_concurrency_deinit(cache);

	if (context->flags.volume_inited)
		ocf_volume_deinit(&cache->device->volume);

	if (context->flags.device_alloc)
		env_vfree(cache->device);
}

static int _ocf_mngt_cache_init(ocf_cache_t cache,
		struct ocf_cachemng_init_params *params)
{
	int i;

	/*
	 * Super block elements initialization
	 */
	cache->conf_meta->cache_mode = params->metadata.cache_mode;
	cache->conf_meta->metadata_layout = params->metadata.layout;

	for (i = 0; i < OCF_IO_CLASS_MAX + 1; ++i) {
		cache->user_parts[i].config =
				&cache->conf_meta->user_parts[i];
	}

	INIT_LIST_HEAD(&cache->io_queues);

	/* Init Partitions */
	ocf_part_init(cache);

	__init_cores(cache);
	__init_metadata_version(cache);
	__init_partitions(cache);

	return 0;
}

static int _ocf_mngt_cache_start(ocf_ctx_t ctx, ocf_cache_t *cache,
		struct ocf_mngt_cache_config *cfg)
{
	struct ocf_cachemng_init_params params;
	int result;

	ENV_BUG_ON(env_memset(&params, sizeof(params), 0));

	params.id = cfg->id;

	params.ctx = ctx;
	params.metadata.cache_mode = cfg->cache_mode;
	params.metadata.layout = cfg->metadata_layout;
	params.metadata.line_size = cfg->cache_line_size;
	params.metadata_volatile = cfg->metadata_volatile;
	params.locked = cfg->locked;

	/* Prepare cache */
	result = _ocf_mngt_init_prepare_cache(&params, cfg);
	if (result)
		goto _cache_mng_init_instance_ERROR;

	*cache = params.cache;

	/*
	 * Initialize selected segments of metadata in memory
	 */
	result = ocf_metadata_init(*cache, params.metadata.line_size);
	if (result) {
		result = -OCF_ERR_START_CACHE_FAIL;
		goto _cache_mng_init_instance_ERROR;
	}

	ocf_log(ctx, log_debug, "Metadata initialized\n");
	params.flags.metadata_inited = true;

	result = _ocf_mngt_cache_init(*cache, &params);
	if (result)
		goto _cache_mng_init_instance_ERROR;

	if (params.locked) {
		/* Increment reference counter to match cache_lock /
		   cache_unlock convention. User is expected to call
		   ocf_mngt_cache_unlock in future which would up the
		   semaphore as well as decrement ref_count. */
		env_atomic_inc(&(*cache)->ref_count);
	} else {
		/* User did not request to lock cache instance after creation -
		   up the semaphore here since we have acquired the lock to
		   perform management operations. */
		env_rwsem_up_write(&(*cache)->lock);
		params.flags.cache_locked = false;
	}

	return 0;

_cache_mng_init_instance_ERROR:
	_ocf_mngt_init_handle_error(ctx, &params);
	*cache = NULL;
	return result;
}

static void _ocf_mng_cache_set_valid(ocf_cache_t cache)
{
	/*
	 * Clear initialization state and set the valid bit so we know
	 * it's in use.
	 */
	cache->valid_ocf_cache_device_t = 1;
	env_bit_clear(ocf_cache_state_initializing, &cache->cache_state);
	env_bit_set(ocf_cache_state_running, &cache->cache_state);
}

static int _ocf_mngt_cache_add_cores_t_clean_pol(ocf_cache_t cache)
{
	int clean_type = cache->conf_meta->cleaning_policy_type;
	int i, j, no;
	int result;

	if (cleaning_policy_ops[clean_type].add_core) {
		no = cache->conf_meta->core_count;
		for (i = 0, j = 0; j < no && i < OCF_CORE_MAX; i++) {
			if (!env_bit_test(i, cache->conf_meta->valid_core_bitmap))
				continue;
			result = cleaning_policy_ops[clean_type].add_core(cache, i);
			if (result)
				goto err;
			j++;
		}
	}

	return 0;

err:
	if (!cleaning_policy_ops[clean_type].remove_core)
		return result;

	/* Roll back cores already added to the cleaning policy */
	while (i--) {
		if (env_bit_test(i, cache->conf_meta->valid_core_bitmap))
			cleaning_policy_ops[clean_type].remove_core(cache, i);
	}

	return result;
}

static void _ocf_mngt_init_attached_nonpersistent(ocf_cache_t cache)
{
	env_atomic_set(&cache->fallback_pt_error_counter, 0);
}

static void _ocf_mngt_attach_check_ram(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	uint64_t min_free_ram;
	uint64_t free_ram;

	min_free_ram = _ocf_mngt_calculate_ram_needed(cache,
			&cache->device->volume);

	free_ram = env_get_free_memory();

	if (free_ram < min_free_ram) {
		ocf_cache_log(cache, log_err, "Not enough free RAM for cache "
				"metadata to start cache\n");
		ocf_cache_log(cache, log_err,
				"Available RAM: %" ENV_PRIu64 " B\n", free_ram);
		ocf_cache_log(cache, log_err, "Needed RAM: %" ENV_PRIu64 " B\n",
				min_free_ram);
		ocf_pipeline_finish(pipeline, -OCF_ERR_NO_FREE_RAM);
		return;
	}

	ocf_pipeline_next(pipeline);
}

static void _ocf_mngt_attach_load_superblock_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	if (error) {
		ocf_cache_log(cache, log_err,
				"ERROR: Cannot load cache state\n");
		ocf_pipeline_finish(context->pipeline,
				-OCF_ERR_START_CACHE_FAIL);
		return;
	}

	ocf_pipeline_next(context->pipeline);
}

static void _ocf_mngt_attach_load_superblock(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	if (cache->device->init_mode != ocf_init_mode_load) {
		ocf_pipeline_next(context->pipeline);
		return;
	}

	ocf_cache_log(cache, log_info, "Loading cache state...\n");
	ocf_metadata_load_superblock(cache,
			_ocf_mngt_attach_load_superblock_complete, context);
}

static void _ocf_mngt_attach_init_instance(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	int result;

	result = ocf_start_cleaner(cache);
	if (result) {
		ocf_cache_log(cache, log_err,
				"Error while starting cleaner\n");
		ocf_pipeline_finish(context->pipeline, result);
		return;
	}
	context->flags.cleaner_started = true;

	switch (cache->device->init_mode) {
	case ocf_init_mode_init:
	case ocf_init_mode_metadata_volatile:
		_ocf_mngt_init_instance_init(context);
		return;
	case ocf_init_mode_load:
		_ocf_mngt_init_instance_load(context);
		return;
	default:
		ocf_pipeline_finish(context->pipeline, -OCF_ERR_INVAL);
	}
}

static void _ocf_mngt_attach_clean_pol(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	int result;

	/* TODO: Should this even be here? */
	if (cache->device->init_mode != ocf_init_mode_load) {
		result = _ocf_mngt_cache_add_cores_t_clean_pol(cache);
		if (result) {
			ocf_pipeline_finish(context->pipeline, result);
			return;
		}
	}

	ocf_pipeline_next(context->pipeline);
}

static void _ocf_mngt_attach_flush_metadata_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	if (error) {
		ocf_cache_log(cache, log_err,
				"ERROR: Cannot save cache state\n");
		ocf_pipeline_finish(context->pipeline,
				-OCF_ERR_WRITE_CACHE);
		return;
	}

	ocf_pipeline_next(context->pipeline);
}

static void _ocf_mngt_attach_flush_metadata(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	ocf_metadata_flush_all(cache,
			_ocf_mngt_attach_flush_metadata_complete, context);
}

static void _ocf_mngt_attach_discard_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	bool discard = cache->device->volume.features.discard_zeroes;

	if (error) {
		ocf_cache_log(cache, log_warn, "%s failed\n",
				discard ? "Discarding whole cache device" :
					"Overwriting cache with zeroes");

		if (ocf_volume_is_atomic(&cache->device->volume)) {
			ocf_cache_log(cache, log_err, "This step is required"
					" for atomic mode!\n");
			ocf_pipeline_finish(context->pipeline, error);
			return;
		}

		ocf_cache_log(cache, log_warn, "This may impact cache"
				" performance!\n");
	}

	ocf_pipeline_next(context->pipeline);
}

static void _ocf_mngt_attach_discard(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	uint64_t addr = cache->device->metadata_offset;
	uint64_t length = ocf_volume_get_length(&cache->device->volume) - addr;
	bool discard = cache->device->volume.features.discard_zeroes;

	if (cache->device->init_mode == ocf_init_mode_load) {
		ocf_pipeline_next(context->pipeline);
		return;
	}

	if (!context->cfg.discard_on_start) {
		ocf_pipeline_next(context->pipeline);
		return;
	}

	if (!discard && ocf_volume_is_atomic(&cache->device->volume)) {
		/* discard doesn't zero data - need to explicitly write zeros */
		ocf_submit_write_zeros(&cache->device->volume, addr, length,
				_ocf_mngt_attach_discard_complete, context);
	} else {
		/* Discard volume after metadata */
		ocf_submit_volume_discard(&cache->device->volume, addr, length,
				_ocf_mngt_attach_discard_complete, context);
	}
}

static void _ocf_mngt_attach_flush_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;

	if (error)
		ocf_pipeline_finish(context->pipeline, error);
	else
		ocf_pipeline_next(context->pipeline);
}

static void _ocf_mngt_attach_flush(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	bool discard = cache->device->volume.features.discard_zeroes;

	if (!discard && ocf_volume_is_atomic(&cache->device->volume)) {
		ocf_submit_volume_flush(&cache->device->volume,
				_ocf_mngt_attach_flush_complete, context);
	} else {
		ocf_pipeline_next(context->pipeline);
	}
}

static void _ocf_mngt_attach_shutdown_status_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	if (error) {
		ocf_cache_log(cache, log_err, "Cannot flush shutdown status\n");
		ocf_pipeline_finish(context->pipeline,
				-OCF_ERR_WRITE_CACHE);
		return;
	}

	ocf_pipeline_next(context->pipeline);
}

static void _ocf_mngt_attach_shutdown_status(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	/* clear clean shutdown status */
	ocf_metadata_set_shutdown_status(cache, ocf_metadata_dirty_shutdown,
			_ocf_mngt_attach_shutdown_status_complete, context);
}

static void _ocf_mngt_attach_post_init(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	env_waitqueue_init(&cache->pending_cache_wq);

	env_atomic_set(&cache->attached, 1);

	ocf_pipeline_next(context->pipeline);
}

static void _ocf_mngt_cache_attach_finish(ocf_pipeline_t pipeline,
		void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;

	if (error)
		_ocf_mngt_attach_handle_error(context);

	context->cmpl(context->cache, context->priv1, context->priv2, error);

	env_vfree(context->cfg.uuid.data);
	ocf_pipeline_destroy(context->pipeline);
}

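/*
 * The attach pipeline: open and size-check the device, verify there is
 * enough RAM for metadata, read metadata properties, set up in-memory
 * metadata, test the device, then either load the previous state or
 * initialize a fresh one, and finally persist metadata and mark the
 * shutdown status as dirty until a clean stop.
 */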
struct ocf_pipeline_properties _ocf_mngt_cache_attach_pipeline_properties = {
	.priv_size = sizeof(struct ocf_cache_attach_context),
	.finish = _ocf_mngt_cache_attach_finish,
	.steps = {
		OCF_PL_STEP(_ocf_mngt_attach_cache_device),
		OCF_PL_STEP(_ocf_mngt_attach_check_ram),
		OCF_PL_STEP(_ocf_mngt_attach_load_properties),
		OCF_PL_STEP(_ocf_mngt_attach_prepare_metadata),
		OCF_PL_STEP(_ocf_mngt_test_volume),
		OCF_PL_STEP(_ocf_mngt_attach_load_superblock),
		OCF_PL_STEP(_ocf_mngt_attach_init_instance),
		OCF_PL_STEP(_ocf_mngt_attach_clean_pol),
		OCF_PL_STEP(_ocf_mngt_attach_flush_metadata),
		OCF_PL_STEP(_ocf_mngt_attach_discard),
		OCF_PL_STEP(_ocf_mngt_attach_flush),
		OCF_PL_STEP(_ocf_mngt_attach_shutdown_status),
		OCF_PL_STEP(_ocf_mngt_attach_post_init),
		OCF_PL_STEP_TERMINATOR(),
	},
};

static void _ocf_mngt_cache_attach(ocf_cache_t cache,
		struct ocf_mngt_cache_device_config *cfg, bool load,
		_ocf_mngt_cache_attach_end_t cmpl, void *priv1, void *priv2)
{
	struct ocf_cache_attach_context *context;
	ocf_pipeline_t pipeline;
	void *data;
	int result;

	result = ocf_pipeline_create(&pipeline, cache,
			&_ocf_mngt_cache_attach_pipeline_properties);
	if (result) {
		cmpl(cache, priv1, priv2, -OCF_ERR_NO_MEM);
		return;
	}

	context = ocf_pipeline_get_priv(pipeline);

	context->cmpl = cmpl;
	context->priv1 = priv1;
	context->priv2 = priv2;
	context->pipeline = pipeline;

	context->cache = cache;
	context->cfg = *cfg;

	data = env_vmalloc(cfg->uuid.size);
	if (!data) {
		result = -OCF_ERR_NO_MEM;
		goto err_pipeline;
	}

	result = env_memcpy(data, cfg->uuid.size, cfg->uuid.data,
			cfg->uuid.size);
	if (result)
		goto err_uuid;

	context->cfg.uuid.data = data;

	if (cache->metadata.is_volatile) {
		context->init_mode = ocf_init_mode_metadata_volatile;
	} else {
		context->init_mode = load ?
				ocf_init_mode_load : ocf_init_mode_init;
	}

	_ocf_mngt_init_attached_nonpersistent(cache);

	ocf_pipeline_next(pipeline);
	return;

err_uuid:
	env_vfree(data);
err_pipeline:
	ocf_pipeline_destroy(pipeline);
	cmpl(cache, priv1, priv2, result);
}

static int _ocf_mngt_cache_validate_cfg(struct ocf_mngt_cache_config *cfg)
{
	if (cfg->id > OCF_CACHE_ID_MAX)
		return -OCF_ERR_INVAL;

	if (!ocf_cache_mode_is_valid(cfg->cache_mode))
		return -OCF_ERR_INVALID_CACHE_MODE;

	if (cfg->eviction_policy >= ocf_eviction_max ||
			cfg->eviction_policy < 0) {
		return -OCF_ERR_INVAL;
	}

	if (!ocf_cache_line_size_is_valid(cfg->cache_line_size))
		return -OCF_ERR_INVALID_CACHE_LINE_SIZE;

	if (cfg->metadata_layout >= ocf_metadata_layout_max ||
			cfg->metadata_layout < 0) {
		return -OCF_ERR_INVAL;
	}

	if (cfg->backfill.queue_unblock_size > cfg->backfill.max_queue_size)
		return -OCF_ERR_INVAL;

	return 0;
}

static int _ocf_mngt_cache_validate_device_cfg(
		struct ocf_mngt_cache_device_config *device_cfg)
{
	if (!device_cfg->uuid.data)
		return -OCF_ERR_INVAL;

	if (device_cfg->uuid.size > OCF_VOLUME_UUID_MAX_SIZE)
		return -OCF_ERR_INVAL;

	if (device_cfg->cache_line_size &&
			!ocf_cache_line_size_is_valid(device_cfg->cache_line_size))
		return -OCF_ERR_INVALID_CACHE_LINE_SIZE;

	return 0;
}

static const char *_ocf_cache_mode_names[ocf_cache_mode_max] = {
	[ocf_cache_mode_wt] = "wt",
	[ocf_cache_mode_wb] = "wb",
	[ocf_cache_mode_wa] = "wa",
	[ocf_cache_mode_pt] = "pt",
	[ocf_cache_mode_wi] = "wi",
};

static const char *_ocf_cache_mode_get_name(ocf_cache_mode_t cache_mode)
{
	if (!ocf_cache_mode_is_valid(cache_mode))
		return NULL;

	return _ocf_cache_mode_names[cache_mode];
}

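/*
 * Typical call sequence for bringing up a cache (an illustrative sketch only;
 * 'my_ctx', 'my_queue', the config setup and the completion callback are
 * assumptions, not part of this file):
 *
 *	struct ocf_mngt_cache_config cfg = { ... };
 *	struct ocf_mngt_cache_device_config device_cfg = { ... };
 *	ocf_cache_t cache;
 *
 *	if (ocf_mngt_cache_start(my_ctx, &cache, &cfg))
 *		return;
 *	ocf_mngt_cache_set_mngt_queue(cache, my_queue);
 *	ocf_mngt_cache_attach(cache, &device_cfg, attach_cmpl, NULL);
 *
 * Attach completes asynchronously through 'attach_cmpl'; use
 * ocf_mngt_cache_load() instead of attach to restore a previous state.
 */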
int ocf_mngt_cache_start(ocf_ctx_t ctx, ocf_cache_t *cache,
		struct ocf_mngt_cache_config *cfg)
{
	int result;

	if (!ctx || !cache || !cfg)
		return -OCF_ERR_INVAL;

	result = _ocf_mngt_cache_validate_cfg(cfg);
	if (result)
		return result;

	result = _ocf_mngt_cache_start(ctx, cache, cfg);
	if (!result) {
		_ocf_mng_cache_set_valid(*cache);

		ocf_cache_log(*cache, log_info, "Successfully added\n");
		ocf_cache_log(*cache, log_info, "Cache mode : %s\n",
				_ocf_cache_mode_get_name(ocf_cache_get_mode(*cache)));
	} else {
		if (cfg->name) {
			ocf_log(ctx, log_err, "Inserting cache %s failed\n",
					cfg->name);
		} else {
			ocf_log(ctx, log_err, "Inserting cache failed\n");
		}
	}

	return result;
}

int ocf_mngt_cache_set_mngt_queue(ocf_cache_t cache, ocf_queue_t queue)
{
	OCF_CHECK_NULL(cache);
	OCF_CHECK_NULL(queue);

	if (cache->mngt_queue)
		return -OCF_ERR_INVAL;

	ocf_queue_get(queue);
	cache->mngt_queue = queue;

	return 0;
}

static void _ocf_mngt_cache_attach_complete(ocf_cache_t cache, void *priv1,
		void *priv2, int error)
{
	ocf_mngt_cache_attach_end_t cmpl = priv1;

	if (!error) {
		ocf_cache_log(cache, log_info, "Successfully attached\n");
	} else {
		ocf_cache_log(cache, log_err, "Attaching cache device "
				"failed\n");
	}

	cmpl(cache, priv2, error);
}

void ocf_mngt_cache_attach(ocf_cache_t cache,
		struct ocf_mngt_cache_device_config *cfg,
		ocf_mngt_cache_attach_end_t cmpl, void *priv)
{
	int result;

	OCF_CHECK_NULL(cache);
	OCF_CHECK_NULL(cfg);

	result = _ocf_mngt_cache_validate_device_cfg(cfg);
	if (result) {
		cmpl(cache, priv, result);
		return;
	}

	_ocf_mngt_cache_attach(cache, cfg, false,
			_ocf_mngt_cache_attach_complete, cmpl, priv);
}

typedef void (*_ocf_mngt_cache_unplug_end_t)(void *context, int error);

struct _ocf_mngt_cache_unplug_context {
	_ocf_mngt_cache_unplug_end_t cmpl;
	void *priv;
	ocf_cache_t cache;
};

static void _ocf_mngt_cache_unplug_complete(void *priv, int error)
{
	struct _ocf_mngt_cache_unplug_context *context = priv;
	ocf_cache_t cache = context->cache;

	ocf_volume_close(&cache->device->volume);

	ocf_metadata_deinit_variable_size(cache);
	ocf_concurrency_deinit(cache);

	ocf_volume_deinit(&cache->device->volume);

	env_vfree(cache->device);
	cache->device = NULL;
	env_atomic_set(&cache->attached, 0);

	/* TODO: this should be removed from detach after 'attached' stats
	   are better separated in statistics */
	_ocf_mngt_init_attached_nonpersistent(cache);

	context->cmpl(context->priv, error ? -OCF_ERR_WRITE_CACHE : 0);
	env_vfree(context);
}

/**
 * @brief Unplug caching device from cache instance. Variable size metadata
 *	containers are deinitialized as well as other cacheline related
 *	structures. Cache volume is closed.
 *
 * @param cache OCF cache instance
 * @param stop	- true if unplugging during stop - in this case we mark
 *		clean shutdown in metadata and flush all containers.
 *		- false if the device is to be detached from cache - loading
 *		metadata from this device will not be possible.
 * @param cmpl Completion callback
 * @param priv Completion context
 */
static void _ocf_mngt_cache_unplug(ocf_cache_t cache, bool stop,
		_ocf_mngt_cache_unplug_end_t cmpl, void *priv)
{
	struct _ocf_mngt_cache_unplug_context *context;

	ENV_BUG_ON(stop && cache->conf_meta->core_count != 0);

	context = env_vzalloc(sizeof(*context));
	if (!context) {
		cmpl(priv, -OCF_ERR_NO_MEM);
		return;
	}

	context->cmpl = cmpl;
	context->priv = priv;
	context->cache = cache;

	ocf_stop_cleaner(cache);

	__deinit_cleaning_policy(cache);

	if (ocf_mngt_cache_is_dirty(cache)) {
		ENV_BUG_ON(!stop);

		cache->conf_meta->dirty_flushed = DIRTY_NOT_FLUSHED;

		ocf_cache_log(cache, log_warn, "Cache is still dirty. "
				"DO NOT USE your core devices until flushing "
				"dirty data!\n");
	} else {
		cache->conf_meta->dirty_flushed = DIRTY_FLUSHED;
	}

	if (!stop) {
		/* Just set correct shutdown status */
		ocf_metadata_set_shutdown_status(cache, ocf_metadata_detached,
				_ocf_mngt_cache_unplug_complete, context);
	} else {
		/* Flush metadata */
		ocf_metadata_flush_all(cache,
				_ocf_mngt_cache_unplug_complete, context);
	}
}

static int _ocf_mngt_cache_load_core_log(ocf_core_t core, void *cntx)
{
	ocf_core_log(core, log_info, "Successfully added\n");

	return 0;
}

static void _ocf_mngt_cache_load_log(ocf_cache_t cache)
{
	ocf_cache_mode_t cache_mode = ocf_cache_get_mode(cache);
	ocf_eviction_t eviction_type = cache->conf_meta->eviction_policy_type;
	ocf_cleaning_t cleaning_type = cache->conf_meta->cleaning_policy_type;

	ocf_cache_log(cache, log_info, "Successfully loaded\n");
	ocf_cache_log(cache, log_info, "Cache mode : %s\n",
			_ocf_cache_mode_get_name(cache_mode));
	ocf_cache_log(cache, log_info, "Eviction policy : %s\n",
			evict_policy_ops[eviction_type].name);
	ocf_cache_log(cache, log_info, "Cleaning policy : %s\n",
			cleaning_policy_ops[cleaning_type].name);
	ocf_core_visit(cache, _ocf_mngt_cache_load_core_log,
			cache, false);
}

static void _ocf_mngt_cache_load_complete(ocf_cache_t cache, void *priv1,
		void *priv2, int error)
{
	ocf_mngt_cache_load_end_t cmpl = priv1;

	if (error) {
		cmpl(cache, priv2, error);
		return;
	}

	_ocf_mng_cache_set_valid(cache);
	_ocf_mngt_cache_load_log(cache);

	cmpl(cache, priv2, 0);
}

void ocf_mngt_cache_load(ocf_cache_t cache,
		struct ocf_mngt_cache_device_config *cfg,
		ocf_mngt_cache_load_end_t cmpl, void *priv)
{
	int result;

	OCF_CHECK_NULL(cache);
	OCF_CHECK_NULL(cfg);

	/* Load is not allowed in volatile metadata mode */
	if (cache->metadata.is_volatile) {
		cmpl(cache, priv, -EINVAL);
		return;
	}

	result = _ocf_mngt_cache_validate_device_cfg(cfg);
	if (result) {
		cmpl(cache, priv, result);
		return;
	}

	_ocf_mngt_cache_attach(cache, cfg, true,
			_ocf_mngt_cache_load_complete, cmpl, priv);
}
1967
1968 struct ocf_mngt_cache_stop_context {
1969 ocf_mngt_cache_stop_end_t cmpl;
1970 void *priv;
1971 ocf_pipeline_t pipeline;
1972 ocf_cache_t cache;
1973 ocf_ctx_t ctx;
1974 char cache_name[OCF_CACHE_NAME_SIZE];
1975 int cache_write_error;
1976 };
1977
1978 static void ocf_mngt_cache_stop_wait_io(ocf_pipeline_t pipeline,
1979 void *priv, ocf_pipeline_arg_t arg)
1980 {
1981 struct ocf_mngt_cache_stop_context *context = priv;
1982 ocf_cache_t cache = context->cache;
1983
1984 /* TODO: Make this asynchronous! */
1985 ocf_cache_wait_for_io_finish(cache);
1986 ocf_pipeline_next(pipeline);
1987 }
1988
1989 static void ocf_mngt_cache_stop_remove_cores(ocf_pipeline_t pipeline,
1990 void *priv, ocf_pipeline_arg_t arg)
1991 {
1992 struct ocf_mngt_cache_stop_context *context = priv;
1993 ocf_cache_t cache = context->cache;
1994 int i, j, no;
1995
1996 no = cache->conf_meta->core_count;
1997
        /* All exported objects have been removed; clean up the remaining core state. */
1999 for (i = 0, j = 0; j < no && i < OCF_CORE_MAX; i++) {
2000 if (!env_bit_test(i, cache->conf_meta->valid_core_bitmap))
2001 continue;
2002 cache_mng_core_remove_from_cache(cache, i);
2003 if (ocf_cache_is_device_attached(cache))
2004 cache_mng_core_remove_from_cleaning_pol(cache, i);
2005 cache_mng_core_close(cache, i);
2006 j++;
2007 }
2008 ENV_BUG_ON(cache->conf_meta->core_count != 0);
2009
2010 ocf_pipeline_next(pipeline);
2011 }
2012
2013 static void ocf_mngt_cache_stop_unplug_complete(void *priv, int error)
2014 {
2015 struct ocf_mngt_cache_stop_context *context = priv;
2016
        /* short-circuit execution in case of critical error */
2018 if (error && error != -OCF_ERR_WRITE_CACHE) {
2019 ocf_pipeline_finish(context->pipeline, error);
2020 return;
2021 }
2022
2023 /* in case of non-critical (disk write) error just remember its value */
2024 if (error)
2025 context->cache_write_error = error;
2026
2027 ocf_pipeline_next(context->pipeline);
2028 }
2029
2030 static void ocf_mngt_cache_stop_unplug(ocf_pipeline_t pipeline,
2031 void *priv, ocf_pipeline_arg_t arg)
2032 {
2033 struct ocf_mngt_cache_stop_context *context = priv;
2034 ocf_cache_t cache = context->cache;
2035
2036 if (!env_atomic_read(&cache->attached)) {
2037 ocf_pipeline_next(pipeline);
2038 return;
2039 }
2040
2041 _ocf_mngt_cache_unplug(cache, true,
2042 ocf_mngt_cache_stop_unplug_complete, context);
2043 }
2044
2045 static void ocf_mngt_cache_stop_put_io_queues(ocf_pipeline_t pipeline,
2046 void *priv, ocf_pipeline_arg_t arg)
2047 {
2048 struct ocf_mngt_cache_stop_context *context = priv;
2049 ocf_cache_t cache = context->cache;
2050 ocf_queue_t queue, tmp_queue;
2051
2052 list_for_each_entry_safe(queue, tmp_queue, &cache->io_queues, list)
2053 ocf_queue_put(queue);
2054
2055 ocf_pipeline_next(pipeline);
2056 }
2057
2058 static void ocf_mngt_cache_stop_finish(ocf_pipeline_t pipeline,
2059 void *priv, int error)
2060 {
2061 struct ocf_mngt_cache_stop_context *context = priv;
2062 ocf_cache_t cache = context->cache;
2063 ocf_ctx_t ctx = context->ctx;
2064
2065 if (!error) {
2066 env_mutex_lock(&ctx->lock);
2067 /* Mark device uninitialized */
2068 cache->valid_ocf_cache_device_t = 0;
2069 /* Remove cache from the list */
2070 list_del(&cache->list);
2071 env_mutex_unlock(&ctx->lock);
2072 } else {
2073 env_bit_clear(ocf_cache_state_stopping, &cache->cache_state);
2074 env_bit_set(ocf_cache_state_running, &cache->cache_state);
2075 }
2076
2077 if (context->cache_write_error) {
2078 ocf_log(ctx, log_warn, "Stopped cache %s with errors\n",
2079 context->cache_name);
2080 } else if (error) {
2081 ocf_log(ctx, log_err, "Stopping cache %s failed\n",
2082 context->cache_name);
2083 } else {
2084 ocf_log(ctx, log_info, "Cache %s successfully stopped\n",
2085 context->cache_name);
2086 }
2087
2088 context->cmpl(cache, context->priv,
2089 error ?: context->cache_write_error);
2090
2091 ocf_pipeline_destroy(context->pipeline);
2092
2093 if (!error) {
2094 /* Finally release cache instance */
2095 ocf_mngt_cache_put(cache);
2096 }
2097 }
2098
2099 struct ocf_pipeline_properties ocf_mngt_cache_stop_pipeline_properties = {
2100 .priv_size = sizeof(struct ocf_mngt_cache_stop_context),
2101 .finish = ocf_mngt_cache_stop_finish,
2102 .steps = {
2103 OCF_PL_STEP(ocf_mngt_cache_stop_wait_io),
2104 OCF_PL_STEP(ocf_mngt_cache_stop_remove_cores),
2105 OCF_PL_STEP(ocf_mngt_cache_stop_unplug),
2106 OCF_PL_STEP(ocf_mngt_cache_stop_put_io_queues),
2107 OCF_PL_STEP_TERMINATOR(),
2108 },
2109 };
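
/*
 * Pipeline pattern sketch (illustrative): every OCF_PL_STEP callback handles
 * one stage and must hand control back with ocf_pipeline_next() on success
 * or ocf_pipeline_finish() on error; the .finish handler then runs exactly
 * once. "my_step" and "do_work" are hypothetical names.
 *
 *	static void my_step(ocf_pipeline_t pipeline, void *priv,
 *			ocf_pipeline_arg_t arg)
 *	{
 *		struct ocf_mngt_cache_stop_context *context = priv;
 *
 *		if (do_work(context->cache) != 0)
 *			ocf_pipeline_finish(pipeline, -OCF_ERR_NO_MEM);
 *		else
 *			ocf_pipeline_next(pipeline);
 *	}
 */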
2110
2111 void ocf_mngt_cache_stop(ocf_cache_t cache,
2112 ocf_mngt_cache_stop_end_t cmpl, void *priv)
2113 {
2114 struct ocf_mngt_cache_stop_context *context;
2115 ocf_pipeline_t pipeline;
2116 int result;
2117
2118 OCF_CHECK_NULL(cache);
2119
2120 result = ocf_pipeline_create(&pipeline, cache,
2121 &ocf_mngt_cache_stop_pipeline_properties);
2122 if (result) {
2123 cmpl(cache, priv, -OCF_ERR_NO_MEM);
2124 return;
2125 }
2126
2127 context = ocf_pipeline_get_priv(pipeline);
2128
2129 context->cmpl = cmpl;
2130 context->priv = priv;
2131 context->pipeline = pipeline;
2132 context->cache = cache;
2133 context->ctx = cache->owner;
2134
2135 result = env_strncpy(context->cache_name, sizeof(context->cache_name),
2136 ocf_cache_get_name(cache), sizeof(context->cache_name));
2137 if (result) {
2138 ocf_pipeline_destroy(pipeline);
2139 cmpl(cache, priv, -OCF_ERR_NO_MEM);
2140 return;
2141 }
2142
2143 ocf_cache_log(cache, log_info, "Stopping cache\n");
2144
2145 env_bit_set(ocf_cache_state_stopping, &cache->cache_state);
2146 env_bit_clear(ocf_cache_state_running, &cache->cache_state);
2147
2148 ocf_pipeline_next(pipeline);
2149 }
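
/*
 * Usage sketch (illustrative): stop is fully asynchronous; the outcome is
 * delivered to the ocf_mngt_cache_stop_end_t completion. "stop_end" is a
 * hypothetical name.
 *
 *	static void stop_end(ocf_cache_t cache, void *priv, int error)
 *	{
 *		// 0: stopped cleanly and removed from the context list
 *		// -OCF_ERR_WRITE_CACHE: stopped, but the final metadata
 *		//	flush hit a write error
 *		// other: stop failed and the cache is back in the
 *		//	running state
 *	}
 *
 *	ocf_mngt_cache_stop(cache, stop_end, NULL);
 */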
2150
2151 struct ocf_mngt_cache_save_context {
2152 ocf_mngt_cache_save_end_t cmpl;
2153 void *priv;
2154 ocf_pipeline_t pipeline;
2155 ocf_cache_t cache;
2156 };
2157
2158 static void ocf_mngt_cache_save_finish(ocf_pipeline_t pipeline,
2159 void *priv, int error)
2160 {
2161 struct ocf_mngt_cache_save_context *context = priv;
2162
2163 context->cmpl(context->cache, context->priv, error);
2164
2165 ocf_pipeline_destroy(context->pipeline);
2166 }
2167
2168 struct ocf_pipeline_properties ocf_mngt_cache_save_pipeline_properties = {
2169 .priv_size = sizeof(struct ocf_mngt_cache_save_context),
2170 .finish = ocf_mngt_cache_save_finish,
2171 .steps = {
2172 OCF_PL_STEP_TERMINATOR(),
2173 },
2174 };
2175
2176 static void ocf_mngt_cache_save_flush_sb_complete(void *priv, int error)
2177 {
2178 struct ocf_mngt_cache_save_context *context = priv;
2179 ocf_cache_t cache = context->cache;
2180
2181 if (error) {
2182 ocf_cache_log(cache, log_err,
2183 "Failed to flush superblock! Changes "
2184 "in cache config are not persistent!\n");
2185 ocf_pipeline_finish(context->pipeline, -OCF_ERR_WRITE_CACHE);
2186 return;
2187 }
2188
2189 ocf_pipeline_next(context->pipeline);
2190 }
2191
2192 void ocf_mngt_cache_save(ocf_cache_t cache,
2193 ocf_mngt_cache_save_end_t cmpl, void *priv)
2194 {
2195 struct ocf_mngt_cache_save_context *context;
2196 ocf_pipeline_t pipeline;
2197 int result;
2198
2199 OCF_CHECK_NULL(cache);
2200
2201 result = ocf_pipeline_create(&pipeline, cache,
2202 &ocf_mngt_cache_save_pipeline_properties);
2203 if (result) {
2204 cmpl(cache, priv, result);
2205 return;
2206 }
2207
2208 context = ocf_pipeline_get_priv(pipeline);
2209
2210 context->cmpl = cmpl;
2211 context->priv = priv;
2212 context->pipeline = pipeline;
2213 context->cache = cache;
2214
2215 ocf_metadata_flush_superblock(cache,
2216 ocf_mngt_cache_save_flush_sb_complete, context);
2217 }
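
/*
 * Usage sketch (illustrative): save persists only the superblock, which is
 * enough to make configuration changes durable. "save_end" is a
 * hypothetical name.
 *
 *	static void save_end(ocf_cache_t cache, void *priv, int error)
 *	{
 *		// -OCF_ERR_WRITE_CACHE: superblock flush failed and the
 *		// config change is not persistent
 *	}
 *
 *	ocf_mngt_cache_save(cache, save_end, NULL);
 */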
2218
2219 static int _cache_mng_set_cache_mode(ocf_cache_t cache, ocf_cache_mode_t mode)
2220 {
2221 ocf_cache_mode_t mode_old = cache->conf_meta->cache_mode;
2222
2223 /* Check if IO interface type is valid */
2224 if (!ocf_cache_mode_is_valid(mode))
2225 return -OCF_ERR_INVAL;
2226
2227 if (mode == mode_old) {
2228 ocf_cache_log(cache, log_info, "Cache mode '%s' is already set\n",
2229 ocf_get_io_iface_name(mode));
2230 return 0;
2231 }
2232
2233 cache->conf_meta->cache_mode = mode;
2234
2235 if (ocf_cache_mode_wb == mode_old) {
2236 int i;
2237
2238 for (i = 0; i != OCF_CORE_MAX; ++i) {
2239 if (!env_bit_test(i, cache->conf_meta->valid_core_bitmap))
2240 continue;
2241 env_atomic_set(&cache->core_runtime_meta[i].
2242 initial_dirty_clines,
2243 env_atomic_read(&cache->
2244 core_runtime_meta[i].dirty_clines));
2245 }
2246 }
2247
2248 ocf_cache_log(cache, log_info, "Changing cache mode from '%s' to '%s' "
2249 "successful\n", ocf_get_io_iface_name(mode_old),
2250 ocf_get_io_iface_name(mode));
2251
2252 return 0;
2253 }
2254
2255 int ocf_mngt_cache_set_mode(ocf_cache_t cache, ocf_cache_mode_t mode)
2256 {
2257 int result;
2258
2259 OCF_CHECK_NULL(cache);
2260
2261 if (!ocf_cache_mode_is_valid(mode)) {
2262 ocf_cache_log(cache, log_err, "Cache mode %u is invalid\n",
2263 mode);
2264 return -OCF_ERR_INVAL;
2265 }
2266
2267 result = _cache_mng_set_cache_mode(cache, mode);
2268
2269 if (result) {
2270 const char *name = ocf_get_io_iface_name(mode);
2271
2272 ocf_cache_log(cache, log_err, "Setting cache mode '%s' "
2273 "failed\n", name);
2274 }
2275
2276 return result;
2277 }
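
/*
 * Usage sketch (illustrative): the mode switch is synchronous and updates
 * only in-memory config metadata, so pairing it with ocf_mngt_cache_save()
 * (save_end as in the sketch above) makes the change persistent.
 *
 *	int result = ocf_mngt_cache_set_mode(cache, ocf_cache_mode_wt);
 *
 *	if (!result)
 *		ocf_mngt_cache_save(cache, save_end, NULL);
 */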
2278
2279 int ocf_mngt_cache_reset_fallback_pt_error_counter(ocf_cache_t cache)
2280 {
2281 OCF_CHECK_NULL(cache);
2282
        if (ocf_fallback_pt_is_on(cache)) {
                /* Zeroing the counter below takes the cache out of
                 * fallback Pass Through, so log the state transition */
                ocf_cache_log(cache, log_info,
                                "Fallback Pass Through inactive\n");
        }
2287
2288 env_atomic_set(&cache->fallback_pt_error_counter, 0);
2289
2290 return 0;
2291 }
2292
2293 int ocf_mngt_cache_set_fallback_pt_error_threshold(ocf_cache_t cache,
2294 uint32_t new_threshold)
2295 {
2296 bool old_fallback_pt_state, new_fallback_pt_state;
2297
2298 OCF_CHECK_NULL(cache);
2299
2300 if (new_threshold > OCF_CACHE_FALLBACK_PT_MAX_ERROR_THRESHOLD)
2301 return -OCF_ERR_INVAL;
2302
2303 old_fallback_pt_state = ocf_fallback_pt_is_on(cache);
2304
2305 cache->fallback_pt_error_threshold = new_threshold;
2306
2307 new_fallback_pt_state = ocf_fallback_pt_is_on(cache);
2308
2309 if (old_fallback_pt_state != new_fallback_pt_state) {
2310 if (new_fallback_pt_state) {
2311 ocf_cache_log(cache, log_info, "Error threshold reached. "
2312 "Fallback Pass Through activated\n");
2313 } else {
2314 ocf_cache_log(cache, log_info, "Fallback Pass Through "
2315 "inactive\n");
2316 }
2317 }
2318
2319 return 0;
2320 }
2321
2322 int ocf_mngt_cache_get_fallback_pt_error_threshold(ocf_cache_t cache,
2323 uint32_t *threshold)
2324 {
2325 OCF_CHECK_NULL(cache);
2326 OCF_CHECK_NULL(threshold);
2327
2328 *threshold = cache->fallback_pt_error_threshold;
2329
2330 return 0;
2331 }
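
/*
 * Usage sketch (illustrative): reading and lowering the fallback
 * Pass Through error threshold.
 *
 *	uint32_t threshold;
 *
 *	ocf_mngt_cache_get_fallback_pt_error_threshold(cache, &threshold);
 *	// values above OCF_CACHE_FALLBACK_PT_MAX_ERROR_THRESHOLD are
 *	// rejected with -OCF_ERR_INVAL
 *	ocf_mngt_cache_set_fallback_pt_error_threshold(cache, threshold / 2);
 */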
2332
2333 struct ocf_mngt_cache_detach_context {
2334 ocf_mngt_cache_detach_end_t cmpl;
2335 void *priv;
2336 ocf_pipeline_t pipeline;
2337 ocf_cache_t cache;
2338 };
2339
2340 static void ocf_mngt_cache_detach_flush_cmpl(ocf_cache_t cache,
2341 void *priv, int error)
2342 {
2343 struct ocf_mngt_cache_detach_context *context = priv;
2344
2345 if (error) {
2346 ocf_pipeline_finish(context->pipeline, error);
2347 return;
2348 }
2349
2350 ocf_pipeline_next(context->pipeline);
2351 }
2352
2353 static void ocf_mngt_cache_detach_flush(ocf_pipeline_t pipeline,
2354 void *priv, ocf_pipeline_arg_t arg)
2355 {
2356 struct ocf_mngt_cache_detach_context *context = priv;
2357 ocf_cache_t cache = context->cache;
2358
2359 ocf_mngt_cache_flush(cache, true, ocf_mngt_cache_detach_flush_cmpl,
2360 context);
2361 }
2362
2363 static void ocf_mngt_cache_detach_wait_pending(ocf_pipeline_t pipeline,
2364 void *priv, ocf_pipeline_arg_t arg)
2365 {
2366 struct ocf_mngt_cache_detach_context *context = priv;
2367 ocf_cache_t cache = context->cache;
2368
2369 env_atomic_set(&cache->attached, 0);
2370
2371 /* FIXME: This should be asynchronous! */
2372 env_waitqueue_wait(cache->pending_cache_wq,
2373 !env_atomic_read(&cache->pending_cache_requests));
2374
2375 ocf_pipeline_next(context->pipeline);
2376 }
2377
2378 static void ocf_mngt_cache_detach_update_metadata(ocf_pipeline_t pipeline,
2379 void *priv, ocf_pipeline_arg_t arg)
2380 {
2381 struct ocf_mngt_cache_detach_context *context = priv;
2382 ocf_cache_t cache = context->cache;
2383 int i, j, no;
2384
2385 no = cache->conf_meta->core_count;
2386
2387 /* remove cacheline metadata and cleaning policy meta for all cores */
2388 for (i = 0, j = 0; j < no && i < OCF_CORE_MAX; i++) {
2389 if (!env_bit_test(i, cache->conf_meta->valid_core_bitmap))
2390 continue;
2391 cache_mng_core_deinit_attached_meta(cache, i);
2392 cache_mng_core_remove_from_cleaning_pol(cache, i);
2393 j++;
2394 }
2395
2396 ocf_pipeline_next(context->pipeline);
2397 }
2398
2399 static void ocf_mngt_cache_detach_unplug_complete(void *priv, int error)
2400 {
2401 struct ocf_mngt_cache_detach_context *context = priv;
2402
2403 if (error) {
2404 ocf_pipeline_finish(context->pipeline, error);
2405 return;
2406 }
2407
2408 ocf_pipeline_next(context->pipeline);
2409 }
2410
2411 static void ocf_mngt_cache_detach_unplug(ocf_pipeline_t pipeline,
2412 void *priv, ocf_pipeline_arg_t arg)
2413 {
2414 struct ocf_mngt_cache_detach_context *context = priv;
2415 ocf_cache_t cache = context->cache;
2416
2417 /* Do the actual detach - deinit cacheline metadata,
2418 * stop cleaner thread and close cache bottom device */
2419 _ocf_mngt_cache_unplug(cache, false,
2420 ocf_mngt_cache_detach_unplug_complete, context);
2421 }
2422
2423 static void ocf_mngt_cache_detach_finish(ocf_pipeline_t pipeline,
2424 void *priv, int error)
2425 {
2426 struct ocf_mngt_cache_detach_context *context = priv;
2427 ocf_cache_t cache = context->cache;
2428
2429 ocf_refcnt_unfreeze(&cache->dirty);
2430
2431 if (!error) {
2432 ocf_cache_log(cache, log_info, "Successfully detached\n");
2433 } else {
2434 if (error == -OCF_ERR_WRITE_CACHE) {
2435 ocf_cache_log(cache, log_warn,
2436 "Detached cache with errors\n");
2437 } else {
2438 ocf_cache_log(cache, log_err,
2439 "Detaching cache failed\n");
2440 }
2441 }
2442
2443 context->cmpl(cache, context->priv, error);
2444
2445 ocf_pipeline_destroy(context->pipeline);
2446 }
2447
2448 struct ocf_pipeline_properties ocf_mngt_cache_detach_pipeline_properties = {
2449 .priv_size = sizeof(struct ocf_mngt_cache_detach_context),
2450 .finish = ocf_mngt_cache_detach_finish,
2451 .steps = {
2452 OCF_PL_STEP(ocf_mngt_cache_detach_flush),
2453 OCF_PL_STEP(ocf_mngt_cache_detach_wait_pending),
2454 OCF_PL_STEP(ocf_mngt_cache_detach_update_metadata),
2455 OCF_PL_STEP(ocf_mngt_cache_detach_unplug),
2456 OCF_PL_STEP_TERMINATOR(),
2457 },
2458 };
2459
2460 void ocf_mngt_cache_detach(ocf_cache_t cache,
2461 ocf_mngt_cache_detach_end_t cmpl, void *priv)
2462 {
2463 struct ocf_mngt_cache_detach_context *context;
2464 ocf_pipeline_t pipeline;
2465 int result;
2466
2467 OCF_CHECK_NULL(cache);
2468
2469 if (!env_atomic_read(&cache->attached)) {
2470 cmpl(cache, priv, -OCF_ERR_INVAL);
2471 return;
2472 }
2473
2474 result = ocf_pipeline_create(&pipeline, cache,
2475 &ocf_mngt_cache_detach_pipeline_properties);
2476 if (result) {
2477 cmpl(cache, priv, -OCF_ERR_NO_MEM);
2478 return;
2479 }
2480
2481 context = ocf_pipeline_get_priv(pipeline);
2482
2483 context->cmpl = cmpl;
2484 context->priv = priv;
2485 context->pipeline = pipeline;
2486 context->cache = cache;
2487
        /* Prevent new dirty I/O from being submitted */
2489 ocf_refcnt_freeze(&cache->dirty);
2490
2491 ocf_pipeline_next(pipeline);
2492 }
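
/*
 * Usage sketch (illustrative): detach first flushes dirty data (the flush
 * step in the pipeline above), then unplugs the cache device while the
 * cache object itself keeps running. "detach_end" is a hypothetical name.
 *
 *	static void detach_end(ocf_cache_t cache, void *priv, int error)
 *	{
 *		// -OCF_ERR_INVAL: cache was not attached
 *		// -OCF_ERR_WRITE_CACHE: detached, but with write errors
 *	}
 *
 *	ocf_mngt_cache_detach(cache, detach_end, NULL);
 */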