/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */
#include "ocf/ocf.h"
#include "ocf_mngt_common.h"
#include "ocf_mngt_core_priv.h"
#include "../ocf_priv.h"
#include "../ocf_core_priv.h"
#include "../ocf_queue_priv.h"
#include "../metadata/metadata.h"
#include "../engine/cache_engine.h"
#include "../utils/utils_part.h"
#include "../utils/utils_cache_line.h"
#include "../utils/utils_device.h"
#include "../utils/utils_io.h"
#include "../utils/utils_pipeline.h"
#include "../utils/utils_refcnt.h"
#include "../ocf_utils.h"
#include "../concurrency/ocf_concurrency.h"
#include "../eviction/ops.h"
#include "../ocf_ctx_priv.h"
#include "../cleaning/cleaning.h"
#define OCF_ASSERT_PLUGGED(cache) ENV_BUG_ON(!(cache)->device)

static ocf_cache_t _ocf_mngt_get_cache(ocf_ctx_t owner,
		ocf_cache_id_t cache_id)
{
	ocf_cache_t iter = NULL;
	ocf_cache_t cache = NULL;

	list_for_each_entry(iter, &owner->caches, list) {
		if (iter->cache_id == cache_id) {
			cache = iter;
			break;
		}
	}

	return cache;
}

#define DIRTY_SHUTDOWN_ERROR_MSG "Please use --load option to restore " \
	"previous cache state (Warning: data corruption may happen)" \
	"\nOr initialize your cache using --force option. " \
	"Warning: All dirty data will be lost!\n"

#define DIRTY_NOT_FLUSHED_ERROR_MSG "Cache closed w/ no data flushing\n" \
	"Restart with --load or --force option\n"
/**
 * @brief Helper structure carrying parameters for starting a cache
 */
struct ocf_cachemng_init_params {
	bool metadata_volatile;

	ocf_cache_id_t id;
		/*!< cache id */

	ocf_ctx_t ctx;
		/*!< OCF context */

	ocf_cache_t cache;
		/*!< cache that is being initialized */

	uint8_t locked;
		/*!< Keep cache locked */

	/**
	 * @brief initialization state (in case of error, it is used to know
	 * which assets have to be deallocated in premature exit from function
	 */
	struct {
		bool cache_alloc : 1;
			/*!< cache is allocated and added to list */

		bool metadata_inited : 1;
			/*!< Metadata is inited to valid state */

		bool cache_locked : 1;
			/*!< Cache has been locked */
	} flags;

	struct ocf_metadata_init_params {
		ocf_cache_line_size_t line_size;
		/*!< Metadata cache line size */

		ocf_metadata_layout_t layout;
		/*!< Metadata layout (striping/sequential) */

		ocf_cache_mode_t cache_mode;
		/*!< cache mode */
	} metadata;
};
typedef void (*_ocf_mngt_cache_attach_end_t)(ocf_cache_t, void *priv1,
		void *priv2, int error);

struct ocf_cache_attach_context {
	ocf_cache_t cache;
		/*!< cache that is being initialized */

	struct ocf_mngt_cache_device_config cfg;

	uint64_t volume_size;
		/*!< size of the device in cache lines */

	enum ocf_mngt_cache_init_mode init_mode;
		/*!< cache init mode */

	/**
	 * @brief initialization state (in case of error, it is used to know
	 * which assets have to be deallocated in premature exit from function
	 */
	struct {
		bool device_alloc : 1;
			/*!< data structure allocated */

		bool volume_inited : 1;
			/*!< uuid for cache device is allocated */

		bool attached_metadata_inited : 1;
			/*!< attached metadata sections initialized */

		bool device_opened : 1;
			/*!< underlying device volume is open */

		bool cleaner_started : 1;
			/*!< Cleaner has been started */

		bool cores_opened : 1;
			/*!< underlying cores are opened (happens only during
			 * load or recovery)
			 */

		bool concurrency_inited : 1;
	} flags;

	struct {
		ocf_cache_line_size_t line_size;
			/*!< Metadata cache line size */

		ocf_metadata_layout_t layout;
			/*!< Metadata layout (striping/sequential) */

		ocf_cache_mode_t cache_mode;
			/*!< cache mode */

		enum ocf_metadata_shutdown_status shutdown_status;
			/*!< dirty or clean */

		uint8_t dirty_flushed;
			/*!< is dirty data fully flushed */

		int status;
			/*!< metadata retrieval status (nonzero signals an
			 * error during recovery/load, but is not an issue
			 * in case of clean init)
			 */
	} metadata;

	struct {
		void *rw_buffer;
		void *cmp_buffer;
		unsigned long reserved_lba_addr;
		ocf_pipeline_t pipeline;
	} test;

	_ocf_mngt_cache_attach_end_t cmpl;
	void *priv1;
	void *priv2;

	ocf_pipeline_t pipeline;
};
static ocf_cache_id_t _ocf_mngt_cache_find_free_id(ocf_ctx_t owner)
{
	ocf_cache_id_t id = OCF_CACHE_ID_INVALID;

	for (id = OCF_CACHE_ID_MIN; id <= OCF_CACHE_ID_MAX; id++) {
		if (!_ocf_mngt_get_cache(owner, id))
			return id;
	}

	return OCF_CACHE_ID_INVALID;
}

static void __init_hash_table(ocf_cache_t cache)
{
	/* Initialize hash table */
	ocf_metadata_init_hash_table(cache);
}

static void __init_freelist(ocf_cache_t cache)
{
	/* Initialize free list partition */
	ocf_metadata_init_freelist_partition(cache);
}
static void __init_partitions(ocf_cache_t cache)
{
	ocf_part_id_t i_part;

	/* Init default partition */
	ENV_BUG_ON(ocf_mngt_add_partition_to_cache(cache, PARTITION_DEFAULT,
			"unclassified", 0, PARTITION_SIZE_MAX,
			OCF_IO_CLASS_PRIO_LOWEST, true));

	/* Add remaining partitions to the cache as inactive (dummy) ones */
	for (i_part = 0; i_part < OCF_IO_CLASS_MAX; i_part++) {
		if (i_part == PARTITION_DEFAULT)
			continue;

		ENV_BUG_ON(ocf_mngt_add_partition_to_cache(cache, i_part,
				"Inactive", 0, PARTITION_SIZE_MAX,
				OCF_IO_CLASS_PRIO_LOWEST, false));
	}
}
static void __init_partitions_attached(ocf_cache_t cache)
{
	ocf_part_id_t part_id;

	for (part_id = 0; part_id < OCF_IO_CLASS_MAX; part_id++) {
		cache->user_parts[part_id].runtime->head =
				cache->device->collision_table_entries;
		cache->user_parts[part_id].runtime->curr_size = 0;

		ocf_eviction_initialize(cache, part_id);
	}
}
static void __init_cleaning_policy(ocf_cache_t cache)
{
	ocf_cleaning_t cleaning_policy = ocf_cleaning_default;
	int i;

	OCF_ASSERT_PLUGGED(cache);

	for (i = 0; i < ocf_cleaning_max; i++) {
		if (cleaning_policy_ops[i].setup)
			cleaning_policy_ops[i].setup(cache);
	}

	cache->conf_meta->cleaning_policy_type = ocf_cleaning_default;
	if (cleaning_policy_ops[cleaning_policy].initialize)
		cleaning_policy_ops[cleaning_policy].initialize(cache, 1);
}

static void __deinit_cleaning_policy(ocf_cache_t cache)
{
	ocf_cleaning_t cleaning_policy;

	cleaning_policy = cache->conf_meta->cleaning_policy_type;
	if (cleaning_policy_ops[cleaning_policy].deinitialize)
		cleaning_policy_ops[cleaning_policy].deinitialize(cache);
}
static void __init_eviction_policy(ocf_cache_t cache,
		ocf_eviction_t eviction)
{
	ENV_BUG_ON(eviction < 0 || eviction >= ocf_eviction_max);

	cache->conf_meta->eviction_policy_type = eviction;
}

static void __init_cores(ocf_cache_t cache)
{
	/* No core devices yet */
	cache->conf_meta->core_count = 0;
	ENV_BUG_ON(env_memset(cache->conf_meta->valid_core_bitmap,
			sizeof(cache->conf_meta->valid_core_bitmap), 0));
}

static void __init_metadata_version(ocf_cache_t cache)
{
	cache->conf_meta->metadata_version = METADATA_VERSION();
}
static void __reset_stats(ocf_cache_t cache)
{
	int core_id;
	ocf_part_id_t i;

	for (core_id = 0; core_id < OCF_CORE_MAX; core_id++) {
		env_atomic_set(&cache->core_runtime_meta[core_id].
				cached_clines, 0);
		env_atomic_set(&cache->core_runtime_meta[core_id].
				dirty_clines, 0);
		env_atomic64_set(&cache->core_runtime_meta[core_id].
				dirty_since, 0);

		for (i = 0; i != OCF_IO_CLASS_MAX; i++) {
			env_atomic_set(&cache->core_runtime_meta[core_id].
					part_counters[i].cached_clines, 0);
			env_atomic_set(&cache->core_runtime_meta[core_id].
					part_counters[i].dirty_clines, 0);
		}
	}
}
static void init_attached_data_structures(ocf_cache_t cache,
		ocf_eviction_t eviction_policy)
{
	/* Lock to ensure consistency */
	OCF_METADATA_LOCK_WR();
	__init_hash_table(cache);
	__init_freelist(cache);
	__init_partitions_attached(cache);
	__init_cleaning_policy(cache);
	__init_eviction_policy(cache, eviction_policy);
	OCF_METADATA_UNLOCK_WR();
}

static void init_attached_data_structures_recovery(ocf_cache_t cache)
{
	OCF_METADATA_LOCK_WR();
	__init_hash_table(cache);
	__init_freelist(cache);
	__init_partitions_attached(cache);
	__reset_stats(cache);
	__init_metadata_version(cache);
	OCF_METADATA_UNLOCK_WR();
}
/****************************************************************
 * Function for removing all uninitialized core objects	*
 * from the cache instance.					*
 * Used in case of cache initialization errors.			*
 ****************************************************************/
static void _ocf_mngt_close_all_uninitialized_cores(
		ocf_cache_t cache)
{
	ocf_volume_t volume;
	int j, i;

	for (j = cache->conf_meta->core_count, i = 0; j > 0; ++i) {
		if (!env_bit_test(i, cache->conf_meta->valid_core_bitmap))
			continue;

		volume = &(cache->core[i].volume);
		ocf_volume_close(volume);

		--j;

		env_free(cache->core[i].counters);
		cache->core[i].counters = NULL;

		env_bit_clear(i, cache->conf_meta->valid_core_bitmap);
	}

	cache->conf_meta->core_count = 0;
}
/**
 * @brief routine loading metadata from cache device
 *  - attempts to open all the underlying cores
 */
static int _ocf_mngt_init_instance_add_cores(
		struct ocf_cache_attach_context *context)
{
	ocf_cache_t cache = context->cache;
	/* FIXME: This is a temporary hack. Remove after storing name in meta. */
	char core_name[OCF_CORE_NAME_SIZE];
	int ret = -1, i;
	uint64_t hd_lines = 0;

	OCF_ASSERT_PLUGGED(cache);

	if (cache->conf_meta->cachelines !=
			ocf_metadata_get_cachelines_count(cache)) {
		ocf_cache_log(cache, log_err,
				"ERROR: Cache device size mismatch!\n");
		return -OCF_ERR_START_CACHE_FAIL;
	}

	/* Count value will be re-calculated on the basis of 'added' flag */
	cache->conf_meta->core_count = 0;

	/* Check in metadata which cores were added into cache */
	for (i = 0; i < OCF_CORE_MAX; i++) {
		ocf_volume_t tvolume = NULL;
		ocf_core_t core = &cache->core[i];

		if (!cache->core_conf_meta[i].added)
			continue;

		if (!cache->core[i].volume.type)
			goto err;

		ret = snprintf(core_name, sizeof(core_name), "core%d", i);
		if (ret < 0 || ret >= sizeof(core_name))
			goto err;

		ret = ocf_core_set_name(core, core_name, sizeof(core_name));
		if (ret)
			goto err;

		tvolume = ocf_mngt_core_pool_lookup(ocf_cache_get_ctx(cache),
				&core->volume.uuid, core->volume.type);
		if (tvolume) {
			/*
			 * Attach bottom device to core structure
			 * in cache
			 */
			ocf_volume_move(&core->volume, tvolume);
			ocf_mngt_core_pool_remove(cache->owner, tvolume);

			core->opened = true;
			ocf_cache_log(cache, log_info,
					"Attached core %u from pool\n", i);
		} else if (context->cfg.open_cores) {
			ret = ocf_volume_open(&core->volume, NULL);
			if (ret == -OCF_ERR_NOT_OPEN_EXC) {
				ocf_cache_log(cache, log_warn,
						"Cannot open core %u. "
						"Cache is busy", i);
			} else if (ret) {
				ocf_cache_log(cache, log_warn,
						"Cannot open core %u", i);
			} else {
				core->opened = true;
			}
		}

		env_bit_set(i, cache->conf_meta->valid_core_bitmap);
		cache->conf_meta->core_count++;
		core->volume.cache = cache;

		if (ocf_mngt_core_init_front_volume(core))
			goto err;

		core->counters =
			env_zalloc(sizeof(*core->counters), ENV_MEM_NORMAL);
		if (!core->counters)
			goto err;

		if (!core->opened) {
			env_bit_set(ocf_cache_state_incomplete,
					&cache->cache_state);
			cache->ocf_core_inactive_count++;
			ocf_cache_log(cache, log_warn,
					"Cannot find core %u in pool"
					", core added as inactive\n", i);
			continue;
		}

		hd_lines = ocf_bytes_2_lines(cache,
				ocf_volume_get_length(
				&cache->core[i].volume));

		if (hd_lines) {
			ocf_cache_log(cache, log_info,
					"Disk lines = %" ENV_PRIu64 "\n",
					hd_lines);
		}
	}

	context->flags.cores_opened = true;
	return 0;

err:
	_ocf_mngt_close_all_uninitialized_cores(cache);

	return -OCF_ERR_START_CACHE_FAIL;
}
void _ocf_mngt_init_instance_load_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	ocf_cleaning_t cleaning_policy;

	if (error) {
		ocf_cache_log(cache, log_err,
				"Cannot read cache metadata\n");
		ocf_pipeline_finish(context->pipeline,
				-OCF_ERR_START_CACHE_FAIL);
		return;
	}

	cleaning_policy = cache->conf_meta->cleaning_policy_type;
	if (!cleaning_policy_ops[cleaning_policy].initialize)
		goto out;

	if (context->metadata.shutdown_status == ocf_metadata_clean_shutdown)
		cleaning_policy_ops[cleaning_policy].initialize(cache, 0);
	else
		cleaning_policy_ops[cleaning_policy].initialize(cache, 1);

out:
	ocf_pipeline_next(context->pipeline);
}
/**
 * handle load variant
 */
static void _ocf_mngt_init_instance_clean_load(
		struct ocf_cache_attach_context *context)
{
	ocf_cache_t cache = context->cache;

	ocf_metadata_load_all(cache,
			_ocf_mngt_init_instance_load_complete, context);
}

/**
 * handle recovery variant
 */
static void _ocf_mngt_init_instance_recovery(
		struct ocf_cache_attach_context *context)
{
	ocf_cache_t cache = context->cache;

	init_attached_data_structures_recovery(cache);

	ocf_cache_log(cache, log_warn,
			"ERROR: Cache device did not shut down properly!\n");

	ocf_cache_log(cache, log_info, "Initiating recovery sequence...\n");

	ocf_metadata_load_recovery(cache,
			_ocf_mngt_init_instance_load_complete, context);
}

static void _ocf_mngt_init_instance_load(
		struct ocf_cache_attach_context *context)
{
	ocf_cache_t cache = context->cache;
	int ret;

	OCF_ASSERT_PLUGGED(cache);

	ret = _ocf_mngt_init_instance_add_cores(context);
	if (ret) {
		ocf_pipeline_finish(context->pipeline, ret);
		return;
	}

	if (context->metadata.shutdown_status == ocf_metadata_clean_shutdown)
		_ocf_mngt_init_instance_clean_load(context);
	else
		_ocf_mngt_init_instance_recovery(context);
}
/**
 * @brief allocate memory for new cache, add it to cache queue, set initial
 * values and running state
 */
static int _ocf_mngt_init_new_cache(struct ocf_cachemng_init_params *params)
{
	ocf_cache_t cache = env_vzalloc(sizeof(*cache));

	if (!cache)
		return -OCF_ERR_NO_MEM;

	if (env_rwsem_init(&cache->lock) ||
			env_mutex_init(&cache->flush_mutex)) {
		env_vfree(cache);
		return -OCF_ERR_NO_MEM;
	}

	INIT_LIST_HEAD(&cache->list);
	list_add_tail(&cache->list, &params->ctx->caches);
	env_atomic_set(&cache->ref_count, 1);
	cache->owner = params->ctx;

	/* Copy all required initialization parameters */
	cache->cache_id = params->id;

	env_atomic_set(&(cache->last_access_ms),
			env_ticks_to_msecs(env_get_tick_count()));

	env_bit_set(ocf_cache_state_initializing, &cache->cache_state);

	params->cache = cache;
	params->flags.cache_alloc = true;

	return 0;
}
static void _ocf_mngt_attach_cache_device(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	ocf_volume_type_t type;
	int ret;

	cache->device = env_vzalloc(sizeof(*cache->device));
	if (!cache->device) {
		ret = -OCF_ERR_NO_MEM;
		goto err;
	}
	context->flags.device_alloc = true;

	cache->device->init_mode = context->init_mode;

	/* Prepare UUID of cache volume */
	type = ocf_ctx_get_volume_type(cache->owner, context->cfg.volume_type);
	if (!type) {
		ret = -OCF_ERR_INVAL_VOLUME_TYPE;
		goto err;
	}

	ret = ocf_volume_init(&cache->device->volume, type,
			&context->cfg.uuid, true);
	if (ret)
		goto err;

	cache->device->volume.cache = cache;
	context->flags.volume_inited = true;

	/*
	 * Open cache device. It has to be done first because the metadata
	 * service needs to know the size of the cache device.
	 */
	ret = ocf_volume_open(&cache->device->volume,
			context->cfg.volume_params);
	if (ret) {
		ocf_cache_log(cache, log_err, "ERROR: Cache not available\n");
		goto err;
	}
	context->flags.device_opened = true;

	context->volume_size = ocf_volume_get_length(&cache->device->volume);

	/* Check minimum size of cache device */
	if (context->volume_size < OCF_CACHE_SIZE_MIN) {
		ocf_cache_log(cache, log_err, "ERROR: Cache size must "
				"be at least %llu [MiB]\n",
				OCF_CACHE_SIZE_MIN / MiB);
		ret = -OCF_ERR_START_CACHE_FAIL;
		goto err;
	}

	ocf_pipeline_next(pipeline);
	return;

err:
	ocf_pipeline_finish(context->pipeline, ret);
}
/**
 * @brief prepare cache for init. This is the first step towards
 * initializing the cache
 */
static int _ocf_mngt_init_prepare_cache(struct ocf_cachemng_init_params *param,
		struct ocf_mngt_cache_config *cfg)
{
	ocf_cache_t cache;
	char cache_name[OCF_CACHE_NAME_SIZE];
	int ret = 0;

	ret = env_mutex_lock_interruptible(&param->ctx->lock);
	if (ret)
		return ret;

	if (param->id == OCF_CACHE_ID_INVALID) {
		/* ID was not specified, take first free id */
		param->id = _ocf_mngt_cache_find_free_id(param->ctx);
		if (param->id == OCF_CACHE_ID_INVALID) {
			ret = -OCF_ERR_TOO_MANY_CACHES;
			goto out;
		}
	} else {
		/* ID was set, check if cache exists with specified ID */
		cache = _ocf_mngt_get_cache(param->ctx, param->id);
		if (cache) {
			/* Cache already exists */
			ret = -OCF_ERR_CACHE_EXIST;
			goto out;
		}
	}

	if (cfg->name) {
		ret = env_strncpy(cache_name, sizeof(cache_name),
				cfg->name, sizeof(cache_name));
		if (ret)
			goto out;
	} else {
		ret = snprintf(cache_name, sizeof(cache_name),
				"cache%hu", param->id);
		if (ret < 0 || ret >= sizeof(cache_name)) {
			ret = -OCF_ERR_INVAL;
			goto out;
		}
		ret = 0;
	}

	ocf_log(param->ctx, log_info, "Inserting cache %s\n", cache_name);

	ret = _ocf_mngt_init_new_cache(param);
	if (ret)
		goto out;

	cache = param->cache;

	ret = ocf_cache_set_name(cache, cache_name, sizeof(cache_name));
	if (ret)
		goto out;

	cache->backfill.max_queue_size = cfg->backfill.max_queue_size;
	cache->backfill.queue_unblock_size = cfg->backfill.queue_unblock_size;

	env_rwsem_down_write(&cache->lock); /* Lock cache during setup */
	param->flags.cache_locked = true;

	cache->pt_unaligned_io = cfg->pt_unaligned_io;
	cache->use_submit_io_fast = cfg->use_submit_io_fast;

	cache->eviction_policy_init = cfg->eviction_policy;
	cache->metadata.is_volatile = cfg->metadata_volatile;

out:
	env_mutex_unlock(&param->ctx->lock);
	return ret;
}
static void _ocf_mngt_test_volume_initial_write_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;

	if (error) {
		ocf_pipeline_finish(context->test.pipeline, error);
		return;
	}

	ocf_pipeline_next(context->test.pipeline);
}

static void _ocf_mngt_test_volume_initial_write(
		ocf_pipeline_t test_pipeline, void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	/*
	 * Write buffer filled with "1"
	 */

	ENV_BUG_ON(env_memset(context->test.rw_buffer, PAGE_SIZE, 1));

	ocf_submit_cache_page(cache, context->test.reserved_lba_addr,
			OCF_WRITE, context->test.rw_buffer,
			_ocf_mngt_test_volume_initial_write_complete, context);
}
static void _ocf_mngt_test_volume_first_read_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	int ret, diff;

	if (error) {
		ocf_pipeline_finish(context->test.pipeline, error);
		return;
	}

	ret = env_memcmp(context->test.rw_buffer, PAGE_SIZE,
			context->test.cmp_buffer, PAGE_SIZE, &diff);
	if (ret) {
		ocf_pipeline_finish(context->test.pipeline, ret);
		return;
	}

	if (diff) {
		/* we read back different data than what we had just
		   written - this is a fatal error */
		ocf_pipeline_finish(context->test.pipeline, -EIO);
		return;
	}

	if (!ocf_volume_is_atomic(&cache->device->volume)) {
		/* If not atomic, stop testing here */
		ocf_pipeline_finish(context->test.pipeline, 0);
		return;
	}

	ocf_pipeline_next(context->test.pipeline);
}

static void _ocf_mngt_test_volume_first_read(
		ocf_pipeline_t test_pipeline, void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	/*
	 * Read the page back and compare against a buffer filled with "1"
	 */

	ENV_BUG_ON(env_memset(context->test.rw_buffer, PAGE_SIZE, 0));
	ENV_BUG_ON(env_memset(context->test.cmp_buffer, PAGE_SIZE, 1));

	ocf_submit_cache_page(cache, context->test.reserved_lba_addr,
			OCF_READ, context->test.rw_buffer,
			_ocf_mngt_test_volume_first_read_complete, context);
}
static void _ocf_mngt_test_volume_discard_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;

	if (error) {
		ocf_pipeline_finish(context->test.pipeline, error);
		return;
	}

	ocf_pipeline_next(context->test.pipeline);
}

static void _ocf_mngt_test_volume_discard(
		ocf_pipeline_t test_pipeline, void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	/*
	 * Submit discard request
	 */

	ocf_submit_volume_discard(&cache->device->volume,
			context->test.reserved_lba_addr, PAGE_SIZE,
			_ocf_mngt_test_volume_discard_complete, context);
}
static void _ocf_mngt_test_volume_second_read_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	int ret, diff;

	if (error) {
		ocf_pipeline_finish(context->test.pipeline, error);
		return;
	}

	ret = env_memcmp(context->test.rw_buffer, PAGE_SIZE,
			context->test.cmp_buffer, PAGE_SIZE, &diff);
	if (ret) {
		ocf_pipeline_finish(context->test.pipeline, ret);
		return;
	}

	if (diff) {
		/* discard does not cause target addresses to return 0 on
		   subsequent read */
		cache->device->volume.features.discard_zeroes = 0;
	}

	ocf_pipeline_next(context->test.pipeline);
}

static void _ocf_mngt_test_volume_second_read(
		ocf_pipeline_t test_pipeline, void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	/*
	 * Read the discarded page back and compare it against zeros
	 */

	ENV_BUG_ON(env_memset(context->test.rw_buffer, PAGE_SIZE, 1));
	ENV_BUG_ON(env_memset(context->test.cmp_buffer, PAGE_SIZE, 0));

	ocf_submit_cache_page(cache, context->test.reserved_lba_addr,
			OCF_READ, context->test.rw_buffer,
			_ocf_mngt_test_volume_second_read_complete, context);
}
static void _ocf_mngt_test_volume_finish(ocf_pipeline_t pipeline,
		void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;

	env_free(context->test.rw_buffer);
	env_free(context->test.cmp_buffer);

	if (error)
		ocf_pipeline_finish(context->pipeline, error);
	else
		ocf_pipeline_next(context->pipeline);

	ocf_pipeline_destroy(context->test.pipeline);
}

struct ocf_pipeline_properties _ocf_mngt_test_volume_pipeline_properties = {
	.finish = _ocf_mngt_test_volume_finish,
	.steps = {
		OCF_PL_STEP(_ocf_mngt_test_volume_initial_write),
		OCF_PL_STEP(_ocf_mngt_test_volume_first_read),
		OCF_PL_STEP(_ocf_mngt_test_volume_discard),
		OCF_PL_STEP(_ocf_mngt_test_volume_second_read),
		OCF_PL_STEP_TERMINATOR(),
	},
};
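/*
 * Note on the pipeline above (summary added for clarity): the test
 * sequence probes the cache volume's discard semantics. A page at a
 * reserved LBA is written with a known pattern, read back and compared,
 * discarded, then read once more. If the discarded page does not read
 * back as zeros, the discard_zeroes feature flag is cleared so that
 * later attach steps fall back to explicitly writing zeros.
 */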
static void _ocf_mngt_test_volume(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	ocf_pipeline_t test_pipeline;
	int result;

	cache->device->volume.features.discard_zeroes = 1;

	if (!context->cfg.perform_test) {
		ocf_pipeline_next(pipeline);
		return;
	}

	context->test.reserved_lba_addr = ocf_metadata_get_reserved_lba(cache);

	context->test.rw_buffer = env_malloc(PAGE_SIZE, ENV_MEM_NORMAL);
	if (!context->test.rw_buffer) {
		ocf_pipeline_finish(context->pipeline, -OCF_ERR_NO_MEM);
		return;
	}

	context->test.cmp_buffer = env_malloc(PAGE_SIZE, ENV_MEM_NORMAL);
	if (!context->test.cmp_buffer)
		goto err;

	result = ocf_pipeline_create(&test_pipeline, cache,
			&_ocf_mngt_test_volume_pipeline_properties);
	if (result)
		goto err;

	ocf_pipeline_set_priv(test_pipeline, context);

	context->test.pipeline = test_pipeline;

	ocf_pipeline_next(test_pipeline);
	return;

err:
	env_free(context->test.rw_buffer);
	env_free(context->test.cmp_buffer);
	ocf_pipeline_finish(context->pipeline, -OCF_ERR_NO_MEM);
}
/**
 * Prepare metadata according to mode (for load/recovery read from disk)
 */
static void _ocf_mngt_attach_load_properties_end(void *priv, int error,
		struct ocf_metadata_load_properties *properties)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	context->metadata.status = error;

	if (error) {
		ocf_pipeline_next(context->pipeline);
		return;
	}

	context->metadata.shutdown_status = properties->shutdown_status;
	context->metadata.dirty_flushed = properties->dirty_flushed;

	if (cache->device->init_mode == ocf_init_mode_load) {
		context->metadata.line_size = properties->line_size;
		cache->conf_meta->metadata_layout = properties->layout;
		cache->conf_meta->cache_mode = properties->cache_mode;
	}

	ocf_pipeline_next(context->pipeline);
}

static void _ocf_mngt_attach_load_properties(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	OCF_ASSERT_PLUGGED(cache);

	context->metadata.shutdown_status = ocf_metadata_clean_shutdown;
	context->metadata.dirty_flushed = DIRTY_FLUSHED;
	context->metadata.line_size = context->cfg.cache_line_size;

	if (cache->device->init_mode == ocf_init_mode_metadata_volatile) {
		ocf_pipeline_next(context->pipeline);
		return;
	}

	ocf_metadata_load_properties(&cache->device->volume,
			_ocf_mngt_attach_load_properties_end, context);
}
static void _ocf_mngt_attach_prepare_metadata(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	int ret, i;

	if (context->init_mode == ocf_init_mode_load &&
			context->metadata.status) {
		ocf_pipeline_finish(context->pipeline,
				-OCF_ERR_START_CACHE_FAIL);
		return;
	}

	context->metadata.line_size = context->metadata.line_size ?:
			cache->metadata.settings.size;

	/*
	 * Initialize variable size metadata segments
	 */
	if (ocf_metadata_init_variable_size(cache, context->volume_size,
			context->metadata.line_size,
			cache->conf_meta->metadata_layout)) {
		ocf_pipeline_finish(context->pipeline,
				-OCF_ERR_START_CACHE_FAIL);
		return;
	}

	ocf_cache_log(cache, log_debug, "Cache attached\n");
	context->flags.attached_metadata_inited = true;

	for (i = 0; i < OCF_IO_CLASS_MAX + 1; ++i) {
		cache->user_parts[i].runtime =
				&cache->device->runtime_meta->user_parts[i];
	}

	cache->device->freelist_part =
			&cache->device->runtime_meta->freelist_part;

	ret = ocf_concurrency_init(cache);
	if (ret) {
		ocf_pipeline_finish(context->pipeline, ret);
		return;
	}

	context->flags.concurrency_inited = 1;

	ocf_pipeline_next(context->pipeline);
}
/**
 * @brief initializing cache anew (not loading or recovering)
 */
static void _ocf_mngt_init_instance_init(struct ocf_cache_attach_context *context)
{
	ocf_cache_t cache = context->cache;

	if (!context->metadata.status && !context->cfg.force &&
			context->metadata.shutdown_status !=
					ocf_metadata_detached) {

		if (context->metadata.shutdown_status !=
				ocf_metadata_clean_shutdown) {
			ocf_cache_log(cache, log_err, DIRTY_SHUTDOWN_ERROR_MSG);
			ocf_pipeline_finish(context->pipeline,
					-OCF_ERR_DIRTY_SHUTDOWN);
			return;
		}

		if (context->metadata.dirty_flushed == DIRTY_NOT_FLUSHED) {
			ocf_cache_log(cache, log_err,
					DIRTY_NOT_FLUSHED_ERROR_MSG);
			ocf_pipeline_finish(context->pipeline,
					-OCF_ERR_DIRTY_EXISTS);
			return;
		}
	}

	init_attached_data_structures(cache, cache->eviction_policy_init);

	/* In initial cache state there is no dirty data, so all dirty data
	   is considered to be flushed
	 */
	cache->conf_meta->dirty_flushed = true;

	ocf_pipeline_next(context->pipeline);
}
uint64_t _ocf_mngt_calculate_ram_needed(ocf_cache_t cache,
		ocf_volume_t cache_volume)
{
	ocf_cache_line_size_t line_size = ocf_line_size(cache);
	uint64_t volume_size = ocf_volume_get_length(cache_volume);
	uint64_t const_data_size;
	uint64_t cache_line_no;
	uint64_t data_per_line;
	uint64_t min_free_ram;

	/* Superblock + per core metadata */
	const_data_size = 50 * MiB;

	/* Cache metadata */
	cache_line_no = volume_size / line_size;
	data_per_line = (52 + (2 * (line_size / KiB / 4)));

	min_free_ram = const_data_size + cache_line_no * data_per_line;

	/* 110% of calculated value */
	min_free_ram = (11 * min_free_ram) / 10;

	return min_free_ram;
}
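/*
 * Worked example (illustrative figures, not part of the build): for a
 * 1 TiB cache volume with a 4 KiB cache line, cache_line_no is
 * 2^40 / 2^12 = 268435456 lines and data_per_line is
 * 52 + 2 * (4096 / 1024 / 4) = 54 B, so min_free_ram comes to about
 * 50 MiB + 268435456 * 54 B, roughly 13.6 GiB, which after the 110%
 * safety margin is about 14.9 GiB.
 */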
int ocf_mngt_get_ram_needed(ocf_cache_t cache,
		struct ocf_mngt_cache_device_config *cfg, uint64_t *ram_needed)
{
	struct ocf_volume volume;
	ocf_volume_type_t type;
	int result;

	OCF_CHECK_NULL(cache);
	OCF_CHECK_NULL(cfg);
	OCF_CHECK_NULL(ram_needed);

	type = ocf_ctx_get_volume_type(cache->owner, cfg->volume_type);
	if (!type)
		return -OCF_ERR_INVAL_VOLUME_TYPE;

	result = ocf_volume_init(&volume, type,
			&cfg->uuid, false);
	if (result)
		return result;

	result = ocf_volume_open(&volume, cfg->volume_params);
	if (result) {
		ocf_volume_deinit(&volume);
		return result;
	}

	*ram_needed = _ocf_mngt_calculate_ram_needed(cache, &volume);

	ocf_volume_close(&volume);
	ocf_volume_deinit(&volume);

	return 0;
}
/**
 * @brief for error handling do partial cleanup of data structures upon
 * premature function exit.
 *
 * @param ctx OCF context
 * @param params - startup params containing initialization status flags.
 */
static void _ocf_mngt_init_handle_error(ocf_ctx_t ctx,
		struct ocf_cachemng_init_params *params)
{
	ocf_cache_t cache = params->cache;

	if (!params->flags.cache_alloc)
		return;

	if (params->flags.metadata_inited)
		ocf_metadata_deinit(cache);

	env_mutex_lock(&ctx->lock);

	list_del(&cache->list);
	env_vfree(cache);

	env_mutex_unlock(&ctx->lock);
}

static void _ocf_mngt_attach_handle_error(
		struct ocf_cache_attach_context *context)
{
	ocf_cache_t cache = context->cache;

	if (context->flags.cleaner_started)
		ocf_stop_cleaner(cache);

	if (context->flags.cores_opened)
		_ocf_mngt_close_all_uninitialized_cores(cache);

	if (context->flags.attached_metadata_inited)
		ocf_metadata_deinit_variable_size(cache);

	if (context->flags.device_opened)
		ocf_volume_close(&cache->device->volume);

	if (context->flags.concurrency_inited)
		ocf_concurrency_deinit(cache);

	if (context->flags.volume_inited)
		ocf_volume_deinit(&cache->device->volume);

	if (context->flags.device_alloc)
		env_vfree(cache->device);
}
static int _ocf_mngt_cache_init(ocf_cache_t cache,
		struct ocf_cachemng_init_params *params)
{
	int i;

	/*
	 * Super block elements initialization
	 */
	cache->conf_meta->cache_mode = params->metadata.cache_mode;
	cache->conf_meta->metadata_layout = params->metadata.layout;

	for (i = 0; i < OCF_IO_CLASS_MAX + 1; ++i) {
		cache->user_parts[i].config =
				&cache->conf_meta->user_parts[i];
	}

	INIT_LIST_HEAD(&cache->io_queues);

	/* Init Partitions */
	ocf_part_init(cache);

	__init_cores(cache);
	__init_metadata_version(cache);
	__init_partitions(cache);

	return 0;
}
static int _ocf_mngt_cache_start(ocf_ctx_t ctx, ocf_cache_t *cache,
		struct ocf_mngt_cache_config *cfg)
{
	struct ocf_cachemng_init_params params;
	int result;

	ENV_BUG_ON(env_memset(&params, sizeof(params), 0));

	params.id = cfg->id;
	params.ctx = ctx;
	params.metadata.cache_mode = cfg->cache_mode;
	params.metadata.layout = cfg->metadata_layout;
	params.metadata.line_size = cfg->cache_line_size;
	params.metadata_volatile = cfg->metadata_volatile;
	params.locked = cfg->locked;

	/* Prepare cache */
	result = _ocf_mngt_init_prepare_cache(&params, cfg);
	if (result)
		goto _cache_mng_init_instance_ERROR;

	*cache = params.cache;

	/*
	 * Initialize selected segments of metadata in memory
	 */
	result = ocf_metadata_init(*cache, params.metadata.line_size);
	if (result) {
		result = -OCF_ERR_START_CACHE_FAIL;
		goto _cache_mng_init_instance_ERROR;
	}

	ocf_log(ctx, log_debug, "Metadata initialized\n");
	params.flags.metadata_inited = true;

	result = _ocf_mngt_cache_init(*cache, &params);
	if (result)
		goto _cache_mng_init_instance_ERROR;

	if (params.locked) {
		/* Increment reference counter to match cache_lock /
		   cache_unlock convention. User is expected to call
		   ocf_mngt_cache_unlock in future which would up the
		   semaphore as well as decrement ref_count. */
		env_atomic_inc(&(*cache)->ref_count);
	} else {
		/* User did not request to lock cache instance after creation -
		   up the semaphore here since we have acquired the lock to
		   perform management operations. */
		env_rwsem_up_write(&(*cache)->lock);
		params.flags.cache_locked = false;
	}

	return 0;

_cache_mng_init_instance_ERROR:
	_ocf_mngt_init_handle_error(ctx, &params);
	return result;
}
static void _ocf_mng_cache_set_valid(ocf_cache_t cache)
{
	/*
	 * Clear initialization state and set the valid bit so we know
	 * the cache is in use.
	 */
	cache->valid_ocf_cache_device_t = 1;
	env_bit_clear(ocf_cache_state_initializing, &cache->cache_state);
	env_bit_set(ocf_cache_state_running, &cache->cache_state);
}
static int _ocf_mngt_cache_add_cores_t_clean_pol(ocf_cache_t cache)
{
	int clean_type = cache->conf_meta->cleaning_policy_type;
	int i, j, no;
	int result = 0;

	if (cleaning_policy_ops[clean_type].add_core) {
		no = cache->conf_meta->core_count;
		for (i = 0, j = 0; j < no && i < OCF_CORE_MAX; i++) {
			if (!env_bit_test(i,
					cache->conf_meta->valid_core_bitmap))
				continue;

			result = cleaning_policy_ops[clean_type].
					add_core(cache, i);
			if (result)
				goto err;
			j++;
		}
	}

	return 0;

err:
	if (!cleaning_policy_ops[clean_type].remove_core)
		return result;

	while (i--) {
		if (env_bit_test(i, cache->conf_meta->valid_core_bitmap))
			cleaning_policy_ops[clean_type].remove_core(cache, i);
	}

	return result;
}

static void _ocf_mngt_init_attached_nonpersistent(ocf_cache_t cache)
{
	env_atomic_set(&cache->fallback_pt_error_counter, 0);
}
static void _ocf_mngt_attach_check_ram(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	uint64_t min_free_ram;
	uint64_t free_ram;

	min_free_ram = _ocf_mngt_calculate_ram_needed(cache,
			&cache->device->volume);

	free_ram = env_get_free_memory();

	if (free_ram < min_free_ram) {
		ocf_cache_log(cache, log_err, "Not enough free RAM for cache "
				"metadata to start cache\n");
		ocf_cache_log(cache, log_err,
				"Available RAM: %" ENV_PRIu64 " B\n", free_ram);
		ocf_cache_log(cache, log_err, "Needed RAM: %" ENV_PRIu64 " B\n",
				min_free_ram);
		ocf_pipeline_finish(pipeline, -OCF_ERR_NO_FREE_RAM);
		return;
	}

	ocf_pipeline_next(pipeline);
}
static void _ocf_mngt_attach_load_superblock_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	if (error) {
		ocf_cache_log(cache, log_err,
				"ERROR: Cannot load cache state\n");
		ocf_pipeline_finish(context->pipeline,
				-OCF_ERR_START_CACHE_FAIL);
		return;
	}

	ocf_pipeline_next(context->pipeline);
}

static void _ocf_mngt_attach_load_superblock(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	if (cache->device->init_mode != ocf_init_mode_load) {
		ocf_pipeline_next(context->pipeline);
		return;
	}

	ocf_cache_log(cache, log_info, "Loading cache state...\n");
	ocf_metadata_load_superblock(cache,
			_ocf_mngt_attach_load_superblock_complete, context);
}
static void _ocf_mngt_attach_init_instance(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	int result;

	result = ocf_start_cleaner(cache);
	if (result) {
		ocf_cache_log(cache, log_err,
				"Error while starting cleaner\n");
		ocf_pipeline_finish(context->pipeline, result);
		return;
	}
	context->flags.cleaner_started = true;

	switch (cache->device->init_mode) {
	case ocf_init_mode_init:
	case ocf_init_mode_metadata_volatile:
		_ocf_mngt_init_instance_init(context);
		return;
	case ocf_init_mode_load:
		_ocf_mngt_init_instance_load(context);
		return;
	default:
		ocf_pipeline_finish(context->pipeline, -OCF_ERR_INVAL);
	}
}
static void _ocf_mngt_attach_clean_pol(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	int result;

	/* TODO: Should this even be here? */
	if (cache->device->init_mode != ocf_init_mode_load) {
		result = _ocf_mngt_cache_add_cores_t_clean_pol(cache);
		if (result) {
			ocf_pipeline_finish(context->pipeline, result);
			return;
		}
	}

	ocf_pipeline_next(context->pipeline);
}

static void _ocf_mngt_attach_flush_metadata_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	if (error) {
		ocf_cache_log(cache, log_err,
				"ERROR: Cannot save cache state\n");
		ocf_pipeline_finish(context->pipeline,
				-OCF_ERR_WRITE_CACHE);
		return;
	}

	ocf_pipeline_next(context->pipeline);
}

static void _ocf_mngt_attach_flush_metadata(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	ocf_metadata_flush_all(cache,
			_ocf_mngt_attach_flush_metadata_complete, context);
}
static void _ocf_mngt_attach_discard_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	bool discard = cache->device->volume.features.discard_zeroes;

	if (error) {
		ocf_cache_log(cache, log_warn, "%s failed\n",
				discard ? "Discarding whole cache device" :
					"Overwriting cache with zeroes");

		if (ocf_volume_is_atomic(&cache->device->volume)) {
			ocf_cache_log(cache, log_err, "This step is required"
					" for atomic mode!\n");
			ocf_pipeline_finish(context->pipeline, error);
			return;
		}

		ocf_cache_log(cache, log_warn, "This may impact cache"
				" performance\n");
	}

	ocf_pipeline_next(context->pipeline);
}

static void _ocf_mngt_attach_discard(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	uint64_t addr = cache->device->metadata_offset;
	uint64_t length = ocf_volume_get_length(&cache->device->volume) - addr;
	bool discard = cache->device->volume.features.discard_zeroes;

	if (cache->device->init_mode == ocf_init_mode_load) {
		ocf_pipeline_next(context->pipeline);
		return;
	}

	if (!context->cfg.discard_on_start) {
		ocf_pipeline_next(context->pipeline);
		return;
	}

	if (!discard && ocf_volume_is_atomic(&cache->device->volume)) {
		/* discard doesn't zero data - need to explicitly write zeros */
		ocf_submit_write_zeros(&cache->device->volume, addr, length,
				_ocf_mngt_attach_discard_complete, context);
	} else {
		/* Discard volume after metadata */
		ocf_submit_volume_discard(&cache->device->volume, addr, length,
				_ocf_mngt_attach_discard_complete, context);
	}
}
static void _ocf_mngt_attach_flush_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;

	if (error) {
		ocf_pipeline_finish(context->pipeline, error);
		return;
	}

	ocf_pipeline_next(context->pipeline);
}

static void _ocf_mngt_attach_flush(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;
	bool discard = cache->device->volume.features.discard_zeroes;

	if (!discard && ocf_volume_is_atomic(&cache->device->volume)) {
		ocf_submit_volume_flush(&cache->device->volume,
				_ocf_mngt_attach_flush_complete, context);
	} else {
		ocf_pipeline_next(context->pipeline);
	}
}

static void _ocf_mngt_attach_shutdown_status_complete(void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	if (error) {
		ocf_cache_log(cache, log_err, "Cannot flush shutdown status\n");
		ocf_pipeline_finish(context->pipeline,
				-OCF_ERR_WRITE_CACHE);
		return;
	}

	ocf_pipeline_next(context->pipeline);
}

static void _ocf_mngt_attach_shutdown_status(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	/* clear clean shutdown status */
	ocf_metadata_set_shutdown_status(cache, ocf_metadata_dirty_shutdown,
			_ocf_mngt_attach_shutdown_status_complete, context);
}
static void _ocf_mngt_attach_post_init(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_cache_attach_context *context = priv;
	ocf_cache_t cache = context->cache;

	env_waitqueue_init(&cache->pending_cache_wq);

	env_atomic_set(&cache->attached, 1);

	ocf_pipeline_next(context->pipeline);
}

static void _ocf_mngt_cache_attach_finish(ocf_pipeline_t pipeline,
		void *priv, int error)
{
	struct ocf_cache_attach_context *context = priv;

	if (error)
		_ocf_mngt_attach_handle_error(context);

	context->cmpl(context->cache, context->priv1, context->priv2, error);

	env_vfree(context->cfg.uuid.data);
	ocf_pipeline_destroy(context->pipeline);
}
struct ocf_pipeline_properties _ocf_mngt_cache_attach_pipeline_properties = {
	.priv_size = sizeof(struct ocf_cache_attach_context),
	.finish = _ocf_mngt_cache_attach_finish,
	.steps = {
		OCF_PL_STEP(_ocf_mngt_attach_cache_device),
		OCF_PL_STEP(_ocf_mngt_attach_check_ram),
		OCF_PL_STEP(_ocf_mngt_attach_load_properties),
		OCF_PL_STEP(_ocf_mngt_attach_prepare_metadata),
		OCF_PL_STEP(_ocf_mngt_test_volume),
		OCF_PL_STEP(_ocf_mngt_attach_load_superblock),
		OCF_PL_STEP(_ocf_mngt_attach_init_instance),
		OCF_PL_STEP(_ocf_mngt_attach_clean_pol),
		OCF_PL_STEP(_ocf_mngt_attach_flush_metadata),
		OCF_PL_STEP(_ocf_mngt_attach_discard),
		OCF_PL_STEP(_ocf_mngt_attach_flush),
		OCF_PL_STEP(_ocf_mngt_attach_shutdown_status),
		OCF_PL_STEP(_ocf_mngt_attach_post_init),
		OCF_PL_STEP_TERMINATOR(),
	},
};
static void _ocf_mngt_cache_attach(ocf_cache_t cache,
		struct ocf_mngt_cache_device_config *cfg, bool load,
		_ocf_mngt_cache_attach_end_t cmpl, void *priv1, void *priv2)
{
	struct ocf_cache_attach_context *context;
	ocf_pipeline_t pipeline;
	void *data;
	int result;

	result = ocf_pipeline_create(&pipeline, cache,
			&_ocf_mngt_cache_attach_pipeline_properties);
	if (result) {
		cmpl(cache, priv1, priv2, -OCF_ERR_NO_MEM);
		return;
	}

	context = ocf_pipeline_get_priv(pipeline);

	context->cmpl = cmpl;
	context->priv1 = priv1;
	context->priv2 = priv2;
	context->pipeline = pipeline;

	context->cache = cache;
	context->cfg = *cfg;

	data = env_vmalloc(cfg->uuid.size);
	if (!data) {
		result = -OCF_ERR_NO_MEM;
		goto err_pipeline;
	}

	result = env_memcpy(data, cfg->uuid.size, cfg->uuid.data,
			cfg->uuid.size);
	if (result)
		goto err_uuid;

	context->cfg.uuid.data = data;

	if (cache->metadata.is_volatile) {
		context->init_mode = ocf_init_mode_metadata_volatile;
	} else {
		context->init_mode = load ?
				ocf_init_mode_load : ocf_init_mode_init;
	}

	_ocf_mngt_init_attached_nonpersistent(cache);

	ocf_pipeline_next(pipeline);
	return;

err_uuid:
	env_vfree(data);
err_pipeline:
	ocf_pipeline_destroy(pipeline);
	cmpl(cache, priv1, priv2, result);
}
static int _ocf_mngt_cache_validate_cfg(struct ocf_mngt_cache_config *cfg)
{
	if (cfg->id > OCF_CACHE_ID_MAX)
		return -OCF_ERR_INVAL;

	if (!ocf_cache_mode_is_valid(cfg->cache_mode))
		return -OCF_ERR_INVALID_CACHE_MODE;

	if (cfg->eviction_policy >= ocf_eviction_max ||
			cfg->eviction_policy < 0) {
		return -OCF_ERR_INVAL;
	}

	if (!ocf_cache_line_size_is_valid(cfg->cache_line_size))
		return -OCF_ERR_INVALID_CACHE_LINE_SIZE;

	if (cfg->metadata_layout >= ocf_metadata_layout_max ||
			cfg->metadata_layout < 0) {
		return -OCF_ERR_INVAL;
	}

	if (cfg->backfill.queue_unblock_size > cfg->backfill.max_queue_size)
		return -OCF_ERR_INVAL;

	return 0;
}

static int _ocf_mngt_cache_validate_device_cfg(
		struct ocf_mngt_cache_device_config *device_cfg)
{
	if (!device_cfg->uuid.data)
		return -OCF_ERR_INVAL;

	if (device_cfg->uuid.size > OCF_VOLUME_UUID_MAX_SIZE)
		return -OCF_ERR_INVAL;

	if (device_cfg->cache_line_size &&
			!ocf_cache_line_size_is_valid(
					device_cfg->cache_line_size))
		return -OCF_ERR_INVALID_CACHE_LINE_SIZE;

	return 0;
}
static const char *_ocf_cache_mode_names[ocf_cache_mode_max] = {
	[ocf_cache_mode_wt] = "wt",
	[ocf_cache_mode_wb] = "wb",
	[ocf_cache_mode_wa] = "wa",
	[ocf_cache_mode_pt] = "pt",
	[ocf_cache_mode_wi] = "wi",
};

static const char *_ocf_cache_mode_get_name(ocf_cache_mode_t cache_mode)
{
	if (!ocf_cache_mode_is_valid(cache_mode))
		return NULL;

	return _ocf_cache_mode_names[cache_mode];
}
int ocf_mngt_cache_start(ocf_ctx_t ctx, ocf_cache_t *cache,
		struct ocf_mngt_cache_config *cfg)
{
	int result;

	if (!ctx || !cache || !cfg)
		return -OCF_ERR_INVAL;

	result = _ocf_mngt_cache_validate_cfg(cfg);
	if (result)
		return result;

	result = _ocf_mngt_cache_start(ctx, cache, cfg);
	if (!result) {
		_ocf_mng_cache_set_valid(*cache);

		ocf_cache_log(*cache, log_info, "Successfully added\n");
		ocf_cache_log(*cache, log_info, "Cache mode : %s\n",
				_ocf_cache_mode_get_name(
						ocf_cache_get_mode(*cache)));
	} else if (cfg->name) {
		ocf_log(ctx, log_err, "Inserting cache %s failed\n",
				cfg->name);
	} else {
		ocf_log(ctx, log_err, "Inserting cache failed\n");
	}

	return result;
}
int ocf_mngt_cache_set_mngt_queue(ocf_cache_t cache, ocf_queue_t queue)
{
	OCF_CHECK_NULL(cache);
	OCF_CHECK_NULL(queue);

	if (cache->mngt_queue)
		return -OCF_ERR_INVAL;

	ocf_queue_get(queue);
	cache->mngt_queue = queue;

	return 0;
}
static void _ocf_mngt_cache_attach_complete(ocf_cache_t cache, void *priv1,
		void *priv2, int error)
{
	ocf_mngt_cache_attach_end_t cmpl = priv1;

	if (!error) {
		ocf_cache_log(cache, log_info, "Successfully attached\n");
	} else {
		ocf_cache_log(cache, log_err, "Attaching cache device "
				"failed\n");
	}

	cmpl(cache, priv2, error);
}

void ocf_mngt_cache_attach(ocf_cache_t cache,
		struct ocf_mngt_cache_device_config *cfg,
		ocf_mngt_cache_attach_end_t cmpl, void *priv)
{
	int result;

	OCF_CHECK_NULL(cache);
	OCF_CHECK_NULL(cfg);

	result = _ocf_mngt_cache_validate_device_cfg(cfg);
	if (result) {
		cmpl(cache, priv, result);
		return;
	}

	_ocf_mngt_cache_attach(cache, cfg, false,
			_ocf_mngt_cache_attach_complete, cmpl, priv);
}
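/*
 * Illustrative completion flow (hypothetical caller code, kept out of
 * the build): attach runs asynchronously through the pipeline above,
 * so the result arrives in the caller's callback rather than as a
 * return value.
 */
#if 0
static void example_attach_end(ocf_cache_t cache, void *priv, int error)
{
	/* error == 0 means the device is attached and the cache is usable */
}

void example_attach(ocf_cache_t cache,
		struct ocf_mngt_cache_device_config *device_cfg)
{
	ocf_mngt_cache_attach(cache, device_cfg, example_attach_end, NULL);
}
#endif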
typedef void (*_ocf_mngt_cache_unplug_end_t)(void *context, int error);

struct _ocf_mngt_cache_unplug_context {
	_ocf_mngt_cache_unplug_end_t cmpl;
	void *priv;
	ocf_cache_t cache;
};

static void _ocf_mngt_cache_unplug_complete(void *priv, int error)
{
	struct _ocf_mngt_cache_unplug_context *context = priv;
	ocf_cache_t cache = context->cache;

	ocf_volume_close(&cache->device->volume);

	ocf_metadata_deinit_variable_size(cache);
	ocf_concurrency_deinit(cache);

	ocf_volume_deinit(&cache->device->volume);

	env_vfree(cache->device);
	cache->device = NULL;
	env_atomic_set(&cache->attached, 0);

	/* TODO: this should be removed from detach after 'attached' stats
	   are better separated in statistics */
	_ocf_mngt_init_attached_nonpersistent(cache);

	context->cmpl(context->priv, error ? -OCF_ERR_WRITE_CACHE : 0);

	env_vfree(context);
}
/**
 * @brief Unplug caching device from cache instance. Variable size metadata
 *	  containers are deinitialized as well as other cacheline related
 *	  structures. Cache volume is closed.
 *
 * @param cache OCF cache instance
 * @param stop	- true if unplugging during stop - in this case we mark
 *		  clean shutdown in metadata and flush all containers.
 *		- false if the device is to be detached from cache - loading
 *		  metadata from this device will not be possible.
 * @param cmpl Completion callback
 * @param priv Completion context
 */
static void _ocf_mngt_cache_unplug(ocf_cache_t cache, bool stop,
		_ocf_mngt_cache_unplug_end_t cmpl, void *priv)
{
	struct _ocf_mngt_cache_unplug_context *context;

	ENV_BUG_ON(stop && cache->conf_meta->core_count != 0);

	context = env_vzalloc(sizeof(*context));
	if (!context) {
		cmpl(priv, -OCF_ERR_NO_MEM);
		return;
	}

	context->cmpl = cmpl;
	context->priv = priv;
	context->cache = cache;

	ocf_stop_cleaner(cache);

	__deinit_cleaning_policy(cache);

	if (ocf_mngt_cache_is_dirty(cache)) {
		cache->conf_meta->dirty_flushed = DIRTY_NOT_FLUSHED;

		ocf_cache_log(cache, log_warn, "Cache is still dirty. "
				"DO NOT USE your core devices until flushing "
				"dirty data!\n");
	} else {
		cache->conf_meta->dirty_flushed = DIRTY_FLUSHED;
	}

	if (!stop) {
		/* Just set correct shutdown status */
		ocf_metadata_set_shutdown_status(cache, ocf_metadata_detached,
				_ocf_mngt_cache_unplug_complete, context);
	} else {
		/* Flush metadata */
		ocf_metadata_flush_all(cache,
				_ocf_mngt_cache_unplug_complete, context);
	}
}
static int _ocf_mngt_cache_load_core_log(ocf_core_t core, void *cntx)
{
	ocf_core_log(core, log_info, "Successfully added\n");

	return 0;
}

static void _ocf_mngt_cache_load_log(ocf_cache_t cache)
{
	ocf_cache_mode_t cache_mode = ocf_cache_get_mode(cache);
	ocf_eviction_t eviction_type = cache->conf_meta->eviction_policy_type;
	ocf_cleaning_t cleaning_type = cache->conf_meta->cleaning_policy_type;

	ocf_cache_log(cache, log_info, "Successfully loaded\n");
	ocf_cache_log(cache, log_info, "Cache mode : %s\n",
			_ocf_cache_mode_get_name(cache_mode));
	ocf_cache_log(cache, log_info, "Eviction policy : %s\n",
			evict_policy_ops[eviction_type].name);
	ocf_cache_log(cache, log_info, "Cleaning policy : %s\n",
			cleaning_policy_ops[cleaning_type].name);
	ocf_core_visit(cache, _ocf_mngt_cache_load_core_log,
			cache, false);
}

static void _ocf_mngt_cache_load_complete(ocf_cache_t cache, void *priv1,
		void *priv2, int error)
{
	ocf_mngt_cache_load_end_t cmpl = priv1;

	if (error) {
		cmpl(cache, priv2, error);
		return;
	}

	_ocf_mng_cache_set_valid(cache);
	_ocf_mngt_cache_load_log(cache);

	cmpl(cache, priv2, 0);
}
void ocf_mngt_cache_load(ocf_cache_t cache,
		struct ocf_mngt_cache_device_config *cfg,
		ocf_mngt_cache_load_end_t cmpl, void *priv)
{
	int result;

	OCF_CHECK_NULL(cache);
	OCF_CHECK_NULL(cfg);

	/* Load is not allowed in volatile metadata mode */
	if (cache->metadata.is_volatile) {
		cmpl(cache, priv, -EINVAL);
		return;
	}

	result = _ocf_mngt_cache_validate_device_cfg(cfg);
	if (result) {
		cmpl(cache, priv, result);
		return;
	}

	_ocf_mngt_cache_attach(cache, cfg, true,
			_ocf_mngt_cache_load_complete, cmpl, priv);
}
struct ocf_mngt_cache_stop_context {
	ocf_mngt_cache_stop_end_t cmpl;
	void *priv;
	ocf_pipeline_t pipeline;
	ocf_cache_t cache;
	ocf_ctx_t ctx;
	char cache_name[OCF_CACHE_NAME_SIZE];
	int cache_write_error;
};

static void ocf_mngt_cache_stop_wait_io(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_mngt_cache_stop_context *context = priv;
	ocf_cache_t cache = context->cache;

	/* TODO: Make this asynchronous! */
	ocf_cache_wait_for_io_finish(cache);
	ocf_pipeline_next(pipeline);
}
static void ocf_mngt_cache_stop_remove_cores(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_mngt_cache_stop_context *context = priv;
	ocf_cache_t cache = context->cache;
	int i, j, no;

	no = cache->conf_meta->core_count;

	/* All exported objects removed, cleaning up rest. */
	for (i = 0, j = 0; j < no && i < OCF_CORE_MAX; i++) {
		if (!env_bit_test(i, cache->conf_meta->valid_core_bitmap))
			continue;

		cache_mng_core_remove_from_cache(cache, i);
		if (ocf_cache_is_device_attached(cache))
			cache_mng_core_remove_from_cleaning_pol(cache, i);
		cache_mng_core_close(cache, i);
		j++;
	}
	ENV_BUG_ON(cache->conf_meta->core_count != 0);

	ocf_pipeline_next(pipeline);
}
static void ocf_mngt_cache_stop_unplug_complete(void *priv, int error)
{
	struct ocf_mngt_cache_stop_context *context = priv;

	/* short-circuit execution in case of critical error */
	if (error && error != -OCF_ERR_WRITE_CACHE) {
		ocf_pipeline_finish(context->pipeline, error);
		return;
	}

	/* in case of non-critical (disk write) error just remember its value */
	if (error)
		context->cache_write_error = error;

	ocf_pipeline_next(context->pipeline);
}

static void ocf_mngt_cache_stop_unplug(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_mngt_cache_stop_context *context = priv;
	ocf_cache_t cache = context->cache;

	if (!env_atomic_read(&cache->attached)) {
		ocf_pipeline_next(pipeline);
		return;
	}

	_ocf_mngt_cache_unplug(cache, true,
			ocf_mngt_cache_stop_unplug_complete, context);
}
static void ocf_mngt_cache_stop_put_io_queues(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_mngt_cache_stop_context *context = priv;
	ocf_cache_t cache = context->cache;
	ocf_queue_t queue, tmp_queue;

	list_for_each_entry_safe(queue, tmp_queue, &cache->io_queues, list)
		ocf_queue_put(queue);

	ocf_pipeline_next(pipeline);
}
static void ocf_mngt_cache_stop_finish(ocf_pipeline_t pipeline,
		void *priv, int error)
{
	struct ocf_mngt_cache_stop_context *context = priv;
	ocf_cache_t cache = context->cache;
	ocf_ctx_t ctx = context->ctx;

	if (!error) {
		env_mutex_lock(&ctx->lock);
		/* Mark device uninitialized */
		cache->valid_ocf_cache_device_t = 0;
		/* Remove cache from the list */
		list_del(&cache->list);
		env_mutex_unlock(&ctx->lock);
	} else {
		env_bit_clear(ocf_cache_state_stopping, &cache->cache_state);
		env_bit_set(ocf_cache_state_running, &cache->cache_state);
	}

	if (context->cache_write_error) {
		ocf_log(ctx, log_warn, "Stopped cache %s with errors\n",
				context->cache_name);
	} else if (error) {
		ocf_log(ctx, log_err, "Stopping cache %s failed\n",
				context->cache_name);
	} else {
		ocf_log(ctx, log_info, "Cache %s successfully stopped\n",
				context->cache_name);
	}

	context->cmpl(cache, context->priv,
			error ?: context->cache_write_error);

	ocf_pipeline_destroy(context->pipeline);

	/* Finally release cache instance */
	ocf_mngt_cache_put(cache);
}
struct ocf_pipeline_properties ocf_mngt_cache_stop_pipeline_properties = {
	.priv_size = sizeof(struct ocf_mngt_cache_stop_context),
	.finish = ocf_mngt_cache_stop_finish,
	.steps = {
		OCF_PL_STEP(ocf_mngt_cache_stop_wait_io),
		OCF_PL_STEP(ocf_mngt_cache_stop_remove_cores),
		OCF_PL_STEP(ocf_mngt_cache_stop_unplug),
		OCF_PL_STEP(ocf_mngt_cache_stop_put_io_queues),
		OCF_PL_STEP_TERMINATOR(),
	},
};
void ocf_mngt_cache_stop(ocf_cache_t cache,
		ocf_mngt_cache_stop_end_t cmpl, void *priv)
{
	struct ocf_mngt_cache_stop_context *context;
	ocf_pipeline_t pipeline;
	int result;

	OCF_CHECK_NULL(cache);

	result = ocf_pipeline_create(&pipeline, cache,
			&ocf_mngt_cache_stop_pipeline_properties);
	if (result) {
		cmpl(cache, priv, -OCF_ERR_NO_MEM);
		return;
	}

	context = ocf_pipeline_get_priv(pipeline);

	context->cmpl = cmpl;
	context->priv = priv;
	context->pipeline = pipeline;
	context->cache = cache;
	context->ctx = cache->owner;

	result = env_strncpy(context->cache_name, sizeof(context->cache_name),
			ocf_cache_get_name(cache), sizeof(context->cache_name));
	if (result) {
		ocf_pipeline_destroy(pipeline);
		cmpl(cache, priv, -OCF_ERR_NO_MEM);
		return;
	}

	ocf_cache_log(cache, log_info, "Stopping cache\n");

	env_bit_set(ocf_cache_state_stopping, &cache->cache_state);
	env_bit_clear(ocf_cache_state_running, &cache->cache_state);

	ocf_pipeline_next(pipeline);
}
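/*
 * Illustrative stop sequence (hypothetical caller code, kept out of the
 * build): stop is pipeline-driven and asynchronous as well. Note that a
 * non-zero error in the callback may still mean the cache was stopped,
 * e.g. -OCF_ERR_WRITE_CACHE only indicates that metadata could not be
 * fully flushed on the way down.
 */
#if 0
static void example_stop_end(ocf_cache_t cache, void *priv, int error)
{
	/* release adapter resources associated with the cache here */
}

void example_stop(ocf_cache_t cache)
{
	ocf_mngt_cache_stop(cache, example_stop_end, NULL);
}
#endif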
struct ocf_mngt_cache_save_context {
	ocf_mngt_cache_save_end_t cmpl;
	void *priv;
	ocf_pipeline_t pipeline;
	ocf_cache_t cache;
};

static void ocf_mngt_cache_save_finish(ocf_pipeline_t pipeline,
		void *priv, int error)
{
	struct ocf_mngt_cache_save_context *context = priv;

	context->cmpl(context->cache, context->priv, error);

	ocf_pipeline_destroy(context->pipeline);
}

struct ocf_pipeline_properties ocf_mngt_cache_save_pipeline_properties = {
	.priv_size = sizeof(struct ocf_mngt_cache_save_context),
	.finish = ocf_mngt_cache_save_finish,
	.steps = {
		OCF_PL_STEP_TERMINATOR(),
	},
};
static void ocf_mngt_cache_save_flush_sb_complete(void *priv, int error)
{
	struct ocf_mngt_cache_save_context *context = priv;
	ocf_cache_t cache = context->cache;

	if (error) {
		ocf_cache_log(cache, log_err,
				"Failed to flush superblock! Changes "
				"in cache config are not persistent!\n");
		ocf_pipeline_finish(context->pipeline, -OCF_ERR_WRITE_CACHE);
		return;
	}

	ocf_pipeline_next(context->pipeline);
}

void ocf_mngt_cache_save(ocf_cache_t cache,
		ocf_mngt_cache_save_end_t cmpl, void *priv)
{
	struct ocf_mngt_cache_save_context *context;
	ocf_pipeline_t pipeline;
	int result;

	OCF_CHECK_NULL(cache);

	result = ocf_pipeline_create(&pipeline, cache,
			&ocf_mngt_cache_save_pipeline_properties);
	if (result) {
		cmpl(cache, priv, result);
		return;
	}

	context = ocf_pipeline_get_priv(pipeline);

	context->cmpl = cmpl;
	context->priv = priv;
	context->pipeline = pipeline;
	context->cache = cache;

	ocf_metadata_flush_superblock(cache,
			ocf_mngt_cache_save_flush_sb_complete, context);
}
static int _cache_mng_set_cache_mode(ocf_cache_t cache, ocf_cache_mode_t mode)
{
	ocf_cache_mode_t mode_old = cache->conf_meta->cache_mode;

	/* Check if IO interface type is valid */
	if (!ocf_cache_mode_is_valid(mode))
		return -OCF_ERR_INVAL;

	if (mode == mode_old) {
		ocf_cache_log(cache, log_info, "Cache mode '%s' is already set\n",
				ocf_get_io_iface_name(mode));
		return 0;
	}

	cache->conf_meta->cache_mode = mode;

	if (ocf_cache_mode_wb == mode_old) {
		int i;

		for (i = 0; i != OCF_CORE_MAX; ++i) {
			if (!env_bit_test(i,
					cache->conf_meta->valid_core_bitmap))
				continue;
			env_atomic_set(&cache->core_runtime_meta[i].
					initial_dirty_clines,
					env_atomic_read(&cache->
					core_runtime_meta[i].dirty_clines));
		}
	}

	ocf_cache_log(cache, log_info, "Changing cache mode from '%s' to '%s' "
			"successful\n", ocf_get_io_iface_name(mode_old),
			ocf_get_io_iface_name(mode));

	return 0;
}
int ocf_mngt_cache_set_mode(ocf_cache_t cache, ocf_cache_mode_t mode)
{
	int result;

	OCF_CHECK_NULL(cache);

	if (!ocf_cache_mode_is_valid(mode)) {
		ocf_cache_log(cache, log_err, "Cache mode %u is invalid\n",
				mode);
		return -OCF_ERR_INVAL;
	}

	result = _cache_mng_set_cache_mode(cache, mode);
	if (result) {
		const char *name = ocf_get_io_iface_name(mode);

		ocf_cache_log(cache, log_err, "Setting cache mode '%s' "
				"failed\n", name);
	}

	return result;
}
int ocf_mngt_cache_reset_fallback_pt_error_counter(ocf_cache_t cache)
{
	OCF_CHECK_NULL(cache);

	if (ocf_fallback_pt_is_on(cache)) {
		ocf_cache_log(cache, log_info,
				"Fallback Pass Through inactive\n");
	}

	env_atomic_set(&cache->fallback_pt_error_counter, 0);

	return 0;
}

int ocf_mngt_cache_set_fallback_pt_error_threshold(ocf_cache_t cache,
		uint32_t new_threshold)
{
	bool old_fallback_pt_state, new_fallback_pt_state;

	OCF_CHECK_NULL(cache);

	if (new_threshold > OCF_CACHE_FALLBACK_PT_MAX_ERROR_THRESHOLD)
		return -OCF_ERR_INVAL;

	old_fallback_pt_state = ocf_fallback_pt_is_on(cache);

	cache->fallback_pt_error_threshold = new_threshold;

	new_fallback_pt_state = ocf_fallback_pt_is_on(cache);

	if (old_fallback_pt_state != new_fallback_pt_state) {
		if (new_fallback_pt_state) {
			ocf_cache_log(cache, log_info, "Error threshold reached. "
					"Fallback Pass Through activated\n");
		} else {
			ocf_cache_log(cache, log_info, "Fallback Pass Through "
					"deactivated\n");
		}
	}

	return 0;
}

int ocf_mngt_cache_get_fallback_pt_error_threshold(ocf_cache_t cache,
		uint32_t *threshold)
{
	OCF_CHECK_NULL(cache);
	OCF_CHECK_NULL(threshold);

	*threshold = cache->fallback_pt_error_threshold;

	return 0;
}
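/*
 * Note on the three functions above (summary added for clarity): they
 * manage the Fallback Pass Through mechanism. As the log messages
 * suggest, once the cache device error counter reaches
 * fallback_pt_error_threshold, I/O is routed around the failing cache
 * device; resetting the counter or raising the threshold deactivates
 * the bypass again.
 */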
struct ocf_mngt_cache_detach_context {
	ocf_mngt_cache_detach_end_t cmpl;
	void *priv;
	ocf_pipeline_t pipeline;
	ocf_cache_t cache;
};

static void ocf_mngt_cache_detach_flush_cmpl(ocf_cache_t cache,
		void *priv, int error)
{
	struct ocf_mngt_cache_detach_context *context = priv;

	if (error) {
		ocf_pipeline_finish(context->pipeline, error);
		return;
	}

	ocf_pipeline_next(context->pipeline);
}

static void ocf_mngt_cache_detach_flush(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_mngt_cache_detach_context *context = priv;
	ocf_cache_t cache = context->cache;

	ocf_mngt_cache_flush(cache, true, ocf_mngt_cache_detach_flush_cmpl,
			context);
}

static void ocf_mngt_cache_detach_wait_pending(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_mngt_cache_detach_context *context = priv;
	ocf_cache_t cache = context->cache;

	env_atomic_set(&cache->attached, 0);

	/* FIXME: This should be asynchronous! */
	env_waitqueue_wait(cache->pending_cache_wq,
			!env_atomic_read(&cache->pending_cache_requests));

	ocf_pipeline_next(context->pipeline);
}
static void ocf_mngt_cache_detach_update_metadata(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_mngt_cache_detach_context *context = priv;
	ocf_cache_t cache = context->cache;
	int i, j, no;

	no = cache->conf_meta->core_count;

	/* remove cacheline metadata and cleaning policy meta for all cores */
	for (i = 0, j = 0; j < no && i < OCF_CORE_MAX; i++) {
		if (!env_bit_test(i, cache->conf_meta->valid_core_bitmap))
			continue;

		cache_mng_core_deinit_attached_meta(cache, i);
		cache_mng_core_remove_from_cleaning_pol(cache, i);
		j++;
	}

	ocf_pipeline_next(context->pipeline);
}
static void ocf_mngt_cache_detach_unplug_complete(void *priv, int error)
{
	struct ocf_mngt_cache_detach_context *context = priv;

	if (error) {
		ocf_pipeline_finish(context->pipeline, error);
		return;
	}

	ocf_pipeline_next(context->pipeline);
}

static void ocf_mngt_cache_detach_unplug(ocf_pipeline_t pipeline,
		void *priv, ocf_pipeline_arg_t arg)
{
	struct ocf_mngt_cache_detach_context *context = priv;
	ocf_cache_t cache = context->cache;

	/* Do the actual detach - deinit cacheline metadata,
	 * stop cleaner thread and close cache bottom device */
	_ocf_mngt_cache_unplug(cache, false,
			ocf_mngt_cache_detach_unplug_complete, context);
}
static void ocf_mngt_cache_detach_finish(ocf_pipeline_t pipeline,
		void *priv, int error)
{
	struct ocf_mngt_cache_detach_context *context = priv;
	ocf_cache_t cache = context->cache;

	ocf_refcnt_unfreeze(&cache->dirty);

	if (!error) {
		ocf_cache_log(cache, log_info, "Successfully detached\n");
	} else if (error == -OCF_ERR_WRITE_CACHE) {
		ocf_cache_log(cache, log_warn,
				"Detached cache with errors\n");
	} else {
		ocf_cache_log(cache, log_err,
				"Detaching cache failed\n");
	}

	context->cmpl(cache, context->priv, error);

	ocf_pipeline_destroy(context->pipeline);
}

struct ocf_pipeline_properties ocf_mngt_cache_detach_pipeline_properties = {
	.priv_size = sizeof(struct ocf_mngt_cache_detach_context),
	.finish = ocf_mngt_cache_detach_finish,
	.steps = {
		OCF_PL_STEP(ocf_mngt_cache_detach_flush),
		OCF_PL_STEP(ocf_mngt_cache_detach_wait_pending),
		OCF_PL_STEP(ocf_mngt_cache_detach_update_metadata),
		OCF_PL_STEP(ocf_mngt_cache_detach_unplug),
		OCF_PL_STEP_TERMINATOR(),
	},
};
void ocf_mngt_cache_detach(ocf_cache_t cache,
		ocf_mngt_cache_detach_end_t cmpl, void *priv)
{
	struct ocf_mngt_cache_detach_context *context;
	ocf_pipeline_t pipeline;
	int result;

	OCF_CHECK_NULL(cache);

	if (!env_atomic_read(&cache->attached)) {
		cmpl(cache, priv, -OCF_ERR_INVAL);
		return;
	}

	result = ocf_pipeline_create(&pipeline, cache,
			&ocf_mngt_cache_detach_pipeline_properties);
	if (result) {
		cmpl(cache, priv, -OCF_ERR_NO_MEM);
		return;
	}

	context = ocf_pipeline_get_priv(pipeline);

	context->cmpl = cmpl;
	context->priv = priv;
	context->pipeline = pipeline;
	context->cache = cache;

	/* prevent dirty io */
	ocf_refcnt_freeze(&cache->dirty);

	ocf_pipeline_next(pipeline);
}