/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */
7 #include "ocf_mngt_common.h"
8 #include "ocf_mngt_core_priv.h"
9 #include "../ocf_priv.h"
10 #include "../ocf_core_priv.h"
11 #include "../ocf_queue_priv.h"
12 #include "../metadata/metadata.h"
13 #include "../engine/cache_engine.h"
14 #include "../utils/utils_part.h"
15 #include "../utils/utils_cache_line.h"
16 #include "../utils/utils_io.h"
17 #include "../utils/utils_cache_line.h"
18 #include "../utils/utils_pipeline.h"
19 #include "../utils/utils_refcnt.h"
20 #include "../utils/utils_async_lock.h"
21 #include "../concurrency/ocf_concurrency.h"
22 #include "../eviction/ops.h"
23 #include "../ocf_ctx_priv.h"
24 #include "../ocf_freelist.h"
25 #include "../cleaning/cleaning.h"
26 #include "../promotion/ops.h"
28 #define OCF_ASSERT_PLUGGED(cache) ENV_BUG_ON(!(cache)->device)
30 #define DIRTY_SHUTDOWN_ERROR_MSG "Please use --load option to restore " \
31 "previous cache state (Warning: data corruption may happen)" \
32 "\nOr initialize your cache using --force option. " \
33 "Warning: All dirty data will be lost!\n"
35 #define DIRTY_NOT_FLUSHED_ERROR_MSG "Cache closed w/ no data flushing\n" \
36 "Restart with --load or --force option\n"
39 * @brief Helpful struct to start cache
41 struct ocf_cache_mngt_init_params
{
42 bool metadata_volatile
;
48 /*!< cache that is being initialized */
51 /*!< Keep cache locked */
54 * @brief initialization state (in case of error, it is used to know
55 * which assets have to be deallocated in premature exit from function
59 /*!< cache is allocated and added to list */
61 bool metadata_inited
: 1;
62 /*!< Metadata is inited to valid state */
64 bool added_to_list
: 1;
65 /*!< Cache is added to context list */
67 bool cache_locked
: 1;
68 /*!< Cache has been locked */
71 struct ocf_metadata_init_params
{
72 ocf_cache_line_size_t line_size
;
73 /*!< Metadata cache line size */
75 ocf_metadata_layout_t layout
;
76 /*!< Metadata layout (striping/sequential) */
78 ocf_cache_mode_t cache_mode
;
81 ocf_promotion_t promotion_policy
;
85 typedef void (*_ocf_mngt_cache_attach_end_t
)(ocf_cache_t
, void *priv1
,
86 void *priv2
, int error
);
88 struct ocf_cache_attach_context
{
90 /*!< cache that is being initialized */
92 struct ocf_mngt_cache_device_config cfg
;
95 /*!< size of the device in cache lines */
97 enum ocf_mngt_cache_init_mode init_mode
;
98 /*!< cache init mode */
101 * @brief initialization state (in case of error, it is used to know
102 * which assets have to be deallocated in premature exit from function
105 bool device_alloc
: 1;
106 /*!< data structure allocated */
108 bool volume_inited
: 1;
109 /*!< uuid for cache device is allocated */
111 bool attached_metadata_inited
: 1;
112 /*!< attached metadata sections initialized */
114 bool device_opened
: 1;
115 /*!< underlying device volume is open */
117 bool cleaner_started
: 1;
118 /*!< Cleaner has been started */
120 bool promotion_initialized
: 1;
121 /*!< Promotion policy has been started */
123 bool cores_opened
: 1;
124 /*!< underlying cores are opened (happens only during
128 bool freelist_inited
: 1;
130 bool concurrency_inited
: 1;
134 ocf_cache_line_size_t line_size
;
135 /*!< Metadata cache line size */
137 ocf_metadata_layout_t layout
;
138 /*!< Metadata layout (striping/sequential) */
140 ocf_cache_mode_t cache_mode
;
143 enum ocf_metadata_shutdown_status shutdown_status
;
144 /*!< dirty or clean */
146 uint8_t dirty_flushed
;
147 /*!< is dirty data fully flushed */
150 /*!< metadata retrieval status (nonzero is sign of an error
151 * during recovery/load but is non issue in case of clean init
158 unsigned long reserved_lba_addr
;
159 ocf_pipeline_t pipeline
;
162 _ocf_mngt_cache_attach_end_t cmpl
;
166 ocf_pipeline_t pipeline
;
169 static void __init_partitions(ocf_cache_t cache
)
171 ocf_part_id_t i_part
;
173 /* Init default Partition */
174 ENV_BUG_ON(ocf_mngt_add_partition_to_cache(cache
, PARTITION_DEFAULT
,
175 "unclassified", 0, PARTITION_SIZE_MAX
,
176 OCF_IO_CLASS_PRIO_LOWEST
, true));
178 /* Add other partition to the cache and make it as dummy */
179 for (i_part
= 0; i_part
< OCF_IO_CLASS_MAX
; i_part
++) {
180 ocf_refcnt_freeze(&cache
->refcnt
.cleaning
[i_part
]);
182 if (i_part
== PARTITION_DEFAULT
)
185 /* Init default Partition */
186 ENV_BUG_ON(ocf_mngt_add_partition_to_cache(cache
, i_part
,
187 "Inactive", 0, PARTITION_SIZE_MAX
,
188 OCF_IO_CLASS_PRIO_LOWEST
, false));
192 static void __init_partitions_attached(ocf_cache_t cache
)
194 ocf_part_id_t part_id
;
196 for (part_id
= 0; part_id
< OCF_IO_CLASS_MAX
; part_id
++) {
197 cache
->user_parts
[part_id
].runtime
->head
=
198 cache
->device
->collision_table_entries
;
199 cache
->user_parts
[part_id
].runtime
->curr_size
= 0;
201 ocf_eviction_initialize(cache
, part_id
);
205 static void __init_freelist(ocf_cache_t cache
)
207 uint64_t free_clines
= ocf_metadata_collision_table_entries(cache
) -
208 ocf_get_cache_occupancy(cache
);
210 ocf_freelist_populate(cache
->freelist
, free_clines
);
213 static ocf_error_t
__init_cleaning_policy(ocf_cache_t cache
)
215 ocf_cleaning_t cleaning_policy
= ocf_cleaning_default
;
217 ocf_error_t result
= 0;
219 OCF_ASSERT_PLUGGED(cache
);
221 for (i
= 0; i
< ocf_cleaning_max
; i
++) {
222 if (cleaning_policy_ops
[i
].setup
)
223 cleaning_policy_ops
[i
].setup(cache
);
226 cache
->conf_meta
->cleaning_policy_type
= ocf_cleaning_default
;
227 if (cleaning_policy_ops
[cleaning_policy
].initialize
)
228 result
= cleaning_policy_ops
[cleaning_policy
].initialize(cache
, 1);
233 static void __deinit_cleaning_policy(ocf_cache_t cache
)
235 ocf_cleaning_t cleaning_policy
;
237 cleaning_policy
= cache
->conf_meta
->cleaning_policy_type
;
238 if (cleaning_policy_ops
[cleaning_policy
].deinitialize
)
239 cleaning_policy_ops
[cleaning_policy
].deinitialize(cache
);
242 static void __init_eviction_policy(ocf_cache_t cache
,
243 ocf_eviction_t eviction
)
245 ENV_BUG_ON(eviction
< 0 || eviction
>= ocf_eviction_max
);
247 cache
->conf_meta
->eviction_policy_type
= eviction
;
250 static void __setup_promotion_policy(ocf_cache_t cache
)
254 OCF_CHECK_NULL(cache
);
256 for (i
= 0; i
< ocf_promotion_max
; i
++) {
257 if (ocf_promotion_policies
[i
].setup
)
258 ocf_promotion_policies
[i
].setup(cache
);
262 static void __deinit_promotion_policy(ocf_cache_t cache
)
264 ocf_promotion_deinit(cache
->promotion_policy
);
265 cache
->promotion_policy
= NULL
;
268 static void __init_cores(ocf_cache_t cache
)
270 /* No core devices yet */
271 cache
->conf_meta
->core_count
= 0;
272 ENV_BUG_ON(env_memset(cache
->conf_meta
->valid_core_bitmap
,
273 sizeof(cache
->conf_meta
->valid_core_bitmap
), 0));
276 static void __init_metadata_version(ocf_cache_t cache
)
278 cache
->conf_meta
->metadata_version
= METADATA_VERSION();
281 static void __reset_stats(ocf_cache_t cache
)
284 ocf_core_id_t core_id
;
287 for_each_core_all(cache
, core
, core_id
) {
288 env_atomic_set(&core
->runtime_meta
->cached_clines
, 0);
289 env_atomic_set(&core
->runtime_meta
->dirty_clines
, 0);
290 env_atomic64_set(&core
->runtime_meta
->dirty_since
, 0);
292 for (i
= 0; i
!= OCF_IO_CLASS_MAX
; i
++) {
293 env_atomic_set(&core
->runtime_meta
->
294 part_counters
[i
].cached_clines
, 0);
295 env_atomic_set(&core
->runtime_meta
->
296 part_counters
[i
].dirty_clines
, 0);
301 static ocf_error_t
init_attached_data_structures(ocf_cache_t cache
,
302 ocf_eviction_t eviction_policy
)
306 /* Lock to ensure consistency */
308 ocf_metadata_init_hash_table(cache
);
309 ocf_metadata_init_collision(cache
);
310 __init_partitions_attached(cache
);
311 __init_freelist(cache
);
313 result
= __init_cleaning_policy(cache
);
315 ocf_cache_log(cache
, log_err
,
316 "Cannot initialize cleaning policy\n");
320 __init_eviction_policy(cache
, eviction_policy
);
321 __setup_promotion_policy(cache
);
326 static void init_attached_data_structures_recovery(ocf_cache_t cache
)
328 ocf_metadata_init_hash_table(cache
);
329 ocf_metadata_init_collision(cache
);
330 __init_partitions_attached(cache
);
331 __reset_stats(cache
);
332 __init_metadata_version(cache
);
335 /****************************************************************
336 * Function for removing all uninitialized core objects *
337 * from the cache instance. *
338 * Used in case of cache initialization errors. *
339 ****************************************************************/
340 static void _ocf_mngt_close_all_uninitialized_cores(
346 for (j
= cache
->conf_meta
->core_count
, i
= 0; j
> 0; ++i
) {
347 if (!env_bit_test(i
, cache
->conf_meta
->valid_core_bitmap
))
350 volume
= &(cache
->core
[i
].volume
);
351 ocf_volume_close(volume
);
355 env_free(cache
->core
[i
].counters
);
356 cache
->core
[i
].counters
= NULL
;
358 env_bit_clear(i
, cache
->conf_meta
->valid_core_bitmap
);
361 cache
->conf_meta
->core_count
= 0;
365 * @brief routine loading metadata from cache device
366 * - attempts to open all the underlying cores
368 static int _ocf_mngt_init_instance_add_cores(
369 struct ocf_cache_attach_context
*context
)
371 ocf_cache_t cache
= context
->cache
;
373 ocf_core_id_t core_id
;
375 uint64_t hd_lines
= 0;
377 OCF_ASSERT_PLUGGED(cache
);
379 /* Count value will be re-calculated on the basis of 'valid' flag */
380 cache
->conf_meta
->core_count
= 0;
382 /* Check in metadata which cores were saved in cache metadata */
383 for_each_core_metadata(cache
, core
, core_id
) {
384 ocf_volume_t tvolume
= NULL
;
386 if (!core
->volume
.type
)
389 tvolume
= ocf_mngt_core_pool_lookup(ocf_cache_get_ctx(cache
),
390 &core
->volume
.uuid
, core
->volume
.type
);
393 * Attach bottom device to core structure
396 ocf_volume_move(&core
->volume
, tvolume
);
397 ocf_mngt_core_pool_remove(cache
->owner
, tvolume
);
400 ocf_cache_log(cache
, log_info
,
401 "Attached core %u from pool\n",
403 } else if (context
->cfg
.open_cores
) {
404 ret
= ocf_volume_open(&core
->volume
, NULL
);
405 if (ret
== -OCF_ERR_NOT_OPEN_EXC
) {
406 ocf_cache_log(cache
, log_warn
,
407 "Cannot open core %u. "
408 "Cache is busy", core_id
);
410 ocf_cache_log(cache
, log_warn
,
411 "Cannot open core %u", core_id
);
417 env_bit_set(core_id
, cache
->conf_meta
->valid_core_bitmap
);
419 cache
->conf_meta
->core_count
++;
420 core
->volume
.cache
= cache
;
422 if (ocf_mngt_core_init_front_volume(core
))
426 env_zalloc(sizeof(*core
->counters
), ENV_MEM_NORMAL
);
431 env_bit_set(ocf_cache_state_incomplete
,
432 &cache
->cache_state
);
433 cache
->ocf_core_inactive_count
++;
434 ocf_cache_log(cache
, log_warn
,
435 "Cannot find core %u in pool"
436 ", core added as inactive\n", core_id
);
440 hd_lines
= ocf_bytes_2_lines(cache
,
441 ocf_volume_get_length(&core
->volume
));
444 ocf_cache_log(cache
, log_info
,
445 "Disk lines = %" ENV_PRIu64
"\n", hd_lines
);
449 context
->flags
.cores_opened
= true;
453 _ocf_mngt_close_all_uninitialized_cores(cache
);
455 return -OCF_ERR_START_CACHE_FAIL
;
458 void _ocf_mngt_init_instance_load_complete(void *priv
, int error
)
460 struct ocf_cache_attach_context
*context
= priv
;
461 ocf_cache_t cache
= context
->cache
;
462 ocf_cleaning_t cleaning_policy
;
466 ocf_cache_log(cache
, log_err
,
467 "Cannot read cache metadata\n");
468 OCF_PL_FINISH_RET(context
->pipeline
, -OCF_ERR_START_CACHE_FAIL
);
471 __init_freelist(cache
);
473 cleaning_policy
= cache
->conf_meta
->cleaning_policy_type
;
474 if (!cleaning_policy_ops
[cleaning_policy
].initialize
)
477 if (context
->metadata
.shutdown_status
== ocf_metadata_clean_shutdown
)
478 result
= cleaning_policy_ops
[cleaning_policy
].initialize(cache
, 0);
480 result
= cleaning_policy_ops
[cleaning_policy
].initialize(cache
, 1);
483 ocf_cache_log(cache
, log_err
,
484 "Cannot initialize cleaning policy\n");
485 OCF_PL_FINISH_RET(context
->pipeline
, result
);
489 ocf_pipeline_next(context
->pipeline
);
493 * handle load variant
495 static void _ocf_mngt_init_instance_clean_load(
496 struct ocf_cache_attach_context
*context
)
498 ocf_cache_t cache
= context
->cache
;
500 ocf_metadata_load_all(cache
,
501 _ocf_mngt_init_instance_load_complete
, context
);
505 * handle recovery variant
507 static void _ocf_mngt_init_instance_recovery(
508 struct ocf_cache_attach_context
*context
)
510 ocf_cache_t cache
= context
->cache
;
512 init_attached_data_structures_recovery(cache
);
514 ocf_cache_log(cache
, log_warn
,
515 "ERROR: Cache device did not shut down properly!\n");
517 ocf_cache_log(cache
, log_info
, "Initiating recovery sequence...\n");
519 ocf_metadata_load_recovery(cache
,
520 _ocf_mngt_init_instance_load_complete
, context
);
523 static void _ocf_mngt_init_instance_load(
524 struct ocf_cache_attach_context
*context
)
526 ocf_cache_t cache
= context
->cache
;
529 OCF_ASSERT_PLUGGED(cache
);
531 ret
= _ocf_mngt_init_instance_add_cores(context
);
533 OCF_PL_FINISH_RET(context
->pipeline
, ret
);
535 if (context
->metadata
.shutdown_status
== ocf_metadata_clean_shutdown
)
536 _ocf_mngt_init_instance_clean_load(context
);
538 _ocf_mngt_init_instance_recovery(context
);
542 * @brief allocate memory for new cache, add it to cache queue, set initial
543 * values and running state
545 static int _ocf_mngt_init_new_cache(struct ocf_cache_mngt_init_params
*params
)
547 ocf_cache_t cache
= env_vzalloc(sizeof(*cache
));
551 return -OCF_ERR_NO_MEM
;
553 if (ocf_mngt_cache_lock_init(cache
)) {
554 result
= -OCF_ERR_NO_MEM
;
558 /* Lock cache during setup - this trylock should always succeed */
559 ENV_BUG_ON(ocf_mngt_cache_trylock(cache
));
561 if (env_mutex_init(&cache
->flush_mutex
)) {
562 result
= -OCF_ERR_NO_MEM
;
566 ENV_BUG_ON(!ocf_refcnt_inc(&cache
->refcnt
.cache
));
568 /* start with freezed metadata ref counter to indicate detached device*/
569 ocf_refcnt_freeze(&cache
->refcnt
.metadata
);
571 env_atomic_set(&(cache
->last_access_ms
),
572 env_ticks_to_msecs(env_get_tick_count()));
574 env_bit_set(ocf_cache_state_initializing
, &cache
->cache_state
);
576 params
->cache
= cache
;
577 params
->flags
.cache_alloc
= true;
582 ocf_mngt_cache_lock_deinit(cache
);
589 static void _ocf_mngt_attach_cache_device(ocf_pipeline_t pipeline
,
590 void *priv
, ocf_pipeline_arg_t arg
)
592 struct ocf_cache_attach_context
*context
= priv
;
593 ocf_cache_t cache
= context
->cache
;
594 ocf_volume_type_t type
;
597 cache
->device
= env_vzalloc(sizeof(*cache
->device
));
599 OCF_PL_FINISH_RET(context
->pipeline
, -OCF_ERR_NO_MEM
);
601 context
->flags
.device_alloc
= true;
603 cache
->device
->init_mode
= context
->init_mode
;
605 /* Prepare UUID of cache volume */
606 type
= ocf_ctx_get_volume_type(cache
->owner
, context
->cfg
.volume_type
);
608 OCF_PL_FINISH_RET(context
->pipeline
,
609 -OCF_ERR_INVAL_VOLUME_TYPE
);
612 ret
= ocf_volume_init(&cache
->device
->volume
, type
,
613 &context
->cfg
.uuid
, true);
615 OCF_PL_FINISH_RET(context
->pipeline
, ret
);
617 cache
->device
->volume
.cache
= cache
;
618 context
->flags
.volume_inited
= true;
621 * Open cache device, It has to be done first because metadata service
622 * need to know size of cache device.
624 ret
= ocf_volume_open(&cache
->device
->volume
,
625 context
->cfg
.volume_params
);
627 ocf_cache_log(cache
, log_err
, "ERROR: Cache not available\n");
628 OCF_PL_FINISH_RET(context
->pipeline
, ret
);
630 context
->flags
.device_opened
= true;
632 context
->volume_size
= ocf_volume_get_length(&cache
->device
->volume
);
634 /* Check minimum size of cache device */
635 if (context
->volume_size
< OCF_CACHE_SIZE_MIN
) {
636 ocf_cache_log(cache
, log_err
, "ERROR: Cache cache size must "
637 "be at least %llu [MiB]\n", OCF_CACHE_SIZE_MIN
/ MiB
);
638 OCF_PL_FINISH_RET(context
->pipeline
, -OCF_ERR_INVAL_CACHE_DEV
);
641 ocf_pipeline_next(pipeline
);
645 * @brief prepare cache for init. This is first step towards initializing
648 static int _ocf_mngt_init_prepare_cache(struct ocf_cache_mngt_init_params
*param
,
649 struct ocf_mngt_cache_config
*cfg
)
654 /* Check if cache with specified name exists */
655 ret
= ocf_mngt_cache_get_by_name(param
->ctx
, cfg
->name
,
656 OCF_CACHE_NAME_SIZE
, &cache
);
658 ocf_mngt_cache_put(cache
);
659 /* Cache already exist */
660 ret
= -OCF_ERR_CACHE_EXIST
;
664 ocf_log(param
->ctx
, log_info
, "Inserting cache %s\n", cfg
->name
);
666 ret
= _ocf_mngt_init_new_cache(param
);
670 cache
= param
->cache
;
672 cache
->backfill
.max_queue_size
= cfg
->backfill
.max_queue_size
;
673 cache
->backfill
.queue_unblock_size
= cfg
->backfill
.queue_unblock_size
;
675 param
->flags
.cache_locked
= true;
677 cache
->pt_unaligned_io
= cfg
->pt_unaligned_io
;
678 cache
->use_submit_io_fast
= cfg
->use_submit_io_fast
;
680 cache
->eviction_policy_init
= cfg
->eviction_policy
;
681 cache
->metadata
.is_volatile
= cfg
->metadata_volatile
;
687 static void _ocf_mngt_test_volume_initial_write_complete(void *priv
, int error
)
689 struct ocf_cache_attach_context
*context
= priv
;
691 OCF_PL_NEXT_ON_SUCCESS_RET(context
->test
.pipeline
, error
);
694 static void _ocf_mngt_test_volume_initial_write(
695 ocf_pipeline_t test_pipeline
, void *priv
, ocf_pipeline_arg_t arg
)
697 struct ocf_cache_attach_context
*context
= priv
;
698 ocf_cache_t cache
= context
->cache
;
701 * Write buffer filled with "1"
704 ENV_BUG_ON(env_memset(context
->test
.rw_buffer
, PAGE_SIZE
, 1));
706 ocf_submit_cache_page(cache
, context
->test
.reserved_lba_addr
,
707 OCF_WRITE
, context
->test
.rw_buffer
,
708 _ocf_mngt_test_volume_initial_write_complete
, context
);
711 static void _ocf_mngt_test_volume_first_read_complete(void *priv
, int error
)
713 struct ocf_cache_attach_context
*context
= priv
;
714 ocf_cache_t cache
= context
->cache
;
718 OCF_PL_FINISH_RET(context
->test
.pipeline
, error
);
720 ret
= env_memcmp(context
->test
.rw_buffer
, PAGE_SIZE
,
721 context
->test
.cmp_buffer
, PAGE_SIZE
, &diff
);
723 OCF_PL_FINISH_RET(context
->test
.pipeline
, ret
);
726 /* we read back different data than what we had just
727 written - this is fatal error */
728 OCF_PL_FINISH_RET(context
->test
.pipeline
, -OCF_ERR_IO
);
731 if (!ocf_volume_is_atomic(&cache
->device
->volume
)) {
732 /* If not atomic, stop testing here */
733 OCF_PL_FINISH_RET(context
->test
.pipeline
, 0);
736 ocf_pipeline_next(context
->test
.pipeline
);
739 static void _ocf_mngt_test_volume_first_read(
740 ocf_pipeline_t test_pipeline
, void *priv
, ocf_pipeline_arg_t arg
)
742 struct ocf_cache_attach_context
*context
= priv
;
743 ocf_cache_t cache
= context
->cache
;
749 ENV_BUG_ON(env_memset(context
->test
.rw_buffer
, PAGE_SIZE
, 0));
750 ENV_BUG_ON(env_memset(context
->test
.cmp_buffer
, PAGE_SIZE
, 1));
752 ocf_submit_cache_page(cache
, context
->test
.reserved_lba_addr
,
753 OCF_READ
, context
->test
.rw_buffer
,
754 _ocf_mngt_test_volume_first_read_complete
, context
);
757 static void _ocf_mngt_test_volume_discard_complete(void *priv
, int error
)
759 struct ocf_cache_attach_context
*context
= priv
;
761 OCF_PL_NEXT_ON_SUCCESS_RET(context
->test
.pipeline
, error
);
764 static void _ocf_mngt_test_volume_discard(
765 ocf_pipeline_t test_pipeline
, void *priv
, ocf_pipeline_arg_t arg
)
767 struct ocf_cache_attach_context
*context
= priv
;
768 ocf_cache_t cache
= context
->cache
;
771 * Submit discard request
774 ocf_submit_volume_discard(&cache
->device
->volume
,
775 context
->test
.reserved_lba_addr
, PAGE_SIZE
,
776 _ocf_mngt_test_volume_discard_complete
, context
);
779 static void _ocf_mngt_test_volume_second_read_complete(void *priv
, int error
)
781 struct ocf_cache_attach_context
*context
= priv
;
782 ocf_cache_t cache
= context
->cache
;
786 OCF_PL_FINISH_RET(context
->test
.pipeline
, error
);
788 ret
= env_memcmp(context
->test
.rw_buffer
, PAGE_SIZE
,
789 context
->test
.cmp_buffer
, PAGE_SIZE
, &diff
);
791 OCF_PL_FINISH_RET(context
->test
.pipeline
, ret
);
794 /* discard does not cause target adresses to return 0 on
796 cache
->device
->volume
.features
.discard_zeroes
= 0;
799 ocf_pipeline_next(context
->test
.pipeline
);
802 static void _ocf_mngt_test_volume_second_read(
803 ocf_pipeline_t test_pipeline
, void *priv
, ocf_pipeline_arg_t arg
)
805 struct ocf_cache_attach_context
*context
= priv
;
806 ocf_cache_t cache
= context
->cache
;
812 ENV_BUG_ON(env_memset(context
->test
.rw_buffer
, PAGE_SIZE
, 1));
813 ENV_BUG_ON(env_memset(context
->test
.cmp_buffer
, PAGE_SIZE
, 0));
815 ocf_submit_cache_page(cache
, context
->test
.reserved_lba_addr
,
816 OCF_READ
, context
->test
.rw_buffer
,
817 _ocf_mngt_test_volume_second_read_complete
, context
);
820 static void _ocf_mngt_test_volume_finish(ocf_pipeline_t pipeline
,
821 void *priv
, int error
)
823 struct ocf_cache_attach_context
*context
= priv
;
825 env_free(context
->test
.rw_buffer
);
826 env_free(context
->test
.cmp_buffer
);
828 ocf_pipeline_destroy(context
->test
.pipeline
);
830 OCF_PL_NEXT_ON_SUCCESS_RET(context
->pipeline
, error
);
833 struct ocf_pipeline_properties _ocf_mngt_test_volume_pipeline_properties
= {
835 .finish
= _ocf_mngt_test_volume_finish
,
837 OCF_PL_STEP(_ocf_mngt_test_volume_initial_write
),
838 OCF_PL_STEP(_ocf_mngt_test_volume_first_read
),
839 OCF_PL_STEP(_ocf_mngt_test_volume_discard
),
840 OCF_PL_STEP(_ocf_mngt_test_volume_second_read
),
841 OCF_PL_STEP_TERMINATOR(),
845 static void _ocf_mngt_test_volume(ocf_pipeline_t pipeline
,
846 void *priv
, ocf_pipeline_arg_t arg
)
848 struct ocf_cache_attach_context
*context
= priv
;
849 ocf_cache_t cache
= context
->cache
;
850 ocf_pipeline_t test_pipeline
;
853 cache
->device
->volume
.features
.discard_zeroes
= 1;
855 if (!context
->cfg
.perform_test
)
856 OCF_PL_NEXT_RET(pipeline
);
858 context
->test
.reserved_lba_addr
= ocf_metadata_get_reserved_lba(cache
);
860 context
->test
.rw_buffer
= env_malloc(PAGE_SIZE
, ENV_MEM_NORMAL
);
861 if (!context
->test
.rw_buffer
)
862 OCF_PL_FINISH_RET(context
->pipeline
, -OCF_ERR_NO_MEM
);
864 context
->test
.cmp_buffer
= env_malloc(PAGE_SIZE
, ENV_MEM_NORMAL
);
865 if (!context
->test
.cmp_buffer
)
868 result
= ocf_pipeline_create(&test_pipeline
, cache
,
869 &_ocf_mngt_test_volume_pipeline_properties
);
873 ocf_pipeline_set_priv(test_pipeline
, context
);
875 context
->test
.pipeline
= test_pipeline
;
877 OCF_PL_NEXT_RET(test_pipeline
);
880 env_free(context
->test
.rw_buffer
);
882 env_free(context
->test
.cmp_buffer
);
883 OCF_PL_FINISH_RET(context
->pipeline
, -OCF_ERR_NO_MEM
);
887 * Prepare metadata accordingly to mode (for load/recovery read from disk)
889 static void _ocf_mngt_attach_load_properties_end(void *priv
, int error
,
890 struct ocf_metadata_load_properties
*properties
)
892 struct ocf_cache_attach_context
*context
= priv
;
893 ocf_cache_t cache
= context
->cache
;
895 context
->metadata
.status
= error
;
899 * If --load option wasn't used and old metadata doesn't exist on the
900 * device, dismiss error.
902 if (error
== -OCF_ERR_NO_METADATA
&&
903 cache
->device
->init_mode
!= ocf_init_mode_load
)
904 OCF_PL_NEXT_RET(context
->pipeline
);
906 OCF_PL_FINISH_RET(context
->pipeline
, error
);
907 } else if (cache
->device
->init_mode
!= ocf_init_mode_load
) {
909 * To prevent silent metadata overriding, return error if old metadata
910 * was detected but --load flag wasn't used.
912 OCF_PL_FINISH_RET(context
->pipeline
, -OCF_ERR_METADATA_FOUND
);
916 * Check if name loaded from disk is the same as present one.
918 if (env_strncmp(cache
->conf_meta
->name
, OCF_CACHE_NAME_SIZE
,
919 properties
->cache_name
, OCF_CACHE_NAME_SIZE
)) {
920 OCF_PL_FINISH_RET(context
->pipeline
, -OCF_ERR_CACHE_NAME_MISMATCH
);
923 context
->metadata
.shutdown_status
= properties
->shutdown_status
;
924 context
->metadata
.dirty_flushed
= properties
->dirty_flushed
;
926 if (cache
->device
->init_mode
== ocf_init_mode_load
) {
927 context
->metadata
.line_size
= properties
->line_size
;
928 cache
->conf_meta
->metadata_layout
= properties
->layout
;
929 cache
->conf_meta
->cache_mode
= properties
->cache_mode
;
932 ocf_pipeline_next(context
->pipeline
);
935 static void _ocf_mngt_attach_load_properties(ocf_pipeline_t pipeline
,
936 void *priv
, ocf_pipeline_arg_t arg
)
938 struct ocf_cache_attach_context
*context
= priv
;
939 ocf_cache_t cache
= context
->cache
;
941 OCF_ASSERT_PLUGGED(cache
);
943 context
->metadata
.shutdown_status
= ocf_metadata_clean_shutdown
;
944 context
->metadata
.dirty_flushed
= DIRTY_FLUSHED
;
945 context
->metadata
.line_size
= context
->cfg
.cache_line_size
;
947 if (context
->cfg
.force
)
948 OCF_PL_NEXT_RET(context
->pipeline
);
950 if (cache
->device
->init_mode
== ocf_init_mode_metadata_volatile
)
951 OCF_PL_NEXT_RET(context
->pipeline
);
953 ocf_metadata_load_properties(&cache
->device
->volume
,
954 _ocf_mngt_attach_load_properties_end
, context
);
957 static void _ocf_mngt_attach_prepare_metadata(ocf_pipeline_t pipeline
,
958 void *priv
, ocf_pipeline_arg_t arg
)
960 struct ocf_cache_attach_context
*context
= priv
;
961 ocf_cache_t cache
= context
->cache
;
964 if (context
->init_mode
== ocf_init_mode_load
&&
965 context
->metadata
.status
) {
966 OCF_PL_FINISH_RET(context
->pipeline
, -OCF_ERR_START_CACHE_FAIL
);
969 context
->metadata
.line_size
= context
->metadata
.line_size
?:
970 cache
->metadata
.settings
.size
;
973 * Initialize variable size metadata segments
975 if (ocf_metadata_init_variable_size(cache
, context
->volume_size
,
976 context
->metadata
.line_size
,
977 cache
->conf_meta
->metadata_layout
)) {
978 OCF_PL_FINISH_RET(context
->pipeline
, -OCF_ERR_START_CACHE_FAIL
);
980 context
->flags
.attached_metadata_inited
= true;
982 cache
->freelist
= ocf_freelist_init(cache
);
983 if (!cache
->freelist
)
984 OCF_PL_FINISH_RET(context
->pipeline
, -OCF_ERR_START_CACHE_FAIL
);
985 context
->flags
.freelist_inited
= true;
987 ret
= ocf_concurrency_init(cache
);
989 OCF_PL_FINISH_RET(context
->pipeline
, ret
);
991 context
->flags
.concurrency_inited
= 1;
993 ocf_pipeline_next(context
->pipeline
);
997 * @brief initializing cache anew (not loading or recovering)
999 static void _ocf_mngt_init_instance_init(struct ocf_cache_attach_context
*context
)
1001 ocf_cache_t cache
= context
->cache
;
1004 if (!context
->metadata
.status
&& !context
->cfg
.force
&&
1005 context
->metadata
.shutdown_status
!=
1006 ocf_metadata_detached
) {
1008 if (context
->metadata
.shutdown_status
!=
1009 ocf_metadata_clean_shutdown
) {
1010 ocf_cache_log(cache
, log_err
, DIRTY_SHUTDOWN_ERROR_MSG
);
1011 OCF_PL_FINISH_RET(context
->pipeline
,
1012 -OCF_ERR_DIRTY_SHUTDOWN
);
1015 if (context
->metadata
.dirty_flushed
== DIRTY_NOT_FLUSHED
) {
1016 ocf_cache_log(cache
, log_err
,
1017 DIRTY_NOT_FLUSHED_ERROR_MSG
);
1018 OCF_PL_FINISH_RET(context
->pipeline
,
1019 -OCF_ERR_DIRTY_EXISTS
);
1024 result
= init_attached_data_structures(cache
, cache
->eviction_policy_init
);
1026 OCF_PL_FINISH_RET(context
->pipeline
, result
);
1028 /* In initial cache state there is no dirty data, so all dirty data is
1029 considered to be flushed
1031 cache
->conf_meta
->dirty_flushed
= true;
1033 ocf_pipeline_next(context
->pipeline
);
1036 uint64_t _ocf_mngt_calculate_ram_needed(ocf_cache_t cache
,
1037 ocf_volume_t cache_volume
)
1039 ocf_cache_line_size_t line_size
= ocf_line_size(cache
);
1040 uint64_t volume_size
= ocf_volume_get_length(cache_volume
);
1041 uint64_t const_data_size
;
1042 uint64_t cache_line_no
;
1043 uint64_t data_per_line
;
1044 uint64_t min_free_ram
;
1046 /* Superblock + per core metadata */
1047 const_data_size
= 100 * MiB
;
1049 /* Cache metadata */
1050 cache_line_no
= volume_size
/ line_size
;
1051 data_per_line
= (68 + (2 * (line_size
/ KiB
/ 4)));
1053 min_free_ram
= const_data_size
+ cache_line_no
* data_per_line
;
1055 /* 110% of calculated value */
1056 min_free_ram
= (11 * min_free_ram
) / 10;
1058 return min_free_ram
;
1061 int ocf_mngt_get_ram_needed(ocf_cache_t cache
,
1062 struct ocf_mngt_cache_device_config
*cfg
, uint64_t *ram_needed
)
1064 ocf_volume_t volume
;
1065 ocf_volume_type_t type
;
1068 OCF_CHECK_NULL(cache
);
1069 OCF_CHECK_NULL(cfg
);
1070 OCF_CHECK_NULL(ram_needed
);
1072 type
= ocf_ctx_get_volume_type(cache
->owner
, cfg
->volume_type
);
1074 return -OCF_ERR_INVAL_VOLUME_TYPE
;
1076 result
= ocf_volume_create(&volume
, type
,
1081 result
= ocf_volume_open(volume
, cfg
->volume_params
);
1083 ocf_volume_destroy(volume
);
1087 *ram_needed
= _ocf_mngt_calculate_ram_needed(cache
, volume
);
1089 ocf_volume_close(volume
);
1090 ocf_volume_destroy(volume
);
1096 * @brief for error handling do partial cleanup of datastructures upon
1097 * premature function exit.
1099 * @param ctx OCF context
1100 * @param params - startup params containing initialization status flags.
1103 static void _ocf_mngt_init_handle_error(ocf_ctx_t ctx
,
1104 struct ocf_cache_mngt_init_params
*params
)
1106 ocf_cache_t cache
= params
->cache
;
1108 if (!params
->flags
.cache_alloc
)
1111 if (params
->flags
.metadata_inited
)
1112 ocf_metadata_deinit(cache
);
1114 if (!params
->flags
.added_to_list
)
1117 env_rmutex_lock(&ctx
->lock
);
1119 list_del(&cache
->list
);
1122 env_rmutex_unlock(&ctx
->lock
);
1125 static void _ocf_mngt_attach_handle_error(
1126 struct ocf_cache_attach_context
*context
)
1128 ocf_cache_t cache
= context
->cache
;
1130 if (context
->flags
.cleaner_started
)
1131 ocf_stop_cleaner(cache
);
1133 if (context
->flags
.promotion_initialized
)
1134 __deinit_promotion_policy(cache
);
1136 if (context
->flags
.cores_opened
)
1137 _ocf_mngt_close_all_uninitialized_cores(cache
);
1139 if (context
->flags
.attached_metadata_inited
)
1140 ocf_metadata_deinit_variable_size(cache
);
1142 if (context
->flags
.device_opened
)
1143 ocf_volume_close(&cache
->device
->volume
);
1145 if (context
->flags
.concurrency_inited
)
1146 ocf_concurrency_deinit(cache
);
1148 if (context
->flags
.freelist_inited
)
1149 ocf_freelist_deinit(cache
->freelist
);
1151 if (context
->flags
.volume_inited
)
1152 ocf_volume_deinit(&cache
->device
->volume
);
1154 if (context
->flags
.device_alloc
)
1155 env_vfree(cache
->device
);
1157 ocf_pipeline_destroy(cache
->stop_pipeline
);
1160 static void _ocf_mngt_cache_init(ocf_cache_t cache
,
1161 struct ocf_cache_mngt_init_params
*params
)
1164 * Super block elements initialization
1166 cache
->conf_meta
->cache_mode
= params
->metadata
.cache_mode
;
1167 cache
->conf_meta
->metadata_layout
= params
->metadata
.layout
;
1168 cache
->conf_meta
->promotion_policy_type
= params
->metadata
.promotion_policy
;
1170 INIT_LIST_HEAD(&cache
->io_queues
);
1172 /* Init Partitions */
1173 ocf_part_init(cache
);
1175 __init_cores(cache
);
1176 __init_metadata_version(cache
);
1177 __init_partitions(cache
);
1180 static int _ocf_mngt_cache_start(ocf_ctx_t ctx
, ocf_cache_t
*cache
,
1181 struct ocf_mngt_cache_config
*cfg
)
1183 struct ocf_cache_mngt_init_params params
;
1184 ocf_cache_t tmp_cache
;
1187 ENV_BUG_ON(env_memset(¶ms
, sizeof(params
), 0));
1190 params
.metadata
.cache_mode
= cfg
->cache_mode
;
1191 params
.metadata
.layout
= cfg
->metadata_layout
;
1192 params
.metadata
.line_size
= cfg
->cache_line_size
;
1193 params
.metadata_volatile
= cfg
->metadata_volatile
;
1194 params
.metadata
.promotion_policy
= cfg
->promotion_policy
;
1195 params
.locked
= cfg
->locked
;
1197 result
= env_rmutex_lock_interruptible(&ctx
->lock
);
1199 goto _cache_mngt_init_instance_ERROR
;
1202 result
= _ocf_mngt_init_prepare_cache(¶ms
, cfg
);
1204 env_rmutex_unlock(&ctx
->lock
);
1205 goto _cache_mngt_init_instance_ERROR
;
1208 tmp_cache
= params
.cache
;
1209 tmp_cache
->owner
= ctx
;
1212 * Initialize metadata selected segments of metadata in memory
1214 result
= ocf_metadata_init(tmp_cache
, params
.metadata
.line_size
);
1216 env_rmutex_unlock(&ctx
->lock
);
1217 result
= -OCF_ERR_NO_MEM
;
1218 goto _cache_mngt_init_instance_ERROR
;
1220 params
.flags
.metadata_inited
= true;
1222 result
= ocf_cache_set_name(tmp_cache
, cfg
->name
, OCF_CACHE_NAME_SIZE
);
1224 env_rmutex_unlock(&ctx
->lock
);
1225 goto _cache_mngt_init_instance_ERROR
;
1228 list_add_tail(&tmp_cache
->list
, &ctx
->caches
);
1229 params
.flags
.added_to_list
= true;
1230 env_rmutex_unlock(&ctx
->lock
);
1232 result
= ocf_metadata_io_init(tmp_cache
);
1234 goto _cache_mngt_init_instance_ERROR
;
1236 ocf_cache_log(tmp_cache
, log_debug
, "Metadata initialized\n");
1238 _ocf_mngt_cache_init(tmp_cache
, ¶ms
);
1242 if (!params
.locked
) {
1243 /* User did not request to lock cache instance after creation -
1244 unlock it here since we have acquired the lock to
1245 perform management operations. */
1246 ocf_mngt_cache_unlock(tmp_cache
);
1247 params
.flags
.cache_locked
= false;
1254 _cache_mngt_init_instance_ERROR
:
1255 _ocf_mngt_init_handle_error(ctx
, ¶ms
);
1260 static void _ocf_mngt_cache_set_valid(ocf_cache_t cache
)
1263 * Clear initialization state and set the valid bit so we know
1266 env_bit_clear(ocf_cache_state_initializing
, &cache
->cache_state
);
1267 env_bit_set(ocf_cache_state_running
, &cache
->cache_state
);
1270 static void _ocf_mngt_init_attached_nonpersistent(ocf_cache_t cache
)
1272 env_atomic_set(&cache
->fallback_pt_error_counter
, 0);
1275 static void _ocf_mngt_attach_check_ram(ocf_pipeline_t pipeline
,
1276 void *priv
, ocf_pipeline_arg_t arg
)
1278 struct ocf_cache_attach_context
*context
= priv
;
1279 ocf_cache_t cache
= context
->cache
;
1280 uint64_t min_free_ram
;
1283 min_free_ram
= _ocf_mngt_calculate_ram_needed(cache
,
1284 &cache
->device
->volume
);
1286 free_ram
= env_get_free_memory();
1288 if (free_ram
< min_free_ram
) {
1289 ocf_cache_log(cache
, log_err
, "Not enough free RAM for cache "
1290 "metadata to start cache\n");
1291 ocf_cache_log(cache
, log_err
,
1292 "Available RAM: %" ENV_PRIu64
" B\n", free_ram
);
1293 ocf_cache_log(cache
, log_err
, "Needed RAM: %" ENV_PRIu64
" B\n",
1295 OCF_PL_FINISH_RET(pipeline
, -OCF_ERR_NO_FREE_RAM
);
1298 ocf_pipeline_next(pipeline
);
1302 static void _ocf_mngt_attach_load_superblock_complete(void *priv
, int error
)
1304 struct ocf_cache_attach_context
*context
= priv
;
1305 ocf_cache_t cache
= context
->cache
;
1307 if (cache
->conf_meta
->cachelines
!=
1308 ocf_metadata_get_cachelines_count(cache
)) {
1309 ocf_cache_log(cache
, log_err
,
1310 "ERROR: Cache device size mismatch!\n");
1311 OCF_PL_FINISH_RET(context
->pipeline
,
1312 -OCF_ERR_START_CACHE_FAIL
);
1316 ocf_cache_log(cache
, log_err
,
1317 "ERROR: Cannot load cache state\n");
1318 OCF_PL_FINISH_RET(context
->pipeline
,
1319 -OCF_ERR_START_CACHE_FAIL
);
1322 ocf_pipeline_next(context
->pipeline
);
1325 static void _ocf_mngt_attach_load_superblock(ocf_pipeline_t pipeline
,
1326 void *priv
, ocf_pipeline_arg_t arg
)
1328 struct ocf_cache_attach_context
*context
= priv
;
1329 ocf_cache_t cache
= context
->cache
;
1331 if (cache
->device
->init_mode
!= ocf_init_mode_load
)
1332 OCF_PL_NEXT_RET(context
->pipeline
);
1334 ocf_cache_log(cache
, log_info
, "Loading cache state...\n");
1335 ocf_metadata_load_superblock(cache
,
1336 _ocf_mngt_attach_load_superblock_complete
, context
);
1339 static void _ocf_mngt_attach_init_instance(ocf_pipeline_t pipeline
,
1340 void *priv
, ocf_pipeline_arg_t arg
)
1342 struct ocf_cache_attach_context
*context
= priv
;
1343 ocf_cache_t cache
= context
->cache
;
1346 result
= ocf_start_cleaner(cache
);
1348 ocf_cache_log(cache
, log_err
,
1349 "Error while starting cleaner\n");
1350 OCF_PL_FINISH_RET(context
->pipeline
, result
);
1352 context
->flags
.cleaner_started
= true;
1354 result
= ocf_promotion_init(cache
, cache
->conf_meta
->promotion_policy_type
);
1356 ocf_cache_log(cache
, log_err
,
1357 "Cannot initialize promotion policy\n");
1358 OCF_PL_FINISH_RET(context
->pipeline
, result
);
1360 context
->flags
.promotion_initialized
= true;
1362 switch (cache
->device
->init_mode
) {
1363 case ocf_init_mode_init
:
1364 case ocf_init_mode_metadata_volatile
:
1365 _ocf_mngt_init_instance_init(context
);
1367 case ocf_init_mode_load
:
1368 _ocf_mngt_init_instance_load(context
);
1371 OCF_PL_FINISH_RET(context
->pipeline
, -OCF_ERR_INVAL
);
1375 static void _ocf_mngt_attach_flush_metadata_complete(void *priv
, int error
)
1377 struct ocf_cache_attach_context
*context
= priv
;
1378 ocf_cache_t cache
= context
->cache
;
1381 ocf_cache_log(cache
, log_err
,
1382 "ERROR: Cannot save cache state\n");
1383 OCF_PL_FINISH_RET(context
->pipeline
, -OCF_ERR_WRITE_CACHE
);
1386 ocf_pipeline_next(context
->pipeline
);
1389 static void _ocf_mngt_attach_flush_metadata(ocf_pipeline_t pipeline
,
1390 void *priv
, ocf_pipeline_arg_t arg
)
1392 struct ocf_cache_attach_context
*context
= priv
;
1393 ocf_cache_t cache
= context
->cache
;
1395 ocf_metadata_flush_all(cache
,
1396 _ocf_mngt_attach_flush_metadata_complete
, context
);
1399 static void _ocf_mngt_attach_discard_complete(void *priv
, int error
)
1401 struct ocf_cache_attach_context
*context
= priv
;
1402 ocf_cache_t cache
= context
->cache
;
1403 bool discard
= cache
->device
->volume
.features
.discard_zeroes
;
1406 ocf_cache_log(cache
, log_warn
, "%s failed\n",
1407 discard
? "Discarding whole cache device" :
1408 "Overwriting cache with zeroes");
1410 if (ocf_volume_is_atomic(&cache
->device
->volume
)) {
1411 ocf_cache_log(cache
, log_err
, "This step is required"
1412 " for atomic mode!\n");
1413 OCF_PL_FINISH_RET(context
->pipeline
, error
);
1416 ocf_cache_log(cache
, log_warn
, "This may impact cache"
1420 ocf_pipeline_next(context
->pipeline
);
1423 static void _ocf_mngt_attach_discard(ocf_pipeline_t pipeline
,
1424 void *priv
, ocf_pipeline_arg_t arg
)
1426 struct ocf_cache_attach_context
*context
= priv
;
1427 ocf_cache_t cache
= context
->cache
;
1428 uint64_t addr
= cache
->device
->metadata_offset
;
1429 uint64_t length
= ocf_volume_get_length(&cache
->device
->volume
) - addr
;
1430 bool discard
= cache
->device
->volume
.features
.discard_zeroes
;
1432 if (cache
->device
->init_mode
== ocf_init_mode_load
)
1433 OCF_PL_NEXT_RET(context
->pipeline
);
1435 if (!context
->cfg
.discard_on_start
)
1436 OCF_PL_NEXT_RET(context
->pipeline
);
1438 if (!discard
&& ocf_volume_is_atomic(&cache
->device
->volume
)) {
1439 /* discard doesn't zero data - need to explicitly write zeros */
1440 ocf_submit_write_zeros(&cache
->device
->volume
, addr
, length
,
1441 _ocf_mngt_attach_discard_complete
, context
);
1443 /* Discard volume after metadata */
1444 ocf_submit_volume_discard(&cache
->device
->volume
, addr
, length
,
1445 _ocf_mngt_attach_discard_complete
, context
);
1449 static void _ocf_mngt_attach_flush_complete(void *priv
, int error
)
1451 struct ocf_cache_attach_context
*context
= priv
;
1453 OCF_PL_NEXT_ON_SUCCESS_RET(context
->pipeline
, error
);
1456 static void _ocf_mngt_attach_flush(ocf_pipeline_t pipeline
,
1457 void *priv
, ocf_pipeline_arg_t arg
)
1459 struct ocf_cache_attach_context
*context
= priv
;
1460 ocf_cache_t cache
= context
->cache
;
1461 bool discard
= cache
->device
->volume
.features
.discard_zeroes
;
1463 if (!discard
&& ocf_volume_is_atomic(&cache
->device
->volume
)) {
1464 ocf_submit_volume_flush(&cache
->device
->volume
,
1465 _ocf_mngt_attach_flush_complete
, context
);
1467 ocf_pipeline_next(context
->pipeline
);
1471 static void _ocf_mngt_attach_shutdown_status_complete(void *priv
, int error
)
1473 struct ocf_cache_attach_context
*context
= priv
;
1474 ocf_cache_t cache
= context
->cache
;
1477 ocf_cache_log(cache
, log_err
, "Cannot flush shutdown status\n");
1478 OCF_PL_FINISH_RET(context
->pipeline
, -OCF_ERR_WRITE_CACHE
);
1481 ocf_pipeline_next(context
->pipeline
);
1484 static void _ocf_mngt_attach_shutdown_status(ocf_pipeline_t pipeline
,
1485 void *priv
, ocf_pipeline_arg_t arg
)
1487 struct ocf_cache_attach_context
*context
= priv
;
1488 ocf_cache_t cache
= context
->cache
;
1490 /* clear clean shutdown status */
1491 ocf_metadata_set_shutdown_status(cache
, ocf_metadata_dirty_shutdown
,
1492 _ocf_mngt_attach_shutdown_status_complete
, context
);
1495 static void _ocf_mngt_attach_post_init(ocf_pipeline_t pipeline
,
1496 void *priv
, ocf_pipeline_arg_t arg
)
1498 struct ocf_cache_attach_context
*context
= priv
;
1499 ocf_cache_t cache
= context
->cache
;
1501 ocf_cleaner_refcnt_unfreeze(cache
);
1502 ocf_refcnt_unfreeze(&cache
->refcnt
.metadata
);
1504 ocf_cache_log(cache
, log_debug
, "Cache attached\n");
1506 ocf_pipeline_next(context
->pipeline
);
1509 static void _ocf_mngt_cache_attach_finish(ocf_pipeline_t pipeline
,
1510 void *priv
, int error
)
1512 struct ocf_cache_attach_context
*context
= priv
;
1515 _ocf_mngt_attach_handle_error(context
);
1517 context
->cmpl(context
->cache
, context
->priv1
, context
->priv2
, error
);
1519 env_vfree(context
->cfg
.uuid
.data
);
1520 ocf_pipeline_destroy(context
->pipeline
);
1523 struct ocf_pipeline_properties _ocf_mngt_cache_attach_pipeline_properties
= {
1524 .priv_size
= sizeof(struct ocf_cache_attach_context
),
1525 .finish
= _ocf_mngt_cache_attach_finish
,
1527 OCF_PL_STEP(_ocf_mngt_attach_cache_device
),
1528 OCF_PL_STEP(_ocf_mngt_attach_check_ram
),
1529 OCF_PL_STEP(_ocf_mngt_attach_load_properties
),
1530 OCF_PL_STEP(_ocf_mngt_attach_prepare_metadata
),
1531 OCF_PL_STEP(_ocf_mngt_test_volume
),
1532 OCF_PL_STEP(_ocf_mngt_attach_load_superblock
),
1533 OCF_PL_STEP(_ocf_mngt_attach_init_instance
),
1534 OCF_PL_STEP(_ocf_mngt_attach_flush_metadata
),
1535 OCF_PL_STEP(_ocf_mngt_attach_discard
),
1536 OCF_PL_STEP(_ocf_mngt_attach_flush
),
1537 OCF_PL_STEP(_ocf_mngt_attach_shutdown_status
),
1538 OCF_PL_STEP(_ocf_mngt_attach_post_init
),
1539 OCF_PL_STEP_TERMINATOR(),
1543 typedef void (*_ocf_mngt_cache_unplug_end_t
)(void *context
, int error
);
1545 struct _ocf_mngt_cache_unplug_context
{
1546 _ocf_mngt_cache_unplug_end_t cmpl
;
1551 struct ocf_mngt_cache_stop_context
{
1552 /* unplug context - this is private structure of _ocf_mngt_cache_unplug,
1553 * it is member of stop context only to reserve memory in advance for
1554 * _ocf_mngt_cache_unplug, eliminating the possibility of ENOMEM error
1555 * at the point where we are effectively unable to handle it */
1556 struct _ocf_mngt_cache_unplug_context unplug_context
;
1558 ocf_mngt_cache_stop_end_t cmpl
;
1560 ocf_pipeline_t pipeline
;
1563 char cache_name
[OCF_CACHE_NAME_SIZE
];
1564 int cache_write_error
;
1567 static void ocf_mngt_cache_stop_wait_metadata_io_finish(void *priv
)
1569 struct ocf_mngt_cache_stop_context
*context
= priv
;
1571 ocf_pipeline_next(context
->pipeline
);
1574 static void ocf_mngt_cache_stop_wait_metadata_io(ocf_pipeline_t pipeline
,
1575 void *priv
, ocf_pipeline_arg_t arg
)
1577 struct ocf_mngt_cache_stop_context
*context
= priv
;
1578 ocf_cache_t cache
= context
->cache
;
1580 ocf_refcnt_freeze(&cache
->refcnt
.metadata
);
1581 ocf_refcnt_register_zero_cb(&cache
->refcnt
.metadata
,
1582 ocf_mngt_cache_stop_wait_metadata_io_finish
, context
);
1585 static void _ocf_mngt_cache_stop_remove_cores(ocf_cache_t cache
, bool attached
)
1588 ocf_core_id_t core_id
;
1589 int no
= cache
->conf_meta
->core_count
;
1591 /* All exported objects removed, cleaning up rest. */
1592 for_each_core_all(cache
, core
, core_id
) {
1593 if (!env_bit_test(core_id
, cache
->conf_meta
->valid_core_bitmap
))
1596 cache_mngt_core_remove_from_cache(core
);
1598 cache_mngt_core_remove_from_cleaning_pol(core
);
1599 cache_mngt_core_close(core
);
1603 ENV_BUG_ON(cache
->conf_meta
->core_count
!= 0);
1606 static void ocf_mngt_cache_stop_remove_cores(ocf_pipeline_t pipeline
,
1607 void *priv
, ocf_pipeline_arg_t arg
)
1609 struct ocf_mngt_cache_stop_context
*context
= priv
;
1610 ocf_cache_t cache
= context
->cache
;
1612 _ocf_mngt_cache_stop_remove_cores(cache
, true);
1614 ocf_pipeline_next(pipeline
);
1617 static void ocf_mngt_cache_stop_unplug_complete(void *priv
, int error
)
1619 struct ocf_mngt_cache_stop_context
*context
= priv
;
1622 ENV_BUG_ON(error
!= -OCF_ERR_WRITE_CACHE
);
1623 context
->cache_write_error
= error
;
1626 ocf_pipeline_next(context
->pipeline
);
1629 static void _ocf_mngt_cache_unplug(ocf_cache_t cache
, bool stop
,
1630 struct _ocf_mngt_cache_unplug_context
*context
,
1631 _ocf_mngt_cache_unplug_end_t cmpl
, void *priv
);
1633 static void ocf_mngt_cache_stop_unplug(ocf_pipeline_t pipeline
,
1634 void *priv
, ocf_pipeline_arg_t arg
)
1636 struct ocf_mngt_cache_stop_context
*context
= priv
;
1637 ocf_cache_t cache
= context
->cache
;
1639 _ocf_mngt_cache_unplug(cache
, true, &context
->unplug_context
,
1640 ocf_mngt_cache_stop_unplug_complete
, context
);
1643 static void _ocf_mngt_cache_put_io_queues(ocf_cache_t cache
)
1645 ocf_queue_t queue
, tmp_queue
;
1647 list_for_each_entry_safe(queue
, tmp_queue
, &cache
->io_queues
, list
)
1648 ocf_queue_put(queue
);
1651 static void ocf_mngt_cache_stop_put_io_queues(ocf_pipeline_t pipeline
,
1652 void *priv
, ocf_pipeline_arg_t arg
)
1654 struct ocf_mngt_cache_stop_context
*context
= priv
;
1655 ocf_cache_t cache
= context
->cache
;
1657 _ocf_mngt_cache_put_io_queues(cache
);
1659 ocf_pipeline_next(pipeline
);
1662 static void ocf_mngt_cache_remove(ocf_ctx_t ctx
, ocf_cache_t cache
)
1664 /* Mark device uninitialized */
1665 ocf_refcnt_freeze(&cache
->refcnt
.cache
);
1667 /* Deinitialize locks */
1668 ocf_mngt_cache_lock_deinit(cache
);
1669 env_mutex_destroy(&cache
->flush_mutex
);
1671 /* Remove cache from the list */
1672 env_rmutex_lock(&ctx
->lock
);
1673 list_del(&cache
->list
);
1674 env_rmutex_unlock(&ctx
->lock
);
1677 static void ocf_mngt_cache_stop_finish(ocf_pipeline_t pipeline
,
1678 void *priv
, int error
)
1680 struct ocf_mngt_cache_stop_context
*context
= priv
;
1681 ocf_cache_t cache
= context
->cache
;
1682 ocf_ctx_t ctx
= context
->ctx
;
1684 ocf_mngt_cache_stop_end_t pipeline_cmpl
;
1685 void *completion_priv
;
1688 ocf_mngt_cache_remove(context
->ctx
, cache
);
1690 /* undo metadata counter freeze */
1691 ocf_refcnt_unfreeze(&cache
->refcnt
.metadata
);
1693 env_bit_clear(ocf_cache_state_stopping
, &cache
->cache_state
);
1694 env_bit_set(ocf_cache_state_running
, &cache
->cache_state
);
1698 if (!context
->cache_write_error
) {
1699 ocf_log(ctx
, log_info
,
1700 "Cache %s successfully stopped\n",
1701 context
->cache_name
);
1703 ocf_log(ctx
, log_warn
, "Stopped cache %s with errors\n",
1704 context
->cache_name
);
1707 ocf_log(ctx
, log_err
, "Stopping cache %s failed\n",
1708 context
->cache_name
);
1712 * FIXME: Destroying pipeline before completing management operation is a
1713 * temporary workaround for insufficient object lifetime management in pyocf
1714 * Context must not be referenced after destroying pipeline as this is
1715 * typically freed upon pipeline destroy.
1717 pipeline_error
= error
?: context
->cache_write_error
;
1718 pipeline_cmpl
= context
->cmpl
;
1719 completion_priv
= context
->priv
;
1721 ocf_pipeline_destroy(context
->pipeline
);
1723 pipeline_cmpl(cache
, completion_priv
, pipeline_error
);
1726 /* Finally release cache instance */
1727 ocf_mngt_cache_put(cache
);
1731 struct ocf_pipeline_properties ocf_mngt_cache_stop_pipeline_properties
= {
1732 .priv_size
= sizeof(struct ocf_mngt_cache_stop_context
),
1733 .finish
= ocf_mngt_cache_stop_finish
,
1735 OCF_PL_STEP(ocf_mngt_cache_stop_wait_metadata_io
),
1736 OCF_PL_STEP(ocf_mngt_cache_stop_remove_cores
),
1737 OCF_PL_STEP(ocf_mngt_cache_stop_unplug
),
1738 OCF_PL_STEP(ocf_mngt_cache_stop_put_io_queues
),
1739 OCF_PL_STEP_TERMINATOR(),
1744 static void _ocf_mngt_cache_attach(ocf_cache_t cache
,
1745 struct ocf_mngt_cache_device_config
*cfg
, bool load
,
1746 _ocf_mngt_cache_attach_end_t cmpl
, void *priv1
, void *priv2
)
1748 struct ocf_cache_attach_context
*context
;
1749 ocf_pipeline_t pipeline
;
1753 result
= ocf_pipeline_create(&pipeline
, cache
,
1754 &_ocf_mngt_cache_attach_pipeline_properties
);
1756 OCF_CMPL_RET(cache
, priv1
, priv2
, -OCF_ERR_NO_MEM
);
1758 result
= ocf_pipeline_create(&cache
->stop_pipeline
, cache
,
1759 &ocf_mngt_cache_stop_pipeline_properties
);
1761 ocf_pipeline_destroy(pipeline
);
1762 OCF_CMPL_RET(cache
, priv1
, priv2
, -OCF_ERR_NO_MEM
);
1765 context
= ocf_pipeline_get_priv(pipeline
);
1767 context
->cmpl
= cmpl
;
1768 context
->priv1
= priv1
;
1769 context
->priv2
= priv2
;
1770 context
->pipeline
= pipeline
;
1772 context
->cache
= cache
;
1773 context
->cfg
= *cfg
;
1775 data
= env_vmalloc(cfg
->uuid
.size
);
1777 result
= -OCF_ERR_NO_MEM
;
1781 result
= env_memcpy(data
, cfg
->uuid
.size
, cfg
->uuid
.data
,
1786 context
->cfg
.uuid
.data
= data
;
1788 if (cache
->metadata
.is_volatile
) {
1789 context
->init_mode
= ocf_init_mode_metadata_volatile
;
1791 context
->init_mode
= load
?
1792 ocf_init_mode_load
: ocf_init_mode_init
;
1795 _ocf_mngt_init_attached_nonpersistent(cache
);
1797 OCF_PL_NEXT_RET(pipeline
);
1802 ocf_pipeline_destroy(pipeline
);
1803 ocf_pipeline_destroy(cache
->stop_pipeline
);
1804 OCF_CMPL_RET(cache
, priv1
, priv2
, result
);
1807 static int _ocf_mngt_cache_validate_cfg(struct ocf_mngt_cache_config
*cfg
)
1809 if (!strnlen(cfg
->name
, OCF_CACHE_NAME_SIZE
))
1810 return -OCF_ERR_INVAL
;
1812 if (!ocf_cache_mode_is_valid(cfg
->cache_mode
))
1813 return -OCF_ERR_INVALID_CACHE_MODE
;
1815 if (cfg
->eviction_policy
>= ocf_eviction_max
||
1816 cfg
->eviction_policy
< 0) {
1817 return -OCF_ERR_INVAL
;
1820 if (cfg
->promotion_policy
>= ocf_promotion_max
||
1821 cfg
->promotion_policy
< 0 ) {
1822 return -OCF_ERR_INVAL
;
1825 if (!ocf_cache_line_size_is_valid(cfg
->cache_line_size
))
1826 return -OCF_ERR_INVALID_CACHE_LINE_SIZE
;
1828 if (cfg
->metadata_layout
>= ocf_metadata_layout_max
||
1829 cfg
->metadata_layout
< 0) {
1830 return -OCF_ERR_INVAL
;
1833 if (cfg
->backfill
.queue_unblock_size
> cfg
->backfill
.max_queue_size
)
1834 return -OCF_ERR_INVAL
;
1839 static int _ocf_mngt_cache_validate_device_cfg(
1840 struct ocf_mngt_cache_device_config
*device_cfg
)
1842 if (!device_cfg
->uuid
.data
)
1843 return -OCF_ERR_INVAL
;
1845 if (device_cfg
->uuid
.size
> OCF_VOLUME_UUID_MAX_SIZE
)
1846 return -OCF_ERR_INVAL
;
1848 if (device_cfg
->cache_line_size
!= ocf_cache_line_size_none
&&
1849 !ocf_cache_line_size_is_valid(device_cfg
->cache_line_size
))
1850 return -OCF_ERR_INVALID_CACHE_LINE_SIZE
;
1855 static const char *_ocf_cache_mode_names
[ocf_cache_mode_max
] = {
1856 [ocf_cache_mode_wt
] = "wt",
1857 [ocf_cache_mode_wb
] = "wb",
1858 [ocf_cache_mode_wa
] = "wa",
1859 [ocf_cache_mode_pt
] = "pt",
1860 [ocf_cache_mode_wi
] = "wi",
1861 [ocf_cache_mode_wo
] = "wo",
1864 static const char *_ocf_cache_mode_get_name(ocf_cache_mode_t cache_mode
)
1866 if (!ocf_cache_mode_is_valid(cache_mode
))
1869 return _ocf_cache_mode_names
[cache_mode
];
1872 int ocf_mngt_cache_start(ocf_ctx_t ctx
, ocf_cache_t
*cache
,
1873 struct ocf_mngt_cache_config
*cfg
)
1877 if (!ctx
|| !cache
|| !cfg
)
1878 return -OCF_ERR_INVAL
;
1880 result
= _ocf_mngt_cache_validate_cfg(cfg
);
1884 result
= _ocf_mngt_cache_start(ctx
, cache
, cfg
);
1886 _ocf_mngt_cache_set_valid(*cache
);
1888 ocf_cache_log(*cache
, log_info
, "Successfully added\n");
1889 ocf_cache_log(*cache
, log_info
, "Cache mode : %s\n",
1890 _ocf_cache_mode_get_name(ocf_cache_get_mode(*cache
)));
1892 ocf_log(ctx
, log_err
, "%s: Inserting cache failed\n", cfg
->name
);
1897 int ocf_mngt_cache_set_mngt_queue(ocf_cache_t cache
, ocf_queue_t queue
)
1899 OCF_CHECK_NULL(cache
);
1900 OCF_CHECK_NULL(queue
);
1902 if (cache
->mngt_queue
)
1903 return -OCF_ERR_INVAL
;
1905 ocf_queue_get(queue
);
1906 cache
->mngt_queue
= queue
;
1911 static void _ocf_mngt_cache_attach_complete(ocf_cache_t cache
, void *priv1
,
1912 void *priv2
, int error
)
1914 ocf_mngt_cache_attach_end_t cmpl
= priv1
;
1917 ocf_cache_log(cache
, log_info
, "Successfully attached\n");
1919 ocf_cache_log(cache
, log_err
, "Attaching cache device "
1923 OCF_CMPL_RET(cache
, priv2
, error
);
1926 void ocf_mngt_cache_attach(ocf_cache_t cache
,
1927 struct ocf_mngt_cache_device_config
*cfg
,
1928 ocf_mngt_cache_attach_end_t cmpl
, void *priv
)
1932 OCF_CHECK_NULL(cache
);
1933 OCF_CHECK_NULL(cfg
);
1935 if (!cache
->mngt_queue
)
1936 OCF_CMPL_RET(cache
, priv
, -OCF_ERR_INVAL
);
1938 result
= _ocf_mngt_cache_validate_device_cfg(cfg
);
1940 OCF_CMPL_RET(cache
, priv
, result
);
1942 _ocf_mngt_cache_attach(cache
, cfg
, false,
1943 _ocf_mngt_cache_attach_complete
, cmpl
, priv
);
1946 static void _ocf_mngt_cache_unplug_complete(void *priv
, int error
)
1948 struct _ocf_mngt_cache_unplug_context
*context
= priv
;
1949 ocf_cache_t cache
= context
->cache
;
1951 ocf_volume_close(&cache
->device
->volume
);
1953 ocf_metadata_deinit_variable_size(cache
);
1954 ocf_concurrency_deinit(cache
);
1955 ocf_freelist_deinit(cache
->freelist
);
1957 ocf_volume_deinit(&cache
->device
->volume
);
1959 env_vfree(cache
->device
);
1960 cache
->device
= NULL
;
1962 /* TODO: this should be removed from detach after 'attached' stats
1963 are better separated in statistics */
1964 _ocf_mngt_init_attached_nonpersistent(cache
);
1966 context
->cmpl(context
->priv
, error
? -OCF_ERR_WRITE_CACHE
: 0);
1970 * @brief Unplug caching device from cache instance. Variable size metadata
1971 * containers are deinitialiazed as well as other cacheline related
1972 * structures. Cache volume is closed.
1974 * @param cache OCF cache instance
1975 * @param stop - true if unplugging during stop - in this case we mark
1976 * clean shutdown in metadata and flush all containers.
1977 * - false if the device is to be detached from cache - loading
1978 * metadata from this device will not be possible.
1979 * @param context - context for this call, must be zeroed
1980 * @param cmpl Completion callback
1981 * @param priv Completion context
1983 static void _ocf_mngt_cache_unplug(ocf_cache_t cache
, bool stop
,
1984 struct _ocf_mngt_cache_unplug_context
*context
,
1985 _ocf_mngt_cache_unplug_end_t cmpl
, void *priv
)
1987 ENV_BUG_ON(stop
&& cache
->conf_meta
->core_count
!= 0);
1989 context
->cmpl
= cmpl
;
1990 context
->priv
= priv
;
1991 context
->cache
= cache
;
1993 ocf_stop_cleaner(cache
);
1995 __deinit_cleaning_policy(cache
);
1996 __deinit_promotion_policy(cache
);
1998 if (ocf_mngt_cache_is_dirty(cache
)) {
2001 cache
->conf_meta
->dirty_flushed
= DIRTY_NOT_FLUSHED
;
2003 ocf_cache_log(cache
, log_warn
, "Cache is still dirty. "
2004 "DO NOT USE your core devices until flushing "
2007 cache
->conf_meta
->dirty_flushed
= DIRTY_FLUSHED
;
2011 /* Just set correct shutdown status */
2012 ocf_metadata_set_shutdown_status(cache
, ocf_metadata_detached
,
2013 _ocf_mngt_cache_unplug_complete
, context
);
2015 /* Flush metadata */
2016 ocf_metadata_flush_all(cache
,
2017 _ocf_mngt_cache_unplug_complete
, context
);
2021 static int _ocf_mngt_cache_load_core_log(ocf_core_t core
, void *cntx
)
2023 ocf_core_log(core
, log_info
, "Successfully added\n");
2028 static void _ocf_mngt_cache_load_log(ocf_cache_t cache
)
2030 ocf_cache_mode_t cache_mode
= ocf_cache_get_mode(cache
);
2031 ocf_eviction_t eviction_type
= cache
->conf_meta
->eviction_policy_type
;
2032 ocf_cleaning_t cleaning_type
= cache
->conf_meta
->cleaning_policy_type
;
2033 ocf_promotion_t promotion_type
= cache
->conf_meta
->promotion_policy_type
;
2035 ocf_cache_log(cache
, log_info
, "Successfully loaded\n");
2036 ocf_cache_log(cache
, log_info
, "Cache mode : %s\n",
2037 _ocf_cache_mode_get_name(cache_mode
));
2038 ocf_cache_log(cache
, log_info
, "Eviction policy : %s\n",
2039 evict_policy_ops
[eviction_type
].name
);
2040 ocf_cache_log(cache
, log_info
, "Cleaning policy : %s\n",
2041 cleaning_policy_ops
[cleaning_type
].name
);
2042 ocf_cache_log(cache
, log_info
, "Promotion policy : %s\n",
2043 ocf_promotion_policies
[promotion_type
].name
);
2044 ocf_core_visit(cache
, _ocf_mngt_cache_load_core_log
,
2048 static void _ocf_mngt_cache_load_complete(ocf_cache_t cache
, void *priv1
,
2049 void *priv2
, int error
)
2051 ocf_mngt_cache_load_end_t cmpl
= priv1
;
2054 OCF_CMPL_RET(cache
, priv2
, error
);
2056 _ocf_mngt_cache_set_valid(cache
);
2057 _ocf_mngt_cache_load_log(cache
);
2059 OCF_CMPL_RET(cache
, priv2
, 0);
2062 void ocf_mngt_cache_load(ocf_cache_t cache
,
2063 struct ocf_mngt_cache_device_config
*cfg
,
2064 ocf_mngt_cache_load_end_t cmpl
, void *priv
)
2068 OCF_CHECK_NULL(cache
);
2069 OCF_CHECK_NULL(cfg
);
2071 if (!cache
->mngt_queue
)
2072 OCF_CMPL_RET(cache
, priv
, -OCF_ERR_INVAL
);
2074 /* Load is not allowed in volatile metadata mode */
2075 if (cache
->metadata
.is_volatile
)
2076 OCF_CMPL_RET(cache
, priv
, -EINVAL
);
2078 result
= _ocf_mngt_cache_validate_device_cfg(cfg
);
2080 OCF_CMPL_RET(cache
, priv
, result
);
2082 _ocf_mngt_cache_attach(cache
, cfg
, true,
2083 _ocf_mngt_cache_load_complete
, cmpl
, priv
);
2086 static void ocf_mngt_cache_stop_detached(ocf_cache_t cache
,
2087 ocf_mngt_cache_stop_end_t cmpl
, void *priv
)
2089 _ocf_mngt_cache_stop_remove_cores(cache
, false);
2090 _ocf_mngt_cache_put_io_queues(cache
);
2091 ocf_mngt_cache_remove(cache
->owner
, cache
);
2092 ocf_cache_log(cache
, log_info
, "Cache %s successfully stopped\n",
2093 ocf_cache_get_name(cache
));
2094 cmpl(cache
, priv
, 0);
2095 ocf_mngt_cache_put(cache
);
2098 void ocf_mngt_cache_stop(ocf_cache_t cache
,
2099 ocf_mngt_cache_stop_end_t cmpl
, void *priv
)
2101 struct ocf_mngt_cache_stop_context
*context
;
2102 ocf_pipeline_t pipeline
;
2104 OCF_CHECK_NULL(cache
);
2106 if (!ocf_cache_is_device_attached(cache
)) {
2107 ocf_mngt_cache_stop_detached(cache
, cmpl
, priv
);
2111 ENV_BUG_ON(!cache
->mngt_queue
);
2113 pipeline
= cache
->stop_pipeline
;
2114 context
= ocf_pipeline_get_priv(pipeline
);
2116 context
->cmpl
= cmpl
;
2117 context
->priv
= priv
;
2118 context
->pipeline
= pipeline
;
2119 context
->cache
= cache
;
2120 context
->ctx
= cache
->owner
;
2122 ENV_BUG_ON(env_strncpy(context
->cache_name
, sizeof(context
->cache_name
),
2123 ocf_cache_get_name(cache
), sizeof(context
->cache_name
)));
2125 ocf_cache_log(cache
, log_info
, "Stopping cache\n");
2127 env_bit_set(ocf_cache_state_stopping
, &cache
->cache_state
);
2128 env_bit_clear(ocf_cache_state_running
, &cache
->cache_state
);
2130 ocf_pipeline_next(pipeline
);
2133 struct ocf_mngt_cache_save_context
{
2134 ocf_mngt_cache_save_end_t cmpl
;
2136 ocf_pipeline_t pipeline
;
2140 static void ocf_mngt_cache_save_finish(ocf_pipeline_t pipeline
,
2141 void *priv
, int error
)
2143 struct ocf_mngt_cache_save_context
*context
= priv
;
2145 context
->cmpl(context
->cache
, context
->priv
, error
);
2147 ocf_pipeline_destroy(context
->pipeline
);
2150 struct ocf_pipeline_properties ocf_mngt_cache_save_pipeline_properties
= {
2151 .priv_size
= sizeof(struct ocf_mngt_cache_save_context
),
2152 .finish
= ocf_mngt_cache_save_finish
,
2154 OCF_PL_STEP_TERMINATOR(),
2158 static void ocf_mngt_cache_save_flush_sb_complete(void *priv
, int error
)
2160 struct ocf_mngt_cache_save_context
*context
= priv
;
2161 ocf_cache_t cache
= context
->cache
;
2164 ocf_cache_log(cache
, log_err
,
2165 "Failed to flush superblock! Changes "
2166 "in cache config are not persistent!\n");
2167 OCF_PL_FINISH_RET(context
->pipeline
, -OCF_ERR_WRITE_CACHE
);
2170 ocf_pipeline_next(context
->pipeline
);
2173 void ocf_mngt_cache_save(ocf_cache_t cache
,
2174 ocf_mngt_cache_save_end_t cmpl
, void *priv
)
2176 struct ocf_mngt_cache_save_context
*context
;
2177 ocf_pipeline_t pipeline
;
2180 OCF_CHECK_NULL(cache
);
2182 if (!cache
->mngt_queue
)
2183 OCF_CMPL_RET(cache
, priv
, -OCF_ERR_INVAL
);
2185 result
= ocf_pipeline_create(&pipeline
, cache
,
2186 &ocf_mngt_cache_save_pipeline_properties
);
2188 OCF_CMPL_RET(cache
, priv
, result
);
2190 context
= ocf_pipeline_get_priv(pipeline
);
2192 context
->cmpl
= cmpl
;
2193 context
->priv
= priv
;
2194 context
->pipeline
= pipeline
;
2195 context
->cache
= cache
;
2197 ocf_metadata_flush_superblock(cache
,
2198 ocf_mngt_cache_save_flush_sb_complete
, context
);
2201 static void _cache_mngt_update_initial_dirty_clines(ocf_cache_t cache
)
2204 ocf_core_id_t core_id
;
2206 for_each_core(cache
, core
, core_id
) {
2207 env_atomic_set(&core
->runtime_meta
->initial_dirty_clines
,
2208 env_atomic_read(&core
->runtime_meta
->
2214 static int _cache_mngt_set_cache_mode(ocf_cache_t cache
, ocf_cache_mode_t mode
)
2216 ocf_cache_mode_t mode_old
= cache
->conf_meta
->cache_mode
;
2218 /* Check if IO interface type is valid */
2219 if (!ocf_cache_mode_is_valid(mode
))
2220 return -OCF_ERR_INVAL
;
2222 if (mode
== mode_old
) {
2223 ocf_cache_log(cache
, log_info
, "Cache mode '%s' is already set\n",
2224 ocf_get_io_iface_name(mode
));
2228 cache
->conf_meta
->cache_mode
= mode
;
2230 if (ocf_mngt_cache_mode_has_lazy_write(mode_old
) &&
2231 !ocf_mngt_cache_mode_has_lazy_write(mode
)) {
2232 _cache_mngt_update_initial_dirty_clines(cache
);
2235 ocf_cache_log(cache
, log_info
, "Changing cache mode from '%s' to '%s' "
2236 "successful\n", ocf_get_io_iface_name(mode_old
),
2237 ocf_get_io_iface_name(mode
));
2242 int ocf_mngt_cache_set_mode(ocf_cache_t cache
, ocf_cache_mode_t mode
)
2246 OCF_CHECK_NULL(cache
);
2248 if (!ocf_cache_mode_is_valid(mode
)) {
2249 ocf_cache_log(cache
, log_err
, "Cache mode %u is invalid\n",
2251 return -OCF_ERR_INVAL
;
2254 result
= _cache_mngt_set_cache_mode(cache
, mode
);
2257 const char *name
= ocf_get_io_iface_name(mode
);
2259 ocf_cache_log(cache
, log_err
, "Setting cache mode '%s' "
2266 int ocf_mngt_cache_promotion_set_policy(ocf_cache_t cache
, ocf_promotion_t type
)
2270 ocf_metadata_start_exclusive_access(&cache
->metadata
.lock
);
2272 result
= ocf_promotion_set_policy(cache
->promotion_policy
, type
);
2274 ocf_metadata_end_exclusive_access(&cache
->metadata
.lock
);
2279 ocf_promotion_t
ocf_mngt_cache_promotion_get_policy(ocf_cache_t cache
)
2281 ocf_promotion_t result
;
2283 ocf_metadata_start_shared_access(&cache
->metadata
.lock
);
2285 result
= cache
->conf_meta
->promotion_policy_type
;
2287 ocf_metadata_end_shared_access(&cache
->metadata
.lock
);
2292 int ocf_mngt_cache_promotion_get_param(ocf_cache_t cache
, ocf_promotion_t type
,
2293 uint8_t param_id
, uint32_t *param_value
)
2297 ocf_metadata_start_shared_access(&cache
->metadata
.lock
);
2299 result
= ocf_promotion_get_param(cache
, type
, param_id
, param_value
);
2301 ocf_metadata_end_shared_access(&cache
->metadata
.lock
);
2306 int ocf_mngt_cache_promotion_set_param(ocf_cache_t cache
, ocf_promotion_t type
,
2307 uint8_t param_id
, uint32_t param_value
)
2311 ocf_metadata_start_exclusive_access(&cache
->metadata
.lock
);
2313 result
= ocf_promotion_set_param(cache
, type
, param_id
, param_value
);
2315 ocf_metadata_end_exclusive_access(&cache
->metadata
.lock
);
2320 int ocf_mngt_cache_reset_fallback_pt_error_counter(ocf_cache_t cache
)
2322 OCF_CHECK_NULL(cache
);
2324 if (ocf_fallback_pt_is_on(cache
)) {
2325 ocf_cache_log(cache
, log_info
,
2326 "Fallback Pass Through inactive\n");
2329 env_atomic_set(&cache
->fallback_pt_error_counter
, 0);
2334 int ocf_mngt_cache_set_fallback_pt_error_threshold(ocf_cache_t cache
,
2335 uint32_t new_threshold
)
2337 bool old_fallback_pt_state
, new_fallback_pt_state
;
2339 OCF_CHECK_NULL(cache
);
2341 if (new_threshold
> OCF_CACHE_FALLBACK_PT_MAX_ERROR_THRESHOLD
)
2342 return -OCF_ERR_INVAL
;
2344 old_fallback_pt_state
= ocf_fallback_pt_is_on(cache
);
2346 cache
->fallback_pt_error_threshold
= new_threshold
;
2348 new_fallback_pt_state
= ocf_fallback_pt_is_on(cache
);
2350 if (old_fallback_pt_state
!= new_fallback_pt_state
) {
2351 if (new_fallback_pt_state
) {
2352 ocf_cache_log(cache
, log_info
, "Error threshold reached. "
2353 "Fallback Pass Through activated\n");
2355 ocf_cache_log(cache
, log_info
, "Fallback Pass Through "
2363 int ocf_mngt_cache_get_fallback_pt_error_threshold(ocf_cache_t cache
,
2364 uint32_t *threshold
)
2366 OCF_CHECK_NULL(cache
);
2367 OCF_CHECK_NULL(threshold
);
2369 *threshold
= cache
->fallback_pt_error_threshold
;
2374 struct ocf_mngt_cache_detach_context
{
2375 /* unplug context - this is private structure of _ocf_mngt_cache_unplug,
2376 * it is member of detach context only to reserve memory in advance for
2377 * _ocf_mngt_cache_unplug, eliminating the possibility of ENOMEM error
2378 * at the point where we are effectively unable to handle it */
2379 struct _ocf_mngt_cache_unplug_context unplug_context
;
2381 ocf_mngt_cache_detach_end_t cmpl
;
2383 ocf_pipeline_t pipeline
;
2385 int cache_write_error
;
2386 struct ocf_cleaner_wait_context cleaner_wait
;
2389 static void ocf_mngt_cache_detach_flush_cmpl(ocf_cache_t cache
,
2390 void *priv
, int error
)
2392 struct ocf_mngt_cache_detach_context
*context
= priv
;
2394 OCF_PL_NEXT_ON_SUCCESS_RET(context
->pipeline
, error
);
2397 static void ocf_mngt_cache_detach_flush(ocf_pipeline_t pipeline
,
2398 void *priv
, ocf_pipeline_arg_t arg
)
2400 struct ocf_mngt_cache_detach_context
*context
= priv
;
2401 ocf_cache_t cache
= context
->cache
;
2403 ocf_mngt_cache_flush(cache
, ocf_mngt_cache_detach_flush_cmpl
, context
);
2406 static void ocf_mngt_cache_detach_stop_cache_io_finish(void *priv
)
2408 struct ocf_mngt_cache_detach_context
*context
= priv
;
2409 ocf_pipeline_next(context
->pipeline
);
2412 static void ocf_mngt_cache_detach_stop_cache_io(ocf_pipeline_t pipeline
,
2413 void *priv
, ocf_pipeline_arg_t arg
)
2415 struct ocf_mngt_cache_detach_context
*context
= priv
;
2416 ocf_cache_t cache
= context
->cache
;
2418 ocf_refcnt_freeze(&cache
->refcnt
.metadata
);
2419 ocf_refcnt_register_zero_cb(&cache
->refcnt
.metadata
,
2420 ocf_mngt_cache_detach_stop_cache_io_finish
, context
);
2423 static void ocf_mngt_cache_detach_stop_cleaner_io_finish(void *priv
)
2425 ocf_pipeline_t pipeline
= priv
;
2426 ocf_pipeline_next(pipeline
);
2429 static void ocf_mngt_cache_detach_stop_cleaner_io(ocf_pipeline_t pipeline
,
2430 void *priv
, ocf_pipeline_arg_t arg
)
2432 struct ocf_mngt_cache_detach_context
*context
= priv
;
2433 ocf_cache_t cache
= context
->cache
;
2435 ocf_cleaner_refcnt_freeze(cache
);
2436 ocf_cleaner_refcnt_register_zero_cb(cache
, &context
->cleaner_wait
,
2437 ocf_mngt_cache_detach_stop_cleaner_io_finish
,
2441 static void ocf_mngt_cache_detach_update_metadata(ocf_pipeline_t pipeline
,
2442 void *priv
, ocf_pipeline_arg_t arg
)
2444 struct ocf_mngt_cache_detach_context
*context
= priv
;
2445 ocf_cache_t cache
= context
->cache
;
2447 ocf_core_id_t core_id
;
2448 int no
= cache
->conf_meta
->core_count
;
2450 /* remove cacheline metadata and cleaning policy meta for all cores */
2451 for_each_core_metadata(cache
, core
, core_id
) {
2452 cache_mngt_core_deinit_attached_meta(core
);
2453 cache_mngt_core_remove_from_cleaning_pol(core
);
2458 ocf_pipeline_next(context
->pipeline
);
2461 static void ocf_mngt_cache_detach_unplug_complete(void *priv
, int error
)
2463 struct ocf_mngt_cache_detach_context
*context
= priv
;
2466 ENV_BUG_ON(error
!= -OCF_ERR_WRITE_CACHE
);
2467 context
->cache_write_error
= error
;
2470 ocf_pipeline_next(context
->pipeline
);
2473 static void ocf_mngt_cache_detach_unplug(ocf_pipeline_t pipeline
,
2474 void *priv
, ocf_pipeline_arg_t arg
)
2476 struct ocf_mngt_cache_detach_context
*context
= priv
;
2477 ocf_cache_t cache
= context
->cache
;
2479 /* Do the actual detach - deinit cacheline metadata,
2480 * stop cleaner thread and close cache bottom device */
2481 _ocf_mngt_cache_unplug(cache
, false, &context
->unplug_context
,
2482 ocf_mngt_cache_detach_unplug_complete
, context
);
2485 static void ocf_mngt_cache_detach_finish(ocf_pipeline_t pipeline
,
2486 void *priv
, int error
)
2488 struct ocf_mngt_cache_detach_context
*context
= priv
;
2489 ocf_cache_t cache
= context
->cache
;
2491 ocf_refcnt_unfreeze(&cache
->refcnt
.dirty
);
2494 if (!context
->cache_write_error
) {
2495 ocf_cache_log(cache
, log_info
,
2496 "Device successfully detached\n");
2498 ocf_cache_log(cache
, log_warn
,
2499 "Device detached with errors\n");
2502 ocf_cache_log(cache
, log_err
,
2503 "Detaching device failed\n");
2506 context
->cmpl(cache
, context
->priv
,
2507 error
?: context
->cache_write_error
);
2509 ocf_pipeline_destroy(context
->pipeline
);
2510 ocf_pipeline_destroy(cache
->stop_pipeline
);
2513 struct ocf_pipeline_properties ocf_mngt_cache_detach_pipeline_properties
= {
2514 .priv_size
= sizeof(struct ocf_mngt_cache_detach_context
),
2515 .finish
= ocf_mngt_cache_detach_finish
,
2517 OCF_PL_STEP(ocf_mngt_cache_detach_flush
),
2518 OCF_PL_STEP(ocf_mngt_cache_detach_stop_cache_io
),
2519 OCF_PL_STEP(ocf_mngt_cache_detach_stop_cleaner_io
),
2520 OCF_PL_STEP(ocf_mngt_cache_detach_update_metadata
),
2521 OCF_PL_STEP(ocf_mngt_cache_detach_unplug
),
2522 OCF_PL_STEP_TERMINATOR(),
2526 void ocf_mngt_cache_detach(ocf_cache_t cache
,
2527 ocf_mngt_cache_detach_end_t cmpl
, void *priv
)
2529 struct ocf_mngt_cache_detach_context
*context
;
2530 ocf_pipeline_t pipeline
;
2533 OCF_CHECK_NULL(cache
);
2535 if (!cache
->mngt_queue
)
2536 OCF_CMPL_RET(cache
, priv
, -OCF_ERR_INVAL
);
2538 if (!ocf_cache_is_device_attached(cache
))
2539 OCF_CMPL_RET(cache
, priv
, -OCF_ERR_INVAL
);
2541 result
= ocf_pipeline_create(&pipeline
, cache
,
2542 &ocf_mngt_cache_detach_pipeline_properties
);
2544 OCF_CMPL_RET(cache
, priv
, -OCF_ERR_NO_MEM
);
2546 context
= ocf_pipeline_get_priv(pipeline
);
2548 context
->cmpl
= cmpl
;
2549 context
->priv
= priv
;
2550 context
->pipeline
= pipeline
;
2551 context
->cache
= cache
;
2553 /* prevent dirty io */
2554 ocf_refcnt_freeze(&cache
->refcnt
.dirty
);
2556 ocf_pipeline_next(pipeline
);