/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */
#include <stdint.h>
#include <stdbool.h>

#include "ocf_mngt_common.h"
#include "../ocf_priv.h"
#include "../ocf_ctx_priv.h"
#include "../metadata/metadata.h"
#include "../engine/cache_engine.h"
#include "../utils/utils_req.h"
#include "../utils/utils_device.h"
#include "../eviction/ops.h"
#include "../ocf_logger_priv.h"
#include "../ocf_queue_priv.h"
#include "../engine/engine_common.h"
20 int cache_mng_core_close(ocf_cache_t cache
, ocf_core_id_t core_id
)
22 if (!cache
->core
[core_id
].opened
)
23 return -OCF_ERR_CORE_IN_INACTIVE_STATE
;
25 ocf_volume_close(&cache
->core
[core_id
].volume
);
26 cache
->core
[core_id
].opened
= false;
31 /* Remove core from cleaning policy */
32 void cache_mng_core_remove_from_cleaning_pol(struct ocf_cache
*cache
,
35 ocf_cleaning_t clean_pol_type
;
37 OCF_METADATA_LOCK_WR();
39 clean_pol_type
= cache
->conf_meta
->cleaning_policy_type
;
40 if (cache
->core
[core_id
].opened
) {
41 if (cleaning_policy_ops
[clean_pol_type
].remove_core
) {
42 cleaning_policy_ops
[clean_pol_type
].
43 remove_core(cache
, core_id
);
47 OCF_METADATA_UNLOCK_WR();
50 /* Deinitialize core metadata in attached metadata */
51 void cache_mng_core_deinit_attached_meta(struct ocf_cache
*cache
, int core_id
)
54 uint64_t core_size
= 0;
55 ocf_cleaning_t clean_pol_type
;
58 core
= &cache
->core
[core_id
].volume
;
60 core_size
= ocf_volume_get_length(core
);
64 OCF_METADATA_LOCK_WR();
66 clean_pol_type
= cache
->conf_meta
->cleaning_policy_type
;
69 if (cleaning_policy_ops
[clean_pol_type
].purge_range
) {
70 retry
= cleaning_policy_ops
[clean_pol_type
].purge_range(cache
,
71 core_id
, 0, core_size
);
75 /* Remove from collision_table and Partition. Put in FREELIST */
76 retry
= ocf_metadata_sparse_range(cache
, core_id
, 0,
81 OCF_METADATA_UNLOCK_WR();
83 OCF_METADATA_LOCK_WR();
87 OCF_METADATA_UNLOCK_WR();
90 /* Mark core as removed in metadata */
91 void cache_mng_core_remove_from_meta(struct ocf_cache
*cache
, int core_id
)
93 OCF_METADATA_LOCK_WR();
95 /* In metadata mark data this core was removed from cache */
96 cache
->core_conf_meta
[core_id
].added
= false;
98 /* Clear UUID of core */
99 ocf_metadata_clear_core_uuid(&cache
->core
[core_id
]);
100 cache
->core_conf_meta
[core_id
].seq_no
= OCF_SEQ_NO_INVALID
;
102 OCF_METADATA_UNLOCK_WR();
105 /* Deinit in-memory structures related to this core */
106 void cache_mng_core_remove_from_cache(struct ocf_cache
*cache
, int core_id
)
108 env_free(cache
->core
[core_id
].counters
);
109 cache
->core
[core_id
].counters
= NULL
;
110 env_bit_clear(core_id
, cache
->conf_meta
->valid_core_bitmap
);
112 if (!cache
->core
[core_id
].opened
&&
113 --cache
->ocf_core_inactive_count
== 0) {
114 env_bit_clear(ocf_cache_state_incomplete
, &cache
->cache_state
);
117 cache
->conf_meta
->core_count
--;
120 void ocf_mngt_cache_put(ocf_cache_t cache
)
122 OCF_CHECK_NULL(cache
);
124 if (env_atomic_dec_return(&cache
->ref_count
) == 0) {
125 ocf_metadata_deinit(cache
);
130 int ocf_mngt_cache_get_by_id(ocf_ctx_t ocf_ctx
, ocf_cache_id_t id
, ocf_cache_t
*cache
)
133 struct ocf_cache
*instance
= NULL
;
134 struct ocf_cache
*iter
= NULL
;
136 OCF_CHECK_NULL(ocf_ctx
);
137 OCF_CHECK_NULL(cache
);
141 if ((id
< OCF_CACHE_ID_MIN
) || (id
> OCF_CACHE_ID_MAX
)) {
142 /* Cache id out of range */
143 return -OCF_ERR_INVAL
;
146 /* Lock caches list */
147 env_mutex_lock(&ocf_ctx
->lock
);
149 list_for_each_entry(iter
, &ocf_ctx
->caches
, list
) {
150 if (iter
->cache_id
== id
) {
157 /* if cache is either fully initialized or during recovery */
158 if (instance
->valid_ocf_cache_device_t
) {
159 /* Increase reference counter */
160 env_atomic_inc(&instance
->ref_count
);
162 /* Cache not initialized yet */
167 env_mutex_unlock(&ocf_ctx
->lock
);
170 error
= -OCF_ERR_CACHE_NOT_EXIST
;
177 bool ocf_mngt_is_cache_locked(ocf_cache_t cache
)
179 if (env_rwsem_is_locked(&cache
->lock
))
182 if (env_atomic_read(&cache
->lock_waiter
))
188 static void _ocf_mngt_cache_unlock(ocf_cache_t cache
,
189 void (*unlock_fn
)(env_rwsem
*s
))
191 unlock_fn(&cache
->lock
);
192 ocf_mngt_cache_put(cache
);
195 void ocf_mngt_cache_unlock(ocf_cache_t cache
)
197 OCF_CHECK_NULL(cache
);
198 _ocf_mngt_cache_unlock(cache
, env_rwsem_up_write
);
201 void ocf_mngt_cache_read_unlock(ocf_cache_t cache
)
203 OCF_CHECK_NULL(cache
);
204 _ocf_mngt_cache_unlock(cache
, env_rwsem_up_read
);
207 static int _ocf_mngt_cache_lock(ocf_cache_t cache
, int (*lock_fn
)(env_rwsem
*s
),
208 void (*unlock_fn
)(env_rwsem
*s
))
212 /* Increment reference counter */
213 env_atomic_inc(&cache
->ref_count
);
215 env_atomic_inc(&cache
->lock_waiter
);
216 ret
= lock_fn(&cache
->lock
);
217 env_atomic_dec(&cache
->lock_waiter
);
220 ocf_mngt_cache_put(cache
);
224 if (env_bit_test(ocf_cache_state_stopping
, &cache
->cache_state
)) {
225 /* Cache already stooping, do not allow any operation */
226 ret
= -OCF_ERR_CACHE_NOT_EXIST
;
233 _ocf_mngt_cache_unlock(cache
, unlock_fn
);
238 int ocf_mngt_cache_lock(ocf_cache_t cache
)
240 OCF_CHECK_NULL(cache
);
241 return _ocf_mngt_cache_lock(cache
, env_rwsem_down_write_interruptible
,
245 int ocf_mngt_cache_read_lock(ocf_cache_t cache
)
247 OCF_CHECK_NULL(cache
);
248 return _ocf_mngt_cache_lock(cache
, env_rwsem_down_read_interruptible
,
252 int ocf_mngt_cache_trylock(ocf_cache_t cache
)
254 OCF_CHECK_NULL(cache
);
255 return _ocf_mngt_cache_lock(cache
, env_rwsem_down_write_trylock
,
259 int ocf_mngt_cache_read_trylock(ocf_cache_t cache
)
261 OCF_CHECK_NULL(cache
);
262 return _ocf_mngt_cache_lock(cache
, env_rwsem_down_read_trylock
,
266 /* if cache is either fully initialized or during recovery */
267 static bool _ocf_mngt_cache_try_get(ocf_cache_t cache
)
269 if (!!cache
->valid_ocf_cache_device_t
) {
270 /* Increase reference counter */
271 env_atomic_inc(&cache
->ref_count
);
278 int ocf_mngt_cache_get(ocf_cache_t cache
)
280 if (!_ocf_mngt_cache_try_get(cache
))
281 return -OCF_ERR_CACHE_NOT_AVAIL
;
286 static int _ocf_mngt_cache_get_list_cpy(ocf_ctx_t ocf_ctx
, ocf_cache_t
**list
,
290 uint32_t count
= 0, i
= 0;
296 env_mutex_lock(&ocf_ctx
->lock
);
298 list_for_each_entry(iter
, &ocf_ctx
->caches
, list
) {
305 *list
= env_vmalloc(sizeof((*list
)[0]) * count
);
311 list_for_each_entry(iter
, &ocf_ctx
->caches
, list
) {
313 if (_ocf_mngt_cache_try_get(iter
))
318 /* Update size if cache list */
326 env_mutex_unlock(&ocf_ctx
->lock
);
330 int ocf_mngt_cache_visit(ocf_ctx_t ocf_ctx
, ocf_mngt_cache_visitor_t visitor
,
337 OCF_CHECK_NULL(ocf_ctx
);
338 OCF_CHECK_NULL(visitor
);
340 result
= _ocf_mngt_cache_get_list_cpy(ocf_ctx
, &list
, &size
);
347 /* Iterate over caches */
348 for (i
= 0; i
< size
; i
++) {
349 ocf_cache_t
this = list
[i
];
351 result
= visitor(this, cntx
);
358 for (i
= 0; i
< size
; i
++)
359 ocf_mngt_cache_put(list
[i
]);
366 int ocf_mngt_cache_visit_reverse(ocf_ctx_t ocf_ctx
,
367 ocf_mngt_cache_visitor_t visitor
, void *cntx
)
373 OCF_CHECK_NULL(ocf_ctx
);
374 OCF_CHECK_NULL(visitor
);
376 result
= _ocf_mngt_cache_get_list_cpy(ocf_ctx
, &list
, &size
);
383 /* Iterate over caches */
384 for (i
= size
; i
; i
--) {
385 ocf_cache_t
this = list
[i
- 1];
387 result
= visitor(this, cntx
);
394 for (i
= 0; i
< size
; i
++)
395 ocf_mngt_cache_put(list
[i
]);