/*
 * Imported from ceph.git: ceph/src/spdk/ocf/src/mngt/ocf_mngt_common.c
 * (15.2.0 "Octopus" source import).
 */
1 /*
2 * Copyright(c) 2012-2018 Intel Corporation
3 * SPDX-License-Identifier: BSD-3-Clause-Clear
4 */
5
6 #include "ocf/ocf.h"
7 #include "ocf_mngt_common.h"
8 #include "../ocf_priv.h"
9 #include "../ocf_ctx_priv.h"
10 #include "../metadata/metadata.h"
11 #include "../engine/cache_engine.h"
12 #include "../utils/utils_req.h"
13 #include "../utils/utils_device.h"
14 #include "../eviction/ops.h"
15 #include "../ocf_logger_priv.h"
16 #include "../ocf_queue_priv.h"
17 #include "../engine/engine_common.h"
18
19 /* Close if opened */
20 int cache_mng_core_close(ocf_cache_t cache, ocf_core_id_t core_id)
21 {
22 if (!cache->core[core_id].opened)
23 return -OCF_ERR_CORE_IN_INACTIVE_STATE;
24
25 ocf_volume_close(&cache->core[core_id].volume);
26 cache->core[core_id].opened = false;
27
28 return 0;
29 }
30
31 /* Remove core from cleaning policy */
32 void cache_mng_core_remove_from_cleaning_pol(struct ocf_cache *cache,
33 int core_id)
34 {
35 ocf_cleaning_t clean_pol_type;
36
37 OCF_METADATA_LOCK_WR();
38
39 clean_pol_type = cache->conf_meta->cleaning_policy_type;
40 if (cache->core[core_id].opened) {
41 if (cleaning_policy_ops[clean_pol_type].remove_core) {
42 cleaning_policy_ops[clean_pol_type].
43 remove_core(cache, core_id);
44 }
45 }
46
47 OCF_METADATA_UNLOCK_WR();
48 }
49
50 /* Deinitialize core metadata in attached metadata */
51 void cache_mng_core_deinit_attached_meta(struct ocf_cache *cache, int core_id)
52 {
53 int retry = 1;
54 uint64_t core_size = 0;
55 ocf_cleaning_t clean_pol_type;
56 ocf_volume_t core;
57
58 core = &cache->core[core_id].volume;
59
60 core_size = ocf_volume_get_length(core);
61 if (!core_size)
62 core_size = ~0ULL;
63
64 OCF_METADATA_LOCK_WR();
65
66 clean_pol_type = cache->conf_meta->cleaning_policy_type;
67 while (retry) {
68 retry = 0;
69 if (cleaning_policy_ops[clean_pol_type].purge_range) {
70 retry = cleaning_policy_ops[clean_pol_type].purge_range(cache,
71 core_id, 0, core_size);
72 }
73
74 if (!retry) {
75 /* Remove from collision_table and Partition. Put in FREELIST */
76 retry = ocf_metadata_sparse_range(cache, core_id, 0,
77 core_size);
78 }
79
80 if (retry) {
81 OCF_METADATA_UNLOCK_WR();
82 env_msleep(100);
83 OCF_METADATA_LOCK_WR();
84 }
85 }
86
87 OCF_METADATA_UNLOCK_WR();
88 }
89
/* Mark core as removed in metadata */
void cache_mng_core_remove_from_meta(struct ocf_cache *cache, int core_id)
{
	OCF_METADATA_LOCK_WR();

	/* Flag in config metadata that this core is no longer part of
	 * the cache */
	cache->core_conf_meta[core_id].added = false;

	/* Clear UUID of core and invalidate its sequence number so the
	 * slot no longer identifies this core volume */
	ocf_metadata_clear_core_uuid(&cache->core[core_id]);
	cache->core_conf_meta[core_id].seq_no = OCF_SEQ_NO_INVALID;

	OCF_METADATA_UNLOCK_WR();
}
104
105 /* Deinit in-memory structures related to this core */
106 void cache_mng_core_remove_from_cache(struct ocf_cache *cache, int core_id)
107 {
108 env_free(cache->core[core_id].counters);
109 cache->core[core_id].counters = NULL;
110 env_bit_clear(core_id, cache->conf_meta->valid_core_bitmap);
111
112 if (!cache->core[core_id].opened &&
113 --cache->ocf_core_inactive_count == 0) {
114 env_bit_clear(ocf_cache_state_incomplete, &cache->cache_state);
115 }
116
117 cache->conf_meta->core_count--;
118 }
119
120 void ocf_mngt_cache_put(ocf_cache_t cache)
121 {
122 OCF_CHECK_NULL(cache);
123
124 if (env_atomic_dec_return(&cache->ref_count) == 0) {
125 ocf_metadata_deinit(cache);
126 env_vfree(cache);
127 }
128 }
129
130 int ocf_mngt_cache_get_by_id(ocf_ctx_t ocf_ctx, ocf_cache_id_t id, ocf_cache_t *cache)
131 {
132 int error = 0;
133 struct ocf_cache *instance = NULL;
134 struct ocf_cache *iter = NULL;
135
136 OCF_CHECK_NULL(ocf_ctx);
137 OCF_CHECK_NULL(cache);
138
139 *cache = NULL;
140
141 if ((id < OCF_CACHE_ID_MIN) || (id > OCF_CACHE_ID_MAX)) {
142 /* Cache id out of range */
143 return -OCF_ERR_INVAL;
144 }
145
146 /* Lock caches list */
147 env_mutex_lock(&ocf_ctx->lock);
148
149 list_for_each_entry(iter, &ocf_ctx->caches, list) {
150 if (iter->cache_id == id) {
151 instance = iter;
152 break;
153 }
154 }
155
156 if (instance) {
157 /* if cache is either fully initialized or during recovery */
158 if (instance->valid_ocf_cache_device_t) {
159 /* Increase reference counter */
160 env_atomic_inc(&instance->ref_count);
161 } else {
162 /* Cache not initialized yet */
163 instance = NULL;
164 }
165 }
166
167 env_mutex_unlock(&ocf_ctx->lock);
168
169 if (!instance)
170 error = -OCF_ERR_CACHE_NOT_EXIST;
171 else
172 *cache = instance;
173
174 return error;
175 }
176
177 bool ocf_mngt_is_cache_locked(ocf_cache_t cache)
178 {
179 if (env_rwsem_is_locked(&cache->lock))
180 return true;
181
182 if (env_atomic_read(&cache->lock_waiter))
183 return true;
184
185 return false;
186 }
187
188 static void _ocf_mngt_cache_unlock(ocf_cache_t cache,
189 void (*unlock_fn)(env_rwsem *s))
190 {
191 unlock_fn(&cache->lock);
192 ocf_mngt_cache_put(cache);
193 }
194
195 void ocf_mngt_cache_unlock(ocf_cache_t cache)
196 {
197 OCF_CHECK_NULL(cache);
198 _ocf_mngt_cache_unlock(cache, env_rwsem_up_write);
199 }
200
201 void ocf_mngt_cache_read_unlock(ocf_cache_t cache)
202 {
203 OCF_CHECK_NULL(cache);
204 _ocf_mngt_cache_unlock(cache, env_rwsem_up_read);
205 }
206
207 static int _ocf_mngt_cache_lock(ocf_cache_t cache, int (*lock_fn)(env_rwsem *s),
208 void (*unlock_fn)(env_rwsem *s))
209 {
210 int ret;
211
212 /* Increment reference counter */
213 env_atomic_inc(&cache->ref_count);
214
215 env_atomic_inc(&cache->lock_waiter);
216 ret = lock_fn(&cache->lock);
217 env_atomic_dec(&cache->lock_waiter);
218
219 if (ret) {
220 ocf_mngt_cache_put(cache);
221 return ret;
222 }
223
224 if (env_bit_test(ocf_cache_state_stopping, &cache->cache_state)) {
225 /* Cache already stooping, do not allow any operation */
226 ret = -OCF_ERR_CACHE_NOT_EXIST;
227 goto unlock;
228 }
229
230 return 0;
231
232 unlock:
233 _ocf_mngt_cache_unlock(cache, unlock_fn);
234
235 return ret;
236 }
237
238 int ocf_mngt_cache_lock(ocf_cache_t cache)
239 {
240 OCF_CHECK_NULL(cache);
241 return _ocf_mngt_cache_lock(cache, env_rwsem_down_write_interruptible,
242 env_rwsem_up_write);
243 }
244
245 int ocf_mngt_cache_read_lock(ocf_cache_t cache)
246 {
247 OCF_CHECK_NULL(cache);
248 return _ocf_mngt_cache_lock(cache, env_rwsem_down_read_interruptible,
249 env_rwsem_up_read);
250 }
251
252 int ocf_mngt_cache_trylock(ocf_cache_t cache)
253 {
254 OCF_CHECK_NULL(cache);
255 return _ocf_mngt_cache_lock(cache, env_rwsem_down_write_trylock,
256 env_rwsem_up_write);
257 }
258
259 int ocf_mngt_cache_read_trylock(ocf_cache_t cache)
260 {
261 OCF_CHECK_NULL(cache);
262 return _ocf_mngt_cache_lock(cache, env_rwsem_down_read_trylock,
263 env_rwsem_up_read);
264 }
265
266 /* if cache is either fully initialized or during recovery */
267 static bool _ocf_mngt_cache_try_get(ocf_cache_t cache)
268 {
269 if (!!cache->valid_ocf_cache_device_t) {
270 /* Increase reference counter */
271 env_atomic_inc(&cache->ref_count);
272 return true;
273 }
274
275 return false;
276 }
277
278 int ocf_mngt_cache_get(ocf_cache_t cache)
279 {
280 if (!_ocf_mngt_cache_try_get(cache))
281 return -OCF_ERR_CACHE_NOT_AVAIL;
282
283 return 0;
284 }
285
286 static int _ocf_mngt_cache_get_list_cpy(ocf_ctx_t ocf_ctx, ocf_cache_t **list,
287 uint32_t *size)
288 {
289 int result = 0;
290 uint32_t count = 0, i = 0;
291 ocf_cache_t iter;
292
293 *list = NULL;
294 *size = 0;
295
296 env_mutex_lock(&ocf_ctx->lock);
297
298 list_for_each_entry(iter, &ocf_ctx->caches, list) {
299 count++;
300 }
301
302 if (!count)
303 goto END;
304
305 *list = env_vmalloc(sizeof((*list)[0]) * count);
306 if (*list == NULL) {
307 result = -ENOMEM;
308 goto END;
309 }
310
311 list_for_each_entry(iter, &ocf_ctx->caches, list) {
312
313 if (_ocf_mngt_cache_try_get(iter))
314 (*list)[i++] = iter;
315 }
316
317 if (i) {
318 /* Update size if cache list */
319 *size = i;
320 } else {
321 env_vfree(*list);
322 *list = NULL;
323 }
324
325 END:
326 env_mutex_unlock(&ocf_ctx->lock);
327 return result;
328 }
329
330 int ocf_mngt_cache_visit(ocf_ctx_t ocf_ctx, ocf_mngt_cache_visitor_t visitor,
331 void *cntx)
332 {
333 ocf_cache_t *list;
334 uint32_t size, i;
335 int result;
336
337 OCF_CHECK_NULL(ocf_ctx);
338 OCF_CHECK_NULL(visitor);
339
340 result = _ocf_mngt_cache_get_list_cpy(ocf_ctx, &list, &size);
341 if (result)
342 return result;
343
344 if (size == 0)
345 return 0;
346
347 /* Iterate over caches */
348 for (i = 0; i < size; i++) {
349 ocf_cache_t this = list[i];
350
351 result = visitor(this, cntx);
352
353 if (result)
354 break;
355 }
356
357 /* Put caches */
358 for (i = 0; i < size; i++)
359 ocf_mngt_cache_put(list[i]);
360
361 env_vfree(list);
362
363 return result;
364 }
365
366 int ocf_mngt_cache_visit_reverse(ocf_ctx_t ocf_ctx,
367 ocf_mngt_cache_visitor_t visitor, void *cntx)
368 {
369 ocf_cache_t *list;
370 uint32_t size, i;
371 int result;
372
373 OCF_CHECK_NULL(ocf_ctx);
374 OCF_CHECK_NULL(visitor);
375
376 result = _ocf_mngt_cache_get_list_cpy(ocf_ctx, &list, &size);
377 if (result)
378 return result;
379
380 if (size == 0)
381 return 0;
382
383 /* Iterate over caches */
384 for (i = size; i; i--) {
385 ocf_cache_t this = list[i - 1];
386
387 result = visitor(this, cntx);
388
389 if (result)
390 break;
391 }
392
393 /* Put caches */
394 for (i = 0; i < size; i++)
395 ocf_mngt_cache_put(list[i]);
396
397 env_vfree(list);
398
399 return result;
400 }