/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */
#include "ocf_core_priv.h"
#include "ocf_io_priv.h"
#include "metadata/metadata.h"
#include "engine/cache_engine.h"
#include "utils/utils_req.h"
#include "utils/utils_part.h"
#include "utils/utils_device.h"
#include "ocf_request.h"
#include "ocf_trace_priv.h"
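/*
 * Core (front) volume implementation. Each OCF core exposes a volume to the
 * adopter; I/O submitted to that volume is validated here, wrapped in an
 * ocf_request and handed to the cache engine according to the effective
 * cache mode.
 */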
struct ocf_core_volume {
	ocf_core_t core;
};
ocf_cache_t ocf_core_get_cache(ocf_core_t core)
{
	return core->volume.cache;
}
ocf_volume_t ocf_core_get_volume(ocf_core_t core)
{
	return &core->volume;
}
ocf_volume_t ocf_core_get_front_volume(ocf_core_t core)
{
	return &core->front_volume;
}
ocf_core_id_t ocf_core_get_id(ocf_core_t core)
{
	struct ocf_cache *cache;
	ocf_core_id_t core_id;

	cache = core->volume.cache;
	core_id = core - cache->core;

	return core_id;
}
int ocf_core_set_name(ocf_core_t core, const char *src, size_t src_size)
{
	return env_strncpy(core->name, sizeof(core->name), src, src_size);
}
const char *ocf_core_get_name(ocf_core_t core)
{
	return core->name;
}
ocf_core_state_t ocf_core_get_state(ocf_core_t core)
{
	return core->opened ?
			ocf_core_state_active : ocf_core_state_inactive;
}
bool ocf_core_is_valid(ocf_cache_t cache, ocf_core_id_t id)
{
	OCF_CHECK_NULL(cache);

	if (id > OCF_CORE_ID_MAX || id < OCF_CORE_ID_MIN)
		return false;

	if (!env_bit_test(id, cache->conf_meta->valid_core_bitmap))
		return false;

	return true;
}
int ocf_core_get(ocf_cache_t cache, ocf_core_id_t id, ocf_core_t *core)
{
	OCF_CHECK_NULL(cache);

	if (!ocf_core_is_valid(cache, id))
		return -OCF_ERR_CORE_NOT_AVAIL;

	*core = &cache->core[id];

	return 0;
}
uint32_t ocf_core_get_seq_cutoff_threshold(ocf_core_t core)
{
	uint32_t core_id = ocf_core_get_id(core);
	ocf_cache_t cache = ocf_core_get_cache(core);

	return cache->core_conf_meta[core_id].seq_cutoff_threshold;
}
ocf_seq_cutoff_policy ocf_core_get_seq_cutoff_policy(ocf_core_t core)
{
	uint32_t core_id = ocf_core_get_id(core);
	ocf_cache_t cache = ocf_core_get_cache(core);

	return cache->core_conf_meta[core_id].seq_cutoff_policy;
}
int ocf_core_visit(ocf_cache_t cache, ocf_core_visitor_t visitor, void *cntx,
		bool only_opened)
{
	ocf_core_id_t id;
	int result = 0;

	OCF_CHECK_NULL(cache);

	if (!visitor)
		return -OCF_ERR_INVAL;

	for (id = 0; id < OCF_CORE_MAX; id++) {
		if (!env_bit_test(id, cache->conf_meta->valid_core_bitmap))
			continue;

		if (only_opened && !cache->core[id].opened)
			continue;

		result = visitor(&cache->core[id], cntx);
		if (result)
			break;
	}

	return result;
}
/* *** HELPER FUNCTIONS *** */
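/*
 * Translation helpers: the core-specific I/O state lives in the ocf_io
 * private area, and the core object is stored as the private data of its
 * front volume.
 */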
static inline struct ocf_core_io *ocf_io_to_core_io(struct ocf_io *io)
{
	return ocf_io_get_priv(io);
}
static inline ocf_core_t ocf_volume_to_core(ocf_volume_t volume)
{
	struct ocf_core_volume *core_volume = ocf_volume_get_priv(volume);

	return core_volume->core;
}
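/*
 * Write-back requests are accounted in the cache-wide dirty reference
 * counter for as long as they are in flight. If the counter cannot be
 * taken, the callers below downgrade the request to write-through.
 */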
static inline int ocf_io_set_dirty(ocf_cache_t cache,
		struct ocf_core_io *core_io)
{
	core_io->dirty = ocf_refcnt_inc(&cache->dirty);
	return core_io->dirty ? 0 : -EBUSY;
}
static inline void dec_counter_if_req_was_dirty(struct ocf_core_io *core_io,
		ocf_cache_t cache)
{
	if (!core_io->dirty)
		return;

	core_io->dirty = 0;
	ocf_refcnt_dec(&cache->dirty);
}
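/*
 * Basic sanity checks performed before any core volume I/O is accepted:
 * the request must fit within the core volume, use a valid I/O class and
 * direction, and must not be queued on the management queue.
 */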
static inline int ocf_core_validate_io(struct ocf_io *io)
{
	ocf_core_t core;

	if (io->addr >= ocf_volume_get_length(io->volume))
		return -EINVAL;

	if (io->addr + io->bytes > ocf_volume_get_length(io->volume))
		return -EINVAL;

	if (io->io_class >= OCF_IO_CLASS_MAX)
		return -EINVAL;

	if (io->dir != OCF_READ && io->dir != OCF_WRITE)
		return -EINVAL;

	/* Core volume I/O must not be queued on management queue - this would
	 * break I/O accounting code, resulting in use-after-free type of errors
	 * after cache detach, core remove etc. */
	core = ocf_volume_to_core(io->volume);
	if (io->io_queue == ocf_core_get_cache(core)->mngt_queue)
		return -EINVAL;

	return 0;
}
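/*
 * Common completion callback for the requests created below: it emits the
 * completion trace event, calls back into the adopter via ocf_io_end() and
 * releases the dirty accounting taken at submission time.
 */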
static void ocf_req_complete(struct ocf_request *req, int error)
{
	/* Log trace */
	ocf_trace_io_cmpl(ocf_io_to_core_io(req->io), req->cache);

	/* Complete IO */
	ocf_io_end(req->io, error);

	dec_counter_if_req_was_dirty(ocf_io_to_core_io(req->io), req->cache);

	/* Invalidate OCF IO, it is not valid after completion */
	req->io = NULL;
}
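/*
 * Main I/O entry point for the core volume: validate the I/O, resolve the
 * effective cache mode (falling back to write-through if dirty accounting
 * fails), allocate an ocf_request and hand it to the cache engine.
 */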
void ocf_core_submit_io_mode(struct ocf_io *io, ocf_cache_mode_t cache_mode)
{
	struct ocf_core_io *core_io;
	ocf_req_cache_mode_t req_cache_mode;
	ocf_core_t core;
	ocf_cache_t cache;
	int ret;

	ret = ocf_core_validate_io(io);
	if (ret < 0) {
		io->end(io, ret);
		return;
	}

	core_io = ocf_io_to_core_io(io);

	core = ocf_volume_to_core(io->volume);
	cache = ocf_core_get_cache(core);

	ocf_trace_init_io(core_io, cache);

	if (unlikely(!env_bit_test(ocf_cache_state_running,
			&cache->cache_state))) {
		ocf_io_end(io, -EIO);
		return;
	}

	/* TODO: instead of casting ocf_cache_mode_t to ocf_req_cache_mode_t
	   we can resolve IO interface here and get rid of the latter. */
	req_cache_mode = cache_mode;

	if (cache_mode == ocf_cache_mode_none)
		req_cache_mode = ocf_get_effective_cache_mode(cache, core, io);

	if (req_cache_mode == ocf_req_cache_mode_wb &&
			ocf_io_set_dirty(cache, core_io)) {
		req_cache_mode = ocf_req_cache_mode_wt;
	}

	core_io->req = ocf_req_new(io->io_queue, core, io->addr, io->bytes,
			io->dir);
	if (!core_io->req) {
		dec_counter_if_req_was_dirty(core_io, cache);
		io->end(io, -ENOMEM);
		return;
	}

	if (core_io->req->d2c)
		req_cache_mode = ocf_req_cache_mode_d2c;

	core_io->req->part_id = ocf_part_class2id(cache, io->io_class);
	core_io->req->data = core_io->data;
	core_io->req->complete = ocf_req_complete;
	core_io->req->io = io;

	ocf_seq_cutoff_update(core, core_io->req);

	ocf_core_update_stats(core, io);

	if (io->dir == OCF_WRITE)
		ocf_trace_io(core_io, ocf_event_operation_wr, cache);
	else if (io->dir == OCF_READ)
		ocf_trace_io(core_io, ocf_event_operation_rd, cache);

	ret = ocf_engine_hndl_req(core_io->req, req_cache_mode);
	if (ret) {
		dec_counter_if_req_was_dirty(core_io, cache);
		ocf_req_put(core_io->req);
		io->end(io, ret);
	}
}
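/*
 * Fast path submission: the request is serviced only if it can be handled
 * entirely from the cache (the engine returns OCF_FAST_PATH_NO otherwise).
 * A non-zero return value means the caller is expected to fall back to the
 * regular path, e.g. (illustrative sketch, not part of this file):
 *
 *	if (ocf_core_submit_io_fast(io) != 0)
 *		ocf_core_submit_io(io);
 */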
int ocf_core_submit_io_fast(struct ocf_io *io)
{
	struct ocf_core_io *core_io;
	ocf_req_cache_mode_t req_cache_mode;
	struct ocf_event_io trace_event;
	struct ocf_request *req;
	ocf_core_t core;
	ocf_cache_t cache;
	int fast;
	int ret;

	ret = ocf_core_validate_io(io);
	if (ret < 0)
		return ret;

	core_io = ocf_io_to_core_io(io);

	core = ocf_volume_to_core(io->volume);
	cache = ocf_core_get_cache(core);

	if (unlikely(!env_bit_test(ocf_cache_state_running,
			&cache->cache_state))) {
		ocf_io_end(io, -EIO);
		return 0;
	}

	req_cache_mode = ocf_get_effective_cache_mode(cache, core, io);
	if (req_cache_mode == ocf_req_cache_mode_wb &&
			ocf_io_set_dirty(cache, core_io)) {
		req_cache_mode = ocf_req_cache_mode_wt;
	}

	switch (req_cache_mode) {
	case ocf_req_cache_mode_pt:
		return -OCF_ERR_IO;
	case ocf_req_cache_mode_wb:
		req_cache_mode = ocf_req_cache_mode_fast;
		break;
	default:
		if (cache->use_submit_io_fast)
			break;

		if (io->dir == OCF_WRITE)
			return -OCF_ERR_IO;

		req_cache_mode = ocf_req_cache_mode_fast;
	}

	core_io->req = ocf_req_new_extended(io->io_queue, core,
			io->addr, io->bytes, io->dir);
	// We need additional pointer to req in case completion arrives before
	// we leave this function and core_io is freed
	req = core_io->req;

	if (!req) {
		dec_counter_if_req_was_dirty(core_io, cache);
		io->end(io, -ENOMEM);
		return 0;
	}

	if (req->d2c) {
		dec_counter_if_req_was_dirty(core_io, cache);
		ocf_req_put(req);
		return -OCF_ERR_IO;
	}

	req->part_id = ocf_part_class2id(cache, io->io_class);
	req->data = core_io->data;
	req->complete = ocf_req_complete;
	req->io = io;

	ocf_core_update_stats(core, io);

	if (cache->trace.trace_callback) {
		if (io->dir == OCF_WRITE)
			ocf_trace_prep_io_event(&trace_event, core_io,
					ocf_event_operation_wr);
		else if (io->dir == OCF_READ)
			ocf_trace_prep_io_event(&trace_event, core_io,
					ocf_event_operation_rd);
	}

	fast = ocf_engine_hndl_fast_req(req, req_cache_mode);
	if (fast != OCF_FAST_PATH_NO) {
		ocf_trace_push(io->io_queue, &trace_event, sizeof(trace_event));
		ocf_seq_cutoff_update(core, req);
		return 0;
	}

	dec_counter_if_req_was_dirty(core_io, cache);
	ocf_req_put(req);

	return -OCF_ERR_IO;
}
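/*
 * Backend callbacks for the core volume type registered in
 * ocf_core_volume_properties below.
 */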
static void ocf_core_volume_submit_io(struct ocf_io *io)
{
	ocf_core_submit_io_mode(io, ocf_cache_mode_none);
}
static void ocf_core_volume_submit_flush(struct ocf_io *io)
{
	struct ocf_core_io *core_io;
	ocf_core_t core;
	ocf_cache_t cache;
	int ret;

	ret = ocf_core_validate_io(io);
	if (ret < 0) {
		ocf_io_end(io, ret);
		return;
	}

	core_io = ocf_io_to_core_io(io);

	core = ocf_volume_to_core(io->volume);
	cache = ocf_core_get_cache(core);

	if (unlikely(!env_bit_test(ocf_cache_state_running,
			&cache->cache_state))) {
		ocf_io_end(io, -EIO);
		return;
	}

	core_io->req = ocf_req_new(io->io_queue, core, io->addr, io->bytes,
			io->dir);
	if (!core_io->req) {
		ocf_io_end(io, -ENOMEM);
		return;
	}

	core_io->req->complete = ocf_req_complete;
	core_io->req->io = io;
	core_io->req->data = core_io->data;

	ocf_trace_io(core_io, ocf_event_operation_flush, cache);

	ocf_engine_hndl_ops_req(core_io->req);
}
static void ocf_core_volume_submit_discard(struct ocf_io *io)
{
	struct ocf_core_io *core_io;
	ocf_core_t core;
	ocf_cache_t cache;
	int ret;

	ret = ocf_core_validate_io(io);
	if (ret < 0) {
		ocf_io_end(io, ret);
		return;
	}

	core_io = ocf_io_to_core_io(io);

	core = ocf_volume_to_core(io->volume);
	cache = ocf_core_get_cache(core);

	if (unlikely(!env_bit_test(ocf_cache_state_running,
			&cache->cache_state))) {
		ocf_io_end(io, -EIO);
		return;
	}

	core_io->req = ocf_req_new_discard(io->io_queue, core,
			io->addr, io->bytes, OCF_WRITE);
	if (!core_io->req) {
		ocf_io_end(io, -ENOMEM);
		return;
	}

	core_io->req->complete = ocf_req_complete;
	core_io->req->io = io;
	core_io->req->data = core_io->data;

	ocf_trace_io(core_io, ocf_event_operation_discard, cache);

	ocf_engine_hndl_discard_req(core_io->req);
}
/* *** VOLUME OPS *** */
static int ocf_core_volume_open(ocf_volume_t volume, void *volume_params)
{
	struct ocf_core_volume *core_volume = ocf_volume_get_priv(volume);
	const struct ocf_volume_uuid *uuid = ocf_volume_get_uuid(volume);
	ocf_core_t core = (ocf_core_t)uuid->data;

	core_volume->core = core;

	return 0;
}
static void ocf_core_volume_close(ocf_volume_t volume)
{
}
static unsigned int ocf_core_volume_get_max_io_size(ocf_volume_t volume)
{
	ocf_core_t core = ocf_volume_to_core(volume);

	return ocf_volume_get_max_io_size(&core->volume);
}
static uint64_t ocf_core_volume_get_byte_length(ocf_volume_t volume)
{
	ocf_core_t core = ocf_volume_to_core(volume);

	return ocf_volume_get_length(&core->volume);
}
static int ocf_core_io_set_data(struct ocf_io *io,
		ctx_data_t *data, uint32_t offset)
{
	struct ocf_core_io *core_io;

	core_io = ocf_io_to_core_io(io);
	core_io->data = data;

	return 0;
}
static ctx_data_t *ocf_core_io_get_data(struct ocf_io *io)
{
	struct ocf_core_io *core_io;

	core_io = ocf_io_to_core_io(io);
	return core_io->data;
}
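/*
 * Properties of the core volume type. The type is registered under id 0 in
 * ocf_core_volume_type_init() below; adopter-defined backend volume types
 * are registered the same way with their own properties and a distinct id
 * (illustrative sketch, not part of this file):
 *
 *	ocf_ctx_register_volume_type(ctx, MY_VOLUME_TYPE_ID,
 *			&my_volume_properties);
 */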
const struct ocf_volume_properties ocf_core_volume_properties = {
	.io_priv_size = sizeof(struct ocf_core_io),
	.volume_priv_size = sizeof(struct ocf_core_volume),
	.ops = {
		.submit_io = ocf_core_volume_submit_io,
		.submit_flush = ocf_core_volume_submit_flush,
		.submit_discard = ocf_core_volume_submit_discard,
		.submit_metadata = NULL,

		.open = ocf_core_volume_open,
		.close = ocf_core_volume_close,
		.get_max_io_size = ocf_core_volume_get_max_io_size,
		.get_length = ocf_core_volume_get_byte_length,
	},
	.io_ops = {
		.set_data = ocf_core_io_set_data,
		.get_data = ocf_core_io_get_data,
	},
};
int ocf_core_volume_type_init(ocf_ctx_t ctx)
{
	return ocf_ctx_register_volume_type(ctx, 0,
			&ocf_core_volume_properties);
}
void ocf_core_volume_type_deinit(ocf_ctx_t ctx)
{
	ocf_ctx_unregister_volume_type(ctx, 0);
}