2 * Copyright(c) 2012-2018 Intel Corporation
3 * SPDX-License-Identifier: BSD-3-Clause-Clear
8 #include "metadata/metadata.h"
9 #include "engine/cache_engine.h"
10 #include "utils/utils_part.h"
11 #include "utils/utils_cache_line.h"
12 #include "utils/utils_core.h"
#ifdef OCF_DEBUG_STATS
/* Zero every debug counter: request-size and alignment histograms. */
static void ocf_stats_debug_init(struct ocf_counters_debug *stats)
{
	int i;

	/* Per-packet-size buckets */
	for (i = 0; i < IO_PACKET_NO; i++) {
		env_atomic64_set(&stats->read_size[i], 0);
		env_atomic64_set(&stats->write_size[i], 0);
	}

	/* Per-alignment buckets */
	for (i = 0; i < IO_ALIGN_NO; i++) {
		env_atomic64_set(&stats->read_align[i], 0);
		env_atomic64_set(&stats->write_align[i], 0);
	}
}
#endif
31 static void ocf_stats_req_init(struct ocf_counters_req
*stats
)
33 env_atomic64_set(&stats
->full_miss
, 0);
34 env_atomic64_set(&stats
->partial_miss
, 0);
35 env_atomic64_set(&stats
->total
, 0);
36 env_atomic64_set(&stats
->pass_through
, 0);
39 static void ocf_stats_block_init(struct ocf_counters_block
*stats
)
41 env_atomic64_set(&stats
->read_bytes
, 0);
42 env_atomic64_set(&stats
->write_bytes
, 0);
45 static void ocf_stats_part_init(struct ocf_counters_part
*stats
)
47 ocf_stats_req_init(&stats
->read_reqs
);
48 ocf_stats_req_init(&stats
->write_reqs
);
50 ocf_stats_block_init(&stats
->blocks
);
53 static void ocf_stats_error_init(struct ocf_counters_error
*stats
)
55 env_atomic_set(&stats
->read
, 0);
56 env_atomic_set(&stats
->write
, 0);
60 /********************************************************************
61 * Function that resets stats, debug and breakdown counters.
62 * If reset is set the following stats won't be reset:
65 * - debug_counters_read_reqs_issued_seq_hits
66 * - debug_counters_read_reqs_issued_not_seq_hits
67 * - debug_counters_read_reqs_issued_read_miss_schedule
68 * - debug_counters_write_reqs_thread
69 * - debug_counters_write_reqs_issued_only_hdd
70 * - debug_counters_write_reqs_issued_both_devs
71 *********************************************************************/
72 void ocf_core_stats_initialize(ocf_core_t core
)
74 struct ocf_counters_core
*exp_obj_stats
;
79 exp_obj_stats
= core
->counters
;
81 ocf_stats_block_init(&exp_obj_stats
->core_blocks
);
82 ocf_stats_block_init(&exp_obj_stats
->cache_blocks
);
84 ocf_stats_error_init(&exp_obj_stats
->cache_errors
);
85 ocf_stats_error_init(&exp_obj_stats
->core_errors
);
87 for (i
= 0; i
!= OCF_IO_CLASS_MAX
; i
++)
88 ocf_stats_part_init(&exp_obj_stats
->part_counters
[i
]);
90 #ifdef OCF_DEBUG_STATS
91 ocf_stats_debug_init(&exp_obj_stats
->debug_stats
);
95 void ocf_core_stats_initialize_all(ocf_cache_t cache
)
99 for (id
= 0; id
< OCF_CORE_MAX
; id
++) {
100 if (!env_bit_test(id
, cache
->conf_meta
->valid_core_bitmap
))
103 ocf_core_stats_initialize(&cache
->core
[id
]);
107 static void copy_req_stats(struct ocf_stats_req
*dest
,
108 const struct ocf_counters_req
*from
)
110 dest
->partial_miss
= env_atomic64_read(&from
->partial_miss
);
111 dest
->full_miss
= env_atomic64_read(&from
->full_miss
);
112 dest
->total
= env_atomic64_read(&from
->total
);
113 dest
->pass_through
= env_atomic64_read(&from
->pass_through
);
116 static void accum_req_stats(struct ocf_stats_req
*dest
,
117 const struct ocf_counters_req
*from
)
119 dest
->partial_miss
+= env_atomic64_read(&from
->partial_miss
);
120 dest
->full_miss
+= env_atomic64_read(&from
->full_miss
);
121 dest
->total
+= env_atomic64_read(&from
->total
);
122 dest
->pass_through
+= env_atomic64_read(&from
->pass_through
);
125 static void copy_block_stats(struct ocf_stats_block
*dest
,
126 const struct ocf_counters_block
*from
)
128 dest
->read
= env_atomic64_read(&from
->read_bytes
);
129 dest
->write
= env_atomic64_read(&from
->write_bytes
);
132 static void accum_block_stats(struct ocf_stats_block
*dest
,
133 const struct ocf_counters_block
*from
)
135 dest
->read
+= env_atomic64_read(&from
->read_bytes
);
136 dest
->write
+= env_atomic64_read(&from
->write_bytes
);
139 static void copy_error_stats(struct ocf_stats_error
*dest
,
140 const struct ocf_counters_error
*from
)
142 dest
->read
= env_atomic_read(&from
->read
);
143 dest
->write
= env_atomic_read(&from
->write
);
#ifdef OCF_DEBUG_STATS
/* Snapshot the debug histograms (size and alignment buckets). */
static void copy_debug_stats(struct ocf_stats_core_debug *dest,
		const struct ocf_counters_debug *from)
{
	int i;

	for (i = 0; i < IO_PACKET_NO; i++) {
		dest->read_size[i] = env_atomic64_read(&from->read_size[i]);
		dest->write_size[i] = env_atomic64_read(&from->write_size[i]);
	}

	for (i = 0; i < IO_ALIGN_NO; i++) {
		dest->read_align[i] = env_atomic64_read(&from->read_align[i]);
		dest->write_align[i] = env_atomic64_read(&from->write_align[i]);
	}
}
#endif
164 int ocf_core_io_class_get_stats(ocf_core_t core
, ocf_part_id_t part_id
,
165 struct ocf_stats_io_class
*stats
)
169 uint32_t cache_occupancy_total
= 0;
170 struct ocf_counters_part
*part_stat
;
171 ocf_core_id_t core_id
;
173 OCF_CHECK_NULL(core
);
174 OCF_CHECK_NULL(stats
);
176 if (part_id
< OCF_IO_CLASS_ID_MIN
|| part_id
> OCF_IO_CLASS_ID_MAX
)
177 return -OCF_ERR_INVAL
;
179 core_id
= ocf_core_get_id(core
);
180 cache
= ocf_core_get_cache(core
);
182 if (!ocf_part_is_valid(&cache
->user_parts
[part_id
]))
183 return -OCF_ERR_IO_CLASS_NOT_EXIST
;
185 for_each_core(cache
, i
) {
186 cache_occupancy_total
+= env_atomic_read(
187 &cache
->core_runtime_meta
[i
].cached_clines
);
190 part_stat
= &core
->counters
->part_counters
[part_id
];
192 stats
->occupancy_clines
= env_atomic_read(&cache
->
193 core_runtime_meta
[core_id
].part_counters
[part_id
].
195 stats
->dirty_clines
= env_atomic_read(&cache
->
196 core_runtime_meta
[core_id
].part_counters
[part_id
].
199 stats
->free_clines
= cache
->conf_meta
->cachelines
-
200 cache_occupancy_total
;
202 copy_req_stats(&stats
->read_reqs
, &part_stat
->read_reqs
);
203 copy_req_stats(&stats
->write_reqs
, &part_stat
->write_reqs
);
205 copy_block_stats(&stats
->blocks
, &part_stat
->blocks
);
/*
 * Seconds elapsed since the @dirty_since tick stamp; yields 0 when the
 * stamp is unset (core never dirtied).
 */
static uint32_t _calc_dirty_for(uint64_t dirty_since)
{
	if (!dirty_since)
		return 0;

	return env_ticks_to_msecs(env_get_tick_count() - dirty_since) / 1000;
}
217 int ocf_core_get_stats(ocf_core_t core
, struct ocf_stats_core
*stats
)
220 ocf_core_id_t core_id
;
222 struct ocf_counters_core
*core_stats
= NULL
;
223 struct ocf_counters_part
*curr
= NULL
;
225 OCF_CHECK_NULL(core
);
227 core_id
= ocf_core_get_id(core
);
228 cache
= ocf_core_get_cache(core
);
231 return -OCF_ERR_INVAL
;
233 core_stats
= core
->counters
;
235 ENV_BUG_ON(env_memset(stats
, sizeof(*stats
), 0));
237 stats
->core_size_bytes
= ocf_volume_get_length(
238 &cache
->core
[core_id
].volume
);
239 stats
->core_size
= ocf_bytes_2_lines_round_up(cache
,
240 stats
->core_size_bytes
);
241 stats
->seq_cutoff_threshold
= ocf_core_get_seq_cutoff_threshold(core
);
242 stats
->seq_cutoff_policy
= ocf_core_get_seq_cutoff_policy(core
);
245 env_atomic_read(&cache
->core_runtime_meta
[core_id
].cached_clines
);
247 copy_block_stats(&stats
->core_volume
, &core_stats
->core_blocks
);
248 copy_block_stats(&stats
->cache_volume
, &core_stats
->cache_blocks
);
250 copy_error_stats(&stats
->core_errors
,
251 &core_stats
->core_errors
);
252 copy_error_stats(&stats
->cache_errors
,
253 &core_stats
->cache_errors
);
255 #ifdef OCF_DEBUG_STATS
256 copy_debug_stats(&stats
->debug_stat
,
257 &core_stats
->debug_stats
);
260 for (i
= 0; i
!= OCF_IO_CLASS_MAX
; i
++) {
261 curr
= &core_stats
->part_counters
[i
];
263 accum_req_stats(&stats
->read_reqs
,
265 accum_req_stats(&stats
->write_reqs
,
268 accum_block_stats(&stats
->core
, &curr
->blocks
);
270 stats
->cache_occupancy
+= env_atomic_read(&cache
->
271 core_runtime_meta
[core_id
].part_counters
[i
].
273 stats
->dirty
+= env_atomic_read(&cache
->
274 core_runtime_meta
[core_id
].part_counters
[i
].
278 stats
->flushed
= env_atomic_read(&core
->flushed
);
280 stats
->dirty_for
= _calc_dirty_for(
281 env_atomic64_read(&cache
->core_runtime_meta
[core_id
].dirty_since
));
#ifdef OCF_DEBUG_STATS

/* Bucket counts for the debug histograms (values come from ocf headers). */
#define IO_ALIGNMENT_SIZE (IO_ALIGN_NO)
/* IO_PACKET_NO includes an extra "other" bucket; the last index
 * (IO_PACKET_SIZE) is used as the fallback in to_packet_idx(). */
#define IO_PACKET_SIZE ((IO_PACKET_NO) - 1)

/*
 * Alignments recognized by the debug stats, ascending.
 * NOTE(review): only 4 initializers for IO_ALIGNMENT_SIZE slots -
 * confirm IO_ALIGN_NO matches, since a zero-filled tail entry would
 * make `off % io_alignment[i]` undefined in to_align_idx().
 */
static uint32_t io_alignment[IO_ALIGNMENT_SIZE] = {
	512, 1 * KiB, 2 * KiB, 4 * KiB
};
295 static int to_align_idx(uint64_t off
)
299 for (i
= IO_ALIGNMENT_SIZE
- 1; i
>= 0; i
--) {
300 if (off
% io_alignment
[i
] == 0)
304 return IO_ALIGNMENT_SIZE
;
/*
 * Request sizes recognized by the debug stats, ascending.
 * NOTE(review): this initializer appears truncated after 128 KiB by the
 * extraction - restore any remaining sizes (e.g. 256 KiB, 512 KiB, ...)
 * so all IO_PACKET_SIZE slots are populated; zero-filled tail slots can
 * never match a request length in to_packet_idx().
 */
static uint32_t io_packet_size[IO_PACKET_SIZE] = {
	512, 1 * KiB, 2 * KiB, 4 * KiB, 8 * KiB,
	16 * KiB, 32 * KiB, 64 * KiB, 128 * KiB,
};
314 static int to_packet_idx(uint32_t len
)
318 for (i
= 0; i
< IO_PACKET_SIZE
; i
++) {
319 if (len
== io_packet_size
[i
])
323 return IO_PACKET_SIZE
;
326 void ocf_core_update_stats(ocf_core_t core
, struct ocf_io
*io
)
328 struct ocf_counters_debug
*stats
;
331 OCF_CHECK_NULL(core
);
334 core_id
= ocf_core_get_id(core
);
335 cache
= ocf_core_get_cache(core
);
337 stats
= &core
->counters
->debug_stats
;
339 idx
= to_packet_idx(io
->bytes
);
340 if (io
->dir
== OCF_WRITE
)
341 env_atomic64_inc(&stats
->write_size
[idx
]);
343 env_atomic64_inc(&stats
->read_size
[idx
]);
345 idx
= to_align_idx(io
->addr
);
346 if (io
->dir
== OCF_WRITE
)
347 env_atomic64_inc(&stats
->write_align
[idx
]);
349 env_atomic64_inc(&stats
->read_align
[idx
]);
#else

/* Debug stats disabled: I/O accounting is a no-op. */
void ocf_core_update_stats(ocf_core_t core, struct ocf_io *io) {}

#endif