/*
 * Copyright(c) 2012-2018 Intel Corporation
 * SPDX-License-Identifier: BSD-3-Clause-Clear
 */

#include "ocf/ocf.h"
#include "ocf_priv.h"
#include "metadata/metadata.h"
#include "engine/cache_engine.h"
#include "utils/utils_part.h"
#include "utils/utils_cache_line.h"
#include "utils/utils_core.h"

#ifdef OCF_DEBUG_STATS
static void ocf_stats_debug_init(struct ocf_counters_debug *stats)
{
        int i;

        for (i = 0; i < IO_PACKET_NO; i++) {
                env_atomic64_set(&stats->read_size[i], 0);
                env_atomic64_set(&stats->write_size[i], 0);
        }

        for (i = 0; i < IO_ALIGN_NO; i++) {
                env_atomic64_set(&stats->read_align[i], 0);
                env_atomic64_set(&stats->write_align[i], 0);
        }
}
#endif

static void ocf_stats_req_init(struct ocf_counters_req *stats)
{
        env_atomic64_set(&stats->full_miss, 0);
        env_atomic64_set(&stats->partial_miss, 0);
        env_atomic64_set(&stats->total, 0);
        env_atomic64_set(&stats->pass_through, 0);
}

static void ocf_stats_block_init(struct ocf_counters_block *stats)
{
        env_atomic64_set(&stats->read_bytes, 0);
        env_atomic64_set(&stats->write_bytes, 0);
}

static void ocf_stats_part_init(struct ocf_counters_part *stats)
{
        ocf_stats_req_init(&stats->read_reqs);
        ocf_stats_req_init(&stats->write_reqs);

        ocf_stats_block_init(&stats->blocks);
}

static void ocf_stats_error_init(struct ocf_counters_error *stats)
{
        env_atomic_set(&stats->read, 0);
        env_atomic_set(&stats->write, 0);
}

/********************************************************************
 * Function that resets stats, debug and breakdown counters.
 * The following stats are not reset by this function:
 * - cache_occupancy
 * - queue_length
 * - debug_counters_read_reqs_issued_seq_hits
 * - debug_counters_read_reqs_issued_not_seq_hits
 * - debug_counters_read_reqs_issued_read_miss_schedule
 * - debug_counters_write_reqs_thread
 * - debug_counters_write_reqs_issued_only_hdd
 * - debug_counters_write_reqs_issued_both_devs
 *********************************************************************/
void ocf_core_stats_initialize(ocf_core_t core)
{
        struct ocf_counters_core *exp_obj_stats;
        int i;

        OCF_CHECK_NULL(core);

        exp_obj_stats = core->counters;

        ocf_stats_block_init(&exp_obj_stats->core_blocks);
        ocf_stats_block_init(&exp_obj_stats->cache_blocks);

        ocf_stats_error_init(&exp_obj_stats->cache_errors);
        ocf_stats_error_init(&exp_obj_stats->core_errors);

        for (i = 0; i != OCF_IO_CLASS_MAX; i++)
                ocf_stats_part_init(&exp_obj_stats->part_counters[i]);

#ifdef OCF_DEBUG_STATS
        ocf_stats_debug_init(&exp_obj_stats->debug_stats);
#endif
}

void ocf_core_stats_initialize_all(ocf_cache_t cache)
{
        ocf_core_id_t id;

        for (id = 0; id < OCF_CORE_MAX; id++) {
                if (!env_bit_test(id, cache->conf_meta->valid_core_bitmap))
                        continue;

                ocf_core_stats_initialize(&cache->core[id]);
        }
}
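
/*
 * Example (illustrative sketch, not part of this file): a management-plane
 * caller resetting the counters of one core. The lookup via ocf_core_get()
 * from the public OCF API and the core_id variable are assumptions of the
 * sketch; ocf_core_stats_initialize_all() above performs the same reset for
 * every core marked valid in the cache's core bitmap.
 *
 *        ocf_core_t core;
 *
 *        if (!ocf_core_get(cache, core_id, &core))
 *                ocf_core_stats_initialize(core);
 */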

static void copy_req_stats(struct ocf_stats_req *dest,
                const struct ocf_counters_req *from)
{
        dest->partial_miss = env_atomic64_read(&from->partial_miss);
        dest->full_miss = env_atomic64_read(&from->full_miss);
        dest->total = env_atomic64_read(&from->total);
        dest->pass_through = env_atomic64_read(&from->pass_through);
}

static void accum_req_stats(struct ocf_stats_req *dest,
                const struct ocf_counters_req *from)
{
        dest->partial_miss += env_atomic64_read(&from->partial_miss);
        dest->full_miss += env_atomic64_read(&from->full_miss);
        dest->total += env_atomic64_read(&from->total);
        dest->pass_through += env_atomic64_read(&from->pass_through);
}

static void copy_block_stats(struct ocf_stats_block *dest,
                const struct ocf_counters_block *from)
{
        dest->read = env_atomic64_read(&from->read_bytes);
        dest->write = env_atomic64_read(&from->write_bytes);
}

static void accum_block_stats(struct ocf_stats_block *dest,
                const struct ocf_counters_block *from)
{
        dest->read += env_atomic64_read(&from->read_bytes);
        dest->write += env_atomic64_read(&from->write_bytes);
}

static void copy_error_stats(struct ocf_stats_error *dest,
                const struct ocf_counters_error *from)
{
        dest->read = env_atomic_read(&from->read);
        dest->write = env_atomic_read(&from->write);
}

#ifdef OCF_DEBUG_STATS
static void copy_debug_stats(struct ocf_stats_core_debug *dest,
                const struct ocf_counters_debug *from)
{
        int i;

        for (i = 0; i < IO_PACKET_NO; i++) {
                dest->read_size[i] = env_atomic64_read(&from->read_size[i]);
                dest->write_size[i] = env_atomic64_read(&from->write_size[i]);
        }

        for (i = 0; i < IO_ALIGN_NO; i++) {
                dest->read_align[i] = env_atomic64_read(&from->read_align[i]);
                dest->write_align[i] = env_atomic64_read(&from->write_align[i]);
        }
}
#endif

int ocf_core_io_class_get_stats(ocf_core_t core, ocf_part_id_t part_id,
                struct ocf_stats_io_class *stats)
{
        ocf_cache_t cache;
        uint32_t i;
        uint32_t cache_occupancy_total = 0;
        struct ocf_counters_part *part_stat;
        ocf_core_id_t core_id;

        OCF_CHECK_NULL(core);
        OCF_CHECK_NULL(stats);

        if (part_id < OCF_IO_CLASS_ID_MIN || part_id > OCF_IO_CLASS_ID_MAX)
                return -OCF_ERR_INVAL;

        core_id = ocf_core_get_id(core);
        cache = ocf_core_get_cache(core);

        if (!ocf_part_is_valid(&cache->user_parts[part_id]))
                return -OCF_ERR_IO_CLASS_NOT_EXIST;

        for_each_core(cache, i) {
                cache_occupancy_total += env_atomic_read(
                                &cache->core_runtime_meta[i].cached_clines);
        }

        part_stat = &core->counters->part_counters[part_id];

        stats->occupancy_clines = env_atomic_read(&cache->
                        core_runtime_meta[core_id].part_counters[part_id].
                        cached_clines);
        stats->dirty_clines = env_atomic_read(&cache->
                        core_runtime_meta[core_id].part_counters[part_id].
                        dirty_clines);

        stats->free_clines = cache->conf_meta->cachelines -
                        cache_occupancy_total;

        copy_req_stats(&stats->read_reqs, &part_stat->read_reqs);
        copy_req_stats(&stats->write_reqs, &part_stat->write_reqs);

        copy_block_stats(&stats->blocks, &part_stat->blocks);

        return 0;
}
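
/*
 * Example (sketch only; the io class id 0 and the printf formatting are
 * assumptions for illustration): reading the counters of a single IO class
 * for a core with a valid handle.
 *
 *        struct ocf_stats_io_class part_stats;
 *
 *        if (!ocf_core_io_class_get_stats(core, 0, &part_stats)) {
 *                printf("occupancy=%llu dirty=%llu free=%llu (cache lines)\n",
 *                                (unsigned long long)part_stats.occupancy_clines,
 *                                (unsigned long long)part_stats.dirty_clines,
 *                                (unsigned long long)part_stats.free_clines);
 *        }
 */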

static uint32_t _calc_dirty_for(uint64_t dirty_since)
{
        return dirty_since ?
                (env_ticks_to_msecs(env_get_tick_count() - dirty_since) / 1000)
                : 0;
}
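
/*
 * Worked example: if dirty_since is non-zero and the tick difference
 * converts to 12500 ms, the helper returns 12500 / 1000 = 12, i.e. the
 * number of whole seconds the core has stayed dirty. A dirty_since of 0
 * (never dirtied) yields 0.
 */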

int ocf_core_get_stats(ocf_core_t core, struct ocf_stats_core *stats)
{
        uint32_t i;
        ocf_core_id_t core_id;
        ocf_cache_t cache;
        struct ocf_counters_core *core_stats = NULL;
        struct ocf_counters_part *curr = NULL;

        OCF_CHECK_NULL(core);

        core_id = ocf_core_get_id(core);
        cache = ocf_core_get_cache(core);

        if (!stats)
                return -OCF_ERR_INVAL;

        core_stats = core->counters;

        ENV_BUG_ON(env_memset(stats, sizeof(*stats), 0));

        stats->core_size_bytes = ocf_volume_get_length(
                        &cache->core[core_id].volume);
        stats->core_size = ocf_bytes_2_lines_round_up(cache,
                        stats->core_size_bytes);
        stats->seq_cutoff_threshold = ocf_core_get_seq_cutoff_threshold(core);
        stats->seq_cutoff_policy = ocf_core_get_seq_cutoff_policy(core);
        copy_block_stats(&stats->core_volume, &core_stats->core_blocks);
        copy_block_stats(&stats->cache_volume, &core_stats->cache_blocks);

        copy_error_stats(&stats->core_errors,
                        &core_stats->core_errors);
        copy_error_stats(&stats->cache_errors,
                        &core_stats->cache_errors);

#ifdef OCF_DEBUG_STATS
        copy_debug_stats(&stats->debug_stat,
                        &core_stats->debug_stats);
#endif

        for (i = 0; i != OCF_IO_CLASS_MAX; i++) {
                curr = &core_stats->part_counters[i];

                accum_req_stats(&stats->read_reqs,
                                &curr->read_reqs);
                accum_req_stats(&stats->write_reqs,
                                &curr->write_reqs);

                accum_block_stats(&stats->core, &curr->blocks);

                stats->cache_occupancy += env_atomic_read(&cache->
                                core_runtime_meta[core_id].part_counters[i].
                                cached_clines);
                stats->dirty += env_atomic_read(&cache->
                                core_runtime_meta[core_id].part_counters[i].
                                dirty_clines);
        }

        stats->flushed = env_atomic_read(&core->flushed);

        stats->dirty_for = _calc_dirty_for(
                env_atomic64_read(&cache->core_runtime_meta[core_id].dirty_since));

        return 0;
}
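
/*
 * Example (sketch; error handling and printf are illustrative, and the core
 * handle is assumed to come from ocf_core_get() or an ocf_core_visit()
 * callback): aggregating a core's request and occupancy statistics.
 *
 *        struct ocf_stats_core stats;
 *
 *        if (!ocf_core_get_stats(core, &stats)) {
 *                printf("reads: total=%llu full_miss=%llu partial_miss=%llu\n",
 *                                (unsigned long long)stats.read_reqs.total,
 *                                (unsigned long long)stats.read_reqs.full_miss,
 *                                (unsigned long long)stats.read_reqs.partial_miss);
 *                printf("occupancy=%llu cache lines, dirty for %llu s\n",
 *                                (unsigned long long)stats.cache_occupancy,
 *                                (unsigned long long)stats.dirty_for);
 *        }
 */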

#ifdef OCF_DEBUG_STATS

#define IO_ALIGNMENT_SIZE (IO_ALIGN_NO)
#define IO_PACKET_SIZE ((IO_PACKET_NO) - 1)

static uint32_t io_alignment[IO_ALIGNMENT_SIZE] = {
        512, 1 * KiB, 2 * KiB, 4 * KiB
};

static int to_align_idx(uint64_t off)
{
        int i;

        for (i = IO_ALIGNMENT_SIZE - 1; i >= 0; i--) {
                if (off % io_alignment[i] == 0)
                        return i;
        }

        return IO_ALIGNMENT_SIZE;
}
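
/*
 * Example mappings derived from the io_alignment table: to_align_idx(4096)
 * returns 3 (4 KiB aligned), to_align_idx(1536) returns 0 (aligned only to
 * 512 B), and an offset that is not 512 B aligned falls through and returns
 * IO_ALIGNMENT_SIZE.
 */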

static uint32_t io_packet_size[IO_PACKET_SIZE] = {
        512, 1 * KiB, 2 * KiB, 4 * KiB, 8 * KiB,
        16 * KiB, 32 * KiB, 64 * KiB, 128 * KiB,
        256 * KiB, 512 * KiB
};

static int to_packet_idx(uint32_t len)
{
        int i = 0;

        for (i = 0; i < IO_PACKET_SIZE; i++) {
                if (len == io_packet_size[i])
                        return i;
        }

        return IO_PACKET_SIZE;
}
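
/*
 * Example mappings derived from the io_packet_size table:
 * to_packet_idx(4 * KiB) returns 3 and to_packet_idx(512 * KiB) returns 10,
 * while a length that matches no bucket (e.g. 3 * KiB) returns
 * IO_PACKET_SIZE, the last ("other") slot of the read_size/write_size arrays.
 */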

/*
 * Debug-only accounting: histogram the size and alignment of each IO
 * submitted to the core.
 */
void ocf_core_update_stats(ocf_core_t core, struct ocf_io *io)
{
        struct ocf_counters_debug *stats;
        int idx;

        OCF_CHECK_NULL(core);
        OCF_CHECK_NULL(io);

        stats = &core->counters->debug_stats;

        idx = to_packet_idx(io->bytes);
        if (io->dir == OCF_WRITE)
                env_atomic64_inc(&stats->write_size[idx]);
        else
                env_atomic64_inc(&stats->read_size[idx]);

        idx = to_align_idx(io->addr);
        if (io->dir == OCF_WRITE)
                env_atomic64_inc(&stats->write_align[idx]);
        else
                env_atomic64_inc(&stats->read_align[idx]);
}
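
/*
 * Example, derived from the tables above: a 4 KiB write submitted at offset
 * 8192 increments write_size[3] (4 KiB packet bucket) and write_align[3]
 * (4 KiB alignment bucket) in this core's debug counters.
 */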

#else

void ocf_core_update_stats(ocf_core_t core, struct ocf_io *io) {}

#endif