]> git.proxmox.com Git - mirror_ovs.git/blob - lib/coverage.c
cirrus: Use FreeBSD 12.2.
[mirror_ovs.git] / lib / coverage.c
1 /*
2 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <config.h>
18 #include "coverage.h"
19 #include <inttypes.h>
20 #include <stdlib.h>
21 #include "openvswitch/dynamic-string.h"
22 #include "hash.h"
23 #include "svec.h"
24 #include "timeval.h"
25 #include "unixctl.h"
26 #include "util.h"
27 #include "openvswitch/vlog.h"
28
29 VLOG_DEFINE_THIS_MODULE(coverage);
30
31 /* The coverage counters. */
32 static struct coverage_counter **coverage_counters = NULL;
33 static size_t n_coverage_counters = 0;
34 static size_t allocated_coverage_counters = 0;
35
36 static struct ovs_mutex coverage_mutex = OVS_MUTEX_INITIALIZER;
37
38 DEFINE_STATIC_PER_THREAD_DATA(long long int, coverage_clear_time, LLONG_MIN);
39 static long long int coverage_run_time = LLONG_MIN;
40
41 /* Index counter used to compute the moving average array's index. */
42 static unsigned int idx_count = 0;
43
44 static void coverage_read(struct svec *);
45 static unsigned int coverage_array_sum(const unsigned int *arr,
46 const unsigned int len);
47 static bool coverage_read_counter(const char *name,
48 unsigned long long int *count);
49
50 /* Registers a coverage counter with the coverage core */
51 void
52 coverage_counter_register(struct coverage_counter* counter)
53 {
54 if (n_coverage_counters >= allocated_coverage_counters) {
55 coverage_counters = x2nrealloc(coverage_counters,
56 &allocated_coverage_counters,
57 sizeof(struct coverage_counter*));
58 }
59 coverage_counters[n_coverage_counters++] = counter;
60 }
61
62 static void
63 coverage_unixctl_show(struct unixctl_conn *conn, int argc OVS_UNUSED,
64 const char *argv[] OVS_UNUSED, void *aux OVS_UNUSED)
65 {
66 struct svec lines;
67 char *reply;
68
69 svec_init(&lines);
70 coverage_read(&lines);
71 reply = svec_join(&lines, "\n", "\n");
72 unixctl_command_reply(conn, reply);
73 free(reply);
74 svec_destroy(&lines);
75 }
76
77 static void
78 coverage_unixctl_read_counter(struct unixctl_conn *conn, int argc OVS_UNUSED,
79 const char *argv[], void *aux OVS_UNUSED)
80 {
81 unsigned long long count;
82 char *reply;
83 bool ok;
84
85 ok = coverage_read_counter(argv[1], &count);
86 if (!ok) {
87 unixctl_command_reply_error(conn, "No such counter");
88 return;
89 }
90
91 reply = xasprintf("%llu\n", count);
92 unixctl_command_reply(conn, reply);
93 free(reply);
94 }
95
/* Registers the coverage unixctl commands ("coverage/show" and
 * "coverage/read-counter COUNTER").  Call once at program startup. */
void
coverage_init(void)
{
    unixctl_command_register("coverage/show", "", 0, 0,
                             coverage_unixctl_show, NULL);
    unixctl_command_register("coverage/read-counter", "COUNTER", 1, 1,
                             coverage_unixctl_read_counter, NULL);
}
104
105 /* Sorts coverage counters in descending order by total, within equal
106 * totals alphabetically by name. */
107 static int
108 compare_coverage_counters(const void *a_, const void *b_)
109 {
110 const struct coverage_counter *const *ap = a_;
111 const struct coverage_counter *const *bp = b_;
112 const struct coverage_counter *a = *ap;
113 const struct coverage_counter *b = *bp;
114 if (a->total != b->total) {
115 return a->total < b->total ? 1 : -1;
116 } else {
117 return strcmp(a->name, b->name);
118 }
119 }
120
121 static uint32_t
122 coverage_hash(void)
123 {
124 struct coverage_counter **c;
125 uint32_t hash = 0;
126 int n_groups, i;
127
128 /* Sort coverage counters into groups with equal totals. */
129 c = xmalloc(n_coverage_counters * sizeof *c);
130 ovs_mutex_lock(&coverage_mutex);
131 for (i = 0; i < n_coverage_counters; i++) {
132 c[i] = coverage_counters[i];
133 }
134 ovs_mutex_unlock(&coverage_mutex);
135 qsort(c, n_coverage_counters, sizeof *c, compare_coverage_counters);
136
137 /* Hash the names in each group along with the rank. */
138 n_groups = 0;
139 for (i = 0; i < n_coverage_counters; ) {
140 int j;
141
142 if (!c[i]->total) {
143 break;
144 }
145 n_groups++;
146 hash = hash_int(i, hash);
147 for (j = i; j < n_coverage_counters; j++) {
148 if (c[j]->total != c[i]->total) {
149 break;
150 }
151 hash = hash_string(c[j]->name, hash);
152 }
153 i = j;
154 }
155
156 free(c);
157
158 return hash_int(n_groups, hash);
159 }
160
/* Returns true if 'hash' has already been seen since the suppression
 * table was last cleared, false (and records it) otherwise.  The table is
 * wiped once a day so that suppressed coverage reports eventually get
 * logged again.
 *
 * NOTE(review): the static table and timer are unsynchronized; callers
 * presumably serialize access (coverage_log() rate-limits) -- confirm. */
static bool
coverage_hit(uint32_t hash)
{
    enum { HIT_BITS = 1024, BITS_PER_WORD = 32 };
    static uint32_t hit[HIT_BITS / BITS_PER_WORD];
    BUILD_ASSERT_DECL(IS_POW2(HIT_BITS));

    static long long int next_clear = LLONG_MIN;

    /* Expire coverage hash suppression once a day. */
    if (time_msec() >= next_clear) {
        memset(hit, 0, sizeof hit);
        next_clear = time_msec() + 60 * 60 * 24 * 1000LL;
    }

    /* Map 'hash' onto one bit of the table. */
    unsigned int bit = hash & (HIT_BITS - 1);
    unsigned int word = bit / BITS_PER_WORD;
    uint32_t mask = 1u << (bit % BITS_PER_WORD);

    bool seen = (hit[word] & mask) != 0;
    hit[word] |= mask;
    return seen;
}
187
188 /* Logs the coverage counters, unless a similar set of events has already been
189 * logged.
190 *
191 * This function logs at log level VLL_INFO. Use care before adjusting this
192 * level, because depending on its configuration, syslogd can write changes
193 * synchronously, which can cause the coverage messages to take several seconds
194 * to write. */
195 void
196 coverage_log(void)
197 {
198 static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(1, 3);
199
200 if (!VLOG_DROP_INFO(&rl)) {
201 uint32_t hash = coverage_hash();
202 if (coverage_hit(hash)) {
203 VLOG_INFO("Skipping details of duplicate event coverage for "
204 "hash=%08"PRIx32, hash);
205 } else {
206 struct svec lines;
207 const char *line;
208 size_t i;
209
210 svec_init(&lines);
211 coverage_read(&lines);
212 SVEC_FOR_EACH (i, line, &lines) {
213 VLOG_INFO("%s", line);
214 }
215 svec_destroy(&lines);
216 }
217 }
218 }
219
/* Adds coverage counter information to 'lines': a header line carrying the
 * snapshot's coverage hash, one line per counter that has ever been hit
 * (with per-second rates averaged over the last run interval, minute, and
 * hour, plus the lifetime total), and a trailing count of never-hit
 * counters. */
static void
coverage_read(struct svec *lines)
{
    struct coverage_counter **c = coverage_counters;
    unsigned long long int *totals;
    size_t n_never_hit;
    uint32_t hash;
    size_t i;

    hash = coverage_hash();

    n_never_hit = 0;
    svec_add_nocopy(lines,
                    xasprintf("Event coverage, avg rate over last: %d "
                              "seconds, last minute, last hour, "
                              "hash=%08"PRIx32":",
                              COVERAGE_RUN_INTERVAL/1000, hash));

    /* Copy the totals under the mutex so the report reflects one
     * consistent snapshot of the lifetime counts. */
    totals = xmalloc(n_coverage_counters * sizeof *totals);
    ovs_mutex_lock(&coverage_mutex);
    for (i = 0; i < n_coverage_counters; i++) {
        totals[i] = c[i]->total;
    }
    ovs_mutex_unlock(&coverage_mutex);

    for (i = 0; i < n_coverage_counters; i++) {
        if (totals[i]) {
            /* Shows the averaged per-second rates for the last
             * COVERAGE_RUN_INTERVAL interval, the last minute and
             * the last hour.  The most recent 'min' slot is at
             * (idx_count - 1) because coverage_run() advances idx_count
             * past the slot it last filled.
             *
             * NOTE(review): 'min', 'hr', and 'idx_count' are read here
             * while coverage_array_sum() takes the mutex per call only;
             * values may be slightly torn relative to a concurrent
             * coverage_run() -- presumably acceptable for a report. */
            svec_add_nocopy(lines,
                            xasprintf("%-24s %5.1f/sec %9.3f/sec "
                                      "%13.4f/sec total: %llu",
                                      c[i]->name,
                                      (c[i]->min[(idx_count - 1) % MIN_AVG_LEN]
                                       * 1000.0 / COVERAGE_RUN_INTERVAL),
                                      coverage_array_sum(c[i]->min, MIN_AVG_LEN) / 60.0,
                                      coverage_array_sum(c[i]->hr, HR_AVG_LEN) / 3600.0,
                                      totals[i]));
        } else {
            n_never_hit++;
        }
    }

    svec_add_nocopy(lines, xasprintf("%"PRIuSIZE" events never hit", n_never_hit));
    free(totals);
}
268
269 /* Runs approximately every COVERAGE_CLEAR_INTERVAL amount of time to
270 * synchronize per-thread counters with global counters. Every thread maintains
271 * a separate timer to ensure all counters are periodically aggregated.
272 *
273 * Uses 'ovs_mutex_trylock()' if 'trylock' is true. This is to prevent
274 * multiple performance-critical threads contending over the 'coverage_mutex'.
275 *
276 * */
277 static void
278 coverage_clear__(bool trylock)
279 {
280 long long int now, *thread_time;
281
282 now = time_msec();
283 thread_time = coverage_clear_time_get();
284
285 /* Initialize the coverage_clear_time. */
286 if (*thread_time == LLONG_MIN) {
287 *thread_time = now + COVERAGE_CLEAR_INTERVAL;
288 }
289
290 if (now >= *thread_time) {
291 size_t i;
292
293 if (trylock) {
294 /* Returns if cannot acquire lock. */
295 if (ovs_mutex_trylock(&coverage_mutex)) {
296 return;
297 }
298 } else {
299 ovs_mutex_lock(&coverage_mutex);
300 }
301
302 for (i = 0; i < n_coverage_counters; i++) {
303 struct coverage_counter *c = coverage_counters[i];
304 c->total += c->count();
305 }
306 ovs_mutex_unlock(&coverage_mutex);
307 *thread_time = now + COVERAGE_CLEAR_INTERVAL;
308 }
309 }
310
/* Synchronizes this thread's counters into the global totals if due,
 * blocking on 'coverage_mutex' if necessary. */
void
coverage_clear(void)
{
    coverage_clear__(false);
}
316
/* Like coverage_clear(), but gives up without synchronizing if
 * 'coverage_mutex' cannot be acquired immediately.  Intended for
 * performance-critical threads that must never block here. */
void
coverage_try_clear(void)
{
    coverage_clear__(true);
}
322
/* Runs approximately every COVERAGE_RUN_INTERVAL amount of time to update the
 * coverage counters' 'min' and 'hr' array.  'min' array is for cumulating
 * per second counts into per minute count.  'hr' array is for cumulating per
 * minute counts into per hour count.  Every thread may call this function;
 * the whole update runs under 'coverage_mutex'. */
void
coverage_run(void)
{
    struct coverage_counter **c = coverage_counters;
    long long int now;

    ovs_mutex_lock(&coverage_mutex);
    now = time_msec();
    /* Initialize the coverage_run_time on the very first call; no slots
     * are filled until a full interval has elapsed. */
    if (coverage_run_time == LLONG_MIN) {
        coverage_run_time = now + COVERAGE_RUN_INTERVAL;
    }

    if (now >= coverage_run_time) {
        size_t i, j;
        /* Computes the number of COVERAGE_RUN_INTERVAL slots, since
         * it is possible that the actual run interval is multiple of
         * COVERAGE_RUN_INTERVAL (e.g. the caller was delayed). */
        int slots = (now - coverage_run_time) / COVERAGE_RUN_INTERVAL + 1;

        for (i = 0; i < n_coverage_counters; i++) {
            unsigned int count, portion;
            unsigned int idx = idx_count;

            /* Computes the differences between the current total and the one
             * recorded in last invocation of coverage_run(). */
            count = c[i]->total - c[i]->last_total;
            c[i]->last_total = c[i]->total;
            /* The count over the time interval is evenly distributed
             * among slots by calculating the portion.  The remainder
             * (count % slots) is added to the final slot below so no
             * events are lost to integer division. */
            portion = count / slots;

            for (j = 0; j < slots; j++) {
                /* Updates the index variables. */
                /* The m_idx is increased from 0 to MIN_AVG_LEN - 1. Every
                 * time the m_idx finishes a cycle (a cycle is one minute),
                 * the h_idx is incremented by 1. */
                unsigned int m_idx = idx % MIN_AVG_LEN;
                unsigned int h_idx = idx / MIN_AVG_LEN;

                c[i]->min[m_idx] = portion + (j == (slots - 1)
                                              ? count % slots : 0);
                /* When a new minute starts (m_idx == 0) the hour slot is
                 * reset to this minute's first sample; otherwise the
                 * sample accumulates into the current hour slot. */
                c[i]->hr[h_idx] = m_idx == 0
                                  ? c[i]->min[m_idx]
                                  : (c[i]->hr[h_idx] + c[i]->min[m_idx]);
                /* Wrap so that h_idx stays within 0..HR_AVG_LEN - 1. */
                idx = (idx + 1) % (MIN_AVG_LEN * HR_AVG_LEN);
            }
        }

        /* Updates the global index variables. */
        idx_count = (idx_count + slots) % (MIN_AVG_LEN * HR_AVG_LEN);
        /* Updates the run time. */
        coverage_run_time = now + COVERAGE_RUN_INTERVAL;
    }
    ovs_mutex_unlock(&coverage_mutex);
}
384
385 static unsigned int
386 coverage_array_sum(const unsigned int *arr, const unsigned int len)
387 {
388 unsigned int sum = 0;
389 size_t i;
390
391 ovs_mutex_lock(&coverage_mutex);
392 for (i = 0; i < len; i++) {
393 sum += arr[i];
394 }
395 ovs_mutex_unlock(&coverage_mutex);
396 return sum;
397 }
398
399 static bool
400 coverage_read_counter(const char *name, unsigned long long int *count)
401 {
402 for (size_t i = 0; i < n_coverage_counters; i++) {
403 struct coverage_counter *c = coverage_counters[i];
404
405 if (!strcmp(c->name, name)) {
406 ovs_mutex_lock(&coverage_mutex);
407 c->total += c->count();
408 *count = c->total;
409 ovs_mutex_unlock(&coverage_mutex);
410
411 return true;
412 }
413 }
414
415 return false;
416 }