/*
 * Copyright (c) 2009, 2010, 2011, 2012, 2013, 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <config.h>
#include "coverage.h"
#include <inttypes.h>
#include <stdlib.h>
#include "openvswitch/dynamic-string.h"
#include "hash.h"
#include "svec.h"
#include "timeval.h"
#include "unixctl.h"
#include "util.h"
#include "openvswitch/vlog.h"
29 VLOG_DEFINE_THIS_MODULE(coverage
);
31 /* The coverage counters. */
32 static struct coverage_counter
**coverage_counters
= NULL
;
33 static size_t n_coverage_counters
= 0;
34 static size_t allocated_coverage_counters
= 0;
36 static struct ovs_mutex coverage_mutex
= OVS_MUTEX_INITIALIZER
;
38 DEFINE_STATIC_PER_THREAD_DATA(long long int, coverage_clear_time
, LLONG_MIN
);
39 static long long int coverage_run_time
= LLONG_MIN
;
41 /* Index counter used to compute the moving average array's index. */
42 static unsigned int idx_count
= 0;
/* Forward declarations for the static helpers defined below. */
static void coverage_read(struct svec *);
static unsigned int coverage_array_sum(const unsigned int *arr,
                                       const unsigned int len);
static bool coverage_read_counter(const char *name,
                                  unsigned long long int *count);
50 /* Registers a coverage counter with the coverage core */
52 coverage_counter_register(struct coverage_counter
* counter
)
54 if (n_coverage_counters
>= allocated_coverage_counters
) {
55 coverage_counters
= x2nrealloc(coverage_counters
,
56 &allocated_coverage_counters
,
57 sizeof(struct coverage_counter
*));
59 coverage_counters
[n_coverage_counters
++] = counter
;
63 coverage_unixctl_show(struct unixctl_conn
*conn
, int argc OVS_UNUSED
,
64 const char *argv
[] OVS_UNUSED
, void *aux OVS_UNUSED
)
70 coverage_read(&lines
);
71 reply
= svec_join(&lines
, "\n", "\n");
72 unixctl_command_reply(conn
, reply
);
78 coverage_unixctl_read_counter(struct unixctl_conn
*conn
, int argc OVS_UNUSED
,
79 const char *argv
[], void *aux OVS_UNUSED
)
81 unsigned long long count
;
85 ok
= coverage_read_counter(argv
[1], &count
);
87 unixctl_command_reply_error(conn
, "No such counter");
91 reply
= xasprintf("%llu\n", count
);
92 unixctl_command_reply(conn
, reply
);
99 unixctl_command_register("coverage/show", "", 0, 0,
100 coverage_unixctl_show
, NULL
);
101 unixctl_command_register("coverage/read-counter", "COUNTER", 1, 1,
102 coverage_unixctl_read_counter
, NULL
);
105 /* Sorts coverage counters in descending order by total, within equal
106 * totals alphabetically by name. */
108 compare_coverage_counters(const void *a_
, const void *b_
)
110 const struct coverage_counter
*const *ap
= a_
;
111 const struct coverage_counter
*const *bp
= b_
;
112 const struct coverage_counter
*a
= *ap
;
113 const struct coverage_counter
*b
= *bp
;
114 if (a
->total
!= b
->total
) {
115 return a
->total
< b
->total
? 1 : -1;
117 return strcmp(a
->name
, b
->name
);
124 struct coverage_counter
**c
;
128 /* Sort coverage counters into groups with equal totals. */
129 c
= xmalloc(n_coverage_counters
* sizeof *c
);
130 ovs_mutex_lock(&coverage_mutex
);
131 for (i
= 0; i
< n_coverage_counters
; i
++) {
132 c
[i
] = coverage_counters
[i
];
134 ovs_mutex_unlock(&coverage_mutex
);
135 qsort(c
, n_coverage_counters
, sizeof *c
, compare_coverage_counters
);
137 /* Hash the names in each group along with the rank. */
139 for (i
= 0; i
< n_coverage_counters
; ) {
146 hash
= hash_int(i
, hash
);
147 for (j
= i
; j
< n_coverage_counters
; j
++) {
148 if (c
[j
]->total
!= c
[i
]->total
) {
151 hash
= hash_string(c
[j
]->name
, hash
);
158 return hash_int(n_groups
, hash
);
/* Returns true if 'hash' has been seen within the last day (a Bloom-filter
 * style check with a single hash function, so false positives are possible);
 * otherwise records 'hash' and returns false.  Used to suppress repeated
 * logging of identical coverage summaries. */
static bool
coverage_hit(uint32_t hash)
{
    enum { HIT_BITS = 1024, BITS_PER_WORD = 32 };
    static uint32_t hit[HIT_BITS / BITS_PER_WORD];
    BUILD_ASSERT_DECL(IS_POW2(HIT_BITS));

    static long long int next_clear = LLONG_MIN;

    unsigned int bit_index = hash & (HIT_BITS - 1);
    unsigned int word_index = bit_index / BITS_PER_WORD;
    unsigned int word_mask = 1u << (bit_index % BITS_PER_WORD);

    /* Expire coverage hash suppression once a day. */
    if (time_msec() >= next_clear) {
        memset(hit, 0, sizeof hit);
        next_clear = time_msec() + 60 * 60 * 24 * 1000LL;
    }

    if (hit[word_index] & word_mask) {
        return true;
    } else {
        hit[word_index] |= word_mask;
        return false;
    }
}
188 /* Logs the coverage counters, unless a similar set of events has already been
191 * This function logs at log level VLL_INFO. Use care before adjusting this
192 * level, because depending on its configuration, syslogd can write changes
193 * synchronously, which can cause the coverage messages to take several seconds
198 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 3);
200 if (!VLOG_DROP_INFO(&rl
)) {
201 uint32_t hash
= coverage_hash();
202 if (coverage_hit(hash
)) {
203 VLOG_INFO("Skipping details of duplicate event coverage for "
204 "hash=%08"PRIx32
, hash
);
211 coverage_read(&lines
);
212 SVEC_FOR_EACH (i
, line
, &lines
) {
213 VLOG_INFO("%s", line
);
215 svec_destroy(&lines
);
220 /* Adds coverage counter information to 'lines'. */
222 coverage_read(struct svec
*lines
)
224 struct coverage_counter
**c
= coverage_counters
;
225 unsigned long long int *totals
;
230 hash
= coverage_hash();
233 svec_add_nocopy(lines
,
234 xasprintf("Event coverage, avg rate over last: %d "
235 "seconds, last minute, last hour, "
237 COVERAGE_RUN_INTERVAL
/1000, hash
));
239 totals
= xmalloc(n_coverage_counters
* sizeof *totals
);
240 ovs_mutex_lock(&coverage_mutex
);
241 for (i
= 0; i
< n_coverage_counters
; i
++) {
242 totals
[i
] = c
[i
]->total
;
244 ovs_mutex_unlock(&coverage_mutex
);
246 for (i
= 0; i
< n_coverage_counters
; i
++) {
248 /* Shows the averaged per-second rates for the last
249 * COVERAGE_RUN_INTERVAL interval, the last minute and
251 svec_add_nocopy(lines
,
252 xasprintf("%-24s %5.1f/sec %9.3f/sec "
253 "%13.4f/sec total: %llu",
255 (c
[i
]->min
[(idx_count
- 1) % MIN_AVG_LEN
]
256 * 1000.0 / COVERAGE_RUN_INTERVAL
),
257 coverage_array_sum(c
[i
]->min
, MIN_AVG_LEN
) / 60.0,
258 coverage_array_sum(c
[i
]->hr
, HR_AVG_LEN
) / 3600.0,
265 svec_add_nocopy(lines
, xasprintf("%"PRIuSIZE
" events never hit", n_never_hit
));
269 /* Runs approximately every COVERAGE_CLEAR_INTERVAL amount of time to
270 * synchronize per-thread counters with global counters. Every thread maintains
271 * a separate timer to ensure all counters are periodically aggregated.
273 * Uses 'ovs_mutex_trylock()' if 'trylock' is true. This is to prevent
274 * multiple performance-critical threads contending over the 'coverage_mutex'.
278 coverage_clear__(bool trylock
)
280 long long int now
, *thread_time
;
283 thread_time
= coverage_clear_time_get();
285 /* Initialize the coverage_clear_time. */
286 if (*thread_time
== LLONG_MIN
) {
287 *thread_time
= now
+ COVERAGE_CLEAR_INTERVAL
;
290 if (now
>= *thread_time
) {
294 /* Returns if cannot acquire lock. */
295 if (ovs_mutex_trylock(&coverage_mutex
)) {
299 ovs_mutex_lock(&coverage_mutex
);
302 for (i
= 0; i
< n_coverage_counters
; i
++) {
303 struct coverage_counter
*c
= coverage_counters
[i
];
304 c
->total
+= c
->count();
306 ovs_mutex_unlock(&coverage_mutex
);
307 *thread_time
= now
+ COVERAGE_CLEAR_INTERVAL
;
314 coverage_clear__(false);
318 coverage_try_clear(void)
320 coverage_clear__(true);
323 /* Runs approximately every COVERAGE_RUN_INTERVAL amount of time to update the
324 * coverage counters' 'min' and 'hr' array. 'min' array is for cumulating
325 * per second counts into per minute count. 'hr' array is for cumulating per
326 * minute counts into per hour count. Every thread may call this function. */
330 struct coverage_counter
**c
= coverage_counters
;
333 ovs_mutex_lock(&coverage_mutex
);
335 /* Initialize the coverage_run_time. */
336 if (coverage_run_time
== LLONG_MIN
) {
337 coverage_run_time
= now
+ COVERAGE_RUN_INTERVAL
;
340 if (now
>= coverage_run_time
) {
342 /* Computes the number of COVERAGE_RUN_INTERVAL slots, since
343 * it is possible that the actual run interval is multiple of
344 * COVERAGE_RUN_INTERVAL. */
345 int slots
= (now
- coverage_run_time
) / COVERAGE_RUN_INTERVAL
+ 1;
347 for (i
= 0; i
< n_coverage_counters
; i
++) {
348 unsigned int count
, portion
;
349 unsigned int idx
= idx_count
;
351 /* Computes the differences between the current total and the one
352 * recorded in last invocation of coverage_run(). */
353 count
= c
[i
]->total
- c
[i
]->last_total
;
354 c
[i
]->last_total
= c
[i
]->total
;
355 /* The count over the time interval is evenly distributed
356 * among slots by calculating the portion. */
357 portion
= count
/ slots
;
359 for (j
= 0; j
< slots
; j
++) {
360 /* Updates the index variables. */
361 /* The m_idx is increased from 0 to MIN_AVG_LEN - 1. Every
362 * time the m_idx finishes a cycle (a cycle is one minute),
363 * the h_idx is incremented by 1. */
364 unsigned int m_idx
= idx
% MIN_AVG_LEN
;
365 unsigned int h_idx
= idx
/ MIN_AVG_LEN
;
367 c
[i
]->min
[m_idx
] = portion
+ (j
== (slots
- 1)
368 ? count
% slots
: 0);
369 c
[i
]->hr
[h_idx
] = m_idx
== 0
371 : (c
[i
]->hr
[h_idx
] + c
[i
]->min
[m_idx
]);
372 /* This is to guarantee that h_idx ranges from 0 to 59. */
373 idx
= (idx
+ 1) % (MIN_AVG_LEN
* HR_AVG_LEN
);
377 /* Updates the global index variables. */
378 idx_count
= (idx_count
+ slots
) % (MIN_AVG_LEN
* HR_AVG_LEN
);
379 /* Updates the run time. */
380 coverage_run_time
= now
+ COVERAGE_RUN_INTERVAL
;
382 ovs_mutex_unlock(&coverage_mutex
);
386 coverage_array_sum(const unsigned int *arr
, const unsigned int len
)
388 unsigned int sum
= 0;
391 ovs_mutex_lock(&coverage_mutex
);
392 for (i
= 0; i
< len
; i
++) {
395 ovs_mutex_unlock(&coverage_mutex
);
400 coverage_read_counter(const char *name
, unsigned long long int *count
)
402 for (size_t i
= 0; i
< n_coverage_counters
; i
++) {
403 struct coverage_counter
*c
= coverage_counters
[i
];
405 if (!strcmp(c
->name
, name
)) {
406 ovs_mutex_lock(&coverage_mutex
);
407 c
->total
+= c
->count();
409 ovs_mutex_unlock(&coverage_mutex
);