/*
 * Copyright (c) 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <config.h>
#include "coverage.h"
#include <inttypes.h>
#include <stdlib.h>
#include "dynamic-string.h"
#include "hash.h"
#include "svec.h"
#include "timeval.h"
#include "unixctl.h"
#include "util.h"
#include "vlog.h"
VLOG_DEFINE_THIS_MODULE(coverage);
31 /* The coverage counters. */
32 #if USE_LINKER_SECTIONS
33 extern struct coverage_counter
*__start_coverage
[];
34 extern struct coverage_counter
*__stop_coverage
[];
35 #define coverage_counters __start_coverage
36 #define n_coverage_counters (__stop_coverage - __start_coverage)
37 #else /* !USE_LINKER_SECTIONS */
38 #define COVERAGE_COUNTER(COUNTER) \
39 DECLARE_EXTERN_PER_THREAD_DATA(unsigned int, \
41 DEFINE_EXTERN_PER_THREAD_DATA(counter_##COUNTER, 0); \
42 static unsigned int COUNTER##_count(void) \
44 unsigned int *countp = counter_##COUNTER##_get(); \
45 unsigned int count = *countp; \
49 extern struct coverage_counter counter_##COUNTER; \
50 struct coverage_counter counter_##COUNTER \
51 = { #COUNTER, COUNTER##_count, 0, 0, {0}, {0} };
52 #include "coverage.def"
53 #undef COVERAGE_COUNTER
55 extern struct coverage_counter
*coverage_counters
[];
56 struct coverage_counter
*coverage_counters
[] = {
57 #define COVERAGE_COUNTER(NAME) &counter_##NAME,
58 #include "coverage.def"
59 #undef COVERAGE_COUNTER
61 #define n_coverage_counters ARRAY_SIZE(coverage_counters)
62 #endif /* !USE_LINKER_SECTIONS */
64 static struct ovs_mutex coverage_mutex
= OVS_MUTEX_INITIALIZER
;
66 DEFINE_STATIC_PER_THREAD_DATA(long long int, coverage_clear_time
, LLONG_MIN
);
67 static long long int coverage_run_time
= LLONG_MIN
;
69 /* Index counter used to compute the moving average array's index. */
70 static unsigned int idx_count
= 0;
72 static void coverage_read(struct svec
*);
73 static unsigned int coverage_array_sum(const unsigned int *arr
,
74 const unsigned int len
);
77 coverage_unixctl_show(struct unixctl_conn
*conn
, int argc OVS_UNUSED
,
78 const char *argv
[] OVS_UNUSED
, void *aux OVS_UNUSED
)
84 coverage_read(&lines
);
85 reply
= svec_join(&lines
, "\n", "\n");
86 unixctl_command_reply(conn
, reply
);
94 unixctl_command_register("coverage/show", "", 0, 0,
95 coverage_unixctl_show
, NULL
);
98 /* Sorts coverage counters in descending order by total, within equal
99 * totals alphabetically by name. */
101 compare_coverage_counters(const void *a_
, const void *b_
)
103 const struct coverage_counter
*const *ap
= a_
;
104 const struct coverage_counter
*const *bp
= b_
;
105 const struct coverage_counter
*a
= *ap
;
106 const struct coverage_counter
*b
= *bp
;
107 if (a
->total
!= b
->total
) {
108 return a
->total
< b
->total
? 1 : -1;
110 return strcmp(a
->name
, b
->name
);
117 struct coverage_counter
**c
;
121 /* Sort coverage counters into groups with equal totals. */
122 c
= xmalloc(n_coverage_counters
* sizeof *c
);
123 ovs_mutex_lock(&coverage_mutex
);
124 for (i
= 0; i
< n_coverage_counters
; i
++) {
125 c
[i
] = coverage_counters
[i
];
127 ovs_mutex_unlock(&coverage_mutex
);
128 qsort(c
, n_coverage_counters
, sizeof *c
, compare_coverage_counters
);
130 /* Hash the names in each group along with the rank. */
132 for (i
= 0; i
< n_coverage_counters
; ) {
139 hash
= hash_int(i
, hash
);
140 for (j
= i
; j
< n_coverage_counters
; j
++) {
141 if (c
[j
]->total
!= c
[i
]->total
) {
144 hash
= hash_string(c
[j
]->name
, hash
);
151 return hash_int(n_groups
, hash
);
/* Returns true if a report with 'hash' was already seen within roughly the
 * last day, false otherwise (in which case 'hash' is recorded so that the
 * next identical report is suppressed).  Collisions in the 1024-bit filter
 * can suppress a genuinely new report; that is an accepted trade-off.
 *
 * NOTE(review): uses unlocked static state; presumably only reached under
 * coverage_log()'s rate limiter -- confirm callers serialize. */
static bool
coverage_hit(uint32_t hash)
{
    enum { HIT_BITS = 1024, BITS_PER_WORD = 32 };
    static uint32_t hit[HIT_BITS / BITS_PER_WORD];
    BUILD_ASSERT_DECL(IS_POW2(HIT_BITS));

    static long long int next_clear = LLONG_MIN;

    unsigned int bit_index = hash & (HIT_BITS - 1);
    unsigned int word_index = bit_index / BITS_PER_WORD;
    unsigned int word_mask = 1u << (bit_index % BITS_PER_WORD);

    /* Expire coverage hash suppression once a day. */
    if (time_msec() >= next_clear) {
        memset(hit, 0, sizeof hit);
        next_clear = time_msec() + 60 * 60 * 24 * 1000LL;
    }

    if (hit[word_index] & word_mask) {
        return true;
    } else {
        hit[word_index] |= word_mask;
        return false;
    }
}
181 /* Logs the coverage counters, unless a similar set of events has already been
184 * This function logs at log level VLL_INFO. Use care before adjusting this
185 * level, because depending on its configuration, syslogd can write changes
186 * synchronously, which can cause the coverage messages to take several seconds
191 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(1, 3);
193 if (!VLOG_DROP_INFO(&rl
)) {
194 uint32_t hash
= coverage_hash();
195 if (coverage_hit(hash
)) {
196 VLOG_INFO("Skipping details of duplicate event coverage for "
197 "hash=%08"PRIx32
, hash
);
204 coverage_read(&lines
);
205 SVEC_FOR_EACH (i
, line
, &lines
) {
206 VLOG_INFO("%s", line
);
208 svec_destroy(&lines
);
213 /* Adds coverage counter information to 'lines'. */
215 coverage_read(struct svec
*lines
)
217 struct coverage_counter
**c
= coverage_counters
;
218 unsigned long long int *totals
;
223 hash
= coverage_hash();
226 svec_add_nocopy(lines
,
227 xasprintf("Event coverage, avg rate over last: %d "
228 "seconds, last minute, last hour, "
230 COVERAGE_RUN_INTERVAL
/1000, hash
));
232 totals
= xmalloc(n_coverage_counters
* sizeof *totals
);
233 ovs_mutex_lock(&coverage_mutex
);
234 for (i
= 0; i
< n_coverage_counters
; i
++) {
235 totals
[i
] = c
[i
]->total
;
237 ovs_mutex_unlock(&coverage_mutex
);
239 for (i
= 0; i
< n_coverage_counters
; i
++) {
241 /* Shows the averaged per-second rates for the last
242 * COVERAGE_RUN_INTERVAL interval, the last minute and
244 svec_add_nocopy(lines
,
245 xasprintf("%-24s %5.1f/sec %9.3f/sec "
246 "%13.4f/sec total: %llu",
248 (c
[i
]->min
[(idx_count
- 1) % MIN_AVG_LEN
]
249 * 1000.0 / COVERAGE_RUN_INTERVAL
),
250 coverage_array_sum(c
[i
]->min
, MIN_AVG_LEN
) / 60.0,
251 coverage_array_sum(c
[i
]->hr
, HR_AVG_LEN
) / 3600.0,
258 svec_add_nocopy(lines
, xasprintf("%"PRIuSIZE
" events never hit", n_never_hit
));
262 /* Runs approximately every COVERAGE_CLEAR_INTERVAL amount of time to
263 * synchronize per-thread counters with global counters. Every thread maintains
264 * a separate timer to ensure all counters are periodically aggregated. */
268 long long int now
, *thread_time
;
271 thread_time
= coverage_clear_time_get();
273 /* Initialize the coverage_clear_time. */
274 if (*thread_time
== LLONG_MIN
) {
275 *thread_time
= now
+ COVERAGE_CLEAR_INTERVAL
;
278 if (now
>= *thread_time
) {
281 ovs_mutex_lock(&coverage_mutex
);
282 for (i
= 0; i
< n_coverage_counters
; i
++) {
283 struct coverage_counter
*c
= coverage_counters
[i
];
284 c
->total
+= c
->count();
286 ovs_mutex_unlock(&coverage_mutex
);
287 *thread_time
= now
+ COVERAGE_CLEAR_INTERVAL
;
291 /* Runs approximately every COVERAGE_RUN_INTERVAL amount of time to update the
292 * coverage counters' 'min' and 'hr' array. 'min' array is for cumulating
293 * per second counts into per minute count. 'hr' array is for cumulating per
294 * minute counts into per hour count. Every thread may call this function. */
298 /* Defines the moving average array index variables. */
299 static unsigned int min_idx
, hr_idx
;
300 struct coverage_counter
**c
= coverage_counters
;
303 ovs_mutex_lock(&coverage_mutex
);
305 /* Initialize the coverage_run_time. */
306 if (coverage_run_time
== LLONG_MIN
) {
307 coverage_run_time
= now
+ COVERAGE_RUN_INTERVAL
;
310 if (now
>= coverage_run_time
) {
312 /* Computes the number of COVERAGE_RUN_INTERVAL slots, since
313 * it is possible that the actual run interval is multiple of
314 * COVERAGE_RUN_INTERVAL. */
315 int slots
= (now
- coverage_run_time
) / COVERAGE_RUN_INTERVAL
+ 1;
317 for (i
= 0; i
< n_coverage_counters
; i
++) {
318 unsigned int count
, portion
;
319 unsigned int m_idx
= min_idx
;
320 unsigned int h_idx
= hr_idx
;
321 unsigned int idx
= idx_count
;
323 /* Computes the differences between the current total and the one
324 * recorded in last invocation of coverage_run(). */
325 count
= c
[i
]->total
- c
[i
]->last_total
;
326 c
[i
]->last_total
= c
[i
]->total
;
327 /* The count over the time interval is evenly distributed
328 * among slots by calculating the portion. */
329 portion
= count
/ slots
;
331 for (j
= 0; j
< slots
; j
++) {
332 /* Updates the index variables. */
333 /* The m_idx is increased from 0 to MIN_AVG_LEN - 1. Every
334 * time the m_idx finishes a cycle (a cycle is one minute),
335 * the h_idx is incremented by 1. */
336 m_idx
= idx
% MIN_AVG_LEN
;
337 h_idx
= idx
/ MIN_AVG_LEN
;
339 c
[i
]->min
[m_idx
] = portion
+ (j
== (slots
- 1)
340 ? count
% slots
: 0);
341 c
[i
]->hr
[h_idx
] = m_idx
== 0
343 : (c
[i
]->hr
[h_idx
] + c
[i
]->min
[m_idx
]);
344 /* This is to guarantee that h_idx ranges from 0 to 59. */
345 idx
= (idx
+ 1) % (MIN_AVG_LEN
* HR_AVG_LEN
);
349 /* Updates the global index variables. */
350 idx_count
= (idx_count
+ slots
) % (MIN_AVG_LEN
* HR_AVG_LEN
);
351 min_idx
= idx_count
% MIN_AVG_LEN
;
352 hr_idx
= idx_count
/ MIN_AVG_LEN
;
353 /* Updates the run time. */
354 coverage_run_time
= now
+ COVERAGE_RUN_INTERVAL
;
356 ovs_mutex_unlock(&coverage_mutex
);
360 coverage_array_sum(const unsigned int *arr
, const unsigned int len
)
362 unsigned int sum
= 0;
365 ovs_mutex_lock(&coverage_mutex
);
366 for (i
= 0; i
< len
; i
++) {
369 ovs_mutex_unlock(&coverage_mutex
);