/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include "test.h"

/*
 * Timer
 * =====
 *
 * #. Stress test 1.
 *
 * The objective of the timer stress tests is to check that there are no
 * race conditions in list and status management. This test launches,
 * resets and stops the timer very often on many cores at the same
 * time.
 *
 * - Only one timer is used for this test.
 * - On each core, the rte_timer_manage() function is called from the main
 *   loop every 3 microseconds.
 * - In the main loop, the timer may be reset (randomly, with a
 *   probability of 0.5 %) 100 microseconds later on a random core, or
 *   stopped (with a probability of 0.5 % also).
 * - In the callback, the timer can be reset (randomly, with a
 *   probability of 0.5 %) 100 microseconds later on the same core or
 *   on another core (same probability), or stopped (same
 *   probability).
 *
 * #. Stress test 2.
 *
 * The objective of this test is similar to the first in that it attempts
 * to find whether there are any race conditions in the timer library.
 * However, it is less complex in terms of operations performed and
 * duration, as it is designed to have a predictable outcome that can be
 * tested.
 *
 * - A set of timers is initialized for use by the test.
 * - All cores then schedule all of the timers simultaneously, so
 *   conflicts should occur.
 * - Then there is a delay while we wait for the timers to expire.
 * - Then the master lcore calls rte_timer_manage() and we check that all
 *   timers have had their callbacks called exactly once - no more, no less.
 * - Then we repeat the process, except that after setting up the timers,
 *   we have all cores randomly reschedule them.
 * - Again we check that the expected number of callbacks has occurred when
 *   we call rte_timer_manage().
 *
 * #. Basic test.
 *
 * This test performs basic functional checks of the timers. The test
 * uses four different timers that are loaded and stopped under
 * specific conditions in specific contexts.
 *
 * - Four timers are used for this test.
 * - On each core, the rte_timer_manage() function is called from the main
 *   loop every 3 microseconds.
 *
 * The autotest Python script checks that the behavior is correct:
 *
 * - timer0
 *
 *   - At initialization, timer0 is loaded by the master core, on the
 *     master core in "single" mode (time = 1 second).
 *   - In the first 19 callbacks, timer0 is reloaded on the same core,
 *     then it is explicitly stopped at the 20th call.
 *   - At t=25s, timer0 is reloaded once by timer2.
 *
 * - timer1
 *
 *   - At initialization, timer1 is loaded by the master core, on the
 *     master core in "single" mode (time = 2 seconds).
 *   - In the first 9 callbacks, timer1 is reloaded on another
 *     core. After the 10th callback, timer1 is not reloaded anymore.
 *
 * - timer2
 *
 *   - At initialization, timer2 is loaded by the master core, on the
 *     master core in "periodical" mode (time = 1 second).
 *   - In the callback, when t=25s, it stops timer3 and reloads timer0
 *     on the current core.
 *
 * - timer3
 *
 *   - At initialization, timer3 is loaded by the master core, on
 *     another core in "periodical" mode (time = 1 second).
 *   - It is stopped at t=25s by timer2.
 */

#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <sys/queue.h>
#include <math.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_timer.h>
#include <rte_random.h>
#include <rte_malloc.h>
#include <rte_pause.h>

#define TEST_DURATION_S 1 /* in seconds */
#define NB_TIMER 4

#define RTE_LOGTYPE_TESTTIMER RTE_LOGTYPE_USER3

static volatile uint64_t end_time;
static volatile int test_failed;

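/* Per-timer bookkeeping used by the tests: the timer itself, a numeric
 * id and a counter incremented at each callback invocation. */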
struct mytimerinfo {
	struct rte_timer tim;
	unsigned id;
	unsigned count;
};

static struct mytimerinfo mytiminfo[NB_TIMER];

static void timer_basic_cb(struct rte_timer *tim, void *arg);

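/* Helper: synchronously (re)load one of the test timers with the given
 * period (in cycles), type, destination lcore and callback. */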
static void
mytimer_reset(struct mytimerinfo *timinfo, uint64_t ticks,
	      enum rte_timer_type type, unsigned tim_lcore,
	      rte_timer_cb_t fct)
{
	rte_timer_reset_sync(&timinfo->tim, ticks, type, tim_lcore,
			     fct, timinfo);
}

/* timer callback for stress tests */
static void
timer_stress_cb(__rte_unused struct rte_timer *tim,
		__rte_unused void *arg)
{
	long r;
	unsigned lcore_id = rte_lcore_id();
	uint64_t hz = rte_get_timer_hz();

	if (rte_timer_pending(tim))
		return;

	r = rte_rand();
	if ((r & 0xff) == 0) {
		mytimer_reset(&mytiminfo[0], hz, SINGLE, lcore_id,
			      timer_stress_cb);
	}
	else if ((r & 0xff) == 1) {
		mytimer_reset(&mytiminfo[0], hz, SINGLE,
			      rte_get_next_lcore(lcore_id, 0, 1),
			      timer_stress_cb);
	}
	else if ((r & 0xff) == 2) {
		rte_timer_stop(&mytiminfo[0].tim);
	}
}

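/* Main loop of stress test 1: every core services the timer list with
 * rte_timer_manage() and randomly resets or stops the shared timer
 * until end_time is reached. */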
static int
timer_stress_main_loop(__rte_unused void *arg)
{
	uint64_t hz = rte_get_timer_hz();
	unsigned lcore_id = rte_lcore_id();
	uint64_t cur_time;
	int64_t diff = 0;
	long r;

	while (diff >= 0) {

		/* call the timer handler on each core */
		rte_timer_manage();

		/* simulate the processing of a packet
		 * (1 us = 2000 cycles at 2 GHz) */
		rte_delay_us(1);

		/* randomly stop or reset timer */
		r = rte_rand();
		lcore_id = rte_get_next_lcore(lcore_id, 0, 1);
		if ((r & 0xff) == 0) {
			/* 100 us */
			mytimer_reset(&mytiminfo[0], hz/10000, SINGLE, lcore_id,
				      timer_stress_cb);
		}
		else if ((r & 0xff) == 1) {
			rte_timer_stop_sync(&mytiminfo[0].tim);
		}
		cur_time = rte_get_timer_cycles();
		diff = end_time - cur_time;
	}

	lcore_id = rte_lcore_id();
	RTE_LOG(INFO, TESTTIMER, "core %u finished\n", lcore_id);

	return 0;
}

/* Need to synchronize slave lcores through multiple steps. */
enum { SLAVE_WAITING = 1, SLAVE_RUN_SIGNAL, SLAVE_RUNNING, SLAVE_FINISHED };
static rte_atomic16_t slave_state[RTE_MAX_LCORE];

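/* Reset every slave lcore to the WAITING state before a run. */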
static void
master_init_slaves(void)
{
	unsigned i;

	RTE_LCORE_FOREACH_SLAVE(i) {
		rte_atomic16_set(&slave_state[i], SLAVE_WAITING);
	}
}

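/* Signal all slave lcores to run and wait until each one acknowledges. */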
static void
master_start_slaves(void)
{
	unsigned i;

	RTE_LCORE_FOREACH_SLAVE(i) {
		rte_atomic16_set(&slave_state[i], SLAVE_RUN_SIGNAL);
	}
	RTE_LCORE_FOREACH_SLAVE(i) {
		while (rte_atomic16_read(&slave_state[i]) != SLAVE_RUNNING)
			rte_pause();
	}
}

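/* Block until every slave lcore has reported SLAVE_FINISHED. */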
static void
master_wait_for_slaves(void)
{
	unsigned i;

	RTE_LCORE_FOREACH_SLAVE(i) {
		while (rte_atomic16_read(&slave_state[i]) != SLAVE_FINISHED)
			rte_pause();
	}
}

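/* Called on a slave lcore: wait for the run signal, then acknowledge it. */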
static void
slave_wait_to_start(void)
{
	unsigned lcore_id = rte_lcore_id();

	while (rte_atomic16_read(&slave_state[lcore_id]) != SLAVE_RUN_SIGNAL)
		rte_pause();
	rte_atomic16_set(&slave_state[lcore_id], SLAVE_RUNNING);
}

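/* Called on a slave lcore: report completion to the master. */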
static void
slave_finish(void)
{
	unsigned lcore_id = rte_lcore_id();

	rte_atomic16_set(&slave_state[lcore_id], SLAVE_FINISHED);
}


static volatile int cb_count = 0;

/* callback for second stress test. will only be called
 * on master lcore */
static void
timer_stress2_cb(struct rte_timer *tim __rte_unused, void *arg __rte_unused)
{
	cb_count++;
}

#define NB_STRESS2_TIMERS 8192

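/* Main loop of stress test 2: all cores schedule the same block of
 * timers on the master lcore, then the master calls rte_timer_manage()
 * and checks that every callback ran exactly once. */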
static int
timer_stress2_main_loop(__rte_unused void *arg)
{
	static struct rte_timer *timers;
	int i, ret;
	uint64_t delay = rte_get_timer_hz() / 20;
	unsigned lcore_id = rte_lcore_id();
	unsigned master = rte_get_master_lcore();
	int32_t my_collisions = 0;
	static rte_atomic32_t collisions;

	if (lcore_id == master) {
		cb_count = 0;
		test_failed = 0;
		rte_atomic32_set(&collisions, 0);
		master_init_slaves();
		timers = rte_malloc(NULL, sizeof(*timers) * NB_STRESS2_TIMERS, 0);
		if (timers == NULL) {
			printf("Test Failed\n");
			printf("- Cannot allocate memory for timers\n");
			test_failed = 1;
			master_start_slaves();
			goto cleanup;
		}
		for (i = 0; i < NB_STRESS2_TIMERS; i++)
			rte_timer_init(&timers[i]);
		master_start_slaves();
	} else {
		slave_wait_to_start();
		if (test_failed)
			goto cleanup;
	}

	/* have all cores schedule all timers on master lcore */
	for (i = 0; i < NB_STRESS2_TIMERS; i++) {
		ret = rte_timer_reset(&timers[i], delay, SINGLE, master,
				timer_stress2_cb, NULL);
		/* there will be collisions when multiple cores simultaneously
		 * configure the same timers */
		if (ret != 0)
			my_collisions++;
	}
	if (my_collisions != 0)
		rte_atomic32_add(&collisions, my_collisions);

	/* wait long enough for timers to expire */
	rte_delay_ms(100);

	/* all cores rendezvous */
	if (lcore_id == master) {
		master_wait_for_slaves();
	} else {
		slave_finish();
	}

	/* now check that we get the right number of callbacks */
	if (lcore_id == master) {
		my_collisions = rte_atomic32_read(&collisions);
		if (my_collisions != 0)
			printf("- %d timer reset collisions (OK)\n", my_collisions);
		rte_timer_manage();
		if (cb_count != NB_STRESS2_TIMERS) {
			printf("Test Failed\n");
			printf("- Stress test 2, part 1 failed\n");
			printf("- Expected %d callbacks, got %d\n", NB_STRESS2_TIMERS,
					cb_count);
			test_failed = 1;
			master_start_slaves();
			goto cleanup;
		}
		cb_count = 0;

		/* proceed */
		master_start_slaves();
	} else {
		/* proceed */
		slave_wait_to_start();
		if (test_failed)
			goto cleanup;
	}

	/* now test again, just stop and restart timers at random after init */
	for (i = 0; i < NB_STRESS2_TIMERS; i++)
		rte_timer_reset(&timers[i], delay, SINGLE, master,
				timer_stress2_cb, NULL);

	/* pick random timer to reset, stopping them first half the time */
	for (i = 0; i < 100000; i++) {
		int r = rand() % NB_STRESS2_TIMERS;
		if (i % 2)
			rte_timer_stop(&timers[r]);
		rte_timer_reset(&timers[r], delay, SINGLE, master,
				timer_stress2_cb, NULL);
	}

	/* wait long enough for timers to expire */
	rte_delay_ms(100);

	/* now check that we get the right number of callbacks */
	if (lcore_id == master) {
		master_wait_for_slaves();

		rte_timer_manage();
		if (cb_count != NB_STRESS2_TIMERS) {
			printf("Test Failed\n");
			printf("- Stress test 2, part 2 failed\n");
			printf("- Expected %d callbacks, got %d\n", NB_STRESS2_TIMERS,
					cb_count);
			test_failed = 1;
		} else {
			printf("Test OK\n");
		}
	}

cleanup:
	if (lcore_id == master) {
		master_wait_for_slaves();
		if (timers != NULL) {
			rte_free(timers);
			timers = NULL;
		}
	} else {
		slave_finish();
	}

	return 0;
}

/* timer callback for basic tests */
static void
timer_basic_cb(struct rte_timer *tim, void *arg)
{
	struct mytimerinfo *timinfo = arg;
	uint64_t hz = rte_get_timer_hz();
	unsigned lcore_id = rte_lcore_id();
	uint64_t cur_time = rte_get_timer_cycles();

	if (rte_timer_pending(tim))
		return;

	timinfo->count++;

	RTE_LOG(INFO, TESTTIMER,
		"%"PRIu64": callback id=%u count=%u on core %u\n",
		cur_time, timinfo->id, timinfo->count, lcore_id);

	/* reload timer 0 on same core */
	if (timinfo->id == 0 && timinfo->count < 20) {
		mytimer_reset(timinfo, hz, SINGLE, lcore_id, timer_basic_cb);
		return;
	}

	/* reload timer 1 on next core */
	if (timinfo->id == 1 && timinfo->count < 10) {
		mytimer_reset(timinfo, hz*2, SINGLE,
			      rte_get_next_lcore(lcore_id, 0, 1),
			      timer_basic_cb);
		return;
	}

	/* Explicitly stop timer 0. Once stop() has been called, we can even
	 * erase the content of the structure: it is not referenced
	 * anymore by any code (in the case of a dynamically allocated
	 * structure, it could be freed). */
	if (timinfo->id == 0 && timinfo->count == 20) {

		/* stop_sync() is not needed, because we know that the
		 * status of the timer is only modified by this core */
		rte_timer_stop(tim);
		memset(tim, 0xAA, sizeof(struct rte_timer));
		return;
	}

	/* stop timer3, and restart a new timer0 (it was removed 5
	 * seconds ago) for a single shot */
	if (timinfo->id == 2 && timinfo->count == 25) {
		rte_timer_stop_sync(&mytiminfo[3].tim);

		/* need to reinit because structure was erased with 0xAA */
		rte_timer_init(&mytiminfo[0].tim);
		mytimer_reset(&mytiminfo[0], hz, SINGLE, lcore_id,
			      timer_basic_cb);
	}
}

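/* Main loop of the basic test: the master lcore loads the four timers,
 * then every core services them with rte_timer_manage() until end_time
 * is reached. */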
static int
timer_basic_main_loop(__rte_unused void *arg)
{
	uint64_t hz = rte_get_timer_hz();
	unsigned lcore_id = rte_lcore_id();
	uint64_t cur_time;
	int64_t diff = 0;

	/* launch all timers on core 0 */
	if (lcore_id == rte_get_master_lcore()) {
		mytimer_reset(&mytiminfo[0], hz/4, SINGLE, lcore_id,
			      timer_basic_cb);
		mytimer_reset(&mytiminfo[1], hz/2, SINGLE, lcore_id,
			      timer_basic_cb);
		mytimer_reset(&mytiminfo[2], hz/4, PERIODICAL, lcore_id,
			      timer_basic_cb);
		mytimer_reset(&mytiminfo[3], hz/4, PERIODICAL,
			      rte_get_next_lcore(lcore_id, 0, 1),
			      timer_basic_cb);
	}

	while (diff >= 0) {

		/* call the timer handler on each core */
		rte_timer_manage();

		/* simulate the processing of a packet
		 * (3 us = 6000 cycles at 2 GHz) */
		rte_delay_us(3);

		cur_time = rte_get_timer_cycles();
		diff = end_time - cur_time;
	}
	RTE_LOG(INFO, TESTTIMER, "core %u finished\n", lcore_id);

	return 0;
}

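/* Sanity check: when the HPET is available, verify that elapsed time
 * measured with the TSC and with the HPET agree to within 0.1%. */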
static int
timer_sanity_check(void)
{
#ifdef RTE_LIBEAL_USE_HPET
	if (eal_timer_source != EAL_TIMER_HPET) {
		printf("Not using HPET, can't sanity check timer sources\n");
		return 0;
	}

	const uint64_t t_hz = rte_get_tsc_hz();
	const uint64_t h_hz = rte_get_hpet_hz();
	printf("Hertz values: TSC = %"PRIu64", HPET = %"PRIu64"\n", t_hz, h_hz);

	const uint64_t tsc_start = rte_get_tsc_cycles();
	const uint64_t hpet_start = rte_get_hpet_cycles();
	rte_delay_ms(100); /* delay 1/10 second */
	const uint64_t tsc_end = rte_get_tsc_cycles();
	const uint64_t hpet_end = rte_get_hpet_cycles();
	printf("Measured cycles: TSC = %"PRIu64", HPET = %"PRIu64"\n",
			tsc_end-tsc_start, hpet_end-hpet_start);

	const double tsc_time = (double)(tsc_end - tsc_start)/t_hz;
	const double hpet_time = (double)(hpet_end - hpet_start)/h_hz;
	/* get the percentage that the times differ by */
	const double time_diff = fabs(tsc_time - hpet_time)*100/tsc_time;
	printf("Measured time: TSC = %.4f, HPET = %.4f\n", tsc_time, hpet_time);

	printf("Elapsed time measured by TSC and HPET differ by %f%%\n",
			time_diff);
	if (time_diff > 0.1) {
		printf("Error: times differ by >0.1%%\n");
		return -1;
	}
#endif
	return 0;
}

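/* Entry point of the autotest: runs the two stress tests followed by the
 * basic test on all available lcores. */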
static int
test_timer(void)
{
	unsigned i;
	uint64_t cur_time;
	uint64_t hz;

	if (rte_lcore_count() < 2) {
		printf("Not enough cores for timer_autotest, expecting at least 2\n");
		return TEST_SKIPPED;
	}

	/* sanity check our timer sources and timer config values */
	if (timer_sanity_check() < 0) {
		printf("Timer sanity checks failed\n");
		return TEST_FAILED;
	}

	/* init timer */
	for (i = 0; i < NB_TIMER; i++) {
		memset(&mytiminfo[i], 0, sizeof(struct mytimerinfo));
		mytiminfo[i].id = i;
		rte_timer_init(&mytiminfo[i].tim);
	}

	/* calculate the "end of test" time */
	cur_time = rte_get_timer_cycles();
	hz = rte_get_timer_hz();
	end_time = cur_time + (hz * TEST_DURATION_S);

	/* start other cores */
	printf("Start timer stress tests\n");
	rte_eal_mp_remote_launch(timer_stress_main_loop, NULL, CALL_MASTER);
	rte_eal_mp_wait_lcore();

	/* stop timer 0 used for stress test */
	rte_timer_stop_sync(&mytiminfo[0].tim);

	/* run a second, slightly different set of stress tests */
	printf("\nStart timer stress tests 2\n");
	test_failed = 0;
	rte_eal_mp_remote_launch(timer_stress2_main_loop, NULL, CALL_MASTER);
	rte_eal_mp_wait_lcore();
	if (test_failed)
		return TEST_FAILED;

	/* calculate the "end of test" time */
	cur_time = rte_get_timer_cycles();
	hz = rte_get_timer_hz();
	end_time = cur_time + (hz * TEST_DURATION_S);

	/* start other cores */
	printf("\nStart timer basic tests\n");
	rte_eal_mp_remote_launch(timer_basic_main_loop, NULL, CALL_MASTER);
	rte_eal_mp_wait_lcore();

	/* stop all timers */
	for (i = 0; i < NB_TIMER; i++) {
		rte_timer_stop_sync(&mytiminfo[i].tim);
	}

	rte_timer_dump_stats(stdout);

	return TEST_SUCCESS;
}

REGISTER_TEST_COMMAND(timer_autotest, test_timer);