/*
 * rcutorture.c: simple user-level performance/stress test of RCU.
 *
 * Usage:
 *     ./rcu <nreaders> rperf [ <seconds> ]
 *         Run a read-side performance test with the specified
 *         number of readers for <seconds> seconds.
 *     ./rcu <nupdaters> uperf [ <seconds> ]
 *         Run an update-side performance test with the specified
 *         number of updaters and specified duration.
 *     ./rcu <nreaders> perf [ <seconds> ]
 *         Run a combined read/update performance test with the
 *         specified number of readers, one updater, and the
 *         specified duration.
 *
 * The above tests produce output as follows:
 *
 *     n_reads: 46008000 n_updates: 146026 nreaders: 2 nupdaters: 1 duration: 1
 *     ns/read: 43.4707 ns/update: 6848.1
 *
 * The first line lists the total number of RCU reads and updates executed
 * during the test, the number of reader threads, the number of updater
 * threads, and the duration of the test in seconds.  The second line
 * lists the average duration of each type of operation in nanoseconds,
 * or "nan" if the corresponding type of operation was not performed.
 *
 *     ./rcu <nreaders> stress [ <seconds> ]
 *         Run a stress test with the specified number of readers and
 *         one updater.
 *
 * This test produces output as follows:
 *
 *     n_reads: 114633217 n_updates: 3903415 n_mberror: 0
 *     rcu_stress_count: 114618391 14826 0 0 0 0 0 0 0 0 0
 *
 * The first line lists the number of RCU read and update operations
 * executed, followed by the number of memory-ordering violations
 * (which will be zero in a correct RCU implementation).  The second
 * line lists the number of readers observing progressively more stale
 * data.  A correct RCU implementation will have all but the first two
 * numbers zero.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2008 Paul E. McKenney, IBM Corporation.
 */

/*
 * Test variables.
 */

#include "qemu/osdep.h"
#include "qemu/atomic.h"
#include "qemu/rcu.h"
#include "qemu/thread.h"

long long n_reads = 0LL;
long n_updates = 0L;
int nthreadsrunning;

#define GOFLAG_INIT 0
#define GOFLAG_RUN 1
#define GOFLAG_STOP 2

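/*
 * Start/stop protocol: worker threads poll goflag, which the driver
 * flips from INIT to RUN to open the timed measurement interval and
 * to STOP to close it.
 */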
static volatile int goflag = GOFLAG_INIT;

#define RCU_READ_RUN 1000

#define NR_THREADS 100
static QemuMutex counts_mutex;
static QemuThread threads[NR_THREADS];
static struct rcu_reader_data *data[NR_THREADS];
static int n_threads;

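/*
 * Each worker thread is given &data[i] as its argument and publishes
 * a pointer to its own rcu_reader there once it has registered with
 * RCU.
 */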
static void create_thread(void *(*func)(void *))
{
    if (n_threads >= NR_THREADS) {
        fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS);
        exit(-1);
    }
    qemu_thread_create(&threads[n_threads], "test", func, &data[n_threads],
                       QEMU_THREAD_JOINABLE);
    n_threads++;
}

static void wait_all_threads(void)
{
    int i;

    for (i = 0; i < n_threads; i++) {
        qemu_thread_join(&threads[i]);
    }
    n_threads = 0;
}

/*
 * Performance test.
 */

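/*
 * Readers do nothing but enter and leave read-side critical sections,
 * in batches of RCU_READ_RUN per goflag check, so n_reads measures the
 * raw overhead of rcu_read_lock()/rcu_read_unlock().
 */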
static void *rcu_read_perf_test(void *arg)
{
    int i;
    long long n_reads_local = 0;

    rcu_register_thread();

    *(struct rcu_reader_data **)arg = &rcu_reader;
    atomic_inc(&nthreadsrunning);
    while (goflag == GOFLAG_INIT) {
        g_usleep(1000);
    }
    while (goflag == GOFLAG_RUN) {
        for (i = 0; i < RCU_READ_RUN; i++) {
            rcu_read_lock();
            rcu_read_unlock();
        }
        n_reads_local += RCU_READ_RUN;
    }
    qemu_mutex_lock(&counts_mutex);
    n_reads += n_reads_local;
    qemu_mutex_unlock(&counts_mutex);

    rcu_unregister_thread();
    return NULL;
}

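/*
 * Updaters loop on synchronize_rcu(); each iteration therefore costs
 * one full grace period.
 */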
static void *rcu_update_perf_test(void *arg)
{
    long long n_updates_local = 0;

    rcu_register_thread();

    *(struct rcu_reader_data **)arg = &rcu_reader;
    atomic_inc(&nthreadsrunning);
    while (goflag == GOFLAG_INIT) {
        g_usleep(1000);
    }
    while (goflag == GOFLAG_RUN) {
        synchronize_rcu();
        n_updates_local++;
    }
    qemu_mutex_lock(&counts_mutex);
    n_updates += n_updates_local;
    qemu_mutex_unlock(&counts_mutex);

    rcu_unregister_thread();
    return NULL;
}

static void perftestinit(void)
{
    nthreadsrunning = 0;
}

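/*
 * Wait for every worker to check in, run for the requested duration,
 * then stop and report.  ns/op is (duration * 1e9 * nthreads) / nops,
 * which yields "nan" (0/0) for a side that ran no threads.
 */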
static void perftestrun(int nthreads, int duration, int nreaders, int nupdaters)
{
    while (atomic_read(&nthreadsrunning) < nthreads) {
        g_usleep(1000);
    }
    goflag = GOFLAG_RUN;
    g_usleep(duration * G_USEC_PER_SEC);
    goflag = GOFLAG_STOP;
    wait_all_threads();
    printf("n_reads: %lld n_updates: %ld nreaders: %d nupdaters: %d duration: %d\n",
           n_reads, n_updates, nreaders, nupdaters, duration);
    printf("ns/read: %g ns/update: %g\n",
           ((duration * 1000*1000*1000.*(double)nreaders) /
            (double)n_reads),
           ((duration * 1000*1000*1000.*(double)nupdaters) /
            (double)n_updates));
    exit(0);
}

static void perftest(int nreaders, int duration)
{
    int i;

    perftestinit();
    for (i = 0; i < nreaders; i++) {
        create_thread(rcu_read_perf_test);
    }
    create_thread(rcu_update_perf_test);
    perftestrun(i + 1, duration, nreaders, 1);
}

static void rperftest(int nreaders, int duration)
{
    int i;

    perftestinit();
    for (i = 0; i < nreaders; i++) {
        create_thread(rcu_read_perf_test);
    }
    perftestrun(i, duration, nreaders, 0);
}

static void uperftest(int nupdaters, int duration)
{
    int i;

    perftestinit();
    for (i = 0; i < nupdaters; i++) {
        create_thread(rcu_update_perf_test);
    }
    perftestrun(i, duration, 0, nupdaters);
}

/*
 * Stress test.
 */

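/*
 * The updater cycles rcu_stress_current through rcu_stress_array and
 * ages every non-current element by bumping its pipe_count once per
 * update.  A reader that sees pipe_count == n read an element that is
 * n updates stale; rcu_stress_count[] histograms those observations.
 */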
#define RCU_STRESS_PIPE_LEN 10

struct rcu_stress {
    int pipe_count;
    int mbtest;
};

struct rcu_stress rcu_stress_array[RCU_STRESS_PIPE_LEN] = { { 0 } };
struct rcu_stress *rcu_stress_current;
int rcu_stress_idx;

int n_mberror;
long long rcu_stress_count[RCU_STRESS_PIPE_LEN + 1];


static void *rcu_read_stress_test(void *arg)
{
    int i;
    struct rcu_stress *p;
    int pc;
    long long n_reads_local = 0;
    long long rcu_stress_local[RCU_STRESS_PIPE_LEN + 1] = { 0 };
    volatile int garbage = 0;

    rcu_register_thread();

    *(struct rcu_reader_data **)arg = &rcu_reader;
    while (goflag == GOFLAG_INIT) {
        g_usleep(1000);
    }
    while (goflag == GOFLAG_RUN) {
        rcu_read_lock();
        p = atomic_rcu_read(&rcu_stress_current);
        if (p->mbtest == 0) {
            n_mberror++;
        }
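        /* Nested read-side critical section: RCU read locks must nest. */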
        rcu_read_lock();
        for (i = 0; i < 100; i++) {
            garbage++;
        }
        rcu_read_unlock();
        pc = p->pipe_count;
        rcu_read_unlock();
        if ((pc > RCU_STRESS_PIPE_LEN) || (pc < 0)) {
            pc = RCU_STRESS_PIPE_LEN;
        }
        rcu_stress_local[pc]++;
        n_reads_local++;
    }
    qemu_mutex_lock(&counts_mutex);
    n_reads += n_reads_local;
    for (i = 0; i <= RCU_STRESS_PIPE_LEN; i++) {
        rcu_stress_count[i] += rcu_stress_local[i];
    }
    qemu_mutex_unlock(&counts_mutex);

    rcu_unregister_thread();
    return NULL;
}

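/*
 * The single updater poisons the next slot's mbtest, reinitializes the
 * slot, sets mbtest back to 1 and only then publishes it with
 * atomic_rcu_set().  A reader that obtains the new pointer yet sees
 * mbtest == 0 has caught a memory-ordering violation.  All other slots
 * are then aged by one before waiting out a grace period.
 */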
static void *rcu_update_stress_test(void *arg)
{
    int i;
    struct rcu_stress *p;

    rcu_register_thread();

    *(struct rcu_reader_data **)arg = &rcu_reader;
    while (goflag == GOFLAG_INIT) {
        g_usleep(1000);
    }
    while (goflag == GOFLAG_RUN) {
        i = rcu_stress_idx + 1;
        if (i >= RCU_STRESS_PIPE_LEN) {
            i = 0;
        }
        p = &rcu_stress_array[i];
        p->mbtest = 0;
        smp_mb();
        p->pipe_count = 0;
        p->mbtest = 1;
        atomic_rcu_set(&rcu_stress_current, p);
        rcu_stress_idx = i;
        for (i = 0; i < RCU_STRESS_PIPE_LEN; i++) {
            if (i != rcu_stress_idx) {
                rcu_stress_array[i].pipe_count++;
            }
        }
        synchronize_rcu();
        n_updates++;
    }

    rcu_unregister_thread();
    return NULL;
}

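/*
 * Fake updaters generate extra grace-period traffic: they call
 * synchronize_rcu() concurrently with the real updater but never touch
 * the stress array.
 */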
static void *rcu_fake_update_stress_test(void *arg)
{
    rcu_register_thread();

    *(struct rcu_reader_data **)arg = &rcu_reader;
    while (goflag == GOFLAG_INIT) {
        g_usleep(1000);
    }
    while (goflag == GOFLAG_RUN) {
        synchronize_rcu();
        g_usleep(1000);
    }

    rcu_unregister_thread();
    return NULL;
}

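/*
 * Standalone stress driver: nreaders readers, one real updater and
 * five fake updaters, run for the requested duration, then print the
 * staleness histogram.
 */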
static void stresstest(int nreaders, int duration)
{
    int i;

    rcu_stress_current = &rcu_stress_array[0];
    rcu_stress_current->pipe_count = 0;
    rcu_stress_current->mbtest = 1;
    for (i = 0; i < nreaders; i++) {
        create_thread(rcu_read_stress_test);
    }
    create_thread(rcu_update_stress_test);
    for (i = 0; i < 5; i++) {
        create_thread(rcu_fake_update_stress_test);
    }
    goflag = GOFLAG_RUN;
    g_usleep(duration * G_USEC_PER_SEC);
    goflag = GOFLAG_STOP;
    wait_all_threads();
    printf("n_reads: %lld n_updates: %ld n_mberror: %d\n",
           n_reads, n_updates, n_mberror);
    printf("rcu_stress_count:");
    for (i = 0; i <= RCU_STRESS_PIPE_LEN; i++) {
        printf(" %lld", rcu_stress_count[i]);
    }
    printf("\n");
    exit(0);
}

/* GTest interface */

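/*
 * Same scenario as stresstest(), but asserting instead of printing:
 * there must be no memory-ordering errors, and no reader may observe
 * data that is two or more updates stale.
 */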
static void gtest_stress(int nreaders, int duration)
{
    int i;

    rcu_stress_current = &rcu_stress_array[0];
    rcu_stress_current->pipe_count = 0;
    rcu_stress_current->mbtest = 1;
    for (i = 0; i < nreaders; i++) {
        create_thread(rcu_read_stress_test);
    }
    create_thread(rcu_update_stress_test);
    for (i = 0; i < 5; i++) {
        create_thread(rcu_fake_update_stress_test);
    }
    goflag = GOFLAG_RUN;
    g_usleep(duration * G_USEC_PER_SEC);
    goflag = GOFLAG_STOP;
    wait_all_threads();
    g_assert_cmpint(n_mberror, ==, 0);
    for (i = 2; i <= RCU_STRESS_PIPE_LEN; i++) {
        g_assert_cmpint(rcu_stress_count[i], ==, 0);
    }
}

static void gtest_stress_1_1(void)
{
    gtest_stress(1, 1);
}

static void gtest_stress_10_1(void)
{
    gtest_stress(10, 1);
}

static void gtest_stress_1_5(void)
{
    gtest_stress(1, 5);
}

static void gtest_stress_10_5(void)
{
    gtest_stress(10, 5);
}

/*
 * Main program.
 */

static void usage(int argc, char *argv[])
{
    fprintf(stderr,
            "Usage: %s [nreaders [ rperf | uperf | perf | stress [ seconds ] ] ]\n",
            argv[0]);
    exit(-1);
}

int main(int argc, char *argv[])
{
    int nreaders = 1;
    int duration = 1;

    qemu_mutex_init(&counts_mutex);
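
    /* Any argument starting with '-' hands control to the GTest
     * harness; quick mode registers 1-second stress tests, thorough
     * mode 5-second ones. */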
    if (argc >= 2 && argv[1][0] == '-') {
        g_test_init(&argc, &argv, NULL);
        if (g_test_quick()) {
            g_test_add_func("/rcu/torture/1reader", gtest_stress_1_1);
            g_test_add_func("/rcu/torture/10readers", gtest_stress_10_1);
        } else {
            g_test_add_func("/rcu/torture/1reader", gtest_stress_1_5);
            g_test_add_func("/rcu/torture/10readers", gtest_stress_10_5);
        }
        return g_test_run();
    }

    if (argc >= 2) {
        nreaders = strtoul(argv[1], NULL, 0);
    }
    if (argc > 3) {
        duration = strtoul(argv[3], NULL, 0);
    }
    if (argc < 3 || strcmp(argv[2], "stress") == 0) {
        stresstest(nreaders, duration);
    } else if (strcmp(argv[2], "rperf") == 0) {
        rperftest(nreaders, duration);
    } else if (strcmp(argv[2], "uperf") == 0) {
        uperftest(nreaders, duration);
    } else if (strcmp(argv[2], "perf") == 0) {
        perftest(nreaders, duration);
    }
    usage(argc, argv);
    return 0;
}