/*
 * rcuq_test.c
 *
 * usage: rcuq_test <duration> <readers>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2013 Mike D. Day, IBM Corporation.
 */

#include <glib.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "qemu/atomic.h"
#include "qemu/rcu.h"
#include "qemu/compiler.h"
#include "qemu/osdep.h"
#include "qemu/thread.h"
#include "qemu/rcu_queue.h"

/*
 * Test variables.
 */

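/*
 * The worker threads accumulate into thread-local counters and fold
 * the results into the globals below under counts_mutex when they
 * finish; n_reclaims is the exception and is only ever touched from
 * the call_rcu thread.
 */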
static QemuMutex counts_mutex;
static long long n_reads = 0LL;
static long long n_updates = 0LL;
static long long n_reclaims = 0LL;
static long long n_nodes_removed = 0LL;
static long long n_nodes = 0LL;
static int g_test_in_charge = 0;

static int nthreadsrunning;

#define GOFLAG_INIT 0
#define GOFLAG_RUN 1
#define GOFLAG_STOP 2

static volatile int goflag = GOFLAG_INIT;

#define RCU_READ_RUN 1000
#define RCU_UPDATE_RUN 10
#define NR_THREADS 100
#define RCU_Q_LEN 100

static QemuThread threads[NR_THREADS];
static struct rcu_reader_data *data[NR_THREADS];
static int n_threads;

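/*
 * Pick a pseudo-random list position in [0, max).  The walkers below
 * count positions from 1, so a result of 0 simply means "touch
 * nothing on this pass".
 */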
static int select_random_el(int max)
{
    return (rand() % max);
}


static void create_thread(void *(*func)(void *))
{
    if (n_threads >= NR_THREADS) {
        fprintf(stderr, "Thread limit of %d exceeded!\n", NR_THREADS);
        exit(-1);
    }
    qemu_thread_create(&threads[n_threads], "test", func, &data[n_threads],
                       QEMU_THREAD_JOINABLE);
    n_threads++;
}

static void wait_all_threads(void)
{
    int i;

    for (i = 0; i < n_threads; i++) {
        qemu_thread_join(&threads[i]);
    }
    n_threads = 0;
}


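/* Each node embeds an rcu_head so it can be reclaimed via call_rcu1(). */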
struct list_element {
    QLIST_ENTRY(list_element) entry;
    struct rcu_head rcu;
};

static void reclaim_list_el(struct rcu_head *prcu)
{
    struct list_element *el = container_of(prcu, struct list_element, rcu);
    g_free(el);
    /* Accessed only from call_rcu thread. */
    n_reclaims++;
}

static QLIST_HEAD(q_list_head, list_element) Q_list_head;

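/*
 * Reader: walk the list inside an RCU read-side critical section,
 * counting every element seen.  Concurrent removals are safe because
 * reclamation of a removed node is deferred until after all readers
 * drop their read locks.
 */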
static void *rcu_q_reader(void *arg)
{
    long long n_reads_local = 0;
    struct list_element *el;

    *(struct rcu_reader_data **)arg = &rcu_reader;
    atomic_inc(&nthreadsrunning);
    while (goflag == GOFLAG_INIT) {
        g_usleep(1000);
    }

    while (goflag == GOFLAG_RUN) {
        rcu_read_lock();
        QLIST_FOREACH_RCU(el, &Q_list_head, entry) {
            n_reads_local++;
            if (goflag == GOFLAG_STOP) {
                break;
            }
        }
        rcu_read_unlock();

        g_usleep(100);
    }
    qemu_mutex_lock(&counts_mutex);
    n_reads += n_reads_local;
    qemu_mutex_unlock(&counts_mutex);
    return NULL;
}


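/*
 * Updater: on each pass remove one randomly chosen element (deferring
 * its reclamation to the call_rcu thread) and insert a fresh one at a
 * random position, then wait out a grace period with synchronize_rcu().
 */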
static void *rcu_q_updater(void *arg)
{
    int j, target_el;
    long long n_nodes_local = 0;
    long long n_updates_local = 0;
    long long n_removed_local = 0;
    struct list_element *el, *prev_el;

    *(struct rcu_reader_data **)arg = &rcu_reader;
    atomic_inc(&nthreadsrunning);
    while (goflag == GOFLAG_INIT) {
        g_usleep(1000);
    }

    while (goflag == GOFLAG_RUN) {
        target_el = select_random_el(RCU_Q_LEN);
        j = 0;
        /* FOREACH_RCU could work here but let's use both macros */
        QLIST_FOREACH_SAFE_RCU(prev_el, &Q_list_head, entry, el) {
            j++;
            if (target_el == j) {
                QLIST_REMOVE_RCU(prev_el, entry);
                /* may be more than one updater in the future */
                call_rcu1(&prev_el->rcu, reclaim_list_el);
                n_removed_local++;
                break;
            }
        }
        if (goflag == GOFLAG_STOP) {
            break;
        }
        target_el = select_random_el(RCU_Q_LEN);
        j = 0;
        QLIST_FOREACH_RCU(el, &Q_list_head, entry) {
            j++;
            if (target_el == j) {
                prev_el = g_new(struct list_element, 1);
                n_nodes_local++;
                QLIST_INSERT_BEFORE_RCU(el, prev_el, entry);
                break;
            }
        }

        n_updates_local += 2;
        synchronize_rcu();
    }
    synchronize_rcu();
    qemu_mutex_lock(&counts_mutex);
    n_nodes += n_nodes_local;
    n_updates += n_updates_local;
    n_nodes_removed += n_removed_local;
    qemu_mutex_unlock(&counts_mutex);
    return NULL;
}

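/* Fill the list with RCU_Q_LEN elements before the threads start. */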
static void rcu_qtest_init(void)
{
    struct list_element *new_el;
    int i;

    nthreadsrunning = 0;
    srand(time(0));
    for (i = 0; i < RCU_Q_LEN; i++) {
        new_el = g_new(struct list_element, 1);
        QLIST_INSERT_HEAD_RCU(&Q_list_head, new_el, entry);
    }
    qemu_mutex_lock(&counts_mutex);
    n_nodes += RCU_Q_LEN;
    qemu_mutex_unlock(&counts_mutex);
}

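/*
 * Wait until every reader and the updater has checked in, then let
 * them all run for the requested number of seconds before stopping them.
 */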
static void rcu_qtest_run(int duration, int nreaders)
{
    int nthreads = nreaders + 1;

    while (atomic_read(&nthreadsrunning) < nthreads) {
        g_usleep(1000);
    }

    goflag = GOFLAG_RUN;
    sleep(duration);
    goflag = GOFLAG_STOP;
    wait_all_threads();
}


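/*
 * One complete test: start nreaders readers plus one updater, run for
 * the given duration, drain the list, and wait for the call_rcu thread
 * to catch up before checking or printing the counters.
 */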
static void rcu_qtest(const char *test, int duration, int nreaders)
{
    int i;
    long long n_removed_local = 0;
    struct list_element *el, *prev_el;

    rcu_qtest_init();
    for (i = 0; i < nreaders; i++) {
        create_thread(rcu_q_reader);
    }
    create_thread(rcu_q_updater);
    rcu_qtest_run(duration, nreaders);

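    /* All worker threads have been joined; drain what is left of the list. */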
    QLIST_FOREACH_SAFE_RCU(prev_el, &Q_list_head, entry, el) {
        QLIST_REMOVE_RCU(prev_el, entry);
        call_rcu1(&prev_el->rcu, reclaim_list_el);
        n_removed_local++;
    }
    qemu_mutex_lock(&counts_mutex);
    n_nodes_removed += n_removed_local;
    qemu_mutex_unlock(&counts_mutex);
    synchronize_rcu();
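    /* Wait until every node we removed has actually been reclaimed. */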
    while (n_nodes_removed > n_reclaims) {
        g_usleep(100);
        synchronize_rcu();
    }
    if (g_test_in_charge) {
        g_assert_cmpint(n_nodes_removed, ==, n_reclaims);
    } else {
        printf("%s: %d readers; 1 updater; nodes read: %lld, "
               "nodes removed: %lld; nodes reclaimed: %lld\n",
               test, nthreadsrunning - 1, n_reads, n_nodes_removed,
               n_reclaims);
        exit(0);
    }
}

static void usage(int argc, char *argv[])
{
    fprintf(stderr, "Usage: %s duration nreaders\n", argv[0]);
    exit(-1);
}

static int gtest_seconds;

static void gtest_rcuq_one(void)
{
    rcu_qtest("rcuqtest", gtest_seconds / 4, 1);
}

static void gtest_rcuq_few(void)
{
    rcu_qtest("rcuqtest", gtest_seconds / 4, 5);
}

static void gtest_rcuq_many(void)
{
    rcu_qtest("rcuqtest", gtest_seconds / 2, 20);
}


int main(int argc, char *argv[])
{
    int duration = 0, readers = 0;

    qemu_mutex_init(&counts_mutex);
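    /*
     * With a leading '-' argument, hand control to the glib test
     * harness; otherwise treat argv[1] and argv[2] as the standalone
     * <duration> and <readers> parameters.
     */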
    if (argc >= 2) {
        if (argv[1][0] == '-') {
            g_test_init(&argc, &argv, NULL);
            if (g_test_quick()) {
                gtest_seconds = 4;
            } else {
                gtest_seconds = 20;
            }
            g_test_add_func("/rcu/qlist/single-threaded", gtest_rcuq_one);
            g_test_add_func("/rcu/qlist/short-few", gtest_rcuq_few);
            g_test_add_func("/rcu/qlist/long-many", gtest_rcuq_many);
            g_test_in_charge = 1;
            return g_test_run();
        }
        duration = strtoul(argv[1], NULL, 0);
    }
    if (argc >= 3) {
        readers = strtoul(argv[2], NULL, 0);
    }
    if (duration && readers) {
        rcu_qtest(argv[0], duration, readers);
        return 0;
    }

    usage(argc, argv);
    return -1;
}