/*
 * urcu-mb.c
 *
 * Userspace RCU library with explicit memory barriers
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 * Copyright 2015 Red Hat, Inc.
 *
 * Ported to QEMU by Paolo Bonzini <pbonzini@redhat.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include "qemu/osdep.h"
#include "qemu/rcu.h"
#include "qemu/atomic.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"
#include "qemu/lockable.h"
#if defined(CONFIG_MALLOC_TRIM)
#include <malloc.h>
#endif

/*
 * Global grace period counter.  Bit 0 is always one in rcu_gp_ctr.
 * Bits 1 and above are defined in synchronize_rcu.
 */
#define RCU_GP_LOCKED           (1UL << 0)
#define RCU_GP_CTR              (1UL << 1)

unsigned long rcu_gp_ctr = RCU_GP_LOCKED;
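
/*
 * Illustration (editor's note, not in the original file): each reader
 * thread publishes a snapshot of rcu_gp_ctr in its per-thread ctr when it
 * enters a read-side critical section, and 0 when it leaves.  Because
 * RCU_GP_LOCKED is always set in rcu_gp_ctr, a nonzero snapshot both marks
 * the reader as active and records which grace period it saw:
 *
 *     rcu_gp_ctr == 0b01, reader ctr == 0b01  ->  active, current period
 *     writer advances:    rcu_gp_ctr  = 0b11
 *     reader ctr == 0b01 != rcu_gp_ctr        ->  active, old period: wait
 *     reader ctr == 0                         ->  quiescent: done
 */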

QemuEvent rcu_gp_event;
static int in_drain_call_rcu;
static QemuMutex rcu_registry_lock;
static QemuMutex rcu_sync_lock;

/*
 * Check whether a quiescent state was crossed between the beginning of
 * wait_for_readers() and now.
 */
static inline int rcu_gp_ongoing(unsigned long *ctr)
{
    unsigned long v;

    v = qatomic_read(ctr);
    return v && (v != rcu_gp_ctr);
}

/* Written to only by each individual reader.  Read by both the reader and
 * the writers.
 */
QEMU_DEFINE_CO_TLS(struct rcu_reader_data, rcu_reader)

/* Protected by rcu_registry_lock. */
typedef QLIST_HEAD(, rcu_reader_data) ThreadList;
static ThreadList registry = QLIST_HEAD_INITIALIZER(registry);

/* Wait for the previous parity/grace period to be empty of readers. */
static void wait_for_readers(void)
{
    ThreadList qsreaders = QLIST_HEAD_INITIALIZER(qsreaders);
    struct rcu_reader_data *index, *tmp;

    for (;;) {
        /* We want to be notified of changes made to rcu_gp_ongoing
         * while we walk the list.
         */
        qemu_event_reset(&rcu_gp_event);

        QLIST_FOREACH(index, &registry, node) {
            qatomic_set(&index->waiting, true);
        }

        /* Here, order the stores to index->waiting before the loads of
         * index->ctr.  Pairs with smp_mb_placeholder() in rcu_read_unlock(),
         * ensuring that the loads of index->ctr are sequentially consistent.
         *
         * If this is the last iteration, this barrier also prevents
         * frees from seeping upwards, and orders the two wait phases
         * on architectures with 32-bit longs; see synchronize_rcu().
         */
        smp_mb_global();

        QLIST_FOREACH_SAFE(index, &registry, node, tmp) {
            if (!rcu_gp_ongoing(&index->ctr)) {
                QLIST_REMOVE(index, node);
                QLIST_INSERT_HEAD(&qsreaders, index, node);

                /* No need for memory barriers here; at worst we
                 * get some extra futex wakeups.
                 */
                qatomic_set(&index->waiting, false);
            } else if (qatomic_read(&in_drain_call_rcu)) {
                notifier_list_notify(&index->force_rcu, NULL);
            }
        }

        if (QLIST_EMPTY(&registry)) {
            break;
        }

        /* Wait for one thread to report a quiescent state and try again.
         * Release rcu_registry_lock, so rcu_(un)register_thread() doesn't
         * have to wait too long.
         *
         * rcu_register_thread() may add nodes to &registry; it will not
         * wake up synchronize_rcu, but that is okay because at least another
         * thread must exit its RCU read-side critical section before
         * synchronize_rcu is done.  The next iteration of the loop will
         * move the new thread's rcu_reader from &registry to &qsreaders,
         * because rcu_gp_ongoing() will return false.
         *
         * rcu_unregister_thread() may remove nodes from &qsreaders instead
         * of &registry if it runs during qemu_event_wait.  That's okay;
         * the node then will not be added back to &registry by QLIST_SWAP
         * below.  The invariant is that the node is part of one list when
         * rcu_registry_lock is released.
         */
        qemu_mutex_unlock(&rcu_registry_lock);
        qemu_event_wait(&rcu_gp_event);
        qemu_mutex_lock(&rcu_registry_lock);
    }

    /* Put the reader list back into the registry. */
    QLIST_SWAP(&registry, &qsreaders, node);
}

void synchronize_rcu(void)
{
    QEMU_LOCK_GUARD(&rcu_sync_lock);

    /* Write RCU-protected pointers before reading p_rcu_reader->ctr.
     * Pairs with smp_mb_placeholder() in rcu_read_lock().
     *
     * Also orders the write to RCU-protected pointers before
     * the write to rcu_gp_ctr.
     */
    smp_mb_global();

    QEMU_LOCK_GUARD(&rcu_registry_lock);
    if (!QLIST_EMPTY(&registry)) {
        if (sizeof(rcu_gp_ctr) < 8) {
            /* For architectures with 32-bit longs, a two-subphase algorithm
             * ensures we do not encounter overflow bugs.
             *
             * Switch parity: 0 -> 1, 1 -> 0.
             */
            qatomic_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
            wait_for_readers();
            qatomic_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
        } else {
            /* Increment current grace period. */
            qatomic_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
        }

        wait_for_readers();
    }
}
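
/*
 * Usage sketch (editor's illustration; 'Config', 'config', 'new_config'
 * and 'use' are hypothetical, the rest is the reader/writer API from
 * "qemu/rcu.h" and "qemu/atomic.h"):
 *
 *     // Reader, possibly concurrent with the writer below:
 *     rcu_read_lock();
 *     Config *c = qatomic_rcu_read(&config);
 *     use(c);                        // 'c' stays valid until the unlock
 *     rcu_read_unlock();
 *
 *     // Writer: publish the new version, wait out the readers that may
 *     // still hold the old one, then reclaim it:
 *     Config *old = config;
 *     qatomic_rcu_set(&config, new_config);
 *     synchronize_rcu();             // a full grace period has elapsed
 *     g_free(old);
 */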

#define RCU_CALL_MIN_SIZE        30

/* Multi-producer, single-consumer queue based on urcu/static/wfqueue.h
 * from liburcu.  Note that head is only used by the consumer.
 */
static struct rcu_head dummy;
static struct rcu_head *head = &dummy, **tail = &dummy.next;
static int rcu_call_count;
static QemuEvent rcu_call_ready_event;

static void enqueue(struct rcu_head *node)
{
    struct rcu_head **old_tail;

    node->next = NULL;

    /*
     * Make this node the tail of the list.  The node will be
     * used by further enqueue operations, but it will not
     * be dequeued yet...
     */
    old_tail = qatomic_xchg(&tail, &node->next);

    /*
     * ... until it is pointed to from another item in the list.
     * In the meantime, try_dequeue() will find a NULL next pointer
     * and loop.
     *
     * Synchronizes with qatomic_load_acquire() in try_dequeue().
     */
    qatomic_store_release(old_tail, node);
}

static struct rcu_head *try_dequeue(void)
{
    struct rcu_head *node, *next;

retry:
    /* Head is only written by this thread, so no need for barriers. */
    node = head;

    /*
     * If the head node has NULL in its next pointer, the value is
     * wrong and we need to wait until its enqueuer finishes the update.
     */
    next = qatomic_load_acquire(&node->next);
    if (!next) {
        return NULL;
    }

    /*
     * Test for an empty list, which we do not expect.  Note that for
     * the consumer, head and tail are always consistent.  The head
     * is consistent because only the consumer reads/writes it.
     * The tail is consistent because updating it is the first step
     * of an enqueue.  It is only the next pointers that might be
     * inconsistent.
     */
    if (head == &dummy && qatomic_read(&tail) == &dummy.next) {
        abort();
    }

    /*
     * Since we are the sole consumer, and we excluded the empty case
     * above, the queue will always have at least two nodes: the
     * dummy node, and the one being removed.  So we do not need to update
     * the tail pointer.
     */
    head = next;

    /* If we dequeued the dummy node, add it back at the end and retry. */
    if (node == &dummy) {
        enqueue(node);
        goto retry;
    }

    return node;
}
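
/*
 * Worked trace (editor's illustration).  Starting from the empty queue
 *
 *     head = &dummy, tail = &dummy.next, dummy.next = NULL
 *
 * enqueue(A) swings tail to &A.next and then stores dummy.next = A:
 *
 *     head = &dummy -> A, tail = &A.next
 *
 * try_dequeue() pops the dummy node first, re-enqueues it behind A and
 * retries, so the caller receives A.  Keeping the dummy node in the queue
 * means the queue never becomes empty, which is what lets the consumer run
 * without any producer synchronization beyond the release/acquire pair on
 * the next pointers.
 */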

static void *call_rcu_thread(void *opaque)
{
    struct rcu_head *node;

    rcu_register_thread();

    for (;;) {
        int tries = 0;
        int n = qatomic_read(&rcu_call_count);

        /* Heuristically wait for a decent number of callbacks to pile up.
         * Fetch rcu_call_count now; we must only process elements that were
         * added before synchronize_rcu() starts.
         */
        while (n == 0 || (n < RCU_CALL_MIN_SIZE && ++tries <= 5)) {
            g_usleep(10000);
            if (n == 0) {
                qemu_event_reset(&rcu_call_ready_event);
                n = qatomic_read(&rcu_call_count);
                if (n == 0) {
#if defined(CONFIG_MALLOC_TRIM)
                    malloc_trim(4 * 1024 * 1024);
#endif
                    qemu_event_wait(&rcu_call_ready_event);
                }
            }
            n = qatomic_read(&rcu_call_count);
        }

        qatomic_sub(&rcu_call_count, n);
        synchronize_rcu();
        bql_lock();
        while (n > 0) {
            node = try_dequeue();
            while (!node) {
                bql_unlock();
                qemu_event_reset(&rcu_call_ready_event);
                node = try_dequeue();
                if (!node) {
                    qemu_event_wait(&rcu_call_ready_event);
                    node = try_dequeue();
                }
                bql_lock();
            }

            n--;
            node->func(node);
        }
        bql_unlock();
    }
    abort();
}

void call_rcu1(struct rcu_head *node, void (*func)(struct rcu_head *node))
{
    node->func = func;
    enqueue(node);
    qatomic_inc(&rcu_call_count);
    qemu_event_set(&rcu_call_ready_event);
}
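
/*
 * Usage sketch (editor's illustration; 'MyObj' and 'my_obj_free' are
 * hypothetical): callers embed a struct rcu_head in the object to be
 * reclaimed and pass a callback that recovers the object from it:
 *
 *     typedef struct MyObj {
 *         struct rcu_head rcu;
 *         char *data;
 *     } MyObj;
 *
 *     static void my_obj_free(struct rcu_head *h)
 *     {
 *         MyObj *obj = container_of(h, MyObj, rcu);
 *         g_free(obj->data);
 *         g_free(obj);
 *     }
 *
 *     // after removing 'obj' from all RCU-visible pointers:
 *     call_rcu1(&obj->rcu, my_obj_free);   // runs after a grace period
 */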

struct rcu_drain {
    struct rcu_head rcu;
    QemuEvent drain_complete_event;
};

static void drain_rcu_callback(struct rcu_head *node)
{
    struct rcu_drain *event = (struct rcu_drain *)node;
    qemu_event_set(&event->drain_complete_event);
}

/*
 * This function ensures that all pending RCU callbacks
 * on the current thread are done executing.
 *
 * It drops the Big QEMU Lock during the wait to allow the RCU
 * thread to process the callbacks.
 */
void drain_call_rcu(void)
{
    struct rcu_drain rcu_drain;
    bool locked = bql_locked();

    memset(&rcu_drain, 0, sizeof(struct rcu_drain));
    qemu_event_init(&rcu_drain.drain_complete_event, false);

    if (locked) {
        bql_unlock();
    }

    /*
     * RCU callbacks are invoked in the same order in which they
     * are registered, thus we can be sure that when 'drain_rcu_callback'
     * is called, all RCU callbacks that were registered on this thread
     * prior to calling this function are completed.
     *
     * Note that since we have only one global queue of the RCU callbacks,
     * we also end up waiting for most of the RCU callbacks that were
     * registered on other threads, but this is a side effect that should
     * not be relied upon.
     */
    qatomic_inc(&in_drain_call_rcu);
    call_rcu1(&rcu_drain.rcu, drain_rcu_callback);
    qemu_event_wait(&rcu_drain.drain_complete_event);
    qatomic_dec(&in_drain_call_rcu);

    if (locked) {
        bql_lock();
    }
}
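
/*
 * Example (editor's sketch, reusing the hypothetical 'obj' above): make
 * sure a previously scheduled reclamation has actually run before acting
 * on state that depends on it:
 *
 *     call_rcu1(&obj->rcu, my_obj_free);
 *     drain_call_rcu();        // my_obj_free(&obj->rcu) has run by now
 */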

void rcu_register_thread(void)
{
    assert(get_ptr_rcu_reader()->ctr == 0);
    qemu_mutex_lock(&rcu_registry_lock);
    QLIST_INSERT_HEAD(&registry, get_ptr_rcu_reader(), node);
    qemu_mutex_unlock(&rcu_registry_lock);
}

void rcu_unregister_thread(void)
{
    qemu_mutex_lock(&rcu_registry_lock);
    QLIST_REMOVE(get_ptr_rcu_reader(), node);
    qemu_mutex_unlock(&rcu_registry_lock);
}
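
/*
 * Lifecycle sketch (editor's illustration; 'worker' is hypothetical):
 * a thread registers itself before its first read-side critical section
 * and unregisters before exiting; otherwise synchronize_rcu() does not
 * know about it and cannot wait for its critical sections:
 *
 *     static void *worker(void *opaque)
 *     {
 *         rcu_register_thread();
 *         rcu_read_lock();
 *         // ... dereference RCU-protected data ...
 *         rcu_read_unlock();
 *         rcu_unregister_thread();
 *         return NULL;
 *     }
 */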

void rcu_add_force_rcu_notifier(Notifier *n)
{
    qemu_mutex_lock(&rcu_registry_lock);
    notifier_list_add(&get_ptr_rcu_reader()->force_rcu, n);
    qemu_mutex_unlock(&rcu_registry_lock);
}

void rcu_remove_force_rcu_notifier(Notifier *n)
{
    qemu_mutex_lock(&rcu_registry_lock);
    notifier_remove(n);
    qemu_mutex_unlock(&rcu_registry_lock);
}
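
/*
 * Note (editor's addition): force_rcu notifiers are invoked from
 * wait_for_readers() when drain_call_rcu() is in progress and a reader
 * still sits in a read-side critical section; QEMU's TCG accelerator
 * uses them to kick a vCPU out of a long-running chain of translation
 * blocks.  Sketch with a hypothetical notify function:
 *
 *     static void my_force_rcu(Notifier *n, void *opaque)
 *     {
 *         // ask the owning thread to reach a quiescent state soon
 *     }
 *
 *     Notifier n = { .notify = my_force_rcu };
 *     rcu_add_force_rcu_notifier(&n);
 *     ...
 *     rcu_remove_force_rcu_notifier(&n);
 */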

static void rcu_init_complete(void)
{
    QemuThread thread;

    qemu_mutex_init(&rcu_registry_lock);
    qemu_mutex_init(&rcu_sync_lock);
    qemu_event_init(&rcu_gp_event, true);

    qemu_event_init(&rcu_call_ready_event, false);

    /* The caller is assumed to hold the BQL, so the call_rcu thread
     * must have been quiescent even after forking; just recreate it.
     */
    qemu_thread_create(&thread, "call_rcu", call_rcu_thread,
                       NULL, QEMU_THREAD_DETACHED);

    rcu_register_thread();
}

/* Fork handlers stay active while atfork_depth >= 1.  The counter starts
 * at 1, so they are enabled by default; rcu_disable_atfork() turns them
 * off and rcu_enable_atfork() turns them back on.
 */
static int atfork_depth = 1;

void rcu_enable_atfork(void)
{
    atfork_depth++;
}

void rcu_disable_atfork(void)
{
    atfork_depth--;
}

#ifdef CONFIG_POSIX
static void rcu_init_lock(void)
{
    if (atfork_depth < 1) {
        return;
    }

    qemu_mutex_lock(&rcu_sync_lock);
    qemu_mutex_lock(&rcu_registry_lock);
}

static void rcu_init_unlock(void)
{
    if (atfork_depth < 1) {
        return;
    }

    qemu_mutex_unlock(&rcu_registry_lock);
    qemu_mutex_unlock(&rcu_sync_lock);
}

static void rcu_init_child(void)
{
    if (atfork_depth < 1) {
        return;
    }

    memset(&registry, 0, sizeof(registry));
    rcu_init_complete();
}
#endif

static void __attribute__((__constructor__)) rcu_init(void)
{
    smp_mb_global_init();
#ifdef CONFIG_POSIX
    pthread_atfork(rcu_init_lock, rcu_init_unlock, rcu_init_child);
#endif
    rcu_init_complete();
}