/*
 * urcu-mb.c
 *
 * Userspace RCU library with explicit memory barriers
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 * Copyright 2015 Red Hat, Inc.
 *
 * Ported to QEMU by Paolo Bonzini <pbonzini@redhat.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/rcu.h"
#include "qemu/atomic.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"

/*
 * Global grace period counter.  Bit 0 is always one in rcu_gp_ctr.
 * Bits 1 and above are defined in synchronize_rcu.
 */
#define RCU_GP_LOCKED           (1UL << 0)
#define RCU_GP_CTR              (1UL << 1)

unsigned long rcu_gp_ctr = RCU_GP_LOCKED;

QemuEvent rcu_gp_event;
static QemuMutex rcu_registry_lock;
static QemuMutex rcu_sync_lock;

/*
 * Check whether a quiescent state was crossed between the start of the
 * current grace period (in synchronize_rcu) and now.
 */
static inline int rcu_gp_ongoing(unsigned long *ctr)
{
    unsigned long v;

    v = atomic_read(ctr);
    return v && (v != rcu_gp_ctr);
}
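
/* For reference, a simplified sketch of the reader side that makes this
 * predicate meaningful; the real inline helpers live in "qemu/rcu.h":
 *
 *     rcu_read_lock:    rcu_reader.ctr = atomic_read(&rcu_gp_ctr);
 *                       smp_mb();  // critical-section reads must not
 *                                  // move before the ctr store
 *     rcu_read_unlock:  atomic_set(&rcu_reader.ctr, 0);  // quiescent
 *
 * A ctr of zero means the thread is not in a critical section; a nonzero
 * ctr equal to rcu_gp_ctr means the critical section began after the
 * current grace period started, so it need not be waited on.
 */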

/* Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
__thread struct rcu_reader_data rcu_reader;

/* Protected by rcu_registry_lock. */
typedef QLIST_HEAD(, rcu_reader_data) ThreadList;
static ThreadList registry = QLIST_HEAD_INITIALIZER(registry);

/* Wait for previous parity/grace period to be empty of readers. */
static void wait_for_readers(void)
{
    ThreadList qsreaders = QLIST_HEAD_INITIALIZER(qsreaders);
    struct rcu_reader_data *index, *tmp;

    for (;;) {
        /* We want to be notified of changes that affect rcu_gp_ongoing()
         * while we walk the list.
         */
        qemu_event_reset(&rcu_gp_event);

        /* Instead of using atomic_mb_set for index->waiting, and
         * atomic_mb_read for index->ctr, memory barriers are placed
         * manually since writes to different threads are independent.
         * qemu_event_reset has acquire semantics, so no memory barrier
         * is needed here.
         */
        QLIST_FOREACH(index, &registry, node) {
            atomic_set(&index->waiting, true);
        }

        /* Here, order the stores to index->waiting before the
         * loads of index->ctr.
         */
        smp_mb();

        QLIST_FOREACH_SAFE(index, &registry, node, tmp) {
            if (!rcu_gp_ongoing(&index->ctr)) {
                QLIST_REMOVE(index, node);
                QLIST_INSERT_HEAD(&qsreaders, index, node);

                /* No need for mb_set here; at worst we will get
                 * a few extra futex wakeups.
                 */
                atomic_set(&index->waiting, false);
            }
        }

        if (QLIST_EMPTY(&registry)) {
            break;
        }

        /* Wait for one thread to report a quiescent state and try again.
         * Release rcu_registry_lock, so rcu_(un)register_thread() doesn't
         * have to wait too long.
         *
         * rcu_register_thread() may add nodes to &registry; it will not
         * wake up synchronize_rcu, but that is okay because at least another
         * thread must exit its RCU read-side critical section before
         * synchronize_rcu is done.  The next iteration of the loop will
         * move the new thread's rcu_reader from &registry to &qsreaders,
         * because rcu_gp_ongoing() will return false.
         *
         * rcu_unregister_thread() may remove nodes from &qsreaders instead
         * of &registry if it runs during qemu_event_wait.  That's okay;
         * the node then will not be added back to &registry by QLIST_SWAP
         * below.  The invariant is that the node is part of one list when
         * rcu_registry_lock is released.
         */
        qemu_mutex_unlock(&rcu_registry_lock);
        qemu_event_wait(&rcu_gp_event);
        qemu_mutex_lock(&rcu_registry_lock);
    }

    /* Put back the reader list in the registry. */
    QLIST_SWAP(&registry, &qsreaders, node);
}
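
/* A sketch of the handshake above, assuming the reader-side helpers in
 * "qemu/rcu.h" (rcu_read_unlock clears rcu_reader.ctr and wakes
 * rcu_gp_event when it sees "waiting" set):
 *
 *     writer (wait_for_readers)          reader (rcu_read_unlock)
 *     -------------------------          ------------------------
 *     index->waiting = true;
 *     smp_mb();                          ctr = 0;
 *     v = atomic_read(&index->ctr);      if (waiting) {
 *     if (v && v != rcu_gp_ctr) {            waiting = false;
 *         qemu_event_wait(&rcu_gp_event);    qemu_event_set(&rcu_gp_event);
 *     }                                  }
 *
 * Either the writer sees ctr == 0 and moves the reader to &qsreaders, or
 * the reader sees waiting == true and wakes the writer for another pass.
 */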

void synchronize_rcu(void)
{
    qemu_mutex_lock(&rcu_sync_lock);
    qemu_mutex_lock(&rcu_registry_lock);

    if (!QLIST_EMPTY(&registry)) {
        /* In either case, the atomic_mb_set below blocks stores that free
         * old RCU-protected pointers.
         */
        if (sizeof(rcu_gp_ctr) < 8) {
            /* For architectures with 32-bit longs, a two-subphase algorithm
             * ensures we do not encounter overflow bugs.
             *
             * Switch parity: 0 -> 1, 1 -> 0.
             */
            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
            wait_for_readers();
            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
        } else {
            /* Increment current grace period. */
            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
        }

        wait_for_readers();
    }

    qemu_mutex_unlock(&rcu_registry_lock);
    qemu_mutex_unlock(&rcu_sync_lock);
}
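
/* A worked example of the parity trick above; readers copy rcu_gp_ctr
 * into rcu_reader.ctr when they enter a critical section:
 *
 *     rcu_gp_ctr = 0b01                   // RCU_GP_LOCKED, parity 0
 *     reader A:  ctr = 0b01               // enters critical section
 *     writer:    rcu_gp_ctr ^= 0b10       // now 0b11, parity 1
 *     writer:    rcu_gp_ongoing(&A->ctr)  // 0b01 != 0b11 -> wait for A
 *     reader B:  ctr = 0b11               // started after the switch,
 *     writer:    rcu_gp_ongoing(&B->ctr)  // 0b11 == 0b11 -> ignore B
 *
 * With plain increments, a reader that slept for 2^31 grace periods on a
 * 32-bit long could see its stale ctr alias the current rcu_gp_ctr; with
 * parity there are only two phases, and the two wait_for_readers() calls
 * flush the readers of each parity in turn.
 */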

#define RCU_CALL_MIN_SIZE        30

/* Multi-producer, single-consumer queue based on urcu/static/wfqueue.h
 * from liburcu.  Note that head is only used by the consumer.
 */
static struct rcu_head dummy;
static struct rcu_head *head = &dummy, **tail = &dummy.next;
static int rcu_call_count;
static QemuEvent rcu_call_ready_event;

static void enqueue(struct rcu_head *node)
{
    struct rcu_head **old_tail;

    node->next = NULL;
    old_tail = atomic_xchg(&tail, &node->next);
    atomic_mb_set(old_tail, node);
}

static struct rcu_head *try_dequeue(void)
{
    struct rcu_head *node, *next;

retry:
    /* Test for an empty list, which we do not expect.  Note that for
     * the consumer head and tail are always consistent.  The head
     * is consistent because only the consumer reads/writes it.
     * The tail is consistent because updating it is the first step
     * of enqueuing.  It is only the next pointers that might be
     * inconsistent.
     */
    if (head == &dummy && atomic_mb_read(&tail) == &dummy.next) {
        abort();
    }

    /* If the head node has NULL in its next pointer, the value is
     * wrong and we need to wait until its enqueuer finishes the update.
     */
    node = head;
    next = atomic_mb_read(&head->next);
    if (!next) {
        return NULL;
    }

    /* Since we are the sole consumer, and we excluded the empty case
     * above, the queue will always have at least two nodes: the
     * dummy node, and the one being removed.  So we do not need to update
     * the tail pointer.
     */
    head = next;

    /* If we dequeued the dummy node, add it back at the end and retry. */
    if (node == &dummy) {
        enqueue(node);
        goto retry;
    }

    return node;
}
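
/* An illustrative trace of two producers racing in enqueue(); they still
 * serialize through the atomic_xchg on tail:
 *
 *     initially:  head -> dummy -> NULL,  tail == &dummy.next
 *     P1 xchg:    tail = &a.next, P1 owns old_tail == &dummy.next
 *     P2 xchg:    tail = &b.next, P2 owns old_tail == &a.next
 *     P2 store:   a.next = &b        // may become visible before...
 *     P1 store:   dummy.next = &a    // ...this one
 *
 * Between P1's xchg and its store, the consumer sees head->next == NULL
 * and try_dequeue() returns NULL; the queue is momentarily inconsistent
 * but never loses nodes, because each producer writes exactly one next
 * pointer: the one returned to it by the xchg.
 */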

static void *call_rcu_thread(void *opaque)
{
    struct rcu_head *node;

    rcu_register_thread();

    for (;;) {
        int tries = 0;
        int n = atomic_read(&rcu_call_count);

        /* Heuristically wait for a decent number of callbacks to pile up.
         * Fetch rcu_call_count now; we only have to process elements that
         * were added before synchronize_rcu() starts.
         */
        while (n == 0 || (n < RCU_CALL_MIN_SIZE && ++tries <= 5)) {
            g_usleep(10000);
            if (n == 0) {
                qemu_event_reset(&rcu_call_ready_event);
                n = atomic_read(&rcu_call_count);
                if (n == 0) {
                    qemu_event_wait(&rcu_call_ready_event);
                }
            }
            n = atomic_read(&rcu_call_count);
        }

        atomic_sub(&rcu_call_count, n);
        synchronize_rcu();
        qemu_mutex_lock_iothread();
        while (n > 0) {
            node = try_dequeue();
            while (!node) {
                qemu_mutex_unlock_iothread();
                qemu_event_reset(&rcu_call_ready_event);
                node = try_dequeue();
                if (!node) {
                    qemu_event_wait(&rcu_call_ready_event);
                    node = try_dequeue();
                }
                qemu_mutex_lock_iothread();
            }

            n--;
            node->func(node);
        }
        qemu_mutex_unlock_iothread();
    }
    abort();
}

void call_rcu1(struct rcu_head *node, void (*func)(struct rcu_head *node))
{
    node->func = func;
    enqueue(node);
    atomic_inc(&rcu_call_count);
    qemu_event_set(&rcu_call_ready_event);
}
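
/* A minimal usage sketch, with a hypothetical Foo type: embed a
 * struct rcu_head in the object to be reclaimed and pass a function that
 * recovers the enclosing object (the call_rcu() macro in "qemu/rcu.h"
 * wraps this pattern):
 *
 *     struct Foo {
 *         struct rcu_head rcu;
 *         int payload;
 *     };
 *
 *     static void foo_reclaim(struct rcu_head *rcu)
 *     {
 *         struct Foo *foo = container_of(rcu, struct Foo, rcu);
 *         g_free(foo);
 *     }
 *
 *     // after removing every globally visible pointer to foo:
 *     call_rcu1(&foo->rcu, foo_reclaim);
 *
 * foo_reclaim then runs in the call_rcu thread after a full grace period,
 * i.e. once every pre-existing reader has left its critical section.
 */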

void rcu_register_thread(void)
{
    assert(rcu_reader.ctr == 0);
    qemu_mutex_lock(&rcu_registry_lock);
    QLIST_INSERT_HEAD(&registry, &rcu_reader, node);
    qemu_mutex_unlock(&rcu_registry_lock);
}

void rcu_unregister_thread(void)
{
    qemu_mutex_lock(&rcu_registry_lock);
    QLIST_REMOVE(&rcu_reader, node);
    qemu_mutex_unlock(&rcu_registry_lock);
}
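
/* Sketch of the expected thread lifecycle, assuming the read-side helpers
 * declared in "qemu/rcu.h"; a thread must stay registered for as long as
 * it may act as an RCU reader:
 *
 *     static void *worker(void *opaque)
 *     {
 *         rcu_register_thread();
 *         ...
 *         rcu_read_lock();
 *         ... dereference RCU-protected pointers ...
 *         rcu_read_unlock();
 *         ...
 *         rcu_unregister_thread();
 *         return NULL;
 *     }
 *
 * Unregistered threads are invisible to wait_for_readers(), so their
 * critical sections would not be honored by synchronize_rcu().
 */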

static void rcu_init_complete(void)
{
    QemuThread thread;

    qemu_mutex_init(&rcu_registry_lock);
    qemu_mutex_init(&rcu_sync_lock);
    qemu_event_init(&rcu_gp_event, true);

    qemu_event_init(&rcu_call_ready_event, false);

    /* The caller is assumed to hold the iothread lock, so the call_rcu
     * thread must have been quiescent even after forking; just recreate it.
     */
    qemu_thread_create(&thread, "call_rcu", call_rcu_thread,
                       NULL, QEMU_THREAD_DETACHED);

    rcu_register_thread();
}

#ifdef CONFIG_POSIX
static void rcu_init_lock(void)
{
    qemu_mutex_lock(&rcu_sync_lock);
    qemu_mutex_lock(&rcu_registry_lock);
}

static void rcu_init_unlock(void)
{
    qemu_mutex_unlock(&rcu_registry_lock);
    qemu_mutex_unlock(&rcu_sync_lock);
}
#endif

void rcu_after_fork(void)
{
    memset(&registry, 0, sizeof(registry));
    rcu_init_complete();
}

static void __attribute__((__constructor__)) rcu_init(void)
{
#ifdef CONFIG_POSIX
    pthread_atfork(rcu_init_lock, rcu_init_unlock, rcu_init_unlock);
#endif
    rcu_init_complete();
}