/*
 * urcu-mb.c
 *
 * Userspace RCU library with explicit memory barriers
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 * Copyright 2015 Red Hat, Inc.
 *
 * Ported to QEMU by Paolo Bonzini <pbonzini@redhat.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/rcu.h"
#include "qemu/atomic.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"
#if defined(CONFIG_MALLOC_TRIM)
#include <malloc.h>
#endif

/*
 * Global grace period counter.  Bit 0 is always one in rcu_gp_ctr.
 * Bits 1 and above are defined in synchronize_rcu.
 */
#define RCU_GP_LOCKED (1UL << 0)
#define RCU_GP_CTR    (1UL << 1)

unsigned long rcu_gp_ctr = RCU_GP_LOCKED;

QemuEvent rcu_gp_event;
static QemuMutex rcu_registry_lock;
static QemuMutex rcu_sync_lock;

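/*
 * A sketch of how the counter is used (illustrative, not extra code):
 * a reader snapshots rcu_gp_ctr into its rcu_reader.ctr when it enters a
 * read-side critical section, and stores 0 when it leaves.  Because bit 0
 * (RCU_GP_LOCKED) is always set in rcu_gp_ctr, a snapshot is never zero,
 * so rcu_gp_ongoing() below can distinguish "quiescent" (ctr == 0) from
 * "still inside an old grace period" (ctr != 0 && ctr != rcu_gp_ctr).
 */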
/*
 * Check whether a quiescent state was crossed between the beginning of
 * wait_for_readers() and now.
 */
static inline int rcu_gp_ongoing(unsigned long *ctr)
{
    unsigned long v;

    v = atomic_read(ctr);
    return v && (v != rcu_gp_ctr);
}

/* Written to only by each individual reader.  Read by both the reader and the
 * writers.
 */
__thread struct rcu_reader_data rcu_reader;

/* Protected by rcu_registry_lock. */
typedef QLIST_HEAD(, rcu_reader_data) ThreadList;
static ThreadList registry = QLIST_HEAD_INITIALIZER(registry);

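/*
 * For reference, a simplified sketch of the reader side; the real inline
 * functions live in include/qemu/rcu.h and pair with the smp_mb_global()
 * calls below:
 *
 *     rcu_read_lock:
 *         if (rcu_reader.depth++ > 0) return;
 *         atomic_set(&rcu_reader.ctr, atomic_read(&rcu_gp_ctr));
 *         smp_mb_placeholder();
 *
 *     rcu_read_unlock:
 *         if (--rcu_reader.depth > 0) return;
 *         atomic_set(&rcu_reader.ctr, 0);
 *         smp_mb_placeholder();
 *         if (atomic_read(&rcu_reader.waiting)) {
 *             atomic_set(&rcu_reader.waiting, false);
 *             qemu_event_set(&rcu_gp_event);
 *         }
 */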
/* Wait for previous parity/grace period to be empty of readers. */
static void wait_for_readers(void)
{
    ThreadList qsreaders = QLIST_HEAD_INITIALIZER(qsreaders);
    struct rcu_reader_data *index, *tmp;

    for (;;) {
        /* We want to be notified of changes made to rcu_gp_ongoing
         * while we walk the list.
         */
        qemu_event_reset(&rcu_gp_event);

        /* Instead of using atomic_mb_set for index->waiting, and
         * atomic_mb_read for index->ctr, memory barriers are placed
         * manually since writes to different threads are independent.
         * qemu_event_reset has acquire semantics, so no memory barrier
         * is needed here.
         */
        QLIST_FOREACH(index, &registry, node) {
            atomic_set(&index->waiting, true);
        }

        /* Here, order the stores to index->waiting before the loads of
         * index->ctr.  Pairs with smp_mb_placeholder() in rcu_read_unlock(),
         * ensuring that the loads of index->ctr are sequentially consistent.
         */
        smp_mb_global();

        QLIST_FOREACH_SAFE(index, &registry, node, tmp) {
            if (!rcu_gp_ongoing(&index->ctr)) {
                QLIST_REMOVE(index, node);
                QLIST_INSERT_HEAD(&qsreaders, index, node);

                /* No need for atomic_mb_set here; at worst we
                 * get some extra futex wakeups.
                 */
                atomic_set(&index->waiting, false);
            }
        }

        if (QLIST_EMPTY(&registry)) {
            break;
        }

        /* Wait for one thread to report a quiescent state and try again.
         * Release rcu_registry_lock, so rcu_(un)register_thread() doesn't
         * have to wait too long.
         *
         * rcu_register_thread() may add nodes to &registry; it will not
         * wake up synchronize_rcu, but that is okay because at least one
         * other thread must exit its RCU read-side critical section before
         * synchronize_rcu is done.  The next iteration of the loop will
         * move the new thread's rcu_reader from &registry to &qsreaders,
         * because rcu_gp_ongoing() will return false.
         *
         * rcu_unregister_thread() may remove nodes from &qsreaders instead
         * of &registry if it runs during qemu_event_wait.  That's okay;
         * the node then will not be added back to &registry by QLIST_SWAP
         * below.  The invariant is that the node is part of one list when
         * rcu_registry_lock is released.
         */
        qemu_mutex_unlock(&rcu_registry_lock);
        qemu_event_wait(&rcu_gp_event);
        qemu_mutex_lock(&rcu_registry_lock);
    }

    /* Put the reader list back into the registry. */
    QLIST_SWAP(&registry, &qsreaders, node);
}

void synchronize_rcu(void)
{
    qemu_mutex_lock(&rcu_sync_lock);

    /* Write RCU-protected pointers before reading p_rcu_reader->ctr.
     * Pairs with smp_mb_placeholder() in rcu_read_lock().
     */
    smp_mb_global();

    qemu_mutex_lock(&rcu_registry_lock);
    if (!QLIST_EMPTY(&registry)) {
        /* In either case, the atomic_mb_set below blocks stores that free
         * old RCU-protected pointers.
         */
        if (sizeof(rcu_gp_ctr) < 8) {
            /* For architectures with 32-bit longs, a two-subphase algorithm
             * ensures we do not encounter overflow bugs.
             *
             * Switch parity: 0 -> 1, 1 -> 0.
             */
            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
            wait_for_readers();
            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
        } else {
            /* Increment current grace period. */
            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
        }

        wait_for_readers();
    }

    qemu_mutex_unlock(&rcu_registry_lock);
    qemu_mutex_unlock(&rcu_sync_lock);
}
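
/*
 * Typical writer-side usage, as a sketch (Foo, foo_ptr and new_foo are
 * hypothetical names, not part of this file):
 *
 *     Foo *old = atomic_rcu_read(&foo_ptr);
 *     atomic_rcu_set(&foo_ptr, new_foo);
 *     synchronize_rcu();
 *     g_free(old);
 *
 * After synchronize_rcu() returns, no reader can still hold a reference
 * to "old", so freeing it is safe.  call_rcu1() below is the deferred,
 * non-blocking alternative to this synchronize_rcu() + free pattern.
 */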

#define RCU_CALL_MIN_SIZE 30

/* Multi-producer, single-consumer queue based on urcu/static/wfqueue.h
 * from liburcu.  Note that head is only used by the consumer.
 */
static struct rcu_head dummy;
static struct rcu_head *head = &dummy, **tail = &dummy.next;
static int rcu_call_count;
static QemuEvent rcu_call_ready_event;

static void enqueue(struct rcu_head *node)
{
    struct rcu_head **old_tail;

    node->next = NULL;
    old_tail = atomic_xchg(&tail, &node->next);
    atomic_mb_set(old_tail, node);
}
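
/*
 * Note on enqueue(): the atomic_xchg publishes the node as the new tail
 * before the predecessor's next pointer is updated, so a concurrent
 * try_dequeue() can briefly observe a NULL next pointer on a node that
 * is not the real tail.  The consumer handles this by waiting for the
 * enqueuer to finish (see the NULL check in try_dequeue() below).
 */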

static struct rcu_head *try_dequeue(void)
{
    struct rcu_head *node, *next;

retry:
    /* Test for an empty list, which we do not expect.  Note that for
     * the consumer head and tail are always consistent.  The head
     * is consistent because only the consumer reads/writes it.
     * The tail is consistent because updating it is the first visible
     * step of enqueuing.  It is only the next pointers that might be
     * inconsistent.
     */
    if (head == &dummy && atomic_mb_read(&tail) == &dummy.next) {
        abort();
    }

    /* If the head node has NULL in its next pointer, the value is
     * wrong and we need to wait until its enqueuer finishes the update.
     */
    node = head;
    next = atomic_mb_read(&head->next);
    if (!next) {
        return NULL;
    }

    /* Since we are the sole consumer, and we excluded the empty case
     * above, the queue will always have at least two nodes: the
     * dummy node, and the one being removed.  So we do not need to update
     * the tail pointer.
     */
    head = next;

    /* If we dequeued the dummy node, add it back at the end and retry. */
    if (node == &dummy) {
        enqueue(node);
        goto retry;
    }

    return node;
}
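
/*
 * An illustrative trace (A and B are hypothetical nodes): starting from
 * the empty queue, enqueueing A then B yields
 *
 *     head -> dummy -> A -> B          tail == &B.next
 *
 * The first try_dequeue() pops dummy, re-enqueues it behind B, retries,
 * and returns A, leaving
 *
 *     head -> B -> dummy               tail == &dummy.next
 */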

static void *call_rcu_thread(void *opaque)
{
    struct rcu_head *node;

    rcu_register_thread();

    for (;;) {
        int tries = 0;
        int n = atomic_read(&rcu_call_count);

        /* Heuristically wait for a decent number of callbacks to pile up.
         * Fetch rcu_call_count now; we must only process elements that were
         * added before synchronize_rcu() starts.
         */
        while (n == 0 || (n < RCU_CALL_MIN_SIZE && ++tries <= 5)) {
            g_usleep(10000);
            if (n == 0) {
                qemu_event_reset(&rcu_call_ready_event);
                n = atomic_read(&rcu_call_count);
                if (n == 0) {
#if defined(CONFIG_MALLOC_TRIM)
                    malloc_trim(4 * 1024 * 1024);
#endif
                    qemu_event_wait(&rcu_call_ready_event);
                }
            }
            n = atomic_read(&rcu_call_count);
        }

        atomic_sub(&rcu_call_count, n);
        synchronize_rcu();
        qemu_mutex_lock_iothread();
        while (n > 0) {
            node = try_dequeue();
            while (!node) {
                qemu_mutex_unlock_iothread();
                qemu_event_reset(&rcu_call_ready_event);
                node = try_dequeue();
                if (!node) {
                    qemu_event_wait(&rcu_call_ready_event);
                    node = try_dequeue();
                }
                qemu_mutex_lock_iothread();
            }

            n--;
            node->func(node);
        }
        qemu_mutex_unlock_iothread();
    }
    abort();
}

void call_rcu1(struct rcu_head *node, void (*func)(struct rcu_head *node))
{
    node->func = func;
    enqueue(node);
    atomic_inc(&rcu_call_count);
    qemu_event_set(&rcu_call_ready_event);
}
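
/*
 * Usage sketch for call_rcu1() (struct Foo and foo_reclaim are hypothetical;
 * most callers use the type-safe call_rcu() macro from include/qemu/rcu.h,
 * which expands to a call_rcu1() call like this one):
 *
 *     struct Foo {
 *         struct rcu_head rcu;
 *         int payload;
 *     };
 *
 *     static void foo_reclaim(struct rcu_head *head)
 *     {
 *         struct Foo *foo = container_of(head, struct Foo, rcu);
 *         g_free(foo);
 *     }
 *
 *     call_rcu1(&foo->rcu, foo_reclaim);
 */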

void rcu_register_thread(void)
{
    assert(rcu_reader.ctr == 0);
    qemu_mutex_lock(&rcu_registry_lock);
    QLIST_INSERT_HEAD(&registry, &rcu_reader, node);
    qemu_mutex_unlock(&rcu_registry_lock);
}

void rcu_unregister_thread(void)
{
    qemu_mutex_lock(&rcu_registry_lock);
    QLIST_REMOVE(&rcu_reader, node);
    qemu_mutex_unlock(&rcu_registry_lock);
}

static void rcu_init_complete(void)
{
    QemuThread thread;

    qemu_mutex_init(&rcu_registry_lock);
    qemu_mutex_init(&rcu_sync_lock);
    qemu_event_init(&rcu_gp_event, true);

    qemu_event_init(&rcu_call_ready_event, false);

    /* The caller is assumed to hold the iothread lock, so the call_rcu
     * thread must have been quiescent even after forking; just recreate it.
     */
    qemu_thread_create(&thread, "call_rcu", call_rcu_thread,
                       NULL, QEMU_THREAD_DETACHED);

    rcu_register_thread();
}

static int atfork_depth = 1;

void rcu_enable_atfork(void)
{
    atfork_depth++;
}

void rcu_disable_atfork(void)
{
    atfork_depth--;
}

#ifdef CONFIG_POSIX
static void rcu_init_lock(void)
{
    if (atfork_depth < 1) {
        return;
    }

    qemu_mutex_lock(&rcu_sync_lock);
    qemu_mutex_lock(&rcu_registry_lock);
}

static void rcu_init_unlock(void)
{
    if (atfork_depth < 1) {
        return;
    }

    qemu_mutex_unlock(&rcu_registry_lock);
    qemu_mutex_unlock(&rcu_sync_lock);
}

static void rcu_init_child(void)
{
    if (atfork_depth < 1) {
        return;
    }

    memset(&registry, 0, sizeof(registry));
    rcu_init_complete();
}
#endif

static void __attribute__((__constructor__)) rcu_init(void)
{
    smp_mb_global_init();
#ifdef CONFIG_POSIX
    pthread_atfork(rcu_init_lock, rcu_init_unlock, rcu_init_child);
#endif
    rcu_init_complete();
}