/*
 * QEMU thread abstractions: mutexes, recursive mutexes, condition
 * variables, semaphores, events, thread creation/join, spinlocks and
 * QemuLockCnt.
 */
1#ifndef QEMU_THREAD_H
2#define QEMU_THREAD_H
e5d355d1 3
ac9a9eba
GD
4#include "qemu/processor.h"
5#include "qemu/atomic.h"
65097429 6
e5d355d1 7typedef struct QemuCond QemuCond;
38b14db3 8typedef struct QemuSemaphore QemuSemaphore;
c7c4d063 9typedef struct QemuEvent QemuEvent;
51dee5e4 10typedef struct QemuLockCnt QemuLockCnt;
e5d355d1
AL
11typedef struct QemuThread QemuThread;
12
9257d46d 13#ifdef _WIN32
1de7afc9 14#include "qemu/thread-win32.h"
9257d46d 15#else
1de7afc9 16#include "qemu/thread-posix.h"
9257d46d
PB
17#endif
18
fe9959a2
EC
19/* include QSP header once QemuMutex, QemuCond etc. are defined */
20#include "qemu/qsp.h"
21
cf218714
JK
22#define QEMU_THREAD_JOINABLE 0
23#define QEMU_THREAD_DETACHED 1
24
e5d355d1 25void qemu_mutex_init(QemuMutex *mutex);
313b1d69 26void qemu_mutex_destroy(QemuMutex *mutex);
6c27a0de
AB
27int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line);
28void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line);
29void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line);
30
4b193bb7
RH
31void qemu_rec_mutex_init(QemuRecMutex *mutex);
32void qemu_rec_mutex_destroy(QemuRecMutex *mutex);
33void qemu_rec_mutex_lock_impl(QemuRecMutex *mutex, const char *file, int line);
34int qemu_rec_mutex_trylock_impl(QemuRecMutex *mutex, const char *file, int line);
9c75bae7 35void qemu_rec_mutex_unlock_impl(QemuRecMutex *mutex, const char *file, int line);
4b193bb7 36
fe9959a2
EC
/*
 * Hook types mirroring the *_impl signatures above.  The extern
 * pointers below hold the current implementation and are read with
 * qatomic_read() by the macros further down.
 * NOTE(review): the pointers are presumably swapped by the QSP lock
 * profiler (see the qemu/qsp.h include above) — confirm there.
 */
typedef void (*QemuMutexLockFunc)(QemuMutex *m, const char *f, int l);
typedef int (*QemuMutexTrylockFunc)(QemuMutex *m, const char *f, int l);
typedef void (*QemuRecMutexLockFunc)(QemuRecMutex *m, const char *f, int l);
typedef int (*QemuRecMutexTrylockFunc)(QemuRecMutex *m, const char *f, int l);
typedef void (*QemuCondWaitFunc)(QemuCond *c, QemuMutex *m, const char *f,
                                 int l);
typedef bool (*QemuCondTimedWaitFunc)(QemuCond *c, QemuMutex *m, int ms,
                                      const char *f, int l);

/* NOTE(review): separate hook for big-QEMU-lock acquisitions — confirm use */
extern QemuMutexLockFunc qemu_bql_mutex_lock_func;
extern QemuMutexLockFunc qemu_mutex_lock_func;
extern QemuMutexTrylockFunc qemu_mutex_trylock_func;
extern QemuRecMutexLockFunc qemu_rec_mutex_lock_func;
extern QemuRecMutexTrylockFunc qemu_rec_mutex_trylock_func;
extern QemuCondWaitFunc qemu_cond_wait_func;
extern QemuCondTimedWaitFunc qemu_cond_timedwait_func;
fe9959a2
EC
53
/* convenience macros to bypass the profiler */
#define qemu_mutex_lock__raw(m) \
        qemu_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_mutex_trylock__raw(m) \
        qemu_mutex_trylock_impl(m, __FILE__, __LINE__)

#ifdef __COVERITY__
/*
 * Coverity is severely confused by the indirect function calls,
 * hide them.
 */
#define qemu_mutex_lock(m) \
        qemu_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_mutex_trylock(m) \
        qemu_mutex_trylock_impl(m, __FILE__, __LINE__)
#define qemu_rec_mutex_lock(m) \
        qemu_rec_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_rec_mutex_trylock(m) \
        qemu_rec_mutex_trylock_impl(m, __FILE__, __LINE__)
#define qemu_cond_wait(c, m) \
        qemu_cond_wait_impl(c, m, __FILE__, __LINE__)
#define qemu_cond_timedwait(c, m, ms) \
        qemu_cond_timedwait_impl(c, m, ms, __FILE__, __LINE__)
#else
/*
 * Each macro loads the current hook with qatomic_read() and calls it
 * with the caller's __FILE__/__LINE__.  The statement-expression form
 * keeps the hook pointer in a local so it is read exactly once.
 */
#define qemu_mutex_lock(m) ({ \
        QemuMutexLockFunc _f = qatomic_read(&qemu_mutex_lock_func); \
        _f(m, __FILE__, __LINE__); \
    })

#define qemu_mutex_trylock(m) ({ \
        QemuMutexTrylockFunc _f = qatomic_read(&qemu_mutex_trylock_func); \
        _f(m, __FILE__, __LINE__); \
    })

#define qemu_rec_mutex_lock(m) ({ \
        QemuRecMutexLockFunc _f = qatomic_read(&qemu_rec_mutex_lock_func); \
        _f(m, __FILE__, __LINE__); \
    })

#define qemu_rec_mutex_trylock(m) ({ \
        QemuRecMutexTrylockFunc _f; \
        _f = qatomic_read(&qemu_rec_mutex_trylock_func); \
        _f(m, __FILE__, __LINE__); \
    })

#define qemu_cond_wait(c, m) ({ \
        QemuCondWaitFunc _f = qatomic_read(&qemu_cond_wait_func); \
        _f(c, m, __FILE__, __LINE__); \
    })

#define qemu_cond_timedwait(c, m, ms) ({ \
        QemuCondTimedWaitFunc _f = qatomic_read(&qemu_cond_timedwait_func); \
        _f(c, m, ms, __FILE__, __LINE__); \
    })
#endif
fe9959a2 109
6c27a0de
AB
/*
 * Unlock is not routed through the hook pointers above; it expands
 * directly to the _impl function with the caller's location.
 */
#define qemu_mutex_unlock(mutex) \
        qemu_mutex_unlock_impl(mutex, __FILE__, __LINE__)

#define qemu_rec_mutex_unlock(mutex) \
        qemu_rec_mutex_unlock_impl(mutex, __FILE__, __LINE__)
115
6c27a0de
AB
/*
 * Out-of-line-callable versions of the lock operations.  Wrapping the
 * function name in parentheses suppresses expansion of the same-named
 * function-like macros above, so a real function with that name can be
 * defined (and taken by address) while normal call sites still get the
 * macro.
 */
static inline void (qemu_mutex_lock)(QemuMutex *mutex)
{
    qemu_mutex_lock(mutex);
}

static inline int (qemu_mutex_trylock)(QemuMutex *mutex)
{
    return qemu_mutex_trylock(mutex);
}

static inline void (qemu_mutex_unlock)(QemuMutex *mutex)
{
    qemu_mutex_unlock(mutex);
}
feadec63 130
fe9959a2
EC
/*
 * Recursive-mutex counterparts of the wrappers above; the parenthesized
 * names again prevent the function-like macros from expanding here.
 */
static inline void (qemu_rec_mutex_lock)(QemuRecMutex *mutex)
{
    qemu_rec_mutex_lock(mutex);
}

static inline int (qemu_rec_mutex_trylock)(QemuRecMutex *mutex)
{
    return qemu_rec_mutex_trylock(mutex);
}

static inline void (qemu_rec_mutex_unlock)(QemuRecMutex *mutex)
{
    qemu_rec_mutex_unlock(mutex);
}
145
e5d355d1 146void qemu_cond_init(QemuCond *cond);
313b1d69 147void qemu_cond_destroy(QemuCond *cond);
9257d46d
PB
148
149/*
150 * IMPORTANT: The implementation does not guarantee that pthread_cond_signal
151 * and pthread_cond_broadcast can be called except while the same mutex is
152 * held as in the corresponding pthread_cond_wait calls!
153 */
e5d355d1
AL
154void qemu_cond_signal(QemuCond *cond);
155void qemu_cond_broadcast(QemuCond *cond);
6c27a0de
AB
156void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex,
157 const char *file, const int line);
3dcc9c6e
YK
158bool qemu_cond_timedwait_impl(QemuCond *cond, QemuMutex *mutex, int ms,
159 const char *file, const int line);
6c27a0de 160
6c27a0de
AB
161static inline void (qemu_cond_wait)(QemuCond *cond, QemuMutex *mutex)
162{
163 qemu_cond_wait(cond, mutex);
164}
e5d355d1 165
3dcc9c6e
YK
166/* Returns true if timeout has not expired, and false otherwise */
167static inline bool (qemu_cond_timedwait)(QemuCond *cond, QemuMutex *mutex,
168 int ms)
169{
170 return qemu_cond_timedwait(cond, mutex, ms);
171}
172
38b14db3
PB
/* Counting semaphore; @init is the initial count. */
void qemu_sem_init(QemuSemaphore *sem, int init);
void qemu_sem_post(QemuSemaphore *sem);
void qemu_sem_wait(QemuSemaphore *sem);
/* NOTE(review): presumably returns nonzero on timeout — confirm in the impl */
int qemu_sem_timedwait(QemuSemaphore *sem, int ms);
void qemu_sem_destroy(QemuSemaphore *sem);

/* Event with explicit set/reset state; @init gives the initial state. */
void qemu_event_init(QemuEvent *ev, bool init);
void qemu_event_set(QemuEvent *ev);
void qemu_event_reset(QemuEvent *ev);
void qemu_event_wait(QemuEvent *ev);
void qemu_event_destroy(QemuEvent *ev);

/*
 * @mode is QEMU_THREAD_JOINABLE or QEMU_THREAD_DETACHED (defined above).
 * NOTE(review): @name presumably labels the thread when enabled via
 * qemu_thread_naming() — confirm in the impl.
 */
void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void *),
                        void *arg, int mode);
void *qemu_thread_join(QemuThread *thread);
void qemu_thread_get_self(QemuThread *thread);
bool qemu_thread_is_self(QemuThread *thread);
G_NORETURN void qemu_thread_exit(void *retval);
void qemu_thread_naming(bool enable);
313b1d69 193
ef57137f 194struct Notifier;
ca95173c
PM
195/**
196 * qemu_thread_atexit_add:
197 * @notifier: Notifier to add
198 *
199 * Add the specified notifier to a list which will be run via
200 * notifier_list_notify() when this thread exits (either by calling
201 * qemu_thread_exit() or by returning from its start_routine).
202 * The usual usage is that the caller passes a Notifier which is
203 * a per-thread variable; it can then use the callback to free
204 * other per-thread data.
205 *
206 * If the thread exits as part of the entire process exiting,
207 * it is unspecified whether notifiers are called or not.
208 */
ef57137f 209void qemu_thread_atexit_add(struct Notifier *notifier);
ca95173c
PM
210/**
211 * qemu_thread_atexit_remove:
212 * @notifier: Notifier to remove
213 *
214 * Remove the specified notifier from the thread-exit notification
215 * list. It is not valid to try to remove a notifier which is not
216 * on the list.
217 */
ef57137f
PB
218void qemu_thread_atexit_remove(struct Notifier *notifier);
219
45a9595a
EC
220#ifdef CONFIG_TSAN
221#include <sanitizer/tsan_interface.h>
222#endif
223
e70372fc 224struct QemuSpin {
ac9a9eba 225 int value;
e70372fc 226};
ac9a9eba
GD
227
static inline void qemu_spin_init(QemuSpin *spin)
{
    /* __sync_lock_release() stores 0, i.e. the unlocked state. */
    __sync_lock_release(&spin->value);
#ifdef CONFIG_TSAN
    /* Register the lock object with ThreadSanitizer. */
    __tsan_mutex_create(spin, __tsan_mutex_not_static);
#endif
}
235
45a9595a
EC
/* const parameter because the only purpose here is the TSAN annotation */
static inline void qemu_spin_destroy(const QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    /* cast drops const for the TSAN interface, which takes void * */
    __tsan_mutex_destroy((void *)spin, __tsan_mutex_not_static);
#endif
}
4384a70d 243
ac9a9eba
GD
static inline void qemu_spin_lock(QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_pre_lock(spin, 0);
#endif
    /*
     * Test-and-test-and-set: try to acquire with an atomic exchange;
     * while that fails, spin on plain reads (cheap, cache-friendly)
     * until the value is observed as 0 again, then retry the exchange.
     */
    while (unlikely(__sync_lock_test_and_set(&spin->value, true))) {
        while (qatomic_read(&spin->value)) {
            cpu_relax();
        }
    }
#ifdef CONFIG_TSAN
    __tsan_mutex_post_lock(spin, 0, 0);
#endif
}
258
259static inline bool qemu_spin_trylock(QemuSpin *spin)
260{
45a9595a
EC
261#ifdef CONFIG_TSAN
262 __tsan_mutex_pre_lock(spin, __tsan_mutex_try_lock);
263#endif
264 bool busy = __sync_lock_test_and_set(&spin->value, true);
265#ifdef CONFIG_TSAN
266 unsigned flags = __tsan_mutex_try_lock;
267 flags |= busy ? __tsan_mutex_try_lock_failed : 0;
268 __tsan_mutex_post_lock(spin, flags, 0);
269#endif
270 return busy;
ac9a9eba
GD
271}
272
static inline bool qemu_spin_locked(QemuSpin *spin)
{
    /* Racy snapshot of the lock state; the answer can be stale. */
    return qatomic_read(&spin->value);
}
277
static inline void qemu_spin_unlock(QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_pre_unlock(spin, 0);
#endif
    /* Release-store of 0 frees the lock. */
    __sync_lock_release(&spin->value);
#ifdef CONFIG_TSAN
    __tsan_mutex_post_unlock(spin, 0);
#endif
}
288
51dee5e4 289struct QemuLockCnt {
fbcc3e50 290#ifndef CONFIG_LINUX
51dee5e4 291 QemuMutex mutex;
fbcc3e50 292#endif
51dee5e4
PB
293 unsigned count;
294};
295
296/**
297 * qemu_lockcnt_init: initialize a QemuLockcnt
298 * @lockcnt: the lockcnt to initialize
299 *
300 * Initialize lockcnt's counter to zero and prepare its mutex
301 * for usage.
302 */
303void qemu_lockcnt_init(QemuLockCnt *lockcnt);
304
305/**
306 * qemu_lockcnt_destroy: destroy a QemuLockcnt
307 * @lockcnt: the lockcnt to destruct
308 *
309 * Destroy lockcnt's mutex.
310 */
311void qemu_lockcnt_destroy(QemuLockCnt *lockcnt);
312
313/**
314 * qemu_lockcnt_inc: increment a QemuLockCnt's counter
315 * @lockcnt: the lockcnt to operate on
316 *
317 * If the lockcnt's count is zero, wait for critical sections
318 * to finish and increment lockcnt's count to 1. If the count
319 * is not zero, just increment it.
320 *
321 * Because this function can wait on the mutex, it must not be
322 * called while the lockcnt's mutex is held by the current thread.
323 * For the same reason, qemu_lockcnt_inc can also contribute to
324 * AB-BA deadlocks. This is a sample deadlock scenario:
325 *
326 * thread 1 thread 2
327 * -------------------------------------------------------
328 * qemu_lockcnt_lock(&lc1);
329 * qemu_lockcnt_lock(&lc2);
330 * qemu_lockcnt_inc(&lc2);
331 * qemu_lockcnt_inc(&lc1);
332 */
333void qemu_lockcnt_inc(QemuLockCnt *lockcnt);
334
335/**
336 * qemu_lockcnt_dec: decrement a QemuLockCnt's counter
337 * @lockcnt: the lockcnt to operate on
338 */
339void qemu_lockcnt_dec(QemuLockCnt *lockcnt);
340
341/**
342 * qemu_lockcnt_dec_and_lock: decrement a QemuLockCnt's counter and
343 * possibly lock it.
344 * @lockcnt: the lockcnt to operate on
345 *
346 * Decrement lockcnt's count. If the new count is zero, lock
347 * the mutex and return true. Otherwise, return false.
348 */
349bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt);
350
351/**
352 * qemu_lockcnt_dec_if_lock: possibly decrement a QemuLockCnt's counter and
353 * lock it.
354 * @lockcnt: the lockcnt to operate on
355 *
356 * If the count is 1, decrement the count to zero, lock
357 * the mutex and return true. Otherwise, return false.
358 */
359bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt);
360
361/**
362 * qemu_lockcnt_lock: lock a QemuLockCnt's mutex.
363 * @lockcnt: the lockcnt to operate on
364 *
365 * Remember that concurrent visits are not blocked unless the count is
366 * also zero. You can use qemu_lockcnt_count to check for this inside a
367 * critical section.
368 */
369void qemu_lockcnt_lock(QemuLockCnt *lockcnt);
370
371/**
372 * qemu_lockcnt_unlock: release a QemuLockCnt's mutex.
373 * @lockcnt: the lockcnt to operate on.
374 */
375void qemu_lockcnt_unlock(QemuLockCnt *lockcnt);
376
377/**
378 * qemu_lockcnt_inc_and_unlock: combined unlock/increment on a QemuLockCnt.
379 * @lockcnt: the lockcnt to operate on.
380 *
381 * This is the same as
382 *
383 * qemu_lockcnt_unlock(lockcnt);
384 * qemu_lockcnt_inc(lockcnt);
385 *
386 * but more efficient.
387 */
388void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt);
389
390/**
391 * qemu_lockcnt_count: query a LockCnt's count.
392 * @lockcnt: the lockcnt to query.
393 *
394 * Note that the count can change at any time. Still, while the
395 * lockcnt is locked, one can usefully check whether the count
396 * is non-zero.
397 */
398unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt);
399
e5d355d1 400#endif