/*
 * QEMU thread abstraction: mutexes, condition variables, semaphores,
 * events, threads, spinlocks and lock-guarded counters.
 */
1#ifndef QEMU_THREAD_H
2#define QEMU_THREAD_H
e5d355d1 3
ac9a9eba
GD
4#include "qemu/processor.h"
5#include "qemu/atomic.h"
65097429 6
e5d355d1 7typedef struct QemuCond QemuCond;
38b14db3 8typedef struct QemuSemaphore QemuSemaphore;
c7c4d063 9typedef struct QemuEvent QemuEvent;
51dee5e4 10typedef struct QemuLockCnt QemuLockCnt;
e5d355d1
AL
11typedef struct QemuThread QemuThread;
12
9257d46d 13#ifdef _WIN32
1de7afc9 14#include "qemu/thread-win32.h"
9257d46d 15#else
1de7afc9 16#include "qemu/thread-posix.h"
9257d46d
PB
17#endif
18
fe9959a2
EC
19/* include QSP header once QemuMutex, QemuCond etc. are defined */
20#include "qemu/qsp.h"
21
cf218714
JK
22#define QEMU_THREAD_JOINABLE 0
23#define QEMU_THREAD_DETACHED 1
24
e5d355d1 25void qemu_mutex_init(QemuMutex *mutex);
313b1d69 26void qemu_mutex_destroy(QemuMutex *mutex);
6c27a0de
AB
27int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line);
28void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line);
29void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line);
30
4b193bb7
RH
31void qemu_rec_mutex_init(QemuRecMutex *mutex);
32void qemu_rec_mutex_destroy(QemuRecMutex *mutex);
33void qemu_rec_mutex_lock_impl(QemuRecMutex *mutex, const char *file, int line);
34int qemu_rec_mutex_trylock_impl(QemuRecMutex *mutex, const char *file, int line);
9c75bae7 35void qemu_rec_mutex_unlock_impl(QemuRecMutex *mutex, const char *file, int line);
4b193bb7 36
fe9959a2
EC
37typedef void (*QemuMutexLockFunc)(QemuMutex *m, const char *f, int l);
38typedef int (*QemuMutexTrylockFunc)(QemuMutex *m, const char *f, int l);
39typedef void (*QemuRecMutexLockFunc)(QemuRecMutex *m, const char *f, int l);
40typedef int (*QemuRecMutexTrylockFunc)(QemuRecMutex *m, const char *f, int l);
41typedef void (*QemuCondWaitFunc)(QemuCond *c, QemuMutex *m, const char *f,
42 int l);
3dcc9c6e
YK
43typedef bool (*QemuCondTimedWaitFunc)(QemuCond *c, QemuMutex *m, int ms,
44 const char *f, int l);
fe9959a2 45
cb764d06 46extern QemuMutexLockFunc qemu_bql_mutex_lock_func;
fe9959a2
EC
47extern QemuMutexLockFunc qemu_mutex_lock_func;
48extern QemuMutexTrylockFunc qemu_mutex_trylock_func;
49extern QemuRecMutexLockFunc qemu_rec_mutex_lock_func;
50extern QemuRecMutexTrylockFunc qemu_rec_mutex_trylock_func;
51extern QemuCondWaitFunc qemu_cond_wait_func;
3dcc9c6e 52extern QemuCondTimedWaitFunc qemu_cond_timedwait_func;
fe9959a2
EC
53
/* convenience macros to bypass the profiler */
#define qemu_mutex_lock__raw(m)                         \
        qemu_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_mutex_trylock__raw(m)                      \
        qemu_mutex_trylock_impl(m, __FILE__, __LINE__)
59
07d66672
PB
#ifdef __COVERITY__
/*
 * Coverity is severely confused by the indirect function calls,
 * hide them.
 */
#define qemu_mutex_lock(m)                                              \
            qemu_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_mutex_trylock(m)                                           \
            qemu_mutex_trylock_impl(m, __FILE__, __LINE__)
#define qemu_rec_mutex_lock(m)                                          \
            qemu_rec_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_rec_mutex_trylock(m)                                       \
            qemu_rec_mutex_trylock_impl(m, __FILE__, __LINE__)
#define qemu_cond_wait(c, m)                                            \
            qemu_cond_wait_impl(c, m, __FILE__, __LINE__)
#define qemu_cond_timedwait(c, m, ms)                                   \
            qemu_cond_timedwait_impl(c, m, ms, __FILE__, __LINE__)
#else
/*
 * Each operation reads its current implementation out of a function
 * pointer (qemu_mutex_lock_func etc. above), so the QSP profiler can
 * interpose itself by swapping the pointers at run time.  The
 * qatomic_read() makes the pointer fetch race-free.  These use GCC/Clang
 * statement expressions, so qemu_mutex_trylock() and the wait macros
 * still yield the callee's return value.
 */
#define qemu_mutex_lock(m) ({                                           \
            QemuMutexLockFunc _f = qatomic_read(&qemu_mutex_lock_func); \
            _f(m, __FILE__, __LINE__);                                  \
        })

#define qemu_mutex_trylock(m) ({                                              \
            QemuMutexTrylockFunc _f = qatomic_read(&qemu_mutex_trylock_func); \
            _f(m, __FILE__, __LINE__);                                        \
        })

#define qemu_rec_mutex_lock(m) ({                                             \
            QemuRecMutexLockFunc _f = qatomic_read(&qemu_rec_mutex_lock_func);\
            _f(m, __FILE__, __LINE__);                                        \
        })

#define qemu_rec_mutex_trylock(m) ({                                    \
            QemuRecMutexTrylockFunc _f;                                 \
            _f = qatomic_read(&qemu_rec_mutex_trylock_func);            \
            _f(m, __FILE__, __LINE__);                                  \
        })

#define qemu_cond_wait(c, m) ({                                         \
            QemuCondWaitFunc _f = qatomic_read(&qemu_cond_wait_func);   \
            _f(c, m, __FILE__, __LINE__);                               \
        })

#define qemu_cond_timedwait(c, m, ms) ({                                       \
            QemuCondTimedWaitFunc _f = qatomic_read(&qemu_cond_timedwait_func);\
            _f(c, m, ms, __FILE__, __LINE__);                                  \
        })
#endif
fe9959a2 109
6c27a0de
AB
110#define qemu_mutex_unlock(mutex) \
111 qemu_mutex_unlock_impl(mutex, __FILE__, __LINE__)
112
9c75bae7
RH
113#define qemu_rec_mutex_unlock(mutex) \
114 qemu_rec_mutex_unlock_impl(mutex, __FILE__, __LINE__)
115
6c27a0de
AB
/*
 * Out-of-line counterpart of the qemu_mutex_lock() macro above.  The
 * parentheses around the name suppress macro expansion at the point of
 * definition, so a real function exists for use as a function pointer;
 * the body invokes the (profiler-aware) macro.
 */
static inline void (qemu_mutex_lock)(QemuMutex *mutex)
{
    qemu_mutex_lock(mutex);
}
120
/*
 * Function form of the qemu_mutex_trylock() macro; the parenthesized
 * name prevents macro expansion here.  Returns the macro's result
 * (0 on success, nonzero if the mutex was busy).
 */
static inline int (qemu_mutex_trylock)(QemuMutex *mutex)
{
    return qemu_mutex_trylock(mutex);
}
125
/* Function form of the qemu_mutex_unlock() macro (see lock variant above). */
static inline void (qemu_mutex_unlock)(QemuMutex *mutex)
{
    qemu_mutex_unlock(mutex);
}
feadec63 130
/* Function form of the qemu_rec_mutex_lock() macro (recursive mutex). */
static inline void (qemu_rec_mutex_lock)(QemuRecMutex *mutex)
{
    qemu_rec_mutex_lock(mutex);
}
135
/* Function form of the qemu_rec_mutex_trylock() macro. */
static inline int (qemu_rec_mutex_trylock)(QemuRecMutex *mutex)
{
    return qemu_rec_mutex_trylock(mutex);
}
140
/* Function form of the qemu_rec_mutex_unlock() macro. */
static inline void (qemu_rec_mutex_unlock)(QemuRecMutex *mutex)
{
    qemu_rec_mutex_unlock(mutex);
}
145
e5d355d1 146void qemu_cond_init(QemuCond *cond);
313b1d69 147void qemu_cond_destroy(QemuCond *cond);
9257d46d
PB
148
149/*
150 * IMPORTANT: The implementation does not guarantee that pthread_cond_signal
151 * and pthread_cond_broadcast can be called except while the same mutex is
152 * held as in the corresponding pthread_cond_wait calls!
153 */
e5d355d1
AL
154void qemu_cond_signal(QemuCond *cond);
155void qemu_cond_broadcast(QemuCond *cond);
6c27a0de
AB
156void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex,
157 const char *file, const int line);
3dcc9c6e
YK
158bool qemu_cond_timedwait_impl(QemuCond *cond, QemuMutex *mutex, int ms,
159 const char *file, const int line);
6c27a0de 160
6c27a0de
AB
/*
 * Function form of the qemu_cond_wait() macro; @mutex must be held by
 * the caller, as with pthread_cond_wait().
 */
static inline void (qemu_cond_wait)(QemuCond *cond, QemuMutex *mutex)
{
    qemu_cond_wait(cond, mutex);
}
e5d355d1 165
3dcc9c6e
YK
/*
 * Function form of the qemu_cond_timedwait() macro; @ms is the timeout
 * in milliseconds.
 * Returns true if timeout has not expired, and false otherwise.
 */
static inline bool (qemu_cond_timedwait)(QemuCond *cond, QemuMutex *mutex,
                                         int ms)
{
    return qemu_cond_timedwait(cond, mutex, ms);
}
172
38b14db3
PB
173void qemu_sem_init(QemuSemaphore *sem, int init);
174void qemu_sem_post(QemuSemaphore *sem);
175void qemu_sem_wait(QemuSemaphore *sem);
176int qemu_sem_timedwait(QemuSemaphore *sem, int ms);
177void qemu_sem_destroy(QemuSemaphore *sem);
178
c7c4d063
PB
179void qemu_event_init(QemuEvent *ev, bool init);
180void qemu_event_set(QemuEvent *ev);
181void qemu_event_reset(QemuEvent *ev);
182void qemu_event_wait(QemuEvent *ev);
183void qemu_event_destroy(QemuEvent *ev);
184
4900116e 185void qemu_thread_create(QemuThread *thread, const char *name,
cf218714
JK
186 void *(*start_routine)(void *),
187 void *arg, int mode);
7730f32c
DH
188int qemu_thread_set_affinity(QemuThread *thread, unsigned long *host_cpus,
189 unsigned long nbits);
190int qemu_thread_get_affinity(QemuThread *thread, unsigned long **host_cpus,
191 unsigned long *nbits);
cf218714 192void *qemu_thread_join(QemuThread *thread);
b7680cb6 193void qemu_thread_get_self(QemuThread *thread);
2d797b65 194bool qemu_thread_is_self(QemuThread *thread);
8905770b 195G_NORETURN void qemu_thread_exit(void *retval);
8f480de0 196void qemu_thread_naming(bool enable);
313b1d69 197
ef57137f 198struct Notifier;
ca95173c
PM
199/**
200 * qemu_thread_atexit_add:
201 * @notifier: Notifier to add
202 *
203 * Add the specified notifier to a list which will be run via
204 * notifier_list_notify() when this thread exits (either by calling
205 * qemu_thread_exit() or by returning from its start_routine).
206 * The usual usage is that the caller passes a Notifier which is
207 * a per-thread variable; it can then use the callback to free
208 * other per-thread data.
209 *
210 * If the thread exits as part of the entire process exiting,
211 * it is unspecified whether notifiers are called or not.
212 */
ef57137f 213void qemu_thread_atexit_add(struct Notifier *notifier);
ca95173c
PM
214/**
215 * qemu_thread_atexit_remove:
216 * @notifier: Notifier to remove
217 *
218 * Remove the specified notifier from the thread-exit notification
219 * list. It is not valid to try to remove a notifier which is not
220 * on the list.
221 */
ef57137f
PB
222void qemu_thread_atexit_remove(struct Notifier *notifier);
223
45a9595a
EC
224#ifdef CONFIG_TSAN
225#include <sanitizer/tsan_interface.h>
226#endif
227
/* A simple test-and-set spinlock. */
struct QemuSpin {
    int value;   /* 0 = unlocked, 1 = held; accessed only via qatomic_* */
};
ac9a9eba
GD
231
/* Initialize @spin to the unlocked state (and register it with TSAN). */
static inline void qemu_spin_init(QemuSpin *spin)
{
    qatomic_set(&spin->value, 0);
#ifdef CONFIG_TSAN
    __tsan_mutex_create(spin, __tsan_mutex_not_static);
#endif
}
239
/*
 * Destroy @spin.  A no-op unless TSAN is enabled, in which case it
 * retires the mutex object created by qemu_spin_init().
 */
static inline void qemu_spin_destroy(QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_destroy(spin, __tsan_mutex_not_static);
#endif
}
4384a70d 246
ac9a9eba
GD
/*
 * Acquire @spin, busy-waiting until it becomes free.
 *
 * Test-and-test-and-set: after a failed xchg, spin on plain reads so
 * contending CPUs do not bounce the cache line with writes, and only
 * retry the xchg once the lock looks free.  cpu_relax() hints the CPU
 * while spinning.  NOTE(review): acquire ordering relies on
 * qatomic_xchg's barrier semantics — see qemu/atomic.h.
 */
static inline void qemu_spin_lock(QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_pre_lock(spin, 0);
#endif
    while (unlikely(qatomic_xchg(&spin->value, 1))) {
        while (qatomic_read(&spin->value)) {
            cpu_relax();
        }
    }
#ifdef CONFIG_TSAN
    __tsan_mutex_post_lock(spin, 0, 0);
#endif
}
261
262static inline bool qemu_spin_trylock(QemuSpin *spin)
263{
45a9595a
EC
264#ifdef CONFIG_TSAN
265 __tsan_mutex_pre_lock(spin, __tsan_mutex_try_lock);
266#endif
7ed9e721 267 bool busy = qatomic_xchg(&spin->value, true);
45a9595a
EC
268#ifdef CONFIG_TSAN
269 unsigned flags = __tsan_mutex_try_lock;
270 flags |= busy ? __tsan_mutex_try_lock_failed : 0;
271 __tsan_mutex_post_lock(spin, flags, 0);
272#endif
273 return busy;
ac9a9eba
GD
274}
275
276static inline bool qemu_spin_locked(QemuSpin *spin)
277{
d73415a3 278 return qatomic_read(&spin->value);
ac9a9eba
GD
279}
280
/*
 * Release @spin.  The store-release publishes all writes made inside
 * the critical section before the lock is observed as free.
 */
static inline void qemu_spin_unlock(QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_pre_unlock(spin, 0);
#endif
    qatomic_store_release(&spin->value, 0);
#ifdef CONFIG_TSAN
    __tsan_mutex_post_unlock(spin, 0);
#endif
}
291
/* A counter protected by a mutex; see the qemu_lockcnt_* API below. */
struct QemuLockCnt {
#ifndef CONFIG_LINUX
    /* Non-Linux hosts need an explicit mutex; on Linux the count word
     * alone suffices — presumably via futex, see the implementation. */
    QemuMutex mutex;
#endif
    unsigned count;
};
298
299/**
300 * qemu_lockcnt_init: initialize a QemuLockcnt
301 * @lockcnt: the lockcnt to initialize
302 *
303 * Initialize lockcnt's counter to zero and prepare its mutex
304 * for usage.
305 */
306void qemu_lockcnt_init(QemuLockCnt *lockcnt);
307
308/**
309 * qemu_lockcnt_destroy: destroy a QemuLockcnt
310 * @lockcnt: the lockcnt to destruct
311 *
312 * Destroy lockcnt's mutex.
313 */
314void qemu_lockcnt_destroy(QemuLockCnt *lockcnt);
315
316/**
317 * qemu_lockcnt_inc: increment a QemuLockCnt's counter
318 * @lockcnt: the lockcnt to operate on
319 *
320 * If the lockcnt's count is zero, wait for critical sections
321 * to finish and increment lockcnt's count to 1. If the count
322 * is not zero, just increment it.
323 *
324 * Because this function can wait on the mutex, it must not be
325 * called while the lockcnt's mutex is held by the current thread.
326 * For the same reason, qemu_lockcnt_inc can also contribute to
327 * AB-BA deadlocks. This is a sample deadlock scenario:
328 *
329 * thread 1 thread 2
330 * -------------------------------------------------------
331 * qemu_lockcnt_lock(&lc1);
332 * qemu_lockcnt_lock(&lc2);
333 * qemu_lockcnt_inc(&lc2);
334 * qemu_lockcnt_inc(&lc1);
335 */
336void qemu_lockcnt_inc(QemuLockCnt *lockcnt);
337
338/**
339 * qemu_lockcnt_dec: decrement a QemuLockCnt's counter
340 * @lockcnt: the lockcnt to operate on
341 */
342void qemu_lockcnt_dec(QemuLockCnt *lockcnt);
343
344/**
345 * qemu_lockcnt_dec_and_lock: decrement a QemuLockCnt's counter and
346 * possibly lock it.
347 * @lockcnt: the lockcnt to operate on
348 *
349 * Decrement lockcnt's count. If the new count is zero, lock
350 * the mutex and return true. Otherwise, return false.
351 */
352bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt);
353
354/**
355 * qemu_lockcnt_dec_if_lock: possibly decrement a QemuLockCnt's counter and
356 * lock it.
357 * @lockcnt: the lockcnt to operate on
358 *
359 * If the count is 1, decrement the count to zero, lock
360 * the mutex and return true. Otherwise, return false.
361 */
362bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt);
363
364/**
365 * qemu_lockcnt_lock: lock a QemuLockCnt's mutex.
366 * @lockcnt: the lockcnt to operate on
367 *
368 * Remember that concurrent visits are not blocked unless the count is
369 * also zero. You can use qemu_lockcnt_count to check for this inside a
370 * critical section.
371 */
372void qemu_lockcnt_lock(QemuLockCnt *lockcnt);
373
374/**
375 * qemu_lockcnt_unlock: release a QemuLockCnt's mutex.
376 * @lockcnt: the lockcnt to operate on.
377 */
378void qemu_lockcnt_unlock(QemuLockCnt *lockcnt);
379
380/**
381 * qemu_lockcnt_inc_and_unlock: combined unlock/increment on a QemuLockCnt.
382 * @lockcnt: the lockcnt to operate on.
383 *
384 * This is the same as
385 *
386 * qemu_lockcnt_unlock(lockcnt);
387 * qemu_lockcnt_inc(lockcnt);
388 *
389 * but more efficient.
390 */
391void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt);
392
393/**
394 * qemu_lockcnt_count: query a LockCnt's count.
395 * @lockcnt: the lockcnt to query.
396 *
397 * Note that the count can change at any time. Still, while the
398 * lockcnt is locked, one can usefully check whether the count
399 * is non-zero.
400 */
401unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt);
402
e5d355d1 403#endif