#ifndef QEMU_THREAD_H
#define QEMU_THREAD_H

#include "qemu/processor.h"
#include "qemu/atomic.h"
#include "qemu/clang-tsa.h"

typedef struct QemuCond QemuCond;
typedef struct QemuSemaphore QemuSemaphore;
typedef struct QemuEvent QemuEvent;
typedef struct QemuLockCnt QemuLockCnt;
typedef struct QemuThread QemuThread;

#ifdef _WIN32
#include "qemu/thread-win32.h"
#else
#include "qemu/thread-posix.h"
#endif

/* include QSP header once QemuMutex, QemuCond etc. are defined */
#include "qemu/qsp.h"

#define QEMU_THREAD_JOINABLE 0
#define QEMU_THREAD_DETACHED 1

void qemu_mutex_init(QemuMutex *mutex);
void qemu_mutex_destroy(QemuMutex *mutex);
int TSA_NO_TSA qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file,
                                       const int line);
void TSA_NO_TSA qemu_mutex_lock_impl(QemuMutex *mutex, const char *file,
                                     const int line);
void TSA_NO_TSA qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file,
                                       const int line);

void qemu_rec_mutex_init(QemuRecMutex *mutex);
void qemu_rec_mutex_destroy(QemuRecMutex *mutex);
void qemu_rec_mutex_lock_impl(QemuRecMutex *mutex, const char *file, int line);
int qemu_rec_mutex_trylock_impl(QemuRecMutex *mutex, const char *file, int line);
void qemu_rec_mutex_unlock_impl(QemuRecMutex *mutex, const char *file, int line);

typedef void (*QemuMutexLockFunc)(QemuMutex *m, const char *f, int l);
typedef int (*QemuMutexTrylockFunc)(QemuMutex *m, const char *f, int l);
typedef void (*QemuRecMutexLockFunc)(QemuRecMutex *m, const char *f, int l);
typedef int (*QemuRecMutexTrylockFunc)(QemuRecMutex *m, const char *f, int l);
typedef void (*QemuCondWaitFunc)(QemuCond *c, QemuMutex *m, const char *f,
                                 int l);
typedef bool (*QemuCondTimedWaitFunc)(QemuCond *c, QemuMutex *m, int ms,
                                      const char *f, int l);

extern QemuMutexLockFunc qemu_bql_mutex_lock_func;
extern QemuMutexLockFunc qemu_mutex_lock_func;
extern QemuMutexTrylockFunc qemu_mutex_trylock_func;
extern QemuRecMutexLockFunc qemu_rec_mutex_lock_func;
extern QemuRecMutexTrylockFunc qemu_rec_mutex_trylock_func;
extern QemuCondWaitFunc qemu_cond_wait_func;
extern QemuCondTimedWaitFunc qemu_cond_timedwait_func;

/* convenience macros to bypass the profiler */
#define qemu_mutex_lock__raw(m) \
        qemu_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_mutex_trylock__raw(m) \
        qemu_mutex_trylock_impl(m, __FILE__, __LINE__)

#ifdef __COVERITY__
/*
 * Coverity is severely confused by the indirect function calls,
 * hide them.
 */
#define qemu_mutex_lock(m) \
        qemu_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_mutex_trylock(m) \
        qemu_mutex_trylock_impl(m, __FILE__, __LINE__)
#define qemu_rec_mutex_lock(m) \
        qemu_rec_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_rec_mutex_trylock(m) \
        qemu_rec_mutex_trylock_impl(m, __FILE__, __LINE__)
#define qemu_cond_wait(c, m) \
        qemu_cond_wait_impl(c, m, __FILE__, __LINE__)
#define qemu_cond_timedwait(c, m, ms) \
        qemu_cond_timedwait_impl(c, m, ms, __FILE__, __LINE__)
#else
#define qemu_mutex_lock(m) ({                                              \
        QemuMutexLockFunc _f = qatomic_read(&qemu_mutex_lock_func);        \
        _f(m, __FILE__, __LINE__);                                         \
    })

#define qemu_mutex_trylock(m) ({                                           \
        QemuMutexTrylockFunc _f = qatomic_read(&qemu_mutex_trylock_func);  \
        _f(m, __FILE__, __LINE__);                                         \
    })

#define qemu_rec_mutex_lock(m) ({                                          \
        QemuRecMutexLockFunc _f = qatomic_read(&qemu_rec_mutex_lock_func); \
        _f(m, __FILE__, __LINE__);                                         \
    })

#define qemu_rec_mutex_trylock(m) ({                                       \
        QemuRecMutexTrylockFunc _f;                                        \
        _f = qatomic_read(&qemu_rec_mutex_trylock_func);                   \
        _f(m, __FILE__, __LINE__);                                         \
    })

#define qemu_cond_wait(c, m) ({                                            \
        QemuCondWaitFunc _f = qatomic_read(&qemu_cond_wait_func);          \
        _f(c, m, __FILE__, __LINE__);                                      \
    })

#define qemu_cond_timedwait(c, m, ms) ({                                   \
        QemuCondTimedWaitFunc _f = qatomic_read(&qemu_cond_timedwait_func);\
        _f(c, m, ms, __FILE__, __LINE__);                                  \
    })
#endif

#define qemu_mutex_unlock(mutex) \
        qemu_mutex_unlock_impl(mutex, __FILE__, __LINE__)

#define qemu_rec_mutex_unlock(mutex) \
        qemu_rec_mutex_unlock_impl(mutex, __FILE__, __LINE__)

static inline void (qemu_mutex_lock)(QemuMutex *mutex)
{
    qemu_mutex_lock(mutex);
}

static inline int (qemu_mutex_trylock)(QemuMutex *mutex)
{
    return qemu_mutex_trylock(mutex);
}

static inline void (qemu_mutex_unlock)(QemuMutex *mutex)
{
    qemu_mutex_unlock(mutex);
}

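/*
 * Usage sketch (illustrative only, not part of this header): a caller-defined
 * QemuMutex protecting a hypothetical counter.  qemu_mutex_trylock() is
 * assumed to follow the pthread convention of returning 0 when the lock was
 * acquired and non-zero when it was busy.
 *
 *     static QemuMutex counter_lock;   // set up once with qemu_mutex_init()
 *     static unsigned counter;
 *
 *     qemu_mutex_lock(&counter_lock);
 *     counter++;
 *     qemu_mutex_unlock(&counter_lock);
 *
 *     if (qemu_mutex_trylock(&counter_lock) == 0) {
 *         counter++;
 *         qemu_mutex_unlock(&counter_lock);
 *     }
 */
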
static inline void (qemu_rec_mutex_lock)(QemuRecMutex *mutex)
{
    qemu_rec_mutex_lock(mutex);
}

static inline int (qemu_rec_mutex_trylock)(QemuRecMutex *mutex)
{
    return qemu_rec_mutex_trylock(mutex);
}

static inline void (qemu_rec_mutex_unlock)(QemuRecMutex *mutex)
{
    qemu_rec_mutex_unlock(mutex);
}

void qemu_cond_init(QemuCond *cond);
void qemu_cond_destroy(QemuCond *cond);

/*
 * IMPORTANT: The implementation does not guarantee that pthread_cond_signal
 * and pthread_cond_broadcast can be called except while the same mutex is
 * held as in the corresponding pthread_cond_wait calls!
 */
void qemu_cond_signal(QemuCond *cond);
void qemu_cond_broadcast(QemuCond *cond);
void TSA_NO_TSA qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex,
                                    const char *file, const int line);
bool qemu_cond_timedwait_impl(QemuCond *cond, QemuMutex *mutex, int ms,
                              const char *file, const int line);

static inline void (qemu_cond_wait)(QemuCond *cond, QemuMutex *mutex)
{
    qemu_cond_wait(cond, mutex);
}

/* Returns true if timeout has not expired, and false otherwise */
static inline bool (qemu_cond_timedwait)(QemuCond *cond, QemuMutex *mutex,
                                         int ms)
{
    return qemu_cond_timedwait(cond, mutex, ms);
}

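/*
 * Usage sketch (illustrative only): the canonical wait loop, re-checking a
 * caller-defined predicate after every wakeup.  The mutex must be held around
 * both the predicate check and the wait, per the note above about
 * qemu_cond_signal()/qemu_cond_broadcast().  data_ready and consume() are
 * hypothetical.
 *
 *     qemu_mutex_lock(&lock);
 *     while (!data_ready) {
 *         qemu_cond_wait(&cond, &lock);
 *     }
 *     consume();
 *     qemu_mutex_unlock(&lock);
 *
 * With a timeout, qemu_cond_timedwait() returns false once the timeout has
 * expired, so the loop can give up:
 *
 *     while (!data_ready) {
 *         if (!qemu_cond_timedwait(&cond, &lock, 10)) {
 *             break;                    // timed out after ~10 ms
 *         }
 *     }
 */
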
void qemu_sem_init(QemuSemaphore *sem, int init);
void qemu_sem_post(QemuSemaphore *sem);
void qemu_sem_wait(QemuSemaphore *sem);
int qemu_sem_timedwait(QemuSemaphore *sem, int ms);
void qemu_sem_destroy(QemuSemaphore *sem);

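/*
 * Usage sketch (illustrative only): a counting semaphore handing work from a
 * producer to a consumer.  enqueue() and dequeue() are hypothetical
 * caller-defined helpers around a separately locked queue.
 *
 *     qemu_sem_init(&sem, 0);
 *
 *     // producer                          // consumer
 *     enqueue(item);                       qemu_sem_wait(&sem);
 *     qemu_sem_post(&sem);                 item = dequeue();
 */
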
void qemu_event_init(QemuEvent *ev, bool init);
void qemu_event_set(QemuEvent *ev);
void qemu_event_reset(QemuEvent *ev);
void qemu_event_wait(QemuEvent *ev);
void qemu_event_destroy(QemuEvent *ev);

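/*
 * Usage sketch (illustrative only): QemuEvent is a binary flag, useful for
 * "something happened" signalling.  A typical waiter resets the event,
 * re-checks the condition, and only then waits, so a set that races with the
 * check is not lost.  work_pending() and publish_work() are hypothetical.
 *
 *     // waiter
 *     qemu_event_reset(&ev);
 *     if (!work_pending()) {
 *         qemu_event_wait(&ev);
 *     }
 *
 *     // signalling side
 *     publish_work();
 *     qemu_event_set(&ev);
 */
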
void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void *),
                        void *arg, int mode);
int qemu_thread_set_affinity(QemuThread *thread, unsigned long *host_cpus,
                             unsigned long nbits);
int qemu_thread_get_affinity(QemuThread *thread, unsigned long **host_cpus,
                             unsigned long *nbits);
void *qemu_thread_join(QemuThread *thread);
void qemu_thread_get_self(QemuThread *thread);
bool qemu_thread_is_self(QemuThread *thread);
G_NORETURN void qemu_thread_exit(void *retval);
void qemu_thread_naming(bool enable);

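/*
 * Usage sketch (illustrative only): creating a joinable worker thread and
 * later collecting it.  worker_fn, do_work and opaque are hypothetical.
 *
 *     static void *worker_fn(void *opaque)
 *     {
 *         do_work(opaque);
 *         return NULL;
 *     }
 *
 *     QemuThread worker;
 *     qemu_thread_create(&worker, "my-worker", worker_fn, opaque,
 *                        QEMU_THREAD_JOINABLE);
 *     ...
 *     qemu_thread_join(&worker);
 *
 * A thread created with QEMU_THREAD_DETACHED must not be joined.
 */
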
struct Notifier;
/**
 * qemu_thread_atexit_add:
 * @notifier: Notifier to add
 *
 * Add the specified notifier to a list which will be run via
 * notifier_list_notify() when this thread exits (either by calling
 * qemu_thread_exit() or by returning from its start_routine).
 * The usual usage is that the caller passes a Notifier which is
 * a per-thread variable; it can then use the callback to free
 * other per-thread data.
 *
 * If the thread exits as part of the entire process exiting,
 * it is unspecified whether notifiers are called or not.
 */
void qemu_thread_atexit_add(struct Notifier *notifier);
/**
 * qemu_thread_atexit_remove:
 * @notifier: Notifier to remove
 *
 * Remove the specified notifier from the thread-exit notification
 * list. It is not valid to try to remove a notifier which is not
 * on the list.
 */
void qemu_thread_atexit_remove(struct Notifier *notifier);

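/*
 * Usage sketch (illustrative only): freeing per-thread data when the thread
 * exits.  The Notifier type and its notify callback field come from
 * "qemu/notify.h"; the __thread variables and the callback below are
 * hypothetical.  As noted above, the Notifier itself must live in per-thread
 * storage.
 *
 *     static __thread Notifier my_exit_notifier;
 *     static __thread void *my_tls_buffer;
 *
 *     static void my_thread_exit_cb(Notifier *n, void *unused)
 *     {
 *         g_free(my_tls_buffer);
 *     }
 *
 *     // early in the thread's start routine:
 *     my_exit_notifier.notify = my_thread_exit_cb;
 *     qemu_thread_atexit_add(&my_exit_notifier);
 */
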
#ifdef CONFIG_TSAN
#include <sanitizer/tsan_interface.h>
#endif

struct QemuSpin {
    int value;
};

static inline void qemu_spin_init(QemuSpin *spin)
{
    qatomic_set(&spin->value, 0);
#ifdef CONFIG_TSAN
    __tsan_mutex_create(spin, __tsan_mutex_not_static);
#endif
}

static inline void qemu_spin_destroy(QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_destroy(spin, __tsan_mutex_not_static);
#endif
}

static inline void qemu_spin_lock(QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_pre_lock(spin, 0);
#endif
    while (unlikely(qatomic_xchg(&spin->value, 1))) {
        while (qatomic_read(&spin->value)) {
            cpu_relax();
        }
    }
#ifdef CONFIG_TSAN
    __tsan_mutex_post_lock(spin, 0, 0);
#endif
}

static inline bool qemu_spin_trylock(QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_pre_lock(spin, __tsan_mutex_try_lock);
#endif
    bool busy = qatomic_xchg(&spin->value, true);
#ifdef CONFIG_TSAN
    unsigned flags = __tsan_mutex_try_lock;
    flags |= busy ? __tsan_mutex_try_lock_failed : 0;
    __tsan_mutex_post_lock(spin, flags, 0);
#endif
    return busy;
}

static inline bool qemu_spin_locked(QemuSpin *spin)
{
    return qatomic_read(&spin->value);
}

static inline void qemu_spin_unlock(QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_pre_unlock(spin, 0);
#endif
    qatomic_store_release(&spin->value, 0);
#ifdef CONFIG_TSAN
    __tsan_mutex_post_unlock(spin, 0);
#endif
}

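/*
 * Usage sketch (illustrative only): QemuSpin is meant for very short critical
 * sections, since contending callers busy-wait.  As the inline code above
 * shows, qemu_spin_trylock() returns the previous busy state, i.e. false when
 * the lock was acquired.  update_list() is hypothetical.
 *
 *     static QemuSpin list_lock;        // set up once with qemu_spin_init()
 *
 *     qemu_spin_lock(&list_lock);
 *     update_list();                    // must be short, no sleeping
 *     qemu_spin_unlock(&list_lock);
 *
 *     if (!qemu_spin_trylock(&list_lock)) {
 *         update_list();
 *         qemu_spin_unlock(&list_lock);
 *     }
 */
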
struct QemuLockCnt {
#ifndef CONFIG_LINUX
    QemuMutex mutex;
#endif
    unsigned count;
};

/**
 * qemu_lockcnt_init: initialize a QemuLockCnt
 * @lockcnt: the lockcnt to initialize
 *
 * Initialize lockcnt's counter to zero and prepare its mutex
 * for usage.
 */
void qemu_lockcnt_init(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_destroy: destroy a QemuLockCnt
 * @lockcnt: the lockcnt to destroy
 *
 * Destroy lockcnt's mutex.
 */
void qemu_lockcnt_destroy(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_inc: increment a QemuLockCnt's counter
 * @lockcnt: the lockcnt to operate on
 *
 * If the lockcnt's count is zero, wait for critical sections
 * to finish and increment lockcnt's count to 1.  If the count
 * is not zero, just increment it.
 *
 * Because this function can wait on the mutex, it must not be
 * called while the lockcnt's mutex is held by the current thread.
 * For the same reason, qemu_lockcnt_inc can also contribute to
 * AB-BA deadlocks.  This is a sample deadlock scenario:
 *
 *            thread 1                      thread 2
 *            -------------------------------------------------------
 *            qemu_lockcnt_lock(&lc1);
 *                                          qemu_lockcnt_lock(&lc2);
 *            qemu_lockcnt_inc(&lc2);
 *                                          qemu_lockcnt_inc(&lc1);
 */
void qemu_lockcnt_inc(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_dec: decrement a QemuLockCnt's counter
 * @lockcnt: the lockcnt to operate on
 */
void qemu_lockcnt_dec(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_dec_and_lock: decrement a QemuLockCnt's counter and
 * possibly lock it.
 * @lockcnt: the lockcnt to operate on
 *
 * Decrement lockcnt's count.  If the new count is zero, lock
 * the mutex and return true.  Otherwise, return false.
 */
bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_dec_if_lock: possibly decrement a QemuLockCnt's counter and
 * lock it.
 * @lockcnt: the lockcnt to operate on
 *
 * If the count is 1, decrement the count to zero, lock
 * the mutex and return true.  Otherwise, return false.
 */
bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_lock: lock a QemuLockCnt's mutex.
 * @lockcnt: the lockcnt to operate on
 *
 * Remember that concurrent visits are not blocked unless the count is
 * also zero.  You can use qemu_lockcnt_count to check for this inside a
 * critical section.
 */
void qemu_lockcnt_lock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_unlock: release a QemuLockCnt's mutex.
 * @lockcnt: the lockcnt to operate on.
 */
void qemu_lockcnt_unlock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_inc_and_unlock: combined unlock/increment on a QemuLockCnt.
 * @lockcnt: the lockcnt to operate on.
 *
 * This is the same as
 *
 *     qemu_lockcnt_unlock(lockcnt);
 *     qemu_lockcnt_inc(lockcnt);
 *
 * but more efficient.
 */
void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_count: query a QemuLockCnt's count.
 * @lockcnt: the lockcnt to query.
 *
 * Note that the count can change at any time.  Still, while the
 * lockcnt is locked, one can usefully check whether the count
 * is non-zero.
 */
unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt);

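/*
 * Usage sketch (illustrative only, loosely following the lockcnt
 * documentation under docs/devel/): readers keep the counter elevated while
 * visiting a shared structure, and a writer may only tear the structure down
 * once the count has dropped to zero.  xyz, visit() and teardown() are
 * hypothetical.
 *
 *     // reader
 *     qemu_lockcnt_inc(&xyz_lockcnt);
 *     visit(xyz);
 *     qemu_lockcnt_dec(&xyz_lockcnt);
 *
 *     // writer
 *     qemu_lockcnt_lock(&xyz_lockcnt);
 *     if (qemu_lockcnt_count(&xyz_lockcnt) == 0) {
 *         teardown(xyz);
 *     }
 *     qemu_lockcnt_unlock(&xyz_lockcnt);
 */
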
#endif