#ifndef QEMU_THREAD_H
#define QEMU_THREAD_H

#include "qemu/processor.h"
#include "qemu/atomic.h"

typedef struct QemuCond QemuCond;
typedef struct QemuSemaphore QemuSemaphore;
typedef struct QemuEvent QemuEvent;
typedef struct QemuLockCnt QemuLockCnt;
typedef struct QemuThread QemuThread;

#ifdef _WIN32
#include "qemu/thread-win32.h"
#else
#include "qemu/thread-posix.h"
#endif

/* include QSP header once QemuMutex, QemuCond etc. are defined */
#include "qemu/qsp.h"

#define QEMU_THREAD_JOINABLE 0
#define QEMU_THREAD_DETACHED 1

void qemu_mutex_init(QemuMutex *mutex);
void qemu_mutex_destroy(QemuMutex *mutex);
int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line);
void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line);
void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line);

typedef void (*QemuMutexLockFunc)(QemuMutex *m, const char *f, int l);
typedef int (*QemuMutexTrylockFunc)(QemuMutex *m, const char *f, int l);
typedef void (*QemuRecMutexLockFunc)(QemuRecMutex *m, const char *f, int l);
typedef int (*QemuRecMutexTrylockFunc)(QemuRecMutex *m, const char *f, int l);
typedef void (*QemuCondWaitFunc)(QemuCond *c, QemuMutex *m, const char *f,
                                 int l);

extern QemuMutexLockFunc qemu_bql_mutex_lock_func;
extern QemuMutexLockFunc qemu_mutex_lock_func;
extern QemuMutexTrylockFunc qemu_mutex_trylock_func;
extern QemuRecMutexLockFunc qemu_rec_mutex_lock_func;
extern QemuRecMutexTrylockFunc qemu_rec_mutex_trylock_func;
extern QemuCondWaitFunc qemu_cond_wait_func;

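/*
 * These pointers normally point at the _impl functions above; the QSP
 * synchronization profiler (see "qemu/qsp.h") can atomically swap in its
 * own wrappers to account for waits.  The macros below therefore load the
 * current value with atomic_read() before calling it.
 */
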
/* convenience macros to bypass the profiler */
#define qemu_mutex_lock__raw(m)                         \
        qemu_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_mutex_trylock__raw(m)                      \
        qemu_mutex_trylock_impl(m, __FILE__, __LINE__)

#ifdef __COVERITY__
/*
 * Coverity is severely confused by the indirect function calls,
 * hide them.  Note: no trailing semicolons here, so that the macros
 * can also be used in expression context.
 */
#define qemu_mutex_lock(m)                                              \
        qemu_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_mutex_trylock(m)                                           \
        qemu_mutex_trylock_impl(m, __FILE__, __LINE__)
#define qemu_rec_mutex_lock(m)                                          \
        qemu_rec_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_rec_mutex_trylock(m)                                       \
        qemu_rec_mutex_trylock_impl(m, __FILE__, __LINE__)
#define qemu_cond_wait(c, m)                                            \
        qemu_cond_wait_impl(c, m, __FILE__, __LINE__)
#else
#define qemu_mutex_lock(m) ({                                           \
            QemuMutexLockFunc _f = atomic_read(&qemu_mutex_lock_func);  \
            _f(m, __FILE__, __LINE__);                                  \
        })

#define qemu_mutex_trylock(m) ({                                        \
            QemuMutexTrylockFunc _f = atomic_read(&qemu_mutex_trylock_func); \
            _f(m, __FILE__, __LINE__);                                  \
        })

#define qemu_rec_mutex_lock(m) ({                                       \
            QemuRecMutexLockFunc _f = atomic_read(&qemu_rec_mutex_lock_func); \
            _f(m, __FILE__, __LINE__);                                  \
        })

#define qemu_rec_mutex_trylock(m) ({                                    \
            QemuRecMutexTrylockFunc _f;                                 \
            _f = atomic_read(&qemu_rec_mutex_trylock_func);             \
            _f(m, __FILE__, __LINE__);                                  \
        })

#define qemu_cond_wait(c, m) ({                                         \
            QemuCondWaitFunc _f = atomic_read(&qemu_cond_wait_func);    \
            _f(c, m, __FILE__, __LINE__);                               \
        })
#endif

#define qemu_mutex_unlock(mutex) \
        qemu_mutex_unlock_impl(mutex, __FILE__, __LINE__)

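/*
 * Wrapping a function name in parentheses suppresses expansion of the
 * function-like macros above, so the definitions below create real
 * functions with those names, usable e.g. through function pointers.
 * Their bodies still expand the macros, so they keep the profiler
 * indirection.
 */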
static inline void (qemu_mutex_lock)(QemuMutex *mutex)
{
    qemu_mutex_lock(mutex);
}

static inline int (qemu_mutex_trylock)(QemuMutex *mutex)
{
    return qemu_mutex_trylock(mutex);
}

static inline void (qemu_mutex_unlock)(QemuMutex *mutex)
{
    qemu_mutex_unlock(mutex);
}

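/*
 * Typical usage (a sketch; "lock" stands for a caller-owned QemuMutex that
 * was set up once with qemu_mutex_init()):
 *
 *   qemu_mutex_lock(&lock);
 *   ... critical section ...
 *   qemu_mutex_unlock(&lock);
 *
 * qemu_mutex_trylock() does not block; it returns 0 if the mutex was
 * acquired and a nonzero value otherwise:
 *
 *   if (!qemu_mutex_trylock(&lock)) {
 *       ... critical section ...
 *       qemu_mutex_unlock(&lock);
 *   }
 */
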
static inline void (qemu_rec_mutex_lock)(QemuRecMutex *mutex)
{
    qemu_rec_mutex_lock(mutex);
}

static inline int (qemu_rec_mutex_trylock)(QemuRecMutex *mutex)
{
    return qemu_rec_mutex_trylock(mutex);
}

/* Prototypes for other functions are in thread-posix.h/thread-win32.h. */
void qemu_rec_mutex_init(QemuRecMutex *mutex);

void qemu_cond_init(QemuCond *cond);
void qemu_cond_destroy(QemuCond *cond);

/*
 * IMPORTANT: The implementation does not guarantee that pthread_cond_signal
 * and pthread_cond_broadcast work unless they are called while holding the
 * same mutex that is used in the corresponding pthread_cond_wait calls!
 */
void qemu_cond_signal(QemuCond *cond);
void qemu_cond_broadcast(QemuCond *cond);
void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex,
                         const char *file, const int line);

static inline void (qemu_cond_wait)(QemuCond *cond, QemuMutex *mutex)
{
    qemu_cond_wait(cond, mutex);
}

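/*
 * Typical wait/wake pattern (a sketch; "cond", "lock" and "ready" stand
 * for caller-owned state).  The waiter:
 *
 *   qemu_mutex_lock(&lock);
 *   while (!ready) {
 *       qemu_cond_wait(&cond, &lock);
 *   }
 *   qemu_mutex_unlock(&lock);
 *
 * The waker holds the same mutex, as required by the note above:
 *
 *   qemu_mutex_lock(&lock);
 *   ready = true;
 *   qemu_cond_signal(&cond);
 *   qemu_mutex_unlock(&lock);
 */
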
void qemu_sem_init(QemuSemaphore *sem, int init);
void qemu_sem_post(QemuSemaphore *sem);
void qemu_sem_wait(QemuSemaphore *sem);
int qemu_sem_timedwait(QemuSemaphore *sem, int ms);
void qemu_sem_destroy(QemuSemaphore *sem);

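/*
 * Example (a sketch; enqueue()/dequeue() are hypothetical): a semaphore
 * initialized to 0 hands work off from a producer to a consumer:
 *
 *   qemu_sem_init(&sem, 0);
 *
 *   producer:                     consumer:
 *     enqueue(item);                qemu_sem_wait(&sem);
 *     qemu_sem_post(&sem);          item = dequeue();
 *
 * qemu_sem_timedwait() additionally gives up after @ms milliseconds,
 * returning a negative value on timeout and 0 on success.
 */
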
void qemu_event_init(QemuEvent *ev, bool init);
void qemu_event_set(QemuEvent *ev);
void qemu_event_reset(QemuEvent *ev);
void qemu_event_wait(QemuEvent *ev);
void qemu_event_destroy(QemuEvent *ev);

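/*
 * Example (a sketch): a QemuEvent broadcasts a one-shot condition, such
 * as "initialization finished", to any number of waiters.  The event
 * starts out unset:
 *
 *   qemu_event_init(&ev, false);
 *
 * Waiters block until the event is set:
 *
 *   qemu_event_wait(&ev);
 *
 * Setting the event wakes up all current and future waiters:
 *
 *   qemu_event_set(&ev);
 *
 * qemu_event_reset() returns the event to the unset state for reuse.
 */
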
void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void *),
                        void *arg, int mode);
void *qemu_thread_join(QemuThread *thread);
void qemu_thread_get_self(QemuThread *thread);
bool qemu_thread_is_self(QemuThread *thread);
void qemu_thread_exit(void *retval);
void qemu_thread_naming(bool enable);

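/*
 * Example (a sketch; worker() is a hypothetical start routine):
 *
 *   QemuThread thread;
 *   void *ret;
 *
 *   qemu_thread_create(&thread, "worker", worker, arg,
 *                      QEMU_THREAD_JOINABLE);
 *   ...
 *   ret = qemu_thread_join(&thread);
 *
 * Threads created with QEMU_THREAD_DETACHED cannot be joined; their
 * resources are released automatically when they exit.
 */
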
struct Notifier;
/**
 * qemu_thread_atexit_add:
 * @notifier: Notifier to add
 *
 * Add the specified notifier to a list which will be run via
 * notifier_list_notify() when this thread exits (either by calling
 * qemu_thread_exit() or by returning from its start_routine).
 * The usual usage is that the caller passes a Notifier which is
 * a per-thread variable; it can then use the callback to free
 * other per-thread data.
 *
 * If the thread exits as part of the entire process exiting,
 * it is unspecified whether notifiers are called or not.
 */
void qemu_thread_atexit_add(struct Notifier *notifier);
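
/*
 * Example (a sketch, assuming the Notifier type from "qemu/notify.h" and
 * a hypothetical free_tls_data() callback with a matching notify
 * signature):
 *
 *   static __thread Notifier thread_exit_notifier;
 *
 *   thread_exit_notifier.notify = free_tls_data;
 *   qemu_thread_atexit_add(&thread_exit_notifier);
 */
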
/**
 * qemu_thread_atexit_remove:
 * @notifier: Notifier to remove
 *
 * Remove the specified notifier from the thread-exit notification
 * list. It is not valid to try to remove a notifier which is not
 * on the list.
 */
void qemu_thread_atexit_remove(struct Notifier *notifier);

struct QemuSpin {
    int value;
};

static inline void qemu_spin_init(QemuSpin *spin)
{
    __sync_lock_release(&spin->value);
}

static inline void qemu_spin_lock(QemuSpin *spin)
{
    while (unlikely(__sync_lock_test_and_set(&spin->value, true))) {
        while (atomic_read(&spin->value)) {
            cpu_relax();
        }
    }
}

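/*
 * Note: __sync_lock_test_and_set() returns the previous value of the lock,
 * so qemu_spin_trylock() returns false if it acquired the lock and true if
 * the lock was already held.
 */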
static inline bool qemu_spin_trylock(QemuSpin *spin)
{
    return __sync_lock_test_and_set(&spin->value, true);
}

static inline bool qemu_spin_locked(QemuSpin *spin)
{
    return atomic_read(&spin->value);
}

static inline void qemu_spin_unlock(QemuSpin *spin)
{
    __sync_lock_release(&spin->value);
}

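/*
 * Example (a sketch): spinlocks suit very short critical sections that
 * never sleep, e.g. bumping a counter shared with another thread:
 *
 *   qemu_spin_init(&lock);
 *   ...
 *   qemu_spin_lock(&lock);
 *   counter++;
 *   qemu_spin_unlock(&lock);
 */
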
struct QemuLockCnt {
#ifndef CONFIG_LINUX
    QemuMutex mutex;
#endif
    unsigned count;
};

/**
 * qemu_lockcnt_init: initialize a QemuLockCnt
 * @lockcnt: the lockcnt to initialize
 *
 * Initialize lockcnt's counter to zero and prepare its mutex
 * for usage.
 */
void qemu_lockcnt_init(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_destroy: destroy a QemuLockCnt
 * @lockcnt: the lockcnt to destroy
 *
 * Destroy lockcnt's mutex.
 */
void qemu_lockcnt_destroy(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_inc: increment a QemuLockCnt's counter
 * @lockcnt: the lockcnt to operate on
 *
 * If the lockcnt's count is zero, wait for critical sections
 * to finish and increment lockcnt's count to 1. If the count
 * is not zero, just increment it.
 *
 * Because this function can wait on the mutex, it must not be
 * called while the lockcnt's mutex is held by the current thread.
 * For the same reason, qemu_lockcnt_inc can also contribute to
 * AB-BA deadlocks. This is a sample deadlock scenario:
 *
 *            thread 1                      thread 2
 *            -------------------------------------------------------
 *            qemu_lockcnt_lock(&lc1);
 *                                          qemu_lockcnt_lock(&lc2);
 *            qemu_lockcnt_inc(&lc2);
 *                                          qemu_lockcnt_inc(&lc1);
 */
void qemu_lockcnt_inc(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_dec: decrement a QemuLockCnt's counter
 * @lockcnt: the lockcnt to operate on
 */
void qemu_lockcnt_dec(QemuLockCnt *lockcnt);

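/*
 * Example (a sketch): the reader side of a lockcnt-protected structure
 * brackets its visit with an increment/decrement pair, which keeps
 * writers from freeing the structure underneath it:
 *
 *   qemu_lockcnt_inc(&lockcnt);
 *   ... read the data structure ...
 *   qemu_lockcnt_dec(&lockcnt);
 */
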
/**
 * qemu_lockcnt_dec_and_lock: decrement a QemuLockCnt's counter and
 * possibly lock it.
 * @lockcnt: the lockcnt to operate on
 *
 * Decrement lockcnt's count. If the new count is zero, lock
 * the mutex and return true. Otherwise, return false.
 */
bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt);

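/*
 * Example (a sketch): a reader that also reaps elements that writers
 * unlinked while it was running can end its visit with:
 *
 *   if (qemu_lockcnt_dec_and_lock(&lockcnt)) {
 *       ... no concurrent visits: free unlinked elements ...
 *       qemu_lockcnt_unlock(&lockcnt);
 *   }
 */
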
/**
 * qemu_lockcnt_dec_if_lock: possibly decrement a QemuLockCnt's counter and
 * lock it.
 * @lockcnt: the lockcnt to operate on
 *
 * If the count is 1, decrement the count to zero, lock
 * the mutex and return true. Otherwise, return false.
 */
bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_lock: lock a QemuLockCnt's mutex.
 * @lockcnt: the lockcnt to operate on
 *
 * Remember that concurrent visits are not blocked unless the count is
 * also zero. You can use qemu_lockcnt_count to check for this inside a
 * critical section.
 */
void qemu_lockcnt_lock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_unlock: release a QemuLockCnt's mutex.
 * @lockcnt: the lockcnt to operate on.
 */
void qemu_lockcnt_unlock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_inc_and_unlock: combined unlock/increment on a QemuLockCnt.
 * @lockcnt: the lockcnt to operate on.
 *
 * This is the same as
 *
 *     qemu_lockcnt_unlock(lockcnt);
 *     qemu_lockcnt_inc(lockcnt);
 *
 * but more efficient.
 */
void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_count: query a QemuLockCnt's count.
 * @lockcnt: the lockcnt to query.
 *
 * Note that the count can change at any time. Still, while the
 * lockcnt is locked, one can usefully check whether the count
 * is non-zero.
 */
unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt);

#endif