#ifndef QEMU_THREAD_H
#define QEMU_THREAD_H

#include "qemu/processor.h"
#include "qemu/atomic.h"

typedef struct QemuCond QemuCond;
typedef struct QemuSemaphore QemuSemaphore;
typedef struct QemuEvent QemuEvent;
typedef struct QemuLockCnt QemuLockCnt;
typedef struct QemuThread QemuThread;

#ifdef _WIN32
#include "qemu/thread-win32.h"
#else
#include "qemu/thread-posix.h"
#endif

/* include QSP header once QemuMutex, QemuCond etc. are defined */
#include "qemu/qsp.h"

#define QEMU_THREAD_JOINABLE 0
#define QEMU_THREAD_DETACHED 1

void qemu_mutex_init(QemuMutex *mutex);
void qemu_mutex_destroy(QemuMutex *mutex);
int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line);
void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line);
void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line);

void qemu_rec_mutex_init(QemuRecMutex *mutex);
void qemu_rec_mutex_destroy(QemuRecMutex *mutex);
void qemu_rec_mutex_lock_impl(QemuRecMutex *mutex, const char *file, int line);
int qemu_rec_mutex_trylock_impl(QemuRecMutex *mutex, const char *file, int line);
void qemu_rec_mutex_unlock_impl(QemuRecMutex *mutex, const char *file, int line);

typedef void (*QemuMutexLockFunc)(QemuMutex *m, const char *f, int l);
typedef int (*QemuMutexTrylockFunc)(QemuMutex *m, const char *f, int l);
typedef void (*QemuRecMutexLockFunc)(QemuRecMutex *m, const char *f, int l);
typedef int (*QemuRecMutexTrylockFunc)(QemuRecMutex *m, const char *f, int l);
typedef void (*QemuCondWaitFunc)(QemuCond *c, QemuMutex *m, const char *f,
                                 int l);
typedef bool (*QemuCondTimedWaitFunc)(QemuCond *c, QemuMutex *m, int ms,
                                      const char *f, int l);

extern QemuMutexLockFunc qemu_bql_mutex_lock_func;
extern QemuMutexLockFunc qemu_mutex_lock_func;
extern QemuMutexTrylockFunc qemu_mutex_trylock_func;
extern QemuRecMutexLockFunc qemu_rec_mutex_lock_func;
extern QemuRecMutexTrylockFunc qemu_rec_mutex_trylock_func;
extern QemuCondWaitFunc qemu_cond_wait_func;
extern QemuCondTimedWaitFunc qemu_cond_timedwait_func;

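/*
 * The qemu_mutex_lock() and qemu_cond_wait() macros below dispatch through
 * these function pointers; by default they point at the plain *_impl
 * functions, and the synchronization profiler (see "qemu/qsp.h") can swap
 * in instrumented versions at run time.
 */
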
/* convenience macros to bypass the profiler */
#define qemu_mutex_lock__raw(m)                         \
        qemu_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_mutex_trylock__raw(m)                      \
        qemu_mutex_trylock_impl(m, __FILE__, __LINE__)

#ifdef __COVERITY__
/*
 * Coverity is severely confused by the indirect function calls,
 * hide them.
 */
#define qemu_mutex_lock(m)                                              \
        qemu_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_mutex_trylock(m)                                           \
        qemu_mutex_trylock_impl(m, __FILE__, __LINE__)
#define qemu_rec_mutex_lock(m)                                          \
        qemu_rec_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_rec_mutex_trylock(m)                                       \
        qemu_rec_mutex_trylock_impl(m, __FILE__, __LINE__)
#define qemu_cond_wait(c, m)                                            \
        qemu_cond_wait_impl(c, m, __FILE__, __LINE__)
#define qemu_cond_timedwait(c, m, ms)                                   \
        qemu_cond_timedwait_impl(c, m, ms, __FILE__, __LINE__)
#else
#define qemu_mutex_lock(m) ({                                           \
            QemuMutexLockFunc _f = qatomic_read(&qemu_mutex_lock_func); \
            _f(m, __FILE__, __LINE__);                                  \
        })

#define qemu_mutex_trylock(m) ({                                              \
            QemuMutexTrylockFunc _f = qatomic_read(&qemu_mutex_trylock_func); \
            _f(m, __FILE__, __LINE__);                                        \
        })

#define qemu_rec_mutex_lock(m) ({                                             \
            QemuRecMutexLockFunc _f = qatomic_read(&qemu_rec_mutex_lock_func);\
            _f(m, __FILE__, __LINE__);                                        \
        })

#define qemu_rec_mutex_trylock(m) ({                                   \
            QemuRecMutexTrylockFunc _f;                                 \
            _f = qatomic_read(&qemu_rec_mutex_trylock_func);            \
            _f(m, __FILE__, __LINE__);                                  \
        })

#define qemu_cond_wait(c, m) ({                                         \
            QemuCondWaitFunc _f = qatomic_read(&qemu_cond_wait_func);   \
            _f(c, m, __FILE__, __LINE__);                               \
        })

#define qemu_cond_timedwait(c, m, ms) ({                                       \
            QemuCondTimedWaitFunc _f = qatomic_read(&qemu_cond_timedwait_func);\
            _f(c, m, ms, __FILE__, __LINE__);                                  \
        })
#endif

#define qemu_mutex_unlock(mutex) \
        qemu_mutex_unlock_impl(mutex, __FILE__, __LINE__)

#define qemu_rec_mutex_unlock(mutex) \
        qemu_rec_mutex_unlock_impl(mutex, __FILE__, __LINE__)

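/*
 * Writing the function name in parentheses below prevents the function-like
 * macros above from expanding, so these static inlines define real functions
 * under the same names (useful e.g. when a function pointer is needed).
 */
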
static inline void (qemu_mutex_lock)(QemuMutex *mutex)
{
    qemu_mutex_lock(mutex);
}

static inline int (qemu_mutex_trylock)(QemuMutex *mutex)
{
    return qemu_mutex_trylock(mutex);
}

static inline void (qemu_mutex_unlock)(QemuMutex *mutex)
{
    qemu_mutex_unlock(mutex);
}

static inline void (qemu_rec_mutex_lock)(QemuRecMutex *mutex)
{
    qemu_rec_mutex_lock(mutex);
}

static inline int (qemu_rec_mutex_trylock)(QemuRecMutex *mutex)
{
    return qemu_rec_mutex_trylock(mutex);
}

static inline void (qemu_rec_mutex_unlock)(QemuRecMutex *mutex)
{
    qemu_rec_mutex_unlock(mutex);
}

void qemu_cond_init(QemuCond *cond);
void qemu_cond_destroy(QemuCond *cond);

/*
 * IMPORTANT: The implementation only guarantees that pthread_cond_signal
 * and pthread_cond_broadcast behave correctly when they are called while
 * the same mutex is held as in the corresponding pthread_cond_wait calls!
 */
void qemu_cond_signal(QemuCond *cond);
void qemu_cond_broadcast(QemuCond *cond);
void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex,
                         const char *file, const int line);
bool qemu_cond_timedwait_impl(QemuCond *cond, QemuMutex *mutex, int ms,
                              const char *file, const int line);

static inline void (qemu_cond_wait)(QemuCond *cond, QemuMutex *mutex)
{
    qemu_cond_wait(cond, mutex);
}

/* Returns true if timeout has not expired, and false otherwise */
static inline bool (qemu_cond_timedwait)(QemuCond *cond, QemuMutex *mutex,
                                         int ms)
{
    return qemu_cond_timedwait(cond, mutex, ms);
}

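/*
 * Illustrative usage sketch (not part of the API): as with pthread condition
 * variables, wait in a loop around the predicate and signal while holding
 * the same mutex (see the note above). "lock", "cond" and "ready" below are
 * hypothetical and assumed to have been set up with qemu_mutex_init(),
 * qemu_cond_init() and an initial value of false.
 *
 *     Waiting side:
 *         qemu_mutex_lock(&lock);
 *         while (!ready) {
 *             qemu_cond_wait(&cond, &lock);
 *         }
 *         qemu_mutex_unlock(&lock);
 *
 *     Signaling side:
 *         qemu_mutex_lock(&lock);
 *         ready = true;
 *         qemu_cond_signal(&cond);
 *         qemu_mutex_unlock(&lock);
 *
 * qemu_cond_timedwait(&cond, &lock, ms) can replace qemu_cond_wait(); it
 * returns false once the timeout has expired, so the predicate must be
 * re-checked either way.
 */
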
void qemu_sem_init(QemuSemaphore *sem, int init);
void qemu_sem_post(QemuSemaphore *sem);
void qemu_sem_wait(QemuSemaphore *sem);
int qemu_sem_timedwait(QemuSemaphore *sem, int ms);
void qemu_sem_destroy(QemuSemaphore *sem);

/*
 * QemuEvent is a binary event: qemu_event_set() sets it and wakes any
 * waiters, qemu_event_reset() clears it, and qemu_event_wait() returns
 * once the event is set.
 */
void qemu_event_init(QemuEvent *ev, bool init);
void qemu_event_set(QemuEvent *ev);
void qemu_event_reset(QemuEvent *ev);
void qemu_event_wait(QemuEvent *ev);
void qemu_event_destroy(QemuEvent *ev);

void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void *),
                        void *arg, int mode);
void *qemu_thread_join(QemuThread *thread);
void qemu_thread_get_self(QemuThread *thread);
bool qemu_thread_is_self(QemuThread *thread);
void qemu_thread_exit(void *retval) QEMU_NORETURN;
void qemu_thread_naming(bool enable);

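/*
 * Illustrative usage sketch: "mode" is QEMU_THREAD_JOINABLE or
 * QEMU_THREAD_DETACHED; only joinable threads may be passed to
 * qemu_thread_join(). The worker() function and names below are
 * hypothetical.
 *
 *     static void *worker(void *opaque)
 *     {
 *         ... do work on opaque ...
 *         return NULL;
 *     }
 *
 *     QemuThread thread;
 *
 *     qemu_thread_create(&thread, "my-worker", worker, NULL,
 *                        QEMU_THREAD_JOINABLE);
 *     ...
 *     qemu_thread_join(&thread);
 */
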
struct Notifier;
/**
 * qemu_thread_atexit_add:
 * @notifier: Notifier to add
 *
 * Add the specified notifier to a list which will be run via
 * notifier_list_notify() when this thread exits (either by calling
 * qemu_thread_exit() or by returning from its start_routine).
 * The usual usage is that the caller passes a Notifier which is
 * a per-thread variable; it can then use the callback to free
 * other per-thread data.
 *
 * If the thread exits as part of the entire process exiting,
 * it is unspecified whether notifiers are called or not.
 */
void qemu_thread_atexit_add(struct Notifier *notifier);
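/*
 * Illustrative usage sketch, assuming the Notifier layout from
 * "qemu/notify.h" (a struct with a "notify" callback taking the Notifier
 * and an opaque data pointer). "exit_notifier", "my_tls_buf" and
 * thread_cleanup() are hypothetical.
 *
 *     static __thread Notifier exit_notifier;
 *     static __thread void *my_tls_buf;
 *
 *     static void thread_cleanup(Notifier *n, void *data)
 *     {
 *         g_free(my_tls_buf);
 *     }
 *
 *     Early in the thread's start_routine:
 *         exit_notifier.notify = thread_cleanup;
 *         qemu_thread_atexit_add(&exit_notifier);
 */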
/**
 * qemu_thread_atexit_remove:
 * @notifier: Notifier to remove
 *
 * Remove the specified notifier from the thread-exit notification
 * list. It is not valid to try to remove a notifier which is not
 * on the list.
 */
void qemu_thread_atexit_remove(struct Notifier *notifier);

#ifdef CONFIG_TSAN
#include <sanitizer/tsan_interface.h>
#endif

struct QemuSpin {
    int value;
};

static inline void qemu_spin_init(QemuSpin *spin)
{
    /* __sync_lock_release() stores 0, i.e. the unlocked state */
    __sync_lock_release(&spin->value);
#ifdef CONFIG_TSAN
    __tsan_mutex_create(spin, __tsan_mutex_not_static);
#endif
}

/* const parameter because the only purpose here is the TSAN annotation */
static inline void qemu_spin_destroy(const QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_destroy((void *)spin, __tsan_mutex_not_static);
#endif
}

static inline void qemu_spin_lock(QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_pre_lock(spin, 0);
#endif
    /*
     * Test-and-test-and-set: only retry the atomic exchange once a plain
     * read has seen the lock released, spinning with cpu_relax() meanwhile.
     */
    while (unlikely(__sync_lock_test_and_set(&spin->value, true))) {
        while (qatomic_read(&spin->value)) {
            cpu_relax();
        }
    }
#ifdef CONFIG_TSAN
    __tsan_mutex_post_lock(spin, 0, 0);
#endif
}

/* Returns false if the lock was acquired, true if it was already busy */
static inline bool qemu_spin_trylock(QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_pre_lock(spin, __tsan_mutex_try_lock);
#endif
    bool busy = __sync_lock_test_and_set(&spin->value, true);
#ifdef CONFIG_TSAN
    unsigned flags = __tsan_mutex_try_lock;
    flags |= busy ? __tsan_mutex_try_lock_failed : 0;
    __tsan_mutex_post_lock(spin, flags, 0);
#endif
    return busy;
}

static inline bool qemu_spin_locked(QemuSpin *spin)
{
    return qatomic_read(&spin->value);
}

static inline void qemu_spin_unlock(QemuSpin *spin)
{
#ifdef CONFIG_TSAN
    __tsan_mutex_pre_unlock(spin, 0);
#endif
    __sync_lock_release(&spin->value);
#ifdef CONFIG_TSAN
    __tsan_mutex_post_unlock(spin, 0);
#endif
}

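/*
 * Illustrative usage sketch (not part of the API): QemuSpin is intended for
 * very short critical sections. "lock" below is hypothetical.
 *
 *     static QemuSpin lock;
 *
 *     qemu_spin_init(&lock);
 *     ...
 *     qemu_spin_lock(&lock);
 *     ... short critical section ...
 *     qemu_spin_unlock(&lock);
 *
 * A non-blocking attempt; note that qemu_spin_trylock() returns true when
 * the lock was busy, i.e. false on success:
 *
 *     if (!qemu_spin_trylock(&lock)) {
 *         ... short critical section ...
 *         qemu_spin_unlock(&lock);
 *     }
 */
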
struct QemuLockCnt {
#ifndef CONFIG_LINUX
    QemuMutex mutex;
#endif
    unsigned count;
};

/**
 * qemu_lockcnt_init: initialize a QemuLockCnt
 * @lockcnt: the lockcnt to initialize
 *
 * Initialize lockcnt's counter to zero and prepare its mutex
 * for usage.
 */
void qemu_lockcnt_init(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_destroy: destroy a QemuLockCnt
 * @lockcnt: the lockcnt to destroy
 *
 * Destroy lockcnt's mutex.
 */
void qemu_lockcnt_destroy(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_inc: increment a QemuLockCnt's counter
 * @lockcnt: the lockcnt to operate on
 *
 * If the lockcnt's count is zero, wait for critical sections
 * to finish and increment lockcnt's count to 1. If the count
 * is not zero, just increment it.
 *
 * Because this function can wait on the mutex, it must not be
 * called while the lockcnt's mutex is held by the current thread.
 * For the same reason, qemu_lockcnt_inc can also contribute to
 * AB-BA deadlocks. This is a sample deadlock scenario:
 *
 *            thread 1                      thread 2
 *            -------------------------------------------------------
 *            qemu_lockcnt_lock(&lc1);
 *                                          qemu_lockcnt_lock(&lc2);
 *            qemu_lockcnt_inc(&lc2);
 *                                          qemu_lockcnt_inc(&lc1);
 */
void qemu_lockcnt_inc(QemuLockCnt *lockcnt);

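/*
 * Illustrative usage sketch: a reader brackets its visit of a shared data
 * structure with inc/dec so that writers, which take the mutex, know when
 * elements may still be in use. "lc" and visit_list() are hypothetical;
 * the full pattern is described in the QEMU developer documentation on
 * QemuLockCnt.
 *
 *     qemu_lockcnt_inc(&lc);
 *     visit_list();                  (no mutex is held during the visit)
 *     qemu_lockcnt_dec(&lc);
 */
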
/**
 * qemu_lockcnt_dec: decrement a QemuLockCnt's counter
 * @lockcnt: the lockcnt to operate on
 */
void qemu_lockcnt_dec(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_dec_and_lock: decrement a QemuLockCnt's counter and
 * possibly lock it.
 * @lockcnt: the lockcnt to operate on
 *
 * Decrement lockcnt's count. If the new count is zero, lock
 * the mutex and return true. Otherwise, return false.
 */
bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt);

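/*
 * Illustrative usage sketch: the last reader to leave can take the mutex
 * and reclaim elements that writers could only mark as deleted while
 * readers were active. "lc" and free_deleted_elements() are hypothetical.
 *
 *     if (qemu_lockcnt_dec_and_lock(&lc)) {
 *         free_deleted_elements();
 *         qemu_lockcnt_unlock(&lc);
 *     }
 */
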
/**
 * qemu_lockcnt_dec_if_lock: possibly decrement a QemuLockCnt's counter and
 * lock it.
 * @lockcnt: the lockcnt to operate on
 *
 * If the count is 1, decrement the count to zero, lock
 * the mutex and return true. Otherwise, return false.
 */
bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_lock: lock a QemuLockCnt's mutex.
 * @lockcnt: the lockcnt to operate on
 *
 * Remember that concurrent visits are not blocked unless the count is
 * also zero. You can use qemu_lockcnt_count to check for this inside a
 * critical section.
 */
void qemu_lockcnt_lock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_unlock: release a QemuLockCnt's mutex.
 * @lockcnt: the lockcnt to operate on.
 */
void qemu_lockcnt_unlock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_inc_and_unlock: combined unlock/increment on a QemuLockCnt.
 * @lockcnt: the lockcnt to operate on.
 *
 * This is the same as
 *
 *     qemu_lockcnt_unlock(lockcnt);
 *     qemu_lockcnt_inc(lockcnt);
 *
 * but more efficient.
 */
void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_count: query a QemuLockCnt's count.
 * @lockcnt: the lockcnt to query.
 *
 * Note that the count can change at any time. Still, while the
 * lockcnt is locked, one can usefully check whether the count
 * is non-zero.
 */
unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt);

#endif