/* QEMU thread and synchronization primitives: mutexes, condition
 * variables, semaphores, events, spinlocks and QemuLockCnt. */
1 | #ifndef QEMU_THREAD_H |
2 | #define QEMU_THREAD_H | |
e5d355d1 | 3 | |
ac9a9eba GD |
4 | #include "qemu/processor.h" |
5 | #include "qemu/atomic.h" | |
65097429 | 6 | |
e5d355d1 | 7 | typedef struct QemuCond QemuCond; |
38b14db3 | 8 | typedef struct QemuSemaphore QemuSemaphore; |
c7c4d063 | 9 | typedef struct QemuEvent QemuEvent; |
51dee5e4 | 10 | typedef struct QemuLockCnt QemuLockCnt; |
e5d355d1 AL |
11 | typedef struct QemuThread QemuThread; |
12 | ||
9257d46d | 13 | #ifdef _WIN32 |
1de7afc9 | 14 | #include "qemu/thread-win32.h" |
9257d46d | 15 | #else |
1de7afc9 | 16 | #include "qemu/thread-posix.h" |
9257d46d PB |
17 | #endif |
18 | ||
fe9959a2 EC |
19 | /* include QSP header once QemuMutex, QemuCond etc. are defined */ |
20 | #include "qemu/qsp.h" | |
21 | ||
cf218714 JK |
22 | #define QEMU_THREAD_JOINABLE 0 |
23 | #define QEMU_THREAD_DETACHED 1 | |
24 | ||
e5d355d1 | 25 | void qemu_mutex_init(QemuMutex *mutex); |
313b1d69 | 26 | void qemu_mutex_destroy(QemuMutex *mutex); |
6c27a0de AB |
27 | int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line); |
28 | void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line); | |
29 | void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line); | |
30 | ||
fe9959a2 EC |
31 | typedef void (*QemuMutexLockFunc)(QemuMutex *m, const char *f, int l); |
32 | typedef int (*QemuMutexTrylockFunc)(QemuMutex *m, const char *f, int l); | |
33 | typedef void (*QemuRecMutexLockFunc)(QemuRecMutex *m, const char *f, int l); | |
34 | typedef int (*QemuRecMutexTrylockFunc)(QemuRecMutex *m, const char *f, int l); | |
35 | typedef void (*QemuCondWaitFunc)(QemuCond *c, QemuMutex *m, const char *f, | |
36 | int l); | |
3dcc9c6e YK |
37 | typedef bool (*QemuCondTimedWaitFunc)(QemuCond *c, QemuMutex *m, int ms, |
38 | const char *f, int l); | |
fe9959a2 | 39 | |
cb764d06 | 40 | extern QemuMutexLockFunc qemu_bql_mutex_lock_func; |
fe9959a2 EC |
41 | extern QemuMutexLockFunc qemu_mutex_lock_func; |
42 | extern QemuMutexTrylockFunc qemu_mutex_trylock_func; | |
43 | extern QemuRecMutexLockFunc qemu_rec_mutex_lock_func; | |
44 | extern QemuRecMutexTrylockFunc qemu_rec_mutex_trylock_func; | |
45 | extern QemuCondWaitFunc qemu_cond_wait_func; | |
3dcc9c6e | 46 | extern QemuCondTimedWaitFunc qemu_cond_timedwait_func; |
fe9959a2 EC |
47 | |
48 | /* convenience macros to bypass the profiler */ | |
49 | #define qemu_mutex_lock__raw(m) \ | |
50 | qemu_mutex_lock_impl(m, __FILE__, __LINE__) | |
51 | #define qemu_mutex_trylock__raw(m) \ | |
52 | qemu_mutex_trylock_impl(m, __FILE__, __LINE__) | |
53 | ||
07d66672 PB |
#ifdef __COVERITY__
/*
 * Coverity is severely confused by the indirect function calls,
 * hide them.
 */
#define qemu_mutex_lock(m)                                              \
            qemu_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_mutex_trylock(m)                                           \
            qemu_mutex_trylock_impl(m, __FILE__, __LINE__)
#define qemu_rec_mutex_lock(m)                                          \
            qemu_rec_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_rec_mutex_trylock(m)                                       \
            qemu_rec_mutex_trylock_impl(m, __FILE__, __LINE__)
#define qemu_cond_wait(c, m)                                            \
            qemu_cond_wait_impl(c, m, __FILE__, __LINE__)
#define qemu_cond_timedwait(c, m, ms)                                   \
            qemu_cond_timedwait_impl(c, m, ms, __FILE__, __LINE__)
#else
/*
 * Normal build: each operation goes through a function pointer that is
 * read atomically, so a profiler (see the qemu/qsp.h include above) can
 * interpose its own implementation at run time.  __FILE__/__LINE__ give
 * the call site for lock profiling and diagnostics.  The statement
 * expressions return the callee's result (used by the trylock macros).
 */
#define qemu_mutex_lock(m) ({                                           \
            QemuMutexLockFunc _f = atomic_read(&qemu_mutex_lock_func);  \
            _f(m, __FILE__, __LINE__);                                  \
        })

#define qemu_mutex_trylock(m) ({                                        \
            QemuMutexTrylockFunc _f = atomic_read(&qemu_mutex_trylock_func); \
            _f(m, __FILE__, __LINE__);                                  \
        })

#define qemu_rec_mutex_lock(m) ({                                       \
            QemuRecMutexLockFunc _f = atomic_read(&qemu_rec_mutex_lock_func); \
            _f(m, __FILE__, __LINE__);                                  \
        })

#define qemu_rec_mutex_trylock(m) ({                                    \
            QemuRecMutexTrylockFunc _f;                                 \
            _f = atomic_read(&qemu_rec_mutex_trylock_func);             \
            _f(m, __FILE__, __LINE__);                                  \
        })

#define qemu_cond_wait(c, m) ({                                         \
            QemuCondWaitFunc _f = atomic_read(&qemu_cond_wait_func);    \
            _f(c, m, __FILE__, __LINE__);                               \
        })

#define qemu_cond_timedwait(c, m, ms) ({                                \
            QemuCondTimedWaitFunc _f = atomic_read(&qemu_cond_timedwait_func); \
            _f(c, m, ms, __FILE__, __LINE__);                           \
        })
#endif
fe9959a2 | 103 | |
6c27a0de AB |
/*
 * Unlock is not routed through a profiler function pointer like the
 * lock/wait macros above; it expands directly to the _impl variant,
 * passing the call-site location.
 */
#define qemu_mutex_unlock(mutex) \
        qemu_mutex_unlock_impl(mutex, __FILE__, __LINE__)
106 | ||
/*
 * Out-of-line-callable form of qemu_mutex_lock().  The parenthesized
 * name suppresses function-like macro expansion at the definition, so
 * this yields a real function (usable where an address is needed) whose
 * body still goes through the qemu_mutex_lock() macro above.
 */
static inline void (qemu_mutex_lock)(QemuMutex *mutex)
{
    qemu_mutex_lock(mutex);
}
111 | ||
/*
 * Out-of-line-callable form of qemu_mutex_trylock(); parenthesized name
 * prevents macro expansion here.  Returns the macro's result unchanged.
 */
static inline int (qemu_mutex_trylock)(QemuMutex *mutex)
{
    return qemu_mutex_trylock(mutex);
}
116 | ||
/*
 * Out-of-line-callable form of qemu_mutex_unlock(); parenthesized name
 * prevents macro expansion here.
 */
static inline void (qemu_mutex_unlock)(QemuMutex *mutex)
{
    qemu_mutex_unlock(mutex);
}
feadec63 | 121 | |
fe9959a2 EC |
/*
 * Out-of-line-callable form of qemu_rec_mutex_lock(); parenthesized
 * name prevents macro expansion here.
 */
static inline void (qemu_rec_mutex_lock)(QemuRecMutex *mutex)
{
    qemu_rec_mutex_lock(mutex);
}
126 | ||
/*
 * Out-of-line-callable form of qemu_rec_mutex_trylock(); parenthesized
 * name prevents macro expansion here.  Returns the macro's result.
 */
static inline int (qemu_rec_mutex_trylock)(QemuRecMutex *mutex)
{
    return qemu_rec_mutex_trylock(mutex);
}
131 | ||
feadec63 PB |
132 | /* Prototypes for other functions are in thread-posix.h/thread-win32.h. */ |
133 | void qemu_rec_mutex_init(QemuRecMutex *mutex); | |
e5d355d1 AL |
134 | |
135 | void qemu_cond_init(QemuCond *cond); | |
313b1d69 | 136 | void qemu_cond_destroy(QemuCond *cond); |
9257d46d PB |
137 | |
138 | /* | |
139 | * IMPORTANT: The implementation does not guarantee that pthread_cond_signal | |
140 | * and pthread_cond_broadcast can be called except while the same mutex is | |
141 | * held as in the corresponding pthread_cond_wait calls! | |
142 | */ | |
e5d355d1 AL |
143 | void qemu_cond_signal(QemuCond *cond); |
144 | void qemu_cond_broadcast(QemuCond *cond); | |
6c27a0de AB |
145 | void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex, |
146 | const char *file, const int line); | |
3dcc9c6e YK |
147 | bool qemu_cond_timedwait_impl(QemuCond *cond, QemuMutex *mutex, int ms, |
148 | const char *file, const int line); | |
6c27a0de | 149 | |
6c27a0de AB |
/*
 * Out-of-line-callable form of qemu_cond_wait(); parenthesized name
 * prevents macro expansion here.  The mutex must be held by the caller,
 * as with pthread_cond_wait.
 */
static inline void (qemu_cond_wait)(QemuCond *cond, QemuMutex *mutex)
{
    qemu_cond_wait(cond, mutex);
}
e5d355d1 | 154 | |
3dcc9c6e YK |
/* Returns true if timeout has not expired, and false otherwise */
/*
 * Out-of-line-callable form of qemu_cond_timedwait(); parenthesized
 * name prevents macro expansion here.  @ms is the timeout in
 * milliseconds, per qemu_cond_timedwait_impl above.
 */
static inline bool (qemu_cond_timedwait)(QemuCond *cond, QemuMutex *mutex,
                                         int ms)
{
    return qemu_cond_timedwait(cond, mutex, ms);
}
161 | ||
38b14db3 PB |
162 | void qemu_sem_init(QemuSemaphore *sem, int init); |
163 | void qemu_sem_post(QemuSemaphore *sem); | |
164 | void qemu_sem_wait(QemuSemaphore *sem); | |
165 | int qemu_sem_timedwait(QemuSemaphore *sem, int ms); | |
166 | void qemu_sem_destroy(QemuSemaphore *sem); | |
167 | ||
c7c4d063 PB |
168 | void qemu_event_init(QemuEvent *ev, bool init); |
169 | void qemu_event_set(QemuEvent *ev); | |
170 | void qemu_event_reset(QemuEvent *ev); | |
171 | void qemu_event_wait(QemuEvent *ev); | |
172 | void qemu_event_destroy(QemuEvent *ev); | |
173 | ||
4900116e | 174 | void qemu_thread_create(QemuThread *thread, const char *name, |
cf218714 JK |
175 | void *(*start_routine)(void *), |
176 | void *arg, int mode); | |
177 | void *qemu_thread_join(QemuThread *thread); | |
b7680cb6 | 178 | void qemu_thread_get_self(QemuThread *thread); |
2d797b65 | 179 | bool qemu_thread_is_self(QemuThread *thread); |
c08790f4 | 180 | void qemu_thread_exit(void *retval) QEMU_NORETURN; |
8f480de0 | 181 | void qemu_thread_naming(bool enable); |
313b1d69 | 182 | |
ef57137f | 183 | struct Notifier; |
ca95173c PM |
184 | /** |
185 | * qemu_thread_atexit_add: | |
186 | * @notifier: Notifier to add | |
187 | * | |
188 | * Add the specified notifier to a list which will be run via | |
189 | * notifier_list_notify() when this thread exits (either by calling | |
190 | * qemu_thread_exit() or by returning from its start_routine). | |
191 | * The usual usage is that the caller passes a Notifier which is | |
192 | * a per-thread variable; it can then use the callback to free | |
193 | * other per-thread data. | |
194 | * | |
195 | * If the thread exits as part of the entire process exiting, | |
196 | * it is unspecified whether notifiers are called or not. | |
197 | */ | |
ef57137f | 198 | void qemu_thread_atexit_add(struct Notifier *notifier); |
ca95173c PM |
199 | /** |
200 | * qemu_thread_atexit_remove: | |
201 | * @notifier: Notifier to remove | |
202 | * | |
203 | * Remove the specified notifier from the thread-exit notification | |
204 | * list. It is not valid to try to remove a notifier which is not | |
205 | * on the list. | |
206 | */ | |
ef57137f PB |
207 | void qemu_thread_atexit_remove(struct Notifier *notifier); |
208 | ||
/*
 * A userspace test-and-set spinlock.  'value' is the lock word: set to
 * nonzero by qemu_spin_lock/trylock and cleared by qemu_spin_unlock
 * (see the accessors below, which use the GCC __sync builtins).
 */
struct QemuSpin {
    int value;
};
ac9a9eba GD |
212 | |
/* Initialize @spin to the unlocked state. */
static inline void qemu_spin_init(QemuSpin *spin)
{
    /* __sync_lock_release stores 0 (with release semantics): unlocked. */
    __sync_lock_release(&spin->value);
}
217 | ||
4384a70d EC |
/* Nothing to do: a QemuSpin is a bare int and owns no resources. */
static inline void qemu_spin_destroy(QemuSpin *spin)
{ }
220 | ||
ac9a9eba GD |
/*
 * Acquire @spin, busy-waiting until it becomes available.
 * Must not be called recursively by the holder (no owner tracking).
 */
static inline void qemu_spin_lock(QemuSpin *spin)
{
    /*
     * Test-and-test-and-set: attempt the atomic test-and-set (an
     * acquire barrier per the __sync builtin); while contended, spin
     * on plain reads so the cache line is not hammered by atomic
     * writes, pausing each iteration with cpu_relax().
     */
    while (unlikely(__sync_lock_test_and_set(&spin->value, true))) {
        while (atomic_read(&spin->value)) {
            cpu_relax();
        }
    }
}
229 | ||
230 | static inline bool qemu_spin_trylock(QemuSpin *spin) | |
231 | { | |
232 | return __sync_lock_test_and_set(&spin->value, true); | |
233 | } | |
234 | ||
235 | static inline bool qemu_spin_locked(QemuSpin *spin) | |
236 | { | |
237 | return atomic_read(&spin->value); | |
238 | } | |
239 | ||
/* Release @spin: store 0 with release semantics (GCC __sync builtin). */
static inline void qemu_spin_unlock(QemuSpin *spin)
{
    __sync_lock_release(&spin->value);
}
244 | ||
/*
 * QemuLockCnt: a mutex paired with a visitor counter; see the
 * qemu_lockcnt_*() documentation below for the locking protocol.
 */
struct QemuLockCnt {
#ifndef CONFIG_LINUX
    QemuMutex mutex;
#endif
    /*
     * NOTE(review): on Linux there is no mutex member -- presumably the
     * count word itself doubles as a futex in the lockcnt
     * implementation; confirm against util/lockcnt.c.
     */
    unsigned count;
};
251 | ||
252 | /** | |
253 | * qemu_lockcnt_init: initialize a QemuLockcnt | |
254 | * @lockcnt: the lockcnt to initialize | |
255 | * | |
256 | * Initialize lockcnt's counter to zero and prepare its mutex | |
257 | * for usage. | |
258 | */ | |
259 | void qemu_lockcnt_init(QemuLockCnt *lockcnt); | |
260 | ||
261 | /** | |
262 | * qemu_lockcnt_destroy: destroy a QemuLockcnt | |
263 | * @lockcnt: the lockcnt to destruct | |
264 | * | |
265 | * Destroy lockcnt's mutex. | |
266 | */ | |
267 | void qemu_lockcnt_destroy(QemuLockCnt *lockcnt); | |
268 | ||
269 | /** | |
270 | * qemu_lockcnt_inc: increment a QemuLockCnt's counter | |
271 | * @lockcnt: the lockcnt to operate on | |
272 | * | |
273 | * If the lockcnt's count is zero, wait for critical sections | |
274 | * to finish and increment lockcnt's count to 1. If the count | |
275 | * is not zero, just increment it. | |
276 | * | |
277 | * Because this function can wait on the mutex, it must not be | |
278 | * called while the lockcnt's mutex is held by the current thread. | |
279 | * For the same reason, qemu_lockcnt_inc can also contribute to | |
280 | * AB-BA deadlocks. This is a sample deadlock scenario: | |
281 | * | |
282 | * thread 1 thread 2 | |
283 | * ------------------------------------------------------- | |
284 | * qemu_lockcnt_lock(&lc1); | |
285 | * qemu_lockcnt_lock(&lc2); | |
286 | * qemu_lockcnt_inc(&lc2); | |
287 | * qemu_lockcnt_inc(&lc1); | |
288 | */ | |
289 | void qemu_lockcnt_inc(QemuLockCnt *lockcnt); | |
290 | ||
291 | /** | |
292 | * qemu_lockcnt_dec: decrement a QemuLockCnt's counter | |
293 | * @lockcnt: the lockcnt to operate on | |
294 | */ | |
295 | void qemu_lockcnt_dec(QemuLockCnt *lockcnt); | |
296 | ||
297 | /** | |
298 | * qemu_lockcnt_dec_and_lock: decrement a QemuLockCnt's counter and | |
299 | * possibly lock it. | |
300 | * @lockcnt: the lockcnt to operate on | |
301 | * | |
302 | * Decrement lockcnt's count. If the new count is zero, lock | |
303 | * the mutex and return true. Otherwise, return false. | |
304 | */ | |
305 | bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt); | |
306 | ||
307 | /** | |
308 | * qemu_lockcnt_dec_if_lock: possibly decrement a QemuLockCnt's counter and | |
309 | * lock it. | |
310 | * @lockcnt: the lockcnt to operate on | |
311 | * | |
312 | * If the count is 1, decrement the count to zero, lock | |
313 | * the mutex and return true. Otherwise, return false. | |
314 | */ | |
315 | bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt); | |
316 | ||
317 | /** | |
318 | * qemu_lockcnt_lock: lock a QemuLockCnt's mutex. | |
319 | * @lockcnt: the lockcnt to operate on | |
320 | * | |
321 | * Remember that concurrent visits are not blocked unless the count is | |
322 | * also zero. You can use qemu_lockcnt_count to check for this inside a | |
323 | * critical section. | |
324 | */ | |
325 | void qemu_lockcnt_lock(QemuLockCnt *lockcnt); | |
326 | ||
327 | /** | |
328 | * qemu_lockcnt_unlock: release a QemuLockCnt's mutex. | |
329 | * @lockcnt: the lockcnt to operate on. | |
330 | */ | |
331 | void qemu_lockcnt_unlock(QemuLockCnt *lockcnt); | |
332 | ||
333 | /** | |
334 | * qemu_lockcnt_inc_and_unlock: combined unlock/increment on a QemuLockCnt. | |
335 | * @lockcnt: the lockcnt to operate on. | |
336 | * | |
337 | * This is the same as | |
338 | * | |
339 | * qemu_lockcnt_unlock(lockcnt); | |
340 | * qemu_lockcnt_inc(lockcnt); | |
341 | * | |
342 | * but more efficient. | |
343 | */ | |
344 | void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt); | |
345 | ||
346 | /** | |
347 | * qemu_lockcnt_count: query a LockCnt's count. | |
348 | * @lockcnt: the lockcnt to query. | |
349 | * | |
350 | * Note that the count can change at any time. Still, while the | |
351 | * lockcnt is locked, one can usefully check whether the count | |
352 | * is non-zero. | |
353 | */ | |
354 | unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt); | |
355 | ||
e5d355d1 | 356 | #endif |