#ifndef QEMU_THREAD_H
#define QEMU_THREAD_H

#include "qemu/processor.h"
#include "qemu/atomic.h"

typedef struct QemuCond QemuCond;
typedef struct QemuSemaphore QemuSemaphore;
typedef struct QemuEvent QemuEvent;
typedef struct QemuLockCnt QemuLockCnt;
typedef struct QemuThread QemuThread;

#ifdef _WIN32
#include "qemu/thread-win32.h"
#else
#include "qemu/thread-posix.h"
#endif

/* include QSP header once QemuMutex, QemuCond etc. are defined */
#include "qemu/qsp.h"

#define QEMU_THREAD_JOINABLE 0
#define QEMU_THREAD_DETACHED 1

void qemu_mutex_init(QemuMutex *mutex);
void qemu_mutex_destroy(QemuMutex *mutex);
int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line);
void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line);
void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line);

typedef void (*QemuMutexLockFunc)(QemuMutex *m, const char *f, int l);
typedef int (*QemuMutexTrylockFunc)(QemuMutex *m, const char *f, int l);
typedef void (*QemuRecMutexLockFunc)(QemuRecMutex *m, const char *f, int l);
typedef int (*QemuRecMutexTrylockFunc)(QemuRecMutex *m, const char *f, int l);
typedef void (*QemuCondWaitFunc)(QemuCond *c, QemuMutex *m, const char *f,
                                 int l);

extern QemuMutexLockFunc qemu_bql_mutex_lock_func;
extern QemuMutexLockFunc qemu_mutex_lock_func;
extern QemuMutexTrylockFunc qemu_mutex_trylock_func;
extern QemuRecMutexLockFunc qemu_rec_mutex_lock_func;
extern QemuRecMutexTrylockFunc qemu_rec_mutex_trylock_func;
extern QemuCondWaitFunc qemu_cond_wait_func;

/* convenience macros to bypass the profiler */
#define qemu_mutex_lock__raw(m)                         \
        qemu_mutex_lock_impl(m, __FILE__, __LINE__)
#define qemu_mutex_trylock__raw(m)                      \
        qemu_mutex_trylock_impl(m, __FILE__, __LINE__)

#define qemu_mutex_lock(m) ({                                           \
            QemuMutexLockFunc _f = atomic_read(&qemu_mutex_lock_func);  \
            _f(m, __FILE__, __LINE__);                                  \
        })

#define qemu_mutex_trylock(m) ({                                              \
            QemuMutexTrylockFunc _f = atomic_read(&qemu_mutex_trylock_func);  \
            _f(m, __FILE__, __LINE__);                                        \
        })

#define qemu_rec_mutex_lock(m) ({                                             \
            QemuRecMutexLockFunc _f = atomic_read(&qemu_rec_mutex_lock_func); \
            _f(m, __FILE__, __LINE__);                                        \
        })

#define qemu_rec_mutex_trylock(m) ({                            \
            QemuRecMutexTrylockFunc _f;                          \
            _f = atomic_read(&qemu_rec_mutex_trylock_func);      \
            _f(m, __FILE__, __LINE__);                            \
        })

#define qemu_cond_wait(c, m) ({                                         \
            QemuCondWaitFunc _f = atomic_read(&qemu_cond_wait_func);    \
            _f(c, m, __FILE__, __LINE__);                               \
        })

#define qemu_mutex_unlock(mutex) \
        qemu_mutex_unlock_impl(mutex, __FILE__, __LINE__)

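/*
 * Illustrative sketch (assumed usage, not part of this header): the wrappers
 * above record __FILE__ and __LINE__ so the QSP profiler can attribute
 * contention to the call site.  The trylock return convention is assumed to
 * follow pthread_mutex_trylock: zero when the lock was acquired.
 *
 *     if (qemu_mutex_trylock(&lock) == 0) {
 *         ... critical section ...
 *         qemu_mutex_unlock(&lock);
 *     }
 */
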
/*
 * Parenthesizing the function names below prevents the corresponding
 * function-like macros from expanding, so these define real, addressable
 * functions whose bodies simply invoke the macro versions.
 */
static inline void (qemu_mutex_lock)(QemuMutex *mutex)
{
    qemu_mutex_lock(mutex);
}

static inline int (qemu_mutex_trylock)(QemuMutex *mutex)
{
    return qemu_mutex_trylock(mutex);
}

static inline void (qemu_mutex_unlock)(QemuMutex *mutex)
{
    qemu_mutex_unlock(mutex);
}

static inline void (qemu_rec_mutex_lock)(QemuRecMutex *mutex)
{
    qemu_rec_mutex_lock(mutex);
}

static inline int (qemu_rec_mutex_trylock)(QemuRecMutex *mutex)
{
    return qemu_rec_mutex_trylock(mutex);
}

/* Prototypes for other functions are in thread-posix.h/thread-win32.h. */
void qemu_rec_mutex_init(QemuRecMutex *mutex);

void qemu_cond_init(QemuCond *cond);
void qemu_cond_destroy(QemuCond *cond);

/*
 * IMPORTANT: correct behaviour of qemu_cond_signal and qemu_cond_broadcast
 * is only guaranteed when they are called while holding the same mutex that
 * the corresponding qemu_cond_wait callers hold!
 */
void qemu_cond_signal(QemuCond *cond);
void qemu_cond_broadcast(QemuCond *cond);
void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex,
                         const char *file, const int line);

static inline void (qemu_cond_wait)(QemuCond *cond, QemuMutex *mutex)
{
    qemu_cond_wait(cond, mutex);
}

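/*
 * Illustrative sketch (assumed usage, not part of this header): waiters
 * re-check their predicate in a loop, and the signalling thread holds the
 * same mutex while changing the predicate and signalling, as required by
 * the note above.
 *
 *     // waiter
 *     qemu_mutex_lock(&lock);
 *     while (!ready) {
 *         qemu_cond_wait(&cond, &lock);
 *     }
 *     qemu_mutex_unlock(&lock);
 *
 *     // signaller
 *     qemu_mutex_lock(&lock);
 *     ready = true;
 *     qemu_cond_signal(&cond);
 *     qemu_mutex_unlock(&lock);
 */
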
void qemu_sem_init(QemuSemaphore *sem, int init);
void qemu_sem_post(QemuSemaphore *sem);
void qemu_sem_wait(QemuSemaphore *sem);
int qemu_sem_timedwait(QemuSemaphore *sem, int ms);
void qemu_sem_destroy(QemuSemaphore *sem);

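/*
 * Illustrative sketch (assumed usage): a producer posts the semaphore and a
 * consumer waits for it with a bounded timeout.  The return-value convention
 * of qemu_sem_timedwait is assumed from the POSIX implementation: 0 when the
 * semaphore was taken, negative when the timeout expired.
 *
 *     QemuSemaphore sem;
 *     qemu_sem_init(&sem, 0);
 *
 *     // producer
 *     qemu_sem_post(&sem);
 *
 *     // consumer, waiting at most 100 ms
 *     if (qemu_sem_timedwait(&sem, 100) < 0) {
 *         ... timed out ...
 *     }
 */
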
void qemu_event_init(QemuEvent *ev, bool init);
void qemu_event_set(QemuEvent *ev);
void qemu_event_reset(QemuEvent *ev);
void qemu_event_wait(QemuEvent *ev);
void qemu_event_destroy(QemuEvent *ev);

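/*
 * Illustrative sketch (assumed usage): one thread waits for an event that
 * another thread sets when its work is done.  qemu_event_wait returns
 * immediately if the event is already set; qemu_event_reset arms it again
 * for the next round.
 *
 *     QemuEvent done;
 *     qemu_event_init(&done, false);
 *
 *     // worker thread, after finishing its work:
 *     qemu_event_set(&done);
 *
 *     // waiting thread:
 *     qemu_event_wait(&done);
 *     qemu_event_reset(&done);
 */
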
void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void *),
                        void *arg, int mode);
void *qemu_thread_join(QemuThread *thread);
void qemu_thread_get_self(QemuThread *thread);
bool qemu_thread_is_self(QemuThread *thread);
void qemu_thread_exit(void *retval);
void qemu_thread_naming(bool enable);

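/*
 * Illustrative sketch (assumed usage; the thread name and start routine are
 * made up for the example): create a joinable worker thread and wait for it.
 * Threads created with QEMU_THREAD_DETACHED must not be joined.
 *
 *     static void *worker_fn(void *opaque)
 *     {
 *         ... do work on opaque ...
 *         return NULL;
 *     }
 *
 *     QemuThread worker;
 *     qemu_thread_create(&worker, "my-worker", worker_fn, opaque,
 *                        QEMU_THREAD_JOINABLE);
 *     ...
 *     qemu_thread_join(&worker);
 */
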
struct Notifier;
void qemu_thread_atexit_add(struct Notifier *notifier);
void qemu_thread_atexit_remove(struct Notifier *notifier);

struct QemuSpin {
    int value;
};

static inline void qemu_spin_init(QemuSpin *spin)
{
    __sync_lock_release(&spin->value);
}

static inline void qemu_spin_lock(QemuSpin *spin)
{
    while (unlikely(__sync_lock_test_and_set(&spin->value, true))) {
        while (atomic_read(&spin->value)) {
            cpu_relax();
        }
    }
}

/* Returns the previous lock state: false if the lock was acquired,
 * true if it was already taken and the acquisition failed. */
static inline bool qemu_spin_trylock(QemuSpin *spin)
{
    return __sync_lock_test_and_set(&spin->value, true);
}

static inline bool qemu_spin_locked(QemuSpin *spin)
{
    return atomic_read(&spin->value);
}

static inline void qemu_spin_unlock(QemuSpin *spin)
{
    __sync_lock_release(&spin->value);
}

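/*
 * Illustrative sketch (assumed usage): protect a tiny critical section with
 * a spinlock.  Note that, with the implementation above, qemu_spin_trylock
 * returns false when the lock was successfully taken.
 *
 *     QemuSpin lock;
 *     qemu_spin_init(&lock);
 *
 *     if (!qemu_spin_trylock(&lock)) {
 *         ... short critical section ...
 *         qemu_spin_unlock(&lock);
 *     }
 */
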
struct QemuLockCnt {
#ifndef CONFIG_LINUX
    QemuMutex mutex;
#endif
    unsigned count;
};

/**
 * qemu_lockcnt_init: initialize a QemuLockCnt
 * @lockcnt: the lockcnt to initialize
 *
 * Initialize lockcnt's counter to zero and prepare its mutex
 * for usage.
 */
void qemu_lockcnt_init(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_destroy: destroy a QemuLockCnt
 * @lockcnt: the lockcnt to destruct
 *
 * Destroy lockcnt's mutex.
 */
void qemu_lockcnt_destroy(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_inc: increment a QemuLockCnt's counter
 * @lockcnt: the lockcnt to operate on
 *
 * If the lockcnt's count is zero, wait for critical sections
 * to finish and increment lockcnt's count to 1.  If the count
 * is not zero, just increment it.
 *
 * Because this function can wait on the mutex, it must not be
 * called while the lockcnt's mutex is held by the current thread.
 * For the same reason, qemu_lockcnt_inc can also contribute to
 * AB-BA deadlocks.  This is a sample deadlock scenario:
 *
 *            thread 1                      thread 2
 *            -------------------------------------------------------
 *            qemu_lockcnt_lock(&lc1);
 *                                          qemu_lockcnt_lock(&lc2);
 *            qemu_lockcnt_inc(&lc2);
 *                                          qemu_lockcnt_inc(&lc1);
 */
void qemu_lockcnt_inc(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_dec: decrement a QemuLockCnt's counter
 * @lockcnt: the lockcnt to operate on
 */
void qemu_lockcnt_dec(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_dec_and_lock: decrement a QemuLockCnt's counter and
 * possibly lock it.
 * @lockcnt: the lockcnt to operate on
 *
 * Decrement lockcnt's count.  If the new count is zero, lock
 * the mutex and return true.  Otherwise, return false.
 */
bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_dec_if_lock: possibly decrement a QemuLockCnt's counter and
 * lock it.
 * @lockcnt: the lockcnt to operate on
 *
 * If the count is 1, decrement the count to zero, lock
 * the mutex and return true.  Otherwise, return false.
 */
bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_lock: lock a QemuLockCnt's mutex.
 * @lockcnt: the lockcnt to operate on
 *
 * Remember that concurrent visits are not blocked unless the count is
 * also zero.  You can use qemu_lockcnt_count to check for this inside a
 * critical section.
 */
void qemu_lockcnt_lock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_unlock: release a QemuLockCnt's mutex.
 * @lockcnt: the lockcnt to operate on.
 */
void qemu_lockcnt_unlock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_inc_and_unlock: combined unlock/increment on a QemuLockCnt.
 * @lockcnt: the lockcnt to operate on.
 *
 * This is the same as
 *
 *     qemu_lockcnt_unlock(lockcnt);
 *     qemu_lockcnt_inc(lockcnt);
 *
 * but more efficient.
 */
void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt);

/**
 * qemu_lockcnt_count: query a QemuLockCnt's count.
 * @lockcnt: the lockcnt to query.
 *
 * Note that the count can change at any time.  Still, while the
 * lockcnt is locked, one can usefully check whether the count
 * is non-zero.
 */
unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt);

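/*
 * Illustrative sketch (assumed usage): a QemuLockCnt lets readers visit a
 * data structure concurrently, while a writer takes the mutex and only
 * frees elements once no visitors remain (count == 0).
 *
 *     // reader / visitor
 *     qemu_lockcnt_inc(&lc);
 *     ... walk the structure, possibly concurrently with other visitors ...
 *     qemu_lockcnt_dec(&lc);
 *
 *     // writer, removing an element
 *     qemu_lockcnt_lock(&lc);
 *     ... unlink the element, but do not free it yet ...
 *     if (qemu_lockcnt_count(&lc) == 0) {
 *         ... no concurrent visitors, safe to free the element ...
 *     }
 *     qemu_lockcnt_unlock(&lc);
 */
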
#endif