]>
Commit | Line | Data |
---|---|---|
e5d355d1 AL |
1 | /* |
2 | * Wrappers around mutex/cond/thread functions | |
3 | * | |
4 | * Copyright Red Hat, Inc. 2009 | |
5 | * | |
6 | * Author: | |
7 | * Marcelo Tosatti <mtosatti@redhat.com> | |
8 | * | |
9 | * This work is licensed under the terms of the GNU GPL, version 2 or later. | |
10 | * See the COPYING file in the top-level directory. | |
11 | * | |
12 | */ | |
13 | #include <stdlib.h> | |
14 | #include <stdio.h> | |
15 | #include <errno.h> | |
16 | #include <time.h> | |
17 | #include <signal.h> | |
18 | #include <stdint.h> | |
19 | #include <string.h> | |
38b14db3 PB |
20 | #include <limits.h> |
21 | #include <unistd.h> | |
22 | #include <sys/time.h> | |
c7c4d063 PB |
23 | #ifdef __linux__ |
24 | #include <sys/syscall.h> | |
25 | #include <linux/futex.h> | |
26 | #endif | |
1de7afc9 | 27 | #include "qemu/thread.h" |
c7c4d063 | 28 | #include "qemu/atomic.h" |
e5d355d1 | 29 | |
8f480de0 DDAG |
/* When true, qemu_thread_create() applies the caller-supplied name to
 * new threads (only where pthread_setname_np is available).
 */
static bool name_threads;

/* Enable or disable naming of subsequently created threads. */
void qemu_thread_naming(bool enable)
{
    name_threads = enable;
}
36 | ||
e5d355d1 AL |
/*
 * Print "qemu: <msg>: <error string>" to stderr and abort().
 * @err is an errno/pthread-style error code; @msg is typically __func__.
 * Does not return.
 */
static void error_exit(int err, const char *msg)
{
    fprintf(stderr, "qemu: %s: %s\n", msg, strerror(err));
    abort();
}
42 | ||
43 | void qemu_mutex_init(QemuMutex *mutex) | |
44 | { | |
45 | int err; | |
89b48b56 | 46 | pthread_mutexattr_t mutexattr; |
e5d355d1 | 47 | |
89b48b56 PB |
48 | pthread_mutexattr_init(&mutexattr); |
49 | pthread_mutexattr_settype(&mutexattr, PTHREAD_MUTEX_ERRORCHECK); | |
50 | err = pthread_mutex_init(&mutex->lock, &mutexattr); | |
51 | pthread_mutexattr_destroy(&mutexattr); | |
e5d355d1 AL |
52 | if (err) |
53 | error_exit(err, __func__); | |
54 | } | |
55 | ||
313b1d69 CC |
56 | void qemu_mutex_destroy(QemuMutex *mutex) |
57 | { | |
58 | int err; | |
59 | ||
60 | err = pthread_mutex_destroy(&mutex->lock); | |
61 | if (err) | |
62 | error_exit(err, __func__); | |
63 | } | |
64 | ||
e5d355d1 AL |
65 | void qemu_mutex_lock(QemuMutex *mutex) |
66 | { | |
67 | int err; | |
68 | ||
69 | err = pthread_mutex_lock(&mutex->lock); | |
70 | if (err) | |
71 | error_exit(err, __func__); | |
72 | } | |
73 | ||
74 | int qemu_mutex_trylock(QemuMutex *mutex) | |
75 | { | |
76 | return pthread_mutex_trylock(&mutex->lock); | |
77 | } | |
78 | ||
e5d355d1 AL |
79 | void qemu_mutex_unlock(QemuMutex *mutex) |
80 | { | |
81 | int err; | |
82 | ||
83 | err = pthread_mutex_unlock(&mutex->lock); | |
84 | if (err) | |
85 | error_exit(err, __func__); | |
86 | } | |
87 | ||
88 | void qemu_cond_init(QemuCond *cond) | |
89 | { | |
90 | int err; | |
91 | ||
92 | err = pthread_cond_init(&cond->cond, NULL); | |
93 | if (err) | |
94 | error_exit(err, __func__); | |
95 | } | |
96 | ||
313b1d69 CC |
97 | void qemu_cond_destroy(QemuCond *cond) |
98 | { | |
99 | int err; | |
100 | ||
101 | err = pthread_cond_destroy(&cond->cond); | |
102 | if (err) | |
103 | error_exit(err, __func__); | |
104 | } | |
105 | ||
e5d355d1 AL |
106 | void qemu_cond_signal(QemuCond *cond) |
107 | { | |
108 | int err; | |
109 | ||
110 | err = pthread_cond_signal(&cond->cond); | |
111 | if (err) | |
112 | error_exit(err, __func__); | |
113 | } | |
114 | ||
115 | void qemu_cond_broadcast(QemuCond *cond) | |
116 | { | |
117 | int err; | |
118 | ||
119 | err = pthread_cond_broadcast(&cond->cond); | |
120 | if (err) | |
121 | error_exit(err, __func__); | |
122 | } | |
123 | ||
124 | void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex) | |
125 | { | |
126 | int err; | |
127 | ||
128 | err = pthread_cond_wait(&cond->cond, &mutex->lock); | |
129 | if (err) | |
130 | error_exit(err, __func__); | |
131 | } | |
132 | ||
38b14db3 PB |
133 | void qemu_sem_init(QemuSemaphore *sem, int init) |
134 | { | |
135 | int rc; | |
136 | ||
927fa909 | 137 | #if defined(__APPLE__) || defined(__NetBSD__) |
c166cb72 PB |
138 | rc = pthread_mutex_init(&sem->lock, NULL); |
139 | if (rc != 0) { | |
140 | error_exit(rc, __func__); | |
141 | } | |
142 | rc = pthread_cond_init(&sem->cond, NULL); | |
143 | if (rc != 0) { | |
144 | error_exit(rc, __func__); | |
145 | } | |
146 | if (init < 0) { | |
147 | error_exit(EINVAL, __func__); | |
148 | } | |
149 | sem->count = init; | |
150 | #else | |
38b14db3 PB |
151 | rc = sem_init(&sem->sem, 0, init); |
152 | if (rc < 0) { | |
153 | error_exit(errno, __func__); | |
154 | } | |
c166cb72 | 155 | #endif |
38b14db3 PB |
156 | } |
157 | ||
158 | void qemu_sem_destroy(QemuSemaphore *sem) | |
159 | { | |
160 | int rc; | |
161 | ||
927fa909 | 162 | #if defined(__APPLE__) || defined(__NetBSD__) |
c166cb72 PB |
163 | rc = pthread_cond_destroy(&sem->cond); |
164 | if (rc < 0) { | |
165 | error_exit(rc, __func__); | |
166 | } | |
167 | rc = pthread_mutex_destroy(&sem->lock); | |
168 | if (rc < 0) { | |
169 | error_exit(rc, __func__); | |
170 | } | |
171 | #else | |
38b14db3 PB |
172 | rc = sem_destroy(&sem->sem); |
173 | if (rc < 0) { | |
174 | error_exit(errno, __func__); | |
175 | } | |
c166cb72 | 176 | #endif |
38b14db3 PB |
177 | } |
178 | ||
/*
 * Increment @sem, waking at least one waiter if any are blocked.
 * Aborts via error_exit() on failure.
 */
void qemu_sem_post(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    pthread_mutex_lock(&sem->lock);
    if (sem->count == UINT_MAX) {
        /* The emulated counter would overflow; report EINVAL (after
         * dropping the lock below).
         */
        rc = EINVAL;
    } else {
        sem->count++;
        rc = pthread_cond_signal(&sem->cond);
    }
    pthread_mutex_unlock(&sem->lock);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_post(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}
202 | ||
/*
 * Fill *ts with the absolute wall-clock (gettimeofday) time @ms
 * milliseconds from now, with tv_nsec normalized into [0, 1e9).
 */
static void compute_abs_deadline(struct timespec *ts, int ms)
{
    struct timeval now;

    gettimeofday(&now, NULL);
    ts->tv_sec = now.tv_sec + ms / 1000;
    ts->tv_nsec = now.tv_usec * 1000 + (ms % 1000) * 1000000;
    if (ts->tv_nsec >= 1000000000) {
        ts->tv_nsec -= 1000000000;
        ts->tv_sec++;
    }
}
214 | ||
/*
 * Decrement @sem, waiting at most @ms milliseconds.
 * Returns 0 if the semaphore was acquired, -1 if the wait timed out.
 * Any other failure aborts via error_exit().
 */
int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc;
    struct timespec ts;

#if defined(__APPLE__) || defined(__NetBSD__)
    rc = 0;
    compute_abs_deadline(&ts, ms);
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_timedwait(&sem->cond, &sem->lock, &ts);
        if (rc == ETIMEDOUT) {
            break;
        }
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    /* Consume a count only if we did not time out. */
    if (rc != ETIMEDOUT) {
        --sem->count;
    }
    pthread_mutex_unlock(&sem->lock);
    return (rc == ETIMEDOUT ? -1 : 0);
#else
    if (ms <= 0) {
        /* This is cheaper than sem_timedwait. */
        do {
            /* Retry if interrupted by a signal. */
            rc = sem_trywait(&sem->sem);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == EAGAIN) {
            /* Semaphore unavailable and no time to wait: timeout. */
            return -1;
        }
    } else {
        compute_abs_deadline(&ts, ms);
        do {
            /* Retry if interrupted by a signal. */
            rc = sem_timedwait(&sem->sem, &ts);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == ETIMEDOUT) {
            return -1;
        }
    }
    if (rc < 0) {
        error_exit(errno, __func__);
    }
    return 0;
#endif
}
262 | ||
/*
 * Decrement @sem, blocking until a count is available.
 * Aborts via error_exit() on failure.
 */
void qemu_sem_wait(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_wait(&sem->cond, &sem->lock);
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    --sem->count;
    pthread_mutex_unlock(&sem->lock);
#else
    /* Retry if interrupted by a signal (EINTR). */
    do {
        rc = sem_wait(&sem->sem);
    } while (rc == -1 && errno == EINTR);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}
286 | ||
c7c4d063 PB |
#ifdef __linux__
/* Thin wrapper around the raw futex(2) system call. */
#define futex(...) syscall(__NR_futex, __VA_ARGS__)

/* Wake up to @n threads blocked in futex_wait() on @ev. */
static inline void futex_wake(QemuEvent *ev, int n)
{
    futex(ev, FUTEX_WAKE, n, NULL, NULL, 0);
}

/* Sleep while ev->value == @val; FUTEX_WAIT returns immediately if the
 * value already differs, so there is no lost-wakeup window.
 */
static inline void futex_wait(QemuEvent *ev, unsigned val)
{
    futex(ev, FUTEX_WAIT, (int) val, NULL, NULL, 0);
}
#else
/* Non-Linux fallback: emulate futex wake/wait with the event's
 * mutex and condition variable.
 */
static inline void futex_wake(QemuEvent *ev, int n)
{
    if (n == 1) {
        pthread_cond_signal(&ev->cond);
    } else {
        pthread_cond_broadcast(&ev->cond);
    }
}

static inline void futex_wait(QemuEvent *ev, unsigned val)
{
    pthread_mutex_lock(&ev->lock);
    /* Re-check under the lock to avoid missing a concurrent wake. */
    if (ev->value == val) {
        pthread_cond_wait(&ev->cond, &ev->lock);
    }
    pthread_mutex_unlock(&ev->lock);
}
#endif
318 | ||
319 | /* Valid transitions: | |
320 | * - free->set, when setting the event | |
321 | * - busy->set, when setting the event, followed by futex_wake | |
322 | * - set->free, when resetting the event | |
323 | * - free->busy, when waiting | |
324 | * | |
325 | * set->busy does not happen (it can be observed from the outside but | |
326 | * it really is set->free->busy). | |
327 | * | |
328 | * busy->free provably cannot happen; to enforce it, the set->free transition | |
329 | * is done with an OR, which becomes a no-op if the event has concurrently | |
330 | * transitioned to free or busy. | |
331 | */ | |
332 | ||
333 | #define EV_SET 0 | |
334 | #define EV_FREE 1 | |
335 | #define EV_BUSY -1 | |
336 | ||
337 | void qemu_event_init(QemuEvent *ev, bool init) | |
338 | { | |
339 | #ifndef __linux__ | |
340 | pthread_mutex_init(&ev->lock, NULL); | |
341 | pthread_cond_init(&ev->cond, NULL); | |
342 | #endif | |
343 | ||
344 | ev->value = (init ? EV_SET : EV_FREE); | |
345 | } | |
346 | ||
/*
 * Destroy @ev.  On Linux the futex needs no teardown; elsewhere the
 * emulation's mutex and condition variable are destroyed.
 */
void qemu_event_destroy(QemuEvent *ev)
{
#ifndef __linux__
    pthread_mutex_destroy(&ev->lock);
    pthread_cond_destroy(&ev->cond);
#endif
}
354 | ||
/*
 * Set @ev, waking all current waiters.  See the transition table
 * above the EV_* macros for the valid state changes.
 */
void qemu_event_set(QemuEvent *ev)
{
    /* Fast path: already set, nothing to do. */
    if (atomic_mb_read(&ev->value) != EV_SET) {
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up. */
            futex_wake(ev, INT_MAX);
        }
    }
}
364 | ||
/*
 * Reset @ev to the "free" state; only a set event can be reset.
 */
void qemu_event_reset(QemuEvent *ev)
{
    if (atomic_mb_read(&ev->value) == EV_SET) {
        /*
         * If there was a concurrent reset (or even reset+wait),
         * do nothing.  Otherwise change EV_SET->EV_FREE: the OR is a
         * no-op if the value has already moved on to EV_FREE or EV_BUSY.
         */
        atomic_or(&ev->value, EV_FREE);
    }
}
375 | ||
/*
 * Block until @ev is set.  Returns immediately if it already is;
 * otherwise advertise a waiter (EV_FREE -> EV_BUSY) before sleeping
 * in futex_wait().
 */
void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    value = atomic_mb_read(&ev->value);
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /*
             * Leave the event reset and tell qemu_event_set that there
             * are waiters.  No need to retry, because there cannot be
             * a concurrent busy->free transition.  After the CAS, the
             * event will be either set or busy.
             */
            if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                return;
            }
        }
        futex_wait(ev, EV_BUSY);
    }
}
396 | ||
4900116e | 397 | void qemu_thread_create(QemuThread *thread, const char *name, |
e5d355d1 | 398 | void *(*start_routine)(void*), |
cf218714 | 399 | void *arg, int mode) |
e5d355d1 | 400 | { |
cf218714 | 401 | sigset_t set, oldset; |
e5d355d1 | 402 | int err; |
8763046b | 403 | pthread_attr_t attr; |
e5d355d1 | 404 | |
8763046b JK |
405 | err = pthread_attr_init(&attr); |
406 | if (err) { | |
407 | error_exit(err, __func__); | |
408 | } | |
409 | if (mode == QEMU_THREAD_DETACHED) { | |
410 | err = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); | |
411 | if (err) { | |
412 | error_exit(err, __func__); | |
413 | } | |
414 | } | |
55541c8a | 415 | |
cf218714 | 416 | /* Leave signal handling to the iothread. */ |
55541c8a PB |
417 | sigfillset(&set); |
418 | pthread_sigmask(SIG_SETMASK, &set, &oldset); | |
8763046b | 419 | err = pthread_create(&thread->thread, &attr, start_routine, arg); |
e5d355d1 AL |
420 | if (err) |
421 | error_exit(err, __func__); | |
55541c8a | 422 | |
4900116e DDAG |
423 | #ifdef _GNU_SOURCE |
424 | if (name_threads) { | |
425 | pthread_setname_np(thread->thread, name); | |
426 | } | |
427 | #endif | |
428 | ||
55541c8a | 429 | pthread_sigmask(SIG_SETMASK, &oldset, NULL); |
8763046b JK |
430 | |
431 | pthread_attr_destroy(&attr); | |
e5d355d1 AL |
432 | } |
433 | ||
b7680cb6 | 434 | void qemu_thread_get_self(QemuThread *thread) |
e5d355d1 AL |
435 | { |
436 | thread->thread = pthread_self(); | |
437 | } | |
438 | ||
2d797b65 | 439 | bool qemu_thread_is_self(QemuThread *thread) |
e5d355d1 | 440 | { |
b7680cb6 | 441 | return pthread_equal(pthread_self(), thread->thread); |
e5d355d1 AL |
442 | } |
443 | ||
313b1d69 CC |
/* Terminate the calling thread, making @retval available to
 * qemu_thread_join().  Does not return.
 */
void qemu_thread_exit(void *retval)
{
    pthread_exit(retval);
}
8763046b JK |
448 | |
449 | void *qemu_thread_join(QemuThread *thread) | |
450 | { | |
451 | int err; | |
452 | void *ret; | |
453 | ||
454 | err = pthread_join(thread->thread, &ret); | |
455 | if (err) { | |
456 | error_exit(err, __func__); | |
457 | } | |
458 | return ret; | |
459 | } |