/*
 * Wrappers around mutex/cond/thread functions
 *
 * Copyright Red Hat, Inc. 2009
 *
 * Author:
 *  Marcelo Tosatti <mtosatti@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <time.h>
#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <limits.h>
#include <unistd.h>
#include <sys/time.h>
#ifdef __linux__
#include <sys/syscall.h>
#include <linux/futex.h>
#endif
#include "qemu/thread.h"
#include "qemu/atomic.h"
#include "qemu/notify.h"

static bool name_threads;

void qemu_thread_naming(bool enable)
{
    name_threads = enable;

#ifndef CONFIG_THREAD_SETNAME_BYTHREAD
    /* This is a debugging option, not fatal */
    if (enable) {
        fprintf(stderr, "qemu: thread naming not supported on this host\n");
    }
#endif
}

static void error_exit(int err, const char *msg)
{
    fprintf(stderr, "qemu: %s: %s\n", msg, strerror(err));
    abort();
}

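/*
 * QemuMutex wraps an error-checking pthread mutex (PTHREAD_MUTEX_ERRORCHECK):
 * misuse such as relocking a mutex the caller already holds, or unlocking one
 * it does not own, is reported by pthreads and turned into an abort() through
 * error_exit() instead of deadlocking or silently succeeding.
 */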
void qemu_mutex_init(QemuMutex *mutex)
{
    int err;
    pthread_mutexattr_t mutexattr;

    pthread_mutexattr_init(&mutexattr);
    pthread_mutexattr_settype(&mutexattr, PTHREAD_MUTEX_ERRORCHECK);
    err = pthread_mutex_init(&mutex->lock, &mutexattr);
    pthread_mutexattr_destroy(&mutexattr);
    if (err)
        error_exit(err, __func__);
}

void qemu_mutex_destroy(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_destroy(&mutex->lock);
    if (err)
        error_exit(err, __func__);
}

void qemu_mutex_lock(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_lock(&mutex->lock);
    if (err)
        error_exit(err, __func__);
}

int qemu_mutex_trylock(QemuMutex *mutex)
{
    return pthread_mutex_trylock(&mutex->lock);
}

void qemu_mutex_unlock(QemuMutex *mutex)
{
    int err;

    err = pthread_mutex_unlock(&mutex->lock);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_init(QemuCond *cond)
{
    int err;

    err = pthread_cond_init(&cond->cond, NULL);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_destroy(QemuCond *cond)
{
    int err;

    err = pthread_cond_destroy(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_signal(QemuCond *cond)
{
    int err;

    err = pthread_cond_signal(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_broadcast(QemuCond *cond)
{
    int err;

    err = pthread_cond_broadcast(&cond->cond);
    if (err)
        error_exit(err, __func__);
}

void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
{
    int err;

    err = pthread_cond_wait(&cond->cond, &mutex->lock);
    if (err)
        error_exit(err, __func__);
}

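/*
 * QemuSemaphore has two implementations.  On hosts with usable unnamed POSIX
 * semaphores the sem_* calls are used directly.  On __APPLE__ and __NetBSD__
 * (where unnamed semaphores are not reliably available) the semaphore is
 * emulated with a counter protected by a mutex and a condition variable.
 */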
void qemu_sem_init(QemuSemaphore *sem, int init)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    rc = pthread_mutex_init(&sem->lock, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_cond_init(&sem->cond, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    if (init < 0) {
        error_exit(EINVAL, __func__);
    }
    sem->count = init;
#else
    rc = sem_init(&sem->sem, 0, init);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

void qemu_sem_destroy(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    rc = pthread_cond_destroy(&sem->cond);
    if (rc < 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_mutex_destroy(&sem->lock);
    if (rc < 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_destroy(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

void qemu_sem_post(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    pthread_mutex_lock(&sem->lock);
    if (sem->count == UINT_MAX) {
        rc = EINVAL;
    } else {
        sem->count++;
        rc = pthread_cond_signal(&sem->cond);
    }
    pthread_mutex_unlock(&sem->lock);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_post(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

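/*
 * Turn a timeout relative to now, given in milliseconds, into the absolute
 * wall-clock deadline expected by pthread_cond_timedwait() and
 * sem_timedwait().
 */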
static void compute_abs_deadline(struct timespec *ts, int ms)
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    ts->tv_nsec = tv.tv_usec * 1000 + (ms % 1000) * 1000000;
    ts->tv_sec = tv.tv_sec + ms / 1000;
    if (ts->tv_nsec >= 1000000000) {
        ts->tv_sec++;
        ts->tv_nsec -= 1000000000;
    }
}

int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc;
    struct timespec ts;

#if defined(__APPLE__) || defined(__NetBSD__)
    rc = 0;
    compute_abs_deadline(&ts, ms);
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_timedwait(&sem->cond, &sem->lock, &ts);
        if (rc == ETIMEDOUT) {
            break;
        }
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    if (rc != ETIMEDOUT) {
        --sem->count;
    }
    pthread_mutex_unlock(&sem->lock);
    return (rc == ETIMEDOUT ? -1 : 0);
#else
    if (ms <= 0) {
        /* This is cheaper than sem_timedwait. */
        do {
            rc = sem_trywait(&sem->sem);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == EAGAIN) {
            return -1;
        }
    } else {
        compute_abs_deadline(&ts, ms);
        do {
            rc = sem_timedwait(&sem->sem, &ts);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == ETIMEDOUT) {
            return -1;
        }
    }
    if (rc < 0) {
        error_exit(errno, __func__);
    }
    return 0;
#endif
}

void qemu_sem_wait(QemuSemaphore *sem)
{
    int rc;

#if defined(__APPLE__) || defined(__NetBSD__)
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_wait(&sem->cond, &sem->lock);
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    --sem->count;
    pthread_mutex_unlock(&sem->lock);
#else
    do {
        rc = sem_wait(&sem->sem);
    } while (rc == -1 && errno == EINTR);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}

#ifdef __linux__
#define futex(...) syscall(__NR_futex, __VA_ARGS__)

static inline void futex_wake(QemuEvent *ev, int n)
{
    futex(ev, FUTEX_WAKE, n, NULL, NULL, 0);
}

static inline void futex_wait(QemuEvent *ev, unsigned val)
{
    futex(ev, FUTEX_WAIT, (int) val, NULL, NULL, 0);
}
#else
static inline void futex_wake(QemuEvent *ev, int n)
{
    if (n == 1) {
        pthread_cond_signal(&ev->cond);
    } else {
        pthread_cond_broadcast(&ev->cond);
    }
}

static inline void futex_wait(QemuEvent *ev, unsigned val)
{
    pthread_mutex_lock(&ev->lock);
    if (ev->value == val) {
        pthread_cond_wait(&ev->cond, &ev->lock);
    }
    pthread_mutex_unlock(&ev->lock);
}
#endif

/* Valid transitions:
 * - free->set, when setting the event
 * - busy->set, when setting the event, followed by futex_wake
 * - set->free, when resetting the event
 * - free->busy, when waiting
 *
 * set->busy does not happen (it can be observed from the outside but
 * it really is set->free->busy).
 *
 * busy->free provably cannot happen; to enforce it, the set->free transition
 * is done with an OR, which becomes a no-op if the event has concurrently
 * transitioned to free or busy.
 */

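/*
 * Illustrative sketch (not part of the original file): how a waiter and a
 * setter typically drive these transitions.  work_is_done() and
 * mark_work_done() are hypothetical placeholders for caller state.
 *
 *     QemuEvent ev;
 *     qemu_event_init(&ev, false);     // event starts out "free"
 *
 *     // waiter
 *     qemu_event_reset(&ev);           // set->free if it was already set
 *     if (!work_is_done()) {
 *         qemu_event_wait(&ev);        // free->busy, then sleep in futex_wait
 *     }
 *
 *     // setter
 *     mark_work_done();                // make the condition true first...
 *     qemu_event_set(&ev);             // ...then free/busy->set, wake waiters
 */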

#define EV_SET         0
#define EV_FREE        1
#define EV_BUSY       -1

void qemu_event_init(QemuEvent *ev, bool init)
{
#ifndef __linux__
    pthread_mutex_init(&ev->lock, NULL);
    pthread_cond_init(&ev->cond, NULL);
#endif

    ev->value = (init ? EV_SET : EV_FREE);
}

void qemu_event_destroy(QemuEvent *ev)
{
#ifndef __linux__
    pthread_mutex_destroy(&ev->lock);
    pthread_cond_destroy(&ev->cond);
#endif
}

void qemu_event_set(QemuEvent *ev)
{
    if (atomic_mb_read(&ev->value) != EV_SET) {
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up. */
            futex_wake(ev, INT_MAX);
        }
    }
}

void qemu_event_reset(QemuEvent *ev)
{
    if (atomic_mb_read(&ev->value) == EV_SET) {
        /*
         * If there was a concurrent reset (or even reset+wait),
         * do nothing. Otherwise change EV_SET->EV_FREE.
         */
        atomic_or(&ev->value, EV_FREE);
    }
}

void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    value = atomic_mb_read(&ev->value);
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /*
             * Leave the event reset and tell qemu_event_set that there
             * are waiters. No need to retry, because there cannot be
             * a concurrent busy->free transition. After the CAS, the
             * event will be either set or busy.
             */
            if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                return;
            }
        }
        futex_wait(ev, EV_BUSY);
    }
}

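/*
 * Per-thread atexit notifiers: the head of each thread's NotifierList is
 * stored in thread-specific data under exit_key, and the destructor
 * registered with pthread_key_create() runs the list when the thread exits.
 * The NotifierThreadData union lets the single-pointer list head be kept
 * directly in the void * TSD slot, which QEMU_BUILD_BUG_ON verifies is the
 * same size.
 */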
static pthread_key_t exit_key;

union NotifierThreadData {
    void *ptr;
    NotifierList list;
};
QEMU_BUILD_BUG_ON(sizeof(union NotifierThreadData) != sizeof(void *));

void qemu_thread_atexit_add(Notifier *notifier)
{
    union NotifierThreadData ntd;
    ntd.ptr = pthread_getspecific(exit_key);
    notifier_list_add(&ntd.list, notifier);
    pthread_setspecific(exit_key, ntd.ptr);
}

void qemu_thread_atexit_remove(Notifier *notifier)
{
    union NotifierThreadData ntd;
    ntd.ptr = pthread_getspecific(exit_key);
    notifier_remove(notifier);
    pthread_setspecific(exit_key, ntd.ptr);
}

static void qemu_thread_atexit_run(void *arg)
{
    union NotifierThreadData ntd = { .ptr = arg };
    notifier_list_notify(&ntd.list, NULL);
}

static void __attribute__((constructor)) qemu_thread_atexit_init(void)
{
    pthread_key_create(&exit_key, qemu_thread_atexit_run);
}

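/*
 * Illustrative sketch (not part of the original file): a thread can hook
 * cleanup of its own per-thread state by registering a Notifier.  The names
 * my_thread_exit_notifier and my_thread_cleanup are hypothetical.
 *
 *     static __thread Notifier my_thread_exit_notifier;
 *
 *     static void my_thread_cleanup(Notifier *n, void *unused)
 *     {
 *         // free per-thread resources here
 *     }
 *
 *     // in the thread that owns the resources:
 *     my_thread_exit_notifier.notify = my_thread_cleanup;
 *     qemu_thread_atexit_add(&my_thread_exit_notifier);
 */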

/* Attempt to set the thread's name; note that this is for debug, so
 * we're not going to fail if we can't set it.
 */
static void qemu_thread_set_name(QemuThread *thread, const char *name)
{
#ifdef CONFIG_PTHREAD_SETNAME_NP
    pthread_setname_np(thread->thread, name);
#endif
}

void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void*),
                        void *arg, int mode)
{
    sigset_t set, oldset;
    int err;
    pthread_attr_t attr;

    err = pthread_attr_init(&attr);
    if (err) {
        error_exit(err, __func__);
    }
    if (mode == QEMU_THREAD_DETACHED) {
        err = pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
        if (err) {
            error_exit(err, __func__);
        }
    }

    /* Leave signal handling to the iothread. */
    sigfillset(&set);
    pthread_sigmask(SIG_SETMASK, &set, &oldset);
    err = pthread_create(&thread->thread, &attr, start_routine, arg);
    if (err)
        error_exit(err, __func__);

    if (name_threads) {
        qemu_thread_set_name(thread, name);
    }

    pthread_sigmask(SIG_SETMASK, &oldset, NULL);

    pthread_attr_destroy(&attr);
}
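/*
 * Illustrative sketch (not part of the original file): creating a joinable
 * worker and collecting its result.  worker_fn is a hypothetical start
 * routine.
 *
 *     static void *worker_fn(void *opaque)
 *     {
 *         return opaque;
 *     }
 *
 *     QemuThread t;
 *     qemu_thread_create(&t, "worker", worker_fn, NULL, QEMU_THREAD_JOINABLE);
 *     void *ret = qemu_thread_join(&t);
 */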

void qemu_thread_get_self(QemuThread *thread)
{
    thread->thread = pthread_self();
}

bool qemu_thread_is_self(QemuThread *thread)
{
    return pthread_equal(pthread_self(), thread->thread);
}

void qemu_thread_exit(void *retval)
{
    pthread_exit(retval);
}

void *qemu_thread_join(QemuThread *thread)
{
    int err;
    void *ret;

    err = pthread_join(thread->thread, &ret);
    if (err) {
        error_exit(err, __func__);
    }
    return ret;
}