]> git.proxmox.com Git - mirror_qemu.git/blame - util/qemu-thread-posix.c
configure, meson: move remaining HAVE_* compiler tests to Meson
[mirror_qemu.git] / util / qemu-thread-posix.c
CommitLineData
e5d355d1
AL
1/*
2 * Wrappers around mutex/cond/thread functions
3 *
4 * Copyright Red Hat, Inc. 2009
5 *
6 * Author:
7 * Marcelo Tosatti <mtosatti@redhat.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2 or later.
10 * See the COPYING file in the top-level directory.
11 *
12 */
aafd7584 13#include "qemu/osdep.h"
1de7afc9 14#include "qemu/thread.h"
c7c4d063 15#include "qemu/atomic.h"
ef57137f 16#include "qemu/notify.h"
f1aff7aa 17#include "qemu-thread-common.h"
ce9f0e5b 18#include "qemu/tsan.h"
e5d355d1 19
/* When true, qemu_thread_create() applies the caller-supplied name to
 * new threads (debug aid only; see qemu_thread_start()). */
static bool name_threads;

/* Enable or disable thread naming for subsequently created threads. */
void qemu_thread_naming(bool enable)
{
    name_threads = enable;

#ifndef CONFIG_THREAD_SETNAME_BYTHREAD
    /* This is a debugging option, not fatal */
    if (enable) {
        fprintf(stderr, "qemu: thread naming not supported on this host\n");
    }
#endif
}
33
/* Report a failing API (@msg) with the textual form of error code @err,
 * then abort.  Used for pthread errors that leave no sane way to
 * continue. */
static void error_exit(int err, const char *msg)
{
    fprintf(stderr, "qemu: %s: %s\n", msg, strerror(err));
    abort();
}
39
/* Fill *ts with the absolute CLOCK_REALTIME deadline @ms milliseconds
 * from now, normalized so that tv_nsec stays below one second. */
static void compute_abs_deadline(struct timespec *ts, int ms)
{
    struct timeval now;

    gettimeofday(&now, NULL);
    ts->tv_sec = now.tv_sec + ms / 1000;
    ts->tv_nsec = now.tv_usec * 1000 + (ms % 1000) * 1000000;
    /* Both addends are below 1e9, so a single carry is enough. */
    if (ts->tv_nsec >= 1000000000) {
        ts->tv_nsec -= 1000000000;
        ts->tv_sec++;
    }
}
51
e5d355d1
AL
52void qemu_mutex_init(QemuMutex *mutex)
53{
54 int err;
55
24fa9049 56 err = pthread_mutex_init(&mutex->lock, NULL);
e5d355d1
AL
57 if (err)
58 error_exit(err, __func__);
f1aff7aa 59 qemu_mutex_post_init(mutex);
e5d355d1
AL
60}
61
313b1d69
CC
62void qemu_mutex_destroy(QemuMutex *mutex)
63{
64 int err;
65
c096358e
FZ
66 assert(mutex->initialized);
67 mutex->initialized = false;
313b1d69
CC
68 err = pthread_mutex_destroy(&mutex->lock);
69 if (err)
70 error_exit(err, __func__);
71}
72
6c27a0de 73void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line)
e5d355d1
AL
74{
75 int err;
76
c096358e 77 assert(mutex->initialized);
f1aff7aa 78 qemu_mutex_pre_lock(mutex, file, line);
e5d355d1
AL
79 err = pthread_mutex_lock(&mutex->lock);
80 if (err)
81 error_exit(err, __func__);
f1aff7aa 82 qemu_mutex_post_lock(mutex, file, line);
e5d355d1
AL
83}
84
6c27a0de 85int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line)
e5d355d1 86{
31f5a726
JRZ
87 int err;
88
c096358e 89 assert(mutex->initialized);
31f5a726
JRZ
90 err = pthread_mutex_trylock(&mutex->lock);
91 if (err == 0) {
f1aff7aa 92 qemu_mutex_post_lock(mutex, file, line);
31f5a726
JRZ
93 return 0;
94 }
95 if (err != EBUSY) {
96 error_exit(err, __func__);
97 }
98 return -EBUSY;
e5d355d1
AL
99}
100
6c27a0de 101void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line)
e5d355d1
AL
102{
103 int err;
104
c096358e 105 assert(mutex->initialized);
f1aff7aa 106 qemu_mutex_pre_unlock(mutex, file, line);
e5d355d1
AL
107 err = pthread_mutex_unlock(&mutex->lock);
108 if (err)
109 error_exit(err, __func__);
110}
111
feadec63
PB
112void qemu_rec_mutex_init(QemuRecMutex *mutex)
113{
114 int err;
115 pthread_mutexattr_t attr;
116
117 pthread_mutexattr_init(&attr);
118 pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
6c98635e 119 err = pthread_mutex_init(&mutex->m.lock, &attr);
feadec63
PB
120 pthread_mutexattr_destroy(&attr);
121 if (err) {
122 error_exit(err, __func__);
123 }
6c98635e 124 mutex->m.initialized = true;
feadec63
PB
125}
126
/* Destroy a recursive mutex; forwards to the plain-mutex teardown. */
void qemu_rec_mutex_destroy(QemuRecMutex *mutex)
{
    qemu_mutex_destroy(&mutex->m);
}
131
/* Acquire a recursive mutex (may be re-acquired by the owning thread). */
void qemu_rec_mutex_lock_impl(QemuRecMutex *mutex, const char *file, int line)
{
    qemu_mutex_lock_impl(&mutex->m, file, line);
}
136
/* Try-lock a recursive mutex; returns 0 on success, -EBUSY otherwise. */
int qemu_rec_mutex_trylock_impl(QemuRecMutex *mutex, const char *file, int line)
{
    return qemu_mutex_trylock_impl(&mutex->m, file, line);
}
141
/* Release one level of a recursive mutex. */
void qemu_rec_mutex_unlock_impl(QemuRecMutex *mutex, const char *file, int line)
{
    qemu_mutex_unlock_impl(&mutex->m, file, line);
}
146
e5d355d1
AL
147void qemu_cond_init(QemuCond *cond)
148{
149 int err;
150
151 err = pthread_cond_init(&cond->cond, NULL);
152 if (err)
153 error_exit(err, __func__);
c096358e 154 cond->initialized = true;
e5d355d1
AL
155}
156
313b1d69
CC
157void qemu_cond_destroy(QemuCond *cond)
158{
159 int err;
160
c096358e
FZ
161 assert(cond->initialized);
162 cond->initialized = false;
313b1d69
CC
163 err = pthread_cond_destroy(&cond->cond);
164 if (err)
165 error_exit(err, __func__);
166}
167
e5d355d1
AL
168void qemu_cond_signal(QemuCond *cond)
169{
170 int err;
171
c096358e 172 assert(cond->initialized);
e5d355d1
AL
173 err = pthread_cond_signal(&cond->cond);
174 if (err)
175 error_exit(err, __func__);
176}
177
178void qemu_cond_broadcast(QemuCond *cond)
179{
180 int err;
181
c096358e 182 assert(cond->initialized);
e5d355d1
AL
183 err = pthread_cond_broadcast(&cond->cond);
184 if (err)
185 error_exit(err, __func__);
186}
187
6c27a0de 188void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex, const char *file, const int line)
e5d355d1
AL
189{
190 int err;
191
c096358e 192 assert(cond->initialized);
f1aff7aa 193 qemu_mutex_pre_unlock(mutex, file, line);
e5d355d1 194 err = pthread_cond_wait(&cond->cond, &mutex->lock);
f1aff7aa 195 qemu_mutex_post_lock(mutex, file, line);
e5d355d1
AL
196 if (err)
197 error_exit(err, __func__);
198}
199
3dcc9c6e
YK
200bool qemu_cond_timedwait_impl(QemuCond *cond, QemuMutex *mutex, int ms,
201 const char *file, const int line)
202{
203 int err;
204 struct timespec ts;
205
206 assert(cond->initialized);
207 trace_qemu_mutex_unlock(mutex, file, line);
208 compute_abs_deadline(&ts, ms);
209 err = pthread_cond_timedwait(&cond->cond, &mutex->lock, &ts);
210 trace_qemu_mutex_locked(mutex, file, line);
211 if (err && err != ETIMEDOUT) {
212 error_exit(err, __func__);
213 }
214 return err != ETIMEDOUT;
215}
216
/* Initialize a semaphore with @init tokens (must be >= 0).
 * Without CONFIG_SEM_TIMEDWAIT the semaphore is emulated with a
 * mutex/condvar pair and an explicit counter; otherwise a POSIX
 * unnamed semaphore is used.  Aborts on any failure. */
void qemu_sem_init(QemuSemaphore *sem, int init)
{
    int rc;

#ifndef CONFIG_SEM_TIMEDWAIT
    rc = pthread_mutex_init(&sem->lock, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    rc = pthread_cond_init(&sem->cond, NULL);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
    if (init < 0) {
        error_exit(EINVAL, __func__);
    }
    sem->count = init;
#else
    rc = sem_init(&sem->sem, 0, init);
    if (rc < 0) {
        /* sem_* functions return -1 and set errno. */
        error_exit(errno, __func__);
    }
#endif
    sem->initialized = true;
}
242
243void qemu_sem_destroy(QemuSemaphore *sem)
244{
245 int rc;
246
c096358e
FZ
247 assert(sem->initialized);
248 sem->initialized = false;
401bc051 249#ifndef CONFIG_SEM_TIMEDWAIT
c166cb72
PB
250 rc = pthread_cond_destroy(&sem->cond);
251 if (rc < 0) {
252 error_exit(rc, __func__);
253 }
254 rc = pthread_mutex_destroy(&sem->lock);
255 if (rc < 0) {
256 error_exit(rc, __func__);
257 }
258#else
38b14db3
PB
259 rc = sem_destroy(&sem->sem);
260 if (rc < 0) {
261 error_exit(errno, __func__);
262 }
c166cb72 263#endif
38b14db3
PB
264}
265
/* Add one token to the semaphore and wake a waiter.
 * Aborts if the emulated counter would overflow UINT_MAX or a
 * primitive fails. */
void qemu_sem_post(QemuSemaphore *sem)
{
    int rc;

    assert(sem->initialized);
#ifndef CONFIG_SEM_TIMEDWAIT
    pthread_mutex_lock(&sem->lock);
    if (sem->count == UINT_MAX) {
        rc = EINVAL;
    } else {
        sem->count++;
        rc = pthread_cond_signal(&sem->cond);
    }
    pthread_mutex_unlock(&sem->lock);
    if (rc != 0) {
        error_exit(rc, __func__);
    }
#else
    rc = sem_post(&sem->sem);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}
290
/* Take one token from the semaphore, waiting at most @ms milliseconds.
 * Returns 0 on success, -1 on timeout; aborts on any other failure.
 * ms <= 0 degenerates to a non-blocking attempt. */
int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc;
    struct timespec ts;

    assert(sem->initialized);
#ifndef CONFIG_SEM_TIMEDWAIT
    rc = 0;
    compute_abs_deadline(&ts, ms);
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_timedwait(&sem->cond, &sem->lock, &ts);
        if (rc == ETIMEDOUT) {
            break;
        }
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    /* Only consume a token if we did not time out. */
    if (rc != ETIMEDOUT) {
        --sem->count;
    }
    pthread_mutex_unlock(&sem->lock);
    return (rc == ETIMEDOUT ? -1 : 0);
#else
    if (ms <= 0) {
        /* This is cheaper than sem_timedwait. */
        do {
            rc = sem_trywait(&sem->sem);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == EAGAIN) {
            return -1;
        }
    } else {
        compute_abs_deadline(&ts, ms);
        do {
            /* Retry on signal interruption, keeping the same absolute
             * deadline. */
            rc = sem_timedwait(&sem->sem, &ts);
        } while (rc == -1 && errno == EINTR);
        if (rc == -1 && errno == ETIMEDOUT) {
            return -1;
        }
    }
    if (rc < 0) {
        error_exit(errno, __func__);
    }
    return 0;
#endif
}
339
/* Take one token from the semaphore, blocking until one is available. */
void qemu_sem_wait(QemuSemaphore *sem)
{
    int rc;

    assert(sem->initialized);
#ifndef CONFIG_SEM_TIMEDWAIT
    pthread_mutex_lock(&sem->lock);
    while (sem->count == 0) {
        rc = pthread_cond_wait(&sem->cond, &sem->lock);
        if (rc != 0) {
            error_exit(rc, __func__);
        }
    }
    --sem->count;
    pthread_mutex_unlock(&sem->lock);
#else
    do {
        /* Retry if interrupted by a signal. */
        rc = sem_wait(&sem->sem);
    } while (rc == -1 && errno == EINTR);
    if (rc < 0) {
        error_exit(errno, __func__);
    }
#endif
}
364
#ifdef __linux__
#include "qemu/futex.h"
#else
/* Fallback for hosts without futex(2): emulate the wake/wait pair with
 * the mutex/condvar embedded in QemuEvent. */
static inline void qemu_futex_wake(QemuEvent *ev, int n)
{
    assert(ev->initialized);
    pthread_mutex_lock(&ev->lock);
    if (n == 1) {
        pthread_cond_signal(&ev->cond);
    } else {
        pthread_cond_broadcast(&ev->cond);
    }
    pthread_mutex_unlock(&ev->lock);
}

/* Block while ev->value == val; the lock closes the race between the
 * value check and the wakeup from qemu_futex_wake(). */
static inline void qemu_futex_wait(QemuEvent *ev, unsigned val)
{
    assert(ev->initialized);
    pthread_mutex_lock(&ev->lock);
    if (ev->value == val) {
        pthread_cond_wait(&ev->cond, &ev->lock);
    }
    pthread_mutex_unlock(&ev->lock);
}
#endif
390
391/* Valid transitions:
392 * - free->set, when setting the event
fbcc3e50 393 * - busy->set, when setting the event, followed by qemu_futex_wake
c7c4d063
PB
394 * - set->free, when resetting the event
395 * - free->busy, when waiting
396 *
397 * set->busy does not happen (it can be observed from the outside but
398 * it really is set->free->busy).
399 *
400 * busy->free provably cannot happen; to enforce it, the set->free transition
401 * is done with an OR, which becomes a no-op if the event has concurrently
402 * transitioned to free or busy.
403 */
404
405#define EV_SET 0
406#define EV_FREE 1
407#define EV_BUSY -1
408
/* Initialize the event; @init true starts it in the set state. */
void qemu_event_init(QemuEvent *ev, bool init)
{
#ifndef __linux__
    /* Non-Linux hosts use the condvar-based futex emulation above. */
    pthread_mutex_init(&ev->lock, NULL);
    pthread_cond_init(&ev->cond, NULL);
#endif

    ev->value = (init ? EV_SET : EV_FREE);
    ev->initialized = true;
}
419
/* Destroy an event initialized with qemu_event_init(). */
void qemu_event_destroy(QemuEvent *ev)
{
    assert(ev->initialized);
    ev->initialized = false;
#ifndef __linux__
    pthread_mutex_destroy(&ev->lock);
    pthread_cond_destroy(&ev->cond);
#endif
}
429
/* Set the event, waking all current waiters.  See the state-transition
 * comment above for the EV_SET/EV_FREE/EV_BUSY protocol. */
void qemu_event_set(QemuEvent *ev)
{
    /* qemu_event_set has release semantics, but because it *loads*
     * ev->value we need a full memory barrier here.
     */
    assert(ev->initialized);
    smp_mb();
    if (qatomic_read(&ev->value) != EV_SET) {
        if (qatomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up. */
            qemu_futex_wake(ev, INT_MAX);
        }
    }
}
444
/* Reset the event from set back to free; a no-op if it is not set. */
void qemu_event_reset(QemuEvent *ev)
{
    unsigned value;

    assert(ev->initialized);
    value = qatomic_read(&ev->value);
    smp_mb_acquire();
    if (value == EV_SET) {
        /*
         * If there was a concurrent reset (or even reset+wait),
         * do nothing. Otherwise change EV_SET->EV_FREE.
         */
        qatomic_or(&ev->value, EV_FREE);
    }
}
460
/* Block until the event is set; returns immediately if already set. */
void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    assert(ev->initialized);
    value = qatomic_read(&ev->value);
    smp_mb_acquire();
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /*
             * Leave the event reset and tell qemu_event_set that there
             * are waiters. No need to retry, because there cannot be
             * a concurrent busy->free transition. After the CAS, the
             * event will be either set or busy.
             */
            if (qatomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                return;
            }
        }
        qemu_futex_wait(ev, EV_BUSY);
    }
}
483
a458774a 484static __thread NotifierList thread_exit;
ef57137f 485
a458774a
PM
486/*
487 * Note that in this implementation you can register a thread-exit
488 * notifier for the main thread, but it will never be called.
489 * This is OK because main thread exit can only happen when the
490 * entire process is exiting, and the API allows notifiers to not
491 * be called on process exit.
492 */
ef57137f
PB
/* Register @notifier to run when the calling thread exits (see the
 * main-thread caveat in the comment above). */
void qemu_thread_atexit_add(Notifier *notifier)
{
    notifier_list_add(&thread_exit, notifier);
}
497
/* Unregister a previously added thread-exit notifier. */
void qemu_thread_atexit_remove(Notifier *notifier)
{
    notifier_remove(notifier);
}
502
static void qemu_thread_atexit_notify(void *arg)
{
    /*
     * Called when non-main thread exits (via qemu_thread_exit()
     * or by returning from its start routine.)
     */
    notifier_list_notify(&thread_exit, NULL);
}
511
/* Heap-allocated argument bundle handed from qemu_thread_create() to
 * the new thread; freed by qemu_thread_start(). */
typedef struct {
    void *(*start_routine)(void *);
    void *arg;
    char *name;   /* g_strdup'd copy of the requested thread name */
} QemuThreadArgs;

/* Trampoline run as every new thread's entry point: optionally names
 * the thread, frees the argument bundle, and invokes the user routine
 * with the thread-exit notifiers armed. */
static void *qemu_thread_start(void *args)
{
    QemuThreadArgs *qemu_thread_args = args;
    void *(*start_routine)(void *) = qemu_thread_args->start_routine;
    void *arg = qemu_thread_args->arg;
    void *r;

#ifdef CONFIG_THREAD_SETNAME_BYTHREAD
    /* Attempt to set the threads name; note that this is for debug, so
     * we're not going to fail if we can't set it.
     */
    if (name_threads && qemu_thread_args->name) {
# if defined(CONFIG_PTHREAD_SETNAME_NP_W_TID)
        pthread_setname_np(pthread_self(), qemu_thread_args->name);
# elif defined(CONFIG_PTHREAD_SETNAME_NP_WO_TID)
        pthread_setname_np(qemu_thread_args->name);
# endif
    }
#endif
    QEMU_TSAN_ANNOTATE_THREAD_NAME(qemu_thread_args->name);
    g_free(qemu_thread_args->name);
    g_free(qemu_thread_args);

    /*
     * GCC 11 with glibc 2.17 on PowerPC reports
     *
     * qemu-thread-posix.c:540:5: error: ‘__sigsetjmp’ accessing 656 bytes
     * in a region of size 528 [-Werror=stringop-overflow=]
     * 540 | pthread_cleanup_push(qemu_thread_atexit_notify, NULL);
     * | ^~~~~~~~~~~~~~~~~~~~
     *
     * which is clearly nonsense.
     */
#pragma GCC diagnostic push
#ifndef __clang__
#pragma GCC diagnostic ignored "-Wstringop-overflow"
#endif

    /* Run the exit notifiers whether the routine returns normally or
     * the thread is cancelled / calls pthread_exit(). */
    pthread_cleanup_push(qemu_thread_atexit_notify, NULL);
    r = start_routine(arg);
    pthread_cleanup_pop(1);

#pragma GCC diagnostic pop

    return r;
}
564
/* Create a new thread running @start_routine(@arg).
 * @name is applied only if thread naming is enabled and supported.
 * @mode selects joinable vs QEMU_THREAD_DETACHED.  Any pthread failure
 * aborts via error_exit(). */
void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void*),
                        void *arg, int mode)
{
    sigset_t set, oldset;
    int err;
    pthread_attr_t attr;
    QemuThreadArgs *qemu_thread_args;

    err = pthread_attr_init(&attr);
    if (err) {
        error_exit(err, __func__);
    }

    if (mode == QEMU_THREAD_DETACHED) {
        pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    }

    /* Leave signal handling to the iothread. */
    sigfillset(&set);
    /* Blocking the signals can result in undefined behaviour. */
    sigdelset(&set, SIGSEGV);
    sigdelset(&set, SIGFPE);
    sigdelset(&set, SIGILL);
    /* TODO avoid SIGBUS loss on macOS */
    /* Mask signals in the creating thread so the child inherits the
     * mask; the original mask is restored below after pthread_create(). */
    pthread_sigmask(SIG_SETMASK, &set, &oldset);

    qemu_thread_args = g_new0(QemuThreadArgs, 1);
    qemu_thread_args->name = g_strdup(name);
    qemu_thread_args->start_routine = start_routine;
    qemu_thread_args->arg = arg;

    err = pthread_create(&thread->thread, &attr,
                         qemu_thread_start, qemu_thread_args);

    if (err)
        error_exit(err, __func__);

    pthread_sigmask(SIG_SETMASK, &oldset, NULL);

    pthread_attr_destroy(&attr);
}
607
/* Initialize @thread to refer to the calling thread. */
void qemu_thread_get_self(QemuThread *thread)
{
    thread->thread = pthread_self();
}
612
/* Return true if @thread refers to the calling thread. */
bool qemu_thread_is_self(QemuThread *thread)
{
    return pthread_equal(pthread_self(), thread->thread);
}
617
/* Terminate the calling thread with result @retval (runs the
 * pthread cleanup handlers, hence the thread-exit notifiers). */
void qemu_thread_exit(void *retval)
{
    pthread_exit(retval);
}
8763046b
JK
622
623void *qemu_thread_join(QemuThread *thread)
624{
625 int err;
626 void *ret;
627
628 err = pthread_join(thread->thread, &ret);
629 if (err) {
630 error_exit(err, __func__);
631 }
632 return ret;
633}