]> git.proxmox.com Git - mirror_qemu.git/blame - util/qemu-thread-posix.c
migration: report SaveStateEntry id and name on failure
[mirror_qemu.git] / util / qemu-thread-posix.c
CommitLineData
e5d355d1
AL
1/*
2 * Wrappers around mutex/cond/thread functions
3 *
4 * Copyright Red Hat, Inc. 2009
5 *
6 * Author:
7 * Marcelo Tosatti <mtosatti@redhat.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2 or later.
10 * See the COPYING file in the top-level directory.
11 *
12 */
aafd7584 13#include "qemu/osdep.h"
1de7afc9 14#include "qemu/thread.h"
c7c4d063 15#include "qemu/atomic.h"
ef57137f 16#include "qemu/notify.h"
f1aff7aa 17#include "qemu-thread-common.h"
e5d355d1 18
8f480de0
DDAG
/* Whether newly created threads should be given a debug name. */
static bool name_threads;

/* Turn thread naming on or off (debugging aid only). */
void qemu_thread_naming(bool enable)
{
    name_threads = enable;

#ifndef CONFIG_THREAD_SETNAME_BYTHREAD
    /* This is a debugging option, not fatal */
    if (enable) {
        fprintf(stderr, "qemu: thread naming not supported on this host\n");
    }
#endif
}
32
e5d355d1
AL
/*
 * Report a failing pthread call and terminate: these wrappers treat
 * any unexpected error from the threading primitives as fatal.
 */
static void error_exit(int err, const char *msg)
{
    fprintf(stderr, "qemu: %s: %s\n", msg, strerror(err));
    abort();
}
38
3dcc9c6e
YK
/*
 * Turn a relative timeout of @ms milliseconds into the absolute
 * wall-clock deadline expected by pthread_cond_timedwait()/sem_timedwait().
 */
static void compute_abs_deadline(struct timespec *ts, int ms)
{
    struct timeval now;

    gettimeofday(&now, NULL);
    ts->tv_sec = now.tv_sec + ms / 1000;
    ts->tv_nsec = now.tv_usec * 1000 + (ms % 1000) * 1000000;

    /* Each addend is below 1s, so at most one carry is needed. */
    if (ts->tv_nsec >= 1000000000) {
        ts->tv_nsec -= 1000000000;
        ts->tv_sec++;
    }
}
50
e5d355d1
AL
51void qemu_mutex_init(QemuMutex *mutex)
52{
53 int err;
54
24fa9049 55 err = pthread_mutex_init(&mutex->lock, NULL);
e5d355d1
AL
56 if (err)
57 error_exit(err, __func__);
f1aff7aa 58 qemu_mutex_post_init(mutex);
e5d355d1
AL
59}
60
313b1d69
CC
61void qemu_mutex_destroy(QemuMutex *mutex)
62{
63 int err;
64
c096358e
FZ
65 assert(mutex->initialized);
66 mutex->initialized = false;
313b1d69
CC
67 err = pthread_mutex_destroy(&mutex->lock);
68 if (err)
69 error_exit(err, __func__);
70}
71
6c27a0de 72void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line)
e5d355d1
AL
73{
74 int err;
75
c096358e 76 assert(mutex->initialized);
f1aff7aa 77 qemu_mutex_pre_lock(mutex, file, line);
e5d355d1
AL
78 err = pthread_mutex_lock(&mutex->lock);
79 if (err)
80 error_exit(err, __func__);
f1aff7aa 81 qemu_mutex_post_lock(mutex, file, line);
e5d355d1
AL
82}
83
6c27a0de 84int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line)
e5d355d1 85{
31f5a726
JRZ
86 int err;
87
c096358e 88 assert(mutex->initialized);
31f5a726
JRZ
89 err = pthread_mutex_trylock(&mutex->lock);
90 if (err == 0) {
f1aff7aa 91 qemu_mutex_post_lock(mutex, file, line);
31f5a726
JRZ
92 return 0;
93 }
94 if (err != EBUSY) {
95 error_exit(err, __func__);
96 }
97 return -EBUSY;
e5d355d1
AL
98}
99
6c27a0de 100void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line)
e5d355d1
AL
101{
102 int err;
103
c096358e 104 assert(mutex->initialized);
f1aff7aa 105 qemu_mutex_pre_unlock(mutex, file, line);
e5d355d1
AL
106 err = pthread_mutex_unlock(&mutex->lock);
107 if (err)
108 error_exit(err, __func__);
109}
110
feadec63
PB
111void qemu_rec_mutex_init(QemuRecMutex *mutex)
112{
113 int err;
114 pthread_mutexattr_t attr;
115
116 pthread_mutexattr_init(&attr);
117 pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
118 err = pthread_mutex_init(&mutex->lock, &attr);
119 pthread_mutexattr_destroy(&attr);
120 if (err) {
121 error_exit(err, __func__);
122 }
c096358e 123 mutex->initialized = true;
feadec63
PB
124}
125
e5d355d1
AL
126void qemu_cond_init(QemuCond *cond)
127{
128 int err;
129
130 err = pthread_cond_init(&cond->cond, NULL);
131 if (err)
132 error_exit(err, __func__);
c096358e 133 cond->initialized = true;
e5d355d1
AL
134}
135
313b1d69
CC
136void qemu_cond_destroy(QemuCond *cond)
137{
138 int err;
139
c096358e
FZ
140 assert(cond->initialized);
141 cond->initialized = false;
313b1d69
CC
142 err = pthread_cond_destroy(&cond->cond);
143 if (err)
144 error_exit(err, __func__);
145}
146
e5d355d1
AL
147void qemu_cond_signal(QemuCond *cond)
148{
149 int err;
150
c096358e 151 assert(cond->initialized);
e5d355d1
AL
152 err = pthread_cond_signal(&cond->cond);
153 if (err)
154 error_exit(err, __func__);
155}
156
157void qemu_cond_broadcast(QemuCond *cond)
158{
159 int err;
160
c096358e 161 assert(cond->initialized);
e5d355d1
AL
162 err = pthread_cond_broadcast(&cond->cond);
163 if (err)
164 error_exit(err, __func__);
165}
166
6c27a0de 167void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex, const char *file, const int line)
e5d355d1
AL
168{
169 int err;
170
c096358e 171 assert(cond->initialized);
f1aff7aa 172 qemu_mutex_pre_unlock(mutex, file, line);
e5d355d1 173 err = pthread_cond_wait(&cond->cond, &mutex->lock);
f1aff7aa 174 qemu_mutex_post_lock(mutex, file, line);
e5d355d1
AL
175 if (err)
176 error_exit(err, __func__);
177}
178
3dcc9c6e
YK
179bool qemu_cond_timedwait_impl(QemuCond *cond, QemuMutex *mutex, int ms,
180 const char *file, const int line)
181{
182 int err;
183 struct timespec ts;
184
185 assert(cond->initialized);
186 trace_qemu_mutex_unlock(mutex, file, line);
187 compute_abs_deadline(&ts, ms);
188 err = pthread_cond_timedwait(&cond->cond, &mutex->lock, &ts);
189 trace_qemu_mutex_locked(mutex, file, line);
190 if (err && err != ETIMEDOUT) {
191 error_exit(err, __func__);
192 }
193 return err != ETIMEDOUT;
194}
195
38b14db3
PB
196void qemu_sem_init(QemuSemaphore *sem, int init)
197{
198 int rc;
199
401bc051 200#ifndef CONFIG_SEM_TIMEDWAIT
c166cb72
PB
201 rc = pthread_mutex_init(&sem->lock, NULL);
202 if (rc != 0) {
203 error_exit(rc, __func__);
204 }
205 rc = pthread_cond_init(&sem->cond, NULL);
206 if (rc != 0) {
207 error_exit(rc, __func__);
208 }
209 if (init < 0) {
210 error_exit(EINVAL, __func__);
211 }
212 sem->count = init;
213#else
38b14db3
PB
214 rc = sem_init(&sem->sem, 0, init);
215 if (rc < 0) {
216 error_exit(errno, __func__);
217 }
c166cb72 218#endif
c096358e 219 sem->initialized = true;
38b14db3
PB
220}
221
222void qemu_sem_destroy(QemuSemaphore *sem)
223{
224 int rc;
225
c096358e
FZ
226 assert(sem->initialized);
227 sem->initialized = false;
401bc051 228#ifndef CONFIG_SEM_TIMEDWAIT
c166cb72
PB
229 rc = pthread_cond_destroy(&sem->cond);
230 if (rc < 0) {
231 error_exit(rc, __func__);
232 }
233 rc = pthread_mutex_destroy(&sem->lock);
234 if (rc < 0) {
235 error_exit(rc, __func__);
236 }
237#else
38b14db3
PB
238 rc = sem_destroy(&sem->sem);
239 if (rc < 0) {
240 error_exit(errno, __func__);
241 }
c166cb72 242#endif
38b14db3
PB
243}
244
245void qemu_sem_post(QemuSemaphore *sem)
246{
247 int rc;
248
c096358e 249 assert(sem->initialized);
401bc051 250#ifndef CONFIG_SEM_TIMEDWAIT
c166cb72 251 pthread_mutex_lock(&sem->lock);
79761c66 252 if (sem->count == UINT_MAX) {
c166cb72 253 rc = EINVAL;
c166cb72 254 } else {
79761c66
IT
255 sem->count++;
256 rc = pthread_cond_signal(&sem->cond);
c166cb72
PB
257 }
258 pthread_mutex_unlock(&sem->lock);
259 if (rc != 0) {
260 error_exit(rc, __func__);
261 }
262#else
38b14db3
PB
263 rc = sem_post(&sem->sem);
264 if (rc < 0) {
265 error_exit(errno, __func__);
266 }
c166cb72
PB
267#endif
268}
269
38b14db3
PB
270int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
271{
272 int rc;
c166cb72
PB
273 struct timespec ts;
274
c096358e 275 assert(sem->initialized);
401bc051 276#ifndef CONFIG_SEM_TIMEDWAIT
79761c66 277 rc = 0;
c166cb72
PB
278 compute_abs_deadline(&ts, ms);
279 pthread_mutex_lock(&sem->lock);
79761c66 280 while (sem->count == 0) {
c166cb72
PB
281 rc = pthread_cond_timedwait(&sem->cond, &sem->lock, &ts);
282 if (rc == ETIMEDOUT) {
283 break;
284 }
285 if (rc != 0) {
286 error_exit(rc, __func__);
287 }
288 }
79761c66
IT
289 if (rc != ETIMEDOUT) {
290 --sem->count;
291 }
c166cb72
PB
292 pthread_mutex_unlock(&sem->lock);
293 return (rc == ETIMEDOUT ? -1 : 0);
294#else
38b14db3
PB
295 if (ms <= 0) {
296 /* This is cheaper than sem_timedwait. */
297 do {
298 rc = sem_trywait(&sem->sem);
299 } while (rc == -1 && errno == EINTR);
300 if (rc == -1 && errno == EAGAIN) {
301 return -1;
302 }
303 } else {
c166cb72 304 compute_abs_deadline(&ts, ms);
38b14db3
PB
305 do {
306 rc = sem_timedwait(&sem->sem, &ts);
307 } while (rc == -1 && errno == EINTR);
308 if (rc == -1 && errno == ETIMEDOUT) {
309 return -1;
310 }
311 }
312 if (rc < 0) {
313 error_exit(errno, __func__);
314 }
315 return 0;
c166cb72 316#endif
38b14db3
PB
317}
318
319void qemu_sem_wait(QemuSemaphore *sem)
320{
79761c66
IT
321 int rc;
322
c096358e 323 assert(sem->initialized);
401bc051 324#ifndef CONFIG_SEM_TIMEDWAIT
c166cb72 325 pthread_mutex_lock(&sem->lock);
79761c66
IT
326 while (sem->count == 0) {
327 rc = pthread_cond_wait(&sem->cond, &sem->lock);
328 if (rc != 0) {
329 error_exit(rc, __func__);
330 }
c166cb72 331 }
79761c66 332 --sem->count;
c166cb72
PB
333 pthread_mutex_unlock(&sem->lock);
334#else
38b14db3
PB
335 do {
336 rc = sem_wait(&sem->sem);
337 } while (rc == -1 && errno == EINTR);
338 if (rc < 0) {
339 error_exit(errno, __func__);
340 }
c166cb72 341#endif
38b14db3
PB
342}
343
c7c4d063 344#ifdef __linux__
fbcc3e50 345#include "qemu/futex.h"
c7c4d063 346#else
fbcc3e50 347static inline void qemu_futex_wake(QemuEvent *ev, int n)
c7c4d063 348{
c096358e 349 assert(ev->initialized);
158ef8cb 350 pthread_mutex_lock(&ev->lock);
c7c4d063
PB
351 if (n == 1) {
352 pthread_cond_signal(&ev->cond);
353 } else {
354 pthread_cond_broadcast(&ev->cond);
355 }
158ef8cb 356 pthread_mutex_unlock(&ev->lock);
c7c4d063
PB
357}
358
fbcc3e50 359static inline void qemu_futex_wait(QemuEvent *ev, unsigned val)
c7c4d063 360{
c096358e 361 assert(ev->initialized);
c7c4d063
PB
362 pthread_mutex_lock(&ev->lock);
363 if (ev->value == val) {
364 pthread_cond_wait(&ev->cond, &ev->lock);
365 }
366 pthread_mutex_unlock(&ev->lock);
367}
368#endif
369
370/* Valid transitions:
371 * - free->set, when setting the event
fbcc3e50 372 * - busy->set, when setting the event, followed by qemu_futex_wake
c7c4d063
PB
373 * - set->free, when resetting the event
374 * - free->busy, when waiting
375 *
376 * set->busy does not happen (it can be observed from the outside but
377 * it really is set->free->busy).
378 *
379 * busy->free provably cannot happen; to enforce it, the set->free transition
380 * is done with an OR, which becomes a no-op if the event has concurrently
381 * transitioned to free or busy.
382 */
383
384#define EV_SET 0
385#define EV_FREE 1
386#define EV_BUSY -1
387
388void qemu_event_init(QemuEvent *ev, bool init)
389{
390#ifndef __linux__
391 pthread_mutex_init(&ev->lock, NULL);
392 pthread_cond_init(&ev->cond, NULL);
393#endif
394
395 ev->value = (init ? EV_SET : EV_FREE);
c096358e 396 ev->initialized = true;
c7c4d063
PB
397}
398
399void qemu_event_destroy(QemuEvent *ev)
400{
c096358e
FZ
401 assert(ev->initialized);
402 ev->initialized = false;
c7c4d063
PB
403#ifndef __linux__
404 pthread_mutex_destroy(&ev->lock);
405 pthread_cond_destroy(&ev->cond);
406#endif
407}
408
409void qemu_event_set(QemuEvent *ev)
410{
374293ca
PB
411 /* qemu_event_set has release semantics, but because it *loads*
412 * ev->value we need a full memory barrier here.
413 */
c096358e 414 assert(ev->initialized);
374293ca
PB
415 smp_mb();
416 if (atomic_read(&ev->value) != EV_SET) {
c7c4d063
PB
417 if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
418 /* There were waiters, wake them up. */
fbcc3e50 419 qemu_futex_wake(ev, INT_MAX);
c7c4d063
PB
420 }
421 }
422}
423
424void qemu_event_reset(QemuEvent *ev)
425{
374293ca
PB
426 unsigned value;
427
c096358e 428 assert(ev->initialized);
374293ca
PB
429 value = atomic_read(&ev->value);
430 smp_mb_acquire();
431 if (value == EV_SET) {
c7c4d063
PB
432 /*
433 * If there was a concurrent reset (or even reset+wait),
434 * do nothing. Otherwise change EV_SET->EV_FREE.
435 */
436 atomic_or(&ev->value, EV_FREE);
437 }
438}
439
440void qemu_event_wait(QemuEvent *ev)
441{
442 unsigned value;
443
c096358e 444 assert(ev->initialized);
374293ca
PB
445 value = atomic_read(&ev->value);
446 smp_mb_acquire();
c7c4d063
PB
447 if (value != EV_SET) {
448 if (value == EV_FREE) {
449 /*
450 * Leave the event reset and tell qemu_event_set that there
451 * are waiters. No need to retry, because there cannot be
67cc32eb 452 * a concurrent busy->free transition. After the CAS, the
c7c4d063
PB
453 * event will be either set or busy.
454 */
455 if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
456 return;
457 }
458 }
fbcc3e50 459 qemu_futex_wait(ev, EV_BUSY);
c7c4d063
PB
460 }
461}
462
a458774a 463static __thread NotifierList thread_exit;
ef57137f 464
a458774a
PM
465/*
466 * Note that in this implementation you can register a thread-exit
467 * notifier for the main thread, but it will never be called.
468 * This is OK because main thread exit can only happen when the
469 * entire process is exiting, and the API allows notifiers to not
470 * be called on process exit.
471 */
ef57137f
PB
472void qemu_thread_atexit_add(Notifier *notifier)
473{
a458774a 474 notifier_list_add(&thread_exit, notifier);
ef57137f
PB
475}
476
477void qemu_thread_atexit_remove(Notifier *notifier)
478{
ef57137f 479 notifier_remove(notifier);
ef57137f
PB
480}
481
a458774a 482static void qemu_thread_atexit_notify(void *arg)
ef57137f 483{
a458774a
PM
484 /*
485 * Called when non-main thread exits (via qemu_thread_exit()
486 * or by returning from its start routine.)
487 */
488 notifier_list_notify(&thread_exit, NULL);
ef57137f
PB
489}
490
68a93982 491typedef struct {
492 void *(*start_routine)(void *);
493 void *arg;
494 char *name;
495} QemuThreadArgs;
496
497static void *qemu_thread_start(void *args)
498{
499 QemuThreadArgs *qemu_thread_args = args;
500 void *(*start_routine)(void *) = qemu_thread_args->start_routine;
501 void *arg = qemu_thread_args->arg;
a458774a 502 void *r;
68a93982 503
479a5747 504#ifdef CONFIG_THREAD_SETNAME_BYTHREAD
68a93982 505 /* Attempt to set the threads name; note that this is for debug, so
506 * we're not going to fail if we can't set it.
507 */
d820fa5b 508 if (name_threads && qemu_thread_args->name) {
479a5747 509# if defined(CONFIG_PTHREAD_SETNAME_NP_W_TID)
d820fa5b 510 pthread_setname_np(pthread_self(), qemu_thread_args->name);
479a5747
RB
511# elif defined(CONFIG_PTHREAD_SETNAME_NP_WO_TID)
512 pthread_setname_np(qemu_thread_args->name);
513# endif
d820fa5b
PX
514 }
515#endif
68a93982 516 g_free(qemu_thread_args->name);
517 g_free(qemu_thread_args);
a458774a
PM
518 pthread_cleanup_push(qemu_thread_atexit_notify, NULL);
519 r = start_routine(arg);
520 pthread_cleanup_pop(1);
521 return r;
5c312079
DDAG
522}
523
4900116e 524void qemu_thread_create(QemuThread *thread, const char *name,
e5d355d1 525 void *(*start_routine)(void*),
cf218714 526 void *arg, int mode)
e5d355d1 527{
cf218714 528 sigset_t set, oldset;
e5d355d1 529 int err;
8763046b 530 pthread_attr_t attr;
d820fa5b 531 QemuThreadArgs *qemu_thread_args;
e5d355d1 532
8763046b
JK
533 err = pthread_attr_init(&attr);
534 if (err) {
535 error_exit(err, __func__);
536 }
55541c8a 537
68a93982 538 if (mode == QEMU_THREAD_DETACHED) {
539 pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
540 }
541
cf218714 542 /* Leave signal handling to the iothread. */
55541c8a 543 sigfillset(&set);
21a43af0
RB
544 /* Blocking the signals can result in undefined behaviour. */
545 sigdelset(&set, SIGSEGV);
546 sigdelset(&set, SIGFPE);
547 sigdelset(&set, SIGILL);
548 /* TODO avoid SIGBUS loss on macOS */
55541c8a 549 pthread_sigmask(SIG_SETMASK, &set, &oldset);
55541c8a 550
d820fa5b
PX
551 qemu_thread_args = g_new0(QemuThreadArgs, 1);
552 qemu_thread_args->name = g_strdup(name);
553 qemu_thread_args->start_routine = start_routine;
554 qemu_thread_args->arg = arg;
555
556 err = pthread_create(&thread->thread, &attr,
557 qemu_thread_start, qemu_thread_args);
4900116e 558
68a93982 559 if (err)
560 error_exit(err, __func__);
561
55541c8a 562 pthread_sigmask(SIG_SETMASK, &oldset, NULL);
8763046b
JK
563
564 pthread_attr_destroy(&attr);
e5d355d1
AL
565}
566
b7680cb6 567void qemu_thread_get_self(QemuThread *thread)
e5d355d1
AL
568{
569 thread->thread = pthread_self();
570}
571
2d797b65 572bool qemu_thread_is_self(QemuThread *thread)
e5d355d1 573{
b7680cb6 574 return pthread_equal(pthread_self(), thread->thread);
e5d355d1
AL
575}
576
313b1d69
CC
577void qemu_thread_exit(void *retval)
578{
579 pthread_exit(retval);
580}
8763046b
JK
581
582void *qemu_thread_join(QemuThread *thread)
583{
584 int err;
585 void *ret;
586
587 err = pthread_join(thread->thread, &ret);
588 if (err) {
589 error_exit(err, __func__);
590 }
591 return ret;
592}