]> git.proxmox.com Git - mirror_qemu.git/blame - util/qemu-thread-posix.c
util/uri: Remove the uri_string_escape() function
[mirror_qemu.git] / util / qemu-thread-posix.c
CommitLineData
e5d355d1
AL
1/*
2 * Wrappers around mutex/cond/thread functions
3 *
4 * Copyright Red Hat, Inc. 2009
5 *
6 * Author:
7 * Marcelo Tosatti <mtosatti@redhat.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2 or later.
10 * See the COPYING file in the top-level directory.
11 *
12 */
aafd7584 13#include "qemu/osdep.h"
1de7afc9 14#include "qemu/thread.h"
c7c4d063 15#include "qemu/atomic.h"
ef57137f 16#include "qemu/notify.h"
f1aff7aa 17#include "qemu-thread-common.h"
ce9f0e5b 18#include "qemu/tsan.h"
7730f32c 19#include "qemu/bitmap.h"
e5d355d1 20
3ada67a3
BS
21#ifdef CONFIG_PTHREAD_SET_NAME_NP
22#include <pthread_np.h>
23#endif
24
8f480de0
DDAG
/* Whether threads created via qemu_thread_create() should have their
 * name propagated to the host OS; toggled by qemu_thread_naming(). */
static bool name_threads;

/* Enable or disable thread naming (a debugging convenience only). */
void qemu_thread_naming(bool enable)
{
    name_threads = enable;

#if !defined CONFIG_PTHREAD_SETNAME_NP_W_TID && \
    !defined CONFIG_PTHREAD_SETNAME_NP_WO_TID && \
    !defined CONFIG_PTHREAD_SET_NAME_NP
    /* This is a debugging option, not fatal */
    if (enable) {
        fprintf(stderr, "qemu: thread naming not supported on this host\n");
    }
#endif
}
40
e5d355d1
AL
/* Report an unrecoverable pthread error (function name + strerror of
 * the returned errno value) and abort the process. */
static void error_exit(int err, const char *msg)
{
    fprintf(stderr, "qemu: %s: %s\n", msg, strerror(err));
    abort();
}
46
657ac98b
LM
47static inline clockid_t qemu_timedwait_clockid(void)
48{
49#ifdef CONFIG_PTHREAD_CONDATTR_SETCLOCK
50 return CLOCK_MONOTONIC;
51#else
52 return CLOCK_REALTIME;
53#endif
54}
55
3dcc9c6e
YK
56static void compute_abs_deadline(struct timespec *ts, int ms)
57{
657ac98b
LM
58 clock_gettime(qemu_timedwait_clockid(), ts);
59 ts->tv_nsec += (ms % 1000) * 1000000;
60 ts->tv_sec += ms / 1000;
3dcc9c6e
YK
61 if (ts->tv_nsec >= 1000000000) {
62 ts->tv_sec++;
63 ts->tv_nsec -= 1000000000;
64 }
65}
66
e5d355d1
AL
67void qemu_mutex_init(QemuMutex *mutex)
68{
69 int err;
70
24fa9049 71 err = pthread_mutex_init(&mutex->lock, NULL);
e5d355d1
AL
72 if (err)
73 error_exit(err, __func__);
f1aff7aa 74 qemu_mutex_post_init(mutex);
e5d355d1
AL
75}
76
313b1d69
CC
77void qemu_mutex_destroy(QemuMutex *mutex)
78{
79 int err;
80
c096358e
FZ
81 assert(mutex->initialized);
82 mutex->initialized = false;
313b1d69
CC
83 err = pthread_mutex_destroy(&mutex->lock);
84 if (err)
85 error_exit(err, __func__);
86}
87
6c27a0de 88void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line)
e5d355d1
AL
89{
90 int err;
91
c096358e 92 assert(mutex->initialized);
f1aff7aa 93 qemu_mutex_pre_lock(mutex, file, line);
e5d355d1
AL
94 err = pthread_mutex_lock(&mutex->lock);
95 if (err)
96 error_exit(err, __func__);
f1aff7aa 97 qemu_mutex_post_lock(mutex, file, line);
e5d355d1
AL
98}
99
6c27a0de 100int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line)
e5d355d1 101{
31f5a726
JRZ
102 int err;
103
c096358e 104 assert(mutex->initialized);
31f5a726
JRZ
105 err = pthread_mutex_trylock(&mutex->lock);
106 if (err == 0) {
f1aff7aa 107 qemu_mutex_post_lock(mutex, file, line);
31f5a726
JRZ
108 return 0;
109 }
110 if (err != EBUSY) {
111 error_exit(err, __func__);
112 }
113 return -EBUSY;
e5d355d1
AL
114}
115
6c27a0de 116void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line)
e5d355d1
AL
117{
118 int err;
119
c096358e 120 assert(mutex->initialized);
f1aff7aa 121 qemu_mutex_pre_unlock(mutex, file, line);
e5d355d1
AL
122 err = pthread_mutex_unlock(&mutex->lock);
123 if (err)
124 error_exit(err, __func__);
125}
126
feadec63
PB
127void qemu_rec_mutex_init(QemuRecMutex *mutex)
128{
129 int err;
130 pthread_mutexattr_t attr;
131
132 pthread_mutexattr_init(&attr);
133 pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
6c98635e 134 err = pthread_mutex_init(&mutex->m.lock, &attr);
feadec63
PB
135 pthread_mutexattr_destroy(&attr);
136 if (err) {
137 error_exit(err, __func__);
138 }
6c98635e 139 mutex->m.initialized = true;
feadec63
PB
140}
141
4b193bb7
RH
/* Destroy a recursive mutex; it must not be held by any thread. */
void qemu_rec_mutex_destroy(QemuRecMutex *mutex)
{
    qemu_mutex_destroy(&mutex->m);
}

/* The recursive-mutex operations simply forward to the plain-mutex
 * *_impl functions; recursion itself is provided by the
 * PTHREAD_MUTEX_RECURSIVE attribute set in qemu_rec_mutex_init(). */
void qemu_rec_mutex_lock_impl(QemuRecMutex *mutex, const char *file, int line)
{
    qemu_mutex_lock_impl(&mutex->m, file, line);
}

/* Returns 0 on success, -EBUSY if the mutex is held by another thread. */
int qemu_rec_mutex_trylock_impl(QemuRecMutex *mutex, const char *file, int line)
{
    return qemu_mutex_trylock_impl(&mutex->m, file, line);
}

void qemu_rec_mutex_unlock_impl(QemuRecMutex *mutex, const char *file, int line)
{
    qemu_mutex_unlock_impl(&mutex->m, file, line);
}
161
e5d355d1
AL
162void qemu_cond_init(QemuCond *cond)
163{
657ac98b 164 pthread_condattr_t attr;
e5d355d1
AL
165 int err;
166
657ac98b
LM
167 err = pthread_condattr_init(&attr);
168 if (err) {
169 error_exit(err, __func__);
170 }
171#ifdef CONFIG_PTHREAD_CONDATTR_SETCLOCK
172 err = pthread_condattr_setclock(&attr, qemu_timedwait_clockid());
173 if (err) {
e5d355d1 174 error_exit(err, __func__);
657ac98b
LM
175 }
176#endif
177 err = pthread_cond_init(&cond->cond, &attr);
178 if (err) {
179 error_exit(err, __func__);
180 }
181 err = pthread_condattr_destroy(&attr);
182 if (err) {
183 error_exit(err, __func__);
184 }
c096358e 185 cond->initialized = true;
e5d355d1
AL
186}
187
313b1d69
CC
188void qemu_cond_destroy(QemuCond *cond)
189{
190 int err;
191
c096358e
FZ
192 assert(cond->initialized);
193 cond->initialized = false;
313b1d69
CC
194 err = pthread_cond_destroy(&cond->cond);
195 if (err)
196 error_exit(err, __func__);
197}
198
e5d355d1
AL
199void qemu_cond_signal(QemuCond *cond)
200{
201 int err;
202
c096358e 203 assert(cond->initialized);
e5d355d1
AL
204 err = pthread_cond_signal(&cond->cond);
205 if (err)
206 error_exit(err, __func__);
207}
208
209void qemu_cond_broadcast(QemuCond *cond)
210{
211 int err;
212
c096358e 213 assert(cond->initialized);
e5d355d1
AL
214 err = pthread_cond_broadcast(&cond->cond);
215 if (err)
216 error_exit(err, __func__);
217}
218
6c27a0de 219void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex, const char *file, const int line)
e5d355d1
AL
220{
221 int err;
222
c096358e 223 assert(cond->initialized);
f1aff7aa 224 qemu_mutex_pre_unlock(mutex, file, line);
e5d355d1 225 err = pthread_cond_wait(&cond->cond, &mutex->lock);
f1aff7aa 226 qemu_mutex_post_lock(mutex, file, line);
e5d355d1
AL
227 if (err)
228 error_exit(err, __func__);
229}
230
/* Wait on @cond until the absolute deadline @ts.
 * Returns true if the condvar was signalled, false on timeout; any
 * other pthread error is fatal. */
static bool TSA_NO_TSA
qemu_cond_timedwait_ts(QemuCond *cond, QemuMutex *mutex, struct timespec *ts,
                       const char *file, const int line)
{
    int err;

    assert(cond->initialized);
    trace_qemu_mutex_unlock(mutex, file, line);
    err = pthread_cond_timedwait(&cond->cond, &mutex->lock, ts);
    trace_qemu_mutex_locked(mutex, file, line);
    if (err && err != ETIMEDOUT) {
        error_exit(err, __func__);
    }
    return err != ETIMEDOUT;
}

/* Wait on @cond for at most @ms milliseconds.
 * Returns true if signalled, false on timeout. */
bool qemu_cond_timedwait_impl(QemuCond *cond, QemuMutex *mutex, int ms,
                              const char *file, const int line)
{
    struct timespec ts;

    compute_abs_deadline(&ts, ms);
    return qemu_cond_timedwait_ts(cond, mutex, &ts, file, line);
}
255
38b14db3
PB
/* Initialise a counting semaphore with @init tokens.
 * A negative @init is a fatal usage error. */
void qemu_sem_init(QemuSemaphore *sem, int init)
{
    qemu_mutex_init(&sem->mutex);
    qemu_cond_init(&sem->cond);

    if (init < 0) {
        error_exit(EINVAL, __func__);
    }
    sem->count = init;
}
266
/* Tear down the semaphore, in reverse order of qemu_sem_init(). */
void qemu_sem_destroy(QemuSemaphore *sem)
{
    qemu_cond_destroy(&sem->cond);
    qemu_mutex_destroy(&sem->mutex);
}

/* Release one token, waking a single waiter if any. */
void qemu_sem_post(QemuSemaphore *sem)
{
    qemu_mutex_lock(&sem->mutex);
    if (sem->count == UINT_MAX) {
        /* Counter would overflow: treat as a fatal usage error. */
        error_exit(EINVAL, __func__);
    } else {
        sem->count++;
        qemu_cond_signal(&sem->cond);
    }
    qemu_mutex_unlock(&sem->mutex);
}
284
38b14db3
PB
285int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
286{
a0d45db8 287 bool rc = true;
c166cb72
PB
288 struct timespec ts;
289
c166cb72 290 compute_abs_deadline(&ts, ms);
a0d45db8 291 qemu_mutex_lock(&sem->mutex);
79761c66 292 while (sem->count == 0) {
8ab30264
PB
293 if (ms == 0) {
294 rc = false;
295 } else {
296 rc = qemu_cond_timedwait_ts(&sem->cond, &sem->mutex, &ts,
297 __FILE__, __LINE__);
298 }
a0d45db8 299 if (!rc) { /* timeout */
c166cb72
PB
300 break;
301 }
c166cb72 302 }
a0d45db8 303 if (rc) {
79761c66
IT
304 --sem->count;
305 }
a0d45db8
LM
306 qemu_mutex_unlock(&sem->mutex);
307 return (rc ? 0 : -1);
38b14db3
PB
308}
309
/* Block until a token is available, then consume it. */
void qemu_sem_wait(QemuSemaphore *sem)
{
    qemu_mutex_lock(&sem->mutex);
    while (sem->count == 0) {
        qemu_cond_wait(&sem->cond, &sem->mutex);
    }
    --sem->count;
    qemu_mutex_unlock(&sem->mutex);
}
319
#ifdef __linux__
#include "qemu/futex.h"
#else
/*
 * Fallback for hosts without futexes: emulate futex wake/wait with the
 * QemuEvent's embedded mutex/condvar pair.
 */
static inline void qemu_futex_wake(QemuEvent *ev, int n)
{
    assert(ev->initialized);
    pthread_mutex_lock(&ev->lock);
    if (n == 1) {
        pthread_cond_signal(&ev->cond);
    } else {
        pthread_cond_broadcast(&ev->cond);
    }
    pthread_mutex_unlock(&ev->lock);
}

/* Sleep until ev->value leaves @val (checked under ev->lock). */
static inline void qemu_futex_wait(QemuEvent *ev, unsigned val)
{
    assert(ev->initialized);
    pthread_mutex_lock(&ev->lock);
    if (ev->value == val) {
        pthread_cond_wait(&ev->cond, &ev->lock);
    }
    pthread_mutex_unlock(&ev->lock);
}
#endif

/* Valid transitions:
 * - free->set, when setting the event
 * - busy->set, when setting the event, followed by qemu_futex_wake
 * - set->free, when resetting the event
 * - free->busy, when waiting
 *
 * set->busy does not happen (it can be observed from the outside but
 * it really is set->free->busy).
 *
 * busy->free provably cannot happen; to enforce it, the set->free transition
 * is done with an OR, which becomes a no-op if the event has concurrently
 * transitioned to free or busy.
 */

#define EV_SET 0
#define EV_FREE 1
#define EV_BUSY -1
363
/* Initialise @ev; @init selects whether it starts signalled (EV_SET)
 * or clear (EV_FREE). */
void qemu_event_init(QemuEvent *ev, bool init)
{
#ifndef __linux__
    /* Non-Linux hosts use the mutex/condvar futex emulation above. */
    pthread_mutex_init(&ev->lock, NULL);
    pthread_cond_init(&ev->cond, NULL);
#endif

    ev->value = (init ? EV_SET : EV_FREE);
    ev->initialized = true;
}

/* Destroy @ev; no thread may be waiting on it. */
void qemu_event_destroy(QemuEvent *ev)
{
    assert(ev->initialized);
    ev->initialized = false;
#ifndef __linux__
    pthread_mutex_destroy(&ev->lock);
    pthread_cond_destroy(&ev->cond);
#endif
}
384
/* Signal the event, waking every thread blocked in qemu_event_wait(). */
void qemu_event_set(QemuEvent *ev)
{
    assert(ev->initialized);

    /*
     * Pairs with both qemu_event_reset() and qemu_event_wait().
     *
     * qemu_event_set has release semantics, but because it *loads*
     * ev->value we need a full memory barrier here.
     */
    smp_mb();
    if (qatomic_read(&ev->value) != EV_SET) {
        int old = qatomic_xchg(&ev->value, EV_SET);

        /* Pairs with memory barrier in kernel futex_wait system call. */
        smp_mb__after_rmw();
        if (old == EV_BUSY) {
            /* There were waiters, wake them up. */
            qemu_futex_wake(ev, INT_MAX);
        }
    }
}
407
/* Return the event to the clear state (EV_SET -> EV_FREE); see the
 * transition diagram above for why this uses an atomic OR. */
void qemu_event_reset(QemuEvent *ev)
{
    assert(ev->initialized);

    /*
     * If there was a concurrent reset (or even reset+wait),
     * do nothing. Otherwise change EV_SET->EV_FREE.
     */
    qatomic_or(&ev->value, EV_FREE);

    /*
     * Order reset before checking the condition in the caller.
     * Pairs with the first memory barrier in qemu_event_set().
     */
    smp_mb__after_rmw();
}
424
/* Block until the event is signalled; returns immediately if it
 * already is.  See the free/busy/set transition diagram above. */
void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    assert(ev->initialized);

    /*
     * qemu_event_wait must synchronize with qemu_event_set even if it does
     * not go down the slow path, so this load-acquire is needed that
     * synchronizes with the first memory barrier in qemu_event_set().
     *
     * If we do go down the slow path, there is no requirement at all: we
     * might miss a qemu_event_set() here but ultimately the memory barrier in
     * qemu_futex_wait() will ensure the check is done correctly.
     */
    value = qatomic_load_acquire(&ev->value);
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /*
             * Leave the event reset and tell qemu_event_set that there are
             * waiters. No need to retry, because there cannot be a concurrent
             * busy->free transition. After the CAS, the event will be either
             * set or busy.
             *
             * This cmpxchg doesn't have particular ordering requirements if it
             * succeeds (moving the store earlier can only cause qemu_event_set()
             * to issue _more_ wakeups), the failing case needs acquire semantics
             * like the load above.
             */
            if (qatomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                return;
            }
        }

        /*
         * This is the final check for a concurrent set, so it does need
         * a smp_mb() pairing with the second barrier of qemu_event_set().
         * The barrier is inside the FUTEX_WAIT system call.
         */
        qemu_futex_wait(ev, EV_BUSY);
    }
}
467
/* Per-thread list of notifiers to run when the thread exits. */
static __thread NotifierList thread_exit;

/*
 * Note that in this implementation you can register a thread-exit
 * notifier for the main thread, but it will never be called.
 * This is OK because main thread exit can only happen when the
 * entire process is exiting, and the API allows notifiers to not
 * be called on process exit.
 */
void qemu_thread_atexit_add(Notifier *notifier)
{
    notifier_list_add(&thread_exit, notifier);
}

/* Unregister a notifier previously added with qemu_thread_atexit_add();
 * must be called from the same thread that registered it. */
void qemu_thread_atexit_remove(Notifier *notifier)
{
    notifier_remove(notifier);
}

static void qemu_thread_atexit_notify(void *arg)
{
    /*
     * Called when non-main thread exits (via qemu_thread_exit()
     * or by returning from its start routine.)
     */
    notifier_list_notify(&thread_exit, NULL);
}
495
/* Heap-allocated bundle passed from qemu_thread_create() to the new
 * thread; freed by qemu_thread_start() once consumed. */
typedef struct {
    void *(*start_routine)(void *);
    void *arg;
    char *name;
} QemuThreadArgs;

/* Trampoline run as the new thread's entry point: applies the debug
 * name, arranges for thread-exit notifiers, then calls the user's
 * start routine. */
static void *qemu_thread_start(void *args)
{
    QemuThreadArgs *qemu_thread_args = args;
    void *(*start_routine)(void *) = qemu_thread_args->start_routine;
    void *arg = qemu_thread_args->arg;
    void *r;

    /* Attempt to set the threads name; note that this is for debug, so
     * we're not going to fail if we can't set it.
     */
    if (name_threads && qemu_thread_args->name) {
# if defined(CONFIG_PTHREAD_SETNAME_NP_W_TID)
        pthread_setname_np(pthread_self(), qemu_thread_args->name);
# elif defined(CONFIG_PTHREAD_SETNAME_NP_WO_TID)
        pthread_setname_np(qemu_thread_args->name);
# elif defined(CONFIG_PTHREAD_SET_NAME_NP)
        pthread_set_name_np(pthread_self(), qemu_thread_args->name);
# endif
    }
    QEMU_TSAN_ANNOTATE_THREAD_NAME(qemu_thread_args->name);
    g_free(qemu_thread_args->name);
    g_free(qemu_thread_args);

    /*
     * GCC 11 with glibc 2.17 on PowerPC reports
     *
     * qemu-thread-posix.c:540:5: error: ‘__sigsetjmp’ accessing 656 bytes
     * in a region of size 528 [-Werror=stringop-overflow=]
     * 540 | pthread_cleanup_push(qemu_thread_atexit_notify, NULL);
     * | ^~~~~~~~~~~~~~~~~~~~
     *
     * which is clearly nonsense.
     */
#pragma GCC diagnostic push
#ifndef __clang__
#pragma GCC diagnostic ignored "-Wstringop-overflow"
#endif

    /* Run exit notifiers even if the thread is cancelled. */
    pthread_cleanup_push(qemu_thread_atexit_notify, NULL);
    r = start_routine(arg);
    pthread_cleanup_pop(1);

#pragma GCC diagnostic pop

    return r;
}
548
/* Create a new thread running @start_routine(@arg).
 * @name is a debug name (applied only if qemu_thread_naming() enabled it);
 * @mode is QEMU_THREAD_JOINABLE or QEMU_THREAD_DETACHED.
 * Aborts on any pthread failure. */
void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void*),
                        void *arg, int mode)
{
    sigset_t set, oldset;
    int err;
    pthread_attr_t attr;
    QemuThreadArgs *qemu_thread_args;

    err = pthread_attr_init(&attr);
    if (err) {
        error_exit(err, __func__);
    }

    if (mode == QEMU_THREAD_DETACHED) {
        pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    }

    /* Leave signal handling to the iothread. */
    sigfillset(&set);
    /* Blocking the signals can result in undefined behaviour. */
    sigdelset(&set, SIGSEGV);
    sigdelset(&set, SIGFPE);
    sigdelset(&set, SIGILL);
    /* TODO avoid SIGBUS loss on macOS */
    pthread_sigmask(SIG_SETMASK, &set, &oldset);

    /* Ownership of the args bundle passes to qemu_thread_start(). */
    qemu_thread_args = g_new0(QemuThreadArgs, 1);
    qemu_thread_args->name = g_strdup(name);
    qemu_thread_args->start_routine = start_routine;
    qemu_thread_args->arg = arg;

    err = pthread_create(&thread->thread, &attr,
                         qemu_thread_start, qemu_thread_args);

    if (err)
        error_exit(err, __func__);

    /* Restore the caller's signal mask; the child inherited the
     * temporary all-blocked mask at creation time. */
    pthread_sigmask(SIG_SETMASK, &oldset, NULL);

    pthread_attr_destroy(&attr);
}
591
7730f32c
DH
592int qemu_thread_set_affinity(QemuThread *thread, unsigned long *host_cpus,
593 unsigned long nbits)
594{
595#if defined(CONFIG_PTHREAD_AFFINITY_NP)
596 const size_t setsize = CPU_ALLOC_SIZE(nbits);
597 unsigned long value;
598 cpu_set_t *cpuset;
599 int err;
600
601 cpuset = CPU_ALLOC(nbits);
602 g_assert(cpuset);
603
604 CPU_ZERO_S(setsize, cpuset);
605 value = find_first_bit(host_cpus, nbits);
606 while (value < nbits) {
607 CPU_SET_S(value, setsize, cpuset);
608 value = find_next_bit(host_cpus, nbits, value + 1);
609 }
610
611 err = pthread_setaffinity_np(thread->thread, setsize, cpuset);
612 CPU_FREE(cpuset);
613 return err;
614#else
615 return -ENOSYS;
616#endif
617}
618
619int qemu_thread_get_affinity(QemuThread *thread, unsigned long **host_cpus,
620 unsigned long *nbits)
621{
622#if defined(CONFIG_PTHREAD_AFFINITY_NP)
623 unsigned long tmpbits;
624 cpu_set_t *cpuset;
625 size_t setsize;
626 int i, err;
627
628 tmpbits = CPU_SETSIZE;
629 while (true) {
630 setsize = CPU_ALLOC_SIZE(tmpbits);
631 cpuset = CPU_ALLOC(tmpbits);
632 g_assert(cpuset);
633
634 err = pthread_getaffinity_np(thread->thread, setsize, cpuset);
635 if (err) {
636 CPU_FREE(cpuset);
637 if (err != -EINVAL) {
638 return err;
639 }
640 tmpbits *= 2;
641 } else {
642 break;
643 }
644 }
645
646 /* Convert the result into a proper bitmap. */
647 *nbits = tmpbits;
648 *host_cpus = bitmap_new(tmpbits);
649 for (i = 0; i < tmpbits; i++) {
650 if (CPU_ISSET(i, cpuset)) {
651 set_bit(i, *host_cpus);
652 }
653 }
654 CPU_FREE(cpuset);
655 return 0;
656#else
657 return -ENOSYS;
658#endif
659}
660
/* Store the calling thread's handle into @thread. */
void qemu_thread_get_self(QemuThread *thread)
{
    thread->thread = pthread_self();
}

/* Return true if @thread refers to the calling thread. */
bool qemu_thread_is_self(QemuThread *thread)
{
    return pthread_equal(pthread_self(), thread->thread);
}

/* Terminate the calling thread; @retval becomes visible to a joiner. */
void qemu_thread_exit(void *retval)
{
    pthread_exit(retval);
}
8763046b
JK
675
676void *qemu_thread_join(QemuThread *thread)
677{
678 int err;
679 void *ret;
680
681 err = pthread_join(thread->thread, &ret);
682 if (err) {
683 error_exit(err, __func__);
684 }
685 return ret;
686}