/*
 * Win32 implementation for mutex/cond/thread functions
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Author:
 *  Paolo Bonzini     <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/thread.h"
#include "qemu/notify.h"
#include "qemu-thread-common.h"
#include <process.h>

static bool name_threads;

void qemu_thread_naming(bool enable)
{
    /* But note we don't actually name them on Windows yet */
    name_threads = enable;

    fprintf(stderr, "qemu: thread naming not supported on this host\n");
}
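
/* Print the Win32 error message for @err, prefixed with @msg, and abort. */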

static void error_exit(int err, const char *msg)
{
    char *pstr;

    FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER,
                  NULL, err, 0, (LPTSTR)&pstr, 2, NULL);
    fprintf(stderr, "qemu: %s: %s\n", msg, pstr);
    LocalFree(pstr);
    abort();
}
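
/*
 * QemuMutex is built on a slim reader/writer (SRW) lock used in exclusive
 * mode only; it is therefore not recursive.  The qemu_mutex_pre_/post_*
 * hooks come from qemu-thread-common.h and feed tracing/debugging.
 */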

void qemu_mutex_init(QemuMutex *mutex)
{
    InitializeSRWLock(&mutex->lock);
    qemu_mutex_post_init(mutex);
}

void qemu_mutex_destroy(QemuMutex *mutex)
{
    assert(mutex->initialized);
    mutex->initialized = false;
    InitializeSRWLock(&mutex->lock);
}

void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line)
{
    assert(mutex->initialized);
    qemu_mutex_pre_lock(mutex, file, line);
    AcquireSRWLockExclusive(&mutex->lock);
    qemu_mutex_post_lock(mutex, file, line);
}

int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line)
{
    int owned;

    assert(mutex->initialized);
    owned = TryAcquireSRWLockExclusive(&mutex->lock);
    if (owned) {
        qemu_mutex_post_lock(mutex, file, line);
        return 0;
    }
    return -EBUSY;
}

void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line)
{
    assert(mutex->initialized);
    qemu_mutex_pre_unlock(mutex, file, line);
    ReleaseSRWLockExclusive(&mutex->lock);
}
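
/*
 * Recursive mutexes are mapped onto CRITICAL_SECTION, which (unlike the SRW
 * lock above) may be re-entered by the thread that already owns it.
 */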

void qemu_rec_mutex_init(QemuRecMutex *mutex)
{
    InitializeCriticalSection(&mutex->lock);
    mutex->initialized = true;
}

void qemu_rec_mutex_destroy(QemuRecMutex *mutex)
{
    assert(mutex->initialized);
    mutex->initialized = false;
    DeleteCriticalSection(&mutex->lock);
}

void qemu_rec_mutex_lock_impl(QemuRecMutex *mutex, const char *file, int line)
{
    assert(mutex->initialized);
    EnterCriticalSection(&mutex->lock);
}

int qemu_rec_mutex_trylock_impl(QemuRecMutex *mutex, const char *file, int line)
{
    assert(mutex->initialized);
    return !TryEnterCriticalSection(&mutex->lock);
}

void qemu_rec_mutex_unlock(QemuRecMutex *mutex)
{
    assert(mutex->initialized);
    LeaveCriticalSection(&mutex->lock);
}
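
/*
 * Condition variables use the native Win32 CONDITION_VARIABLE; waiting is
 * done with SleepConditionVariableSRW against the SRW lock inside QemuMutex.
 */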

void qemu_cond_init(QemuCond *cond)
{
    memset(cond, 0, sizeof(*cond));
    InitializeConditionVariable(&cond->var);
    cond->initialized = true;
}

void qemu_cond_destroy(QemuCond *cond)
{
    assert(cond->initialized);
    cond->initialized = false;
    InitializeConditionVariable(&cond->var);
}

void qemu_cond_signal(QemuCond *cond)
{
    assert(cond->initialized);
    WakeConditionVariable(&cond->var);
}

void qemu_cond_broadcast(QemuCond *cond)
{
    assert(cond->initialized);
    WakeAllConditionVariable(&cond->var);
}

void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex, const char *file, const int line)
{
    assert(cond->initialized);
    qemu_mutex_pre_unlock(mutex, file, line);
    SleepConditionVariableSRW(&cond->var, &mutex->lock, INFINITE, 0);
    qemu_mutex_post_lock(mutex, file, line);
}
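
/* Wait with a timeout of @ms milliseconds; returns false only if the wait
 * timed out, true otherwise.  Any other failure aborts via error_exit(). */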

bool qemu_cond_timedwait_impl(QemuCond *cond, QemuMutex *mutex, int ms,
                              const char *file, const int line)
{
    int rc = 0;

    assert(cond->initialized);
    trace_qemu_mutex_unlock(mutex, file, line);
    if (!SleepConditionVariableSRW(&cond->var, &mutex->lock, ms, 0)) {
        rc = GetLastError();
    }
    trace_qemu_mutex_locked(mutex, file, line);
    if (rc && rc != ERROR_TIMEOUT) {
        error_exit(rc, __func__);
    }
    return rc != ERROR_TIMEOUT;
}
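
/*
 * Semaphores wrap native Win32 semaphore handles.  qemu_sem_timedwait()
 * returns 0 when the semaphore was acquired and -1 on timeout.
 */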

void qemu_sem_init(QemuSemaphore *sem, int init)
{
    /* Counting semaphore: initial count 'init', maximum count LONG_MAX. */
    sem->sema = CreateSemaphore(NULL, init, LONG_MAX, NULL);
    sem->initialized = true;
}

void qemu_sem_destroy(QemuSemaphore *sem)
{
    assert(sem->initialized);
    sem->initialized = false;
    CloseHandle(sem->sema);
}

void qemu_sem_post(QemuSemaphore *sem)
{
    assert(sem->initialized);
    ReleaseSemaphore(sem->sema, 1, NULL);
}

int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc;

    assert(sem->initialized);
    rc = WaitForSingleObject(sem->sema, ms);
    if (rc == WAIT_OBJECT_0) {
        return 0;
    }
    if (rc != WAIT_TIMEOUT) {
        error_exit(GetLastError(), __func__);
    }
    return -1;
}

void qemu_sem_wait(QemuSemaphore *sem)
{
    assert(sem->initialized);
    if (WaitForSingleObject(sem->sema, INFINITE) != WAIT_OBJECT_0) {
        error_exit(GetLastError(), __func__);
    }
}

/* Wrap a Win32 manual-reset event with a fast userspace path.  The idea
 * is to reset the Win32 event lazily, as part of a test-reset-test-wait
 * sequence.  Such a sequence is, indeed, how QemuEvents are used by
 * RCU and other subsystems!
 *
 * Valid transitions:
 * - free->set, when setting the event
 * - busy->set, when setting the event, followed by SetEvent
 * - set->free, when resetting the event
 * - free->busy, when waiting
 *
 * set->busy does not happen (it can be observed from the outside but
 * it really is set->free->busy).
 *
 * busy->free provably cannot happen; to enforce it, the set->free transition
 * is done with an OR, which becomes a no-op if the event has concurrently
 * transitioned to free or busy (and is faster than cmpxchg).
 */

#define EV_SET         0
#define EV_FREE        1
#define EV_BUSY       -1
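
/*
 * For illustration only (a sketch of the test-reset-test-wait pattern
 * described above, not code from this file): a waiter typically does
 *
 *     qemu_event_reset(ev);
 *     if (!check_condition()) {
 *         qemu_event_wait(ev);
 *     }
 *
 * while the other side makes the condition true and then calls
 * qemu_event_set(ev), so a set that races with the reset is never lost.
 */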

void qemu_event_init(QemuEvent *ev, bool init)
{
    /* Manual reset.  */
    ev->event = CreateEvent(NULL, TRUE, TRUE, NULL);
    ev->value = (init ? EV_SET : EV_FREE);
    ev->initialized = true;
}

void qemu_event_destroy(QemuEvent *ev)
{
    assert(ev->initialized);
    ev->initialized = false;
    CloseHandle(ev->event);
}

void qemu_event_set(QemuEvent *ev)
{
    assert(ev->initialized);
    /* qemu_event_set has release semantics, but because it *loads*
     * ev->value we need a full memory barrier here.
     */
    smp_mb();
    if (atomic_read(&ev->value) != EV_SET) {
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up.  */
            SetEvent(ev->event);
        }
    }
}

void qemu_event_reset(QemuEvent *ev)
{
    unsigned value;

    assert(ev->initialized);
    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value == EV_SET) {
        /* If there was a concurrent reset (or even reset+wait),
         * do nothing.  Otherwise change EV_SET->EV_FREE.
         */
        atomic_or(&ev->value, EV_FREE);
    }
}

void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    assert(ev->initialized);
    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /* qemu_event_set is not yet going to call SetEvent, but we are
             * going to do another check for EV_SET below when setting EV_BUSY.
             * At that point it is safe to call WaitForSingleObject.
             */
            ResetEvent(ev->event);

            /* Tell qemu_event_set that there are waiters.  No need to retry
             * because there cannot be a concurrent busy->free transition.
             * After the CAS, the event will be either set or busy.
             */
            if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                value = EV_SET;
            } else {
                value = EV_BUSY;
            }
        }
        if (value == EV_BUSY) {
            WaitForSingleObject(ev->event, INFINITE);
        }
    }
}
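
/*
 * Per-thread bookkeeping.  The structure is freed by qemu_thread_exit() for
 * detached threads and by qemu_thread_join() for joinable ones.
 */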

struct QemuThreadData {
    /* Passed to win32_start_routine.  */
    void             *(*start_routine)(void *);
    void             *arg;
    short             mode;
    NotifierList      exit;

    /* Only used for joinable threads.  */
    bool              exited;
    void             *ret;
    CRITICAL_SECTION  cs;
};

static bool atexit_registered;
static NotifierList main_thread_exit;

static __thread QemuThreadData *qemu_thread_data;
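
/*
 * Thread-exit notifiers: for the main thread (which has no QemuThreadData)
 * they are run from an atexit() handler; for threads created here they are
 * run by qemu_thread_exit().
 */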

static void run_main_thread_exit(void)
{
    notifier_list_notify(&main_thread_exit, NULL);
}

void qemu_thread_atexit_add(Notifier *notifier)
{
    if (!qemu_thread_data) {
        if (!atexit_registered) {
            atexit_registered = true;
            atexit(run_main_thread_exit);
        }
        notifier_list_add(&main_thread_exit, notifier);
    } else {
        notifier_list_add(&qemu_thread_data->exit, notifier);
    }
}

void qemu_thread_atexit_remove(Notifier *notifier)
{
    notifier_remove(notifier);
}
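
/* Trampoline passed to _beginthreadex(); it runs the QEMU start routine and
 * terminates the thread through qemu_thread_exit(), so it never returns. */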

static unsigned __stdcall win32_start_routine(void *arg)
{
    QemuThreadData *data = (QemuThreadData *) arg;
    void *(*start_routine)(void *) = data->start_routine;
    void *thread_arg = data->arg;

    qemu_thread_data = data;
    qemu_thread_exit(start_routine(thread_arg));
    abort();
}

void qemu_thread_exit(void *arg)
{
    QemuThreadData *data = qemu_thread_data;

    notifier_list_notify(&data->exit, NULL);
    if (data->mode == QEMU_THREAD_JOINABLE) {
        data->ret = arg;
        EnterCriticalSection(&data->cs);
        data->exited = true;
        LeaveCriticalSection(&data->cs);
    } else {
        g_free(data);
    }
    _endthreadex(0);
}

void *qemu_thread_join(QemuThread *thread)
{
    QemuThreadData *data;
    void *ret;
    HANDLE handle;

    data = thread->data;
    if (data->mode == QEMU_THREAD_DETACHED) {
        return NULL;
    }

    /*
     * Because multiple copies of the QemuThread can exist via
     * qemu_thread_get_self, we need to store a value that cannot
     * leak there.  The simplest, non racy way is to store the TID,
     * discard the handle that _beginthreadex gives back, and
     * get another copy of the handle here.
     */
    handle = qemu_thread_get_handle(thread);
    if (handle) {
        WaitForSingleObject(handle, INFINITE);
        CloseHandle(handle);
    }
    ret = data->ret;
    DeleteCriticalSection(&data->cs);
    g_free(data);
    return ret;
}
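
/*
 * Typical usage (illustrative sketch, not part of this file): the caller
 * passes QEMU_THREAD_JOINABLE or QEMU_THREAD_DETACHED as @mode, e.g.
 *
 *     QemuThread t;
 *     qemu_thread_create(&t, "worker", worker_fn, opaque, QEMU_THREAD_JOINABLE);
 *     ...
 *     result = qemu_thread_join(&t);
 *
 * where worker_fn has the signature void *worker_fn(void *opaque) and its
 * return value is what qemu_thread_join() hands back.
 */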

void qemu_thread_create(QemuThread *thread, const char *name,
                       void *(*start_routine)(void *),
                       void *arg, int mode)
{
    HANDLE hThread;
    struct QemuThreadData *data;

    data = g_malloc(sizeof *data);
    data->start_routine = start_routine;
    data->arg = arg;
    data->mode = mode;
    data->exited = false;
    notifier_list_init(&data->exit);

    if (data->mode != QEMU_THREAD_DETACHED) {
        InitializeCriticalSection(&data->cs);
    }

    hThread = (HANDLE) _beginthreadex(NULL, 0, win32_start_routine,
                                      data, 0, &thread->tid);
    if (!hThread) {
        error_exit(GetLastError(), __func__);
    }
    CloseHandle(hThread);
    thread->data = data;
}

void qemu_thread_get_self(QemuThread *thread)
{
    thread->data = qemu_thread_data;
    thread->tid = GetCurrentThreadId();
}

HANDLE qemu_thread_get_handle(QemuThread *thread)
{
    QemuThreadData *data;
    HANDLE handle;

    data = thread->data;
    if (data->mode == QEMU_THREAD_DETACHED) {
        return NULL;
    }

    EnterCriticalSection(&data->cs);
    if (!data->exited) {
        handle = OpenThread(SYNCHRONIZE | THREAD_SUSPEND_RESUME |
                            THREAD_SET_CONTEXT, FALSE, thread->tid);
    } else {
        handle = NULL;
    }
    LeaveCriticalSection(&data->cs);
    return handle;
}

bool qemu_thread_is_self(QemuThread *thread)
{
    return GetCurrentThreadId() == thread->tid;
}