/*
 * Win32 implementation for mutex/cond/thread functions
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Author:
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef _WIN32_WINNT
#define _WIN32_WINNT 0x0600
#endif

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/thread.h"
#include "qemu/notify.h"
#include "qemu-thread-common.h"
#include <process.h>

static bool name_threads;

void qemu_thread_naming(bool enable)
{
    /* But note we don't actually name them on Windows yet */
    name_threads = enable;

    fprintf(stderr, "qemu: thread naming not supported on this host\n");
}

static void error_exit(int err, const char *msg)
{
    char *pstr;

    FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER,
                  NULL, err, 0, (LPTSTR)&pstr, 2, NULL);
    fprintf(stderr, "qemu: %s: %s\n", msg, pstr);
    LocalFree(pstr);
    abort();
}

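/*
 * QemuMutex is backed by a slim reader/writer (SRW) lock, always taken in
 * exclusive mode.  The qemu_mutex_pre_lock/post_lock/pre_unlock hooks come
 * from qemu-thread-common.h and feed QEMU's mutex tracing and debugging
 * support.
 */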
void qemu_mutex_init(QemuMutex *mutex)
{
    InitializeSRWLock(&mutex->lock);
    qemu_mutex_post_init(mutex);
}

void qemu_mutex_destroy(QemuMutex *mutex)
{
    assert(mutex->initialized);
    mutex->initialized = false;
    InitializeSRWLock(&mutex->lock);
}

void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line)
{
    assert(mutex->initialized);
    qemu_mutex_pre_lock(mutex, file, line);
    AcquireSRWLockExclusive(&mutex->lock);
    qemu_mutex_post_lock(mutex, file, line);
}

int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line)
{
    int owned;

    assert(mutex->initialized);
    owned = TryAcquireSRWLockExclusive(&mutex->lock);
    if (owned) {
        qemu_mutex_post_lock(mutex, file, line);
        return 0;
    }
    return -EBUSY;
}

void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line)
{
    assert(mutex->initialized);
    qemu_mutex_pre_unlock(mutex, file, line);
    ReleaseSRWLockExclusive(&mutex->lock);
}

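/*
 * QemuRecMutex must support recursive locking, which SRW locks do not;
 * it is therefore backed by a Win32 CRITICAL_SECTION, which is recursive
 * by design.
 */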
void qemu_rec_mutex_init(QemuRecMutex *mutex)
{
    InitializeCriticalSection(&mutex->lock);
    mutex->initialized = true;
}

void qemu_rec_mutex_destroy(QemuRecMutex *mutex)
{
    assert(mutex->initialized);
    mutex->initialized = false;
    DeleteCriticalSection(&mutex->lock);
}

void qemu_rec_mutex_lock(QemuRecMutex *mutex)
{
    assert(mutex->initialized);
    EnterCriticalSection(&mutex->lock);
}

int qemu_rec_mutex_trylock(QemuRecMutex *mutex)
{
    assert(mutex->initialized);
    return !TryEnterCriticalSection(&mutex->lock);
}

void qemu_rec_mutex_unlock(QemuRecMutex *mutex)
{
    assert(mutex->initialized);
    LeaveCriticalSection(&mutex->lock);
}

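/*
 * QemuCond wraps a Win32 CONDITION_VARIABLE (Vista and later, hence the
 * _WIN32_WINNT 0x0600 requirement above); SleepConditionVariableSRW lets
 * it sleep directly on the SRW lock embedded in QemuMutex.
 */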
void qemu_cond_init(QemuCond *cond)
{
    memset(cond, 0, sizeof(*cond));
    InitializeConditionVariable(&cond->var);
    cond->initialized = true;
}

void qemu_cond_destroy(QemuCond *cond)
{
    assert(cond->initialized);
    cond->initialized = false;
    InitializeConditionVariable(&cond->var);
}

void qemu_cond_signal(QemuCond *cond)
{
    assert(cond->initialized);
    WakeConditionVariable(&cond->var);
}

void qemu_cond_broadcast(QemuCond *cond)
{
    assert(cond->initialized);
    WakeAllConditionVariable(&cond->var);
}

void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex,
                         const char *file, const int line)
{
    assert(cond->initialized);
    qemu_mutex_pre_unlock(mutex, file, line);
    SleepConditionVariableSRW(&cond->var, &mutex->lock, INFINITE, 0);
    qemu_mutex_post_lock(mutex, file, line);
}

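/* QemuSemaphore maps directly onto a Win32 counting semaphore handle. */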
void qemu_sem_init(QemuSemaphore *sem, int init)
{
    /* Counting semaphore with a (practically) unbounded maximum count. */
    sem->sema = CreateSemaphore(NULL, init, LONG_MAX, NULL);
    sem->initialized = true;
}

void qemu_sem_destroy(QemuSemaphore *sem)
{
    assert(sem->initialized);
    sem->initialized = false;
    CloseHandle(sem->sema);
}

void qemu_sem_post(QemuSemaphore *sem)
{
    assert(sem->initialized);
    ReleaseSemaphore(sem->sema, 1, NULL);
}

int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc;

    assert(sem->initialized);
    rc = WaitForSingleObject(sem->sema, ms);
    if (rc == WAIT_OBJECT_0) {
        return 0;
    }
    if (rc != WAIT_TIMEOUT) {
        error_exit(GetLastError(), __func__);
    }
    return -1;
}

void qemu_sem_wait(QemuSemaphore *sem)
{
    assert(sem->initialized);
    if (WaitForSingleObject(sem->sema, INFINITE) != WAIT_OBJECT_0) {
        error_exit(GetLastError(), __func__);
    }
}

/* Wrap a Win32 manual-reset event with a fast userspace path.  The idea
 * is to reset the Win32 event lazily, as part of a test-reset-test-wait
 * sequence.  Such a sequence is, indeed, how QemuEvents are used by
 * RCU and other subsystems!
 *
 * Valid transitions:
 * - free->set, when setting the event
 * - busy->set, when setting the event, followed by SetEvent
 * - set->free, when resetting the event
 * - free->busy, when waiting
 *
 * set->busy does not happen (it can be observed from the outside but
 * it really is set->free->busy).
 *
 * busy->free provably cannot happen; to enforce it, the set->free transition
 * is done with an OR, which becomes a no-op if the event has concurrently
 * transitioned to free or busy (and is faster than cmpxchg).
 */

#define EV_SET   0
#define EV_FREE  1
#define EV_BUSY -1

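/*
 * Illustrative sketch of the test-reset-test-wait sequence mentioned above
 * ("done" is a hypothetical flag owned by the caller, not part of this API):
 *
 *     while (!atomic_read(&done)) {     // test
 *         qemu_event_reset(&ev);        // set->free (no-op if already free/busy)
 *         if (atomic_read(&done)) {     // test again after the reset
 *             break;
 *         }
 *         qemu_event_wait(&ev);         // free->busy, then block until set
 *     }
 *
 * The signalling side stores "done" and then calls qemu_event_set(&ev).
 */
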
void qemu_event_init(QemuEvent *ev, bool init)
{
    /* Manual reset. */
    ev->event = CreateEvent(NULL, TRUE, TRUE, NULL);
    ev->value = (init ? EV_SET : EV_FREE);
    ev->initialized = true;
}

void qemu_event_destroy(QemuEvent *ev)
{
    assert(ev->initialized);
    ev->initialized = false;
    CloseHandle(ev->event);
}

void qemu_event_set(QemuEvent *ev)
{
    assert(ev->initialized);
    /* qemu_event_set has release semantics, but because it *loads*
     * ev->value we need a full memory barrier here.
     */
    smp_mb();
    if (atomic_read(&ev->value) != EV_SET) {
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up. */
            SetEvent(ev->event);
        }
    }
}

void qemu_event_reset(QemuEvent *ev)
{
    unsigned value;

    assert(ev->initialized);
    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value == EV_SET) {
        /* If there was a concurrent reset (or even reset+wait),
         * do nothing.  Otherwise change EV_SET->EV_FREE.
         */
        atomic_or(&ev->value, EV_FREE);
    }
}

void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    assert(ev->initialized);
    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /* qemu_event_set is not yet going to call SetEvent, but we are
             * going to do another check for EV_SET below when setting EV_BUSY.
             * At that point it is safe to call WaitForSingleObject.
             */
            ResetEvent(ev->event);

            /* Tell qemu_event_set that there are waiters.  No need to retry
             * because there cannot be a concurrent busy->free transition.
             * After the CAS, the event will be either set or busy.
             */
            if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                value = EV_SET;
            } else {
                value = EV_BUSY;
            }
        }
        if (value == EV_BUSY) {
            WaitForSingleObject(ev->event, INFINITE);
        }
    }
}

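/*
 * Per-thread bookkeeping.  A detached thread frees its QemuThreadData
 * itself in qemu_thread_exit(); a joinable thread leaves it allocated so
 * that qemu_thread_join() can read data->ret and free it.
 */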
struct QemuThreadData {
    /* Passed to win32_start_routine. */
    void *(*start_routine)(void *);
    void *arg;
    short mode;
    NotifierList exit;

    /* Only used for joinable threads. */
    bool exited;
    void *ret;
    CRITICAL_SECTION cs;
};

static bool atexit_registered;
static NotifierList main_thread_exit;

static __thread QemuThreadData *qemu_thread_data;

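/*
 * Exit notifiers.  Threads created through qemu_thread_create run their
 * notifier list from qemu_thread_exit().  A thread that was not created
 * that way (in practice the main thread) has qemu_thread_data == NULL,
 * so its notifiers are collected in main_thread_exit and run from an
 * atexit() handler instead.
 */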
static void run_main_thread_exit(void)
{
    notifier_list_notify(&main_thread_exit, NULL);
}

void qemu_thread_atexit_add(Notifier *notifier)
{
    if (!qemu_thread_data) {
        if (!atexit_registered) {
            atexit_registered = true;
            atexit(run_main_thread_exit);
        }
        notifier_list_add(&main_thread_exit, notifier);
    } else {
        notifier_list_add(&qemu_thread_data->exit, notifier);
    }
}

void qemu_thread_atexit_remove(Notifier *notifier)
{
    notifier_remove(notifier);
}

static unsigned __stdcall win32_start_routine(void *arg)
{
    QemuThreadData *data = (QemuThreadData *) arg;
    void *(*start_routine)(void *) = data->start_routine;
    void *thread_arg = data->arg;

    qemu_thread_data = data;
    qemu_thread_exit(start_routine(thread_arg));
    abort();
}

void qemu_thread_exit(void *arg)
{
    QemuThreadData *data = qemu_thread_data;

    notifier_list_notify(&data->exit, NULL);
    if (data->mode == QEMU_THREAD_JOINABLE) {
        data->ret = arg;
        EnterCriticalSection(&data->cs);
        data->exited = true;
        LeaveCriticalSection(&data->cs);
    } else {
        g_free(data);
    }
    _endthreadex(0);
}

void *qemu_thread_join(QemuThread *thread)
{
    QemuThreadData *data;
    void *ret;
    HANDLE handle;

    data = thread->data;
    if (data->mode == QEMU_THREAD_DETACHED) {
        return NULL;
    }

    /*
     * Because multiple copies of the QemuThread can exist via
     * qemu_thread_get_self, we need to store a value that cannot
     * leak there.  The simplest, non-racy way is to store the TID,
     * discard the handle that _beginthreadex gives back, and
     * get another copy of the handle here.
     */
    handle = qemu_thread_get_handle(thread);
    if (handle) {
        WaitForSingleObject(handle, INFINITE);
        CloseHandle(handle);
    }
    ret = data->ret;
    DeleteCriticalSection(&data->cs);
    g_free(data);
    return ret;
}

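/*
 * Note that the "name" argument is accepted for API compatibility with
 * the POSIX implementation but is not applied to the Win32 thread here
 * (see qemu_thread_naming above).
 */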
void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void *),
                        void *arg, int mode)
{
    HANDLE hThread;
    struct QemuThreadData *data;

    data = g_malloc(sizeof *data);
    data->start_routine = start_routine;
    data->arg = arg;
    data->mode = mode;
    data->exited = false;
    notifier_list_init(&data->exit);

    if (data->mode != QEMU_THREAD_DETACHED) {
        InitializeCriticalSection(&data->cs);
    }

    hThread = (HANDLE) _beginthreadex(NULL, 0, win32_start_routine,
                                      data, 0, &thread->tid);
    if (!hThread) {
        error_exit(GetLastError(), __func__);
    }
    CloseHandle(hThread);
    thread->data = data;
}

void qemu_thread_get_self(QemuThread *thread)
{
    thread->data = qemu_thread_data;
    thread->tid = GetCurrentThreadId();
}

HANDLE qemu_thread_get_handle(QemuThread *thread)
{
    QemuThreadData *data;
    HANDLE handle;

    data = thread->data;
    if (data->mode == QEMU_THREAD_DETACHED) {
        return NULL;
    }

    EnterCriticalSection(&data->cs);
    if (!data->exited) {
        handle = OpenThread(SYNCHRONIZE | THREAD_SUSPEND_RESUME |
                            THREAD_SET_CONTEXT, FALSE, thread->tid);
    } else {
        handle = NULL;
    }
    LeaveCriticalSection(&data->cs);
    return handle;
}

bool qemu_thread_is_self(QemuThread *thread)
{
    return GetCurrentThreadId() == thread->tid;
}