]> git.proxmox.com Git - mirror_qemu.git/blame - util/qemu-thread-win32.c
virtio-scsi: finalize IOMMU support
[mirror_qemu.git] / util / qemu-thread-win32.c
CommitLineData
9257d46d
PB
1/*
2 * Win32 implementation for mutex/cond/thread functions
3 *
4 * Copyright Red Hat, Inc. 2010
5 *
6 * Author:
7 * Paolo Bonzini <pbonzini@redhat.com>
8 *
9 * This work is licensed under the terms of the GNU GPL, version 2 or later.
10 * See the COPYING file in the top-level directory.
11 *
12 */
12f8def0
AS
13
14#ifndef _WIN32_WINNT
15#define _WIN32_WINNT 0x0600
16#endif
17
aafd7584 18#include "qemu/osdep.h"
9257d46d 19#include "qemu-common.h"
1de7afc9 20#include "qemu/thread.h"
ef57137f 21#include "qemu/notify.h"
31f5a726 22#include "trace.h"
9257d46d 23#include <process.h>
9257d46d 24
static bool name_threads;

/*
 * Record whether the caller wants named threads.  Thread naming is not
 * implemented on Windows in this file, so the flag is stored but has no
 * effect; warn only when naming was actually requested, since disabling
 * an unsupported feature deserves no diagnostic.
 */
void qemu_thread_naming(bool enable)
{
    /* But note we don't actually name them on Windows yet */
    name_threads = enable;

    if (enable) {
        fprintf(stderr, "qemu: thread naming not supported on this host\n");
    }
}
34
9257d46d
PB
/*
 * Print a Win32 error code as a human-readable message and abort.
 * @err is a GetLastError()-style code; @msg identifies the caller.
 * Never returns.
 */
static void error_exit(int err, const char *msg)
{
    char *pstr;

    /* FORMAT_MESSAGE_ALLOCATE_BUFFER makes Windows allocate pstr for us;
     * it must be released with LocalFree(), which we do before aborting
     * purely for tidiness.
     */
    FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER,
                  NULL, err, 0, (LPTSTR)&pstr, 2, NULL);
    fprintf(stderr, "qemu: %s: %s\n", msg, pstr);
    LocalFree(pstr);
    abort();
}
45
/* Initialize a QemuMutex, implemented as a Win32 slim reader/writer lock
 * used only in exclusive mode.
 */
void qemu_mutex_init(QemuMutex *mutex)
{
    InitializeSRWLock(&mutex->lock);
}
50
1a290aea
SW
/* Destroy a QemuMutex.  SRW locks need no explicit teardown, so simply
 * re-initialize the lock; this also resets it to a known-unlocked state.
 */
void qemu_mutex_destroy(QemuMutex *mutex)
{
    InitializeSRWLock(&mutex->lock);
}
55
9257d46d
PB
/* Acquire @mutex exclusively, blocking until it is available. */
void qemu_mutex_lock(QemuMutex *mutex)
{
    AcquireSRWLockExclusive(&mutex->lock);
    /* Trace after acquisition so the trace order matches lock order. */
    trace_qemu_mutex_locked(mutex);
}
61
62int qemu_mutex_trylock(QemuMutex *mutex)
63{
64 int owned;
65
12f8def0 66 owned = TryAcquireSRWLockExclusive(&mutex->lock);
31f5a726
JRZ
67 if (owned) {
68 trace_qemu_mutex_locked(mutex);
69 return 0;
70 }
71 return -EBUSY;
9257d46d
PB
72}
73
/* Release @mutex.  Trace *before* releasing: once the lock is dropped,
 * another thread may acquire it and emit its own trace event, and the
 * trace stream should show the unlock first.
 */
void qemu_mutex_unlock(QemuMutex *mutex)
{
    trace_qemu_mutex_unlocked(mutex);
    ReleaseSRWLockExclusive(&mutex->lock);
}
79
feadec63
PB
/* Initialize a recursive mutex, backed by a Win32 critical section
 * (which is recursive by nature).
 */
void qemu_rec_mutex_init(QemuRecMutex *mutex)
{
    InitializeCriticalSection(&mutex->lock);
}
84
/* Destroy a recursive mutex; must not be held by any thread. */
void qemu_rec_mutex_destroy(QemuRecMutex *mutex)
{
    DeleteCriticalSection(&mutex->lock);
}
89
/* Acquire a recursive mutex; the same thread may lock it repeatedly. */
void qemu_rec_mutex_lock(QemuRecMutex *mutex)
{
    EnterCriticalSection(&mutex->lock);
}
94
95int qemu_rec_mutex_trylock(QemuRecMutex *mutex)
96{
97 return !TryEnterCriticalSection(&mutex->lock);
98}
99
/* Release one level of a recursive mutex. */
void qemu_rec_mutex_unlock(QemuRecMutex *mutex)
{
    LeaveCriticalSection(&mutex->lock);
}
104
9257d46d
PB
/* Initialize a condition variable, backed by a Win32 CONDITION_VARIABLE.
 * The struct is zeroed first so any extra fields start in a known state.
 */
void qemu_cond_init(QemuCond *cond)
{
    memset(cond, 0, sizeof(*cond));
    InitializeConditionVariable(&cond->var);
}
110
1a290aea
SW
/* Destroy a condition variable.  Win32 condition variables need no
 * explicit teardown; re-initializing resets it to a known state.
 */
void qemu_cond_destroy(QemuCond *cond)
{
    InitializeConditionVariable(&cond->var);
}
115
9257d46d
PB
/* Wake one thread waiting on @cond, if any. */
void qemu_cond_signal(QemuCond *cond)
{
    WakeConditionVariable(&cond->var);
}
120
/* Wake all threads currently waiting on @cond. */
void qemu_cond_broadcast(QemuCond *cond)
{
    WakeAllConditionVariable(&cond->var);
}
125
/* Wait on @cond with @mutex held.  SleepConditionVariableSRW atomically
 * releases the SRW lock while waiting and reacquires it before returning,
 * so the mutex trace events bracket the wait.
 * NOTE(review): the return value is unchecked, so a wait failure would go
 * unnoticed — presumably acceptable here; confirm against callers.
 */
void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
{
    trace_qemu_mutex_unlocked(mutex);
    SleepConditionVariableSRW(&cond->var, &mutex->lock, INFINITE, 0);
    trace_qemu_mutex_locked(mutex);
}
132
38b14db3
PB
/* Initialize a semaphore with initial count @init and an effectively
 * unbounded maximum count.
 */
void qemu_sem_init(QemuSemaphore *sem, int init)
{
    sem->sema = CreateSemaphore(NULL, init, LONG_MAX, NULL);
}
138
/* Destroy a semaphore by closing its kernel handle. */
void qemu_sem_destroy(QemuSemaphore *sem)
{
    CloseHandle(sem->sema);
}
143
/* Increment the semaphore count by one, waking one waiter if any. */
void qemu_sem_post(QemuSemaphore *sem)
{
    ReleaseSemaphore(sem->sema, 1, NULL);
}
148
/* Wait on @sem for at most @ms milliseconds.
 * Returns 0 if the semaphore was acquired, -1 on timeout.
 * Any other wait failure aborts via error_exit().
 */
int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc = WaitForSingleObject(sem->sema, ms);
    if (rc == WAIT_OBJECT_0) {
        return 0;
    }
    if (rc != WAIT_TIMEOUT) {
        error_exit(GetLastError(), __func__);
    }
    return -1;
}
160
/* Wait on @sem with no timeout; aborts on any wait failure. */
void qemu_sem_wait(QemuSemaphore *sem)
{
    if (WaitForSingleObject(sem->sema, INFINITE) != WAIT_OBJECT_0) {
        error_exit(GetLastError(), __func__);
    }
}
167
7c9b2bf6
PB
168/* Wrap a Win32 manual-reset event with a fast userspace path. The idea
169 * is to reset the Win32 event lazily, as part of a test-reset-test-wait
170 * sequence. Such a sequence is, indeed, how QemuEvents are used by
171 * RCU and other subsystems!
172 *
173 * Valid transitions:
174 * - free->set, when setting the event
fbcc3e50 175 * - busy->set, when setting the event, followed by SetEvent
7c9b2bf6
PB
176 * - set->free, when resetting the event
177 * - free->busy, when waiting
178 *
179 * set->busy does not happen (it can be observed from the outside but
180 * it really is set->free->busy).
181 *
182 * busy->free provably cannot happen; to enforce it, the set->free transition
183 * is done with an OR, which becomes a no-op if the event has concurrently
184 * transitioned to free or busy (and is faster than cmpxchg).
185 */
186
187#define EV_SET 0
188#define EV_FREE 1
189#define EV_BUSY -1
190
c7c4d063
PB
/* Initialize a QemuEvent.  The Win32 event is created manual-reset and
 * signaled; ev->value (EV_SET/EV_FREE) is the fast userspace state and
 * is what actually distinguishes an initially-set from an initially-free
 * event (see the state machine comment above).
 */
void qemu_event_init(QemuEvent *ev, bool init)
{
    /* Manual reset.  */
    ev->event = CreateEvent(NULL, TRUE, TRUE, NULL);
    ev->value = (init ? EV_SET : EV_FREE);
}
197
/* Destroy a QemuEvent by closing the underlying Win32 event handle. */
void qemu_event_destroy(QemuEvent *ev)
{
    CloseHandle(ev->event);
}
202
/* Set the event: transition free->set or busy->set.  Only the busy->set
 * transition needs the (expensive) kernel SetEvent call, because only
 * then is some thread actually blocked in WaitForSingleObject.
 */
void qemu_event_set(QemuEvent *ev)
{
    /* qemu_event_set has release semantics, but because it *loads*
     * ev->value we need a full memory barrier here.
     */
    smp_mb();
    if (atomic_read(&ev->value) != EV_SET) {
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up. */
            SetEvent(ev->event);
        }
    }
}
216
/* Reset the event: transition set->free.  The Win32 event itself is
 * reset lazily in qemu_event_wait, not here.
 */
void qemu_event_reset(QemuEvent *ev)
{
    unsigned value;

    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value == EV_SET) {
        /* If there was a concurrent reset (or even reset+wait),
         * do nothing.  Otherwise change EV_SET->EV_FREE.  The OR is a
         * no-op for EV_FREE and EV_BUSY, so it cannot clobber a state
         * set concurrently by another thread.
         */
        atomic_or(&ev->value, EV_FREE);
    }
}
230
/* Block until the event is set.  Fast path: if ev->value is already
 * EV_SET, return without touching the kernel event at all.
 */
void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    /* Acquire pairing with qemu_event_set: once EV_SET is observed,
     * everything the setter did before setting is visible here.
     */
    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /* qemu_event_set is not yet going to call SetEvent, but we are
             * going to do another check for EV_SET below when setting EV_BUSY.
             * At that point it is safe to call WaitForSingleObject.
             */
            ResetEvent(ev->event);

            /* Tell qemu_event_set that there are waiters.  No need to retry
             * because there cannot be a concurrent busy->free transition.
             * After the CAS, the event will be either set or busy.
             */
            if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                value = EV_SET;
            } else {
                value = EV_BUSY;
            }
        }
        if (value == EV_BUSY) {
            WaitForSingleObject(ev->event, INFINITE);
        }
    }
}
260
/* Per-thread bookkeeping allocated by qemu_thread_create.  Freed by the
 * thread itself when detached, or by qemu_thread_join when joinable.
 */
struct QemuThreadData {
    /* Passed to win32_start_routine. */
    void *(*start_routine)(void *);   /* thread entry point */
    void *arg;                        /* opaque argument for start_routine */
    short mode;                       /* QEMU_THREAD_JOINABLE or _DETACHED */
    NotifierList exit;                /* notifiers run at thread exit */

    /* Only used for joinable threads. */
    bool exited;                      /* set under cs once the thread is done */
    void *ret;                        /* start_routine's return, read by join */
    CRITICAL_SECTION cs;              /* serializes exited vs. get_handle */
};
273
ef57137f
PB
static bool atexit_registered;         /* run_main_thread_exit hooked yet? */
static NotifierList main_thread_exit;  /* exit notifiers for the main thread */

/* This thread's QemuThreadData; NULL in the main thread, which was not
 * created through qemu_thread_create.
 */
static __thread QemuThreadData *qemu_thread_data;
ef57137f
PB
/* atexit() hook: fire the main thread's exit notifiers at process exit. */
static void run_main_thread_exit(void)
{
    notifier_list_notify(&main_thread_exit, NULL);
}
283
284void qemu_thread_atexit_add(Notifier *notifier)
285{
286 if (!qemu_thread_data) {
287 if (!atexit_registered) {
288 atexit_registered = true;
289 atexit(run_main_thread_exit);
290 }
291 notifier_list_add(&main_thread_exit, notifier);
292 } else {
293 notifier_list_add(&qemu_thread_data->exit, notifier);
294 }
295}
296
/* Unregister a previously added exit notifier; works regardless of which
 * list (per-thread or main-thread) it was added to.
 */
void qemu_thread_atexit_remove(Notifier *notifier)
{
    notifier_remove(notifier);
}
301
9257d46d
PB
302static unsigned __stdcall win32_start_routine(void *arg)
303{
403e6331
PB
304 QemuThreadData *data = (QemuThreadData *) arg;
305 void *(*start_routine)(void *) = data->start_routine;
306 void *thread_arg = data->arg;
307
6265e4ff 308 qemu_thread_data = data;
403e6331 309 qemu_thread_exit(start_routine(thread_arg));
9257d46d
PB
310 abort();
311}
312
/* Terminate the calling thread, making @arg available to a joiner.
 * Runs the thread's exit notifiers first.  For detached threads the
 * QemuThreadData is freed here; for joinable threads it is freed by
 * qemu_thread_join.
 */
void qemu_thread_exit(void *arg)
{
    QemuThreadData *data = qemu_thread_data;

    notifier_list_notify(&data->exit, NULL);
    if (data->mode == QEMU_THREAD_JOINABLE) {
        data->ret = arg;
        /* Mark exited under the critical section so that
         * qemu_thread_get_handle does not open a handle for a thread
         * that is already gone.
         */
        EnterCriticalSection(&data->cs);
        data->exited = true;
        LeaveCriticalSection(&data->cs);
    } else {
        g_free(data);
    }
    _endthreadex(0);
}
328
/* Wait for @thread to finish and return its qemu_thread_exit value.
 * Returns NULL immediately for detached threads.  Frees the thread's
 * QemuThreadData, so a thread may be joined at most once.
 */
void *qemu_thread_join(QemuThread *thread)
{
    QemuThreadData *data;
    void *ret;
    HANDLE handle;

    data = thread->data;
    if (data->mode == QEMU_THREAD_DETACHED) {
        return NULL;
    }

    /*
     * Because multiple copies of the QemuThread can exist via
     * qemu_thread_get_self, we need to store a value that cannot
     * leak there.  The simplest, non racy way is to store the TID,
     * discard the handle that _beginthreadex gives back, and
     * get another copy of the handle here.
     */
    handle = qemu_thread_get_handle(thread);
    if (handle) {
        /* NULL handle means the thread already exited (see
         * qemu_thread_get_handle), in which case data->ret is valid.
         */
        WaitForSingleObject(handle, INFINITE);
        CloseHandle(handle);
    }
    ret = data->ret;
    DeleteCriticalSection(&data->cs);
    g_free(data);
    return ret;
}
357
/* Create a new thread running @start_routine(@arg).
 * @name is accepted for API symmetry but unused here (thread naming is
 * not supported on Windows in this file; see qemu_thread_naming).
 * @mode is QEMU_THREAD_JOINABLE or QEMU_THREAD_DETACHED.
 * Aborts via error_exit() if thread creation fails.
 */
void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void *),
                        void *arg, int mode)
{
    HANDLE hThread;
    struct QemuThreadData *data;

    data = g_malloc(sizeof *data);
    data->start_routine = start_routine;
    data->arg = arg;
    data->mode = mode;
    data->exited = false;
    notifier_list_init(&data->exit);

    /* The critical section is only needed for joinable threads, where it
     * protects data->exited against qemu_thread_get_handle.
     */
    if (data->mode != QEMU_THREAD_DETACHED) {
        InitializeCriticalSection(&data->cs);
    }

    /* Keep only the TID; the handle from _beginthreadex is discarded and
     * re-opened on demand (see the comment in qemu_thread_join).
     */
    hThread = (HANDLE) _beginthreadex(NULL, 0, win32_start_routine,
                                      data, 0, &thread->tid);
    if (!hThread) {
        error_exit(GetLastError(), __func__);
    }
    CloseHandle(hThread);
    thread->data = data;
}
384
/* Fill @thread with the calling thread's identity.  data is NULL for the
 * main thread (see qemu_thread_data above).
 */
void qemu_thread_get_self(QemuThread *thread)
{
    thread->data = qemu_thread_data;
    thread->tid = GetCurrentThreadId();
}
390
1ecf47bf
PB
/* Open a fresh handle to @thread from its stored TID.
 * Returns NULL if the thread is detached or has already exited; the
 * caller owns the returned handle and must CloseHandle() it.
 */
HANDLE qemu_thread_get_handle(QemuThread *thread)
{
    QemuThreadData *data;
    HANDLE handle;

    data = thread->data;
    if (data->mode == QEMU_THREAD_DETACHED) {
        return NULL;
    }

    /* OpenThread under the critical section, so the TID cannot be reused
     * for a different thread between the exited check and the open
     * (data->exited is set before the thread terminates).
     */
    EnterCriticalSection(&data->cs);
    if (!data->exited) {
        handle = OpenThread(SYNCHRONIZE | THREAD_SUSPEND_RESUME |
                            THREAD_SET_CONTEXT, FALSE, thread->tid);
    } else {
        handle = NULL;
    }
    LeaveCriticalSection(&data->cs);
    return handle;
}
411
/* Return true if @thread refers to the calling thread (TID comparison). */
bool qemu_thread_is_self(QemuThread *thread)
{
    return GetCurrentThreadId() == thread->tid;
}