/*
 * Win32 implementation for mutex/cond/thread functions
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Author:
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef _WIN32_WINNT
#define _WIN32_WINNT 0x0600
#endif

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/thread.h"
#include "qemu/notify.h"
#include <process.h>

static bool name_threads;

void qemu_thread_naming(bool enable)
{
    /* But note we don't actually name them on Windows yet */
    name_threads = enable;

    fprintf(stderr, "qemu: thread naming not supported on this host\n");
}

static void error_exit(int err, const char *msg)
{
    char *pstr;

    FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER,
                  NULL, err, 0, (LPTSTR)&pstr, 2, NULL);
    fprintf(stderr, "qemu: %s: %s\n", msg, pstr);
    LocalFree(pstr);
    abort();
}

void qemu_mutex_init(QemuMutex *mutex)
{
    InitializeSRWLock(&mutex->lock);
}

void qemu_mutex_destroy(QemuMutex *mutex)
{
    InitializeSRWLock(&mutex->lock);
}

void qemu_mutex_lock(QemuMutex *mutex)
{
    AcquireSRWLockExclusive(&mutex->lock);
}

int qemu_mutex_trylock(QemuMutex *mutex)
{
    int owned;

    owned = TryAcquireSRWLockExclusive(&mutex->lock);
    return !owned;
}

void qemu_mutex_unlock(QemuMutex *mutex)
{
    ReleaseSRWLockExclusive(&mutex->lock);
}

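/* Note: the SRW-lock based QemuMutex above is not recursive, and SRW locks
 * need no explicit destruction, which is why qemu_mutex_destroy() merely
 * re-initializes the lock. qemu_mutex_trylock() returns 0 when the lock was
 * acquired and non-zero otherwise. A minimal usage sketch; "lock" and
 * "counter" are illustrative names only, not part of this file:
 *
 *     static QemuMutex lock;
 *     static int counter;
 *
 *     qemu_mutex_init(&lock);
 *     if (qemu_mutex_trylock(&lock) == 0) {
 *         counter++;                       // got the lock without blocking
 *         qemu_mutex_unlock(&lock);
 *     }
 */
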
void qemu_rec_mutex_init(QemuRecMutex *mutex)
{
    InitializeCriticalSection(&mutex->lock);
}

void qemu_rec_mutex_destroy(QemuRecMutex *mutex)
{
    DeleteCriticalSection(&mutex->lock);
}

void qemu_rec_mutex_lock(QemuRecMutex *mutex)
{
    EnterCriticalSection(&mutex->lock);
}

int qemu_rec_mutex_trylock(QemuRecMutex *mutex)
{
    return !TryEnterCriticalSection(&mutex->lock);
}

void qemu_rec_mutex_unlock(QemuRecMutex *mutex)
{
    LeaveCriticalSection(&mutex->lock);
}

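/* Unlike the SRW-lock based QemuMutex, the CRITICAL_SECTION backing
 * QemuRecMutex is recursive: the owning thread may lock it again, and every
 * successful lock/trylock must be balanced by an unlock. As above,
 * qemu_rec_mutex_trylock() returns 0 on success and non-zero otherwise.
 */
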
void qemu_cond_init(QemuCond *cond)
{
    memset(cond, 0, sizeof(*cond));
    InitializeConditionVariable(&cond->var);
}

void qemu_cond_destroy(QemuCond *cond)
{
    InitializeConditionVariable(&cond->var);
}

void qemu_cond_signal(QemuCond *cond)
{
    WakeConditionVariable(&cond->var);
}

void qemu_cond_broadcast(QemuCond *cond)
{
    WakeAllConditionVariable(&cond->var);
}

void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
{
    SleepConditionVariableSRW(&cond->var, &mutex->lock, INFINITE, 0);
}

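/* As with any condition variable, qemu_cond_wait() may wake up spuriously,
 * so callers re-check their predicate in a loop. A minimal sketch; "lock",
 * "cond" and "ready" are illustrative names only:
 *
 *     qemu_mutex_lock(&lock);
 *     while (!ready) {
 *         qemu_cond_wait(&cond, &lock);
 *     }
 *     qemu_mutex_unlock(&lock);
 *
 * The producer sets "ready" under the same lock and then calls
 * qemu_cond_signal(&cond) or qemu_cond_broadcast(&cond).
 */
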
void qemu_sem_init(QemuSemaphore *sem, int init)
{
    /* Counting semaphore; the count is capped only by LONG_MAX. */
    sem->sema = CreateSemaphore(NULL, init, LONG_MAX, NULL);
}

void qemu_sem_destroy(QemuSemaphore *sem)
{
    CloseHandle(sem->sema);
}

void qemu_sem_post(QemuSemaphore *sem)
{
    ReleaseSemaphore(sem->sema, 1, NULL);
}

int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc = WaitForSingleObject(sem->sema, ms);
    if (rc == WAIT_OBJECT_0) {
        return 0;
    }
    if (rc != WAIT_TIMEOUT) {
        error_exit(GetLastError(), __func__);
    }
    return -1;
}

void qemu_sem_wait(QemuSemaphore *sem)
{
    if (WaitForSingleObject(sem->sema, INFINITE) != WAIT_OBJECT_0) {
        error_exit(GetLastError(), __func__);
    }
}

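/* qemu_sem_timedwait() returns 0 when the semaphore was acquired and -1 on
 * timeout; any other failure aborts via error_exit(). A minimal sketch;
 * "sem" is an illustrative name only:
 *
 *     QemuSemaphore sem;
 *
 *     qemu_sem_init(&sem, 0);
 *     qemu_sem_post(&sem);                      // count becomes 1
 *     if (qemu_sem_timedwait(&sem, 100) == 0) {
 *         // acquired within 100 ms
 *     }
 */
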
/* Wrap a Win32 manual-reset event with a fast userspace path. The idea
 * is to reset the Win32 event lazily, as part of a test-reset-test-wait
 * sequence. Such a sequence is, indeed, how QemuEvents are used by
 * RCU and other subsystems!
 *
 * Valid transitions:
 * - free->set, when setting the event
 * - busy->set, when setting the event, followed by SetEvent
 * - set->free, when resetting the event
 * - free->busy, when waiting
 *
 * set->busy does not happen (it can be observed from the outside but
 * it really is set->free->busy).
 *
 * busy->free provably cannot happen; to enforce it, the set->free transition
 * is done with an OR, which becomes a no-op if the event has concurrently
 * transitioned to free or busy (and is faster than cmpxchg).
 */

#define EV_SET         0
#define EV_FREE        1
#define EV_BUSY       -1

void qemu_event_init(QemuEvent *ev, bool init)
{
    /* Manual reset. */
    ev->event = CreateEvent(NULL, TRUE, TRUE, NULL);
    ev->value = (init ? EV_SET : EV_FREE);
}

void qemu_event_destroy(QemuEvent *ev)
{
    CloseHandle(ev->event);
}

void qemu_event_set(QemuEvent *ev)
{
    /* qemu_event_set has release semantics, but because it *loads*
     * ev->value we need a full memory barrier here.
     */
    smp_mb();
    if (atomic_read(&ev->value) != EV_SET) {
        if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
            /* There were waiters, wake them up. */
            SetEvent(ev->event);
        }
    }
}

void qemu_event_reset(QemuEvent *ev)
{
    unsigned value;

    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value == EV_SET) {
        /* If there was a concurrent reset (or even reset+wait),
         * do nothing. Otherwise change EV_SET->EV_FREE.
         */
        atomic_or(&ev->value, EV_FREE);
    }
}

void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    value = atomic_read(&ev->value);
    smp_mb_acquire();
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /* qemu_event_set is not yet going to call SetEvent, but we are
             * going to do another check for EV_SET below when setting EV_BUSY.
             * At that point it is safe to call WaitForSingleObject.
             */
            ResetEvent(ev->event);

            /* Tell qemu_event_set that there are waiters. No need to retry
             * because there cannot be a concurrent busy->free transition.
             * After the CAS, the event will be either set or busy.
             */
            if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                value = EV_SET;
            } else {
                value = EV_BUSY;
            }
        }
        if (value == EV_BUSY) {
            WaitForSingleObject(ev->event, INFINITE);
        }
    }
}

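/* A minimal sketch of the test-reset-test-wait pattern described above;
 * "ev" and "work_done" are illustrative names only:
 *
 *     static QemuEvent ev;
 *     static int work_done;
 *
 *     // consumer: test, reset, test again, then wait
 *     while (!atomic_read(&work_done)) {
 *         qemu_event_reset(&ev);
 *         if (atomic_read(&work_done)) {
 *             break;
 *         }
 *         qemu_event_wait(&ev);
 *     }
 *
 *     // producer
 *     atomic_set(&work_done, 1);
 *     qemu_event_set(&ev);
 */
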
struct QemuThreadData {
    /* Passed to win32_start_routine. */
    void *(*start_routine)(void *);
    void *arg;
    short mode;
    NotifierList exit;

    /* Only used for joinable threads. */
    bool exited;
    void *ret;
    CRITICAL_SECTION cs;
};

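/* QemuThreadData is allocated by qemu_thread_create() and freed either by
 * qemu_thread_exit() (detached threads) or by qemu_thread_join() (joinable
 * threads); the "exited" flag and "cs" exist so that qemu_thread_get_handle()
 * can tell whether the TID may still be opened.
 */
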
static bool atexit_registered;
static NotifierList main_thread_exit;

static __thread QemuThreadData *qemu_thread_data;

static void run_main_thread_exit(void)
{
    notifier_list_notify(&main_thread_exit, NULL);
}

void qemu_thread_atexit_add(Notifier *notifier)
{
    if (!qemu_thread_data) {
        if (!atexit_registered) {
            atexit_registered = true;
            atexit(run_main_thread_exit);
        }
        notifier_list_add(&main_thread_exit, notifier);
    } else {
        notifier_list_add(&qemu_thread_data->exit, notifier);
    }
}

void qemu_thread_atexit_remove(Notifier *notifier)
{
    notifier_remove(notifier);
}

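/* Notifiers registered here run when the calling thread exits: for the main
 * thread via atexit(), for qemu_thread_create() threads from
 * qemu_thread_exit(). A minimal sketch; "exit_notifier" and "on_thread_exit"
 * are illustrative names only:
 *
 *     static void on_thread_exit(Notifier *n, void *unused)
 *     {
 *         // per-thread cleanup
 *     }
 *
 *     static Notifier exit_notifier = { .notify = on_thread_exit };
 *
 *     qemu_thread_atexit_add(&exit_notifier);
 */
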
static unsigned __stdcall win32_start_routine(void *arg)
{
    QemuThreadData *data = (QemuThreadData *) arg;
    void *(*start_routine)(void *) = data->start_routine;
    void *thread_arg = data->arg;

    qemu_thread_data = data;
    qemu_thread_exit(start_routine(thread_arg));
    abort();
}

void qemu_thread_exit(void *arg)
{
    QemuThreadData *data = qemu_thread_data;

    notifier_list_notify(&data->exit, NULL);
    if (data->mode == QEMU_THREAD_JOINABLE) {
        data->ret = arg;
        EnterCriticalSection(&data->cs);
        data->exited = true;
        LeaveCriticalSection(&data->cs);
    } else {
        g_free(data);
    }
    _endthreadex(0);
}

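/* qemu_thread_exit() ends the calling thread via _endthreadex() and never
 * returns, so the abort() in win32_start_routine() above only guards against
 * that invariant breaking.
 */
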
void *qemu_thread_join(QemuThread *thread)
{
    QemuThreadData *data;
    void *ret;
    HANDLE handle;

    data = thread->data;
    if (data->mode == QEMU_THREAD_DETACHED) {
        return NULL;
    }

    /*
     * Because multiple copies of the QemuThread can exist via
     * qemu_thread_get_self, we need to store a value that cannot
     * leak there. The simplest, non-racy way is to store the TID,
     * discard the handle that _beginthreadex gives back, and
     * get another copy of the handle here.
     */
    handle = qemu_thread_get_handle(thread);
    if (handle) {
        WaitForSingleObject(handle, INFINITE);
        CloseHandle(handle);
    }
    ret = data->ret;
    DeleteCriticalSection(&data->cs);
    g_free(data);
    return ret;
}

void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void *),
                        void *arg, int mode)
{
    HANDLE hThread;
    struct QemuThreadData *data;

    data = g_malloc(sizeof *data);
    data->start_routine = start_routine;
    data->arg = arg;
    data->mode = mode;
    data->exited = false;
    notifier_list_init(&data->exit);

    if (data->mode != QEMU_THREAD_DETACHED) {
        InitializeCriticalSection(&data->cs);
    }

    hThread = (HANDLE) _beginthreadex(NULL, 0, win32_start_routine,
                                      data, 0, &thread->tid);
    if (!hThread) {
        error_exit(GetLastError(), __func__);
    }
    CloseHandle(hThread);
    thread->data = data;
}

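/* Note that the "name" argument is accepted for API symmetry with the POSIX
 * implementation but is not applied to the thread on Windows (see
 * qemu_thread_naming above). A minimal create/join sketch; "worker" is an
 * illustrative function name only:
 *
 *     static void *worker(void *opaque)
 *     {
 *         return opaque;
 *     }
 *
 *     QemuThread t;
 *     qemu_thread_create(&t, "worker", worker, NULL, QEMU_THREAD_JOINABLE);
 *     void *ret = qemu_thread_join(&t);
 */
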
void qemu_thread_get_self(QemuThread *thread)
{
    thread->data = qemu_thread_data;
    thread->tid = GetCurrentThreadId();
}

HANDLE qemu_thread_get_handle(QemuThread *thread)
{
    QemuThreadData *data;
    HANDLE handle;

    data = thread->data;
    if (data->mode == QEMU_THREAD_DETACHED) {
        return NULL;
    }

    EnterCriticalSection(&data->cs);
    if (!data->exited) {
        handle = OpenThread(SYNCHRONIZE | THREAD_SUSPEND_RESUME |
                            THREAD_SET_CONTEXT, FALSE, thread->tid);
    } else {
        handle = NULL;
    }
    LeaveCriticalSection(&data->cs);
    return handle;
}

bool qemu_thread_is_self(QemuThread *thread)
{
    return GetCurrentThreadId() == thread->tid;
}