/*
 * Win32 implementation for mutex/cond/thread functions
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Author:
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/thread.h"
#include "qemu/notify.h"
#include "qemu-thread-common.h"
#include <process.h>

static bool name_threads;

typedef HRESULT (WINAPI *pSetThreadDescription) (HANDLE hThread,
                                                 PCWSTR lpThreadDescription);
static pSetThreadDescription SetThreadDescriptionFunc;
static HMODULE kernel32_module;

static bool load_set_thread_description(void)
{
    static gsize _init_once = 0;

    if (g_once_init_enter(&_init_once)) {
        kernel32_module = LoadLibrary("kernel32.dll");
        if (kernel32_module) {
            SetThreadDescriptionFunc =
                (pSetThreadDescription)GetProcAddress(kernel32_module,
                                                      "SetThreadDescription");
            if (!SetThreadDescriptionFunc) {
                FreeLibrary(kernel32_module);
            }
        }
        g_once_init_leave(&_init_once, 1);
    }

    return !!SetThreadDescriptionFunc;
}

void qemu_thread_naming(bool enable)
{
    name_threads = enable;

    if (enable && !load_set_thread_description()) {
        fprintf(stderr, "qemu: thread naming not supported on this host\n");
        name_threads = false;
    }
}

static void error_exit(int err, const char *msg)
{
    char *pstr;

    FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER,
                  NULL, err, 0, (LPTSTR)&pstr, 2, NULL);
    fprintf(stderr, "qemu: %s: %s\n", msg, pstr);
    LocalFree(pstr);
    abort();
}

void qemu_mutex_init(QemuMutex *mutex)
{
    InitializeSRWLock(&mutex->lock);
    qemu_mutex_post_init(mutex);
}

void qemu_mutex_destroy(QemuMutex *mutex)
{
    assert(mutex->initialized);
    mutex->initialized = false;
    InitializeSRWLock(&mutex->lock);
}

void qemu_mutex_lock_impl(QemuMutex *mutex, const char *file, const int line)
{
    assert(mutex->initialized);
    qemu_mutex_pre_lock(mutex, file, line);
    AcquireSRWLockExclusive(&mutex->lock);
    qemu_mutex_post_lock(mutex, file, line);
}

int qemu_mutex_trylock_impl(QemuMutex *mutex, const char *file, const int line)
{
    int owned;

    assert(mutex->initialized);
    owned = TryAcquireSRWLockExclusive(&mutex->lock);
    if (owned) {
        qemu_mutex_post_lock(mutex, file, line);
        return 0;
    }
    return -EBUSY;
}

void qemu_mutex_unlock_impl(QemuMutex *mutex, const char *file, const int line)
{
    assert(mutex->initialized);
    qemu_mutex_pre_unlock(mutex, file, line);
    ReleaseSRWLockExclusive(&mutex->lock);
}

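/*
 * Illustrative usage sketch (not part of this file): callers normally go
 * through the qemu_mutex_lock()/qemu_mutex_trylock()/qemu_mutex_unlock()
 * wrappers from "qemu/thread.h", which pass __FILE__ and __LINE__ down to
 * the *_impl functions above, e.g.:
 *
 *     QemuMutex lock;
 *     qemu_mutex_init(&lock);
 *     if (qemu_mutex_trylock(&lock) == 0) {   // 0 on success, -EBUSY if held
 *         ... critical section ...
 *         qemu_mutex_unlock(&lock);
 *     }
 */
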
void qemu_rec_mutex_init(QemuRecMutex *mutex)
{
    InitializeCriticalSection(&mutex->lock);
    mutex->initialized = true;
}

void qemu_rec_mutex_destroy(QemuRecMutex *mutex)
{
    assert(mutex->initialized);
    mutex->initialized = false;
    DeleteCriticalSection(&mutex->lock);
}

void qemu_rec_mutex_lock_impl(QemuRecMutex *mutex, const char *file, int line)
{
    assert(mutex->initialized);
    EnterCriticalSection(&mutex->lock);
}

int qemu_rec_mutex_trylock_impl(QemuRecMutex *mutex, const char *file, int line)
{
    assert(mutex->initialized);
    return !TryEnterCriticalSection(&mutex->lock);
}

void qemu_rec_mutex_unlock_impl(QemuRecMutex *mutex, const char *file, int line)
{
    assert(mutex->initialized);
    LeaveCriticalSection(&mutex->lock);
}

void qemu_cond_init(QemuCond *cond)
{
    memset(cond, 0, sizeof(*cond));
    InitializeConditionVariable(&cond->var);
    cond->initialized = true;
}

void qemu_cond_destroy(QemuCond *cond)
{
    assert(cond->initialized);
    cond->initialized = false;
    InitializeConditionVariable(&cond->var);
}

void qemu_cond_signal(QemuCond *cond)
{
    assert(cond->initialized);
    WakeConditionVariable(&cond->var);
}

void qemu_cond_broadcast(QemuCond *cond)
{
    assert(cond->initialized);
    WakeAllConditionVariable(&cond->var);
}

void qemu_cond_wait_impl(QemuCond *cond, QemuMutex *mutex, const char *file, const int line)
{
    assert(cond->initialized);
    qemu_mutex_pre_unlock(mutex, file, line);
    SleepConditionVariableSRW(&cond->var, &mutex->lock, INFINITE, 0);
    qemu_mutex_post_lock(mutex, file, line);
}

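/*
 * Illustrative usage sketch (not part of this file): as with pthreads,
 * condition variables are meant to be used with a predicate that is
 * re-checked in a loop while the associated mutex is held, e.g.:
 *
 *     qemu_mutex_lock(&lock);
 *     while (!predicate) {
 *         qemu_cond_wait(&cond, &lock);
 *     }
 *     qemu_mutex_unlock(&lock);
 *
 * The qemu_cond_wait() wrapper from "qemu/thread.h" supplies __FILE__ and
 * __LINE__ to qemu_cond_wait_impl() above; `lock`, `cond` and `predicate`
 * are caller-defined names used only for this example.
 */
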
bool qemu_cond_timedwait_impl(QemuCond *cond, QemuMutex *mutex, int ms,
                              const char *file, const int line)
{
    int rc = 0;

    assert(cond->initialized);
    trace_qemu_mutex_unlock(mutex, file, line);
    if (!SleepConditionVariableSRW(&cond->var, &mutex->lock, ms, 0)) {
        rc = GetLastError();
    }
    trace_qemu_mutex_locked(mutex, file, line);
    if (rc && rc != ERROR_TIMEOUT) {
        error_exit(rc, __func__);
    }
    return rc != ERROR_TIMEOUT;
}

void qemu_sem_init(QemuSemaphore *sem, int init)
{
    /* Manual reset. */
    sem->sema = CreateSemaphore(NULL, init, LONG_MAX, NULL);
    sem->initialized = true;
}

void qemu_sem_destroy(QemuSemaphore *sem)
{
    assert(sem->initialized);
    sem->initialized = false;
    CloseHandle(sem->sema);
}

void qemu_sem_post(QemuSemaphore *sem)
{
    assert(sem->initialized);
    ReleaseSemaphore(sem->sema, 1, NULL);
}

int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc;

    assert(sem->initialized);
    rc = WaitForSingleObject(sem->sema, ms);
    if (rc == WAIT_OBJECT_0) {
        return 0;
    }
    if (rc != WAIT_TIMEOUT) {
        error_exit(GetLastError(), __func__);
    }
    return -1;
}

void qemu_sem_wait(QemuSemaphore *sem)
{
    assert(sem->initialized);
    if (WaitForSingleObject(sem->sema, INFINITE) != WAIT_OBJECT_0) {
        error_exit(GetLastError(), __func__);
    }
}

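/*
 * Illustrative usage sketch (not part of this file): qemu_sem_timedwait()
 * returns 0 when the semaphore was acquired and -1 on timeout, so a
 * bounded wait typically looks like:
 *
 *     if (qemu_sem_timedwait(&sem, 100) < 0) {
 *         // timed out after 100 ms without a matching qemu_sem_post()
 *     }
 *
 * `sem` is a caller-defined QemuSemaphore used only for this example.
 */
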
/* Wrap a Win32 manual-reset event with a fast userspace path. The idea
 * is to reset the Win32 event lazily, as part of a test-reset-test-wait
 * sequence. Such a sequence is, indeed, how QemuEvents are used by
 * RCU and other subsystems!
 *
 * Valid transitions:
 * - free->set, when setting the event
 * - busy->set, when setting the event, followed by SetEvent
 * - set->free, when resetting the event
 * - free->busy, when waiting
 *
 * set->busy does not happen (it can be observed from the outside but
 * it really is set->free->busy).
 *
 * busy->free provably cannot happen; to enforce it, the set->free transition
 * is done with an OR, which becomes a no-op if the event has concurrently
 * transitioned to free or busy (and is faster than cmpxchg).
 */

#define EV_SET         0
#define EV_FREE        1
#define EV_BUSY       -1

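/*
 * Illustrative sketch of the test-reset-test-wait sequence mentioned above
 * (not part of this file); `flag` and `ev` stand for a caller-defined
 * condition and QemuEvent:
 *
 *     // waiter side                          // signaller side
 *     if (!qatomic_read(&flag)) {             qatomic_set(&flag, true);
 *         qemu_event_reset(&ev);              qemu_event_set(&ev);
 *         if (!qatomic_read(&flag)) {
 *             qemu_event_wait(&ev);
 *         }
 *     }
 */
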
void qemu_event_init(QemuEvent *ev, bool init)
{
    /* Manual reset. */
    ev->event = CreateEvent(NULL, TRUE, TRUE, NULL);
    ev->value = (init ? EV_SET : EV_FREE);
    ev->initialized = true;
}

void qemu_event_destroy(QemuEvent *ev)
{
    assert(ev->initialized);
    ev->initialized = false;
    CloseHandle(ev->event);
}

void qemu_event_set(QemuEvent *ev)
{
    assert(ev->initialized);

    /*
     * Pairs with both qemu_event_reset() and qemu_event_wait().
     *
     * qemu_event_set has release semantics, but because it *loads*
     * ev->value we need a full memory barrier here.
     */
    smp_mb();
    if (qatomic_read(&ev->value) != EV_SET) {
        int old = qatomic_xchg(&ev->value, EV_SET);

        /* Pairs with memory barrier after ResetEvent. */
        smp_mb__after_rmw();
        if (old == EV_BUSY) {
            /* There were waiters, wake them up. */
            SetEvent(ev->event);
        }
    }
}

void qemu_event_reset(QemuEvent *ev)
{
    assert(ev->initialized);

    /*
     * If there was a concurrent reset (or even reset+wait),
     * do nothing. Otherwise change EV_SET->EV_FREE.
     */
    qatomic_or(&ev->value, EV_FREE);

    /*
     * Order reset before checking the condition in the caller.
     * Pairs with the first memory barrier in qemu_event_set().
     */
    smp_mb__after_rmw();
}

void qemu_event_wait(QemuEvent *ev)
{
    unsigned value;

    assert(ev->initialized);

    /*
     * qemu_event_wait must synchronize with qemu_event_set even if it does
     * not go down the slow path, so the load-acquire here is needed; it
     * synchronizes with the first memory barrier in qemu_event_set().
     *
     * If we do go down the slow path, there is no requirement at all: we
     * might miss a qemu_event_set() here but ultimately the memory barrier in
     * qemu_futex_wait() will ensure the check is done correctly.
     */
    value = qatomic_load_acquire(&ev->value);
    if (value != EV_SET) {
        if (value == EV_FREE) {
            /*
             * Here the underlying kernel event is reset, but qemu_event_set is
             * not yet going to call SetEvent. However, there will be another
             * check for EV_SET below when setting EV_BUSY. At that point it
             * is safe to call WaitForSingleObject.
             */
            ResetEvent(ev->event);

            /*
             * It is not clear whether ResetEvent provides this barrier; kernel
             * APIs (KeResetEvent/KeClearEvent) do not. Better safe than sorry!
             */
            smp_mb();

            /*
             * Leave the event reset and tell qemu_event_set that there are
             * waiters. No need to retry, because there cannot be a concurrent
             * busy->free transition. After the CAS, the event will be either
             * set or busy.
             */
            if (qatomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
                return;
            }
        }

        /*
         * ev->value is now EV_BUSY. Since we didn't observe EV_SET,
         * qemu_event_set() must observe EV_BUSY and call SetEvent().
         */
        WaitForSingleObject(ev->event, INFINITE);
    }
}

struct QemuThreadData {
    /* Passed to win32_start_routine. */
    void *(*start_routine)(void *);
    void *arg;
    short mode;
    NotifierList exit;

    /* Only used for joinable threads. */
    bool exited;
    void *ret;
    CRITICAL_SECTION cs;
};

static bool atexit_registered;
static NotifierList main_thread_exit;

static __thread QemuThreadData *qemu_thread_data;

static void run_main_thread_exit(void)
{
    notifier_list_notify(&main_thread_exit, NULL);
}

void qemu_thread_atexit_add(Notifier *notifier)
{
    if (!qemu_thread_data) {
        if (!atexit_registered) {
            atexit_registered = true;
            atexit(run_main_thread_exit);
        }
        notifier_list_add(&main_thread_exit, notifier);
    } else {
        notifier_list_add(&qemu_thread_data->exit, notifier);
    }
}

void qemu_thread_atexit_remove(Notifier *notifier)
{
    notifier_remove(notifier);
}

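/*
 * Illustrative usage sketch (not part of this file): a caller registers a
 * Notifier whose callback runs when the current thread exits (or, for the
 * main thread, at process atexit() time):
 *
 *     static void my_cleanup(Notifier *n, void *unused)
 *     {
 *         // per-thread cleanup
 *     }
 *     static __thread Notifier my_exit_notifier;
 *
 *     my_exit_notifier.notify = my_cleanup;
 *     qemu_thread_atexit_add(&my_exit_notifier);
 *
 * my_cleanup and my_exit_notifier are hypothetical names used only for
 * this example.
 */
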
static unsigned __stdcall win32_start_routine(void *arg)
{
    QemuThreadData *data = (QemuThreadData *) arg;
    void *(*start_routine)(void *) = data->start_routine;
    void *thread_arg = data->arg;

    qemu_thread_data = data;
    qemu_thread_exit(start_routine(thread_arg));
    abort();
}

void qemu_thread_exit(void *arg)
{
    QemuThreadData *data = qemu_thread_data;

    notifier_list_notify(&data->exit, NULL);
    if (data->mode == QEMU_THREAD_JOINABLE) {
        data->ret = arg;
        EnterCriticalSection(&data->cs);
        data->exited = true;
        LeaveCriticalSection(&data->cs);
    } else {
        g_free(data);
    }
    _endthreadex(0);
}

void *qemu_thread_join(QemuThread *thread)
{
    QemuThreadData *data;
    void *ret;
    HANDLE handle;

    data = thread->data;
    if (data->mode == QEMU_THREAD_DETACHED) {
        return NULL;
    }

    /*
     * Because multiple copies of the QemuThread can exist via
     * qemu_thread_get_self, we need to store a value that cannot
     * leak there. The simplest, non-racy way is to store the TID,
     * discard the handle that _beginthreadex gives back, and
     * get another copy of the handle here.
     */
    handle = qemu_thread_get_handle(thread);
    if (handle) {
        WaitForSingleObject(handle, INFINITE);
        CloseHandle(handle);
    }
    ret = data->ret;
    DeleteCriticalSection(&data->cs);
    g_free(data);
    return ret;
}

static bool set_thread_description(HANDLE h, const char *name)
{
    HRESULT hr;
    g_autofree wchar_t *namew = NULL;

    if (!load_set_thread_description()) {
        return false;
    }

    namew = g_utf8_to_utf16(name, -1, NULL, NULL, NULL);
    if (!namew) {
        return false;
    }

    hr = SetThreadDescriptionFunc(h, namew);

    return SUCCEEDED(hr);
}

void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void *),
                        void *arg, int mode)
{
    HANDLE hThread;
    struct QemuThreadData *data;

    data = g_malloc(sizeof *data);
    data->start_routine = start_routine;
    data->arg = arg;
    data->mode = mode;
    data->exited = false;
    notifier_list_init(&data->exit);

    if (data->mode != QEMU_THREAD_DETACHED) {
        InitializeCriticalSection(&data->cs);
    }

    hThread = (HANDLE) _beginthreadex(NULL, 0, win32_start_routine,
                                      data, 0, &thread->tid);
    if (!hThread) {
        error_exit(GetLastError(), __func__);
    }
    if (name_threads && name && !set_thread_description(hThread, name)) {
        fprintf(stderr, "qemu: failed to set thread description: %s\n", name);
    }
    CloseHandle(hThread);

    thread->data = data;
}

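/*
 * Illustrative usage sketch (not part of this file): creating a joinable
 * worker thread and collecting its return value.
 *
 *     static void *worker(void *opaque)
 *     {
 *         return opaque;
 *     }
 *
 *     QemuThread t;
 *     qemu_thread_create(&t, "worker", worker, some_arg, QEMU_THREAD_JOINABLE);
 *     void *ret = qemu_thread_join(&t);
 *
 * worker and some_arg are hypothetical names; QEMU_THREAD_JOINABLE and
 * QEMU_THREAD_DETACHED are the two accepted values for the mode argument.
 */
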
int qemu_thread_set_affinity(QemuThread *thread, unsigned long *host_cpus,
                             unsigned long nbits)
{
    return -ENOSYS;
}

int qemu_thread_get_affinity(QemuThread *thread, unsigned long **host_cpus,
                             unsigned long *nbits)
{
    return -ENOSYS;
}

void qemu_thread_get_self(QemuThread *thread)
{
    thread->data = qemu_thread_data;
    thread->tid = GetCurrentThreadId();
}

HANDLE qemu_thread_get_handle(QemuThread *thread)
{
    QemuThreadData *data;
    HANDLE handle;

    data = thread->data;
    if (data->mode == QEMU_THREAD_DETACHED) {
        return NULL;
    }

    EnterCriticalSection(&data->cs);
    if (!data->exited) {
        handle = OpenThread(SYNCHRONIZE | THREAD_SUSPEND_RESUME |
                            THREAD_SET_CONTEXT, FALSE, thread->tid);
    } else {
        handle = NULL;
    }
    LeaveCriticalSection(&data->cs);
    return handle;
}

bool qemu_thread_is_self(QemuThread *thread)
{
    return GetCurrentThreadId() == thread->tid;
}