/*
 * Win32 implementation for mutex/cond/thread functions
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Author:
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include "qemu-common.h"
#include "qemu-thread.h"
#include <process.h>
#include <assert.h>
#include <limits.h>

static void error_exit(int err, const char *msg)
{
    char *pstr;

    FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER,
                  NULL, err, 0, (LPTSTR)&pstr, 2, NULL);
    fprintf(stderr, "qemu: %s: %s\n", msg, pstr);
    LocalFree(pstr);
    exit(1);
}

void qemu_mutex_init(QemuMutex *mutex)
{
    mutex->owner = 0;
    InitializeCriticalSection(&mutex->lock);
}

void qemu_mutex_lock(QemuMutex *mutex)
{
    EnterCriticalSection(&mutex->lock);

    /* Win32 CRITICAL_SECTIONs are recursive.  Assert that we're not
     * using them as such.
     */
    assert(mutex->owner == 0);
    mutex->owner = GetCurrentThreadId();
}

int qemu_mutex_trylock(QemuMutex *mutex)
{
    int owned;

    owned = TryEnterCriticalSection(&mutex->lock);
    if (owned) {
        assert(mutex->owner == 0);
        mutex->owner = GetCurrentThreadId();
    }
    return !owned;
}
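
/*
 * Note: like pthread_mutex_trylock, qemu_mutex_trylock returns zero on
 * success and nonzero if the lock is already held.  An illustrative
 * caller pattern (a sketch, not part of this file):
 *
 *     if (!qemu_mutex_trylock(&m)) {
 *         ... critical section ...
 *         qemu_mutex_unlock(&m);
 *     }
 */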

void qemu_mutex_unlock(QemuMutex *mutex)
{
    assert(mutex->owner == GetCurrentThreadId());
    mutex->owner = 0;
    LeaveCriticalSection(&mutex->lock);
}

void qemu_cond_init(QemuCond *cond)
{
    memset(cond, 0, sizeof(*cond));

    cond->sema = CreateSemaphore(NULL, 0, LONG_MAX, NULL);
    if (!cond->sema) {
        error_exit(GetLastError(), __func__);
    }
    cond->continue_event = CreateEvent(NULL,    /* security */
                                       FALSE,   /* auto-reset */
                                       FALSE,   /* not signaled */
                                       NULL);   /* name */
    if (!cond->continue_event) {
        error_exit(GetLastError(), __func__);
    }
}
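
/*
 * Design note (a summary of the scheme used here, not normative): this
 * file does not rely on native Win32 condition variables, and instead
 * builds one from two kernel objects.  cond->sema is a counting
 * semaphore on which waiters block; each signal or broadcast releases
 * one "slice" per thread to be woken.  cond->continue_event is an
 * auto-reset event on which the waking thread blocks until the last
 * woken waiter acknowledges, which guarantees that no waiter consumes
 * more than one slice.
 */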

void qemu_cond_signal(QemuCond *cond)
{
    DWORD result;

    /*
     * Signal only when there are waiters.  cond->waiters is
     * incremented by qemu_cond_wait under the external lock,
     * so we are safe about that.
     */
    if (cond->waiters == 0) {
        return;
    }

    /*
     * Waiting threads decrement it outside the external lock, but
     * only if another thread is executing qemu_cond_broadcast and
     * has the mutex.  So, it also cannot be decremented concurrently
     * with this particular access.
     */
    cond->target = cond->waiters - 1;
    result = SignalObjectAndWait(cond->sema, cond->continue_event,
                                 INFINITE, FALSE);
    if (result == WAIT_ABANDONED || result == WAIT_FAILED) {
        error_exit(GetLastError(), __func__);
    }
}
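
/*
 * Worked example (illustrative): with three threads blocked in
 * qemu_cond_wait, waiters == 3.  qemu_cond_signal sets target = 2,
 * atomically releases one semaphore slice and blocks on
 * continue_event.  Exactly one waiter consumes the slice, decrements
 * waiters from 3 to 2 == target, and sets continue_event, letting the
 * signaling thread return.
 */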

void qemu_cond_broadcast(QemuCond *cond)
{
    BOOLEAN result;
    /*
     * As in qemu_cond_signal, access to cond->waiters and
     * cond->target is locked via the external mutex.
     */
    if (cond->waiters == 0) {
        return;
    }

    cond->target = 0;
    result = ReleaseSemaphore(cond->sema, cond->waiters, NULL);
    if (!result) {
        error_exit(GetLastError(), __func__);
    }

    /*
     * At this point all waiters continue.  Each one takes its
     * slice of the semaphore.  Now it's our turn to wait: since
     * the external mutex is held, no thread can leave cond_wait
     * yet.  For this reason, we can be sure that no thread gets
     * a chance to eat *more* than one slice.  OTOH, it means
     * that the last waiter must send us a wake-up.
     */
    WaitForSingleObject(cond->continue_event, INFINITE);
}
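
/*
 * Worked example (illustrative): with waiters == 3, qemu_cond_broadcast
 * sets target = 0 and releases three slices at once.  All three waiters
 * wake; each decrements waiters (3 -> 2 -> 1 -> 0), and only the one
 * that reaches 0 == target sets continue_event to release the
 * broadcasting thread.
 */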

void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
{
    /*
     * This access is protected under the mutex.
     */
    cond->waiters++;

    /*
     * Unlock external mutex and wait for signal.
     * NOTE: we've held mutex locked long enough to increment
     * waiters count above, so there's no problem with
     * leaving mutex unlocked before we wait on semaphore.
     */
    qemu_mutex_unlock(mutex);
    WaitForSingleObject(cond->sema, INFINITE);

    /* Now waiters must rendezvous with the signaling thread and
     * let it continue.  For cond_broadcast this has heavy contention
     * and triggers a thundering herd.  So goes life.
     *
     * Decrease the waiters count.  The mutex is not taken, so we have
     * to do this atomically.
     *
     * All waiters contend for the mutex at the end of this function
     * until the signaling thread relinquishes it.  To ensure
     * each waiter consumes exactly one slice of the semaphore,
     * the signaling thread stops until it is told by the last
     * waiter that it can go on.
     */
    if (InterlockedDecrement(&cond->waiters) == cond->target) {
        SetEvent(cond->continue_event);
    }

    qemu_mutex_lock(mutex);
}
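
/*
 * Illustrative usage (a sketch; "ready" is a hypothetical predicate):
 * as with pthreads, callers should re-check their condition in a loop,
 * since a waiter can be overtaken between wake-up and re-acquiring the
 * mutex:
 *
 *     qemu_mutex_lock(&m);
 *     while (!ready) {
 *         qemu_cond_wait(&c, &m);
 *     }
 *     qemu_mutex_unlock(&m);
 */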

struct QemuThreadData {
    QemuThread *thread;
    void *(*start_routine)(void *);
    void *arg;
};

static int qemu_thread_tls_index = TLS_OUT_OF_INDEXES;

static unsigned __stdcall win32_start_routine(void *arg)
{
    struct QemuThreadData data = *(struct QemuThreadData *) arg;
    QemuThread *thread = data.thread;

    free(arg);
    TlsSetValue(qemu_thread_tls_index, thread);

    /*
     * Use DuplicateHandle instead of assigning thread->thread in the
     * creating thread to avoid races.  It's simpler this way than with
     * synchronization.
     */
    DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
                    GetCurrentProcess(), &thread->thread,
                    0, FALSE, DUPLICATE_SAME_ACCESS);

    qemu_thread_exit(data.start_routine(data.arg));
    abort();
}
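
/*
 * Note: GetCurrentThread() returns a pseudo-handle that only means
 * "the calling thread", so it cannot be handed to another thread as-is;
 * the DuplicateHandle call above turns it into a real handle that stays
 * valid process-wide.
 */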

void qemu_thread_exit(void *arg)
{
    QemuThread *thread = TlsGetValue(qemu_thread_tls_index);
    thread->ret = arg;
    CloseHandle(thread->thread);
    thread->thread = NULL;
    ExitThread(0);
}

static inline void qemu_thread_init(void)
{
    if (qemu_thread_tls_index == TLS_OUT_OF_INDEXES) {
        qemu_thread_tls_index = TlsAlloc();
        if (qemu_thread_tls_index == TLS_OUT_OF_INDEXES) {
            error_exit(ERROR_NO_SYSTEM_RESOURCES, __func__);
        }
    }
}

void qemu_thread_create(QemuThread *thread,
                        void *(*start_routine)(void *),
                        void *arg)
{
    HANDLE hThread;
    struct QemuThreadData *data;

    qemu_thread_init();
    data = qemu_malloc(sizeof *data);
    data->thread = thread;
    data->start_routine = start_routine;
    data->arg = arg;

    hThread = (HANDLE) _beginthreadex(NULL, 0, win32_start_routine,
                                      data, 0, NULL);
    if (!hThread) {
        error_exit(GetLastError(), __func__);
    }
    CloseHandle(hThread);
}
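
/*
 * Illustrative usage (a sketch; worker_fn and its opaque argument are
 * hypothetical):
 *
 *     static void *worker_fn(void *opaque) { ...; return NULL; }
 *
 *     QemuThread t;
 *     qemu_thread_create(&t, worker_fn, opaque);
 *
 * _beginthreadex is used above rather than CreateThread so that the C
 * runtime initializes its per-thread state.
 */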

void qemu_thread_get_self(QemuThread *thread)
{
    if (!thread->thread) {
        /* In the main thread of the process.  Initialize the QemuThread
           pointer in TLS, and use the dummy GetCurrentThread handle as
           the identifier for qemu_thread_is_self.  */
        qemu_thread_init();
        TlsSetValue(qemu_thread_tls_index, thread);
        thread->thread = GetCurrentThread();
    }
}

int qemu_thread_is_self(QemuThread *thread)
{
    QemuThread *this_thread = TlsGetValue(qemu_thread_tls_index);
    return this_thread->thread == thread->thread;
}
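
/*
 * Note on the handle comparison above: every thread created through
 * qemu_thread_create stores a distinct duplicated handle, while the
 * main thread stores the GetCurrentThread() pseudo-handle, so comparing
 * the stored handle values is enough to identify a thread.
 */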