/*
 * coroutine queues and locks
 *
 * Copyright (c) 2011 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * The lock-free mutex implementation is based on OSv
 * (core/lfmutex.cc, include/lockfree/mutex.hh).
 * Copyright (C) 2013 Cloudius Systems, Ltd.
 */

#include "qemu/osdep.h"
#include "qemu/coroutine.h"
#include "qemu/coroutine_int.h"
#include "qemu/processor.h"
#include "qemu/queue.h"
#include "block/aio.h"
#include "trace.h"

void qemu_co_queue_init(CoQueue *queue)
{
    QSIMPLEQ_INIT(&queue->entries);
}

void coroutine_fn qemu_co_queue_wait_impl(CoQueue *queue, QemuLockable *lock)
{
    Coroutine *self = qemu_coroutine_self();
    QSIMPLEQ_INSERT_TAIL(&queue->entries, self, co_queue_next);

    if (lock) {
        qemu_lockable_unlock(lock);
    }

    /* There is no race condition here. Other threads will call
     * aio_co_schedule on our AioContext, which can reenter this
     * coroutine but only after this yield and after the main loop
     * has gone through the next iteration.
     */
    qemu_coroutine_yield();
    assert(qemu_in_coroutine());

    /* TODO: OSv implements wait morphing here, where the wakeup
     * primitive automatically places the woken coroutine on the
     * mutex's queue. This avoids the thundering herd effect.
     * This could be implemented for CoMutexes, but not really for
     * other cases of QemuLockable.
     */
    if (lock) {
        qemu_lockable_lock(lock);
    }
}
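
/* Example usage (illustrative sketch; the mutex, queue and condition names
 * below are hypothetical).  qemu_co_queue_wait(), the wrapper around this
 * function, releases the lock while the coroutine sleeps and retakes it
 * before returning, so the condition can be rechecked safely:
 *
 *     static CoMutex example_mutex;
 *     static CoQueue example_queue;
 *     static bool example_ready;
 *
 *     static void coroutine_fn example_waiter(void *opaque)
 *     {
 *         qemu_co_mutex_lock(&example_mutex);
 *         while (!example_ready) {
 *             qemu_co_queue_wait(&example_queue, &example_mutex);
 *         }
 *         qemu_co_mutex_unlock(&example_mutex);
 *     }
 */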

static bool qemu_co_queue_do_restart(CoQueue *queue, bool single)
{
    Coroutine *next;

    if (QSIMPLEQ_EMPTY(&queue->entries)) {
        return false;
    }

    while ((next = QSIMPLEQ_FIRST(&queue->entries)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&queue->entries, co_queue_next);
        aio_co_wake(next);
        if (single) {
            break;
        }
    }
    return true;
}

bool qemu_co_queue_next(CoQueue *queue)
{
    return qemu_co_queue_do_restart(queue, true);
}

void qemu_co_queue_restart_all(CoQueue *queue)
{
    qemu_co_queue_do_restart(queue, false);
}

bool qemu_co_enter_next_impl(CoQueue *queue, QemuLockable *lock)
{
    Coroutine *next;

    next = QSIMPLEQ_FIRST(&queue->entries);
    if (!next) {
        return false;
    }

    QSIMPLEQ_REMOVE_HEAD(&queue->entries, co_queue_next);
    if (lock) {
        qemu_lockable_unlock(lock);
    }
    aio_co_wake(next);
    if (lock) {
        qemu_lockable_lock(lock);
    }
    return true;
}
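
/* Example (illustrative sketch; the ExampleState type and its fields are
 * hypothetical): qemu_co_enter_next(), the wrapper around this function,
 * can wake one queued coroutine from outside coroutine context, e.g. from
 * a timer or bottom-half callback:
 *
 *     static void example_timer_cb(void *opaque)
 *     {
 *         ExampleState *s = opaque;
 *
 *         qemu_co_enter_next(&s->wait_queue, &s->lock);
 *     }
 */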

bool qemu_co_queue_empty(CoQueue *queue)
{
    return QSIMPLEQ_FIRST(&queue->entries) == NULL;
}

/* The wait records are handled with a multiple-producer, single-consumer
 * lock-free queue.  There cannot be two concurrent pop_waiter() calls
 * because pop_waiter() can only be called while mutex->handoff is zero.
 * This can happen in three cases:
 * - in qemu_co_mutex_unlock, before the hand-off protocol has started.
 *   In this case, qemu_co_mutex_lock will see mutex->handoff == 0 and
 *   not take part in the handoff.
 * - in qemu_co_mutex_lock, if it steals the hand-off responsibility from
 *   qemu_co_mutex_unlock.  In this case, qemu_co_mutex_unlock will fail
 *   the cmpxchg (it will see either 0 or the next sequence value) and
 *   exit.  The next hand-off cannot begin until qemu_co_mutex_lock has
 *   woken up someone.
 * - in qemu_co_mutex_unlock, if it takes the hand-off token itself.
 *   In this case another iteration starts with mutex->handoff == 0;
 *   a concurrent qemu_co_mutex_lock will fail the cmpxchg, and
 *   qemu_co_mutex_unlock will go back to case (1).
 *
 * The following functions manage this queue.
 */
typedef struct CoWaitRecord {
    Coroutine *co;
    QSLIST_ENTRY(CoWaitRecord) next;
} CoWaitRecord;

static void push_waiter(CoMutex *mutex, CoWaitRecord *w)
{
    w->co = qemu_coroutine_self();
    QSLIST_INSERT_HEAD_ATOMIC(&mutex->from_push, w, next);
}

static void move_waiters(CoMutex *mutex)
{
    QSLIST_HEAD(, CoWaitRecord) reversed;
    QSLIST_MOVE_ATOMIC(&reversed, &mutex->from_push);
    while (!QSLIST_EMPTY(&reversed)) {
        CoWaitRecord *w = QSLIST_FIRST(&reversed);
        QSLIST_REMOVE_HEAD(&reversed, next);
        QSLIST_INSERT_HEAD(&mutex->to_pop, w, next);
    }
}

static CoWaitRecord *pop_waiter(CoMutex *mutex)
{
    CoWaitRecord *w;

    if (QSLIST_EMPTY(&mutex->to_pop)) {
        move_waiters(mutex);
        if (QSLIST_EMPTY(&mutex->to_pop)) {
            return NULL;
        }
    }
    w = QSLIST_FIRST(&mutex->to_pop);
    QSLIST_REMOVE_HEAD(&mutex->to_pop, next);
    return w;
}

static bool has_waiters(CoMutex *mutex)
{
    return !QSLIST_EMPTY(&mutex->to_pop) || !QSLIST_EMPTY(&mutex->from_push);
}
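
/* Worked example (illustrative): coroutines A, B and C block on the mutex
 * and call push_waiter() in that order.  QSLIST_INSERT_HEAD_ATOMIC pushes
 * to the head, so the lists end up as:
 *
 *     from_push: C -> B -> A        to_pop: (empty)
 *
 * The first pop_waiter() calls move_waiters(), which reverses from_push
 * into to_pop:
 *
 *     from_push: (empty)            to_pop: A -> B -> C
 *
 * Successive pops therefore wake A, then B, then C: FIFO order is
 * preserved even though both lists are singly-linked and pushed at the
 * head.
 */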

void qemu_co_mutex_init(CoMutex *mutex)
{
    memset(mutex, 0, sizeof(*mutex));
}

static void coroutine_fn qemu_co_mutex_wake(CoMutex *mutex, Coroutine *co)
{
    /* Read co before co->ctx; pairs with smp_wmb() in
     * qemu_coroutine_enter().
     */
    smp_read_barrier_depends();
    mutex->ctx = co->ctx;
    aio_co_wake(co);
}

static void coroutine_fn qemu_co_mutex_lock_slowpath(AioContext *ctx,
                                                     CoMutex *mutex)
{
    Coroutine *self = qemu_coroutine_self();
    CoWaitRecord w;
    unsigned old_handoff;

    trace_qemu_co_mutex_lock_entry(mutex, self);
    push_waiter(mutex, &w);

    /* This is the "Responsibility Hand-Off" protocol; a lock() picks from
     * a concurrent unlock() the responsibility of waking somebody up.
     */
    old_handoff = qatomic_mb_read(&mutex->handoff);
    if (old_handoff &&
        has_waiters(mutex) &&
        qatomic_cmpxchg(&mutex->handoff, old_handoff, 0) == old_handoff) {
        /* There can be no concurrent pops, because there can be only
         * one active handoff at a time.
         */
        CoWaitRecord *to_wake = pop_waiter(mutex);
        Coroutine *co = to_wake->co;
        if (co == self) {
            /* We got the lock ourselves! */
            assert(to_wake == &w);
            mutex->ctx = ctx;
            return;
        }

        qemu_co_mutex_wake(mutex, co);
    }

    qemu_coroutine_yield();
    trace_qemu_co_mutex_lock_return(mutex, self);
}

void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex)
{
    AioContext *ctx = qemu_get_current_aio_context();
    Coroutine *self = qemu_coroutine_self();
    int waiters, i;

    /* Running a very small critical section on pthread_mutex_t and CoMutex
     * shows that pthread_mutex_t is much faster because it doesn't actually
     * go to sleep.  What happens is that the critical section is shorter
     * than the latency of entering the kernel and thus FUTEX_WAIT always
     * fails.  With CoMutex there is no such latency but you still want to
     * avoid wait and wakeup.  So introduce it artificially.
     */
    i = 0;
retry_fast_path:
    waiters = qatomic_cmpxchg(&mutex->locked, 0, 1);
    if (waiters != 0) {
        while (waiters == 1 && ++i < 1000) {
            if (qatomic_read(&mutex->ctx) == ctx) {
                break;
            }
            if (qatomic_read(&mutex->locked) == 0) {
                goto retry_fast_path;
            }
            cpu_relax();
        }
        waiters = qatomic_fetch_inc(&mutex->locked);
    }

    if (waiters == 0) {
        /* Uncontended. */
        trace_qemu_co_mutex_lock_uncontended(mutex, self);
        mutex->ctx = ctx;
    } else {
        qemu_co_mutex_lock_slowpath(ctx, mutex);
    }
    mutex->holder = self;
    self->locks_held++;
}

void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
{
    Coroutine *self = qemu_coroutine_self();

    trace_qemu_co_mutex_unlock_entry(mutex, self);

    assert(mutex->locked);
    assert(mutex->holder == self);
    assert(qemu_in_coroutine());

    mutex->ctx = NULL;
    mutex->holder = NULL;
    self->locks_held--;
    if (qatomic_fetch_dec(&mutex->locked) == 1) {
        /* No waiting qemu_co_mutex_lock().  Pfew, that was easy! */
        return;
    }

    for (;;) {
        CoWaitRecord *to_wake = pop_waiter(mutex);
        unsigned our_handoff;

        if (to_wake) {
            qemu_co_mutex_wake(mutex, to_wake->co);
            break;
        }

        /* Some concurrent lock() is in progress (we know this because
         * mutex->locked was >1) but it hasn't yet put itself on the wait
         * queue.  Pick a sequence number for the handoff protocol (not 0).
         */
        if (++mutex->sequence == 0) {
            mutex->sequence = 1;
        }

        our_handoff = mutex->sequence;
        qatomic_mb_set(&mutex->handoff, our_handoff);
        if (!has_waiters(mutex)) {
            /* The concurrent lock has not added itself yet, so it
             * will be able to pick our handoff.
             */
            break;
        }

        /* Try to do the handoff protocol ourselves; if somebody else has
         * already taken it, however, we're done and they're responsible.
         */
        if (qatomic_cmpxchg(&mutex->handoff, our_handoff, 0) != our_handoff) {
            break;
        }
    }

    trace_qemu_co_mutex_unlock_return(mutex, self);
}
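
/* Worked example of the hand-off (illustrative): coroutine U is in
 * qemu_co_mutex_unlock() while coroutine L, in the slow path of
 * qemu_co_mutex_lock(), has already incremented mutex->locked but has
 * not yet called push_waiter():
 *
 *     U: pop_waiter() returns NULL        (L is not on the queue yet)
 *     U: qatomic_mb_set(&handoff, 7)      (7 is the current sequence)
 *     L: push_waiter(&w)
 *     L: reads handoff == 7, cmpxchg(7 -> 0) succeeds
 *     L: pop_waiter() returns &w, its own record: L now owns the mutex
 *     U: has_waiters() is true, but cmpxchg(7 -> 0) fails, so U exits;
 *        L has stolen the responsibility of waking somebody up.
 */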

struct CoRwTicket {
    bool read;
    Coroutine *co;
    QSIMPLEQ_ENTRY(CoRwTicket) next;
};

void qemu_co_rwlock_init(CoRwlock *lock)
{
    qemu_co_mutex_init(&lock->mutex);
    lock->owners = 0;
    QSIMPLEQ_INIT(&lock->tickets);
}

/* Releases the internal CoMutex. */
static void qemu_co_rwlock_maybe_wake_one(CoRwlock *lock)
{
    CoRwTicket *tkt = QSIMPLEQ_FIRST(&lock->tickets);
    Coroutine *co = NULL;

    /*
     * Setting lock->owners here prevents rdlock and wrlock from
     * sneaking in between unlock and wake.
     */

    if (tkt) {
        if (tkt->read) {
            if (lock->owners >= 0) {
                lock->owners++;
                co = tkt->co;
            }
        } else {
            if (lock->owners == 0) {
                lock->owners = -1;
                co = tkt->co;
            }
        }
    }

    if (co) {
        QSIMPLEQ_REMOVE_HEAD(&lock->tickets, next);
        qemu_co_mutex_unlock(&lock->mutex);
        aio_co_wake(co);
    } else {
        qemu_co_mutex_unlock(&lock->mutex);
    }
}

void qemu_co_rwlock_rdlock(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    qemu_co_mutex_lock(&lock->mutex);
    /* For fairness, wait if a writer is in line. */
    if (lock->owners == 0 ||
        (lock->owners > 0 && QSIMPLEQ_EMPTY(&lock->tickets))) {
        lock->owners++;
        qemu_co_mutex_unlock(&lock->mutex);
    } else {
        CoRwTicket my_ticket = { true, self };

        QSIMPLEQ_INSERT_TAIL(&lock->tickets, &my_ticket, next);
        qemu_co_mutex_unlock(&lock->mutex);
        qemu_coroutine_yield();
        assert(lock->owners >= 1);

        /* Possibly wake another reader, which will wake the next in line. */
        qemu_co_mutex_lock(&lock->mutex);
        qemu_co_rwlock_maybe_wake_one(lock);
    }

    self->locks_held++;
}

void qemu_co_rwlock_unlock(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    assert(qemu_in_coroutine());
    self->locks_held--;

    qemu_co_mutex_lock(&lock->mutex);
    if (lock->owners > 0) {
        lock->owners--;
    } else {
        assert(lock->owners == -1);
        lock->owners = 0;
    }

    qemu_co_rwlock_maybe_wake_one(lock);
}
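
/* Example usage (illustrative sketch; the lock and function names are
 * hypothetical).  Multiple readers may hold the lock concurrently, while
 * a writer excludes both readers and other writers:
 *
 *     static CoRwlock table_lock;
 *
 *     static void coroutine_fn example_reader(void *opaque)
 *     {
 *         qemu_co_rwlock_rdlock(&table_lock);
 *         ... read the shared table ...
 *         qemu_co_rwlock_unlock(&table_lock);
 *     }
 *
 *     static void coroutine_fn example_writer(void *opaque)
 *     {
 *         qemu_co_rwlock_wrlock(&table_lock);
 *         ... modify the shared table ...
 *         qemu_co_rwlock_unlock(&table_lock);
 *     }
 */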

void qemu_co_rwlock_downgrade(CoRwlock *lock)
{
    qemu_co_mutex_lock(&lock->mutex);
    assert(lock->owners == -1);
    lock->owners = 1;

    /* Possibly wake another reader, which will wake the next in line. */
    qemu_co_rwlock_maybe_wake_one(lock);
}

void qemu_co_rwlock_wrlock(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    qemu_co_mutex_lock(&lock->mutex);
    if (lock->owners == 0) {
        lock->owners = -1;
        qemu_co_mutex_unlock(&lock->mutex);
    } else {
        CoRwTicket my_ticket = { false, self };

        QSIMPLEQ_INSERT_TAIL(&lock->tickets, &my_ticket, next);
        qemu_co_mutex_unlock(&lock->mutex);
        qemu_coroutine_yield();
        assert(lock->owners == -1);
    }

    self->locks_held++;
}

void qemu_co_rwlock_upgrade(CoRwlock *lock)
{
    qemu_co_mutex_lock(&lock->mutex);
    assert(lock->owners > 0);
    /* For fairness, wait if a writer is in line. */
    if (lock->owners == 1 && QSIMPLEQ_EMPTY(&lock->tickets)) {
        lock->owners = -1;
        qemu_co_mutex_unlock(&lock->mutex);
    } else {
        CoRwTicket my_ticket = { false, qemu_coroutine_self() };

        lock->owners--;
        QSIMPLEQ_INSERT_TAIL(&lock->tickets, &my_ticket, next);
        qemu_co_rwlock_maybe_wake_one(lock);
        qemu_coroutine_yield();
        assert(lock->owners == -1);
    }
}
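
/* Example of the read-then-upgrade pattern (illustrative sketch; names
 * are hypothetical).  If the lock is contended, qemu_co_rwlock_upgrade()
 * gives up the read share and yields while waiting for the write side,
 * so other writers can run in between; anything observed under the read
 * lock must be revalidated after the upgrade:
 *
 *     qemu_co_rwlock_rdlock(&table_lock);
 *     if (entry_needs_update(entry)) {
 *         qemu_co_rwlock_upgrade(&table_lock);
 *         ... recheck entry, then modify the shared table ...
 *     }
 *     qemu_co_rwlock_unlock(&table_lock);
 */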