]> git.proxmox.com Git - wasi-libc.git/blame - libc-top-half/musl/src/thread/pthread_barrier_wait.c
threads: enable access to `pthread_barrier_*` functions (#358)
[wasi-libc.git] / libc-top-half / musl / src / thread / pthread_barrier_wait.c
CommitLineData
320054e8
DG
1#include "pthread_impl.h"
2
/* Slow-path wait for process-shared barriers.
 *
 * Called from pthread_barrier_wait() when the barrier was initialized
 * with PTHREAD_PROCESS_SHARED (encoded as a negative _b_limit).  All
 * synchronization goes through fields inside the pthread_barrier_t
 * itself, since no address private to one process may be used.
 *
 * _b_lock serves double duty: it is the entry gate (CAS from 0) and,
 * during exit, a reference count that is decremented by the CAS loop
 * below, so the last leaver can release threads waiting to reuse or
 * destroy the barrier ("self-sync'd destruction").
 *
 * Returns PTHREAD_BARRIER_SERIAL_THREAD for exactly one waiter per
 * round, 0 for all others. */
static int pshared_barrier_wait(pthread_barrier_t *b)
{
	/* _b_limit has its sign bit set to mark process-shared; mask it
	 * off and add 1 to recover the count passed to barrier_init. */
	int limit = (b->_b_limit & INT_MAX) + 1;
	int ret = 0;
	int v, w;

	/* A one-thread barrier is trivially satisfied by the caller. */
	if (limit==1) return PTHREAD_BARRIER_SERIAL_THREAD;

	/* Acquire the barrier: CAS _b_lock from 0 to <limit>.  A nonzero
	 * value means a previous round is still draining; sleep on it. */
	while ((v=a_cas(&b->_b_lock, 0, limit)))
		__wait(&b->_b_lock, &b->_b_waiters, v, 0);

	/* Wait for <limit> threads to get to the barrier */
	if (++b->_b_count == limit) {
		/* We are the last arrival: reset the count, claim the
		 * serial-thread return value, and release the others. */
		a_store(&b->_b_count, 0);
		ret = PTHREAD_BARRIER_SERIAL_THREAD;
		if (b->_b_waiters2) __wake(&b->_b_count, -1, 0);
	} else {
		/* Not last: drop the entry gate so the remaining arrivals
		 * can get in, then sleep until the count is reset above. */
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 0);
		while ((v=b->_b_count)>0)
			__wait(&b->_b_count, &b->_b_waiters2, v, 0);
	}

#ifdef __wasilibc_unmodified_upstream /* WASI does not understand processes or locking between them. */
	__vm_lock();
#endif

	/* Ensure all threads have a vm lock before proceeding */
	/* Second rendezvous: _b_count is driven negative, one step per
	 * thread, until it reaches 1-limit; the thread that gets there
	 * resets it to 0 and wakes the rest. */
	if (a_fetch_add(&b->_b_count, -1)==1-limit) {
		a_store(&b->_b_count, 0);
		if (b->_b_waiters2) __wake(&b->_b_count, -1, 0);
	} else {
		while ((v=b->_b_count))
			__wait(&b->_b_count, &b->_b_waiters2, v, 0);
	}

	/* Perform a recursive unlock suitable for self-sync'd destruction */
	/* Each leaver decrements _b_lock by one; INT_MIN+1 is a sentinel
	 * (presumably set by pthread_barrier_destroy — confirm against
	 * that source) that is replaced by 0 rather than decremented. */
	do {
		v = b->_b_lock;
		w = b->_b_waiters;
	} while (a_cas(&b->_b_lock, v, v==INT_MIN+1 ? 0 : v-1) != v);

	/* Wake a thread waiting to reuse or destroy the barrier */
	if (v==INT_MIN+1 || (v==1 && w))
		__wake(&b->_b_lock, 1, 0);

#ifdef __wasilibc_unmodified_upstream /* WASI does not understand processes or locking between them. */
	__vm_unlock();
#endif

	return ret;
}
55
/* Per-round barrier state for process-private barriers.
 *
 * Allocated on the stack of the first thread to enter a round (the
 * "instance owner" in pthread_barrier_wait below), so its lifetime is
 * bounded by that thread's stay in the function; the owner must not
 * return until all other threads are done touching it. */
struct instance
{
	volatile int count;     /* threads currently inside this round */
	volatile int last;      /* set to 1 when the last thread arrives */
	volatile int waiters;   /* waiter count for the futex on `last` */
	volatile int finished;  /* exit handshake between owner and last leaver */
};
63
/* Wait on a barrier until the full count of threads has arrived.
 *
 * Returns PTHREAD_BARRIER_SERIAL_THREAD in exactly one of the released
 * threads; 0 in the others.  Process-shared barriers (negative
 * _b_limit) are delegated to pshared_barrier_wait().
 *
 * The process-private fast path keeps the round's shared state in a
 * `struct instance` that lives on the stack of the first arriving
 * thread (the "instance owner").  The owner therefore cannot return —
 * and thus invalidate that stack memory — until the last thread to
 * exit the round signals it via inst->finished. */
int pthread_barrier_wait(pthread_barrier_t *b)
{
	int limit = b->_b_limit;
	struct instance *inst;

	/* Trivial case: count was set at 1 */
	if (!limit) return PTHREAD_BARRIER_SERIAL_THREAD;

	/* Process-shared barriers require a separate, inefficient wait */
	if (limit < 0) return pshared_barrier_wait(b);

	/* Otherwise we need a lock on the barrier object */
	while (a_swap(&b->_b_lock, 1))
		__wait(&b->_b_lock, &b->_b_waiters, 1, 1);
	inst = b->_b_inst;

	/* First thread to enter the barrier becomes the "instance owner" */
	if (!inst) {
		/* new_inst is on OUR stack; every later arrival this round
		 * will use it through b->_b_inst. */
		struct instance new_inst = { 0 };
		int spins = 200;
		b->_b_inst = inst = &new_inst;
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
		/* Spin briefly hoping the round completes without a futex
		 * round-trip, then sleep until the last exiting thread
		 * bumps `finished` past the value we set here. */
		while (spins-- && !inst->finished)
			a_spin();
		a_inc(&inst->finished);
		while (inst->finished == 1)
#ifdef __wasilibc_unmodified_upstream
			/* Private futex wait, falling back to a shared wait
			 * on kernels without FUTEX_PRIVATE (-ENOSYS). */
			__syscall(SYS_futex,&inst->finished,FUTEX_WAIT|FUTEX_PRIVATE,1,0) != -ENOSYS
			|| __syscall(SYS_futex,&inst->finished,FUTEX_WAIT,1,0);
#else
			/* WASI has no syscalls; use the libc futex-wait shim. */
			__futexwait(&inst->finished, 1, 0);
#endif
		return PTHREAD_BARRIER_SERIAL_THREAD;
	}

	/* Last thread to enter the barrier wakes all non-instance-owners */
	if (++inst->count == limit) {
		/* Detach the instance so the next round starts fresh, drop
		 * the object lock, then release everyone parked on `last`. */
		b->_b_inst = 0;
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
		a_store(&inst->last, 1);
		if (inst->waiters)
			__wake(&inst->last, -1, 1);
	} else {
		/* Not last: unlock the barrier and sleep until `last` flips. */
		a_store(&b->_b_lock, 0);
		if (b->_b_waiters) __wake(&b->_b_lock, 1, 1);
		__wait(&inst->last, &inst->waiters, 0, 1);
	}

	/* Last thread to exit the barrier wakes the instance owner */
	/* (only if the owner already incremented `finished`, i.e. it went
	 * to sleep instead of completing its spin loop) */
	if (a_fetch_add(&inst->count,-1)==1 && a_fetch_add(&inst->finished,1))
		__wake(&inst->finished, 1, 1);

	return 0;
}