libc-top-half/musl/src/thread/pthread_create.c (wasi-libc.git, commit 7d4dc2ed5f4ad5c5a1bbdda806fada9e017f5992)

#define _GNU_SOURCE
#include "pthread_impl.h"
#include "stdio_impl.h"
#include "libc.h"
#include "lock.h"
#include <sys/mman.h>
#include <string.h>
#include <stddef.h>

static void dummy_0()
{
}
weak_alias(dummy_0, __acquire_ptc);
weak_alias(dummy_0, __release_ptc);
weak_alias(dummy_0, __pthread_tsd_run_dtors);
weak_alias(dummy_0, __do_orphaned_stdio_locks);
weak_alias(dummy_0, __dl_thread_cleanup);
weak_alias(dummy_0, __membarrier_init);

static int tl_lock_count;
static int tl_lock_waiters;

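/* The thread list lock protects the global list of live threads. It
 * must be AS-safe, and it is reentrant for its owner: a thread that
 * already holds it (matched by tid) only bumps tl_lock_count. */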
void __tl_lock(void)
{
	int tid = __pthread_self()->tid;
	int val = __thread_list_lock;
	if (val == tid) {
		tl_lock_count++;
		return;
	}
	while ((val = a_cas(&__thread_list_lock, 0, tid)))
		__wait(&__thread_list_lock, &tl_lock_waiters, val, 0);
}

void __tl_unlock(void)
{
	if (tl_lock_count) {
		tl_lock_count--;
		return;
	}
	a_store(&__thread_list_lock, 0);
	if (tl_lock_waiters) __wake(&__thread_list_lock, 1, 0);
}

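/* Wait until the thread list lock currently held by an exiting thread
 * is released. The lock doubles as the exit futex: the kernel clears
 * and wakes it when the exiting thread's tid is cleared (see the
 * CLONE_CHILD_CLEARTID argument to __clone below), so this is how
 * joiners synchronize with thread exit. */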
void __tl_sync(pthread_t td)
{
	a_barrier();
	int val = __thread_list_lock;
	if (!val) return;
	__wait(&__thread_list_lock, &tl_lock_waiters, val, 0);
	if (tl_lock_waiters) __wake(&__thread_list_lock, 1, 0);
}

_Noreturn void __pthread_exit(void *result)
{
	pthread_t self = __pthread_self();
	sigset_t set;

	self->canceldisable = 1;
	self->cancelasync = 0;
	self->result = result;

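	/* Run any remaining cancellation cleanup handlers registered
	 * with pthread_cleanup_push, in LIFO order. */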
	while (self->cancelbuf) {
		void (*f)(void *) = self->cancelbuf->__f;
		void *x = self->cancelbuf->__x;
		self->cancelbuf = self->cancelbuf->__next;
		f(x);
	}

	__pthread_tsd_run_dtors();

	/* Access to target the exiting thread with syscalls that use
	 * its kernel tid is controlled by killlock. For detached threads,
	 * any use past this point would have undefined behavior, but for
	 * joinable threads it's a valid usage that must be handled. */
	LOCK(self->killlock);

	/* The thread list lock must be AS-safe, and thus requires
	 * application signals to be blocked before it can be taken. */
	__block_app_sigs(&set);
	__tl_lock();

	/* If this is the only thread in the list, don't proceed with
	 * termination of the thread, but restore the previous lock and
	 * signal state to prepare for exit to call atexit handlers. */
	if (self->next == self) {
		__tl_unlock();
		__restore_sigs(&set);
		UNLOCK(self->killlock);
		exit(0);
	}

	/* At this point we are committed to thread termination. Unlink
	 * the thread from the list. This change will not be visible
	 * until the lock is released, which only happens after SYS_exit
	 * has been called, via the exit futex address pointing at the lock. */
	libc.threads_minus_1--;
	self->next->prev = self->prev;
	self->prev->next = self->next;
	self->prev = self->next = self;

	/* Process robust list in userspace to handle non-pshared mutexes
	 * and the detached thread case where the robust list head will
	 * be invalid when the kernel would process it. */
	__vm_lock();
	volatile void *volatile *rp;
	while ((rp=self->robust_list.head) && rp != &self->robust_list.head) {
		pthread_mutex_t *m = (void *)((char *)rp
			- offsetof(pthread_mutex_t, _m_next));
		int waiters = m->_m_waiters;
		int priv = (m->_m_type & 128) ^ 128;
		self->robust_list.pending = rp;
		self->robust_list.head = *rp;
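		/* Store the owner-dead marker (0x40000000, the same value
		 * as the kernel's FUTEX_OWNER_DIED bit) so a later locker
		 * of a robust mutex can observe EOWNERDEAD. */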
		int cont = a_swap(&m->_m_lock, 0x40000000);
		self->robust_list.pending = 0;
		if (cont < 0 || waiters)
			__wake(&m->_m_lock, 1, priv);
	}
	__vm_unlock();

	__do_orphaned_stdio_locks();
	__dl_thread_cleanup();

	/* This atomic potentially competes with a concurrent pthread_detach
	 * call; the loser is responsible for freeing thread resources. */
	int state = a_cas(&self->detach_state, DT_JOINABLE, DT_EXITING);

	if (state==DT_DETACHED && self->map_base) {
		/* Detached threads must block even implementation-internal
		 * signals, since they will not have a stack in their last
		 * moments of existence. */
		__block_all_sigs(&set);

		/* Robust list will no longer be valid, and was already
		 * processed above, so unregister it with the kernel. */
		if (self->robust_list.off)
			__syscall(SYS_set_robust_list, 0, 3*sizeof(long));

		/* Since __unmapself bypasses the normal munmap code path,
		 * explicitly wait for vmlock holders first. */
		__vm_wait();

		/* The following call unmaps the thread's stack mapping
		 * and then exits without touching the stack. */
		__unmapself(self->map_base, self->map_size);
	}

	/* Wake any joiner. */
	__wake(&self->detach_state, 1, 1);

	/* After the kernel thread exits, its tid may be reused. Clear it
	 * to prevent inadvertent use and inform functions that would use
	 * it that it's no longer available. */
	self->tid = 0;
	UNLOCK(self->killlock);

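	/* Terminate the kernel thread. SYS_exit should never return;
	 * the loop guards against that ever happening. */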
	for (;;) __syscall(SYS_exit, 0);
}

void __do_cleanup_push(struct __ptcb *cb)
{
	struct pthread *self = __pthread_self();
	cb->__next = self->cancelbuf;
	self->cancelbuf = cb;
}

void __do_cleanup_pop(struct __ptcb *cb)
{
	__pthread_self()->cancelbuf = cb->__next;
}

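/* Arguments for the new thread's entry point, placed at the top of the
 * new thread's stack. They are safe for the parent to access only
 * until the thread list lock is released. */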
struct start_args {
	void *(*start_func)(void *);
	void *start_arg;
	pthread_attr_t *attr;
	volatile int *perr;
	unsigned long sig_mask[_NSIG/8/sizeof(long)];
};

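/* Entry point for the new kernel thread. If explicit scheduling was
 * requested, apply it here in the child and report the result through
 * *perr: the parent initializes *perr to -1, and finding -2 in the
 * swap means the parent is already blocked in __wait and must be
 * woken. On failure the thread exits as detached so its resources
 * are reclaimed. */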
static int start(void *p)
{
	struct start_args *args = p;
	if (args->attr) {
		pthread_t self = __pthread_self();
		int ret = -__syscall(SYS_sched_setscheduler, self->tid,
			args->attr->_a_policy, &args->attr->_a_prio);
		if (a_swap(args->perr, ret)==-2)
			__wake(args->perr, 1, 1);
		if (ret) {
			self->detach_state = DT_DETACHED;
			__pthread_exit(0);
		}
	}
	__syscall(SYS_rt_sigprocmask, SIG_SETMASK, &args->sig_mask, 0, _NSIG/8);
	__pthread_exit(args->start_func(args->start_arg));
	return 0;
}

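/* C11 thrd_create start routines return int rather than void *, so
 * they need a separate trampoline. */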
static int start_c11(void *p)
{
	struct start_args *args = p;
	int (*start)(void*) = (int(*)(void*)) args->start_func;
	__pthread_exit((void *)(uintptr_t)start(args->start_arg));
	return 0;
}

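/* Round up to a whole number of pages. */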
#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)

/* pthread_key_create.c overrides this */
static volatile size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);
static void *dummy_tsd[1] = { 0 };
weak_alias(dummy_tsd, __pthread_tsd_main);

static FILE *volatile dummy_file = 0;
weak_alias(dummy_file, __stdin_used);
weak_alias(dummy_file, __stdout_used);
weak_alias(dummy_file, __stderr_used);

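/* A FILE lock value below zero means locking was disabled while the
 * process was single-threaded; reset it to 0 so stdio locking takes
 * effect now that the process is becoming multithreaded. */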
static void init_file_lock(FILE *f)
{
	if (f && f->lock<0) f->lock = 0;
}

int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict attrp, void *(*entry)(void *), void *restrict arg)
{
	int ret, c11 = (attrp == __ATTRP_C11_THREAD);
	size_t size, guard;
	struct pthread *self, *new;
	unsigned char *map = 0, *stack = 0, *tsd = 0, *stack_limit;
	unsigned flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND
		| CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS
		| CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID | CLONE_DETACHED;
	pthread_attr_t attr = { 0 };
	sigset_t set;
	volatile int err = -1;

	if (!libc.can_do_threads) return ENOSYS;
	self = __pthread_self();
	if (!libc.threaded) {
		for (FILE *f=*__ofl_lock(); f; f=f->next)
			init_file_lock(f);
		__ofl_unlock();
		init_file_lock(__stdin_used);
		init_file_lock(__stdout_used);
		init_file_lock(__stderr_used);
		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK, SIGPT_SET, 0, _NSIG/8);
		self->tsd = (void **)__pthread_tsd_main;
		__membarrier_init();
		libc.threaded = 1;
	}
	if (attrp && !c11) attr = *attrp;

	__acquire_ptc();
	if (!attrp || c11) {
		attr._a_stacksize = __default_stacksize;
		attr._a_guardsize = __default_guardsize;
	}

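	/* Layout of an implementation-allocated mapping:
	 * [ guard | stack (grows down) | TLS | TSD ]. An application-
	 * provided stack hosts the TLS/TSD areas only when they fit
	 * comfortably; otherwise a separate mapping is made for them. */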
	if (attr._a_stackaddr) {
		size_t need = libc.tls_size + __pthread_tsd_size;
		size = attr._a_stacksize;
		stack = (void *)(attr._a_stackaddr & -16);
		stack_limit = (void *)(attr._a_stackaddr - size);
		/* Use application-provided stack for TLS only when
		 * it does not take more than ~12% or 2k of the
		 * application's stack space. */
		if (need < size/8 && need < 2048) {
			tsd = stack - __pthread_tsd_size;
			stack = tsd - libc.tls_size;
			memset(stack, 0, need);
		} else {
			size = ROUND(need);
		}
		guard = 0;
	} else {
		guard = ROUND(attr._a_guardsize);
		size = guard + ROUND(attr._a_stacksize
			+ libc.tls_size + __pthread_tsd_size);
	}

	if (!tsd) {
		if (guard) {
			map = __mmap(0, size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
			if (__mprotect(map+guard, size-guard, PROT_READ|PROT_WRITE)
			    && errno != ENOSYS) {
				__munmap(map, size);
				goto fail;
			}
		} else {
			map = __mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
		}
		tsd = map + size - __pthread_tsd_size;
		if (!stack) {
			stack = tsd - libc.tls_size;
			stack_limit = map + guard;
		}
	}

	new = __copy_tls(tsd - libc.tls_size);
	new->map_base = map;
	new->map_size = size;
	new->stack = stack;
	new->stack_size = stack - stack_limit;
	new->guard_size = guard;
	new->self = new;
	new->tsd = (void *)tsd;
	new->locale = &libc.global_locale;
	if (attr._a_detach) {
		new->detach_state = DT_DETACHED;
	} else {
		new->detach_state = DT_JOINABLE;
	}
	new->robust_list.head = &new->robust_list.head;
	new->CANARY = self->CANARY;

	/* Setup argument structure for the new thread on its stack.
	 * It's safe to access from the caller only until the thread
	 * list is unlocked. */
	stack -= (uintptr_t)stack % sizeof(uintptr_t);
	stack -= sizeof(struct start_args);
	struct start_args *args = (void *)stack;
	args->start_func = entry;
	args->start_arg = arg;
	if (attr._a_sched) {
		args->attr = &attr;
		args->perr = &err;
	} else {
		args->attr = 0;
		args->perr = 0;
	}

	/* Application signals (but not the synccall signal) must be
	 * blocked before the thread list lock can be taken, to ensure
	 * that the lock is AS-safe. */
	__block_app_sigs(&set);

	/* Ensure SIGCANCEL is unblocked in new thread. This requires
	 * working with a copy of the set so we can restore the
	 * original mask in the calling thread. */
	memcpy(&args->sig_mask, &set, sizeof args->sig_mask);
	args->sig_mask[(SIGCANCEL-1)/8/sizeof(long)] &=
		~(1UL<<((SIGCANCEL-1)%(8*sizeof(long))));

	__tl_lock();
	libc.threads_minus_1++;
	ret = __clone((c11 ? start_c11 : start), stack, flags, args, &new->tid, TP_ADJ(new), &__thread_list_lock);

	/* If clone succeeded, new thread must be linked on the thread
	 * list before unlocking it, even if scheduling may still fail. */
	if (ret >= 0) {
		new->next = self->next;
		new->prev = self;
		new->next->prev = new;
		new->prev->next = new;
	}
	__tl_unlock();
	__restore_sigs(&set);
	__release_ptc();

	if (ret < 0) {
		libc.threads_minus_1--;
		if (map) __munmap(map, size);
		return EAGAIN;
	}

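	/* If explicit scheduling was requested, wait for the new thread
	 * to report the result: CAS err from -1 (not yet reported) to -2
	 * so the child knows to wake us, then wait on the futex. */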
	if (attr._a_sched) {
		if (a_cas(&err, -1, -2)==-1)
			__wait(&err, 0, -2, 1);
		ret = err;
		if (ret) return ret;
	}

	*res = new;
	return 0;
fail:
	__release_ptc();
	return EAGAIN;
}

weak_alias(__pthread_exit, pthread_exit);
weak_alias(__pthread_create, pthread_create);