#define _GNU_SOURCE
#include "pthread_impl.h"
#include "stdio_impl.h"
#include "libc.h"
#include "lock.h"
#include <sys/mman.h>
#include <string.h>
#include <stddef.h>

static void dummy_0()
{
}
weak_alias(dummy_0, __acquire_ptc);
weak_alias(dummy_0, __release_ptc);
weak_alias(dummy_0, __pthread_tsd_run_dtors);
weak_alias(dummy_0, __do_orphaned_stdio_locks);
weak_alias(dummy_0, __dl_thread_cleanup);
weak_alias(dummy_0, __membarrier_init);
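
/* The aliases above are no-op weak stubs; translation units that need
 * real behavior (pthread_key_create.c, stdio, the dynamic linker, etc.)
 * provide strong definitions that override them at link time. */

/* The thread list lock serializes changes to the doubly-linked list of
 * live threads. It must be AS-safe, and its address doubles as the
 * CLONE_CHILD_CLEARTID futex passed to __clone below, so the kernel
 * itself releases the lock when an exiting thread finally dies. */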
static int tl_lock_count;
static int tl_lock_waiters;

void __tl_lock(void)
{
	int tid = __pthread_self()->tid;
	int val = __thread_list_lock;
	if (val == tid) {
		tl_lock_count++;
		return;
	}
	while ((val = a_cas(&__thread_list_lock, 0, tid)))
		__wait(&__thread_list_lock, &tl_lock_waiters, val, 0);
}

void __tl_unlock(void)
{
	if (tl_lock_count) {
		tl_lock_count--;
		return;
	}
	a_store(&__thread_list_lock, 0);
	if (tl_lock_waiters) __wake(&__thread_list_lock, 1, 0);
}
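
/* __tl_sync takes no lock; it only waits for any current holder of the
 * thread list lock to release it, then propagates one wake. A joiner
 * uses this to synchronize with the kernel-side release performed when
 * the exiting thread's ctid futex is cleared. */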
void __tl_sync(pthread_t td)
{
	a_barrier();
	int val = __thread_list_lock;
	if (!val) return;
	__wait(&__thread_list_lock, &tl_lock_waiters, val, 0);
	if (tl_lock_waiters) __wake(&__thread_list_lock, 1, 0);
}
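
/* Thread exit proceeds in order: cancellation cleanup handlers, TSD
 * destructors, unlinking from the thread list, userspace robust-list
 * processing, then the final SYS_exit, with the stack possibly already
 * unmapped in the detached case. */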
_Noreturn void __pthread_exit(void *result)
{
	pthread_t self = __pthread_self();
	sigset_t set;

	self->canceldisable = 1;
	self->cancelasync = 0;
	self->result = result;

	/* Pop and run cancellation cleanup handlers, newest first. */
	while (self->cancelbuf) {
		void (*f)(void *) = self->cancelbuf->__f;
		void *x = self->cancelbuf->__x;
		self->cancelbuf = self->cancelbuf->__next;
		f(x);
	}

	__pthread_tsd_run_dtors();

	/* Access to target the exiting thread with syscalls that use
	 * its kernel tid is controlled by killlock. For detached threads,
	 * any use past this point would have undefined behavior, but for
	 * joinable threads it's a valid usage that must be handled. */
	LOCK(self->killlock);

	/* The thread list lock must be AS-safe, and thus requires
	 * application signals to be blocked before it can be taken. */
	__block_app_sigs(&set);
	__tl_lock();

	/* If this is the only thread in the list, don't proceed with
	 * termination of the thread, but restore the previous lock and
	 * signal state to prepare for exit to call atexit handlers. */
	if (self->next == self) {
		__tl_unlock();
		__restore_sigs(&set);
		UNLOCK(self->killlock);
		exit(0);
	}

	/* At this point we are committed to thread termination. Unlink
	 * the thread from the list. This change will not be visible
	 * until the lock is released, which only happens after SYS_exit
	 * has been called, via the exit futex address pointing at the lock. */
	libc.threads_minus_1--;
	self->next->prev = self->prev;
	self->prev->next = self->next;
	self->prev = self->next = self;

	/* Process robust list in userspace to handle non-pshared mutexes
	 * and the detached thread case where the robust list head will
	 * be invalid when the kernel would process it. */
	__vm_lock();
	volatile void *volatile *rp;
	while ((rp=self->robust_list.head) && rp != &self->robust_list.head) {
		pthread_mutex_t *m = (void *)((char *)rp
			- offsetof(pthread_mutex_t, _m_next));
		int waiters = m->_m_waiters;
		/* Type bit 128 marks process-shared mutexes; priv selects
		 * the private-futex flag for the wake accordingly. */
		int priv = (m->_m_type & 128) ^ 128;
		self->robust_list.pending = rp;
		self->robust_list.head = *rp;
		/* 0x40000000 is the owner-died bit (FUTEX_OWNER_DIED), so
		 * the next locker observes EOWNERDEAD. */
		int cont = a_swap(&m->_m_lock, 0x40000000);
		self->robust_list.pending = 0;
		if (cont < 0 || waiters)
			__wake(&m->_m_lock, 1, priv);
	}
	__vm_unlock();

	__do_orphaned_stdio_locks();
	__dl_thread_cleanup();

	/* This atomic potentially competes with a concurrent pthread_detach
	 * call; the loser is responsible for freeing thread resources. */
	int state = a_cas(&self->detach_state, DT_JOINABLE, DT_EXITING);

	if (state==DT_DETACHED && self->map_base) {
		/* Detached threads must block even implementation-internal
		 * signals, since they will not have a stack in their last
		 * moments of existence. */
		__block_all_sigs(&set);

		/* Robust list will no longer be valid, and was already
		 * processed above, so unregister it with the kernel. */
		if (self->robust_list.off)
			__syscall(SYS_set_robust_list, 0, 3*sizeof(long));

		/* Since __unmapself bypasses the normal munmap code path,
		 * explicitly wait for vmlock holders first. */
		__vm_wait();

		/* The following call unmaps the thread's stack mapping
		 * and then exits without touching the stack. */
		__unmapself(self->map_base, self->map_size);
	}

	/* Wake any joiner. */
	__wake(&self->detach_state, 1, 1);

	/* After the kernel thread exits, its tid may be reused. Clear it
	 * to prevent inadvertent use and inform functions that would use
	 * it that it's no longer available. */
	self->tid = 0;
	UNLOCK(self->killlock);

	for (;;) __syscall(SYS_exit, 0);
}
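
/* Backends for the pthread_cleanup_push/pop macros in pthread.h, which
 * expand to something of roughly this shape (a sketch, not the literal
 * expansion):
 *
 *	struct __ptcb __cb;
 *	__do_cleanup_push(&__cb);
 *	...protected code...
 *	__do_cleanup_pop(&__cb);
 *
 * so cancelbuf threads a LIFO chain of handlers through the enclosing
 * stack frames, consumed by the loop at the top of __pthread_exit. */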
void __do_cleanup_push(struct __ptcb *cb)
{
	struct pthread *self = __pthread_self();
	cb->__next = self->cancelbuf;
	self->cancelbuf = cb;
}

void __do_cleanup_pop(struct __ptcb *cb)
{
	__pthread_self()->cancelbuf = cb->__next;
}
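
/* Arguments for a new thread are staged in a start_args structure at
 * the top of its own stack; they remain valid only until the creating
 * thread unlocks the thread list. */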
struct start_args {
	void *(*start_func)(void *);
	void *start_arg;
	pthread_attr_t *attr;
	volatile int *perr;
	unsigned long sig_mask[_NSIG/8/sizeof(long)];
};

static int start(void *p)
{
	struct start_args *args = p;
	if (args->attr) {
		pthread_t self = __pthread_self();
		int ret = -__syscall(SYS_sched_setscheduler, self->tid,
			args->attr->_a_policy, &args->attr->_a_prio);
		/* Report the result to the parent, which may be blocked
		 * waiting for it in __pthread_create below. */
		if (a_swap(args->perr, ret)==-2)
			__wake(args->perr, 1, 1);
		if (ret) {
			/* Scheduling failed; pthread_create will report
			 * the error and the caller never sees a handle,
			 * so mark detached and let the thread free its
			 * own resources on exit. */
			self->detach_state = DT_DETACHED;
			__pthread_exit(0);
		}
	}
	__syscall(SYS_rt_sigprocmask, SIG_SETMASK, &args->sig_mask, 0, _NSIG/8);
	__pthread_exit(args->start_func(args->start_arg));
	return 0;
}
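
/* C11 threads use an int-returning start function; a separate trampoline
 * recovers the int result and widens it into the void * result slot that
 * the pthread machinery stores. */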
static int start_c11(void *p)
{
	struct start_args *args = p;
	int (*start)(void*) = (int(*)(void*)) args->start_func;
	__pthread_exit((void *)(uintptr_t)start(args->start_arg));
	return 0;
}

#define ROUND(x) (((x)+PAGE_SIZE-1)&-PAGE_SIZE)

/* pthread_key_create.c overrides this */
static volatile size_t dummy = 0;
weak_alias(dummy, __pthread_tsd_size);
static void *dummy_tsd[1] = { 0 };
weak_alias(dummy_tsd, __pthread_tsd_main);

static FILE *volatile dummy_file = 0;
weak_alias(dummy_file, __stdin_used);
weak_alias(dummy_file, __stdout_used);
weak_alias(dummy_file, __stderr_used);
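
/* While the process is single-threaded, stdio FILE locks hold a negative
 * value meaning locking is skipped; flipping them to zero arms them
 * before a second thread can exist. */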
static void init_file_lock(FILE *f)
{
	if (f && f->lock<0) f->lock = 0;
}

int __pthread_create(pthread_t *restrict res, const pthread_attr_t *restrict attrp, void *(*entry)(void *), void *restrict arg)
{
	int ret, c11 = (attrp == __ATTRP_C11_THREAD);
	size_t size, guard;
	struct pthread *self, *new;
	unsigned char *map = 0, *stack = 0, *tsd = 0, *stack_limit;
	unsigned flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND
		| CLONE_THREAD | CLONE_SYSVSEM | CLONE_SETTLS
		| CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID | CLONE_DETACHED;
	pthread_attr_t attr = { 0 };
	sigset_t set;
	volatile int err = -1;

	if (!libc.can_do_threads) return ENOSYS;
	self = __pthread_self();
	if (!libc.threaded) {
		for (FILE *f=*__ofl_lock(); f; f=f->next)
			init_file_lock(f);
		__ofl_unlock();
		init_file_lock(__stdin_used);
		init_file_lock(__stdout_used);
		init_file_lock(__stderr_used);
		__syscall(SYS_rt_sigprocmask, SIG_UNBLOCK, SIGPT_SET, 0, _NSIG/8);
		self->tsd = (void **)__pthread_tsd_main;
		__membarrier_init();
		libc.threaded = 1;
	}
	if (attrp && !c11) attr = *attrp;

	__acquire_ptc();
	if (!attrp || c11) {
		attr._a_stacksize = __default_stacksize;
		attr._a_guardsize = __default_guardsize;
	}
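
	/* The region backing the thread is laid out, from low to high
	 * addresses: [guard][stack][TLS][TSD]. When the application
	 * supplies its own stack, TLS/TSD are carved out of it only if
	 * they fit cheaply; otherwise a separate mapping holds them. */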
	if (attr._a_stackaddr) {
		size_t need = libc.tls_size + __pthread_tsd_size;
		size = attr._a_stacksize;
		stack = (void *)(attr._a_stackaddr & -16);
		stack_limit = (void *)(attr._a_stackaddr - size);
		/* Use application-provided stack for TLS only when
		 * it does not take more than ~12% or 2k of the
		 * application's stack space. */
		if (need < size/8 && need < 2048) {
			tsd = stack - __pthread_tsd_size;
			stack = tsd - libc.tls_size;
			memset(stack, 0, need);
		} else {
			size = ROUND(need);
		}
		guard = 0;
	} else {
		guard = ROUND(attr._a_guardsize);
		size = guard + ROUND(attr._a_stacksize
			+ libc.tls_size + __pthread_tsd_size);
	}
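
	/* Map PROT_NONE first and enable access only above the guard, so
	 * guard pages are never made accessible; ENOSYS from mprotect is
	 * tolerated so targets that do not enforce protections (NOMMU)
	 * can still proceed with the mapping as-is. */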
	if (!tsd) {
		if (guard) {
			map = __mmap(0, size, PROT_NONE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
			if (__mprotect(map+guard, size-guard, PROT_READ|PROT_WRITE)
			    && errno != ENOSYS) {
				__munmap(map, size);
				goto fail;
			}
		} else {
			map = __mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
			if (map == MAP_FAILED) goto fail;
		}
		tsd = map + size - __pthread_tsd_size;
		if (!stack) {
			stack = tsd - libc.tls_size;
			stack_limit = map + guard;
		}
	}

	new = __copy_tls(tsd - libc.tls_size);
	new->map_base = map;
	new->map_size = size;
	new->stack = stack;
	new->stack_size = stack - stack_limit;
	new->guard_size = guard;
	new->self = new;
	new->tsd = (void *)tsd;
	new->locale = &libc.global_locale;
	if (attr._a_detach) {
		new->detach_state = DT_DETACHED;
	} else {
		new->detach_state = DT_JOINABLE;
	}
	new->robust_list.head = &new->robust_list.head;
	new->CANARY = self->CANARY;
	new->sysinfo = self->sysinfo;

	/* Setup argument structure for the new thread on its stack.
	 * It's safe to access from the caller only until the thread
	 * list is unlocked. */
	stack -= (uintptr_t)stack % sizeof(uintptr_t);
	stack -= sizeof(struct start_args);
	struct start_args *args = (void *)stack;
	args->start_func = entry;
	args->start_arg = arg;
	if (attr._a_sched) {
		args->attr = &attr;
		args->perr = &err;
	} else {
		args->attr = 0;
		args->perr = 0;
	}

	/* Application signals (but not the synccall signal) must be
	 * blocked before the thread list lock can be taken, to ensure
	 * that the lock is AS-safe. */
	__block_app_sigs(&set);

	/* Ensure SIGCANCEL is unblocked in new thread. This requires
	 * working with a copy of the set so we can restore the
	 * original mask in the calling thread. */
	memcpy(&args->sig_mask, &set, sizeof args->sig_mask);
	args->sig_mask[(SIGCANCEL-1)/8/sizeof(long)] &=
		~(1UL<<((SIGCANCEL-1)%(8*sizeof(long))));
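
	/* The final __clone argument is the CLONE_CHILD_CLEARTID address;
	 * pointing it at __thread_list_lock makes the kernel release the
	 * thread list lock when the child exits, which is what defers
	 * visibility of the unlink in __pthread_exit until after SYS_exit. */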
	__tl_lock();
	libc.threads_minus_1++;
	ret = __clone((c11 ? start_c11 : start), stack, flags, args,
		&new->tid, TP_ADJ(new), &__thread_list_lock);

	/* If clone succeeded, new thread must be linked on the thread
	 * list before unlocking it, even if scheduling may still fail. */
	if (ret >= 0) {
		new->next = self->next;
		new->prev = self;
		new->next->prev = new;
		new->prev->next = new;
	}
	__tl_unlock();
	__restore_sigs(&set);
	__release_ptc();

	if (ret < 0) {
		libc.threads_minus_1--;
		if (map) __munmap(map, size);
		return EAGAIN;
	}

	if (attr._a_sched) {
		/* Wait for the new thread to report the result of its
		 * sched_setscheduler call; storing -2 asks it to issue a
		 * futex wake when the result lands. */
		if (a_cas(&err, -1, -2)==-1)
			__wait(&err, 0, -2, 1);
		ret = err;
		if (ret) return ret;
	}

	*res = new;
	return 0;
fail:
	__release_ptc();
	return EAGAIN;
}

weak_alias(__pthread_exit, pthread_exit);
weak_alias(__pthread_create, pthread_create);