/* pthread_cond_timedwait.c — from wasi-libc (libc-top-half/musl),
 * derived from upstream musl libc. */
#include "pthread_impl.h"
/*
 * Waiter objects have automatic storage on the waiting thread, and
 * are used in building a linked list representing waiters currently
 * waiting on the condition variable or a group of waiters woken
 * together by a broadcast or signal; in the case of signal, this is a
 * degenerate list of one member.
 *
 * Waiter lists attached to the condition variable itself are
 * protected by the lock on the cv. Detached waiter lists are never
 * modified again, but can only be traversed in reverse order, and are
 * protected by the "barrier" locks in each node, which are unlocked
 * in turn to control wake order.
 *
 * Since process-shared cond var semantics do not necessarily allow
 * one thread to see another's automatic storage (they may be in
 * different processes), the waiter list is not used for the
 * process-shared case, but the structure is still used to store data
 * needed by the cancellation cleanup handler.
 */
/* One node in the intrusive waiter list; lives on the waiting
 * thread's stack for the duration of the wait. */
struct waiter {
	struct waiter *prev, *next;
	volatile int state, barrier;
	/* When a signaler observes this waiter LEAVING, it points notify
	 * at its on-stack refcount so the leaver can report completion
	 * (see a_fetch_add(node.notify, -1) in the timedwait path). */
	volatile int *notify;
};
/* Self-synchronized-destruction-safe lock functions */
/* Acquire *l. States: 0 = unlocked, 1 = locked, 2 = locked with
 * waiters. NOTE(review): the uncontended fast path was lost in this
 * mangled copy; restored per upstream musl — verify against history. */
static inline void lock(volatile int *l)
{
	if (a_cas(l, 0, 1)) {
		/* Contended: upgrade to "locked with waiters" before
		 * sleeping so the holder knows to issue a wake. */
		a_cas(l, 1, 2);
		do __wait(l, 0, 2, 1);
		while (a_cas(l, 0, 2));
	}
}
/* Release *l; wake one waiter only if the contended state (2) was
 * observed. NOTE(review): body was missing from this mangled copy;
 * restored per upstream musl — verify against history. */
static inline void unlock(volatile int *l)
{
	if (a_swap(l, 0)==2)
		__wake(l, 1, 1);
}
48 static inline void unlock_requeue(volatile int *l
, volatile int *r
, int w
)
51 if (w
) __wake(l
, 1, 1);
52 else __syscall(SYS_futex
, l
, FUTEX_REQUEUE
|FUTEX_PRIVATE
, 0, 1, r
) != -ENOSYS
53 || __syscall(SYS_futex
, l
, FUTEX_REQUEUE
, 0, 1, r
);
62 int __pthread_cond_timedwait(pthread_cond_t
*restrict c
, pthread_mutex_t
*restrict m
, const struct timespec
*restrict ts
)
64 struct waiter node
= { 0 };
65 int e
, seq
, clock
= c
->_c_clock
, cs
, shared
=0, oldstate
, tmp
;
68 if ((m
->_m_type
&15) && (m
->_m_lock
&INT_MAX
) != __pthread_self()->tid
)
71 if (ts
&& ts
->tv_nsec
>= 1000000000UL)
74 __pthread_testcancel();
80 a_inc(&c
->_c_waiters
);
84 seq
= node
.barrier
= 2;
87 node
.next
= c
->_c_head
;
89 if (!c
->_c_tail
) c
->_c_tail
= &node
;
90 else node
.next
->prev
= &node
;
95 __pthread_mutex_unlock(m
);
97 __pthread_setcancelstate(PTHREAD_CANCEL_MASKED
, &cs
);
98 if (cs
== PTHREAD_CANCEL_DISABLE
) __pthread_setcancelstate(cs
, 0);
100 do e
= __timedwait_cp(fut
, seq
, clock
, ts
, !shared
);
101 while (*fut
==seq
&& (!e
|| e
==EINTR
));
102 if (e
== EINTR
) e
= 0;
105 /* Suppress cancellation if a signal was potentially
106 * consumed; this is a legitimate form of spurious
107 * wake even if not. */
108 if (e
== ECANCELED
&& c
->_c_seq
!= seq
) e
= 0;
109 if (a_fetch_add(&c
->_c_waiters
, -1) == -0x7fffffff)
110 __wake(&c
->_c_waiters
, 1, 0);
115 oldstate
= a_cas(&node
.state
, WAITING
, LEAVING
);
117 if (oldstate
== WAITING
) {
118 /* Access to cv object is valid because this waiter was not
119 * yet signaled and a new signal/broadcast cannot return
120 * after seeing a LEAVING waiter without getting notified
121 * via the futex notify below. */
125 if (c
->_c_head
== &node
) c
->_c_head
= node
.next
;
126 else if (node
.prev
) node
.prev
->next
= node
.next
;
127 if (c
->_c_tail
== &node
) c
->_c_tail
= node
.prev
;
128 else if (node
.next
) node
.next
->prev
= node
.prev
;
133 if (a_fetch_add(node
.notify
, -1)==1)
134 __wake(node
.notify
, 1, 1);
137 /* Lock barrier first to control wake order. */
142 /* Errors locking the mutex override any existing error or
143 * cancellation, since the caller must see them to know the
144 * state of the mutex. */
145 if ((tmp
= pthread_mutex_lock(m
))) e
= tmp
;
147 if (oldstate
== WAITING
) goto done
;
149 if (!node
.next
) a_inc(&m
->_m_waiters
);
151 /* Unlock the barrier that's holding back the next waiter, and
152 * either wake it or requeue it to the mutex. */
154 unlock_requeue(&node
.prev
->barrier
, &m
->_m_lock
, m
->_m_type
& 128);
156 a_dec(&m
->_m_waiters
);
158 /* Since a signal was consumed, cancellation is not permitted. */
159 if (e
== ECANCELED
) e
= 0;
162 __pthread_setcancelstate(cs
, 0);
164 if (e
== ECANCELED
) {
165 __pthread_testcancel();
166 __pthread_setcancelstate(PTHREAD_CANCEL_DISABLE
, 0);
172 int __private_cond_signal(pthread_cond_t
*c
, int n
)
174 struct waiter
*p
, *first
=0;
175 volatile int ref
= 0;
179 for (p
=c
->_c_tail
; n
&& p
; p
=p
->prev
) {
180 if (a_cas(&p
->state
, WAITING
, SIGNALED
) != WAITING
) {
188 /* Split the list, leaving any remainder on the cv. */
190 if (p
->next
) p
->next
->prev
= 0;
198 /* Wait for any waiters in the LEAVING state to remove
199 * themselves from the list before returning or allowing
200 * signaled threads to proceed. */
201 while ((cur
= ref
)) __wait(&ref
, 0, cur
, 1);
203 /* Allow first signaled waiter, if any, to proceed. */
204 if (first
) unlock(&first
->barrier
);
/* Expose the internal implementation under the public POSIX name. */
weak_alias(__pthread_cond_timedwait, pthread_cond_timedwait);