/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Solaris Porting Layer (SPL) Condition Variables Implementation.
 */
27 #include <sys/condvar.h>
31 __cv_init(kcondvar_t
*cvp
, char *name
, kcv_type_t type
, void *arg
)
35 ASSERT(type
== CV_DEFAULT
);
38 cvp
->cv_magic
= CV_MAGIC
;
39 init_waitqueue_head(&cvp
->cv_event
);
40 init_waitqueue_head(&cvp
->cv_destroy
);
41 atomic_set(&cvp
->cv_waiters
, 0);
42 atomic_set(&cvp
->cv_refs
, 1);
45 EXPORT_SYMBOL(__cv_init
);
48 cv_destroy_wakeup(kcondvar_t
*cvp
)
50 if (!atomic_read(&cvp
->cv_waiters
) && !atomic_read(&cvp
->cv_refs
)) {
51 ASSERT(cvp
->cv_mutex
== NULL
);
52 ASSERT(!waitqueue_active(&cvp
->cv_event
));
60 __cv_destroy(kcondvar_t
*cvp
)
63 ASSERT(cvp
->cv_magic
== CV_MAGIC
);
65 cvp
->cv_magic
= CV_DESTROY
;
66 atomic_dec(&cvp
->cv_refs
);
68 /* Block until all waiters are woken and references dropped. */
69 while (cv_destroy_wakeup(cvp
) == 0)
70 wait_event_timeout(cvp
->cv_destroy
, cv_destroy_wakeup(cvp
), 1);
72 ASSERT3P(cvp
->cv_mutex
, ==, NULL
);
73 ASSERT3S(atomic_read(&cvp
->cv_refs
), ==, 0);
74 ASSERT3S(atomic_read(&cvp
->cv_waiters
), ==, 0);
75 ASSERT3S(waitqueue_active(&cvp
->cv_event
), ==, 0);
77 EXPORT_SYMBOL(__cv_destroy
);
80 cv_wait_common(kcondvar_t
*cvp
, kmutex_t
*mp
, int state
, int io
)
87 ASSERT(cvp
->cv_magic
== CV_MAGIC
);
88 ASSERT(mutex_owned(mp
));
89 atomic_inc(&cvp
->cv_refs
);
91 m
= ACCESS_ONCE(cvp
->cv_mutex
);
93 m
= xchg(&cvp
->cv_mutex
, mp
);
94 /* Ensure the same mutex is used by all callers */
95 ASSERT(m
== NULL
|| m
== mp
);
97 prepare_to_wait_exclusive(&cvp
->cv_event
, &wait
, state
);
98 atomic_inc(&cvp
->cv_waiters
);
101 * Mutex should be dropped after prepare_to_wait() this
102 * ensures we're linked in to the waiters list and avoids the
103 * race where 'cvp->cv_waiters > 0' but the list is empty.
111 /* No more waiters a different mutex could be used */
112 if (atomic_dec_and_test(&cvp
->cv_waiters
)) {
114 * This is set without any lock, so it's racy. But this is
115 * just for debug anyway, so make it best-effort
117 cvp
->cv_mutex
= NULL
;
118 wake_up(&cvp
->cv_destroy
);
121 finish_wait(&cvp
->cv_event
, &wait
);
122 atomic_dec(&cvp
->cv_refs
);
125 * Hold mutex after we release the cvp, otherwise we could dead lock
126 * with a thread holding the mutex and call cv_destroy.
132 __cv_wait(kcondvar_t
*cvp
, kmutex_t
*mp
)
134 cv_wait_common(cvp
, mp
, TASK_UNINTERRUPTIBLE
, 0);
136 EXPORT_SYMBOL(__cv_wait
);
139 __cv_wait_sig(kcondvar_t
*cvp
, kmutex_t
*mp
)
141 cv_wait_common(cvp
, mp
, TASK_INTERRUPTIBLE
, 0);
143 EXPORT_SYMBOL(__cv_wait_sig
);
146 __cv_wait_io(kcondvar_t
*cvp
, kmutex_t
*mp
)
148 cv_wait_common(cvp
, mp
, TASK_UNINTERRUPTIBLE
, 1);
150 EXPORT_SYMBOL(__cv_wait_io
);
153 * 'expire_time' argument is an absolute wall clock time in jiffies.
154 * Return value is time left (expire_time - now) or -1 if timeout occurred.
157 __cv_timedwait_common(kcondvar_t
*cvp
, kmutex_t
*mp
, clock_t expire_time
,
166 ASSERT(cvp
->cv_magic
== CV_MAGIC
);
167 ASSERT(mutex_owned(mp
));
168 atomic_inc(&cvp
->cv_refs
);
170 m
= ACCESS_ONCE(cvp
->cv_mutex
);
172 m
= xchg(&cvp
->cv_mutex
, mp
);
173 /* Ensure the same mutex is used by all callers */
174 ASSERT(m
== NULL
|| m
== mp
);
176 /* XXX - Does not handle jiffie wrap properly */
177 time_left
= expire_time
- jiffies
;
178 if (time_left
<= 0) {
179 /* XXX - doesn't reset cv_mutex */
180 atomic_dec(&cvp
->cv_refs
);
184 prepare_to_wait_exclusive(&cvp
->cv_event
, &wait
, state
);
185 atomic_inc(&cvp
->cv_waiters
);
188 * Mutex should be dropped after prepare_to_wait() this
189 * ensures we're linked in to the waiters list and avoids the
190 * race where 'cvp->cv_waiters > 0' but the list is empty.
193 time_left
= schedule_timeout(time_left
);
195 /* No more waiters a different mutex could be used */
196 if (atomic_dec_and_test(&cvp
->cv_waiters
)) {
198 * This is set without any lock, so it's racy. But this is
199 * just for debug anyway, so make it best-effort
201 cvp
->cv_mutex
= NULL
;
202 wake_up(&cvp
->cv_destroy
);
205 finish_wait(&cvp
->cv_event
, &wait
);
206 atomic_dec(&cvp
->cv_refs
);
209 * Hold mutex after we release the cvp, otherwise we could dead lock
210 * with a thread holding the mutex and call cv_destroy.
213 return (time_left
> 0 ? time_left
: -1);
217 __cv_timedwait(kcondvar_t
*cvp
, kmutex_t
*mp
, clock_t exp_time
)
219 return (__cv_timedwait_common(cvp
, mp
, exp_time
, TASK_UNINTERRUPTIBLE
));
221 EXPORT_SYMBOL(__cv_timedwait
);
224 __cv_timedwait_sig(kcondvar_t
*cvp
, kmutex_t
*mp
, clock_t exp_time
)
226 return (__cv_timedwait_common(cvp
, mp
, exp_time
, TASK_INTERRUPTIBLE
));
228 EXPORT_SYMBOL(__cv_timedwait_sig
);
231 * 'expire_time' argument is an absolute clock time in nanoseconds.
232 * Return value is time left (expire_time - now) or -1 if timeout occurred.
235 __cv_timedwait_hires(kcondvar_t
*cvp
, kmutex_t
*mp
, hrtime_t expire_time
,
240 hrtime_t time_left
, now
;
241 unsigned long time_left_us
;
245 ASSERT(cvp
->cv_magic
== CV_MAGIC
);
246 ASSERT(mutex_owned(mp
));
247 atomic_inc(&cvp
->cv_refs
);
249 m
= ACCESS_ONCE(cvp
->cv_mutex
);
251 m
= xchg(&cvp
->cv_mutex
, mp
);
252 /* Ensure the same mutex is used by all callers */
253 ASSERT(m
== NULL
|| m
== mp
);
256 time_left
= expire_time
- now
;
257 if (time_left
<= 0) {
258 atomic_dec(&cvp
->cv_refs
);
261 time_left_us
= time_left
/ NSEC_PER_USEC
;
263 prepare_to_wait_exclusive(&cvp
->cv_event
, &wait
, state
);
264 atomic_inc(&cvp
->cv_waiters
);
267 * Mutex should be dropped after prepare_to_wait() this
268 * ensures we're linked in to the waiters list and avoids the
269 * race where 'cvp->cv_waiters > 0' but the list is empty.
273 * Allow a 100 us range to give kernel an opportunity to coalesce
276 usleep_range(time_left_us
, time_left_us
+ 100);
278 /* No more waiters a different mutex could be used */
279 if (atomic_dec_and_test(&cvp
->cv_waiters
)) {
281 * This is set without any lock, so it's racy. But this is
282 * just for debug anyway, so make it best-effort
284 cvp
->cv_mutex
= NULL
;
285 wake_up(&cvp
->cv_destroy
);
288 finish_wait(&cvp
->cv_event
, &wait
);
289 atomic_dec(&cvp
->cv_refs
);
292 time_left
= expire_time
- gethrtime();
293 return (time_left
> 0 ? time_left
: -1);
297 * Compatibility wrapper for the cv_timedwait_hires() Illumos interface.
300 cv_timedwait_hires(kcondvar_t
*cvp
, kmutex_t
*mp
, hrtime_t tim
, hrtime_t res
,
305 * Align expiration to the specified resolution.
307 if (flag
& CALLOUT_FLAG_ROUNDUP
)
309 tim
= (tim
/ res
) * res
;
312 if (!(flag
& CALLOUT_FLAG_ABSOLUTE
))
315 return (__cv_timedwait_hires(cvp
, mp
, tim
, TASK_UNINTERRUPTIBLE
));
317 EXPORT_SYMBOL(cv_timedwait_hires
);
320 __cv_signal(kcondvar_t
*cvp
)
323 ASSERT(cvp
->cv_magic
== CV_MAGIC
);
324 atomic_inc(&cvp
->cv_refs
);
327 * All waiters are added with WQ_FLAG_EXCLUSIVE so only one
328 * waiter will be set runable with each call to wake_up().
329 * Additionally wake_up() holds a spin_lock assoicated with
330 * the wait queue to ensure we don't race waking up processes.
332 if (atomic_read(&cvp
->cv_waiters
) > 0)
333 wake_up(&cvp
->cv_event
);
335 atomic_dec(&cvp
->cv_refs
);
337 EXPORT_SYMBOL(__cv_signal
);
340 __cv_broadcast(kcondvar_t
*cvp
)
343 ASSERT(cvp
->cv_magic
== CV_MAGIC
);
344 atomic_inc(&cvp
->cv_refs
);
347 * Wake_up_all() will wake up all waiters even those which
348 * have the WQ_FLAG_EXCLUSIVE flag set.
350 if (atomic_read(&cvp
->cv_waiters
) > 0)
351 wake_up_all(&cvp
->cv_event
);
353 atomic_dec(&cvp
->cv_refs
);
355 EXPORT_SYMBOL(__cv_broadcast
);