1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://zfsonlinux.org/>.
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
 * Solaris Porting Layer (SPL) Condition Variables Implementation.
25 \*****************************************************************************/
27 #include <sys/condvar.h>
30 __cv_init(kcondvar_t
*cvp
, char *name
, kcv_type_t type
, void *arg
)
34 ASSERT(type
== CV_DEFAULT
);
37 cvp
->cv_magic
= CV_MAGIC
;
38 init_waitqueue_head(&cvp
->cv_event
);
39 init_waitqueue_head(&cvp
->cv_destroy
);
40 atomic_set(&cvp
->cv_waiters
, 0);
41 atomic_set(&cvp
->cv_refs
, 1);
44 EXPORT_SYMBOL(__cv_init
);
47 cv_destroy_wakeup(kcondvar_t
*cvp
)
49 if (!atomic_read(&cvp
->cv_waiters
) && !atomic_read(&cvp
->cv_refs
)) {
50 ASSERT(cvp
->cv_mutex
== NULL
);
51 ASSERT(!waitqueue_active(&cvp
->cv_event
));
59 __cv_destroy(kcondvar_t
*cvp
)
62 ASSERT(cvp
->cv_magic
== CV_MAGIC
);
64 cvp
->cv_magic
= CV_DESTROY
;
65 atomic_dec(&cvp
->cv_refs
);
67 /* Block until all waiters are woken and references dropped. */
68 while (cv_destroy_wakeup(cvp
) == 0)
69 wait_event_timeout(cvp
->cv_destroy
, cv_destroy_wakeup(cvp
), 1);
71 ASSERT3P(cvp
->cv_mutex
, ==, NULL
);
72 ASSERT3S(atomic_read(&cvp
->cv_refs
), ==, 0);
73 ASSERT3S(atomic_read(&cvp
->cv_waiters
), ==, 0);
74 ASSERT3S(waitqueue_active(&cvp
->cv_event
), ==, 0);
76 EXPORT_SYMBOL(__cv_destroy
);
79 cv_wait_common(kcondvar_t
*cvp
, kmutex_t
*mp
, int state
, int io
)
85 ASSERT(cvp
->cv_magic
== CV_MAGIC
);
86 ASSERT(mutex_owned(mp
));
87 atomic_inc(&cvp
->cv_refs
);
89 if (cvp
->cv_mutex
== NULL
)
92 /* Ensure the same mutex is used by all callers */
93 ASSERT(cvp
->cv_mutex
== mp
);
95 prepare_to_wait_exclusive(&cvp
->cv_event
, &wait
, state
);
96 atomic_inc(&cvp
->cv_waiters
);
98 /* Mutex should be dropped after prepare_to_wait() this
99 * ensures we're linked in to the waiters list and avoids the
100 * race where 'cvp->cv_waiters > 0' but the list is empty. */
108 /* No more waiters a different mutex could be used */
109 if (atomic_dec_and_test(&cvp
->cv_waiters
)) {
110 cvp
->cv_mutex
= NULL
;
111 wake_up(&cvp
->cv_destroy
);
114 finish_wait(&cvp
->cv_event
, &wait
);
115 atomic_dec(&cvp
->cv_refs
);
119 __cv_wait(kcondvar_t
*cvp
, kmutex_t
*mp
)
121 cv_wait_common(cvp
, mp
, TASK_UNINTERRUPTIBLE
, 0);
123 EXPORT_SYMBOL(__cv_wait
);
126 __cv_wait_interruptible(kcondvar_t
*cvp
, kmutex_t
*mp
)
128 cv_wait_common(cvp
, mp
, TASK_INTERRUPTIBLE
, 0);
130 EXPORT_SYMBOL(__cv_wait_interruptible
);
133 __cv_wait_io(kcondvar_t
*cvp
, kmutex_t
*mp
)
135 cv_wait_common(cvp
, mp
, TASK_UNINTERRUPTIBLE
, 1);
137 EXPORT_SYMBOL(__cv_wait_io
);
139 /* 'expire_time' argument is an absolute wall clock time in jiffies.
140 * Return value is time left (expire_time - now) or -1 if timeout occurred.
143 __cv_timedwait_common(kcondvar_t
*cvp
, kmutex_t
*mp
,
144 clock_t expire_time
, int state
)
151 ASSERT(cvp
->cv_magic
== CV_MAGIC
);
152 ASSERT(mutex_owned(mp
));
153 atomic_inc(&cvp
->cv_refs
);
155 if (cvp
->cv_mutex
== NULL
)
158 /* Ensure the same mutex is used by all callers */
159 ASSERT(cvp
->cv_mutex
== mp
);
161 /* XXX - Does not handle jiffie wrap properly */
162 time_left
= expire_time
- jiffies
;
163 if (time_left
<= 0) {
164 atomic_dec(&cvp
->cv_refs
);
168 prepare_to_wait_exclusive(&cvp
->cv_event
, &wait
, state
);
169 atomic_inc(&cvp
->cv_waiters
);
171 /* Mutex should be dropped after prepare_to_wait() this
172 * ensures we're linked in to the waiters list and avoids the
173 * race where 'cvp->cv_waiters > 0' but the list is empty. */
175 time_left
= schedule_timeout(time_left
);
178 /* No more waiters a different mutex could be used */
179 if (atomic_dec_and_test(&cvp
->cv_waiters
)) {
180 cvp
->cv_mutex
= NULL
;
181 wake_up(&cvp
->cv_destroy
);
184 finish_wait(&cvp
->cv_event
, &wait
);
185 atomic_dec(&cvp
->cv_refs
);
187 return (time_left
> 0 ? time_left
: -1);
191 __cv_timedwait(kcondvar_t
*cvp
, kmutex_t
*mp
, clock_t exp_time
)
193 return __cv_timedwait_common(cvp
, mp
, exp_time
, TASK_UNINTERRUPTIBLE
);
195 EXPORT_SYMBOL(__cv_timedwait
);
198 __cv_timedwait_interruptible(kcondvar_t
*cvp
, kmutex_t
*mp
, clock_t exp_time
)
200 return __cv_timedwait_common(cvp
, mp
, exp_time
, TASK_INTERRUPTIBLE
);
202 EXPORT_SYMBOL(__cv_timedwait_interruptible
);
205 *'expire_time' argument is an absolute clock time in nanoseconds.
206 * Return value is time left (expire_time - now) or -1 if timeout occurred.
209 __cv_timedwait_hires(kcondvar_t
*cvp
, kmutex_t
*mp
,
210 hrtime_t expire_time
, int state
)
213 hrtime_t time_left
, now
;
214 unsigned long time_left_us
;
218 ASSERT(cvp
->cv_magic
== CV_MAGIC
);
219 ASSERT(mutex_owned(mp
));
220 atomic_inc(&cvp
->cv_refs
);
222 if (cvp
->cv_mutex
== NULL
)
225 /* Ensure the same mutex is used by all callers */
226 ASSERT(cvp
->cv_mutex
== mp
);
229 time_left
= expire_time
- now
;
230 if (time_left
<= 0) {
231 atomic_dec(&cvp
->cv_refs
);
234 time_left_us
= time_left
/ NSEC_PER_USEC
;
236 prepare_to_wait_exclusive(&cvp
->cv_event
, &wait
, state
);
237 atomic_inc(&cvp
->cv_waiters
);
239 /* Mutex should be dropped after prepare_to_wait() this
240 * ensures we're linked in to the waiters list and avoids the
241 * race where 'cvp->cv_waiters > 0' but the list is empty. */
243 /* Allow a 100 us range to give kernel an opportunity to coalesce
245 usleep_range(time_left_us
, time_left_us
+ 100);
248 /* No more waiters a different mutex could be used */
249 if (atomic_dec_and_test(&cvp
->cv_waiters
)) {
250 cvp
->cv_mutex
= NULL
;
251 wake_up(&cvp
->cv_destroy
);
254 finish_wait(&cvp
->cv_event
, &wait
);
255 atomic_dec(&cvp
->cv_refs
);
257 time_left
= expire_time
- gethrtime();
258 return (time_left
> 0 ? time_left
: -1);
262 * Compatibility wrapper for the cv_timedwait_hires() Illumos interface.
265 cv_timedwait_hires(kcondvar_t
*cvp
, kmutex_t
*mp
, hrtime_t tim
,
266 hrtime_t res
, int flag
)
270 * Align expiration to the specified resolution.
272 if (flag
& CALLOUT_FLAG_ROUNDUP
)
274 tim
= (tim
/ res
) * res
;
277 if (!(flag
& CALLOUT_FLAG_ABSOLUTE
))
280 return __cv_timedwait_hires(cvp
, mp
, tim
, TASK_UNINTERRUPTIBLE
);
282 EXPORT_SYMBOL(cv_timedwait_hires
);
285 __cv_signal(kcondvar_t
*cvp
)
288 ASSERT(cvp
->cv_magic
== CV_MAGIC
);
289 atomic_inc(&cvp
->cv_refs
);
291 /* All waiters are added with WQ_FLAG_EXCLUSIVE so only one
292 * waiter will be set runable with each call to wake_up().
293 * Additionally wake_up() holds a spin_lock assoicated with
294 * the wait queue to ensure we don't race waking up processes. */
295 if (atomic_read(&cvp
->cv_waiters
) > 0)
296 wake_up(&cvp
->cv_event
);
298 atomic_dec(&cvp
->cv_refs
);
300 EXPORT_SYMBOL(__cv_signal
);
303 __cv_broadcast(kcondvar_t
*cvp
)
306 ASSERT(cvp
->cv_magic
== CV_MAGIC
);
307 atomic_inc(&cvp
->cv_refs
);
309 /* Wake_up_all() will wake up all waiters even those which
310 * have the WQ_FLAG_EXCLUSIVE flag set. */
311 if (atomic_read(&cvp
->cv_waiters
) > 0)
312 wake_up_all(&cvp
->cv_event
);
314 atomic_dec(&cvp
->cv_refs
);
316 EXPORT_SYMBOL(__cv_broadcast
);