1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf. DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://github.com/behlendorf/spl/>.
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting Layer (SPL) Condition Variable Implementation.
25 \*****************************************************************************/
27 #include <sys/condvar.h>
28 #include <spl-debug.h>
30 #ifdef SS_DEBUG_SUBSYS
31 #undef SS_DEBUG_SUBSYS
34 #define SS_DEBUG_SUBSYS SS_CONDVAR
37 __cv_init(kcondvar_t
*cvp
, char *name
, kcv_type_t type
, void *arg
)
44 ASSERT(type
== CV_DEFAULT
);
47 cvp
->cv_magic
= CV_MAGIC
;
48 init_waitqueue_head(&cvp
->cv_event
);
49 init_waitqueue_head(&cvp
->cv_destroy
);
50 atomic_set(&cvp
->cv_waiters
, 0);
51 atomic_set(&cvp
->cv_refs
, 1);
54 /* We may be called when there is a non-zero preempt_count or
55 * interrupts are disabled is which case we must not sleep.
57 if (current_thread_info()->preempt_count
|| irqs_disabled())
62 EXPORT_SYMBOL(__cv_init
);
65 cv_destroy_wakeup(kcondvar_t
*cvp
)
67 if (!atomic_read(&cvp
->cv_waiters
) && !atomic_read(&cvp
->cv_refs
)) {
68 ASSERT(cvp
->cv_mutex
== NULL
);
69 ASSERT(!waitqueue_active(&cvp
->cv_event
));
77 __cv_destroy(kcondvar_t
*cvp
)
81 ASSERT(cvp
->cv_magic
== CV_MAGIC
);
83 cvp
->cv_magic
= CV_DESTROY
;
84 atomic_dec(&cvp
->cv_refs
);
86 /* Block until all waiters are woken and references dropped. */
87 while (cv_destroy_wakeup(cvp
) == 0)
88 wait_event_timeout(cvp
->cv_destroy
, cv_destroy_wakeup(cvp
), 1);
90 ASSERT3P(cvp
->cv_mutex
, ==, NULL
);
91 ASSERT3S(atomic_read(&cvp
->cv_refs
), ==, 0);
92 ASSERT3S(atomic_read(&cvp
->cv_waiters
), ==, 0);
93 ASSERT3S(waitqueue_active(&cvp
->cv_event
), ==, 0);
97 EXPORT_SYMBOL(__cv_destroy
);
100 cv_wait_common(kcondvar_t
*cvp
, kmutex_t
*mp
, int state
, int io
)
107 ASSERT(cvp
->cv_magic
== CV_MAGIC
);
108 ASSERT(mutex_owned(mp
));
109 atomic_inc(&cvp
->cv_refs
);
111 if (cvp
->cv_mutex
== NULL
)
114 /* Ensure the same mutex is used by all callers */
115 ASSERT(cvp
->cv_mutex
== mp
);
117 prepare_to_wait_exclusive(&cvp
->cv_event
, &wait
, state
);
118 atomic_inc(&cvp
->cv_waiters
);
120 /* Mutex should be dropped after prepare_to_wait() this
121 * ensures we're linked in to the waiters list and avoids the
122 * race where 'cvp->cv_waiters > 0' but the list is empty. */
130 /* No more waiters a different mutex could be used */
131 if (atomic_dec_and_test(&cvp
->cv_waiters
)) {
132 cvp
->cv_mutex
= NULL
;
133 wake_up(&cvp
->cv_destroy
);
136 finish_wait(&cvp
->cv_event
, &wait
);
137 atomic_dec(&cvp
->cv_refs
);
143 __cv_wait(kcondvar_t
*cvp
, kmutex_t
*mp
)
145 cv_wait_common(cvp
, mp
, TASK_UNINTERRUPTIBLE
, 0);
147 EXPORT_SYMBOL(__cv_wait
);
150 __cv_wait_interruptible(kcondvar_t
*cvp
, kmutex_t
*mp
)
152 cv_wait_common(cvp
, mp
, TASK_INTERRUPTIBLE
, 0);
154 EXPORT_SYMBOL(__cv_wait_interruptible
);
157 __cv_wait_io(kcondvar_t
*cvp
, kmutex_t
*mp
)
159 cv_wait_common(cvp
, mp
, TASK_UNINTERRUPTIBLE
, 1);
161 EXPORT_SYMBOL(__cv_wait_io
);
163 /* 'expire_time' argument is an absolute wall clock time in jiffies.
164 * Return value is time left (expire_time - now) or -1 if timeout occurred.
167 __cv_timedwait_common(kcondvar_t
*cvp
, kmutex_t
*mp
,
168 clock_t expire_time
, int state
)
176 ASSERT(cvp
->cv_magic
== CV_MAGIC
);
177 ASSERT(mutex_owned(mp
));
178 atomic_inc(&cvp
->cv_refs
);
180 if (cvp
->cv_mutex
== NULL
)
183 /* Ensure the same mutex is used by all callers */
184 ASSERT(cvp
->cv_mutex
== mp
);
186 /* XXX - Does not handle jiffie wrap properly */
187 time_left
= expire_time
- jiffies
;
188 if (time_left
<= 0) {
189 atomic_dec(&cvp
->cv_refs
);
193 prepare_to_wait_exclusive(&cvp
->cv_event
, &wait
, state
);
194 atomic_inc(&cvp
->cv_waiters
);
196 /* Mutex should be dropped after prepare_to_wait() this
197 * ensures we're linked in to the waiters list and avoids the
198 * race where 'cvp->cv_waiters > 0' but the list is empty. */
200 time_left
= schedule_timeout(time_left
);
203 /* No more waiters a different mutex could be used */
204 if (atomic_dec_and_test(&cvp
->cv_waiters
)) {
205 cvp
->cv_mutex
= NULL
;
206 wake_up(&cvp
->cv_destroy
);
209 finish_wait(&cvp
->cv_event
, &wait
);
210 atomic_dec(&cvp
->cv_refs
);
212 SRETURN(time_left
> 0 ? time_left
: -1);
216 __cv_timedwait(kcondvar_t
*cvp
, kmutex_t
*mp
, clock_t exp_time
)
218 return __cv_timedwait_common(cvp
, mp
, exp_time
, TASK_UNINTERRUPTIBLE
);
220 EXPORT_SYMBOL(__cv_timedwait
);
223 __cv_timedwait_interruptible(kcondvar_t
*cvp
, kmutex_t
*mp
, clock_t exp_time
)
225 return __cv_timedwait_common(cvp
, mp
, exp_time
, TASK_INTERRUPTIBLE
);
227 EXPORT_SYMBOL(__cv_timedwait_interruptible
);
230 __cv_signal(kcondvar_t
*cvp
)
234 ASSERT(cvp
->cv_magic
== CV_MAGIC
);
235 atomic_inc(&cvp
->cv_refs
);
237 /* All waiters are added with WQ_FLAG_EXCLUSIVE so only one
238 * waiter will be set runable with each call to wake_up().
239 * Additionally wake_up() holds a spin_lock assoicated with
240 * the wait queue to ensure we don't race waking up processes. */
241 if (atomic_read(&cvp
->cv_waiters
) > 0)
242 wake_up(&cvp
->cv_event
);
244 atomic_dec(&cvp
->cv_refs
);
247 EXPORT_SYMBOL(__cv_signal
);
250 __cv_broadcast(kcondvar_t
*cvp
)
254 ASSERT(cvp
->cv_magic
== CV_MAGIC
);
255 atomic_inc(&cvp
->cv_refs
);
257 /* Wake_up_all() will wake up all waiters even those which
258 * have the WQ_FLAG_EXCLUSIVE flag set. */
259 if (atomic_read(&cvp
->cv_waiters
) > 0)
260 wake_up_all(&cvp
->cv_event
);
262 atomic_dec(&cvp
->cv_refs
);
265 EXPORT_SYMBOL(__cv_broadcast
);