/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Solaris Porting Layer (SPL) Condition Variables Implementation.
 */

#include <sys/condvar.h>
#include <sys/time.h>

void
__cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
	ASSERT(cvp);
	ASSERT(name == NULL);
	ASSERT(type == CV_DEFAULT);
	ASSERT(arg == NULL);

	cvp->cv_magic = CV_MAGIC;
	init_waitqueue_head(&cvp->cv_event);
	init_waitqueue_head(&cvp->cv_destroy);
	atomic_set(&cvp->cv_waiters, 0);
	atomic_set(&cvp->cv_refs, 1);
	cvp->cv_mutex = NULL;
}
EXPORT_SYMBOL(__cv_init);

/*
 * Returns 1 once the last waiter and reference have dropped, allowing
 * __cv_destroy() below to stop waiting.
 */
static int
cv_destroy_wakeup(kcondvar_t *cvp)
{
	if (!atomic_read(&cvp->cv_waiters) && !atomic_read(&cvp->cv_refs)) {
		ASSERT(cvp->cv_mutex == NULL);
		ASSERT(!waitqueue_active(&cvp->cv_event));
		return (1);
	}

	return (0);
}

void
__cv_destroy(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);

	cvp->cv_magic = CV_DESTROY;
	atomic_dec(&cvp->cv_refs);

	/* Block until all waiters are woken and references dropped. */
	while (cv_destroy_wakeup(cvp) == 0)
		wait_event_timeout(cvp->cv_destroy, cv_destroy_wakeup(cvp), 1);

	ASSERT3P(cvp->cv_mutex, ==, NULL);
	ASSERT3S(atomic_read(&cvp->cv_refs), ==, 0);
	ASSERT3S(atomic_read(&cvp->cv_waiters), ==, 0);
	ASSERT3S(waitqueue_active(&cvp->cv_event), ==, 0);
}
EXPORT_SYMBOL(__cv_destroy);

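/*
 * Teardown usage sketch (hypothetical 'lock', 'cv' and 'shutdown' names,
 * not part of this file): waiters are woken with cv_broadcast() before
 * cv_destroy() is called; cv_destroy() then blocks until cv_waiters and
 * cv_refs have drained to zero.
 *
 *	mutex_enter(&lock);
 *	shutdown = B_TRUE;
 *	cv_broadcast(&cv);
 *	mutex_exit(&lock);
 *	cv_destroy(&cv);
 *	mutex_destroy(&lock);
 */
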
static void
cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state, int io)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));
	atomic_inc(&cvp->cv_refs);

	m = ACCESS_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers. */
	ASSERT(m == NULL || m == mp);

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * The mutex is dropped only after prepare_to_wait(); this ensures
	 * we're linked onto the waiters list and avoids the race where
	 * 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	if (io)
		io_schedule();
	else
		schedule();

	/* No more waiters, so a different mutex could be used. */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy.  But this is
		 * just for debug anyway, so make it best-effort.
		 */
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	/*
	 * Reacquire the mutex only after we release the cvp; otherwise we
	 * could deadlock with a thread that holds the mutex and calls
	 * cv_destroy().
	 */
	mutex_enter(mp);
}

void
__cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait);

void
__cv_wait_sig(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_INTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait_sig);

void
__cv_wait_io(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 1);
}
EXPORT_SYMBOL(__cv_wait_io);

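/*
 * Consumer-side usage sketch (hypothetical 'lock', 'cv' and 'ready' names,
 * not part of this file): the predicate is always re-checked in a loop
 * while holding the mutex, since the wait may end whenever the queue is
 * woken.
 *
 *	mutex_enter(&lock);
 *	while (!ready)
 *		cv_wait(&cv, &lock);
 *	mutex_exit(&lock);
 *
 * cv_wait_sig() behaves the same but sleeps interruptibly, and
 * cv_wait_io() accounts the sleep as I/O wait via io_schedule().
 */
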
/*
 * The 'expire_time' argument is an absolute wall clock time in jiffies.
 * The return value is the time left (expire_time - now) or -1 if the
 * timeout occurred.
 */
static clock_t
__cv_timedwait_common(kcondvar_t *cvp, kmutex_t *mp, clock_t expire_time,
    int state)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;
	clock_t time_left;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));
	atomic_inc(&cvp->cv_refs);

	m = ACCESS_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers. */
	ASSERT(m == NULL || m == mp);

	/* XXX - Does not handle jiffy wrap properly */
	time_left = expire_time - jiffies;
	if (time_left <= 0) {
		/* XXX - doesn't reset cv_mutex */
		atomic_dec(&cvp->cv_refs);
		return (-1);
	}

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * The mutex is dropped only after prepare_to_wait(); this ensures
	 * we're linked onto the waiters list and avoids the race where
	 * 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	time_left = schedule_timeout(time_left);

	/* No more waiters, so a different mutex could be used. */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy.  But this is
		 * just for debug anyway, so make it best-effort.
		 */
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	/*
	 * Reacquire the mutex only after we release the cvp; otherwise we
	 * could deadlock with a thread that holds the mutex and calls
	 * cv_destroy().
	 */
	mutex_enter(mp);
	return (time_left > 0 ? time_left : -1);
}

clock_t
__cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return (__cv_timedwait_common(cvp, mp, exp_time, TASK_UNINTERRUPTIBLE));
}
EXPORT_SYMBOL(__cv_timedwait);

clock_t
__cv_timedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return (__cv_timedwait_common(cvp, mp, exp_time, TASK_INTERRUPTIBLE));
}
EXPORT_SYMBOL(__cv_timedwait_sig);

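/*
 * Timed-wait usage sketch (hypothetical 'lock', 'cv', 'ready' and
 * 'timeout_ticks' names, not part of this file): the expiration passed to
 * cv_timedwait() is an absolute time in jiffies, typically derived from
 * ddi_get_lbolt(), and -1 is returned once that deadline has passed.
 *
 *	clock_t deadline = ddi_get_lbolt() + timeout_ticks;
 *
 *	mutex_enter(&lock);
 *	while (!ready && cv_timedwait(&cv, &lock, deadline) != -1)
 *		continue;
 *	mutex_exit(&lock);
 */
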
/*
 * The 'expire_time' argument is an absolute clock time in nanoseconds.
 * The return value is the time left (expire_time - now) or -1 if the
 * timeout occurred.
 */
static clock_t
__cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t expire_time,
    int state)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;
	hrtime_t time_left, now;
	unsigned long time_left_us;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));
	atomic_inc(&cvp->cv_refs);

	m = ACCESS_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers. */
	ASSERT(m == NULL || m == mp);

	now = gethrtime();
	time_left = expire_time - now;
	if (time_left <= 0) {
		atomic_dec(&cvp->cv_refs);
		return (-1);
	}
	time_left_us = time_left / NSEC_PER_USEC;

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * The mutex is dropped only after prepare_to_wait(); this ensures
	 * we're linked onto the waiters list and avoids the race where
	 * 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	/*
	 * Allow a 100 us range to give the kernel an opportunity to
	 * coalesce interrupts.
	 */
	usleep_range(time_left_us, time_left_us + 100);

	/* No more waiters, so a different mutex could be used. */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy.  But this is
		 * just for debug anyway, so make it best-effort.
		 */
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	mutex_enter(mp);
	time_left = expire_time - gethrtime();
	return (time_left > 0 ? time_left : -1);
}

/*
 * Compatibility wrapper for the cv_timedwait_hires() Illumos interface.
 */
clock_t
cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim, hrtime_t res,
    int flag)
{
	if (res > 1) {
		/*
		 * Align expiration to the specified resolution.
		 */
		if (flag & CALLOUT_FLAG_ROUNDUP)
			tim += res - 1;
		tim = (tim / res) * res;
	}

	if (!(flag & CALLOUT_FLAG_ABSOLUTE))
		tim += gethrtime();

	return (__cv_timedwait_hires(cvp, mp, tim, TASK_UNINTERRUPTIBLE));
}
EXPORT_SYMBOL(cv_timedwait_hires);

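/*
 * High-resolution usage sketch (hypothetical 'lock' and 'cv' names, not
 * part of this file): with CALLOUT_FLAG_ABSOLUTE unset the wrapper adds
 * gethrtime() itself, so the caller supplies only the relative delay and
 * the resolution, both in nanoseconds.
 *
 *	mutex_enter(&lock);
 *	(void) cv_timedwait_hires(&cv, &lock, 10 * 1000 * 1000,
 *	    10 * 1000, CALLOUT_FLAG_ROUNDUP);
 *	mutex_exit(&lock);
 *
 * Here a 10 ms delay is aligned up to a 10 us boundary before being
 * converted to an absolute expiration time.
 */
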
void
__cv_signal(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	atomic_inc(&cvp->cv_refs);

	/*
	 * All waiters are added with WQ_FLAG_EXCLUSIVE, so only one
	 * waiter will be set runnable with each call to wake_up().
	 * Additionally wake_up() holds a spin_lock associated with
	 * the wait queue to ensure we don't race waking up processes.
	 */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up(&cvp->cv_event);

	atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_signal);

void
__cv_broadcast(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	atomic_inc(&cvp->cv_refs);

	/*
	 * wake_up_all() will wake up all waiters, even those which
	 * have the WQ_FLAG_EXCLUSIVE flag set.
	 */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up_all(&cvp->cv_event);

	atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_broadcast);
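
/*
 * Producer-side usage sketch (hypothetical 'lock', 'cv' and 'ready' names,
 * not part of this file): the predicate is updated under the mutex before
 * waking waiters, so a consumer re-checking it after cv_wait() observes a
 * consistent value.  cv_signal() wakes a single exclusive waiter, while
 * cv_broadcast() wakes them all.
 *
 *	mutex_enter(&lock);
 *	ready = B_TRUE;
 *	cv_signal(&cv);
 *	mutex_exit(&lock);
 */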