/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Solaris Porting Layer (SPL) Condition Variables Implementation.
 */

#include <sys/condvar.h>
#include <sys/time.h>
#include <linux/hrtimer.h>

void
__cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
	ASSERT(cvp);
	ASSERT(name == NULL);
	ASSERT(type == CV_DEFAULT);
	ASSERT(arg == NULL);

	cvp->cv_magic = CV_MAGIC;
	init_waitqueue_head(&cvp->cv_event);
	init_waitqueue_head(&cvp->cv_destroy);
	atomic_set(&cvp->cv_waiters, 0);
	atomic_set(&cvp->cv_refs, 1);
	cvp->cv_mutex = NULL;
}
EXPORT_SYMBOL(__cv_init);

static int
cv_destroy_wakeup(kcondvar_t *cvp)
{
	if (!atomic_read(&cvp->cv_waiters) && !atomic_read(&cvp->cv_refs)) {
		ASSERT(cvp->cv_mutex == NULL);
		ASSERT(!waitqueue_active(&cvp->cv_event));
		return (1);
	}

	return (0);
}

void
__cv_destroy(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);

	cvp->cv_magic = CV_DESTROY;
	atomic_dec(&cvp->cv_refs);

	/* Block until all waiters are woken and references dropped. */
	while (cv_destroy_wakeup(cvp) == 0)
		wait_event_timeout(cvp->cv_destroy, cv_destroy_wakeup(cvp), 1);

	ASSERT3P(cvp->cv_mutex, ==, NULL);
	ASSERT3S(atomic_read(&cvp->cv_refs), ==, 0);
	ASSERT3S(atomic_read(&cvp->cv_waiters), ==, 0);
	ASSERT3S(waitqueue_active(&cvp->cv_event), ==, 0);
}
EXPORT_SYMBOL(__cv_destroy);
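
/*
 * Illustrative teardown ordering (a sketch; 'lock', 'cv', and 'shutdown' are
 * hypothetical caller state, not part of this file):
 *
 *	mutex_enter(&lock);
 *	shutdown = B_TRUE;
 *	cv_broadcast(&cv);
 *	mutex_exit(&lock);
 *	cv_destroy(&cv);	// __cv_destroy() above blocks until the last
 *				// waiter and reference are gone
 */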

static void
cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state, int io)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));
	atomic_inc(&cvp->cv_refs);

	m = ACCESS_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers */
	ASSERT(m == NULL || m == mp);

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * The mutex is dropped after prepare_to_wait(); this ensures we are
	 * linked in to the waiters list and avoids the race where
	 * 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	if (io)
		io_schedule();
	else
		schedule();

	/* No more waiters, so a different mutex could be used */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy.  But this is
		 * just for debug anyway, so make it best-effort.
		 */
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	/*
	 * Reacquire the mutex only after releasing our cvp reference,
	 * otherwise we could deadlock with a thread that holds the mutex
	 * and calls cv_destroy().
	 */
	mutex_enter(mp);
}

void
__cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait);

void
__cv_wait_sig(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_INTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait_sig);

void
__cv_wait_io(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 1);
}
EXPORT_SYMBOL(__cv_wait_io);
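
/*
 * Illustrative caller pattern for the wait primitives above (a sketch; the
 * 'lock', 'cv', and 'condition' names are hypothetical caller state):
 *
 *	mutex_enter(&lock);
 *	while (!condition)
 *		cv_wait(&cv, &lock);
 *	...				// condition holds, lock still held
 *	mutex_exit(&lock);
 *
 * cv_wait_sig() follows the same pattern but may also wake up when a signal
 * is pending, which is why the condition is rechecked in a loop.
 */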

/*
 * 'expire_time' argument is an absolute wall clock time in jiffies.
 * Return value is time left (expire_time - now) or -1 if timeout occurred.
 */
static clock_t
__cv_timedwait_common(kcondvar_t *cvp, kmutex_t *mp, clock_t expire_time,
    int state)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;
	clock_t time_left;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));
	atomic_inc(&cvp->cv_refs);

	m = ACCESS_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers */
	ASSERT(m == NULL || m == mp);

	/* XXX - Does not handle jiffie wrap properly */
	time_left = expire_time - jiffies;
	if (time_left <= 0) {
		/* XXX - doesn't reset cv_mutex */
		atomic_dec(&cvp->cv_refs);
		return (-1);
	}

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * The mutex is dropped after prepare_to_wait(); this ensures we are
	 * linked in to the waiters list and avoids the race where
	 * 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	time_left = schedule_timeout(time_left);

	/* No more waiters, so a different mutex could be used */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy.  But this is
		 * just for debug anyway, so make it best-effort.
		 */
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	/*
	 * Reacquire the mutex only after releasing our cvp reference,
	 * otherwise we could deadlock with a thread that holds the mutex
	 * and calls cv_destroy().
	 */
	mutex_enter(mp);
	return (time_left > 0 ? time_left : -1);
}

clock_t
__cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return (__cv_timedwait_common(cvp, mp, exp_time, TASK_UNINTERRUPTIBLE));
}
EXPORT_SYMBOL(__cv_timedwait);

clock_t
__cv_timedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return (__cv_timedwait_common(cvp, mp, exp_time, TASK_INTERRUPTIBLE));
}
EXPORT_SYMBOL(__cv_timedwait_sig);
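
/*
 * Illustrative use of the jiffies-based timed wait (a sketch, assuming the
 * usual SPL timer helpers ddi_get_lbolt() and SEC_TO_TICK(); 'lock', 'cv',
 * and 'done' are hypothetical caller state):
 *
 *	mutex_enter(&lock);
 *	while (!done) {
 *		// Wait up to one second; -1 means the timeout expired.
 *		if (cv_timedwait(&cv, &lock,
 *		    ddi_get_lbolt() + SEC_TO_TICK(1)) == -1)
 *			break;
 *	}
 *	mutex_exit(&lock);
 */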

/*
 * 'expire_time' argument is an absolute clock time in nanoseconds.
 * Return value is time left (expire_time - now) or -1 if timeout occurred.
 */
static clock_t
__cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t expire_time,
    int state)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;
	hrtime_t time_left, now;
	ktime_t ktime_left;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));
	atomic_inc(&cvp->cv_refs);

	m = ACCESS_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers */
	ASSERT(m == NULL || m == mp);

	now = gethrtime();
	time_left = expire_time - now;
	if (time_left <= 0) {
		atomic_dec(&cvp->cv_refs);
		return (-1);
	}

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * The mutex is dropped after prepare_to_wait(); this ensures we are
	 * linked in to the waiters list and avoids the race where
	 * 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	/*
	 * Allow a 100 us range to give the kernel an opportunity to
	 * coalesce interrupts.
	 */
	ktime_left = ktime_set(0, time_left);
	schedule_hrtimeout_range(&ktime_left, 100 * NSEC_PER_USEC,
	    HRTIMER_MODE_REL);

	/* No more waiters, so a different mutex could be used */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy.  But this is
		 * just for debug anyway, so make it best-effort.
		 */
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	mutex_enter(mp);
	time_left = expire_time - gethrtime();
	return (time_left > 0 ? NSEC_TO_TICK(time_left) : -1);
}

/*
 * Compatibility wrapper for the cv_timedwait_hires() Illumos interface.
 */
static clock_t
cv_timedwait_hires_common(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
    hrtime_t res, int flag, int state)
{
	if (res > 1) {
		/*
		 * Align expiration to the specified resolution.
		 */
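		/*
		 * For example (illustrative numbers only): with
		 * res = 1000000 (1 ms) and tim = 2500000, the default
		 * truncation below yields 2000000, while
		 * CALLOUT_FLAG_ROUNDUP first biases tim to 3499999 so
		 * the result rounds up to 3000000.
		 */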
		if (flag & CALLOUT_FLAG_ROUNDUP)
			tim += res - 1;
		tim = (tim / res) * res;
	}

	ASSERT(!(flag & CALLOUT_FLAG_ABSOLUTE));
	/* Convert the relative 'tim' to an absolute expiration time. */
	tim += gethrtime();

	return (__cv_timedwait_hires(cvp, mp, tim, state));
}

clock_t
cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim, hrtime_t res,
    int flag)
{
	return (cv_timedwait_hires_common(cvp, mp, tim, res, flag,
	    TASK_UNINTERRUPTIBLE));
}
EXPORT_SYMBOL(cv_timedwait_hires);

clock_t
cv_timedwait_sig_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
    hrtime_t res, int flag)
{
	return (cv_timedwait_hires_common(cvp, mp, tim, res, flag,
	    TASK_INTERRUPTIBLE));
}
EXPORT_SYMBOL(cv_timedwait_sig_hires);
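
/*
 * Illustrative use of the high-resolution variant (a sketch; 'lock' and 'cv'
 * are hypothetical caller state, and NSEC_PER_MSEC is the standard Linux
 * nanoseconds-per-millisecond constant):
 *
 *	mutex_enter(&lock);
 *	// Relative 100 ms timeout, aligned to a 1 ms resolution.
 *	(void) cv_timedwait_hires(&cv, &lock, 100 * NSEC_PER_MSEC,
 *	    NSEC_PER_MSEC, 0);
 *	mutex_exit(&lock);
 */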

void
__cv_signal(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	atomic_inc(&cvp->cv_refs);

	/*
	 * All waiters are added with WQ_FLAG_EXCLUSIVE, so only one
	 * waiter will be set runnable with each call to wake_up().
	 * Additionally, wake_up() holds a spin_lock associated with
	 * the wait queue to ensure we don't race waking up processes.
	 */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up(&cvp->cv_event);

	atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_signal);

void
__cv_broadcast(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	atomic_inc(&cvp->cv_refs);

	/*
	 * wake_up_all() will wake up all waiters, even those which
	 * have the WQ_FLAG_EXCLUSIVE flag set.
	 */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up_all(&cvp->cv_event);

	atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_broadcast);