]> git.proxmox.com Git - mirror_spl.git/blame - module/spl/spl-condvar.c
Fix cv_timedwait timeout
[mirror_spl.git] / module / spl / spl-condvar.c
CommitLineData
23453686 1/*
716154c5
BB
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
715f6251 6 * UCRL-CODE-235197
7 *
716154c5 8 * This file is part of the SPL, Solaris Porting Layer.
3d6af2dd 9 * For details, see <http://zfsonlinux.org/>.
716154c5
BB
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
715f6251 15 *
716154c5 16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
715f6251 17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
716154c5 22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23453686 23 *
 * Solaris Porting Layer (SPL) Condition Variables Implementation.
23453686 25 */
715f6251 26
4efd4118 27#include <sys/condvar.h>
e5b9b344 28#include <sys/time.h>
39cd90ef 29#include <linux/hrtimer.h>
4efd4118 30
31void
32__cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
33{
4efd4118 34 ASSERT(cvp);
b29012b9 35 ASSERT(name == NULL);
4efd4118 36 ASSERT(type == CV_DEFAULT);
37 ASSERT(arg == NULL);
38
39 cvp->cv_magic = CV_MAGIC;
40 init_waitqueue_head(&cvp->cv_event);
d599e4fa 41 init_waitqueue_head(&cvp->cv_destroy);
4efd4118 42 atomic_set(&cvp->cv_waiters, 0);
d2733258 43 atomic_set(&cvp->cv_refs, 1);
4efd4118 44 cvp->cv_mutex = NULL;
4efd4118 45}
46EXPORT_SYMBOL(__cv_init);
47
d599e4fa
BB
48static int
49cv_destroy_wakeup(kcondvar_t *cvp)
50{
d2733258
BB
51 if (!atomic_read(&cvp->cv_waiters) && !atomic_read(&cvp->cv_refs)) {
52 ASSERT(cvp->cv_mutex == NULL);
53 ASSERT(!waitqueue_active(&cvp->cv_event));
23453686 54 return (1);
d2733258 55 }
d599e4fa 56
23453686 57 return (0);
d599e4fa
BB
58}
59
4efd4118 60void
61__cv_destroy(kcondvar_t *cvp)
62{
4efd4118 63 ASSERT(cvp);
64 ASSERT(cvp->cv_magic == CV_MAGIC);
d599e4fa 65
d2733258
BB
66 cvp->cv_magic = CV_DESTROY;
67 atomic_dec(&cvp->cv_refs);
68
69 /* Block until all waiters are woken and references dropped. */
d599e4fa
BB
70 while (cv_destroy_wakeup(cvp) == 0)
71 wait_event_timeout(cvp->cv_destroy, cv_destroy_wakeup(cvp), 1);
72
3c60f505 73 ASSERT3P(cvp->cv_mutex, ==, NULL);
d2733258 74 ASSERT3S(atomic_read(&cvp->cv_refs), ==, 0);
3c60f505
BB
75 ASSERT3S(atomic_read(&cvp->cv_waiters), ==, 0);
76 ASSERT3S(waitqueue_active(&cvp->cv_event), ==, 0);
4efd4118 77}
78EXPORT_SYMBOL(__cv_destroy);
79
f752b46e 80static void
46a75aad 81cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state, int io)
4efd4118 82{
83 DEFINE_WAIT(wait);
e843553d 84 kmutex_t *m;
4efd4118 85
86 ASSERT(cvp);
23453686 87 ASSERT(mp);
4efd4118 88 ASSERT(cvp->cv_magic == CV_MAGIC);
4efd4118 89 ASSERT(mutex_owned(mp));
d2733258 90 atomic_inc(&cvp->cv_refs);
4efd4118 91
e843553d
CC
92 m = ACCESS_ONCE(cvp->cv_mutex);
93 if (!m)
94 m = xchg(&cvp->cv_mutex, mp);
4efd4118 95 /* Ensure the same mutex is used by all callers */
e843553d 96 ASSERT(m == NULL || m == mp);
4efd4118 97
f752b46e 98 prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
4efd4118 99 atomic_inc(&cvp->cv_waiters);
100
23453686
BB
101 /*
102 * Mutex should be dropped after prepare_to_wait() this
4efd4118 103 * ensures we're linked in to the waiters list and avoids the
23453686
BB
104 * race where 'cvp->cv_waiters > 0' but the list is empty.
105 */
4efd4118 106 mutex_exit(mp);
46a75aad
MJ
107 if (io)
108 io_schedule();
109 else
110 schedule();
4efd4118 111
058de03c 112 /* No more waiters a different mutex could be used */
d599e4fa 113 if (atomic_dec_and_test(&cvp->cv_waiters)) {
e843553d
CC
114 /*
115 * This is set without any lock, so it's racy. But this is
116 * just for debug anyway, so make it best-effort
117 */
058de03c 118 cvp->cv_mutex = NULL;
d599e4fa
BB
119 wake_up(&cvp->cv_destroy);
120 }
058de03c 121
4efd4118 122 finish_wait(&cvp->cv_event, &wait);
d2733258 123 atomic_dec(&cvp->cv_refs);
e843553d
CC
124
125 /*
126 * Hold mutex after we release the cvp, otherwise we could dead lock
127 * with a thread holding the mutex and call cv_destroy.
128 */
129 mutex_enter(mp);
4efd4118 130}
f752b46e
BB
131
132void
133__cv_wait(kcondvar_t *cvp, kmutex_t *mp)
134{
46a75aad 135 cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 0);
f752b46e 136}
4efd4118 137EXPORT_SYMBOL(__cv_wait);
138
f752b46e 139void
23453686 140__cv_wait_sig(kcondvar_t *cvp, kmutex_t *mp)
f752b46e 141{
46a75aad 142 cv_wait_common(cvp, mp, TASK_INTERRUPTIBLE, 0);
f752b46e 143}
23453686 144EXPORT_SYMBOL(__cv_wait_sig);
f752b46e 145
46a75aad
MJ
146void
147__cv_wait_io(kcondvar_t *cvp, kmutex_t *mp)
148{
149 cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 1);
150}
151EXPORT_SYMBOL(__cv_wait_io);
152
23453686
BB
153/*
154 * 'expire_time' argument is an absolute wall clock time in jiffies.
4efd4118 155 * Return value is time left (expire_time - now) or -1 if timeout occurred.
156 */
3f688a8c 157static clock_t
23453686
BB
158__cv_timedwait_common(kcondvar_t *cvp, kmutex_t *mp, clock_t expire_time,
159 int state)
4efd4118 160{
161 DEFINE_WAIT(wait);
e843553d 162 kmutex_t *m;
4efd4118 163 clock_t time_left;
4efd4118 164
165 ASSERT(cvp);
23453686 166 ASSERT(mp);
4efd4118 167 ASSERT(cvp->cv_magic == CV_MAGIC);
4efd4118 168 ASSERT(mutex_owned(mp));
169
2ded1c7e
BB
170 /* XXX - Does not handle jiffie wrap properly */
171 time_left = expire_time - jiffies;
172 if (time_left <= 0)
173 return (-1);
174
175 atomic_inc(&cvp->cv_refs);
e843553d
CC
176 m = ACCESS_ONCE(cvp->cv_mutex);
177 if (!m)
178 m = xchg(&cvp->cv_mutex, mp);
4efd4118 179 /* Ensure the same mutex is used by all callers */
e843553d 180 ASSERT(m == NULL || m == mp);
4efd4118 181
3f688a8c 182 prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
4efd4118 183 atomic_inc(&cvp->cv_waiters);
184
23453686
BB
185 /*
186 * Mutex should be dropped after prepare_to_wait() this
4efd4118 187 * ensures we're linked in to the waiters list and avoids the
23453686
BB
188 * race where 'cvp->cv_waiters > 0' but the list is empty.
189 */
4efd4118 190 mutex_exit(mp);
191 time_left = schedule_timeout(time_left);
4efd4118 192
058de03c 193 /* No more waiters a different mutex could be used */
d599e4fa 194 if (atomic_dec_and_test(&cvp->cv_waiters)) {
e843553d
CC
195 /*
196 * This is set without any lock, so it's racy. But this is
197 * just for debug anyway, so make it best-effort
198 */
058de03c 199 cvp->cv_mutex = NULL;
d599e4fa
BB
200 wake_up(&cvp->cv_destroy);
201 }
058de03c 202
4efd4118 203 finish_wait(&cvp->cv_event, &wait);
d2733258 204 atomic_dec(&cvp->cv_refs);
4efd4118 205
e843553d
CC
206 /*
207 * Hold mutex after we release the cvp, otherwise we could dead lock
208 * with a thread holding the mutex and call cv_destroy.
209 */
210 mutex_enter(mp);
8d9a23e8 211 return (time_left > 0 ? time_left : -1);
4efd4118 212}
3f688a8c
NK
213
214clock_t
215__cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
216{
23453686 217 return (__cv_timedwait_common(cvp, mp, exp_time, TASK_UNINTERRUPTIBLE));
3f688a8c 218}
4efd4118 219EXPORT_SYMBOL(__cv_timedwait);
220
3f688a8c 221clock_t
23453686 222__cv_timedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
3f688a8c 223{
23453686 224 return (__cv_timedwait_common(cvp, mp, exp_time, TASK_INTERRUPTIBLE));
3f688a8c 225}
23453686 226EXPORT_SYMBOL(__cv_timedwait_sig);
3f688a8c 227
184c6873 228/*
23453686 229 * 'expire_time' argument is an absolute clock time in nanoseconds.
184c6873
NB
230 * Return value is time left (expire_time - now) or -1 if timeout occurred.
231 */
232static clock_t
23453686
BB
233__cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t expire_time,
234 int state)
184c6873
NB
235{
236 DEFINE_WAIT(wait);
e843553d 237 kmutex_t *m;
2ded1c7e 238 hrtime_t time_left;
39cd90ef 239 ktime_t ktime_left;
184c6873
NB
240
241 ASSERT(cvp);
242 ASSERT(mp);
243 ASSERT(cvp->cv_magic == CV_MAGIC);
244 ASSERT(mutex_owned(mp));
184c6873 245
2ded1c7e
BB
246 time_left = expire_time - gethrtime();
247 if (time_left <= 0)
248 return (-1);
249
250 atomic_inc(&cvp->cv_refs);
e843553d
CC
251 m = ACCESS_ONCE(cvp->cv_mutex);
252 if (!m)
253 m = xchg(&cvp->cv_mutex, mp);
184c6873 254 /* Ensure the same mutex is used by all callers */
e843553d 255 ASSERT(m == NULL || m == mp);
184c6873 256
184c6873
NB
257 prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
258 atomic_inc(&cvp->cv_waiters);
259
23453686
BB
260 /*
261 * Mutex should be dropped after prepare_to_wait() this
184c6873 262 * ensures we're linked in to the waiters list and avoids the
23453686
BB
263 * race where 'cvp->cv_waiters > 0' but the list is empty.
264 */
184c6873 265 mutex_exit(mp);
23453686
BB
266 /*
267 * Allow a 100 us range to give kernel an opportunity to coalesce
268 * interrupts
269 */
39cd90ef
CC
270 ktime_left = ktime_set(0, time_left);
271 schedule_hrtimeout_range(&ktime_left, 100 * NSEC_PER_USEC,
272 HRTIMER_MODE_REL);
184c6873
NB
273
274 /* No more waiters a different mutex could be used */
275 if (atomic_dec_and_test(&cvp->cv_waiters)) {
e843553d
CC
276 /*
277 * This is set without any lock, so it's racy. But this is
278 * just for debug anyway, so make it best-effort
279 */
184c6873
NB
280 cvp->cv_mutex = NULL;
281 wake_up(&cvp->cv_destroy);
282 }
283
284 finish_wait(&cvp->cv_event, &wait);
285 atomic_dec(&cvp->cv_refs);
286
e843553d 287 mutex_enter(mp);
184c6873 288 time_left = expire_time - gethrtime();
39cd90ef 289 return (time_left > 0 ? NSEC_TO_TICK(time_left) : -1);
184c6873
NB
290}
291
292/*
293 * Compatibility wrapper for the cv_timedwait_hires() Illumos interface.
294 */
39cd90ef
CC
295static clock_t
296cv_timedwait_hires_common(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim, hrtime_t res,
297 int flag, int state)
184c6873
NB
298{
299 if (res > 1) {
300 /*
301 * Align expiration to the specified resolution.
302 */
303 if (flag & CALLOUT_FLAG_ROUNDUP)
304 tim += res - 1;
305 tim = (tim / res) * res;
306 }
307
872e0cc9
CC
308 if (!(flag & CALLOUT_FLAG_ABSOLUTE))
309 tim += gethrtime();
39cd90ef
CC
310
311 return (__cv_timedwait_hires(cvp, mp, tim, state));
312}
184c6873 313
39cd90ef
CC
314clock_t
315cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim, hrtime_t res,
316 int flag)
317{
318 return (cv_timedwait_hires_common(cvp, mp, tim, res, flag,
319 TASK_UNINTERRUPTIBLE));
184c6873
NB
320}
321EXPORT_SYMBOL(cv_timedwait_hires);
322
39cd90ef
CC
323clock_t
324cv_timedwait_sig_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim, hrtime_t res,
325 int flag)
326{
327 return (cv_timedwait_hires_common(cvp, mp, tim, res, flag,
328 TASK_INTERRUPTIBLE));
329}
330EXPORT_SYMBOL(cv_timedwait_sig_hires);
331
4efd4118 332void
333__cv_signal(kcondvar_t *cvp)
334{
4efd4118 335 ASSERT(cvp);
336 ASSERT(cvp->cv_magic == CV_MAGIC);
d2733258 337 atomic_inc(&cvp->cv_refs);
4efd4118 338
23453686
BB
339 /*
340 * All waiters are added with WQ_FLAG_EXCLUSIVE so only one
4efd4118 341 * waiter will be set runable with each call to wake_up().
342 * Additionally wake_up() holds a spin_lock assoicated with
23453686
BB
343 * the wait queue to ensure we don't race waking up processes.
344 */
4efd4118 345 if (atomic_read(&cvp->cv_waiters) > 0)
346 wake_up(&cvp->cv_event);
347
d2733258 348 atomic_dec(&cvp->cv_refs);
4efd4118 349}
350EXPORT_SYMBOL(__cv_signal);
351
352void
353__cv_broadcast(kcondvar_t *cvp)
354{
355 ASSERT(cvp);
356 ASSERT(cvp->cv_magic == CV_MAGIC);
d2733258 357 atomic_inc(&cvp->cv_refs);
4efd4118 358
23453686
BB
359 /*
360 * Wake_up_all() will wake up all waiters even those which
361 * have the WQ_FLAG_EXCLUSIVE flag set.
362 */
4efd4118 363 if (atomic_read(&cvp->cv_waiters) > 0)
364 wake_up_all(&cvp->cv_event);
365
d2733258 366 atomic_dec(&cvp->cv_refs);
4efd4118 367}
368EXPORT_SYMBOL(__cv_broadcast);