/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf. DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Solaris Porting Layer (SPL) Condition Variables Implementation.
 */

#include <sys/condvar.h>
#include <sys/time.h>
#include <linux/hrtimer.h>

void
__cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
	ASSERT(cvp);
	ASSERT(name == NULL);
	ASSERT(type == CV_DEFAULT);
	ASSERT(arg == NULL);

	cvp->cv_magic = CV_MAGIC;
	init_waitqueue_head(&cvp->cv_event);
	init_waitqueue_head(&cvp->cv_destroy);
	atomic_set(&cvp->cv_waiters, 0);
	atomic_set(&cvp->cv_refs, 1);
	cvp->cv_mutex = NULL;
}
EXPORT_SYMBOL(__cv_init);

static int
cv_destroy_wakeup(kcondvar_t *cvp)
{
	if (!atomic_read(&cvp->cv_waiters) && !atomic_read(&cvp->cv_refs)) {
		ASSERT(cvp->cv_mutex == NULL);
		ASSERT(!waitqueue_active(&cvp->cv_event));
		return (1);
	}

	return (0);
}

void
__cv_destroy(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);

	cvp->cv_magic = CV_DESTROY;
	atomic_dec(&cvp->cv_refs);

	/* Block until all waiters are woken and references dropped. */
	while (cv_destroy_wakeup(cvp) == 0)
		wait_event_timeout(cvp->cv_destroy, cv_destroy_wakeup(cvp), 1);

	ASSERT3P(cvp->cv_mutex, ==, NULL);
	ASSERT3S(atomic_read(&cvp->cv_refs), ==, 0);
	ASSERT3S(atomic_read(&cvp->cv_waiters), ==, 0);
	ASSERT3S(waitqueue_active(&cvp->cv_event), ==, 0);
}
EXPORT_SYMBOL(__cv_destroy);
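
/*
 * Intended lifecycle, as a minimal sketch (the 'example_t' type and
 * its fields are hypothetical, not part of this file).  Waiters must
 * re-check their predicate in a loop with the mutex held:
 *
 *	typedef struct example {
 *		kmutex_t ex_lock;
 *		kcondvar_t ex_cv;
 *		boolean_t ex_ready;
 *	} example_t;
 *
 *	cv_init(&ex->ex_cv, NULL, CV_DEFAULT, NULL);
 *
 *	mutex_enter(&ex->ex_lock);
 *	while (!ex->ex_ready)
 *		cv_wait(&ex->ex_cv, &ex->ex_lock);
 *	mutex_exit(&ex->ex_lock);
 *
 * cv_destroy() may only be called once no thread can still be blocked
 * on the cv; __cv_destroy() above waits for stragglers to drop their
 * references rather than freeing state out from under them.
 */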

static void
cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state, int io)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));
	atomic_inc(&cvp->cv_refs);

	m = ACCESS_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers */
	ASSERT(m == NULL || m == mp);

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * The mutex must be dropped after prepare_to_wait(); this
	 * ensures we're linked in to the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	if (io)
		io_schedule();
	else
		schedule();

	/* With no more waiters a different mutex could be used */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy.  But this
		 * is just for debugging anyway, so make it best-effort.
		 */
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	/*
	 * Reacquire the mutex only after releasing the cvp; otherwise we
	 * could deadlock with a thread holding the mutex and calling
	 * cv_destroy().
	 */
	mutex_enter(mp);
}
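
/*
 * The ordering in cv_wait_common() matters.  A hypothetical
 * interleaving (added for illustration) showing the lost wakeup if
 * the mutex were dropped before prepare_to_wait_exclusive():
 *
 *	waiter				signaler
 *	------				--------
 *	mutex_exit(mp);
 *					mutex_enter(mp);
 *					cv_signal(cvp);
 *					  (no waiter queued; wakeup lost)
 *					mutex_exit(mp);
 *	prepare_to_wait_exclusive(...);
 *	schedule();
 *	  (sleeps despite the signal)
 *
 * Because the waiter queues itself before releasing the mutex, any
 * signal sent under that same mutex afterwards is guaranteed to find
 * the waiter on the wait queue.
 */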

void
__cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait);

void
__cv_wait_io(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 1);
}
EXPORT_SYMBOL(__cv_wait_io);

void
__cv_wait_sig(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_INTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait_sig);

#if defined(HAVE_IO_SCHEDULE_TIMEOUT)
#define	spl_io_schedule_timeout(t)	io_schedule_timeout(t)
#else
static void
__cv_wakeup(unsigned long data)
{
	wake_up_process((struct task_struct *)data);
}

/*
 * Emulate io_schedule_timeout() on kernels which lack it by arming a
 * timer to wake this task, then sleeping in io_schedule().
 */
static long
spl_io_schedule_timeout(long time_left)
{
	long expire_time = jiffies + time_left;
	struct timer_list timer;

	init_timer(&timer);
	setup_timer(&timer, __cv_wakeup, (unsigned long)current);
	timer.expires = expire_time;
	add_timer(&timer);

	io_schedule();

	del_timer_sync(&timer);
	time_left = expire_time - jiffies;

	return (time_left < 0 ? 0 : time_left);
}
#endif
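
/*
 * Either way spl_io_schedule_timeout() takes and returns a relative
 * time in jiffies, e.g. (illustrative call only):
 *
 *	long left = spl_io_schedule_timeout(msecs_to_jiffies(10));
 *
 * where a return of 0 means the full interval elapsed and a positive
 * value is the time remaining when the task was woken early.  The
 * sleep is accounted as I/O wait, matching io_schedule().
 */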

/*
 * 'expire_time' argument is an absolute wall clock time in jiffies.
 * Return value is time left (expire_time - now) or -1 if timeout occurred.
 */
static clock_t
__cv_timedwait_common(kcondvar_t *cvp, kmutex_t *mp, clock_t expire_time,
    int state, int io)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;
	clock_t time_left;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));

	/* XXX - Does not handle jiffies wrap properly */
	time_left = expire_time - jiffies;
	if (time_left <= 0)
		return (-1);

	atomic_inc(&cvp->cv_refs);
	m = ACCESS_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers */
	ASSERT(m == NULL || m == mp);

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * The mutex must be dropped after prepare_to_wait(); this
	 * ensures we're linked in to the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	if (io)
		time_left = spl_io_schedule_timeout(time_left);
	else
		time_left = schedule_timeout(time_left);

	/* With no more waiters a different mutex could be used */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy.  But this
		 * is just for debugging anyway, so make it best-effort.
		 */
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	/*
	 * Reacquire the mutex only after releasing the cvp; otherwise we
	 * could deadlock with a thread holding the mutex and calling
	 * cv_destroy().
	 */
	mutex_enter(mp);
	return (time_left > 0 ? time_left : -1);
}

clock_t
__cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return (__cv_timedwait_common(cvp, mp, exp_time,
	    TASK_UNINTERRUPTIBLE, 0));
}
EXPORT_SYMBOL(__cv_timedwait);

clock_t
__cv_timedwait_io(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return (__cv_timedwait_common(cvp, mp, exp_time,
	    TASK_UNINTERRUPTIBLE, 1));
}
EXPORT_SYMBOL(__cv_timedwait_io);

clock_t
__cv_timedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return (__cv_timedwait_common(cvp, mp, exp_time,
	    TASK_INTERRUPTIBLE, 0));
}
EXPORT_SYMBOL(__cv_timedwait_sig);
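
/*
 * Since 'exp_time' is an absolute time in jiffies, callers typically
 * build it from the current lbolt plus a relative delay.  A minimal
 * sketch ('ex_lock', 'ex_cv', and 'ex_ready' are hypothetical names;
 * SEC_TO_TICK() is assumed from <sys/time.h>):
 *
 *	mutex_enter(&ex_lock);
 *	while (!ex_ready) {
 *		if (cv_timedwait(&ex_cv, &ex_lock,
 *		    ddi_get_lbolt() + SEC_TO_TICK(5)) == -1)
 *			break;
 *	}
 *	mutex_exit(&ex_lock);
 *
 * The -1 return distinguishes "five seconds elapsed" from an early
 * wakeup, which instead returns the number of ticks remaining.
 */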

/*
 * 'expire_time' argument is an absolute clock time in nanoseconds.
 * Return value is time left (expire_time - now) or -1 if timeout occurred.
 */
static clock_t
__cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t expire_time,
    int state)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;
	hrtime_t time_left;
	ktime_t ktime_left;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));

	time_left = expire_time - gethrtime();
	if (time_left <= 0)
		return (-1);

	atomic_inc(&cvp->cv_refs);
	m = ACCESS_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers */
	ASSERT(m == NULL || m == mp);

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * The mutex must be dropped after prepare_to_wait(); this
	 * ensures we're linked in to the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	/*
	 * Allow a 100 us range to give the kernel an opportunity to
	 * coalesce interrupts.
	 */
	ktime_left = ktime_set(0, time_left);
	schedule_hrtimeout_range(&ktime_left, 100 * NSEC_PER_USEC,
	    HRTIMER_MODE_REL);

	/* With no more waiters a different mutex could be used */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy.  But this
		 * is just for debugging anyway, so make it best-effort.
		 */
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	mutex_enter(mp);
	time_left = expire_time - gethrtime();
	return (time_left > 0 ? NSEC_TO_TICK(time_left) : -1);
}

/*
 * Compatibility wrapper for the cv_timedwait_hires() Illumos interface.
 */
static clock_t
cv_timedwait_hires_common(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
    hrtime_t res, int flag, int state)
{
	if (res > 1) {
		/*
		 * Align expiration to the specified resolution.
		 */
		if (flag & CALLOUT_FLAG_ROUNDUP)
			tim += res - 1;
		tim = (tim / res) * res;
	}

	if (!(flag & CALLOUT_FLAG_ABSOLUTE))
		tim += gethrtime();

	return (__cv_timedwait_hires(cvp, mp, tim, state));
}
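
/*
 * A worked example of the alignment above (illustrative numbers):
 * with res = 1000 and tim = 2500, the default rounds down to
 * (2500 / 1000) * 1000 = 2000, while CALLOUT_FLAG_ROUNDUP first adds
 * res - 1 to get 3499, which then rounds down to 3000, i.e. up to the
 * next multiple of the resolution.
 */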

clock_t
cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim, hrtime_t res,
    int flag)
{
	return (cv_timedwait_hires_common(cvp, mp, tim, res, flag,
	    TASK_UNINTERRUPTIBLE));
}
EXPORT_SYMBOL(cv_timedwait_hires);

clock_t
cv_timedwait_sig_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
    hrtime_t res, int flag)
{
	return (cv_timedwait_hires_common(cvp, mp, tim, res, flag,
	    TASK_INTERRUPTIBLE));
}
EXPORT_SYMBOL(cv_timedwait_sig_hires);

void
__cv_signal(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	atomic_inc(&cvp->cv_refs);

	/*
	 * All waiters are added with WQ_FLAG_EXCLUSIVE so only one
	 * waiter will be set runnable with each call to wake_up().
	 * Additionally wake_up() holds a spin_lock associated with
	 * the wait queue to ensure we don't race waking up processes.
	 */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up(&cvp->cv_event);

	atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_signal);

void
__cv_broadcast(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	atomic_inc(&cvp->cv_refs);

	/*
	 * wake_up_all() will wake up all waiters even those which
	 * have the WQ_FLAG_EXCLUSIVE flag set.
	 */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up_all(&cvp->cv_event);

	atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_broadcast);
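
/*
 * Choosing between the wakeup primitives, as a minimal sketch
 * ('ex_lock', 'ex_cv', 'ex_count', and 'ex_shutdown' are hypothetical
 * names):
 *
 *	wake one consumer for one unit of work:
 *		mutex_enter(&ex_lock);
 *		ex_count++;
 *		cv_signal(&ex_cv);
 *		mutex_exit(&ex_lock);
 *
 *	wake everyone for a state all waiters must see:
 *		mutex_enter(&ex_lock);
 *		ex_shutdown = B_TRUE;
 *		cv_broadcast(&ex_cv);
 *		mutex_exit(&ex_lock);
 *
 * cv_signal() suits the case where any single waiter can consume the
 * state change; cv_broadcast() suits state every waiter must observe.
 */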