/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
 *
 * Solaris Porting Layer (SPL) Condition Variables Implementation.
 */

#include <sys/condvar.h>
#include <sys/time.h>
#include <linux/hrtimer.h>

void
__cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
	ASSERT(cvp);
	ASSERT(name == NULL);
	ASSERT(type == CV_DEFAULT);
	ASSERT(arg == NULL);

	cvp->cv_magic = CV_MAGIC;
	init_waitqueue_head(&cvp->cv_event);
	init_waitqueue_head(&cvp->cv_destroy);
	atomic_set(&cvp->cv_waiters, 0);
	atomic_set(&cvp->cv_refs, 1);
	cvp->cv_mutex = NULL;
}
EXPORT_SYMBOL(__cv_init);
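
/*
 * Usage sketch (illustrative only, not compiled as part of this file):
 * cv_init() and cv_destroy() bracket the lifetime of a condition variable.
 * As the ASSERTs above show, the 'name', 'type' and 'arg' arguments are
 * only sanity checked on Linux, so NULL/CV_DEFAULT are the expected values.
 * 'my_cv' below is a hypothetical caller-owned object.
 *
 *	kcondvar_t my_cv;
 *
 *	cv_init(&my_cv, NULL, CV_DEFAULT, NULL);
 *	...
 *	cv_destroy(&my_cv);	// blocks until the last waiter drops off
 */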

static int
cv_destroy_wakeup(kcondvar_t *cvp)
{
	if (!atomic_read(&cvp->cv_waiters) && !atomic_read(&cvp->cv_refs)) {
		ASSERT(cvp->cv_mutex == NULL);
		ASSERT(!waitqueue_active(&cvp->cv_event));
		return (1);
	}

	return (0);
}

void
__cv_destroy(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);

	cvp->cv_magic = CV_DESTROY;
	atomic_dec(&cvp->cv_refs);

	/* Block until all waiters are woken and references dropped. */
	while (cv_destroy_wakeup(cvp) == 0)
		wait_event_timeout(cvp->cv_destroy, cv_destroy_wakeup(cvp), 1);

	ASSERT3P(cvp->cv_mutex, ==, NULL);
	ASSERT3S(atomic_read(&cvp->cv_refs), ==, 0);
	ASSERT3S(atomic_read(&cvp->cv_waiters), ==, 0);
	ASSERT3S(waitqueue_active(&cvp->cv_event), ==, 0);
}
EXPORT_SYMBOL(__cv_destroy);

static void
cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state, int io)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));
	atomic_inc(&cvp->cv_refs);

	m = ACCESS_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers */
	ASSERT(m == NULL || m == mp);

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * The mutex is dropped only after prepare_to_wait(); this ensures
	 * we are linked onto the waiters list and avoids the race where
	 * 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	if (io)
		io_schedule();
	else
		schedule();

	/* No more waiters, so a different mutex could now be used. */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy. But this is
		 * just for debug anyway, so make it best-effort.
		 */
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	/*
	 * Reacquire the mutex only after dropping our reference on the cvp;
	 * otherwise we could deadlock with a thread which holds the mutex
	 * and calls cv_destroy().
	 */
	mutex_enter(mp);
}

void
__cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait);

void
__cv_wait_io(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 1);
}
EXPORT_SYMBOL(__cv_wait_io);

void
__cv_wait_sig(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_INTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait_sig);
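
/*
 * Caller-side wait pattern (a hedged sketch; 'my_lock', 'my_cv' and
 * 'data_ready' are hypothetical and not defined in this file). The mutex
 * must be held on entry; cv_wait() drops it while sleeping and reacquires
 * it before returning, so the predicate is always rechecked under the lock:
 *
 *	mutex_enter(&my_lock);
 *	while (!data_ready)
 *		cv_wait(&my_cv, &my_lock);
 *	// consume the data while still holding my_lock
 *	mutex_exit(&my_lock);
 *
 * cv_wait_sig() behaves the same but sleeps in TASK_INTERRUPTIBLE, and
 * cv_wait_io() accounts the sleep as I/O wait via io_schedule().
 */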

#if defined(HAVE_IO_SCHEDULE_TIMEOUT)
#define	spl_io_schedule_timeout(t)	io_schedule_timeout(t)
#else
static void
__cv_wakeup(unsigned long data)
{
	wake_up_process((struct task_struct *)data);
}

static long
spl_io_schedule_timeout(long time_left)
{
	long expire_time = jiffies + time_left;
	struct timer_list timer;

	init_timer(&timer);
	setup_timer(&timer, __cv_wakeup, (unsigned long)current);
	timer.expires = expire_time;
	add_timer(&timer);

	io_schedule();

	del_timer_sync(&timer);
	time_left = expire_time - jiffies;

	return (time_left < 0 ? 0 : time_left);
}
#endif

/*
 * 'expire_time' argument is an absolute wall clock time in jiffies.
 * Return value is time left (expire_time - now) or -1 if timeout occurred.
 */
static clock_t
__cv_timedwait_common(kcondvar_t *cvp, kmutex_t *mp, clock_t expire_time,
    int state, int io)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;
	clock_t time_left;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));

	/* XXX - Does not handle jiffies wrap properly */
	time_left = expire_time - jiffies;
	if (time_left <= 0)
		return (-1);

	atomic_inc(&cvp->cv_refs);
	m = ACCESS_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers */
	ASSERT(m == NULL || m == mp);

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * The mutex is dropped only after prepare_to_wait(); this ensures
	 * we are linked onto the waiters list and avoids the race where
	 * 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	if (io)
		time_left = spl_io_schedule_timeout(time_left);
	else
		time_left = schedule_timeout(time_left);

	/* No more waiters, so a different mutex could now be used. */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy. But this is
		 * just for debug anyway, so make it best-effort.
		 */
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	/*
	 * Reacquire the mutex only after dropping our reference on the cvp;
	 * otherwise we could deadlock with a thread which holds the mutex
	 * and calls cv_destroy().
	 */
	mutex_enter(mp);
	return (time_left > 0 ? time_left : -1);
}

clock_t
__cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return (__cv_timedwait_common(cvp, mp, exp_time,
	    TASK_UNINTERRUPTIBLE, 0));
}
EXPORT_SYMBOL(__cv_timedwait);
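
/*
 * Timed wait sketch (illustrative; 'my_lock', 'my_cv' and 'work_pending'
 * are hypothetical). The deadline is an absolute time in jiffies, so a
 * relative timeout is typically built from ddi_get_lbolt(); a return of
 * -1 means the deadline passed before the cv was signaled:
 *
 *	mutex_enter(&my_lock);
 *	while (!work_pending) {
 *		if (cv_timedwait(&my_cv, &my_lock,
 *		    ddi_get_lbolt() + MSEC_TO_TICK(100)) == -1)
 *			break;	// timed out
 *	}
 *	mutex_exit(&my_lock);
 */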

clock_t
__cv_timedwait_io(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return (__cv_timedwait_common(cvp, mp, exp_time,
	    TASK_UNINTERRUPTIBLE, 1));
}
EXPORT_SYMBOL(__cv_timedwait_io);

clock_t
__cv_timedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return (__cv_timedwait_common(cvp, mp, exp_time,
	    TASK_INTERRUPTIBLE, 0));
}
EXPORT_SYMBOL(__cv_timedwait_sig);

/*
 * 'expire_time' argument is an absolute clock time in nanoseconds.
 * Return value is time left (expire_time - now) or -1 if timeout occurred.
 */
static clock_t
__cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t expire_time,
    int state)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;
	hrtime_t time_left;
	ktime_t ktime_left;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));

	time_left = expire_time - gethrtime();
	if (time_left <= 0)
		return (-1);

	atomic_inc(&cvp->cv_refs);
	m = ACCESS_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers */
	ASSERT(m == NULL || m == mp);

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * The mutex is dropped only after prepare_to_wait(); this ensures
	 * we are linked onto the waiters list and avoids the race where
	 * 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	/*
	 * Allow a 100 us range to give the kernel an opportunity to
	 * coalesce interrupts.
	 */
	ktime_left = ktime_set(0, time_left);
	schedule_hrtimeout_range(&ktime_left, 100 * NSEC_PER_USEC,
	    HRTIMER_MODE_REL);

	/* No more waiters, so a different mutex could now be used. */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy. But this is
		 * just for debug anyway, so make it best-effort.
		 */
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	mutex_enter(mp);
	time_left = expire_time - gethrtime();
	return (time_left > 0 ? NSEC_TO_TICK(time_left) : -1);
}

/*
 * Compatibility wrapper for the cv_timedwait_hires() Illumos interface.
 */
static clock_t
cv_timedwait_hires_common(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
    hrtime_t res, int flag, int state)
{
	if (res > 1) {
		/*
		 * Align expiration to the specified resolution.
		 */
		if (flag & CALLOUT_FLAG_ROUNDUP)
			tim += res - 1;
		tim = (tim / res) * res;
	}

	if (!(flag & CALLOUT_FLAG_ABSOLUTE))
		tim += gethrtime();

	return (__cv_timedwait_hires(cvp, mp, tim, state));
}

clock_t
cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim, hrtime_t res,
    int flag)
{
	return (cv_timedwait_hires_common(cvp, mp, tim, res, flag,
	    TASK_UNINTERRUPTIBLE));
}
EXPORT_SYMBOL(cv_timedwait_hires);
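
/*
 * High-resolution wait sketch (illustrative; 'my_lock', 'my_cv' and
 * 'state_changed' are hypothetical, and MSEC2NSEC() is assumed to come
 * from sys/time.h). Without CALLOUT_FLAG_ABSOLUTE, 'tim' is a relative
 * nanosecond timeout; cv_timedwait_hires_common() rounds it up to the
 * requested resolution and converts it to an absolute gethrtime()
 * deadline:
 *
 *	mutex_enter(&my_lock);
 *	while (!state_changed) {
 *		if (cv_timedwait_hires(&my_cv, &my_lock, MSEC2NSEC(5),
 *		    MSEC2NSEC(1), CALLOUT_FLAG_ROUNDUP) == -1)
 *			break;	// deadline expired
 *	}
 *	mutex_exit(&my_lock);
 */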

clock_t
cv_timedwait_sig_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
    hrtime_t res, int flag)
{
	return (cv_timedwait_hires_common(cvp, mp, tim, res, flag,
	    TASK_INTERRUPTIBLE));
}
EXPORT_SYMBOL(cv_timedwait_sig_hires);

void
__cv_signal(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	atomic_inc(&cvp->cv_refs);

	/*
	 * All waiters are added with WQ_FLAG_EXCLUSIVE, so only one
	 * waiter will be made runnable by each call to wake_up().
	 * Additionally, wake_up() holds a spin lock associated with
	 * the wait queue to ensure we don't race waking up processes.
	 */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up(&cvp->cv_event);

	atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_signal);

void
__cv_broadcast(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	atomic_inc(&cvp->cv_refs);

	/*
	 * wake_up_all() will wake up all waiters, even those which
	 * have the WQ_FLAG_EXCLUSIVE flag set.
	 */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up_all(&cvp->cv_event);

	atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_broadcast);
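
/*
 * Producer-side sketch (illustrative; 'my_lock', 'my_cv' and 'data_ready'
 * are hypothetical). The condition is updated under the same mutex the
 * waiters use; cv_signal() then wakes a single exclusive waiter, while
 * cv_broadcast() wakes them all:
 *
 *	mutex_enter(&my_lock);
 *	data_ready = B_TRUE;
 *	cv_broadcast(&my_cv);
 *	mutex_exit(&my_lock);
 */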