/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
 *
 * Solaris Porting Layer (SPL) Condition Variables Implementation.
 */

#include <sys/condvar.h>
#include <sys/time.h>
#include <linux/hrtimer.h>

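/*
 * cv_init() - Initialize a condition variable.
 *
 * Under Linux only CV_DEFAULT condition variables are supported, and the
 * 'name', 'type', and 'arg' arguments are asserted to their expected
 * values rather than used.  Illustrative usage sketch only (not part of
 * this file; 'condition' stands for the caller's own predicate):
 *
 *	kmutex_t lock;
 *	kcondvar_t cv;
 *
 *	mutex_init(&lock, NULL, MUTEX_DEFAULT, NULL);
 *	cv_init(&cv, NULL, CV_DEFAULT, NULL);
 *
 *	mutex_enter(&lock);
 *	while (!condition)
 *		cv_wait(&cv, &lock);
 *	mutex_exit(&lock);
 */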
void
__cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
	ASSERT(cvp);
	ASSERT(name == NULL);
	ASSERT(type == CV_DEFAULT);
	ASSERT(arg == NULL);

	cvp->cv_magic = CV_MAGIC;
	init_waitqueue_head(&cvp->cv_event);
	init_waitqueue_head(&cvp->cv_destroy);
	atomic_set(&cvp->cv_waiters, 0);
	atomic_set(&cvp->cv_refs, 1);
	cvp->cv_mutex = NULL;
}
EXPORT_SYMBOL(__cv_init);

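/*
 * Returns non-zero once the last waiter has been woken and the last
 * reference has been dropped, making it safe to tear down the cv.
 */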
static int
cv_destroy_wakeup(kcondvar_t *cvp)
{
	if (!atomic_read(&cvp->cv_waiters) && !atomic_read(&cvp->cv_refs)) {
		ASSERT(cvp->cv_mutex == NULL);
		ASSERT(!waitqueue_active(&cvp->cv_event));
		return (1);
	}

	return (0);
}

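/*
 * cv_destroy() - Destroy a condition variable.
 *
 * A cv must not be freed while threads are still blocked on it, so
 * block here until every waiter has been woken and all references
 * have been dropped.
 */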
void
__cv_destroy(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);

	cvp->cv_magic = CV_DESTROY;
	atomic_dec(&cvp->cv_refs);

	/* Block until all waiters are woken and references dropped. */
	while (cv_destroy_wakeup(cvp) == 0)
		wait_event_timeout(cvp->cv_destroy, cv_destroy_wakeup(cvp), 1);

	ASSERT3P(cvp->cv_mutex, ==, NULL);
	ASSERT3S(atomic_read(&cvp->cv_refs), ==, 0);
	ASSERT3S(atomic_read(&cvp->cv_waiters), ==, 0);
	ASSERT3S(waitqueue_active(&cvp->cv_event), ==, 0);
}
EXPORT_SYMBOL(__cv_destroy);

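/*
 * Common wait logic used by cv_wait(), cv_wait_sig(), and cv_wait_io().
 * The caller must hold 'mp'; it is dropped while the thread sleeps and
 * reacquired before returning.  'state' selects the task sleep state
 * (TASK_UNINTERRUPTIBLE or TASK_INTERRUPTIBLE) and 'io' selects
 * io_schedule() so the sleep is accounted as I/O wait.
 */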
static void
cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state, int io)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));
	atomic_inc(&cvp->cv_refs);

	m = ACCESS_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers */
	ASSERT(m == NULL || m == mp);

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);
	/*
	 * Mutex should be dropped after prepare_to_wait(); this
	 * ensures we're linked into the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	if (io)
		io_schedule();
	else
		schedule();

	/* No more waiters; a different mutex could now be used. */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy.  But
		 * this is just for debugging anyway, so keep it
		 * best-effort.
		 */
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	/*
	 * Reacquire the mutex only after releasing the cvp; otherwise
	 * we could deadlock with a thread that holds the mutex and
	 * calls cv_destroy().
	 */
	mutex_enter(mp);
}

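/*
 * Public wait entry points: cv_wait() sleeps uninterruptibly,
 * cv_wait_sig() may be woken early by a signal, and cv_wait_io()
 * additionally accounts the sleep as I/O wait.
 */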
void
__cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait);

void
__cv_wait_sig(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_INTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait_sig);

void
__cv_wait_io(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 1);
}
EXPORT_SYMBOL(__cv_wait_io);

/*
 * 'expire_time' argument is an absolute wall clock time in jiffies.
 * Return value is time left (expire_time - now) or -1 if timeout occurred.
 */
static clock_t
__cv_timedwait_common(kcondvar_t *cvp, kmutex_t *mp, clock_t expire_time,
    int state)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;
	clock_t time_left;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));

	/* XXX - Does not handle jiffies wrap properly. */
	time_left = expire_time - jiffies;
	if (time_left <= 0)
		return (-1);

	atomic_inc(&cvp->cv_refs);
	m = ACCESS_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers */
	ASSERT(m == NULL || m == mp);

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * Mutex should be dropped after prepare_to_wait(); this
	 * ensures we're linked into the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	time_left = schedule_timeout(time_left);

	/* No more waiters; a different mutex could now be used. */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy.  But
		 * this is just for debugging anyway, so keep it
		 * best-effort.
		 */
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	/*
	 * Reacquire the mutex only after releasing the cvp; otherwise
	 * we could deadlock with a thread that holds the mutex and
	 * calls cv_destroy().
	 */
	mutex_enter(mp);
	return (time_left > 0 ? time_left : -1);
}

clock_t
__cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return (__cv_timedwait_common(cvp, mp, exp_time, TASK_UNINTERRUPTIBLE));
}
EXPORT_SYMBOL(__cv_timedwait);

clock_t
__cv_timedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return (__cv_timedwait_common(cvp, mp, exp_time, TASK_INTERRUPTIBLE));
}
EXPORT_SYMBOL(__cv_timedwait_sig);
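
/*
 * Example: bounded wait on a predicate, assuming the SPL's
 * ddi_get_lbolt() and MSEC_TO_TICK() helpers (illustrative sketch
 * only, not part of this file; 'condition' is the caller's predicate):
 *
 *	clock_t deadline = ddi_get_lbolt() + MSEC_TO_TICK(100);
 *
 *	mutex_enter(&lock);
 *	while (!condition &&
 *	    cv_timedwait(&cv, &lock, deadline) != -1)
 *		;
 *	mutex_exit(&lock);
 */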

/*
 * 'expire_time' argument is an absolute clock time in nanoseconds.
 * Return value is time left (expire_time - now) or -1 if timeout occurred.
 */
static clock_t
__cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t expire_time,
    int state)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;
	hrtime_t time_left;
	ktime_t ktime_left;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));

	time_left = expire_time - gethrtime();
	if (time_left <= 0)
		return (-1);

	atomic_inc(&cvp->cv_refs);
	m = ACCESS_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers */
	ASSERT(m == NULL || m == mp);

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * Mutex should be dropped after prepare_to_wait(); this
	 * ensures we're linked into the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	/*
	 * Allow a 100 us range to give the kernel an opportunity to
	 * coalesce interrupts.
	 */
	ktime_left = ktime_set(0, time_left);
	schedule_hrtimeout_range(&ktime_left, 100 * NSEC_PER_USEC,
	    HRTIMER_MODE_REL);

	/* No more waiters; a different mutex could now be used. */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy.  But
		 * this is just for debugging anyway, so keep it
		 * best-effort.
		 */
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	mutex_enter(mp);
	time_left = expire_time - gethrtime();
	return (time_left > 0 ? NSEC_TO_TICK(time_left) : -1);
}

/*
 * Compatibility wrapper for the cv_timedwait_hires() Illumos interface.
 */
static clock_t
cv_timedwait_hires_common(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
    hrtime_t res, int flag, int state)
{
	if (res > 1) {
		/*
		 * Align expiration to the specified resolution.
		 */
		if (flag & CALLOUT_FLAG_ROUNDUP)
			tim += res - 1;
		tim = (tim / res) * res;
	}

	if (!(flag & CALLOUT_FLAG_ABSOLUTE))
		tim += gethrtime();

	return (__cv_timedwait_hires(cvp, mp, tim, state));
}

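/*
 * Example: a relative 50 ms high-resolution wait with microsecond
 * resolution, assuming the SPL's MSEC2NSEC() and USEC2NSEC() helpers
 * (illustrative sketch only, not part of this file):
 *
 *	mutex_enter(&lock);
 *	(void) cv_timedwait_hires(&cv, &lock, MSEC2NSEC(50),
 *	    USEC2NSEC(1), 0);
 *	mutex_exit(&lock);
 */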
clock_t
cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim, hrtime_t res,
    int flag)
{
	return (cv_timedwait_hires_common(cvp, mp, tim, res, flag,
	    TASK_UNINTERRUPTIBLE));
}
EXPORT_SYMBOL(cv_timedwait_hires);

clock_t
cv_timedwait_sig_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
    hrtime_t res, int flag)
{
	return (cv_timedwait_hires_common(cvp, mp, tim, res, flag,
	    TASK_INTERRUPTIBLE));
}
EXPORT_SYMBOL(cv_timedwait_sig_hires);

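/*
 * cv_signal() - Wake a single waiter blocked on the condition variable.
 */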
void
__cv_signal(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	atomic_inc(&cvp->cv_refs);

	/*
	 * All waiters are added with WQ_FLAG_EXCLUSIVE, so only one
	 * waiter will be made runnable by each call to wake_up().
	 * Additionally, wake_up() holds a spin_lock associated with
	 * the wait queue to ensure we don't race waking up processes.
	 */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up(&cvp->cv_event);

	atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_signal);

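/*
 * cv_broadcast() - Wake all waiters blocked on the condition variable.
 */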
void
__cv_broadcast(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	atomic_inc(&cvp->cv_refs);

	/*
	 * wake_up_all() will wake all waiters, even those with the
	 * WQ_FLAG_EXCLUSIVE flag set.
	 */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up_all(&cvp->cv_event);

	atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_broadcast);