/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Solaris Porting Layer (SPL) Condition Variables Implementation.
 */

#include <sys/condvar.h>
#include <sys/time.h>
#include <linux/hrtimer.h>

void
__cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
	ASSERT(cvp);
	ASSERT(name == NULL);
	ASSERT(type == CV_DEFAULT);
	ASSERT(arg == NULL);

	cvp->cv_magic = CV_MAGIC;
	init_waitqueue_head(&cvp->cv_event);
	init_waitqueue_head(&cvp->cv_destroy);
	atomic_set(&cvp->cv_waiters, 0);
	atomic_set(&cvp->cv_refs, 1);
	cvp->cv_mutex = NULL;
}
EXPORT_SYMBOL(__cv_init);
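
/*
 * A minimal consumer-side usage sketch (illustrative only; 'queue_lock',
 * 'queue_cv', and 'queue_empty' are hypothetical caller-defined state).
 * As with any condition variable, the predicate must be rechecked in a
 * loop, since a waiter can be woken without the condition holding:
 *
 *	cv_init(&queue_cv, NULL, CV_DEFAULT, NULL);
 *
 *	mutex_enter(&queue_lock);
 *	while (queue_empty)
 *		cv_wait(&queue_cv, &queue_lock);
 *	... consume an item ...
 *	mutex_exit(&queue_lock);
 */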

static int
cv_destroy_wakeup(kcondvar_t *cvp)
{
	if (!atomic_read(&cvp->cv_waiters) && !atomic_read(&cvp->cv_refs)) {
		ASSERT(cvp->cv_mutex == NULL);
		ASSERT(!waitqueue_active(&cvp->cv_event));
		return (1);
	}

	return (0);
}

void
__cv_destroy(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);

	cvp->cv_magic = CV_DESTROY;
	atomic_dec(&cvp->cv_refs);

	/* Block until all waiters are woken and references dropped. */
	while (cv_destroy_wakeup(cvp) == 0)
		wait_event_timeout(cvp->cv_destroy, cv_destroy_wakeup(cvp), 1);

	ASSERT3P(cvp->cv_mutex, ==, NULL);
	ASSERT3S(atomic_read(&cvp->cv_refs), ==, 0);
	ASSERT3S(atomic_read(&cvp->cv_waiters), ==, 0);
	ASSERT3S(waitqueue_active(&cvp->cv_event), ==, 0);
}
EXPORT_SYMBOL(__cv_destroy);
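
/*
 * A minimal teardown sketch (illustrative; 'shutdown', 'queue_cv', and
 * 'queue_lock' are hypothetical caller state).  Waking waiters while
 * still holding their mutex, then destroying only after it is dropped,
 * lets __cv_destroy() drain the remaining waiters and references:
 *
 *	mutex_enter(&queue_lock);
 *	shutdown = B_TRUE;
 *	cv_broadcast(&queue_cv);
 *	mutex_exit(&queue_lock);
 *
 *	cv_destroy(&queue_cv);
 */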

static void
cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state, int io)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));
	atomic_inc(&cvp->cv_refs);

	m = ACCESS_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers. */
	ASSERT(m == NULL || m == mp);

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * The mutex must be dropped after prepare_to_wait(); this
	 * ensures we're linked in to the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	if (io)
		io_schedule();
	else
		schedule();

	/* No more waiters, so a different mutex could now be used. */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy.  But this
		 * is just for debugging anyway, so make it best-effort.
		 */
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	/*
	 * Reacquire the mutex after dropping our reference on the cvp;
	 * otherwise we could deadlock with a thread that holds the mutex
	 * and calls cv_destroy().
	 */
	mutex_enter(mp);
}

void
__cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait);

void
__cv_wait_sig(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_INTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait_sig);

void
__cv_wait_io(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 1);
}
EXPORT_SYMBOL(__cv_wait_io);
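
/*
 * Note: __cv_wait_io() differs from __cv_wait() only in sleeping via
 * io_schedule(), which marks the task as waiting on I/O so the sleep
 * time is charged to iowait rather than idle in CPU accounting.
 */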

/*
 * 'expire_time' argument is an absolute wall clock time in jiffies.
 * Return value is time left (expire_time - now) or -1 if timeout occurred.
 */
static clock_t
__cv_timedwait_common(kcondvar_t *cvp, kmutex_t *mp, clock_t expire_time,
	int state)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;
	clock_t time_left;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));
	atomic_inc(&cvp->cv_refs);

	m = ACCESS_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers. */
	ASSERT(m == NULL || m == mp);

	/* XXX - Does not handle jiffies wrap properly. */
	time_left = expire_time - jiffies;
	if (time_left <= 0) {
		/* XXX - Doesn't reset cv_mutex. */
		atomic_dec(&cvp->cv_refs);
		return (-1);
	}

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * The mutex must be dropped after prepare_to_wait(); this
	 * ensures we're linked in to the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	time_left = schedule_timeout(time_left);

	/* No more waiters, so a different mutex could now be used. */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy.  But this
		 * is just for debugging anyway, so make it best-effort.
		 */
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	/*
	 * Reacquire the mutex after dropping our reference on the cvp;
	 * otherwise we could deadlock with a thread that holds the mutex
	 * and calls cv_destroy().
	 */
	mutex_enter(mp);
	return (time_left > 0 ? time_left : -1);
}

clock_t
__cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return (__cv_timedwait_common(cvp, mp, exp_time, TASK_UNINTERRUPTIBLE));
}
EXPORT_SYMBOL(__cv_timedwait);

clock_t
__cv_timedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return (__cv_timedwait_common(cvp, mp, exp_time, TASK_INTERRUPTIBLE));
}
EXPORT_SYMBOL(__cv_timedwait_sig);
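
/*
 * A minimal timed-wait sketch (illustrative; 'queue_cv', 'queue_lock',
 * and 'queue_empty' are hypothetical caller state).  Because the
 * expiration is absolute, callers typically add a relative delay to
 * the current lbolt value; a -1 return distinguishes a timeout from
 * a wakeup:
 *
 *	clock_t deadline = ddi_get_lbolt() + MSEC_TO_TICK(100);
 *
 *	mutex_enter(&queue_lock);
 *	while (queue_empty &&
 *	    cv_timedwait(&queue_cv, &queue_lock, deadline) != -1)
 *		continue;
 *	mutex_exit(&queue_lock);
 */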

/*
 * 'expire_time' argument is an absolute clock time in nanoseconds.
 * Return value is time left (expire_time - now) or -1 if timeout occurred.
 */
static clock_t
__cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t expire_time,
	int state)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;
	hrtime_t time_left, now;
	ktime_t ktime_left;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));
	atomic_inc(&cvp->cv_refs);

	m = ACCESS_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers. */
	ASSERT(m == NULL || m == mp);

	now = gethrtime();
	time_left = expire_time - now;
	if (time_left <= 0) {
		atomic_dec(&cvp->cv_refs);
		return (-1);
	}

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * The mutex must be dropped after prepare_to_wait(); this
	 * ensures we're linked in to the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	/*
	 * Allow a 100 us range to give the kernel an opportunity to
	 * coalesce interrupts.
	 */
	ktime_left = ktime_set(0, time_left);
	schedule_hrtimeout_range(&ktime_left, 100 * NSEC_PER_USEC,
	    HRTIMER_MODE_REL);

	/* No more waiters, so a different mutex could now be used. */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy.  But this
		 * is just for debugging anyway, so make it best-effort.
		 */
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	mutex_enter(mp);
	time_left = expire_time - gethrtime();
	return (time_left > 0 ? NSEC_TO_TICK(time_left) : -1);
}

/*
 * Compatibility wrapper for the cv_timedwait_hires() Illumos interface.
 */
static clock_t
cv_timedwait_hires_common(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
	hrtime_t res, int flag, int state)
{
	if (res > 1) {
		/*
		 * Align expiration to the specified resolution.
		 */
		if (flag & CALLOUT_FLAG_ROUNDUP)
			tim += res - 1;
		tim = (tim / res) * res;
	}

	if (!(flag & CALLOUT_FLAG_ABSOLUTE))
		tim += gethrtime();

	return (__cv_timedwait_hires(cvp, mp, tim, state));
}

clock_t
cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim, hrtime_t res,
	int flag)
{
	return (cv_timedwait_hires_common(cvp, mp, tim, res, flag,
	    TASK_UNINTERRUPTIBLE));
}
EXPORT_SYMBOL(cv_timedwait_hires);

clock_t
cv_timedwait_sig_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
	hrtime_t res, int flag)
{
	return (cv_timedwait_hires_common(cvp, mp, tim, res, flag,
	    TASK_INTERRUPTIBLE));
}
EXPORT_SYMBOL(cv_timedwait_sig_hires);
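
/*
 * A short worked example of the resolution alignment above (values are
 * illustrative).  With tim = 1,234,567 ns and res = 1,000,000 ns:
 *
 *	default:              tim = (1234567 / 1000000) * 1000000
 *	                          = 1000000
 *	CALLOUT_FLAG_ROUNDUP: tim = ((1234567 + 999999) / 1000000) * 1000000
 *	                          = 2000000
 *
 * Without CALLOUT_FLAG_ABSOLUTE the aligned value is then treated as a
 * delay relative to gethrtime().
 */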

void
__cv_signal(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	atomic_inc(&cvp->cv_refs);

	/*
	 * All waiters are added with WQ_FLAG_EXCLUSIVE, so only one
	 * waiter will be set runnable with each call to wake_up().
	 * Additionally, wake_up() holds a spin_lock associated with
	 * the wait queue to ensure we don't race waking up processes.
	 */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up(&cvp->cv_event);

	atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_signal);

void
__cv_broadcast(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	atomic_inc(&cvp->cv_refs);

	/*
	 * wake_up_all() will wake up all waiters, even those which
	 * have the WQ_FLAG_EXCLUSIVE flag set.
	 */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up_all(&cvp->cv_event);

	atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_broadcast);
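
/*
 * A minimal producer-side sketch (illustrative; 'queue_lock', 'queue_cv',
 * and 'queue_empty' are hypothetical caller state).  cv_signal() is
 * preferred when any single waiter can consume the event; cv_broadcast()
 * when every waiter must re-evaluate the predicate:
 *
 *	mutex_enter(&queue_lock);
 *	... enqueue an item ...
 *	queue_empty = B_FALSE;
 *	cv_signal(&queue_cv);
 *	mutex_exit(&queue_lock);
 */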