/*
 * Source: mirror_spl-debian.git (git.proxmox.com), blob
 * c420d18cadfe14f61cf01dec57e119bd0be401e4 — module/spl/spl-condvar.c
 */
1 /*
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
6 * UCRL-CODE-235197
7 *
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://zfsonlinux.org/>.
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *
 * Solaris Porting Layer (SPL) Condition Variables Implementation.
25 */
26
27 #include <sys/condvar.h>
28 #include <sys/time.h>
29
30 void
31 __cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
32 {
33 ASSERT(cvp);
34 ASSERT(name == NULL);
35 ASSERT(type == CV_DEFAULT);
36 ASSERT(arg == NULL);
37
38 cvp->cv_magic = CV_MAGIC;
39 init_waitqueue_head(&cvp->cv_event);
40 init_waitqueue_head(&cvp->cv_destroy);
41 atomic_set(&cvp->cv_waiters, 0);
42 atomic_set(&cvp->cv_refs, 1);
43 cvp->cv_mutex = NULL;
44 }
45 EXPORT_SYMBOL(__cv_init);
46
47 static int
48 cv_destroy_wakeup(kcondvar_t *cvp)
49 {
50 if (!atomic_read(&cvp->cv_waiters) && !atomic_read(&cvp->cv_refs)) {
51 ASSERT(cvp->cv_mutex == NULL);
52 ASSERT(!waitqueue_active(&cvp->cv_event));
53 return (1);
54 }
55
56 return (0);
57 }
58
/*
 * Tear down a condition variable.  Drops the initialization reference
 * taken in __cv_init() and then blocks until every waiter has left the
 * wait paths and all transient references are gone, so the caller may
 * safely free the underlying memory afterwards.
 */
void
__cv_destroy(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);

	/* Poison the magic so late users trip the CV_MAGIC ASSERTs. */
	cvp->cv_magic = CV_DESTROY;
	/* Drop the reference taken by __cv_init(). */
	atomic_dec(&cvp->cv_refs);

	/* Block until all waiters are woken and references dropped. */
	while (cv_destroy_wakeup(cvp) == 0)
		wait_event_timeout(cvp->cv_destroy, cv_destroy_wakeup(cvp), 1);

	ASSERT3P(cvp->cv_mutex, ==, NULL);
	ASSERT3S(atomic_read(&cvp->cv_refs), ==, 0);
	ASSERT3S(atomic_read(&cvp->cv_waiters), ==, 0);
	ASSERT3S(waitqueue_active(&cvp->cv_event), ==, 0);
}
EXPORT_SYMBOL(__cv_destroy);
78
/*
 * Core wait path shared by cv_wait(), cv_wait_sig() and cv_wait_io().
 *
 * The caller must hold 'mp'.  The thread is queued exclusively on
 * cvp->cv_event, the mutex is dropped for the duration of the sleep,
 * and re-acquired before returning.  'state' selects the Linux task
 * sleep state (TASK_UNINTERRUPTIBLE or TASK_INTERRUPTIBLE) and a
 * non-zero 'io' uses io_schedule() so the sleep is accounted as I/O
 * wait.
 */
static void
cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state, int io)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));
	/* Hold a reference so a racing cv_destroy() cannot free the cv. */
	atomic_inc(&cvp->cv_refs);

	/* Record the associated mutex on first use (debug aid only). */
	m = ACCESS_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers */
	ASSERT(m == NULL || m == mp);

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * Mutex should be dropped after prepare_to_wait() this
	 * ensures we're linked in to the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	if (io)
		io_schedule();
	else
		schedule();

	/* Last waiter out: a different mutex could be used from now on. */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy. But this is
		 * just for debug anyway, so make it best-effort
		 */
		cvp->cv_mutex = NULL;
		/* Kick a thread blocked in __cv_destroy(). */
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	/*
	 * Hold mutex after we release the cvp, otherwise we could dead lock
	 * with a thread holding the mutex and call cv_destroy.
	 */
	mutex_enter(mp);
}
130
/* Uninterruptible wait: sleep on 'cvp' with 'mp' held until signaled. */
void
__cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait);
137
/*
 * Interruptible wait: as __cv_wait() but a pending signal may wake the
 * thread early.  NOTE(review): no indication of a signal wakeup is
 * returned to the caller — confirm callers re-check their predicate.
 */
void
__cv_wait_sig(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_INTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait_sig);
144
/* As __cv_wait() but the sleep is charged to the iowait statistics. */
void
__cv_wait_io(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 1);
}
EXPORT_SYMBOL(__cv_wait_io);
151
/*
 * Core timed-wait path shared by cv_timedwait() and cv_timedwait_sig().
 *
 * 'expire_time' argument is an absolute wall clock time in jiffies.
 * Return value is time left (expire_time - now) or -1 if timeout occurred.
 * The caller must hold 'mp'; it is dropped while sleeping and
 * re-acquired before returning.
 */
static clock_t
__cv_timedwait_common(kcondvar_t *cvp, kmutex_t *mp, clock_t expire_time,
    int state)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;
	clock_t time_left;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));
	/* Hold a reference so a racing cv_destroy() cannot free the cv. */
	atomic_inc(&cvp->cv_refs);

	/* Record the associated mutex on first use (debug aid only). */
	m = ACCESS_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers */
	ASSERT(m == NULL || m == mp);

	/* XXX - Does not handle jiffie wrap properly */
	time_left = expire_time - jiffies;
	if (time_left <= 0) {
		/* XXX - doesn't reset cv_mutex */
		atomic_dec(&cvp->cv_refs);
		return (-1);
	}

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * Mutex should be dropped after prepare_to_wait() this
	 * ensures we're linked in to the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	time_left = schedule_timeout(time_left);

	/* Last waiter out: a different mutex could be used from now on. */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy. But this is
		 * just for debug anyway, so make it best-effort
		 */
		cvp->cv_mutex = NULL;
		/* Kick a thread blocked in __cv_destroy(). */
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	/*
	 * Hold mutex after we release the cvp, otherwise we could dead lock
	 * with a thread holding the mutex and call cv_destroy.
	 */
	mutex_enter(mp);
	return (time_left > 0 ? time_left : -1);
}
215
/*
 * Uninterruptible timed wait until signaled or the absolute jiffies
 * deadline 'exp_time' passes.  Returns remaining jiffies or -1 on
 * timeout.
 */
clock_t
__cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return (__cv_timedwait_common(cvp, mp, exp_time, TASK_UNINTERRUPTIBLE));
}
EXPORT_SYMBOL(__cv_timedwait);
222
/*
 * As __cv_timedwait() but a pending signal may also end the sleep
 * early.  Returns remaining jiffies or -1 on timeout.
 */
clock_t
__cv_timedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return (__cv_timedwait_common(cvp, mp, exp_time, TASK_INTERRUPTIBLE));
}
EXPORT_SYMBOL(__cv_timedwait_sig);
229
/*
 * High-resolution timed-wait path backing cv_timedwait_hires().
 *
 * 'expire_time' argument is an absolute clock time in nanoseconds.
 * Return value is time left (expire_time - now) or -1 if timeout occurred.
 *
 * NOTE(review): unlike the jiffies path, the task is queued on
 * cv_event via prepare_to_wait_exclusive() but then sleeps inside
 * usleep_range() rather than schedule(), so it is not obvious that a
 * cv_signal()/cv_broadcast() wake_up() on cv_event ends the sleep
 * early — confirm; upstream later reworked this path.
 */
static clock_t
__cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t expire_time,
    int state)
{
	DEFINE_WAIT(wait);
	kmutex_t *m;
	hrtime_t time_left, now;
	unsigned long time_left_us;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));
	/* Hold a reference so a racing cv_destroy() cannot free the cv. */
	atomic_inc(&cvp->cv_refs);

	/* Record the associated mutex on first use (debug aid only). */
	m = ACCESS_ONCE(cvp->cv_mutex);
	if (!m)
		m = xchg(&cvp->cv_mutex, mp);
	/* Ensure the same mutex is used by all callers */
	ASSERT(m == NULL || m == mp);

	/* Already expired: bail out without sleeping. */
	now = gethrtime();
	time_left = expire_time - now;
	if (time_left <= 0) {
		atomic_dec(&cvp->cv_refs);
		return (-1);
	}
	time_left_us = time_left / NSEC_PER_USEC;

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * Mutex should be dropped after prepare_to_wait() this
	 * ensures we're linked in to the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	/*
	 * Allow a 100 us range to give kernel an opportunity to coalesce
	 * interrupts
	 */
	usleep_range(time_left_us, time_left_us + 100);

	/* Last waiter out: a different mutex could be used from now on. */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		/*
		 * This is set without any lock, so it's racy. But this is
		 * just for debug anyway, so make it best-effort
		 */
		cvp->cv_mutex = NULL;
		/* Kick a thread blocked in __cv_destroy(). */
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	/* Re-acquire the mutex, then recompute the remaining time. */
	mutex_enter(mp);
	time_left = expire_time - gethrtime();
	return (time_left > 0 ? time_left : -1);
}
295
296 /*
297 * Compatibility wrapper for the cv_timedwait_hires() Illumos interface.
298 */
299 clock_t
300 cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim, hrtime_t res,
301 int flag)
302 {
303 if (res > 1) {
304 /*
305 * Align expiration to the specified resolution.
306 */
307 if (flag & CALLOUT_FLAG_ROUNDUP)
308 tim += res - 1;
309 tim = (tim / res) * res;
310 }
311
312 if (!(flag & CALLOUT_FLAG_ABSOLUTE))
313 tim += gethrtime();
314
315 return (__cv_timedwait_hires(cvp, mp, tim, TASK_UNINTERRUPTIBLE));
316 }
317 EXPORT_SYMBOL(cv_timedwait_hires);
318
/*
 * Wake exactly one thread blocked on 'cvp'.  Safe to call without the
 * associated mutex held; a reference is taken so the cv cannot be
 * destroyed out from under us while waking.
 */
void
__cv_signal(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	atomic_inc(&cvp->cv_refs);

	/*
	 * All waiters are added with WQ_FLAG_EXCLUSIVE so only one
	 * waiter will be set runable with each call to wake_up().
	 * Additionally wake_up() holds a spin_lock assoicated with
	 * the wait queue to ensure we don't race waking up processes.
	 */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up(&cvp->cv_event);

	atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_signal);
338
/*
 * Wake every thread blocked on 'cvp'.  Safe to call without the
 * associated mutex held; a reference is taken so the cv cannot be
 * destroyed out from under us while waking.
 */
void
__cv_broadcast(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	atomic_inc(&cvp->cv_refs);

	/*
	 * Wake_up_all() will wake up all waiters even those which
	 * have the WQ_FLAG_EXCLUSIVE flag set.
	 */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up_all(&cvp->cv_event);

	atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_broadcast);