]> git.proxmox.com Git - mirror_spl-debian.git/blob - module/spl/spl-condvar.c
Imported Upstream version 0.6.2+git20140204
[mirror_spl-debian.git] / module / spl / spl-condvar.c
1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
6 * UCRL-CODE-235197
7 *
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://zfsonlinux.org/>.
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting Layer (SPL) Condition Variables Implementation.
25 \*****************************************************************************/
26
27 #include <sys/condvar.h>
28 #include <spl-debug.h>
29
30 #ifdef SS_DEBUG_SUBSYS
31 #undef SS_DEBUG_SUBSYS
32 #endif
33
34 #define SS_DEBUG_SUBSYS SS_CONDVAR
35
36 void
37 __cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
38 {
39 SENTRY;
40 ASSERT(cvp);
41 ASSERT(name == NULL);
42 ASSERT(type == CV_DEFAULT);
43 ASSERT(arg == NULL);
44
45 cvp->cv_magic = CV_MAGIC;
46 init_waitqueue_head(&cvp->cv_event);
47 init_waitqueue_head(&cvp->cv_destroy);
48 atomic_set(&cvp->cv_waiters, 0);
49 atomic_set(&cvp->cv_refs, 1);
50 cvp->cv_mutex = NULL;
51
52 SEXIT;
53 }
54 EXPORT_SYMBOL(__cv_init);
55
56 static int
57 cv_destroy_wakeup(kcondvar_t *cvp)
58 {
59 if (!atomic_read(&cvp->cv_waiters) && !atomic_read(&cvp->cv_refs)) {
60 ASSERT(cvp->cv_mutex == NULL);
61 ASSERT(!waitqueue_active(&cvp->cv_event));
62 return 1;
63 }
64
65 return 0;
66 }
67
68 void
69 __cv_destroy(kcondvar_t *cvp)
70 {
71 SENTRY;
72 ASSERT(cvp);
73 ASSERT(cvp->cv_magic == CV_MAGIC);
74
75 cvp->cv_magic = CV_DESTROY;
76 atomic_dec(&cvp->cv_refs);
77
78 /* Block until all waiters are woken and references dropped. */
79 while (cv_destroy_wakeup(cvp) == 0)
80 wait_event_timeout(cvp->cv_destroy, cv_destroy_wakeup(cvp), 1);
81
82 ASSERT3P(cvp->cv_mutex, ==, NULL);
83 ASSERT3S(atomic_read(&cvp->cv_refs), ==, 0);
84 ASSERT3S(atomic_read(&cvp->cv_waiters), ==, 0);
85 ASSERT3S(waitqueue_active(&cvp->cv_event), ==, 0);
86
87 SEXIT;
88 }
89 EXPORT_SYMBOL(__cv_destroy);
90
91 static void
92 cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state, int io)
93 {
94 DEFINE_WAIT(wait);
95 SENTRY;
96
97 ASSERT(cvp);
98 ASSERT(mp);
99 ASSERT(cvp->cv_magic == CV_MAGIC);
100 ASSERT(mutex_owned(mp));
101 atomic_inc(&cvp->cv_refs);
102
103 if (cvp->cv_mutex == NULL)
104 cvp->cv_mutex = mp;
105
106 /* Ensure the same mutex is used by all callers */
107 ASSERT(cvp->cv_mutex == mp);
108
109 prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
110 atomic_inc(&cvp->cv_waiters);
111
112 /* Mutex should be dropped after prepare_to_wait() this
113 * ensures we're linked in to the waiters list and avoids the
114 * race where 'cvp->cv_waiters > 0' but the list is empty. */
115 mutex_exit(mp);
116 if (io)
117 io_schedule();
118 else
119 schedule();
120 mutex_enter(mp);
121
122 /* No more waiters a different mutex could be used */
123 if (atomic_dec_and_test(&cvp->cv_waiters)) {
124 cvp->cv_mutex = NULL;
125 wake_up(&cvp->cv_destroy);
126 }
127
128 finish_wait(&cvp->cv_event, &wait);
129 atomic_dec(&cvp->cv_refs);
130
131 SEXIT;
132 }
133
134 void
135 __cv_wait(kcondvar_t *cvp, kmutex_t *mp)
136 {
137 cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 0);
138 }
139 EXPORT_SYMBOL(__cv_wait);
140
141 void
142 __cv_wait_interruptible(kcondvar_t *cvp, kmutex_t *mp)
143 {
144 cv_wait_common(cvp, mp, TASK_INTERRUPTIBLE, 0);
145 }
146 EXPORT_SYMBOL(__cv_wait_interruptible);
147
148 void
149 __cv_wait_io(kcondvar_t *cvp, kmutex_t *mp)
150 {
151 cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 1);
152 }
153 EXPORT_SYMBOL(__cv_wait_io);
154
155 /* 'expire_time' argument is an absolute wall clock time in jiffies.
156 * Return value is time left (expire_time - now) or -1 if timeout occurred.
157 */
158 static clock_t
159 __cv_timedwait_common(kcondvar_t *cvp, kmutex_t *mp,
160 clock_t expire_time, int state)
161 {
162 DEFINE_WAIT(wait);
163 clock_t time_left;
164 SENTRY;
165
166 ASSERT(cvp);
167 ASSERT(mp);
168 ASSERT(cvp->cv_magic == CV_MAGIC);
169 ASSERT(mutex_owned(mp));
170 atomic_inc(&cvp->cv_refs);
171
172 if (cvp->cv_mutex == NULL)
173 cvp->cv_mutex = mp;
174
175 /* Ensure the same mutex is used by all callers */
176 ASSERT(cvp->cv_mutex == mp);
177
178 /* XXX - Does not handle jiffie wrap properly */
179 time_left = expire_time - jiffies;
180 if (time_left <= 0) {
181 atomic_dec(&cvp->cv_refs);
182 SRETURN(-1);
183 }
184
185 prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
186 atomic_inc(&cvp->cv_waiters);
187
188 /* Mutex should be dropped after prepare_to_wait() this
189 * ensures we're linked in to the waiters list and avoids the
190 * race where 'cvp->cv_waiters > 0' but the list is empty. */
191 mutex_exit(mp);
192 time_left = schedule_timeout(time_left);
193 mutex_enter(mp);
194
195 /* No more waiters a different mutex could be used */
196 if (atomic_dec_and_test(&cvp->cv_waiters)) {
197 cvp->cv_mutex = NULL;
198 wake_up(&cvp->cv_destroy);
199 }
200
201 finish_wait(&cvp->cv_event, &wait);
202 atomic_dec(&cvp->cv_refs);
203
204 SRETURN(time_left > 0 ? time_left : -1);
205 }
206
207 clock_t
208 __cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
209 {
210 return __cv_timedwait_common(cvp, mp, exp_time, TASK_UNINTERRUPTIBLE);
211 }
212 EXPORT_SYMBOL(__cv_timedwait);
213
214 clock_t
215 __cv_timedwait_interruptible(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
216 {
217 return __cv_timedwait_common(cvp, mp, exp_time, TASK_INTERRUPTIBLE);
218 }
219 EXPORT_SYMBOL(__cv_timedwait_interruptible);
220
221 /*
222 *'expire_time' argument is an absolute clock time in nanoseconds.
223 * Return value is time left (expire_time - now) or -1 if timeout occurred.
224 */
225 static clock_t
226 __cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp,
227 hrtime_t expire_time, int state)
228 {
229 DEFINE_WAIT(wait);
230 hrtime_t time_left, now;
231 unsigned long time_left_us;
232 SENTRY;
233
234 ASSERT(cvp);
235 ASSERT(mp);
236 ASSERT(cvp->cv_magic == CV_MAGIC);
237 ASSERT(mutex_owned(mp));
238 atomic_inc(&cvp->cv_refs);
239
240 if (cvp->cv_mutex == NULL)
241 cvp->cv_mutex = mp;
242
243 /* Ensure the same mutex is used by all callers */
244 ASSERT(cvp->cv_mutex == mp);
245
246 now = gethrtime();
247 time_left = expire_time - now;
248 if (time_left <= 0) {
249 atomic_dec(&cvp->cv_refs);
250 SRETURN(-1);
251 }
252 time_left_us = time_left / NSEC_PER_USEC;
253
254 prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
255 atomic_inc(&cvp->cv_waiters);
256
257 /* Mutex should be dropped after prepare_to_wait() this
258 * ensures we're linked in to the waiters list and avoids the
259 * race where 'cvp->cv_waiters > 0' but the list is empty. */
260 mutex_exit(mp);
261 /* Allow a 100 us range to give kernel an opportunity to coalesce
262 * interrupts */
263 usleep_range(time_left_us, time_left_us + 100);
264 mutex_enter(mp);
265
266 /* No more waiters a different mutex could be used */
267 if (atomic_dec_and_test(&cvp->cv_waiters)) {
268 cvp->cv_mutex = NULL;
269 wake_up(&cvp->cv_destroy);
270 }
271
272 finish_wait(&cvp->cv_event, &wait);
273 atomic_dec(&cvp->cv_refs);
274
275 time_left = expire_time - gethrtime();
276 SRETURN(time_left > 0 ? time_left : -1);
277 }
278
279 /*
280 * Compatibility wrapper for the cv_timedwait_hires() Illumos interface.
281 */
282 clock_t
283 cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
284 hrtime_t res, int flag)
285 {
286 if (res > 1) {
287 /*
288 * Align expiration to the specified resolution.
289 */
290 if (flag & CALLOUT_FLAG_ROUNDUP)
291 tim += res - 1;
292 tim = (tim / res) * res;
293 }
294
295 if (!(flag & CALLOUT_FLAG_ABSOLUTE))
296 tim += gethrtime();
297
298 return __cv_timedwait_hires(cvp, mp, tim, TASK_UNINTERRUPTIBLE);
299 }
300 EXPORT_SYMBOL(cv_timedwait_hires);
301
302 void
303 __cv_signal(kcondvar_t *cvp)
304 {
305 SENTRY;
306 ASSERT(cvp);
307 ASSERT(cvp->cv_magic == CV_MAGIC);
308 atomic_inc(&cvp->cv_refs);
309
310 /* All waiters are added with WQ_FLAG_EXCLUSIVE so only one
311 * waiter will be set runable with each call to wake_up().
312 * Additionally wake_up() holds a spin_lock assoicated with
313 * the wait queue to ensure we don't race waking up processes. */
314 if (atomic_read(&cvp->cv_waiters) > 0)
315 wake_up(&cvp->cv_event);
316
317 atomic_dec(&cvp->cv_refs);
318 SEXIT;
319 }
320 EXPORT_SYMBOL(__cv_signal);
321
322 void
323 __cv_broadcast(kcondvar_t *cvp)
324 {
325 SENTRY;
326 ASSERT(cvp);
327 ASSERT(cvp->cv_magic == CV_MAGIC);
328 atomic_inc(&cvp->cv_refs);
329
330 /* Wake_up_all() will wake up all waiters even those which
331 * have the WQ_FLAG_EXCLUSIVE flag set. */
332 if (atomic_read(&cvp->cv_waiters) > 0)
333 wake_up_all(&cvp->cv_event);
334
335 atomic_dec(&cvp->cv_refs);
336 SEXIT;
337 }
338 EXPORT_SYMBOL(__cv_broadcast);