/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Condition Variables Implementation.
\*****************************************************************************/
26
27 #include <sys/condvar.h>
28 #include <spl-debug.h>
29
30 #ifdef SS_DEBUG_SUBSYS
31 #undef SS_DEBUG_SUBSYS
32 #endif
33
34 #define SS_DEBUG_SUBSYS SS_CONDVAR
35
void
__cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
	SENTRY;
	ASSERT(cvp);
	ASSERT(name == NULL);
	ASSERT(type == CV_DEFAULT);
	ASSERT(arg == NULL);

	cvp->cv_magic = CV_MAGIC;
	init_waitqueue_head(&cvp->cv_event);
	init_waitqueue_head(&cvp->cv_destroy);
	atomic_set(&cvp->cv_waiters, 0);
	atomic_set(&cvp->cv_refs, 1);
	cvp->cv_mutex = NULL;

	SEXIT;
}
EXPORT_SYMBOL(__cv_init);
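
/* Usage note (a minimal sketch; the 'foo' names are hypothetical and
 * not part of this file).  A condition variable is initialized once,
 * waited on and signaled under a single shared mutex, and destroyed
 * only after the last waiter has dropped its reference, via the
 * cv_init()/cv_destroy() wrappers from sys/condvar.h:
 *
 *	kcondvar_t foo_cv;
 *
 *	cv_init(&foo_cv, NULL, CV_DEFAULT, NULL);
 *	...
 *	cv_broadcast(&foo_cv);
 *	cv_destroy(&foo_cv);
 */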

/* Helper used by __cv_destroy() below: returns non-zero once the last
 * waiter and the last reference are gone, at which point it is safe
 * for the destroy to complete. */
static int
cv_destroy_wakeup(kcondvar_t *cvp)
{
	if (!atomic_read(&cvp->cv_waiters) && !atomic_read(&cvp->cv_refs)) {
		ASSERT(cvp->cv_mutex == NULL);
		ASSERT(!waitqueue_active(&cvp->cv_event));
		return 1;
	}

	return 0;
}

void
__cv_destroy(kcondvar_t *cvp)
{
	SENTRY;
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);

	cvp->cv_magic = CV_DESTROY;
	atomic_dec(&cvp->cv_refs);

	/* Block until all waiters are woken and references dropped. */
	while (cv_destroy_wakeup(cvp) == 0)
		wait_event_timeout(cvp->cv_destroy, cv_destroy_wakeup(cvp), 1);

	ASSERT3P(cvp->cv_mutex, ==, NULL);
	ASSERT3S(atomic_read(&cvp->cv_refs), ==, 0);
	ASSERT3S(atomic_read(&cvp->cv_waiters), ==, 0);
	ASSERT3S(waitqueue_active(&cvp->cv_event), ==, 0);

	SEXIT;
}
EXPORT_SYMBOL(__cv_destroy);

static void
cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state, int io)
{
	DEFINE_WAIT(wait);
	SENTRY;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));
	atomic_inc(&cvp->cv_refs);

	if (cvp->cv_mutex == NULL)
		cvp->cv_mutex = mp;

	/* Ensure the same mutex is used by all callers */
	ASSERT(cvp->cv_mutex == mp);

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/* The mutex must be dropped after prepare_to_wait(); this
	 * ensures we are linked on to the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty. */
	mutex_exit(mp);
	if (io)
		io_schedule();
	else
		schedule();
	mutex_enter(mp);

	/* No more waiters; a different mutex may now be used. */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	SEXIT;
}

void
__cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait);
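
/* Canonical consumer pattern (a minimal sketch; the 'foo' names are
 * hypothetical), using the cv_wait() wrapper from sys/condvar.h.
 * Because a waiter may be woken spuriously or by a broadcast meant
 * for another condition, the predicate is always re-checked in a
 * loop while holding the mutex:
 *
 *	mutex_enter(&foo_lock);
 *	while (!foo_ready)
 *		cv_wait(&foo_cv, &foo_lock);
 *	... consume the now-valid state ...
 *	mutex_exit(&foo_lock);
 */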

void
__cv_wait_interruptible(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_INTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait_interruptible);

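/* Identical to __cv_wait() except the sleep is performed with
 * io_schedule(), so the time spent blocked is accounted as iowait
 * rather than idle time. */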
void
__cv_wait_io(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 1);
}
EXPORT_SYMBOL(__cv_wait_io);

/* The 'expire_time' argument is an absolute wall clock time in jiffies.
 * The return value is the time left (expire_time - now) or -1 if the
 * timeout occurred.
 */
static clock_t
__cv_timedwait_common(kcondvar_t *cvp, kmutex_t *mp,
    clock_t expire_time, int state)
{
	DEFINE_WAIT(wait);
	clock_t time_left;
	SENTRY;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));
	atomic_inc(&cvp->cv_refs);

	if (cvp->cv_mutex == NULL)
		cvp->cv_mutex = mp;

	/* Ensure the same mutex is used by all callers */
	ASSERT(cvp->cv_mutex == mp);

	/* XXX - Does not handle jiffies wrap properly */
	time_left = expire_time - jiffies;
	if (time_left <= 0) {
		atomic_dec(&cvp->cv_refs);
		SRETURN(-1);
	}

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/* The mutex must be dropped after prepare_to_wait(); this
	 * ensures we are linked on to the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty. */
	mutex_exit(mp);
	time_left = schedule_timeout(time_left);
	mutex_enter(mp);

	/* No more waiters; a different mutex may now be used. */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	SRETURN(time_left > 0 ? time_left : -1);
}

clock_t
__cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return __cv_timedwait_common(cvp, mp, exp_time, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__cv_timedwait);
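
/* Usage sketch for the jiffies-based wait (hypothetical 'foo' names;
 * ddi_get_lbolt() and 'hz' are assumed to be the SPL lbolt and tick
 * rate helpers).  The deadline is absolute, so it is computed from
 * the current lbolt:
 *
 *	clock_t deadline = ddi_get_lbolt() + 5 * hz;
 *
 *	mutex_enter(&foo_lock);
 *	while (!foo_ready) {
 *		if (cv_timedwait(&foo_cv, &foo_lock, deadline) == -1)
 *			break;
 *	}
 *	mutex_exit(&foo_lock);
 */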

clock_t
__cv_timedwait_interruptible(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return __cv_timedwait_common(cvp, mp, exp_time, TASK_INTERRUPTIBLE);
}
EXPORT_SYMBOL(__cv_timedwait_interruptible);
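
/* Note: in the TASK_INTERRUPTIBLE case a pending signal also ends the
 * sleep, and schedule_timeout() then returns the remaining jiffies, so
 * a positive return value does not by itself mean the condition was
 * signaled; callers must re-check their predicate. */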

/*
 * The 'expire_time' argument is an absolute clock time in nanoseconds.
 * The return value is the time left (expire_time - now) or -1 if the
 * timeout occurred.
 */
static clock_t
__cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp,
    hrtime_t expire_time, int state)
{
	DEFINE_WAIT(wait);
	hrtime_t time_left, now;
	unsigned long time_left_us;
	SENTRY;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));
	atomic_inc(&cvp->cv_refs);

	if (cvp->cv_mutex == NULL)
		cvp->cv_mutex = mp;

	/* Ensure the same mutex is used by all callers */
	ASSERT(cvp->cv_mutex == mp);

	now = gethrtime();
	time_left = expire_time - now;
	if (time_left <= 0) {
		atomic_dec(&cvp->cv_refs);
		SRETURN(-1);
	}
	time_left_us = time_left / NSEC_PER_USEC;

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/* The mutex must be dropped after prepare_to_wait(); this
	 * ensures we are linked on to the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty. */
	mutex_exit(mp);
	/* Allow a 100 us range to give the kernel an opportunity to
	 * coalesce interrupts. */
	usleep_range(time_left_us, time_left_us + 100);
	mutex_enter(mp);

	/* No more waiters; a different mutex may now be used. */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	time_left = expire_time - gethrtime();
	SRETURN(time_left > 0 ? time_left : -1);
}

/*
 * Compatibility wrapper for the cv_timedwait_hires() Illumos interface.
 */
clock_t
cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
    hrtime_t res, int flag)
{
	if (res > 1) {
		/*
		 * Align expiration to the specified resolution.
		 */
		if (flag & CALLOUT_FLAG_ROUNDUP)
			tim += res - 1;
		tim = (tim / res) * res;
	}

	if (!(flag & CALLOUT_FLAG_ABSOLUTE))
		tim += gethrtime();

	return __cv_timedwait_hires(cvp, mp, tim, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(cv_timedwait_hires);
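
/* Worked example of the resolution alignment above: with a relative
 * tim = 2,500,000 ns, res = 1,000,000 ns (1 ms), and CALLOUT_FLAG_ROUNDUP
 * set, tim becomes (2,500,000 + 999,999) / 1,000,000 * 1,000,000 =
 * 3,000,000 ns; without the ROUNDUP flag the same inputs truncate down
 * to 2,000,000 ns.  The relative result is then converted to an
 * absolute deadline by adding gethrtime(). */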

void
__cv_signal(kcondvar_t *cvp)
{
	SENTRY;
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	atomic_inc(&cvp->cv_refs);

	/* All waiters are added with WQ_FLAG_EXCLUSIVE, so only one
	 * waiter will be set runnable with each call to wake_up().
	 * Additionally, wake_up() holds a spin_lock associated with
	 * the wait queue to ensure we don't race waking up processes. */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up(&cvp->cv_event);

	atomic_dec(&cvp->cv_refs);
	SEXIT;
}
EXPORT_SYMBOL(__cv_signal);

void
__cv_broadcast(kcondvar_t *cvp)
{
	SENTRY;
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	atomic_inc(&cvp->cv_refs);

	/* wake_up_all() will wake up all waiters, even those which
	 * have the WQ_FLAG_EXCLUSIVE flag set. */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up_all(&cvp->cv_event);

	atomic_dec(&cvp->cv_refs);
	SEXIT;
}
EXPORT_SYMBOL(__cv_broadcast);
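
/* Producer-side sketch to pair with the consumer pattern above
 * (hypothetical 'foo' names).  The condition is updated while holding
 * the same mutex the waiters use, then cv_signal() wakes one exclusive
 * waiter (or cv_broadcast() wakes them all):
 *
 *	mutex_enter(&foo_lock);
 *	foo_ready = 1;
 *	cv_signal(&foo_cv);
 *	mutex_exit(&foo_lock);
 */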