/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Condition Variables Implementation.
\*****************************************************************************/

#include <sys/condvar.h>

void
__cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
	ASSERT(cvp);
	ASSERT(name == NULL);
	ASSERT(type == CV_DEFAULT);
	ASSERT(arg == NULL);

	cvp->cv_magic = CV_MAGIC;
	init_waitqueue_head(&cvp->cv_event);
	init_waitqueue_head(&cvp->cv_destroy);
	atomic_set(&cvp->cv_waiters, 0);
	atomic_set(&cvp->cv_refs, 1);
	cvp->cv_mutex = NULL;
}
EXPORT_SYMBOL(__cv_init);
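
/*
 * Usage sketch (illustrative only, not part of this file): a condition
 * variable is initialized once, used with a single mutex, and destroyed
 * only after the last waiter and reference have dropped off.  The
 * identifiers 'my_cv' and the surrounding code are hypothetical names
 * used purely for illustration.
 *
 *	kcondvar_t my_cv;
 *
 *	cv_init(&my_cv, NULL, CV_DEFAULT, NULL);
 *	...
 *	cv_destroy(&my_cv);
 */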

static int
cv_destroy_wakeup(kcondvar_t *cvp)
{
	if (!atomic_read(&cvp->cv_waiters) && !atomic_read(&cvp->cv_refs)) {
		ASSERT(cvp->cv_mutex == NULL);
		ASSERT(!waitqueue_active(&cvp->cv_event));
		return 1;
	}

	return 0;
}

void
__cv_destroy(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);

	cvp->cv_magic = CV_DESTROY;
	atomic_dec(&cvp->cv_refs);

	/* Block until all waiters are woken and references dropped. */
	while (cv_destroy_wakeup(cvp) == 0)
		wait_event_timeout(cvp->cv_destroy, cv_destroy_wakeup(cvp), 1);

	ASSERT3P(cvp->cv_mutex, ==, NULL);
	ASSERT3S(atomic_read(&cvp->cv_refs), ==, 0);
	ASSERT3S(atomic_read(&cvp->cv_waiters), ==, 0);
	ASSERT3S(waitqueue_active(&cvp->cv_event), ==, 0);
}
EXPORT_SYMBOL(__cv_destroy);

static void
cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state, int io)
{
	DEFINE_WAIT(wait);

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));
	atomic_inc(&cvp->cv_refs);

	if (cvp->cv_mutex == NULL)
		cvp->cv_mutex = mp;

	/* Ensure the same mutex is used by all callers */
	ASSERT(cvp->cv_mutex == mp);

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/* The mutex must be dropped after prepare_to_wait(); this
	 * ensures we're linked on to the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty. */
	mutex_exit(mp);
	if (io)
		io_schedule();
	else
		schedule();
	mutex_enter(mp);

	/* No more waiters; a different mutex may now be used with this cv */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);
}

void
__cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait);

void
__cv_wait_interruptible(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_INTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait_interruptible);

void
__cv_wait_io(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 1);
}
EXPORT_SYMBOL(__cv_wait_io);
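
/*
 * Usage sketch (illustrative only): as with Solaris condition variables,
 * a waiter must hold the mutex and recheck its predicate in a loop, since
 * a wakeup and the update of the shared state are not atomic with respect
 * to one another.  'data_lock', 'data_cv' and 'data_ready' are
 * hypothetical names used purely for illustration.
 *
 *	mutex_enter(&data_lock);
 *	while (!data_ready)
 *		cv_wait(&data_cv, &data_lock);
 *	... consume the data while still holding the mutex ...
 *	mutex_exit(&data_lock);
 */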

/* 'expire_time' argument is an absolute wall clock time in jiffies.
 * Return value is time left (expire_time - now) or -1 if timeout occurred.
 */
static clock_t
__cv_timedwait_common(kcondvar_t *cvp, kmutex_t *mp,
    clock_t expire_time, int state)
{
	DEFINE_WAIT(wait);
	clock_t time_left;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));
	atomic_inc(&cvp->cv_refs);

	if (cvp->cv_mutex == NULL)
		cvp->cv_mutex = mp;

	/* Ensure the same mutex is used by all callers */
	ASSERT(cvp->cv_mutex == mp);

	/* XXX - Does not handle jiffie wrap properly */
	time_left = expire_time - jiffies;
	if (time_left <= 0) {
		atomic_dec(&cvp->cv_refs);
		return (-1);
	}

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/* The mutex must be dropped after prepare_to_wait(); this
	 * ensures we're linked on to the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty. */
	mutex_exit(mp);
	time_left = schedule_timeout(time_left);
	mutex_enter(mp);

	/* No more waiters; a different mutex may now be used with this cv */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	return (time_left > 0 ? time_left : -1);
}

clock_t
__cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return __cv_timedwait_common(cvp, mp, exp_time, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__cv_timedwait);

clock_t
__cv_timedwait_interruptible(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return __cv_timedwait_common(cvp, mp, exp_time, TASK_INTERRUPTIBLE);
}
EXPORT_SYMBOL(__cv_timedwait_interruptible);
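
/*
 * Usage sketch (illustrative only): since 'exp_time' is an absolute time
 * in jiffies, a caller typically adds a relative timeout to the current
 * tick count.  'data_lock', 'data_cv' and 'data_ready' are hypothetical
 * names, and a 5 second timeout via ddi_get_lbolt() plus HZ is assumed
 * purely for illustration.
 *
 *	mutex_enter(&data_lock);
 *	while (!data_ready) {
 *		if (cv_timedwait(&data_cv, &data_lock,
 *		    ddi_get_lbolt() + 5 * HZ) == -1)
 *			break;
 *	}
 *	mutex_exit(&data_lock);
 *
 * A -1 return indicates the deadline passed before the cv was signaled.
 */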

/*
 * 'expire_time' argument is an absolute clock time in nanoseconds.
 * Return value is time left (expire_time - now) or -1 if timeout occurred.
 */
static clock_t
__cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp,
    hrtime_t expire_time, int state)
{
	DEFINE_WAIT(wait);
	hrtime_t time_left, now;
	unsigned long time_left_us;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));
	atomic_inc(&cvp->cv_refs);

	if (cvp->cv_mutex == NULL)
		cvp->cv_mutex = mp;

	/* Ensure the same mutex is used by all callers */
	ASSERT(cvp->cv_mutex == mp);

	now = gethrtime();
	time_left = expire_time - now;
	if (time_left <= 0) {
		atomic_dec(&cvp->cv_refs);
		return (-1);
	}
	time_left_us = time_left / NSEC_PER_USEC;

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/* The mutex must be dropped after prepare_to_wait(); this
	 * ensures we're linked on to the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty. */
	mutex_exit(mp);
	/* Allow a 100 us range to give the kernel an opportunity to
	 * coalesce interrupts */
	usleep_range(time_left_us, time_left_us + 100);
	mutex_enter(mp);

	/* No more waiters; a different mutex may now be used with this cv */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	time_left = expire_time - gethrtime();
	return (time_left > 0 ? time_left : -1);
}

/*
 * Compatibility wrapper for the cv_timedwait_hires() Illumos interface.
 */
clock_t
cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
    hrtime_t res, int flag)
{
	if (res > 1) {
		/*
		 * Align expiration to the specified resolution.
		 */
		if (flag & CALLOUT_FLAG_ROUNDUP)
			tim += res - 1;
		tim = (tim / res) * res;
	}

	if (!(flag & CALLOUT_FLAG_ABSOLUTE))
		tim += gethrtime();

	return __cv_timedwait_hires(cvp, mp, tim, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(cv_timedwait_hires);
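
/*
 * Usage sketch (illustrative only): without CALLOUT_FLAG_ABSOLUTE the
 * timeout is treated as relative, so a caller can sleep for roughly 10 ms
 * at a 1 ms resolution as follows.  'data_cv' and 'data_lock' are
 * hypothetical names, and the MSEC2NSEC() conversion macro is assumed to
 * be available to the caller.
 *
 *	(void) cv_timedwait_hires(&data_cv, &data_lock,
 *	    MSEC2NSEC(10), MSEC2NSEC(1), 0);
 */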

void
__cv_signal(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	atomic_inc(&cvp->cv_refs);

	/* All waiters are added with WQ_FLAG_EXCLUSIVE, so only one
	 * waiter will be set runnable with each call to wake_up().
	 * Additionally wake_up() holds a spin_lock associated with
	 * the wait queue to ensure we don't race waking up processes. */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up(&cvp->cv_event);

	atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_signal);

void
__cv_broadcast(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	atomic_inc(&cvp->cv_refs);

	/* wake_up_all() will wake up all waiters, even those which
	 * have the WQ_FLAG_EXCLUSIVE flag set. */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up_all(&cvp->cv_event);

	atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_broadcast);
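
/*
 * Usage sketch (illustrative only): a producer updates the shared state
 * under the mutex and then wakes one waiter with cv_signal(), or every
 * waiter with cv_broadcast().  'data_lock', 'data_cv' and 'data_ready'
 * are hypothetical names used purely for illustration.
 *
 *	mutex_enter(&data_lock);
 *	data_ready = 1;
 *	cv_signal(&data_cv);
 *	mutex_exit(&data_lock);
 */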