module/spl/spl-condvar.c
/*****************************************************************************\
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 * Solaris Porting Layer (SPL) Condition Variables Implementation.
\*****************************************************************************/

#include <sys/condvar.h>
#include <sys/time.h>

void
__cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
        ASSERT(cvp);
        ASSERT(name == NULL);
        ASSERT(type == CV_DEFAULT);
        ASSERT(arg == NULL);

        cvp->cv_magic = CV_MAGIC;
        init_waitqueue_head(&cvp->cv_event);
        init_waitqueue_head(&cvp->cv_destroy);
        atomic_set(&cvp->cv_waiters, 0);
        atomic_set(&cvp->cv_refs, 1);
        cvp->cv_mutex = NULL;
}
EXPORT_SYMBOL(__cv_init);

static int
cv_destroy_wakeup(kcondvar_t *cvp)
{
        if (!atomic_read(&cvp->cv_waiters) && !atomic_read(&cvp->cv_refs)) {
                ASSERT(cvp->cv_mutex == NULL);
                ASSERT(!waitqueue_active(&cvp->cv_event));
                return 1;
        }

        return 0;
}

void
__cv_destroy(kcondvar_t *cvp)
{
        ASSERT(cvp);
        ASSERT(cvp->cv_magic == CV_MAGIC);

        cvp->cv_magic = CV_DESTROY;
        atomic_dec(&cvp->cv_refs);

        /* Block until all waiters are woken and references dropped. */
        while (cv_destroy_wakeup(cvp) == 0)
                wait_event_timeout(cvp->cv_destroy, cv_destroy_wakeup(cvp), 1);

        ASSERT3P(cvp->cv_mutex, ==, NULL);
        ASSERT3S(atomic_read(&cvp->cv_refs), ==, 0);
        ASSERT3S(atomic_read(&cvp->cv_waiters), ==, 0);
        ASSERT3S(waitqueue_active(&cvp->cv_event), ==, 0);
}
EXPORT_SYMBOL(__cv_destroy);
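
/*
 * Lifecycle sketch (hypothetical caller code, not part of this file; it
 * assumes the cv_init()/cv_destroy() macros in sys/condvar.h map to the
 * __cv_init()/__cv_destroy() functions above).  The name, type, and arg
 * parameters are unused here, so a condvar is typically set up with NULL
 * and CV_DEFAULT, and any remaining waiters must be woken before the
 * destroy, which blocks until the last waiter drops its reference:
 *
 *        kcondvar_t cv;
 *
 *        cv_init(&cv, NULL, CV_DEFAULT, NULL);
 *        ...
 *        cv_broadcast(&cv);        (wake any remaining waiters)
 *        cv_destroy(&cv);          (blocks until all references are dropped)
 */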

static void
cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state, int io)
{
        DEFINE_WAIT(wait);

        ASSERT(cvp);
        ASSERT(mp);
        ASSERT(cvp->cv_magic == CV_MAGIC);
        ASSERT(mutex_owned(mp));
        atomic_inc(&cvp->cv_refs);

        if (cvp->cv_mutex == NULL)
                cvp->cv_mutex = mp;

        /* Ensure the same mutex is used by all callers */
        ASSERT(cvp->cv_mutex == mp);

        prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
        atomic_inc(&cvp->cv_waiters);

        /* The mutex must be dropped after prepare_to_wait(); this ensures
         * we're linked in to the waiters list and avoids the race where
         * 'cvp->cv_waiters > 0' but the list is empty. */
        mutex_exit(mp);
        if (io)
                io_schedule();
        else
                schedule();
        mutex_enter(mp);

        /* No more waiters, so a different mutex could be used */
        if (atomic_dec_and_test(&cvp->cv_waiters)) {
                cvp->cv_mutex = NULL;
                wake_up(&cvp->cv_destroy);
        }

        finish_wait(&cvp->cv_event, &wait);
        atomic_dec(&cvp->cv_refs);
}

void
__cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
        cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait);

void
__cv_wait_interruptible(kcondvar_t *cvp, kmutex_t *mp)
{
        cv_wait_common(cvp, mp, TASK_INTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait_interruptible);

void
__cv_wait_io(kcondvar_t *cvp, kmutex_t *mp)
{
        cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 1);
}
EXPORT_SYMBOL(__cv_wait_io);
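
/*
 * Waiter-side sketch (hypothetical consumer code, assuming the cv_wait()
 * macro in sys/condvar.h maps to __cv_wait() above).  Because a wakeup
 * carries no state of its own, the predicate is always rechecked under
 * the mutex after cv_wait() returns:
 *
 *        mutex_enter(&lock);
 *        while (!condition)
 *                cv_wait(&cv, &lock);
 *        (condition now holds and the mutex is still held)
 *        mutex_exit(&lock);
 */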

/* The 'expire_time' argument is an absolute wall clock time in jiffies.
 * Return value is time left (expire_time - now) or -1 if timeout occurred.
 */
static clock_t
__cv_timedwait_common(kcondvar_t *cvp, kmutex_t *mp,
    clock_t expire_time, int state)
{
        DEFINE_WAIT(wait);
        clock_t time_left;

        ASSERT(cvp);
        ASSERT(mp);
        ASSERT(cvp->cv_magic == CV_MAGIC);
        ASSERT(mutex_owned(mp));
        atomic_inc(&cvp->cv_refs);

        if (cvp->cv_mutex == NULL)
                cvp->cv_mutex = mp;

        /* Ensure the same mutex is used by all callers */
        ASSERT(cvp->cv_mutex == mp);

        /* XXX - Does not handle jiffy wrap properly */
        time_left = expire_time - jiffies;
        if (time_left <= 0) {
                atomic_dec(&cvp->cv_refs);
                return (-1);
        }

        prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
        atomic_inc(&cvp->cv_waiters);

        /* The mutex must be dropped after prepare_to_wait(); this ensures
         * we're linked in to the waiters list and avoids the race where
         * 'cvp->cv_waiters > 0' but the list is empty. */
        mutex_exit(mp);
        time_left = schedule_timeout(time_left);
        mutex_enter(mp);

        /* No more waiters, so a different mutex could be used */
        if (atomic_dec_and_test(&cvp->cv_waiters)) {
                cvp->cv_mutex = NULL;
                wake_up(&cvp->cv_destroy);
        }

        finish_wait(&cvp->cv_event, &wait);
        atomic_dec(&cvp->cv_refs);

        return (time_left > 0 ? time_left : -1);
}

clock_t
__cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
        return __cv_timedwait_common(cvp, mp, exp_time, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__cv_timedwait);
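
/*
 * Timed-wait sketch (hypothetical caller code; ddi_get_lbolt() and
 * MSEC_TO_TICK() are assumed to be the jiffies helpers provided elsewhere
 * by the SPL).  The expiry is absolute, so a relative timeout is written
 * as "now + delta", and a return value of -1 means the deadline passed
 * before the condition was signaled:
 *
 *        clock_t deadline = ddi_get_lbolt() + MSEC_TO_TICK(100);
 *
 *        mutex_enter(&lock);
 *        while (!condition) {
 *                if (cv_timedwait(&cv, &lock, deadline) == -1)
 *                        break;
 *        }
 *        mutex_exit(&lock);
 */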

clock_t
__cv_timedwait_interruptible(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
        return __cv_timedwait_common(cvp, mp, exp_time, TASK_INTERRUPTIBLE);
}
EXPORT_SYMBOL(__cv_timedwait_interruptible);

/*
 * The 'expire_time' argument is an absolute clock time in nanoseconds.
 * Return value is time left (expire_time - now) or -1 if timeout occurred.
 */
static clock_t
__cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp,
    hrtime_t expire_time, int state)
{
        DEFINE_WAIT(wait);
        hrtime_t time_left, now;
        unsigned long time_left_us;

        ASSERT(cvp);
        ASSERT(mp);
        ASSERT(cvp->cv_magic == CV_MAGIC);
        ASSERT(mutex_owned(mp));
        atomic_inc(&cvp->cv_refs);

        if (cvp->cv_mutex == NULL)
                cvp->cv_mutex = mp;

        /* Ensure the same mutex is used by all callers */
        ASSERT(cvp->cv_mutex == mp);

        now = gethrtime();
        time_left = expire_time - now;
        if (time_left <= 0) {
                atomic_dec(&cvp->cv_refs);
                return (-1);
        }
        time_left_us = time_left / NSEC_PER_USEC;

        prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
        atomic_inc(&cvp->cv_waiters);

        /* The mutex must be dropped after prepare_to_wait(); this ensures
         * we're linked in to the waiters list and avoids the race where
         * 'cvp->cv_waiters > 0' but the list is empty. */
        mutex_exit(mp);
        /* Allow a 100 us range to give the kernel an opportunity to
         * coalesce interrupts */
        usleep_range(time_left_us, time_left_us + 100);
        mutex_enter(mp);

        /* No more waiters, so a different mutex could be used */
        if (atomic_dec_and_test(&cvp->cv_waiters)) {
                cvp->cv_mutex = NULL;
                wake_up(&cvp->cv_destroy);
        }

        finish_wait(&cvp->cv_event, &wait);
        atomic_dec(&cvp->cv_refs);

        time_left = expire_time - gethrtime();
        return (time_left > 0 ? time_left : -1);
}

/*
 * Compatibility wrapper for the cv_timedwait_hires() Illumos interface.
 */
clock_t
cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
    hrtime_t res, int flag)
{
        if (res > 1) {
                /*
                 * Align expiration to the specified resolution.
                 */
                if (flag & CALLOUT_FLAG_ROUNDUP)
                        tim += res - 1;
                tim = (tim / res) * res;
        }

        if (!(flag & CALLOUT_FLAG_ABSOLUTE))
                tim += gethrtime();

        return __cv_timedwait_hires(cvp, mp, tim, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(cv_timedwait_hires);
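
/*
 * High-resolution sketch (hypothetical caller code; MSEC2NSEC() is assumed
 * to be the usual millisecond-to-nanosecond conversion macro).  A relative
 * wait of fifteen milliseconds rounded up to the next ten millisecond
 * boundary (twenty milliseconds here); without CALLOUT_FLAG_ABSOLUTE the
 * 'tim' argument is treated as relative and converted to an absolute time
 * above:
 *
 *        mutex_enter(&lock);
 *        (void) cv_timedwait_hires(&cv, &lock, MSEC2NSEC(15), MSEC2NSEC(10),
 *            CALLOUT_FLAG_ROUNDUP);
 *        mutex_exit(&lock);
 */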

void
__cv_signal(kcondvar_t *cvp)
{
        ASSERT(cvp);
        ASSERT(cvp->cv_magic == CV_MAGIC);
        atomic_inc(&cvp->cv_refs);

        /* All waiters are added with WQ_FLAG_EXCLUSIVE, so only one
         * waiter will be set runnable with each call to wake_up().
         * Additionally, wake_up() holds a spin_lock associated with
         * the wait queue to ensure we don't race waking up processes. */
        if (atomic_read(&cvp->cv_waiters) > 0)
                wake_up(&cvp->cv_event);

        atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_signal);
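
/*
 * Signal sketch (hypothetical producer code; the queue manipulation simply
 * stands in for whatever state change the caller makes).  The state change
 * and the wakeup happen under the same mutex the waiters use, so a waiter
 * cannot miss the transition between checking its predicate and sleeping;
 * cv_signal() is the right call when a single waiter can consume the event:
 *
 *        mutex_enter(&lock);
 *        list_insert_tail(&queue, item);
 *        cv_signal(&cv);
 *        mutex_exit(&lock);
 */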

void
__cv_broadcast(kcondvar_t *cvp)
{
        ASSERT(cvp);
        ASSERT(cvp->cv_magic == CV_MAGIC);
        atomic_inc(&cvp->cv_refs);

        /* wake_up_all() will wake up all waiters, even those which
         * have the WQ_FLAG_EXCLUSIVE flag set. */
        if (atomic_read(&cvp->cv_waiters) > 0)
                wake_up_all(&cvp->cv_event);

        atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_broadcast);
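
/*
 * Broadcast sketch (hypothetical producer code).  Unlike cv_signal(),
 * every waiter is made runnable and each re-acquires the mutex in turn
 * before returning from cv_wait(), which is the appropriate wakeup when
 * all waiters must observe the change, for example during teardown:
 *
 *        mutex_enter(&lock);
 *        shutting_down = B_TRUE;
 *        cv_broadcast(&cv);
 *        mutex_exit(&lock);
 */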