/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Solaris Porting Layer (SPL) Condition Variables Implementation.
 */

#include <sys/condvar.h>
#include <sys/time.h>

void
__cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
{
	ASSERT(cvp);
	ASSERT(name == NULL);
	ASSERT(type == CV_DEFAULT);
	ASSERT(arg == NULL);

	cvp->cv_magic = CV_MAGIC;
	init_waitqueue_head(&cvp->cv_event);
	init_waitqueue_head(&cvp->cv_destroy);
	atomic_set(&cvp->cv_waiters, 0);
	atomic_set(&cvp->cv_refs, 1);
	cvp->cv_mutex = NULL;
}
EXPORT_SYMBOL(__cv_init);
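
/*
 * Example (a minimal sketch, not part of the original file): consumers
 * reach this code through the cv_init() macro in sys/condvar.h.  The
 * name, type, and arg parameters exist for Solaris compatibility, but
 * as the ASSERTs above show, only NULL/CV_DEFAULT/NULL are accepted.
 * The 'lock' and 'cv' names below are illustrative.
 *
 *	kmutex_t lock;
 *	kcondvar_t cv;
 *
 *	mutex_init(&lock, NULL, MUTEX_DEFAULT, NULL);
 *	cv_init(&cv, NULL, CV_DEFAULT, NULL);
 */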

static int
cv_destroy_wakeup(kcondvar_t *cvp)
{
	if (!atomic_read(&cvp->cv_waiters) && !atomic_read(&cvp->cv_refs)) {
		ASSERT(cvp->cv_mutex == NULL);
		ASSERT(!waitqueue_active(&cvp->cv_event));
		return (1);
	}

	return (0);
}

void
__cv_destroy(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);

	cvp->cv_magic = CV_DESTROY;
	atomic_dec(&cvp->cv_refs);

	/* Block until all waiters are woken and references dropped. */
	while (cv_destroy_wakeup(cvp) == 0)
		wait_event_timeout(cvp->cv_destroy, cv_destroy_wakeup(cvp), 1);

	ASSERT3P(cvp->cv_mutex, ==, NULL);
	ASSERT3S(atomic_read(&cvp->cv_refs), ==, 0);
	ASSERT3S(atomic_read(&cvp->cv_waiters), ==, 0);
	ASSERT3S(waitqueue_active(&cvp->cv_event), ==, 0);
}
EXPORT_SYMBOL(__cv_destroy);
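
/*
 * Example (a minimal sketch): because __cv_destroy() blocks until all
 * waiters have been woken and have dropped their references, a typical
 * teardown raises a shutdown flag, wakes everyone, and only then
 * destroys the condvar.  The 'shutting_down' flag is illustrative.
 *
 *	mutex_enter(&lock);
 *	shutting_down = B_TRUE;
 *	cv_broadcast(&cv);
 *	mutex_exit(&lock);
 *
 *	cv_destroy(&cv);
 *	mutex_destroy(&lock);
 */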

static void
cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state, int io)
{
	DEFINE_WAIT(wait);

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));
	atomic_inc(&cvp->cv_refs);

	if (cvp->cv_mutex == NULL)
		cvp->cv_mutex = mp;

	/* Ensure the same mutex is used by all callers */
	ASSERT(cvp->cv_mutex == mp);

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * The mutex must be dropped after prepare_to_wait(); this
	 * ensures we're linked into the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	if (io)
		io_schedule();
	else
		schedule();
	mutex_enter(mp);

	/* With no more waiters, a different mutex could be used */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);
}

void
__cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait);

void
__cv_wait_sig(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_INTERRUPTIBLE, 0);
}
EXPORT_SYMBOL(__cv_wait_sig);

void
__cv_wait_io(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE, 1);
}
EXPORT_SYMBOL(__cv_wait_io);
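
/*
 * Example (a minimal sketch): as with any condition variable, callers
 * should re-check their predicate in a loop, since another thread may
 * consume the condition between the wakeup and reacquiring the mutex.
 * The 'work_ready' flag is illustrative.
 *
 *	mutex_enter(&lock);
 *	while (!work_ready)
 *		cv_wait(&cv, &lock);
 *	(consume the work while still holding 'lock')
 *	mutex_exit(&lock);
 */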

/*
 * 'expire_time' argument is an absolute wall clock time in jiffies.
 * Return value is time left (expire_time - now) or -1 if timeout occurred.
 */
static clock_t
__cv_timedwait_common(kcondvar_t *cvp, kmutex_t *mp, clock_t expire_time,
    int state)
{
	DEFINE_WAIT(wait);
	clock_t time_left;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));
	atomic_inc(&cvp->cv_refs);

	if (cvp->cv_mutex == NULL)
		cvp->cv_mutex = mp;

	/* Ensure the same mutex is used by all callers */
	ASSERT(cvp->cv_mutex == mp);

	/* XXX - Does not handle jiffie wrap properly */
	time_left = expire_time - jiffies;
	if (time_left <= 0) {
		atomic_dec(&cvp->cv_refs);
		return (-1);
	}

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * The mutex must be dropped after prepare_to_wait(); this
	 * ensures we're linked into the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	time_left = schedule_timeout(time_left);
	mutex_enter(mp);

	/* With no more waiters, a different mutex could be used */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	return (time_left > 0 ? time_left : -1);
}

clock_t
__cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return (__cv_timedwait_common(cvp, mp, exp_time, TASK_UNINTERRUPTIBLE));
}
EXPORT_SYMBOL(__cv_timedwait);

clock_t
__cv_timedwait_sig(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return (__cv_timedwait_common(cvp, mp, exp_time, TASK_INTERRUPTIBLE));
}
EXPORT_SYMBOL(__cv_timedwait_sig);
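
/*
 * Example (a minimal sketch): the expiration time is absolute, so
 * callers normally add a relative timeout in ticks to the current
 * lbolt value.  This assumes the ddi_get_lbolt() and 'hz' definitions
 * provided by the SPL headers; a -1 return means the wait timed out.
 *
 *	mutex_enter(&lock);
 *	while (!work_ready) {
 *		if (cv_timedwait(&cv, &lock, ddi_get_lbolt() + hz) == -1)
 *			break;
 *	}
 *	mutex_exit(&lock);
 */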

/*
 * 'expire_time' argument is an absolute clock time in nanoseconds.
 * Return value is time left (expire_time - now) or -1 if timeout occurred.
 */
static clock_t
__cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t expire_time,
    int state)
{
	DEFINE_WAIT(wait);
	hrtime_t time_left, now;
	unsigned long time_left_us;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));
	atomic_inc(&cvp->cv_refs);

	if (cvp->cv_mutex == NULL)
		cvp->cv_mutex = mp;

	/* Ensure the same mutex is used by all callers */
	ASSERT(cvp->cv_mutex == mp);

	now = gethrtime();
	time_left = expire_time - now;
	if (time_left <= 0) {
		atomic_dec(&cvp->cv_refs);
		return (-1);
	}
	time_left_us = time_left / NSEC_PER_USEC;

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/*
	 * The mutex must be dropped after prepare_to_wait(); this
	 * ensures we're linked into the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty.
	 */
	mutex_exit(mp);
	/*
	 * Allow a 100 us range to give the kernel an opportunity to
	 * coalesce interrupts.
	 */
	usleep_range(time_left_us, time_left_us + 100);
	mutex_enter(mp);

	/* With no more waiters, a different mutex could be used */
	if (atomic_dec_and_test(&cvp->cv_waiters)) {
		cvp->cv_mutex = NULL;
		wake_up(&cvp->cv_destroy);
	}

	finish_wait(&cvp->cv_event, &wait);
	atomic_dec(&cvp->cv_refs);

	time_left = expire_time - gethrtime();
	return (time_left > 0 ? time_left : -1);
}

/*
 * Compatibility wrapper for the cv_timedwait_hires() Illumos interface.
 */
clock_t
cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim, hrtime_t res,
    int flag)
{
	if (res > 1) {
		/*
		 * Align expiration to the specified resolution.
		 */
		if (flag & CALLOUT_FLAG_ROUNDUP)
			tim += res - 1;
		tim = (tim / res) * res;
	}

	if (!(flag & CALLOUT_FLAG_ABSOLUTE))
		tim += gethrtime();

	return (__cv_timedwait_hires(cvp, mp, tim, TASK_UNINTERRUPTIBLE));
}
EXPORT_SYMBOL(cv_timedwait_hires);
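
/*
 * Example (a minimal sketch): without CALLOUT_FLAG_ABSOLUTE the 'tim'
 * argument is taken as relative, so a 10 ms wait rounded down to 1 ms
 * resolution, assuming the MSEC2NSEC() helper from the SPL's
 * sys/time.h, looks like:
 *
 *	mutex_enter(&lock);
 *	(void) cv_timedwait_hires(&cv, &lock, MSEC2NSEC(10),
 *	    MSEC2NSEC(1), 0);
 *	mutex_exit(&lock);
 */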

void
__cv_signal(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	atomic_inc(&cvp->cv_refs);

	/*
	 * All waiters are added with WQ_FLAG_EXCLUSIVE, so only one
	 * waiter will be made runnable by each call to wake_up().
	 * Additionally, wake_up() holds a spinlock associated with
	 * the wait queue to ensure we don't race waking up processes.
	 */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up(&cvp->cv_event);

	atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_signal);

void
__cv_broadcast(kcondvar_t *cvp)
{
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	atomic_inc(&cvp->cv_refs);

	/*
	 * wake_up_all() will wake up all waiters, even those which
	 * have the WQ_FLAG_EXCLUSIVE flag set.
	 */
	if (atomic_read(&cvp->cv_waiters) > 0)
		wake_up_all(&cvp->cv_event);

	atomic_dec(&cvp->cv_refs);
}
EXPORT_SYMBOL(__cv_broadcast);
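
/*
 * Example (a minimal sketch): producers update shared state and signal
 * under the same mutex the waiters use.  cv_signal() wakes a single
 * exclusive waiter; cv_broadcast() wakes them all.  The 'work_ready'
 * flag is illustrative.
 *
 *	mutex_enter(&lock);
 *	work_ready = B_TRUE;
 *	cv_signal(&cv);
 *	mutex_exit(&lock);
 */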