]> git.proxmox.com Git - mirror_spl.git/blob - module/spl/spl-condvar.c
Add cv_timedwait_interruptible() function
[mirror_spl.git] / module / spl / spl-condvar.c
1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
6 * UCRL-CODE-235197
7 *
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://github.com/behlendorf/spl/>.
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
 * Solaris Porting Layer (SPL) Condition Variables Implementation.
25 \*****************************************************************************/
26
27 #include <sys/condvar.h>
28 #include <spl-debug.h>
29
30 #ifdef SS_DEBUG_SUBSYS
31 #undef SS_DEBUG_SUBSYS
32 #endif
33
34 #define SS_DEBUG_SUBSYS SS_CONDVAR
35
36 void
37 __cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
38 {
39 int flags = KM_SLEEP;
40
41 SENTRY;
42 ASSERT(cvp);
43 ASSERT(name);
44 ASSERT(type == CV_DEFAULT);
45 ASSERT(arg == NULL);
46
47 cvp->cv_magic = CV_MAGIC;
48 init_waitqueue_head(&cvp->cv_event);
49 atomic_set(&cvp->cv_waiters, 0);
50 cvp->cv_mutex = NULL;
51 cvp->cv_name = NULL;
52 cvp->cv_name_size = strlen(name) + 1;
53
54 /* We may be called when there is a non-zero preempt_count or
55 * interrupts are disabled is which case we must not sleep.
56 */
57 if (current_thread_info()->preempt_count || irqs_disabled())
58 flags = KM_NOSLEEP;
59
60 cvp->cv_name = kmem_alloc(cvp->cv_name_size, flags);
61 if (cvp->cv_name)
62 strcpy(cvp->cv_name, name);
63
64 SEXIT;
65 }
66 EXPORT_SYMBOL(__cv_init);
67
/* Destroy a condition variable.  The caller must guarantee no thread
 * is still blocked on (or racing to block on) 'cvp': the ASSERTs
 * below require zero waiters, an empty wait queue, and a detached
 * mutex before teardown. */
void
__cv_destroy(kcondvar_t *cvp)
{
	SENTRY;
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(cvp->cv_mutex == NULL);
	ASSERT(atomic_read(&cvp->cv_waiters) == 0);
	ASSERT(!waitqueue_active(&cvp->cv_event));

	/* Free the private name copy made by __cv_init() (may be NULL
	 * if that allocation failed). */
	if (cvp->cv_name)
		kmem_free(cvp->cv_name, cvp->cv_name_size);

	/* Poison the structure (debug builds) so any use-after-destroy
	 * is detectable; memset() returns its first argument, so the
	 * ASSERT3P merely keeps the call out of non-debug builds. */
	ASSERT3P(memset(cvp, CV_POISON, sizeof(*cvp)), ==, cvp);
	SEXIT;
}
EXPORT_SYMBOL(__cv_destroy);
85
/* Block the caller on 'cvp' until it is signalled, releasing 'mp'
 * while asleep.  'state' is TASK_UNINTERRUPTIBLE or TASK_INTERRUPTIBLE
 * (the latter may be woken early by a signal).  Must be entered with
 * 'mp' held; returns with 'mp' re-acquired.  NOTE: statement order
 * below is race-critical — see the inline comments. */
static void
cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state)
{
	DEFINE_WAIT(wait);
	SENTRY;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));

	/* The first waiter binds the condvar to its mutex for the
	 * duration of the wait. */
	if (cvp->cv_mutex == NULL)
		cvp->cv_mutex = mp;

	/* Ensure the same mutex is used by all callers */
	ASSERT(cvp->cv_mutex == mp);

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/* Mutex should be dropped after prepare_to_wait() this
	 * ensures we're linked in to the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty. */
	mutex_exit(mp);
	schedule();
	mutex_enter(mp);

	/* No more waiters a different mutex could be used */
	if (atomic_dec_and_test(&cvp->cv_waiters))
		cvp->cv_mutex = NULL;

	finish_wait(&cvp->cv_event, &wait);
	SEXIT;
}
120
/* Uninterruptible wait: sleep on 'cvp' (dropping 'mp') until
 * signalled; signals delivered to the task do not end the wait. */
void
__cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__cv_wait);
127
/* Interruptible wait: as __cv_wait(), but a pending signal may wake
 * the task before cv_signal()/cv_broadcast() is called. */
void
__cv_wait_interruptible(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_INTERRUPTIBLE);
}
EXPORT_SYMBOL(__cv_wait_interruptible);
134
/* 'expire_time' argument is an absolute wall clock time in jiffies.
 * Return value is time left (expire_time - now) or -1 if timeout occurred.
 * Like cv_wait_common() this drops 'mp' while asleep and re-acquires
 * it before returning; statement ordering is race-critical. */
static clock_t
__cv_timedwait_common(kcondvar_t *cvp, kmutex_t *mp,
    clock_t expire_time, int state)
{
	DEFINE_WAIT(wait);
	clock_t time_left;
	SENTRY;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	ASSERT(mutex_owned(mp));

	/* The first waiter binds the condvar to its mutex for the
	 * duration of the wait. */
	if (cvp->cv_mutex == NULL)
		cvp->cv_mutex = mp;

	/* Ensure the same mutex is used by all callers */
	ASSERT(cvp->cv_mutex == mp);

	/* XXX - Does not handle jiffie wrap properly */
	time_left = expire_time - jiffies;
	if (time_left <= 0)
		SRETURN(-1);

	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/* Mutex should be dropped after prepare_to_wait() this
	 * ensures we're linked in to the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty. */
	mutex_exit(mp);
	/* schedule_timeout() returns the jiffies remaining, or 0 on
	 * expiry; a positive remainder means we were woken early. */
	time_left = schedule_timeout(time_left);
	mutex_enter(mp);

	/* No more waiters a different mutex could be used */
	if (atomic_dec_and_test(&cvp->cv_waiters))
		cvp->cv_mutex = NULL;

	finish_wait(&cvp->cv_event, &wait);

	SRETURN(time_left > 0 ? time_left : -1);
}
180
/* Uninterruptible timed wait until signalled or the absolute jiffies
 * deadline 'exp_time' passes.  Returns jiffies remaining, or -1 on
 * timeout. */
clock_t
__cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return __cv_timedwait_common(cvp, mp, exp_time, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__cv_timedwait);
187
/* As __cv_timedwait(), but the sleep may also be ended early by a
 * pending signal.  Returns jiffies remaining, or -1 on timeout. */
clock_t
__cv_timedwait_interruptible(kcondvar_t *cvp, kmutex_t *mp, clock_t exp_time)
{
	return __cv_timedwait_common(cvp, mp, exp_time, TASK_INTERRUPTIBLE);
}
EXPORT_SYMBOL(__cv_timedwait_interruptible);
194
195 void
196 __cv_signal(kcondvar_t *cvp)
197 {
198 SENTRY;
199 ASSERT(cvp);
200 ASSERT(cvp->cv_magic == CV_MAGIC);
201
202 /* All waiters are added with WQ_FLAG_EXCLUSIVE so only one
203 * waiter will be set runable with each call to wake_up().
204 * Additionally wake_up() holds a spin_lock assoicated with
205 * the wait queue to ensure we don't race waking up processes. */
206 if (atomic_read(&cvp->cv_waiters) > 0)
207 wake_up(&cvp->cv_event);
208
209 SEXIT;
210 }
211 EXPORT_SYMBOL(__cv_signal);
212
213 void
214 __cv_broadcast(kcondvar_t *cvp)
215 {
216 ASSERT(cvp);
217 ASSERT(cvp->cv_magic == CV_MAGIC);
218 SENTRY;
219
220 /* Wake_up_all() will wake up all waiters even those which
221 * have the WQ_FLAG_EXCLUSIVE flag set. */
222 if (atomic_read(&cvp->cv_waiters) > 0)
223 wake_up_all(&cvp->cv_event);
224
225 SEXIT;
226 }
227 EXPORT_SYMBOL(__cv_broadcast);