git.proxmox.com Git - mirror_spl.git/blob - module/spl/spl-condvar.c
Add cv_wait_interruptible() function.
[mirror_spl.git] / module / spl / spl-condvar.c
1 /*
2 * This file is part of the SPL: Solaris Porting Layer.
3 *
4 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
5 * Produced at Lawrence Livermore National Laboratory
6 * Written by:
7 * Brian Behlendorf <behlendorf1@llnl.gov>,
8 * Herb Wartens <wartens2@llnl.gov>,
9 * Jim Garlick <garlick@llnl.gov>
10 * UCRL-CODE-235197
11 *
12 * This is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
20 * for more details.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
25 */
26
27 #include <sys/condvar.h>
28
29 #ifdef DEBUG_SUBSYSTEM
30 #undef DEBUG_SUBSYSTEM
31 #endif
32
33 #define DEBUG_SUBSYSTEM S_CONDVAR
34
35 void
36 __cv_init(kcondvar_t *cvp, char *name, kcv_type_t type, void *arg)
37 {
38 int flags = KM_SLEEP;
39
40 ENTRY;
41 ASSERT(cvp);
42 ASSERT(name);
43 ASSERT(type == CV_DEFAULT);
44 ASSERT(arg == NULL);
45
46 cvp->cv_magic = CV_MAGIC;
47 init_waitqueue_head(&cvp->cv_event);
48 spin_lock_init(&cvp->cv_lock);
49 atomic_set(&cvp->cv_waiters, 0);
50 cvp->cv_mutex = NULL;
51 cvp->cv_name = NULL;
52 cvp->cv_name_size = strlen(name) + 1;
53
54 /* We may be called when there is a non-zero preempt_count or
55 * interrupts are disabled is which case we must not sleep.
56 */
57 if (current_thread_info()->preempt_count || irqs_disabled())
58 flags = KM_NOSLEEP;
59
60 cvp->cv_name = kmem_alloc(cvp->cv_name_size, flags);
61 if (cvp->cv_name)
62 strcpy(cvp->cv_name, name);
63
64 EXIT;
65 }
66 EXPORT_SYMBOL(__cv_init);
67
/*
 * cv_destroy(9f) equivalent: tear down a condition variable.
 *
 * The caller must guarantee no threads are still waiting; both the
 * waiter count and the wait queue are asserted empty.  cv_lock is held
 * across the checks and the name free so a racing cv_wait_common()
 * (which takes cv_lock to record cv_mutex) cannot interleave with
 * teardown.
 */
void
__cv_destroy(kcondvar_t *cvp)
{
	ENTRY;
	ASSERT(cvp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	spin_lock(&cvp->cv_lock);
	/* Destroying a cv with live waiters is a caller bug. */
	ASSERT(atomic_read(&cvp->cv_waiters) == 0);
	ASSERT(!waitqueue_active(&cvp->cv_event));

	/* cv_name may be NULL if the allocation in __cv_init() failed. */
	if (cvp->cv_name)
		kmem_free(cvp->cv_name, cvp->cv_name_size);

	spin_unlock(&cvp->cv_lock);
	/* Poison the whole structure (including the now-dead lock and
	 * magic) so any use-after-destroy trips the CV_MAGIC asserts. */
	memset(cvp, CV_POISON, sizeof(*cvp));
	EXIT;
}
EXPORT_SYMBOL(__cv_destroy);
86
/*
 * Common body for cv_wait() and cv_wait_interruptible().
 *
 * Atomically releases 'mp', sleeps on the cv's wait queue in 'state'
 * (TASK_UNINTERRUPTIBLE or TASK_INTERRUPTIBLE), then reacquires 'mp'
 * before returning.  'mp' must be held by the caller on entry.
 *
 * NOTE(review): with TASK_INTERRUPTIBLE, schedule() may return early
 * when a signal is delivered; the caller sees this as a spurious
 * wakeup and must re-check its predicate in a loop.
 */
static void
cv_wait_common(kcondvar_t *cvp, kmutex_t *mp, int state)
{
	DEFINE_WAIT(wait);
	ENTRY;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	spin_lock(&cvp->cv_lock);
	ASSERT(mutex_owned(mp));

	/* First waiter records which mutex guards this cv. */
	if (cvp->cv_mutex == NULL)
		cvp->cv_mutex = mp;

	/* Ensure the same mutex is used by all callers */
	ASSERT(cvp->cv_mutex == mp);
	spin_unlock(&cvp->cv_lock);

	/* Queue exclusively so cv_signal()'s wake_up() wakes exactly
	 * one waiter; cv_broadcast() uses wake_up_all() to wake all. */
	prepare_to_wait_exclusive(&cvp->cv_event, &wait, state);
	atomic_inc(&cvp->cv_waiters);

	/* Mutex should be dropped after prepare_to_wait() this
	 * ensures we're linked in to the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty. */
	mutex_exit(mp);
	schedule();
	mutex_enter(mp);

	atomic_dec(&cvp->cv_waiters);
	finish_wait(&cvp->cv_event, &wait);
	EXIT;
}
120
/*
 * cv_wait(9f) equivalent: sleep uninterruptibly until signaled.
 * 'mp' is dropped during the sleep and reacquired before return.
 */
void
__cv_wait(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__cv_wait);
127
/*
 * Interruptible variant of cv_wait(): the sleep may additionally end
 * when a signal is delivered to the task, in which case the caller
 * observes a spurious wakeup (no return value distinguishes the two).
 */
void
__cv_wait_interruptible(kcondvar_t *cvp, kmutex_t *mp)
{
	cv_wait_common(cvp, mp, TASK_INTERRUPTIBLE);
}
EXPORT_SYMBOL(__cv_wait_interruptible);
134
/* 'expire_time' argument is an absolute wall clock time in jiffies.
 * Return value is time left (expire_time - now) or -1 if timeout occurred.
 *
 * As with cv_wait(), 'mp' is dropped while asleep and reacquired
 * before returning; the wait is uninterruptible.
 */
clock_t
__cv_timedwait(kcondvar_t *cvp, kmutex_t *mp, clock_t expire_time)
{
	DEFINE_WAIT(wait);
	clock_t time_left;
	ENTRY;

	ASSERT(cvp);
	ASSERT(mp);
	ASSERT(cvp->cv_magic == CV_MAGIC);
	spin_lock(&cvp->cv_lock);
	ASSERT(mutex_owned(mp));

	/* First waiter records which mutex guards this cv. */
	if (cvp->cv_mutex == NULL)
		cvp->cv_mutex = mp;

	/* Ensure the same mutex is used by all callers */
	ASSERT(cvp->cv_mutex == mp);
	spin_unlock(&cvp->cv_lock);

	/* XXX - Does not handle jiffie wrap properly */
	time_left = expire_time - jiffies;
	if (time_left <= 0)
		RETURN(-1);

	/* Queue exclusively, same discipline as cv_wait_common(). */
	prepare_to_wait_exclusive(&cvp->cv_event, &wait,
	    TASK_UNINTERRUPTIBLE);
	atomic_inc(&cvp->cv_waiters);

	/* Mutex should be dropped after prepare_to_wait() this
	 * ensures we're linked in to the waiters list and avoids the
	 * race where 'cvp->cv_waiters > 0' but the list is empty. */
	mutex_exit(mp);
	/* schedule_timeout() returns the remaining jiffies (0 on
	 * expiry), so time_left doubles as the timeout result. */
	time_left = schedule_timeout(time_left);
	mutex_enter(mp);

	atomic_dec(&cvp->cv_waiters);
	finish_wait(&cvp->cv_event, &wait);

	RETURN(time_left > 0 ? time_left : -1);
}
EXPORT_SYMBOL(__cv_timedwait);
180
181 void
182 __cv_signal(kcondvar_t *cvp)
183 {
184 ENTRY;
185 ASSERT(cvp);
186 ASSERT(cvp->cv_magic == CV_MAGIC);
187
188 /* All waiters are added with WQ_FLAG_EXCLUSIVE so only one
189 * waiter will be set runable with each call to wake_up().
190 * Additionally wake_up() holds a spin_lock assoicated with
191 * the wait queue to ensure we don't race waking up processes. */
192 if (atomic_read(&cvp->cv_waiters) > 0)
193 wake_up(&cvp->cv_event);
194
195 EXIT;
196 }
197 EXPORT_SYMBOL(__cv_signal);
198
199 void
200 __cv_broadcast(kcondvar_t *cvp)
201 {
202 ASSERT(cvp);
203 ASSERT(cvp->cv_magic == CV_MAGIC);
204 ENTRY;
205
206 /* Wake_up_all() will wake up all waiters even those which
207 * have the WQ_FLAG_EXCLUSIVE flag set. */
208 if (atomic_read(&cvp->cv_waiters) > 0)
209 wake_up_all(&cvp->cv_event);
210
211 EXIT;
212 }
213 EXPORT_SYMBOL(__cv_broadcast);