/*
 * Source: mirror_spl-debian.git -- module/spl/spl-rwlock.c
 * (commit: "Rename modules to module and update references")
 */
1 /*
2 * This file is part of the SPL: Solaris Porting Layer.
3 *
4 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
5 * Produced at Lawrence Livermore National Laboratory
6 * Written by:
7 * Brian Behlendorf <behlendorf1@llnl.gov>,
8 * Herb Wartens <wartens2@llnl.gov>,
9 * Jim Garlick <garlick@llnl.gov>
10 * UCRL-CODE-235197
11 *
12 * This is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
20 * for more details.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
25 */
26
27 #include <sys/rwlock.h>
28
29 #ifdef DEBUG_SUBSYSTEM
30 #undef DEBUG_SUBSYSTEM
31 #endif
32
33 #define DEBUG_SUBSYSTEM S_RWLOCK
34
35 #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
/* Private mirror of the kernel's internal rwsem_waiter, needed only on
 * CONFIG_RWSEM_GENERIC_SPINLOCK kernels so the locked helpers below can
 * inspect and unlink entries on sem->wait_list.
 * NOTE(review): layout must match lib/rwsem-spinlock.c of the kernel
 * actually built against -- verify when bumping kernel versions. */
struct rwsem_waiter {
	struct list_head list;		/* link in rw_semaphore.wait_list */
	struct task_struct *task;	/* sleeping task; NULLed at wakeup */
	unsigned int flags;		/* RWSEM_WAITING_FOR_* below */
#define RWSEM_WAITING_FOR_READ	0x00000001
#define RWSEM_WAITING_FOR_WRITE	0x00000002
};
/* wake a single writer
 *
 * Grant the lock to, and wake, the waiter at the head of sem->wait_list.
 * Caller must hold sem->wait_lock.  Returns sem for call chaining. */
static struct rw_semaphore *
__rwsem_wake_one_writer_locked(struct rw_semaphore *sem)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;

	/* Mark the semaphore write-held (-1) on the waiter's behalf
	 * before it is woken. */
	sem->activity = -1;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	list_del(&waiter->list);

	tsk = waiter->task;
	/* Full barrier: the woken task keys off waiter->task becoming
	 * NULL, so make sure we are done reading the waiter first. */
	smp_mb();
	waiter->task = NULL;
	wake_up_process(tsk);
	/* Drop the task reference -- presumably taken when the waiter
	 * queued itself, matching kernel rwsem convention; verify. */
	put_task_struct(tsk);
	return sem;
}
62
/* release a read lock on the semaphore
 *
 * Caller must hold sem->wait_lock.  Drops one reader from 'activity';
 * if that was the last reader and someone is queued, hand the lock to
 * the head waiter (a writer, per __rwsem_wake_one_writer_locked). */
static void
__up_read_locked(struct rw_semaphore *sem)
{
	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
		(void)__rwsem_wake_one_writer_locked(sem);
}
70
71 /* trylock for writing -- returns 1 if successful, 0 if contention */
72 static int
73 __down_write_trylock_locked(struct rw_semaphore *sem)
74 {
75 int ret = 0;
76
77 if (sem->activity == 0 && list_empty(&sem->wait_list)) {
78 /* granted */
79 sem->activity = -1;
80 ret = 1;
81 }
82
83 return ret;
84 }
85 #endif
86
87 void
88 __rw_init(krwlock_t *rwlp, char *name, krw_type_t type, void *arg)
89 {
90 int flags = KM_SLEEP;
91
92 ASSERT(rwlp);
93 ASSERT(name);
94 ASSERT(type == RW_DEFAULT); /* XXX no irq handler use */
95 ASSERT(arg == NULL); /* XXX no irq handler use */
96
97 rwlp->rw_magic = RW_MAGIC;
98 rwlp->rw_owner = NULL;
99 rwlp->rw_name = NULL;
100 rwlp->rw_name_size = strlen(name) + 1;
101
102 /* We may be called when there is a non-zero preempt_count or
103 * interrupts are disabled is which case we must not sleep.
104 */
105 if (current_thread_info()->preempt_count || irqs_disabled())
106 flags = KM_NOSLEEP;
107
108 rwlp->rw_name = kmem_alloc(rwlp->rw_name_size, flags);
109 if (rwlp->rw_name == NULL)
110 return;
111
112 init_rwsem(&rwlp->rw_sem);
113 strcpy(rwlp->rw_name, name);
114 }
115 EXPORT_SYMBOL(__rw_init);
116
/* Tear down a krwlock.  The lock must be fully idle: no writer
 * (rw_owner == NULL) and no queued waiters. */
void
__rw_destroy(krwlock_t *rwlp)
{
	ASSERT(rwlp);
	ASSERT(rwlp->rw_magic == RW_MAGIC);
	ASSERT(rwlp->rw_owner == NULL);
	/* Take wait_lock so the emptiness check cannot race against a
	 * late waiter queueing itself; destroying a contended lock is
	 * a caller bug. */
	spin_lock(&rwlp->rw_sem.wait_lock);
	ASSERT(list_empty(&rwlp->rw_sem.wait_list));
	spin_unlock(&rwlp->rw_sem.wait_lock);

	kmem_free(rwlp->rw_name, rwlp->rw_name_size);

	/* Poison the whole structure so use-after-destroy trips the
	 * RW_MAGIC asserts rather than corrupting silently. */
	memset(rwlp, RW_POISON, sizeof(krwlock_t));
}
131 EXPORT_SYMBOL(__rw_destroy);
132
133 /* Return 0 if the lock could not be obtained without blocking. */
134 int
135 __rw_tryenter(krwlock_t *rwlp, krw_t rw)
136 {
137 int rc = 0;
138 ENTRY;
139
140 ASSERT(rwlp);
141 ASSERT(rwlp->rw_magic == RW_MAGIC);
142
143 switch (rw) {
144 /* these functions return 1 if success, 0 if contention */
145 case RW_READER:
146 /* Here the Solaris code would return 0
147 * if there were any write waiters. Specifically
148 * thinking about the case where readers may have
149 * the lock and we would also allow this thread
150 * to grab the read lock with a writer waiting in the
151 * queue. This doesn't seem like a correctness
152 * issue, so just call down_read_trylock()
153 * for the test. We may have to revisit this if
154 * it becomes an issue */
155 rc = down_read_trylock(&rwlp->rw_sem);
156 break;
157 case RW_WRITER:
158 rc = down_write_trylock(&rwlp->rw_sem);
159 if (rc) {
160 /* there better not be anyone else
161 * holding the write lock here */
162 ASSERT(rwlp->rw_owner == NULL);
163 rwlp->rw_owner = current;
164 }
165 break;
166 default:
167 SBUG();
168 }
169
170 RETURN(rc);
171 }
172 EXPORT_SYMBOL(__rw_tryenter);
173
174 void
175 __rw_enter(krwlock_t *rwlp, krw_t rw)
176 {
177 ENTRY;
178 ASSERT(rwlp);
179 ASSERT(rwlp->rw_magic == RW_MAGIC);
180
181 switch (rw) {
182 case RW_READER:
183 /* Here the Solaris code would block
184 * if there were any write waiters. Specifically
185 * thinking about the case where readers may have
186 * the lock and we would also allow this thread
187 * to grab the read lock with a writer waiting in the
188 * queue. This doesn't seem like a correctness
189 * issue, so just call down_read()
190 * for the test. We may have to revisit this if
191 * it becomes an issue */
192 down_read(&rwlp->rw_sem);
193 break;
194 case RW_WRITER:
195 down_write(&rwlp->rw_sem);
196
197 /* there better not be anyone else
198 * holding the write lock here */
199 ASSERT(rwlp->rw_owner == NULL);
200 rwlp->rw_owner = current;
201 break;
202 default:
203 SBUG();
204 }
205 EXIT;
206 }
207 EXPORT_SYMBOL(__rw_enter);
208
209 void
210 __rw_exit(krwlock_t *rwlp)
211 {
212 ENTRY;
213 ASSERT(rwlp);
214 ASSERT(rwlp->rw_magic == RW_MAGIC);
215
216 /* rw_owner is held by current
217 * thread iff it is a writer */
218 if (rwlp->rw_owner == current) {
219 rwlp->rw_owner = NULL;
220 up_write(&rwlp->rw_sem);
221 } else {
222 up_read(&rwlp->rw_sem);
223 }
224 EXIT;
225 }
226 EXPORT_SYMBOL(__rw_exit);
227
/* Atomically convert a held write lock into a read lock without ever
 * releasing it.  The caller must be the current writer (asserted). */
void
__rw_downgrade(krwlock_t *rwlp)
{
	ENTRY;
	ASSERT(rwlp);
	ASSERT(rwlp->rw_magic == RW_MAGIC);
	ASSERT(rwlp->rw_owner == current);

	/* Clear the owner BEFORE downgrading: once we hold the lock
	 * only as a reader, rw_owner must no longer name us, or
	 * __rw_exit would wrongly treat us as the writer. */
	rwlp->rw_owner = NULL;
	downgrade_write(&rwlp->rw_sem);
	EXIT;
}
240 EXPORT_SYMBOL(__rw_downgrade);
241
/* Try to promote a held read lock to a write lock without blocking.
 * Returns 1 on success, 0 if the upgrade cannot be done atomically
 * (other readers active or waiters queued).
 * Might be wise to fix the caller to acquire the write lock first?
 */
int
__rw_tryupgrade(krwlock_t *rwlp)
{
	int rc = 0;
	ENTRY;

	ASSERT(rwlp);
	ASSERT(rwlp->rw_magic == RW_MAGIC);

	/* Hold wait_lock across the entire check-and-switch so no new
	 * waiter can queue between our checks and the re-acquisition. */
	spin_lock(&rwlp->rw_sem.wait_lock);

	/* Check if there is anyone waiting for the
	 * lock.  If there is, then we know we should
	 * not try to upgrade the lock */
	if (!list_empty(&rwlp->rw_sem.wait_list)) {
		spin_unlock(&rwlp->rw_sem.wait_lock);
		RETURN(0);
	}
#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
	/* Note that activity is protected by
	 * the wait_lock.  Don't try to upgrade
	 * if there are multiple readers currently
	 * holding the lock (activity > 1 means readers besides us). */
	if (rwlp->rw_sem.activity > 1) {
#else
	/* Don't try to upgrade
	 * if there are multiple readers currently
	 * holding the lock (active count in the low bits of 'count'). */
	if ((rwlp->rw_sem.count & RWSEM_ACTIVE_MASK) > 1) {
#endif
		spin_unlock(&rwlp->rw_sem.wait_lock);
		RETURN(0);
	}

#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
	/* Here it should be safe to drop the
	 * read lock and reacquire it for writing since
	 * we know there are no waiters; the *_locked helpers assume we
	 * already hold wait_lock. */
	__up_read_locked(&rwlp->rw_sem);

	/* returns 1 if success, 0 if contention */
	rc = __down_write_trylock_locked(&rwlp->rw_sem);
#else
	/* Here it should be safe to drop the
	 * read lock and reacquire it for writing since
	 * we know there are no waiters.
	 * NOTE(review): up_read()/down_write_trylock() run while we hold
	 * wait_lock; on the atomic rwsem these should stay on the
	 * lock-free fast path given no waiters, but confirm the slow
	 * path (which takes wait_lock) cannot be entered here, or this
	 * would self-deadlock. */
	up_read(&rwlp->rw_sem);

	/* returns 1 if success, 0 if contention */
	rc = down_write_trylock(&rwlp->rw_sem);
#endif

	/* Check if upgrade failed.  Should not ever happen
	 * if we got to this point: we were the sole reader and no one
	 * could queue while wait_lock was held. */
	ASSERT(rc);
	ASSERT(rwlp->rw_owner == NULL);
	rwlp->rw_owner = current;
	spin_unlock(&rwlp->rw_sem.wait_lock);

	RETURN(1);
}
307 EXPORT_SYMBOL(__rw_tryupgrade);
308
309 kthread_t *
310 __rw_owner(krwlock_t *rwlp)
311 {
312 ENTRY;
313 ASSERT(rwlp);
314 ASSERT(rwlp->rw_magic == RW_MAGIC);
315 RETURN(rwlp->rw_owner);
316 }
317 EXPORT_SYMBOL(__rw_owner);
318
319 int
320 __rw_read_held(krwlock_t *rwlp)
321 {
322 ENTRY;
323 ASSERT(rwlp);
324 ASSERT(rwlp->rw_magic == RW_MAGIC);
325 RETURN(__rw_lock_held(rwlp) && rwlp->rw_owner == NULL);
326 }
327 EXPORT_SYMBOL(__rw_read_held);
328
329 int
330 __rw_write_held(krwlock_t *rwlp)
331 {
332 ENTRY;
333 ASSERT(rwlp);
334 ASSERT(rwlp->rw_magic == RW_MAGIC);
335 RETURN(rwlp->rw_owner == current);
336 }
337 EXPORT_SYMBOL(__rw_write_held);
338
339 int
340 __rw_lock_held(krwlock_t *rwlp)
341 {
342 int rc = 0;
343 ENTRY;
344
345 ASSERT(rwlp);
346 ASSERT(rwlp->rw_magic == RW_MAGIC);
347
348 spin_lock_irq(&(rwlp->rw_sem.wait_lock));
349 #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
350 if (rwlp->rw_sem.activity != 0) {
351 #else
352 if (rwlp->rw_sem.count != 0) {
353 #endif
354 rc = 1;
355 }
356
357 spin_unlock_irq(&(rwlp->rw_sem.wait_lock));
358
359 RETURN(rc);
360 }
361 EXPORT_SYMBOL(__rw_lock_held);