#ifndef _SPL_RWLOCK_H
#define _SPL_RWLOCK_H

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>       /* strlen()/strcpy() used by rw_init() */
#include <linux/rwsem.h>
#include <asm/current.h>
#include <sys/types.h>

#ifdef __cplusplus
extern "C" {
#endif

typedef enum {
        RW_DRIVER  = 2,         /* driver (DDI) rwlock */
        RW_DEFAULT = 4          /* kernel default rwlock */
} krw_type_t;

typedef enum {
        RW_WRITER,
        RW_READER
} krw_t;

#define RW_READ_HELD(x)         (__rw_read_held((x)))
#define RW_WRITE_HELD(x)        (__rw_write_held((x)))
#define RW_LOCK_HELD(x)         (__rw_lock_held((x)))
#define RW_ISWRITER(x)          (__rw_iswriter(x))

#define RW_MAGIC  0x3423645a
#define RW_POISON 0xa6

typedef struct {
        int rw_magic;
        char *rw_name;
        struct rw_semaphore rw_sem;
        struct task_struct *rw_owner;   /* holder of the write lock */
} krwlock_t;

#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
/*
 * The generic rwsem implementation protects sem->activity and
 * sem->wait_list with sem->wait_lock.  rw_tryupgrade() below already
 * holds wait_lock when it needs to release its read lock and retake
 * the semaphore for writing, so it cannot use the regular up_read()
 * and down_write_trylock(), which would take wait_lock again and
 * self-deadlock (the rwlock deadlock addressed by bug239).  The
 * *_locked() variants below perform the same operations assuming
 * wait_lock is already held by the caller.
 */
struct rwsem_waiter {
        struct list_head list;
        struct task_struct *task;
        unsigned int flags;
#define RWSEM_WAITING_FOR_READ  0x00000001
#define RWSEM_WAITING_FOR_WRITE 0x00000002
};

/*
 * wake a single writer
 */
static inline struct rw_semaphore *
__rwsem_wake_one_writer_locked(struct rw_semaphore *sem)
{
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;

        sem->activity = -1;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
        list_del(&waiter->list);

        tsk = waiter->task;
        smp_mb();
        waiter->task = NULL;
        wake_up_process(tsk);
        put_task_struct(tsk);
        return sem;
}

/*
 * release a read lock on the semaphore (wait_lock already held)
 */
static void fastcall
__up_read_locked(struct rw_semaphore *sem)
{
        if (--sem->activity == 0 && !list_empty(&sem->wait_list))
                sem = __rwsem_wake_one_writer_locked(sem);
}

/*
 * trylock for writing (wait_lock already held) -- returns 1 if
 * successful, 0 if contention
 */
static int fastcall
__down_write_trylock_locked(struct rw_semaphore *sem)
{
        int ret = 0;

        if (sem->activity == 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity = -1;
                ret = 1;
        }

        return ret;
}
#endif /* CONFIG_RWSEM_GENERIC_SPINLOCK */

extern int __rw_read_held(krwlock_t *rwlp);
extern int __rw_write_held(krwlock_t *rwlp);
extern int __rw_lock_held(krwlock_t *rwlp);

static __inline__ void
rw_init(krwlock_t *rwlp, char *name, krw_type_t type, void *arg)
{
        BUG_ON(type != RW_DEFAULT);     /* XXX no irq handler use */
        BUG_ON(arg != NULL);            /* XXX no irq handler use */
        rwlp->rw_magic = RW_MAGIC;
        rwlp->rw_owner = NULL;          /* no one holds the write lock yet */
        init_rwsem(&rwlp->rw_sem);
        rwlp->rw_name = NULL;

        if (name) {
                rwlp->rw_name = kmalloc(strlen(name) + 1, GFP_KERNEL);
                if (rwlp->rw_name)
                        strcpy(rwlp->rw_name, name);
        }
}

static __inline__ void
rw_destroy(krwlock_t *rwlp)
{
        BUG_ON(rwlp == NULL);
        BUG_ON(rwlp->rw_magic != RW_MAGIC);
        BUG_ON(rwlp->rw_owner != NULL);
        spin_lock(&rwlp->rw_sem.wait_lock);
        BUG_ON(!list_empty(&rwlp->rw_sem.wait_list));
        spin_unlock(&rwlp->rw_sem.wait_lock);

        if (rwlp->rw_name)
                kfree(rwlp->rw_name);

        memset(rwlp, RW_POISON, sizeof(krwlock_t));
}

/* Return 0 if the lock could not be obtained without blocking.
 */
static __inline__ int
rw_tryenter(krwlock_t *rwlp, krw_t rw)
{
        int result = 0;

        BUG_ON(rwlp->rw_magic != RW_MAGIC);
        switch (rw) {
                /* these functions return 1 on success, 0 on contention */
                case RW_READER:
                        /* Here the Solaris code would return 0 if there
                         * were any write waiters, i.e. it would refuse to
                         * let this thread join the current readers while
                         * a writer is queued.  That doesn't look like a
                         * correctness issue, so just use
                         * down_read_trylock() for the test.  We may have
                         * to revisit this if it becomes a problem. */
                        result = down_read_trylock(&rwlp->rw_sem);
                        break;
                case RW_WRITER:
                        result = down_write_trylock(&rwlp->rw_sem);
                        if (result) {
                                /* no one else may be holding
                                 * the write lock here */
                                BUG_ON(rwlp->rw_owner != NULL);
                                rwlp->rw_owner = current;
                        }
                        break;
        }

        return result;
}

static __inline__ void
rw_enter(krwlock_t *rwlp, krw_t rw)
{
        BUG_ON(rwlp->rw_magic != RW_MAGIC);
        switch (rw) {
                case RW_READER:
                        /* Here the Solaris code would block if there
                         * were any write waiters, i.e. it would refuse to
                         * let this thread join the current readers while
                         * a writer is queued.  That doesn't look like a
                         * correctness issue, so just call down_read().
                         * We may have to revisit this if it becomes
                         * a problem. */
                        down_read(&rwlp->rw_sem);
                        break;
                case RW_WRITER:
                        down_write(&rwlp->rw_sem);

                        /* no one else may be holding
                         * the write lock here */
                        BUG_ON(rwlp->rw_owner != NULL);
                        rwlp->rw_owner = current;
                        break;
        }
}

static __inline__ void
rw_exit(krwlock_t *rwlp)
{
        BUG_ON(rwlp->rw_magic != RW_MAGIC);

        /* rw_owner is set to the current thread
         * iff it holds the write lock */
        if (rwlp->rw_owner == current) {
                rwlp->rw_owner = NULL;
                up_write(&rwlp->rw_sem);
        } else {
                up_read(&rwlp->rw_sem);
        }
}

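/*
 * Example usage (an illustrative sketch only, not part of the original
 * header): my_lock, my_count, and the my_*() functions are hypothetical.
 * Readers take RW_READER, writers take RW_WRITER, and both release the
 * lock with rw_exit().  rw_tryenter() returns 0 on contention, so
 * my_write() falls back to the blocking rw_enter().
 *
 *      static krwlock_t my_lock;
 *      static int my_count;
 *
 *      static void my_setup(void)
 *      {
 *              rw_init(&my_lock, "my_lock", RW_DEFAULT, NULL);
 *      }
 *
 *      static int my_read(void)
 *      {
 *              int val;
 *
 *              rw_enter(&my_lock, RW_READER);
 *              val = my_count;
 *              rw_exit(&my_lock);
 *              return val;
 *      }
 *
 *      static void my_write(int val)
 *      {
 *              if (!rw_tryenter(&my_lock, RW_WRITER))
 *                      rw_enter(&my_lock, RW_WRITER);
 *              my_count = val;
 *              rw_exit(&my_lock);
 *      }
 *
 *      static void my_teardown(void)
 *      {
 *              rw_destroy(&my_lock);
 *      }
 */
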
static __inline__ void
rw_downgrade(krwlock_t *rwlp)
{
        BUG_ON(rwlp->rw_magic != RW_MAGIC);
        BUG_ON(rwlp->rw_owner != current);
        rwlp->rw_owner = NULL;
        downgrade_write(&rwlp->rw_sem);
}

/* Return 0 if unable to perform the upgrade.
 * It might be wise to fix the caller
 * to acquire the write lock in the first place?
 */
static __inline__ int
rw_tryupgrade(krwlock_t *rwlp)
{
        int result = 0;
        BUG_ON(rwlp->rw_magic != RW_MAGIC);

        spin_lock(&rwlp->rw_sem.wait_lock);

        /* Check if there is anyone waiting for the
         * lock.  If there is, then we know we should
         * not try to upgrade the lock */
        if (!list_empty(&rwlp->rw_sem.wait_list)) {
                printk(KERN_WARNING "There are threads waiting\n");
                spin_unlock(&rwlp->rw_sem.wait_lock);
                return 0;
        }
#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
        /* Note that activity is protected by
         * the wait_lock.  Don't try to upgrade
         * if there are multiple readers currently
         * holding the lock */
        if (rwlp->rw_sem.activity > 1) {
#else
        /* Don't try to upgrade
         * if there are multiple readers currently
         * holding the lock */
        if ((rwlp->rw_sem.count & RWSEM_ACTIVE_MASK) > 1) {
#endif
                spin_unlock(&rwlp->rw_sem.wait_lock);
                return 0;
        }

#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
        /* Here it should be safe to drop the
         * read lock and reacquire it for writing since
         * we know there are no waiters */
        __up_read_locked(&rwlp->rw_sem);

        /* returns 1 on success, 0 on contention */
        result = __down_write_trylock_locked(&rwlp->rw_sem);
#else
        /* Here it should be safe to drop the
         * read lock and reacquire it for writing since
         * we know there are no waiters */
        up_read(&rwlp->rw_sem);

        /* returns 1 on success, 0 on contention */
        result = down_write_trylock(&rwlp->rw_sem);
#endif

        /* Check if the upgrade failed.  This should
         * never happen if we got to this point */
        BUG_ON(!result);
        BUG_ON(rwlp->rw_owner != NULL);
        rwlp->rw_owner = current;
        spin_unlock(&rwlp->rw_sem.wait_lock);
        return 1;
}

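/*
 * Caller-side sketch for the note above (illustrative only, not part of
 * the original header): if rw_tryupgrade() is refused, drop the read
 * lock, take the write lock from scratch, and recheck the condition,
 * since another writer may have run in between.  my_lock and the
 * my_*() helpers are hypothetical.
 *
 *      rw_enter(&my_lock, RW_READER);
 *      if (my_state_needs_update()) {
 *              if (!rw_tryupgrade(&my_lock)) {
 *                      rw_exit(&my_lock);
 *                      rw_enter(&my_lock, RW_WRITER);
 *              }
 *              if (my_state_needs_update())
 *                      my_update_state();
 *      }
 *      rw_exit(&my_lock);
 */
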
static __inline__ kthread_t *
rw_owner(krwlock_t *rwlp)
{
        BUG_ON(rwlp->rw_magic != RW_MAGIC);
        return rwlp->rw_owner;
}

#ifdef __cplusplus
}
#endif

#endif /* _SPL_RWLOCK_H */