/* (export residue from a git-blame table view: Commit | Line | Data) */
09b414e8 | 1 | #ifndef _SPL_RWLOCK_H |
2 | #define _SPL_RWLOCK_H | |
f1ca4da6 | 3 | |
f1b59d26 | 4 | #include <linux/module.h> |
f1ca4da6 | 5 | #include <linux/slab.h> |
6 | #include <linux/rwsem.h> | |
7 | #include <asm/current.h> | |
f4b37741 | 8 | #include <sys/types.h> |
f1ca4da6 | 9 | |
10 | #ifdef __cplusplus | |
11 | extern "C" { | |
12 | #endif | |
13 | ||
/*
 * Lock types accepted by rw_init().  Only RW_DEFAULT is actually
 * supported here — rw_init() BUG()s on anything else.
 */
typedef enum {
	RW_DRIVER = 2,		/* driver (DDI) rwlock */
	RW_DEFAULT = 4		/* kernel default rwlock */
} krw_type_t;
18 | ||
/* Lock modes passed to rw_enter()/rw_tryenter(). */
typedef enum {
	RW_WRITER,		/* exclusive (write) access */
	RW_READER		/* shared (read) access */
} krw_t;
23 | ||
/* Solaris-style lock-state queries, mapped onto local helpers. */
#define RW_READ_HELD(x)         (rw_read_held((x)))
#define RW_WRITE_HELD(x)        (rw_write_held((x)))
#define RW_LOCK_HELD(x)         (rw_lock_held((x)))
#define RW_ISWRITER(x)          (rw_iswriter(x))

#define RW_MAGIC 0x3423645a	/* sanity tag stamped by rw_init() */
#define RW_POISON 0xa6		/* fill byte written by rw_destroy() */
31 | ||
/*
 * Solaris krwlock_t emulated on top of a Linux rw_semaphore.
 * rw_owner is only set while the write lock is held; readers never
 * record themselves (rw_exit() relies on this to pick up_read vs
 * up_write).
 */
typedef struct {
	int rw_magic;			/* RW_MAGIC once initialized */
	char *rw_name;			/* kmalloc'd copy of the name; may be NULL */
	struct rw_semaphore rw_sem;	/* underlying Linux rwsem */
	struct task_struct *rw_owner;	/* holder of the write lock */
} krwlock_t;
38 | ||
39 | static __inline__ void | |
40 | rw_init(krwlock_t *rwlp, char *name, krw_type_t type, void *arg) | |
41 | { | |
42 | BUG_ON(type != RW_DEFAULT); /* XXX no irq handler use */ | |
43 | BUG_ON(arg != NULL); /* XXX no irq handler use */ | |
44 | rwlp->rw_magic = RW_MAGIC; | |
45 | rwlp->rw_owner = NULL; /* no one holds the write lock yet */ | |
46 | init_rwsem(&rwlp->rw_sem); | |
47 | rwlp->rw_name = NULL; | |
48 | ||
49 | if (name) { | |
50 | rwlp->rw_name = kmalloc(strlen(name) + 1, GFP_KERNEL); | |
51 | if (rwlp->rw_name) | |
52 | strcpy(rwlp->rw_name, name); | |
53 | } | |
54 | } | |
55 | ||
56 | static __inline__ void | |
57 | rw_destroy(krwlock_t *rwlp) | |
58 | { | |
59 | BUG_ON(rwlp == NULL); | |
60 | BUG_ON(rwlp->rw_magic != RW_MAGIC); | |
61 | BUG_ON(rwlp->rw_owner != NULL); | |
62 | spin_lock(&rwlp->rw_sem.wait_lock); | |
63 | BUG_ON(!list_empty(&rwlp->rw_sem.wait_list)); | |
64 | spin_unlock(&rwlp->rw_sem.wait_lock); | |
65 | ||
66 | if (rwlp->rw_name) | |
67 | kfree(rwlp->rw_name); | |
68 | ||
69 | memset(rwlp, RW_POISON, sizeof(krwlock_t)); | |
70 | } | |
71 | ||
72 | /* Return 0 if the lock could not be obtained without blocking. | |
73 | */ | |
74 | static __inline__ int | |
75 | rw_tryenter(krwlock_t *rwlp, krw_t rw) | |
76 | { | |
77 | int result; | |
78 | ||
79 | BUG_ON(rwlp->rw_magic != RW_MAGIC); | |
80 | switch (rw) { | |
81 | /* these functions return 1 if success, 0 if contention */ | |
82 | case RW_READER: | |
83 | /* Here the Solaris code would return 0 | |
84 | * if there were any write waiters. Specifically | |
85 | * thinking about the case where readers may have | |
86 | * the lock and we would also allow this thread | |
87 | * to grab the read lock with a writer waiting in the | |
88 | * queue. This doesn't seem like a correctness | |
89 | * issue, so just call down_read_trylock() | |
90 | * for the test. We may have to revisit this if | |
91 | * it becomes an issue */ | |
92 | result = down_read_trylock(&rwlp->rw_sem); | |
93 | break; | |
94 | case RW_WRITER: | |
95 | result = down_write_trylock(&rwlp->rw_sem); | |
96 | if (result) { | |
97 | /* there better not be anyone else | |
98 | * holding the write lock here */ | |
99 | BUG_ON(rwlp->rw_owner != NULL); | |
100 | rwlp->rw_owner = current; | |
101 | } | |
102 | break; | |
103 | } | |
104 | ||
105 | return result; | |
106 | } | |
107 | ||
108 | static __inline__ void | |
109 | rw_enter(krwlock_t *rwlp, krw_t rw) | |
110 | { | |
111 | BUG_ON(rwlp->rw_magic != RW_MAGIC); | |
112 | switch (rw) { | |
113 | case RW_READER: | |
114 | /* Here the Solaris code would block | |
115 | * if there were any write waiters. Specifically | |
116 | * thinking about the case where readers may have | |
117 | * the lock and we would also allow this thread | |
118 | * to grab the read lock with a writer waiting in the | |
119 | * queue. This doesn't seem like a correctness | |
120 | * issue, so just call down_read() | |
121 | * for the test. We may have to revisit this if | |
122 | * it becomes an issue */ | |
123 | down_read(&rwlp->rw_sem); | |
124 | break; | |
125 | case RW_WRITER: | |
126 | down_write(&rwlp->rw_sem); | |
127 | ||
128 | /* there better not be anyone else | |
129 | * holding the write lock here */ | |
130 | BUG_ON(rwlp->rw_owner != NULL); | |
131 | rwlp->rw_owner = current; | |
132 | break; | |
133 | } | |
134 | } | |
135 | ||
136 | static __inline__ void | |
137 | rw_exit(krwlock_t *rwlp) | |
138 | { | |
139 | BUG_ON(rwlp->rw_magic != RW_MAGIC); | |
140 | ||
141 | /* rw_owner is held by current | |
142 | * thread iff it is a writer */ | |
143 | if (rwlp->rw_owner == current) { | |
144 | rwlp->rw_owner = NULL; | |
145 | up_write(&rwlp->rw_sem); | |
146 | } else { | |
147 | up_read(&rwlp->rw_sem); | |
148 | } | |
149 | } | |
150 | ||
151 | static __inline__ void | |
152 | rw_downgrade(krwlock_t *rwlp) | |
153 | { | |
154 | BUG_ON(rwlp->rw_magic != RW_MAGIC); | |
155 | BUG_ON(rwlp->rw_owner != current); | |
156 | rwlp->rw_owner = NULL; | |
157 | downgrade_write(&rwlp->rw_sem); | |
158 | } | |
159 | ||
160 | /* Return 0 if unable to perform the upgrade. | |
161 | * Might be wise to fix the caller | |
162 | * to acquire the write lock first? | |
163 | */ | |
/*
 * Attempt to upgrade a held read lock to a write lock.
 * Returns 1 on success, 0 if the upgrade could not be performed
 * (waiters queued, or more than one active reader).
 *
 * Reaches into rw_semaphore internals (wait_lock, wait_list, and the
 * per-config activity/count fields) under wait_lock to decide whether
 * the up_read()/down_write_trylock() swap below is guaranteed to
 * succeed.
 *
 * NOTE(review): up_read() is called while wait_lock is still held; on
 * the CONFIG_RWSEM_GENERIC_SPINLOCK implementation up_read() itself
 * takes sem->wait_lock, which looks like a self-deadlock — confirm
 * against the target kernel's rwsem implementation.
 */
static __inline__ int
rw_tryupgrade(krwlock_t *rwlp)
{
	int result;
	BUG_ON(rwlp->rw_magic != RW_MAGIC);

	spin_lock(&rwlp->rw_sem.wait_lock);

	/* Check if there is anyone waiting for the
	 * lock.  If there is, then we know we should
	 * not try to upgrade the lock */
	if (!list_empty(&rwlp->rw_sem.wait_list)) {
		printk(KERN_WARNING "There are threads waiting\n");
		spin_unlock(&rwlp->rw_sem.wait_lock);
		return 0;
	}
#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
	/* Note that activity is protected by
	 * the wait_lock.  Don't try to upgrade
	 * if there are multiple readers currently
	 * holding the lock */
	if (rwlp->rw_sem.activity > 1) {
#else
	/* Don't try to upgrade
	 * if there are multiple readers currently
	 * holding the lock */
	if ((rwlp->rw_sem.count & RWSEM_ACTIVE_MASK) > 1) {
#endif
		spin_unlock(&rwlp->rw_sem.wait_lock);
		return 0;
	}

	/* Here it should be safe to drop the
	 * read lock and reacquire it for writing since
	 * we know there are no waiters */
	up_read(&rwlp->rw_sem);

	/* returns 1 if success, 0 if contention */
	result = down_write_trylock(&rwlp->rw_sem);

	/* Check if upgrade failed.  Should not ever happen
	 * if we got to this point */
	BUG_ON(!result);
	BUG_ON(rwlp->rw_owner != NULL);
	rwlp->rw_owner = current;
	spin_unlock(&rwlp->rw_sem.wait_lock);
	return 1;
}
212 | ||
213 | static __inline__ kthread_t * | |
214 | rw_owner(krwlock_t *rwlp) | |
215 | { | |
216 | BUG_ON(rwlp->rw_magic != RW_MAGIC); | |
217 | return rwlp->rw_owner; | |
218 | } | |
219 | ||
220 | #ifdef __cplusplus | |
221 | } | |
222 | #endif | |
223 | ||
09b414e8 | 224 | #endif /* _SPL_RWLOCK_H */ |