/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
 * generic spinlock implementation
 *
 * Copyright (c) 2001 David Howells (dhowells@redhat.com).
 * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 * - Derived also from comments by Linus
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/module.h>

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	unsigned int flags;
#define RWSEM_WAITING_FOR_READ	0x00000001
#define RWSEM_WAITING_FOR_WRITE	0x00000002
};
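
/*
 * sem->activity tracks the lock state: 0 when the semaphore is unowned,
 * the number of readers holding it when positive, and -1 when a writer
 * holds it (this is how the functions below manipulate the field).
 */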

/*
 * initialise the semaphore
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->activity = 0;
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}
EXPORT_SYMBOL(__init_rwsem);

/*
 * handle the lock being released when there are processes blocked on it
 * that can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 *   - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	int woken;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

	if (!wakewrite) {
		if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
			goto out;
		goto dont_wake_writers;
	}

	/* if we are allowed to wake writers try to grant a single write lock
	 * if there's a writer at the front of the queue
	 * - we leave the 'waiting count' incremented to signify potential
	 *   contention
	 */
	if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
		sem->activity = -1;
		list_del(&waiter->list);
		tsk = waiter->task;
		/* Don't touch waiter after ->task has been NULLed */
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
		goto out;
	}

	/* grant an infinite number of read locks to the front of the queue */
 dont_wake_writers:
	woken = 0;
	while (waiter->flags & RWSEM_WAITING_FOR_READ) {
		struct list_head *next = waiter->list.next;

		list_del(&waiter->list);
		tsk = waiter->task;
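		/* don't touch "waiter" once ->task has been cleared */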
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
		woken++;
		if (list_empty(&sem->wait_list))
			break;
		waiter = list_entry(next, struct rwsem_waiter, list);
	}

	sem->activity += woken;

 out:
	return sem;
}

/*
 * wake a single writer
 */
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;

	sem->activity = -1;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	list_del(&waiter->list);

	tsk = waiter->task;
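	/* don't touch "waiter" once ->task has been cleared */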
	smp_mb();
	waiter->task = NULL;
	wake_up_process(tsk);
	put_task_struct(tsk);
	return sem;
}

/*
 * get a read lock on the semaphore
 */
void __sched __down_read(struct rw_semaphore *sem)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;

	spin_lock_irq(&sem->wait_lock);

	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity++;
		spin_unlock_irq(&sem->wait_lock);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_READ;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
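	/* (the waking task clears waiter.task once the lock has been granted) */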
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;
 out:
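	/* the label needs a statement to attach to, hence the lone ';' */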
	;
}

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int __down_read_trylock(struct rw_semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity++;
		ret = 1;
	}

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	return ret;
}

/*
 * get a write lock on the semaphore
 * - we increment the waiting count anyway to indicate an exclusive lock
 */
void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
{
	struct rwsem_waiter waiter;
	struct task_struct *tsk;

	spin_lock_irq(&sem->wait_lock);

	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity = -1;
		spin_unlock_irq(&sem->wait_lock);
		goto out;
	}

	tsk = current;
	set_task_state(tsk, TASK_UNINTERRUPTIBLE);

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.flags = RWSEM_WAITING_FOR_WRITE;
	get_task_struct(tsk);

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we don't need to touch the semaphore struct anymore */
	spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
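	/* (the waking task clears waiter.task once the lock has been granted) */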
	for (;;) {
		if (!waiter.task)
			break;
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}

	tsk->state = TASK_RUNNING;
 out:
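	/* the label needs a statement to attach to, hence the lone ';' */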
	;
}

void __sched __down_write(struct rw_semaphore *sem)
{
	__down_write_nested(sem, 0);
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int __down_write_trylock(struct rw_semaphore *sem)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (sem->activity == 0 && list_empty(&sem->wait_list)) {
		/* granted */
		sem->activity = -1;
		ret = 1;
	}

	spin_unlock_irqrestore(&sem->wait_lock, flags);

	return ret;
}

/*
 * release a read lock on the semaphore
 */
void __up_read(struct rw_semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

	if (--sem->activity == 0 && !list_empty(&sem->wait_list))
		sem = __rwsem_wake_one_writer(sem);

	spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * release a write lock on the semaphore
 */
void __up_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

	sem->activity = 0;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 1);

	spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 */
void __downgrade_write(struct rw_semaphore *sem)
{
	unsigned long flags;

	spin_lock_irqsave(&sem->wait_lock, flags);

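	/* convert our write hold into a single read hold, then wake waiting readers */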
	sem->activity = 1;
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, 0);

	spin_unlock_irqrestore(&sem->wait_lock, flags);
}
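
/*
 * Usage sketch: callers normally take these semaphores through the generic
 * down_read()/up_read()/down_write()/up_write()/downgrade_write() wrappers
 * declared in <linux/rwsem.h>, which (when the generic spinlock
 * implementation is selected) call into the __-prefixed primitives above,
 * e.g. with an example semaphore "my_sem":
 *
 *	static DECLARE_RWSEM(my_sem);
 *
 *	down_read(&my_sem);
 *	... shared critical section ...
 *	up_read(&my_sem);
 *
 *	down_write(&my_sem);
 *	... exclusive critical section ...
 *	up_write(&my_sem);
 */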