/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
 * generic spinlock implementation
 *
 * Copyright (c) 2001 David Howells (dhowells@redhat.com).
 * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 * - Derived also from comments by Linus
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/module.h>
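
/*
 * Convention used throughout this file (as implemented by the functions
 * below): sem->activity encodes the lock state:
 *    0  - the semaphore is free
 *    N  - held by N readers (N > 0)
 *   -1  - held by a single writer
 */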

struct rwsem_waiter {
        struct list_head list;
        struct task_struct *task;
        unsigned int flags;
#define RWSEM_WAITING_FOR_READ  0x00000001
#define RWSEM_WAITING_FOR_WRITE 0x00000002
};

#if RWSEM_DEBUG
void rwsemtrace(struct rw_semaphore *sem, const char *str)
{
        if (sem->debug)
                printk("[%d] %s({%d,%d})\n",
                       current->pid, str, sem->activity,
                       list_empty(&sem->wait_list) ? 0 : 1);
}
#endif

/*
 * initialise the semaphore
 */
void fastcall init_rwsem(struct rw_semaphore *sem)
{
        sem->activity = 0;
        spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
#if RWSEM_DEBUG
        sem->debug = 0;
#endif
}
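
/*
 * (Statically allocated semaphores are more often set up with the
 * DECLARE_RWSEM() macro from the rwsem headers, which expands to a
 * compile-time initialiser and avoids a runtime call to init_rwsem().)
 */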

/*
 * handle the lock being released when there are processes blocked on it
 * that can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 *   - the spinlock must be held by the caller
 * - woken processes' waiter blocks are discarded from the list after
 *   their ->task pointers have been zeroed
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;
        int woken;

        rwsemtrace(sem, "Entering __rwsem_do_wake");

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

        if (!wakewrite) {
                if (waiter->flags & RWSEM_WAITING_FOR_WRITE)
                        goto out;
                goto dont_wake_writers;
        }

        /* if we are allowed to wake writers try to grant a single write lock
         * if there's a writer at the front of the queue
         * - we leave the 'waiting count' incremented to signify potential
         *   contention
         */
        if (waiter->flags & RWSEM_WAITING_FOR_WRITE) {
                sem->activity = -1;
                list_del(&waiter->list);
                tsk = waiter->task;
                /* Don't touch waiter after ->task has been NULLed */
                mb();
                waiter->task = NULL;
                wake_up_process(tsk);
                put_task_struct(tsk);
                goto out;
        }

        /* grant an infinite number of read locks to the front of the queue */
 dont_wake_writers:
        woken = 0;
        while (waiter->flags & RWSEM_WAITING_FOR_READ) {
                struct list_head *next = waiter->list.next;

                list_del(&waiter->list);
                tsk = waiter->task;
                /* as above, the waiter may vanish once ->task is NULLed */
                mb();
                waiter->task = NULL;
                wake_up_process(tsk);
                put_task_struct(tsk);
                woken++;
                if (list_empty(&sem->wait_list))
                        break;
                waiter = list_entry(next, struct rwsem_waiter, list);
        }

        sem->activity += woken;

 out:
        rwsemtrace(sem, "Leaving __rwsem_do_wake");
        return sem;
}
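
/*
 * Worked example of the wake policy above: with a wait queue of
 * [reader A, reader B, writer C] and wakewrite non-zero, A and B are
 * woken, sem->activity becomes 2 and C stays queued; had C been at the
 * front of the queue, only C would be woken and activity set to -1.
 */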

/*
 * wake a single writer
 */
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;

        sem->activity = -1;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
        list_del(&waiter->list);

        tsk = waiter->task;
        /* Don't touch waiter after ->task has been NULLed */
        mb();
        waiter->task = NULL;
        wake_up_process(tsk);
        put_task_struct(tsk);
        return sem;
}

/*
 * get a read lock on the semaphore
 */
void fastcall __sched __down_read(struct rw_semaphore *sem)
{
        struct rwsem_waiter waiter;
        struct task_struct *tsk;

        rwsemtrace(sem, "Entering __down_read");

        spin_lock_irq(&sem->wait_lock);

        if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity++;
                spin_unlock_irq(&sem->wait_lock);
                goto out;
        }

        tsk = current;
        set_task_state(tsk, TASK_UNINTERRUPTIBLE);

        /* set up my own style of waitqueue */
        waiter.task = tsk;
        waiter.flags = RWSEM_WAITING_FOR_READ;
        get_task_struct(tsk);

        list_add_tail(&waiter.list, &sem->wait_list);

        /* we don't need to touch the semaphore struct anymore */
        spin_unlock_irq(&sem->wait_lock);

        /* wait to be given the lock */
        for (;;) {
                if (!waiter.task)
                        break;
                schedule();
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        }

        tsk->state = TASK_RUNNING;

 out:
        rwsemtrace(sem, "Leaving __down_read");
}
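
/*
 * Note on the wait loop above: the waker, not the sleeper, clears
 * waiter.task (after an mb()) once the lock has been granted, so observing
 * a NULL waiter.task is the signal that this task now holds the semaphore.
 * The get_task_struct()/put_task_struct() pair keeps the task_struct valid
 * until the waker has finished with it.
 */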

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int fastcall __down_read_trylock(struct rw_semaphore *sem)
{
        unsigned long flags;
        int ret = 0;

        rwsemtrace(sem, "Entering __down_read_trylock");

        spin_lock_irqsave(&sem->wait_lock, flags);

        if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity++;
                ret = 1;
        }

        spin_unlock_irqrestore(&sem->wait_lock, flags);

        rwsemtrace(sem, "Leaving __down_read_trylock");
        return ret;
}
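
/*
 * Note: __down_read_trylock() fails whenever the wait queue is non-empty,
 * even if the lock is currently held only by readers; this keeps trylock
 * callers from jumping ahead of queued writers.
 */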

/*
 * get a write lock on the semaphore
 * - we increment the waiting count anyway to indicate an exclusive lock
 */
void fastcall __sched __down_write(struct rw_semaphore *sem)
{
        struct rwsem_waiter waiter;
        struct task_struct *tsk;

        rwsemtrace(sem, "Entering __down_write");

        spin_lock_irq(&sem->wait_lock);

        if (sem->activity == 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity = -1;
                spin_unlock_irq(&sem->wait_lock);
                goto out;
        }

        tsk = current;
        set_task_state(tsk, TASK_UNINTERRUPTIBLE);

        /* set up my own style of waitqueue */
        waiter.task = tsk;
        waiter.flags = RWSEM_WAITING_FOR_WRITE;
        get_task_struct(tsk);

        list_add_tail(&waiter.list, &sem->wait_list);

        /* we don't need to touch the semaphore struct anymore */
        spin_unlock_irq(&sem->wait_lock);

        /* wait to be given the lock */
        for (;;) {
                if (!waiter.task)
                        break;
                schedule();
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        }

        tsk->state = TASK_RUNNING;

 out:
        rwsemtrace(sem, "Leaving __down_write");
}

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int fastcall __down_write_trylock(struct rw_semaphore *sem)
{
        unsigned long flags;
        int ret = 0;

        rwsemtrace(sem, "Entering __down_write_trylock");

        spin_lock_irqsave(&sem->wait_lock, flags);

        if (sem->activity == 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->activity = -1;
                ret = 1;
        }

        spin_unlock_irqrestore(&sem->wait_lock, flags);

        rwsemtrace(sem, "Leaving __down_write_trylock");
        return ret;
}

/*
 * release a read lock on the semaphore
 */
void fastcall __up_read(struct rw_semaphore *sem)
{
        unsigned long flags;

        rwsemtrace(sem, "Entering __up_read");

        spin_lock_irqsave(&sem->wait_lock, flags);

        if (--sem->activity == 0 && !list_empty(&sem->wait_list))
                sem = __rwsem_wake_one_writer(sem);

        spin_unlock_irqrestore(&sem->wait_lock, flags);

        rwsemtrace(sem, "Leaving __up_read");
}

/*
 * release a write lock on the semaphore
 */
void fastcall __up_write(struct rw_semaphore *sem)
{
        unsigned long flags;

        rwsemtrace(sem, "Entering __up_write");

        spin_lock_irqsave(&sem->wait_lock, flags);

        sem->activity = 0;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 1);

        spin_unlock_irqrestore(&sem->wait_lock, flags);

        rwsemtrace(sem, "Leaving __up_write");
}

/*
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 */
void fastcall __downgrade_write(struct rw_semaphore *sem)
{
        unsigned long flags;

        rwsemtrace(sem, "Entering __downgrade_write");

        spin_lock_irqsave(&sem->wait_lock, flags);

        sem->activity = 1;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 0);

        spin_unlock_irqrestore(&sem->wait_lock, flags);

        rwsemtrace(sem, "Leaving __downgrade_write");
}
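
/*
 * Illustration of the downgrade path: activity is set to 1 to account for
 * the caller's new read lock, and __rwsem_do_wake(sem, 0) then lets any
 * readers at the front of the queue join it while writers keep sleeping
 * (if a writer is at the front, nothing is woken).
 */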

EXPORT_SYMBOL(init_rwsem);
EXPORT_SYMBOL(__down_read);
EXPORT_SYMBOL(__down_read_trylock);
EXPORT_SYMBOL(__down_write);
EXPORT_SYMBOL(__down_write_trylock);
EXPORT_SYMBOL(__up_read);
EXPORT_SYMBOL(__up_write);
EXPORT_SYMBOL(__downgrade_write);
#if RWSEM_DEBUG
EXPORT_SYMBOL(rwsemtrace);
#endif
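
/*
 * Usage sketch (illustrative only): callers normally reach these
 * __-prefixed slow-path primitives through the down_read()/up_read(),
 * down_write()/up_write() and downgrade_write() wrappers declared in
 * <linux/rwsem.h>. A minimal pattern, where stats_sem, stats_count and
 * report() are hypothetical names for this example:
 *
 *      static struct rw_semaphore stats_sem;
 *      static unsigned long stats_count;
 *
 *      init_rwsem(&stats_sem);
 *
 *      down_read(&stats_sem);          // many concurrent readers allowed
 *      report(stats_count);
 *      up_read(&stats_sem);
 *
 *      down_write(&stats_sem);         // exclusive access
 *      stats_count++;
 *      downgrade_write(&stats_sem);    // keep reading with no release gap
 *      report(stats_count);
 *      up_read(&stats_sem);
 */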