/* rwsem-spinlock.c: R/W semaphores: contention handling functions for
 * generic spinlock implementation
 *
 * Copyright (c) 2001 David Howells (dhowells@redhat.com).
 * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
 * - Derived also from comments by Linus
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/export.h>
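
/*
 * Throughout this file, sem->count encodes the lock state: 0 means
 * unlocked, a positive value is the number of readers currently
 * holding the lock, and -1 means a single writer holds it.
 */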

enum rwsem_waiter_type {
        RWSEM_WAITING_FOR_WRITE,
        RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
        struct list_head list;
        struct task_struct *task;
        enum rwsem_waiter_type type;
};

int rwsem_is_locked(struct rw_semaphore *sem)
{
        int ret = 1;
        unsigned long flags;

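        /*
         * Best-effort check: if the trylock fails, somebody else holds
         * wait_lock and is manipulating the semaphore right now, so we
         * conservatively report it as locked.
         */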
        if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
                ret = (sem->count != 0);
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
        }
        return ret;
}
EXPORT_SYMBOL(rwsem_is_locked);

/*
 * initialise the semaphore
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
                  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held semaphore:
         */
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
        sem->count = 0;
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
}
EXPORT_SYMBOL(__init_rwsem);
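
/*
 * Callers do not normally call __init_rwsem() directly; they go
 * through the init_rwsem() macro or the DECLARE_RWSEM() static
 * initializer from <linux/rwsem.h>, e.g. (sketch; config_sem is just
 * an illustrative name):
 *
 *      static DECLARE_RWSEM(config_sem);
 *
 * or:
 *
 *      struct rw_semaphore sem;
 *      init_rwsem(&sem);
 */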

/*
 * handle the lock release when there are processes blocked on it that
 * can now run
 * - if we come here, then:
 *   - the 'active count' _reached_ zero
 *   - the 'waiting count' is non-zero
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having their
 *   task pointers zeroed
 * - writers are only woken if wakewrite is non-zero
 */
static inline struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
{
        struct rwsem_waiter *waiter;
        struct task_struct *tsk;
        int woken;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);

        if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
                if (wakewrite)
                        /* Wake up a writer. Note that we do not grant it the
                         * lock - it will have to acquire it when it runs. */
                        wake_up_process(waiter->task);
                goto out;
        }

        /* grant an infinite number of read locks to the front of the queue */
        woken = 0;
        do {
                struct list_head *next = waiter->list.next;

                list_del(&waiter->list);
                tsk = waiter->task;
                /*
                 * Make sure we do not wake up the next reader before
                 * setting the nil condition that grants it the lock;
                 * otherwise we could miss the wakeup on the other
                 * side and end up sleeping again. See the pairing
                 * wait loop in __down_read() below.
                 */
                smp_mb();
                waiter->task = NULL;
                wake_up_process(tsk);
                put_task_struct(tsk);
                woken++;
                if (next == &sem->wait_list)
                        break;
                waiter = list_entry(next, struct rwsem_waiter, list);
        } while (waiter->type != RWSEM_WAITING_FOR_WRITE);

        sem->count += woken;

 out:
        return sem;
}
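
/*
 * For example, with a wait list of reader A, reader B, writer C,
 * reader D, __rwsem_do_wake() grants and wakes A and B
 * (sem->count += 2), stops at C, and leaves C and D queued.  When the
 * last reader later calls __up_read(), C is woken via
 * __rwsem_wake_one_writer().
 */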

/*
 * wake a single writer
 */
static inline struct rw_semaphore *
__rwsem_wake_one_writer(struct rw_semaphore *sem)
{
        struct rwsem_waiter *waiter;

        waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
        wake_up_process(waiter->task);

        return sem;
}

/*
 * get a read lock on the semaphore
 */
void __sched __down_read(struct rw_semaphore *sem)
{
        struct rwsem_waiter waiter;
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (sem->count >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->count++;
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
                goto out;
        }

        set_current_state(TASK_UNINTERRUPTIBLE);

        /* set up my own style of waitqueue */
        waiter.task = current;
        waiter.type = RWSEM_WAITING_FOR_READ;
        get_task_struct(current);

        list_add_tail(&waiter.list, &sem->wait_list);

        /* we don't need to touch the semaphore struct anymore */
        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        /* wait to be given the lock */
        for (;;) {
                if (!waiter.task)
                        break;
                schedule();
                set_current_state(TASK_UNINTERRUPTIBLE);
        }

        __set_current_state(TASK_RUNNING);
 out:
        ;
}
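
/*
 * The grant handshake: a sleeping reader is handed the lock when the
 * waker increments sem->count and zeroes waiter.task; the loop above
 * only exits once waiter.task is observed as NULL, so a woken reader
 * returns without retaking wait_lock.
 */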

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
int __down_read_trylock(struct rw_semaphore *sem)
{
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (sem->count >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
                sem->count++;
                ret = 1;
        }

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return ret;
}
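
/*
 * A typical caller pattern, via the generic down_read_trylock()
 * wrapper (sketch):
 *
 *      if (down_read_trylock(&sem)) {
 *              ... read-side critical section ...
 *              up_read(&sem);
 *      }
 */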

/*
 * get a write lock on the semaphore
 */
int __sched __down_write_common(struct rw_semaphore *sem, int state)
{
        struct rwsem_waiter waiter;
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        /* set up my own style of waitqueue */
        waiter.task = current;
        waiter.type = RWSEM_WAITING_FOR_WRITE;
        list_add_tail(&waiter.list, &sem->wait_list);

        /* wait for someone to release the lock */
        for (;;) {
                /*
                 * This is the key to write lock stealing: it allows
                 * the task already on the CPU to take the lock right
                 * away, rather than putting itself to sleep and
                 * waiting for the system to wake it or another task
                 * at the head of the wait list.
                 */
                if (sem->count == 0)
                        break;
                if (signal_pending_state(state, current)) {
                        ret = -EINTR;
                        goto out;
                }
                set_current_state(state);
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
                schedule();
                raw_spin_lock_irqsave(&sem->wait_lock, flags);
        }
        /* got the lock */
        sem->count = -1;
out:
        list_del(&waiter.list);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return ret;
}
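
/*
 * Stealing in practice: when __up_write() releases the lock it wakes
 * the writer at the head of the queue but does not grant it the lock.
 * If another task acquires wait_lock first and sees count == 0, it
 * takes the lock instead, and the woken writer simply goes back to
 * sleep in the loop above.
 */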

void __sched __down_write(struct rw_semaphore *sem)
{
        __down_write_common(sem, TASK_UNINTERRUPTIBLE);
}

int __sched __down_write_killable(struct rw_semaphore *sem)
{
        return __down_write_common(sem, TASK_KILLABLE);
}
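
/*
 * The killable variant returns -EINTR if a fatal signal arrives while
 * waiting; a typical caller, via the generic down_write_killable()
 * wrapper (sketch):
 *
 *      if (down_write_killable(&sem))
 *              return -EINTR;
 *      ... write-side critical section ...
 *      up_write(&sem);
 */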

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
int __down_write_trylock(struct rw_semaphore *sem)
{
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (sem->count == 0) {
                /* got the lock */
                sem->count = -1;
                ret = 1;
        }

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        return ret;
}

/*
 * release a read lock on the semaphore
 */
void __up_read(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (--sem->count == 0 && !list_empty(&sem->wait_list))
                sem = __rwsem_wake_one_writer(sem);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * release a write lock on the semaphore
 */
void __up_write(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        sem->count = 0;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 1);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}

/*
 * downgrade a write lock into a read lock
 * - just wake up any readers at the front of the queue
 */
void __downgrade_write(struct rw_semaphore *sem)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        sem->count = 1;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 0);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
}
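
/*
 * A typical downgrade pattern, via the generic wrappers (sketch):
 *
 *      down_write(&sem);
 *      ... update the protected data ...
 *      downgrade_write(&sem);
 *      ... keep reading while other readers may now enter ...
 *      up_read(&sem);
 */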