/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = RWSEM_UNLOCKED_VALUE;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
}

EXPORT_SYMBOL(__init_rwsem);

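/*
 * Usage sketch (illustrative only, not part of this file): callers never
 * invoke the slow-path functions below directly; they use the public API
 * from <linux/rwsem.h>.  my_sem and my_data are hypothetical names:
 *
 *	static DECLARE_RWSEM(my_sem);
 *
 *	down_read(&my_sem);	// shared: many concurrent readers
 *	... read my_data ...
 *	up_read(&my_sem);
 *
 *	down_write(&my_sem);	// exclusive: one writer, no readers
 *	... modify my_data ...
 *	up_write(&my_sem);
 *
 * Only when the fast path fails does a task end up in the functions in
 * this file, queued on sem->wait_list.
 */
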
enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
};

/* Wake types for __rwsem_do_wake().  Note that RWSEM_WAKE_NO_ACTIVE and
 * RWSEM_WAKE_READ_OWNED imply that the spinlock must have been kept held
 * since the rwsem value was observed.
 */
#define RWSEM_WAKE_ANY		0 /* Wake whatever's at head of wait list */
#define RWSEM_WAKE_NO_ACTIVE	1 /* rwsem was observed with no active thread */
#define RWSEM_WAKE_READ_OWNED	2 /* rwsem was observed to be read owned */

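/*
 * For reference: the bias constants used below live in the per-arch
 * <asm/rwsem.h>.  The usual 32-bit layout is sketched here; 64-bit
 * architectures use wider equivalents, so treat the exact values as an
 * assumption rather than a guarantee:
 *
 *	#define RWSEM_UNLOCKED_VALUE	0x00000000
 *	#define RWSEM_ACTIVE_BIAS	0x00000001
 *	#define RWSEM_ACTIVE_MASK	0x0000ffff
 *	#define RWSEM_WAITING_BIAS	(-0x00010000)
 *	#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
 *	#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 *
 * The low half of sem->count counts active lockers (each reader adds
 * RWSEM_ACTIVE_READ_BIAS; a writer adds RWSEM_ACTIVE_WRITE_BIAS, which
 * also sets the waiting part); the negative high half flags queued waiters.
 */
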
/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if downgrading is false
 */
static struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, int wake_type)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	struct list_head *next;
	signed long woken, loop, adjustment;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	if (waiter->type != RWSEM_WAITING_FOR_WRITE)
		goto readers_only;

	if (wake_type == RWSEM_WAKE_READ_OWNED)
		/* Another active reader was observed, so wakeup is not
		 * likely to succeed. Save the atomic op.
		 */
		goto out;

	/* Wake up the writing waiter and let the task grab the sem: */
	wake_up_process(waiter->task);
	goto out;

readers_only:
	/* If we come here from up_xxxx(), another thread might have reached
	 * rwsem_down_failed_common() before we acquired the spinlock and
	 * woken up a waiter, making it now active.  We prefer to check for
	 * this first in order to not spend too much time with the spinlock
	 * held if we're not going to be able to wake up readers in the end.
	 *
	 * Note that we do not need to update the rwsem count: any writer
	 * trying to acquire rwsem will run rwsem_down_write_failed() due
	 * to the waiting threads and block trying to acquire the spinlock.
	 *
	 * We use a dummy atomic update in order to acquire the cache line
	 * exclusively since we expect to succeed and run the final rwsem
	 * count adjustment pretty soon.
	 */
	if (wake_type == RWSEM_WAKE_ANY &&
	    rwsem_atomic_update(0, sem) < RWSEM_WAITING_BIAS)
		/* Someone grabbed the sem for write already */
		goto out;

	/* Grant an infinite number of read locks to the readers at the front
	 * of the queue.  Note we increment the 'active part' of the count by
	 * the number of readers before waking any processes up.
	 */
	woken = 0;
	do {
		woken++;

		if (waiter->list.next == &sem->wait_list)
			break;

		waiter = list_entry(waiter->list.next,
					struct rwsem_waiter, list);

	} while (waiter->type != RWSEM_WAITING_FOR_WRITE);

	adjustment = woken * RWSEM_ACTIVE_READ_BIAS;
	if (waiter->type != RWSEM_WAITING_FOR_WRITE)
		/* hit end of list above */
		adjustment -= RWSEM_WAITING_BIAS;

	rwsem_atomic_add(adjustment, sem);

	next = sem->wait_list.next;
	for (loop = woken; loop > 0; loop--) {
		waiter = list_entry(next, struct rwsem_waiter, list);
		next = waiter->list.next;
		tsk = waiter->task;
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
	}

	sem->wait_list.next = next;
	next->prev = &sem->wait_list;

out:
	return sem;
}

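/*
 * Worked example (assuming the hypothetical 32-bit bias values sketched
 * above): two readers are queued and nobody holds the lock, so
 * sem->count == RWSEM_WAITING_BIAS == -0x00010000.  Waking both readers
 * computes adjustment = 2 * RWSEM_ACTIVE_READ_BIAS - RWSEM_WAITING_BIAS
 * = 0x00010002, leaving count == 0x00000002: two active readers, no
 * waiting bias, empty queue.
 */
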
/* Try to get write sem, caller holds sem->wait_lock: */
static int try_get_writer_sem(struct rw_semaphore *sem,
					struct rwsem_waiter *waiter)
{
	struct rwsem_waiter *fwaiter;
	long oldcount, adjustment;

	/* only steal when first waiter is writing */
	fwaiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	if (fwaiter->type != RWSEM_WAITING_FOR_WRITE)
		return 0;

	adjustment = RWSEM_ACTIVE_WRITE_BIAS;
	/* Only one waiter in the queue: */
	if (fwaiter == waiter && waiter->list.next == &sem->wait_list)
		adjustment -= RWSEM_WAITING_BIAS;

try_again_write:
	oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
	if (!(oldcount & RWSEM_ACTIVE_MASK)) {
		/* No active lock: */
		struct task_struct *tsk = waiter->task;

		list_del(&waiter->list);
		smp_mb();
		put_task_struct(tsk);
		tsk->state = TASK_RUNNING;
		return 1;
	}
	/* someone grabbed the sem already */
	if (rwsem_atomic_update(-adjustment, sem) & RWSEM_ACTIVE_MASK)
		return 0;
	goto try_again_write;
}

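/*
 * Note on the retry loop above: the first atomic update optimistically
 * claims the write lock; if other lockers turned out to be active we back
 * the bias out again.  Between those two updates the active holders may
 * all have dropped the lock, in which case backing out would leave a
 * grabbable lock behind, so we loop and try again rather than return 0.
 */
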
/*
 * wait for the read lock to be granted
 */
struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
{
	enum rwsem_waiter_type type = RWSEM_WAITING_FOR_READ;
	signed long adjustment = -RWSEM_ACTIVE_READ_BIAS;
	struct rwsem_waiter waiter;
	struct task_struct *tsk = current;
	signed long count;

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.type = type;
	get_task_struct(tsk);

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list))
		adjustment += RWSEM_WAITING_BIAS;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = rwsem_atomic_update(adjustment, sem);

	/* If there are no active locks, wake the front queued process(es) up.
	 *
	 * Alternatively, if we're called from a failed down_write(), there
	 * were already threads queued before us and there are no active
	 * writers, the lock must be read owned; so we try to wake any read
	 * locks that were queued ahead of us.
	 *
	 * (In this read path adjustment can never equal
	 * -RWSEM_ACTIVE_WRITE_BIAS, so only the first test can fire; the
	 * second is retained from the code shared with
	 * rwsem_down_write_failed() below.) */
	if (count == RWSEM_WAITING_BIAS)
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_NO_ACTIVE);
	else if (count > RWSEM_WAITING_BIAS &&
			adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);

	raw_spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	while (true) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (!waiter.task)
			break;

		raw_spin_lock_irq(&sem->wait_lock);
		/* Try to get the writer sem, may steal from the head writer
		 * (a no-op here: type is RWSEM_WAITING_FOR_READ in this path;
		 * the test is shared with the write path below): */
		if (type == RWSEM_WAITING_FOR_WRITE)
			if (try_get_writer_sem(sem, &waiter)) {
				raw_spin_unlock_irq(&sem->wait_lock);
				return sem;
			}
		raw_spin_unlock_irq(&sem->wait_lock);
		schedule();
	}

	tsk->state = TASK_RUNNING;

	return sem;
}

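/*
 * For context, a sketch of the fast path that lands here on contention.
 * This is roughly what the generic <asm-generic/rwsem.h> of the era did;
 * individual architectures may differ, so treat it as an assumption:
 *
 *	static inline void __down_read(struct rw_semaphore *sem)
 *	{
 *		if (unlikely(atomic_long_inc_return(
 *				(atomic_long_t *)&sem->count) <= 0))
 *			rwsem_down_read_failed(sem);
 *	}
 *
 * i.e. a reader optimistically adds RWSEM_ACTIVE_READ_BIAS and only takes
 * the slow path above when the result says writers or waiters exist.
 */
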
/*
 * wait for the write lock to be granted
 */
struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
{
	enum rwsem_waiter_type type = RWSEM_WAITING_FOR_WRITE;
	signed long adjustment = -RWSEM_ACTIVE_WRITE_BIAS;
	struct rwsem_waiter waiter;
	struct task_struct *tsk = current;
	signed long count;

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.type = type;
	get_task_struct(tsk);

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list))
		adjustment += RWSEM_WAITING_BIAS;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = rwsem_atomic_update(adjustment, sem);

	/* If there are no active locks, wake the front queued process(es) up.
	 *
	 * Alternatively, if we're called from a failed down_write(), there
	 * were already threads queued before us and there are no active
	 * writers, the lock must be read owned; so we try to wake any read
	 * locks that were queued ahead of us. */
	if (count == RWSEM_WAITING_BIAS)
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_NO_ACTIVE);
	else if (count > RWSEM_WAITING_BIAS &&
			adjustment == -RWSEM_ACTIVE_WRITE_BIAS)
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);

	raw_spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	while (true) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (!waiter.task)
			break;

		raw_spin_lock_irq(&sem->wait_lock);
		/* Try to get the writer sem, may steal from the head writer: */
		if (type == RWSEM_WAITING_FOR_WRITE)
			if (try_get_writer_sem(sem, &waiter)) {
				raw_spin_unlock_irq(&sem->wait_lock);
				return sem;
			}
		raw_spin_unlock_irq(&sem->wait_lock);
		schedule();
	}

	tsk->state = TASK_RUNNING;

	return sem;
}

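/*
 * Matching fast-path sketch for writers (again roughly the generic
 * <asm-generic/rwsem.h> form; per-arch code may differ):
 *
 *	static inline void __down_write(struct rw_semaphore *sem)
 *	{
 *		long tmp;
 *
 *		tmp = atomic_long_add_return(RWSEM_ACTIVE_WRITE_BIAS,
 *					     (atomic_long_t *)&sem->count);
 *		if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
 *			rwsem_down_write_failed(sem);
 *	}
 *
 * A writer succeeds only if the count was exactly RWSEM_UNLOCKED_VALUE
 * beforehand; any other value means readers, a writer, or waiters.
 */
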
/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}

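/*
 * Release-side sketch (assumed asm-generic style): __up_read decrements
 * the count and calls rwsem_wake() only when the result shows queued
 * waiters and no remaining active lockers, e.g. roughly:
 *
 *	static inline void __up_read(struct rw_semaphore *sem)
 *	{
 *		long tmp;
 *
 *		tmp = atomic_long_dec_return((atomic_long_t *)&sem->count);
 *		if (unlikely(tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0))
 *			rwsem_wake(sem);
 *	}
 */
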
/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}

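/*
 * Downgrade fast-path sketch (assumed asm-generic style): converting the
 * write hold into a read hold means adding -RWSEM_WAITING_BIAS, turning
 * RWSEM_ACTIVE_WRITE_BIAS into RWSEM_ACTIVE_READ_BIAS in one atomic op:
 *
 *	static inline void __downgrade_write(struct rw_semaphore *sem)
 *	{
 *		long tmp;
 *
 *		tmp = atomic_long_add_return(-RWSEM_WAITING_BIAS,
 *					     (atomic_long_t *)&sem->count);
 *		if (tmp < 0)
 *			rwsem_downgrade_wake(sem);
 *	}
 */
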
EXPORT_SYMBOL(rwsem_down_read_failed);
EXPORT_SYMBOL(rwsem_down_write_failed);
EXPORT_SYMBOL(rwsem_wake);
EXPORT_SYMBOL(rwsem_downgrade_wake);