/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/osq_lock.h>

#include "rwsem.h"

/*
 * Guide to the rw_semaphore's count field for common values.
 * (32-bit case illustrated, similar for 64-bit)
 *
 * 0x0000000X	(1) X readers active or attempting lock, no writer waiting
 *		    X = #active_readers + #readers attempting to lock
 *		    (X*ACTIVE_BIAS)
 *
 * 0x00000000	rwsem is unlocked, and no one is waiting for the lock or
 *		attempting to read lock or write lock.
 *
 * 0xffff000X	(1) X readers active or attempting lock, with waiters for lock
 *		    X = #active readers + #readers attempting lock
 *		    (X*ACTIVE_BIAS + WAITING_BIAS)
 *		(2) 1 writer attempting lock, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *		(3) 1 writer active, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *
 * 0xffff0001	(1) 1 reader active or attempting lock, waiters for lock
 *		    (WAITING_BIAS + ACTIVE_BIAS)
 *		(2) 1 writer active or attempting lock, no waiters for lock
 *		    (ACTIVE_WRITE_BIAS)
 *
 * 0xffff0000	(1) There are writers or readers queued but none active
 *		    or in the process of attempting lock.
 *		    (WAITING_BIAS)
 *		Note: writer can attempt to steal lock for this count by adding
 *		ACTIVE_WRITE_BIAS in cmpxchg and checking the old count
 *
 * 0xfffe0001	(1) 1 writer active, or attempting lock. Waiters on queue.
 *		    (ACTIVE_WRITE_BIAS + WAITING_BIAS)
 *
 * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and
 * checking that the count becomes greater than 0 for successful lock
 * acquisition, i.e. the case where there are only readers or nobody
 * holds the lock (1st and 2nd cases above).
 *
 * Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write and
 * checking that the count becomes exactly ACTIVE_WRITE_BIAS for
 * successful lock acquisition (i.e. nobody else holds or is attempting
 * the lock). If unsuccessful, rwsem_down_write_failed() checks whether
 * there are only waiters but none active (the 0xffff0000 case above)
 * and attempts to steal the lock.
 */
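
/*
 * Worked example (illustrative, using the 32-bit values above): with two
 * readers holding the lock and one writer queued, the count is
 * 2*ACTIVE_BIAS + WAITING_BIAS = 0xffff0002. Each up_read() subtracts
 * ACTIVE_BIAS; the second one leaves 0xffff0000, i.e. a negative count
 * with a zero 'active part', which sends the caller into rwsem_wake()
 * to wake the queued writer.
 */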

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	sem->count = RWSEM_UNLOCKED_VALUE;
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	sem->owner = NULL;
	osq_lock_init(&sem->osq);
#endif
}

EXPORT_SYMBOL(__init_rwsem);
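
/*
 * Typical usage, for illustration only (the uncontended fastpaths live in
 * include/linux/rwsem.h and the arch headers; only contended calls reach
 * the functions in this file):
 *
 *	static DECLARE_RWSEM(my_sem);
 *
 *	down_read(&my_sem);		shared; many readers may hold it
 *	...
 *	up_read(&my_sem);
 *
 *	down_write(&my_sem);		exclusive; a single writer
 *	...
 *	up_write(&my_sem);
 */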

enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
};

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};

/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only woken if wake_type is RWSEM_WAKE_ANY
 */
static struct rw_semaphore *
__rwsem_do_wake(struct rw_semaphore *sem, enum rwsem_wake_type wake_type)
{
	struct rwsem_waiter *waiter;
	struct task_struct *tsk;
	struct list_head *next;
	long oldcount, woken, loop, adjustment;

	waiter = list_entry(sem->wait_list.next, struct rwsem_waiter, list);
	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY)
			/* Wake writer at the front of the queue, but do not
			 * grant it the lock yet as we want other writers
			 * to be able to steal it. Readers, on the other hand,
			 * will block as they will notice the queued writer.
			 */
			wake_up_process(waiter->task);
		goto out;
	}

	/* Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	adjustment = 0;
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
		oldcount = rwsem_atomic_update(adjustment, sem) - adjustment;
		if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
			/* A writer stole the lock. Undo our reader grant. */
			if (rwsem_atomic_update(-adjustment, sem) &
						RWSEM_ACTIVE_MASK)
				goto out;
			/* Last active locker left. Retry waking readers. */
			goto try_reader_grant;
		}
	}

	/* Grant read locks to all the readers at the front of the queue.
	 * Note we increment the 'active part' of the count by the number
	 * of readers before waking any processes up.
	 */
	woken = 0;
	do {
		woken++;

		if (waiter->list.next == &sem->wait_list)
			break;

		waiter = list_entry(waiter->list.next,
				    struct rwsem_waiter, list);

	} while (waiter->type != RWSEM_WAITING_FOR_WRITE);

	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
	if (waiter->type != RWSEM_WAITING_FOR_WRITE)
		/* hit end of list above */
		adjustment -= RWSEM_WAITING_BIAS;

	if (adjustment)
		rwsem_atomic_add(adjustment, sem);
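
	/*
	 * Illustration: if three readers are woken after an up_write(),
	 * the adjustment applied above is 3*RWSEM_ACTIVE_READ_BIAS minus
	 * the one read bias already granted earlier, minus
	 * RWSEM_WAITING_BIAS if nobody remains queued behind them.
	 */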

	next = sem->wait_list.next;
	loop = woken;
	do {
		waiter = list_entry(next, struct rwsem_waiter, list);
		next = waiter->list.next;
		tsk = waiter->task;
		/*
		 * Make sure we do not wake up the next reader before
		 * setting the nil condition (waiter->task == NULL) that
		 * grants it the lock; otherwise we could miss the wakeup
		 * on the other side and end up sleeping again. See the
		 * pairing in rwsem_down_read_failed().
		 */
		smp_mb();
		waiter->task = NULL;
		wake_up_process(tsk);
		put_task_struct(tsk);
	} while (--loop);

	sem->wait_list.next = next;
	next->prev = &sem->wait_list;

 out:
	return sem;
}

/*
 * Wait for the read lock to be granted
 */
__visible
struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
{
	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
	struct rwsem_waiter waiter;
	struct task_struct *tsk = current;

	/* set up my own style of waitqueue */
	waiter.task = tsk;
	waiter.type = RWSEM_WAITING_FOR_READ;
	get_task_struct(tsk);

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list))
		adjustment += RWSEM_WAITING_BIAS;
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = rwsem_atomic_update(adjustment, sem);

	/* If there are no active locks, wake the front queued process(es).
	 *
	 * If there are no writers and we are first in the queue,
	 * wake our own waiter to join the existing active readers!
	 */
	if (count == RWSEM_WAITING_BIAS ||
	    (count > RWSEM_WAITING_BIAS &&
	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

	raw_spin_unlock_irq(&sem->wait_lock);

	/* wait to be given the lock */
	while (true) {
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
		if (!waiter.task)
			break;
		schedule();
	}

	__set_task_state(tsk, TASK_RUNNING);
	return sem;
}
EXPORT_SYMBOL(rwsem_down_read_failed);
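
/*
 * Note, for illustration: the down_read() fastpath increments the count by
 * RWSEM_ACTIVE_READ_BIAS and only calls rwsem_down_read_failed() when the
 * result is negative, i.e. when a writer is active or queued.
 */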

static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
{
	/*
	 * Try acquiring the write lock. Check count first in order
	 * to reduce unnecessary expensive cmpxchg() operations.
	 */
	if (count == RWSEM_WAITING_BIAS &&
	    cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS,
		    RWSEM_ACTIVE_WRITE_BIAS) == RWSEM_WAITING_BIAS) {
		if (!list_is_singular(&sem->wait_list))
			rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);
		rwsem_set_owner(sem);
		return true;
	}

	return false;
}
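
/*
 * Illustration for rwsem_try_write_lock() above: RWSEM_ACTIVE_WRITE_BIAS is
 * RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS, so a successful cmpxchg from
 * RWSEM_WAITING_BIAS marks one active writer while consuming the waiting
 * bias this waiter itself contributed; the waiting bias is put back
 * whenever other waiters remain on the list.
 */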

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire write lock before the writer has been put on wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
	long old, count = READ_ONCE(sem->count);

	while (true) {
		if (!(count == 0 || count == RWSEM_WAITING_BIAS))
			return false;

		old = cmpxchg_acquire(&sem->count, count,
				      count + RWSEM_ACTIVE_WRITE_BIAS);
		if (old == count) {
			rwsem_set_owner(sem);
			return true;
		}

		count = old;
	}
}

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool ret = true;

	if (need_resched())
		return false;

	rcu_read_lock();
	owner = READ_ONCE(sem->owner);
	if (!owner) {
		long count = READ_ONCE(sem->count);
		/*
		 * If sem->owner is not set, but we have just entered the
		 * slowpath with the lock still active, then reader(s) may
		 * hold the lock. To be safe, bail out of spinning in
		 * this situation.
		 */
		if (count & RWSEM_ACTIVE_MASK)
			ret = false;
		goto done;
	}

	ret = owner->on_cpu;
done:
	rcu_read_unlock();
	return ret;
}

static noinline
bool rwsem_spin_on_owner(struct rw_semaphore *sem, struct task_struct *owner)
{
	long count;

	rcu_read_lock();
	while (sem->owner == owner) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking that sem->owner still matches owner. If that
		 * check fails, owner might point to free()d memory; if it
		 * still matches, the rcu_read_lock() ensures the memory
		 * stays valid.
		 */
		barrier();

		/* abort spinning when need_resched or owner is not running */
		if (!owner->on_cpu || need_resched()) {
			rcu_read_unlock();
			return false;
		}

		cpu_relax_lowlatency();
	}
	rcu_read_unlock();

	if (READ_ONCE(sem->owner))
		return true; /* new owner, continue spinning */

	/*
	 * When the owner is not set, the lock could be free or
	 * held by readers. Check the counter to verify the
	 * state.
	 */
	count = READ_ONCE(sem->count);
	return (count == 0 || count == RWSEM_WAITING_BIAS);
}

static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool taken = false;

	preempt_disable();

	/* sem->wait_lock should not be held when doing optimistic spinning */
	if (!rwsem_can_spin_on_owner(sem))
		goto done;

	if (!osq_lock(&sem->osq))
		goto done;

	while (true) {
		owner = READ_ONCE(sem->owner);
		if (owner && !rwsem_spin_on_owner(sem, owner))
			break;

		/* wait_lock will be acquired if write_lock is obtained */
		if (rwsem_try_write_lock_unqueued(sem)) {
			taken = true;
			break;
		}

		/*
		 * When there's no owner, we might have preempted between
		 * the owner acquiring the lock and setting the owner
		 * field. If we're an RT task, we would then live-lock,
		 * because we won't let the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(current)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax_lowlatency();
	}
	osq_unlock(&sem->osq);
done:
	preempt_enable();
	return taken;
}
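
/*
 * Note, for illustration: the osq (optimistic spin queue) taken above
 * ensures that at most one writer spins on sem->owner at a time; other
 * would-be spinners queue up on the osq rather than all contending for
 * the same cache line.
 */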

/*
 * Return true if the rwsem has an active spinner
 */
static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
	return osq_is_locked(&sem->osq);
}

#else
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	return false;
}

static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
	return false;
}
#endif

/*
 * Wait until we successfully acquire the write lock
 */
__visible
struct rw_semaphore __sched *rwsem_down_write_failed(struct rw_semaphore *sem)
{
	long count;
	bool waiting = true; /* any queued threads before us */
	struct rwsem_waiter waiter;

	/* undo write bias from down_write operation, stop active locking */
	count = rwsem_atomic_update(-RWSEM_ACTIVE_WRITE_BIAS, sem);

	/* do optimistic spinning and steal lock if possible */
	if (rwsem_optimistic_spin(sem))
		return sem;

	/*
	 * Optimistic spinning failed, proceed to the slowpath
	 * and block until we can acquire the sem.
	 */
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_WRITE;

	raw_spin_lock_irq(&sem->wait_lock);

	/* account for this before adding a new element to the list */
	if (list_empty(&sem->wait_list))
		waiting = false;

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	if (waiting) {
		count = READ_ONCE(sem->count);

		/*
		 * If there were already threads queued before us and there are
		 * no active writers, the lock must be read owned; so we try to
		 * wake any read locks that were queued ahead of us.
		 */
		if (count > RWSEM_WAITING_BIAS)
			sem = __rwsem_do_wake(sem, RWSEM_WAKE_READERS);

	} else
		count = rwsem_atomic_update(RWSEM_WAITING_BIAS, sem);

	/* wait until we successfully acquire the lock */
	set_current_state(TASK_UNINTERRUPTIBLE);
	while (true) {
		if (rwsem_try_write_lock(count, sem))
			break;
		raw_spin_unlock_irq(&sem->wait_lock);

		/* Block until there are no active lockers. */
		do {
			schedule();
			set_current_state(TASK_UNINTERRUPTIBLE);
		} while ((count = sem->count) & RWSEM_ACTIVE_MASK);

		raw_spin_lock_irq(&sem->wait_lock);
	}
	__set_current_state(TASK_RUNNING);

	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);

	return sem;
}
EXPORT_SYMBOL(rwsem_down_write_failed);
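
/*
 * Note, for illustration: the down_write() fastpath adds
 * RWSEM_ACTIVE_WRITE_BIAS to the count and calls rwsem_down_write_failed()
 * unless the result is exactly RWSEM_ACTIVE_WRITE_BIAS, i.e. nobody else
 * held or was contending for the lock.
 */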

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
__visible
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	/*
	 * If a spinner is present, it is not necessary to do the wakeup.
	 * Try to do wakeup only if the trylock succeeds to minimize
	 * spinlock contention which may introduce too much delay in the
	 * unlock operation.
	 *
	 *    spinning writer		up_write/up_read caller
	 *    ---------------		-----------------------
	 * [S]   osq_unlock()		[L]   osq
	 *	 MB			      RMB
	 * [RmW] rwsem_try_write_lock() [RmW] spin_trylock(wait_lock)
	 *
	 * Here, it is important to make sure that there won't be a missed
	 * wakeup while the rwsem is free and the only spinning writer goes
	 * to sleep without taking the rwsem. Even when the spinning writer
	 * is just going to break out of the waiting loop, it will still do
	 * a trylock in rwsem_down_write_failed() before sleeping. IOW, if
	 * rwsem_has_spinner() is true, it will guarantee at least one
	 * trylock attempt on the rwsem later on.
	 */
	if (rwsem_has_spinner(sem)) {
		/*
		 * The smp_rmb() here is to make sure that the spinner
		 * state is consulted before reading the wait_lock.
		 */
		smp_rmb();
		if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags))
			return sem;
		goto locked;
	}
	raw_spin_lock_irqsave(&sem->wait_lock, flags);
locked:

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_ANY);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}
EXPORT_SYMBOL(rwsem_wake);

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
__visible
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	/* do nothing if list empty */
	if (!list_empty(&sem->wait_list))
		sem = __rwsem_do_wake(sem, RWSEM_WAKE_READ_OWNED);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

	return sem;
}
EXPORT_SYMBOL(rwsem_downgrade_wake);
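
/*
 * Typical downgrade usage, for illustration only:
 *
 *	down_write(&my_sem);		exclusive while updating
 *	...
 *	downgrade_write(&my_sem);	keep read access without a window
 *					in which the lock is released
 *	...
 *	up_read(&my_sem);
 */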