/* rwsem-xadd.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/rt.h>
#include <linux/osq_lock.h>

#include "rwsem.h"

/*
 * Guide to the rw_semaphore's count field for common values.
 * (32-bit case illustrated, similar for 64-bit)
 *
 * 0x0000000X   (1) X readers active or attempting lock, no writer waiting
 *                  X = #active_readers + #readers attempting to lock
 *                  (X*ACTIVE_BIAS)
 *
 * 0x00000000   rwsem is unlocked, and no one is waiting for the lock or
 *              attempting to read lock or write lock.
 *
 * 0xffff000X   (1) X readers active or attempting lock, with waiters for lock
 *                  X = #active readers + #readers attempting lock
 *                  (X*ACTIVE_BIAS + WAITING_BIAS)
 *              (2) 1 writer attempting lock, no waiters for lock
 *                  X-1 = #active readers + #readers attempting lock
 *                  ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *              (3) 1 writer active, no waiters for lock
 *                  X-1 = #active readers + #readers attempting lock
 *                  ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *
 * 0xffff0001   (1) 1 reader active or attempting lock, waiters for lock
 *                  (WAITING_BIAS + ACTIVE_BIAS)
 *              (2) 1 writer active or attempting lock, no waiters for lock
 *                  (ACTIVE_WRITE_BIAS)
 *
 * 0xffff0000   (1) There are writers or readers queued but none active
 *                  or in the process of attempting lock.
 *                  (WAITING_BIAS)
 *              Note: a writer can attempt to steal the lock for this count
 *              value by adding ACTIVE_WRITE_BIAS in a cmpxchg and checking
 *              the old count.
 *
 * 0xfffe0001   (1) 1 writer active, or attempting lock. Waiters on queue.
 *                  (ACTIVE_WRITE_BIAS + WAITING_BIAS)
 *
 * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and
 *       checking that the count becomes greater than 0 for successful lock
 *       acquisition, i.e. the cases where there are only readers or nobody
 *       has the lock (1st and 2nd case above).
 *
 *       Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write
 *       and checking that the count becomes ACTIVE_WRITE_BIAS for successful
 *       lock acquisition (i.e. nobody else holds the lock or is attempting
 *       it). If unsuccessful, rwsem_down_write_failed() checks whether there
 *       are only waiters but none active (5th case above) and attempts to
 *       steal the lock.
 *
 */
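
/*
 * Illustrative sketch, not part of the original source: a worked trace of
 * the count arithmetic above, using the 32-bit values ACTIVE_BIAS =
 * 0x00000001, WAITING_BIAS = 0xffff0000 and ACTIVE_WRITE_BIAS = 0xffff0001,
 * for one reader followed by one contending writer:
 *
 *      count = 0x00000000      rwsem unlocked
 *      down_read():  count += ACTIVE_BIAS        -> 0x00000001
 *      down_write(): count += ACTIVE_WRITE_BIAS  -> 0xffff0002
 *              not ACTIVE_WRITE_BIAS, so the writer enters the slowpath,
 *              undoes its write bias and adds WAITING_BIAS while queueing
 *              itself                            -> 0xffff0001
 *      up_read():    count -= ACTIVE_BIAS        -> 0xffff0000
 *              waiters but no active lockers, so rwsem_wake() runs and the
 *              queued writer cmpxchg()s WAITING_BIAS to ACTIVE_WRITE_BIAS
 *              to take the lock                  -> 0xffff0001
 */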

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
                  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        /*
         * Make sure we are not reinitializing a held semaphore:
         */
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
        atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
        sem->owner = NULL;
        osq_lock_init(&sem->osq);
#endif
}

EXPORT_SYMBOL(__init_rwsem);
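
/*
 * Illustrative sketch, not part of the original source: how a hypothetical
 * caller uses the rwsem API whose contended paths are implemented in this
 * file. The uncontended fast paths live in the per-arch or asm-generic
 * rwsem code; the functions below are only entered on contention.
 */
#if 0
static DECLARE_RWSEM(example_sem);      /* hypothetical example lock */

static void example_reader(void)
{
        down_read(&example_sem);        /* slowpath: rwsem_down_read_failed() */
        /* ... read shared state ... */
        up_read(&example_sem);          /* slowpath: rwsem_wake() */
}

static void example_writer(void)
{
        down_write(&example_sem);       /* slowpath: rwsem_down_write_failed() */
        /* ... modify shared state ... */
        up_write(&example_sem);         /* slowpath: rwsem_wake() */
}
#endif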

enum rwsem_waiter_type {
        RWSEM_WAITING_FOR_WRITE,
        RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
        struct list_head list;
        struct task_struct *task;
        enum rwsem_waiter_type type;
};

enum rwsem_wake_type {
        RWSEM_WAKE_ANY,         /* Wake whatever's at head of wait list */
        RWSEM_WAKE_READERS,     /* Wake readers only */
        RWSEM_WAKE_READ_OWNED   /* Waker thread holds the read lock */
};

/*
 * handle the lock release when there are processes blocked on it that can
 * now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 * - there must be someone on the queue
 * - the wait_lock must be held by the caller
 * - tasks are marked for wakeup; the caller must later invoke wake_up_q()
 *   to actually wake up the blocked task(s) and drop the reference count,
 *   preferably when the wait_lock is released
 * - woken process blocks are discarded from the list after having their
 *   task field zeroed
 * - a writer at the head of the queue is only marked for wakeup when
 *   wake_type is RWSEM_WAKE_ANY
 */
static void __rwsem_mark_wake(struct rw_semaphore *sem,
                              enum rwsem_wake_type wake_type,
                              struct wake_q_head *wake_q)
{
        struct rwsem_waiter *waiter, *tmp;
        long oldcount, woken = 0, adjustment = 0;

        /*
         * Take a peek at the queue head waiter such that we can determine
         * the wakeup(s) to perform.
         */
        waiter = list_first_entry(&sem->wait_list, struct rwsem_waiter, list);

        if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
                if (wake_type == RWSEM_WAKE_ANY) {
                        /*
                         * Mark the writer at the front of the queue for
                         * wakeup. Until the task is actually awoken later by
                         * the caller, other writers are able to steal the
                         * lock. Readers, on the other hand, will block as
                         * they will notice the queued writer.
                         */
                        wake_q_add(wake_q, waiter->task);
                }

                return;
        }

        /*
         * Writers might steal the lock before we grant it to the next reader.
         * We prefer to do the first reader grant before counting readers
         * so we can bail out early if a writer stole the lock.
         */
        if (wake_type != RWSEM_WAKE_READ_OWNED) {
                adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
                oldcount = atomic_long_fetch_add(adjustment, &sem->count);
                if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
                        /*
                         * If the count is still less than RWSEM_WAITING_BIAS
                         * after removing the adjustment, it is assumed that
                         * a writer has stolen the lock. We have to undo our
                         * reader grant.
                         */
                        if (atomic_long_add_return(-adjustment, &sem->count) <
                            RWSEM_WAITING_BIAS)
                                return;

                        /* Last active locker left. Retry waking readers. */
                        goto try_reader_grant;
                }
                /*
                 * It is not really necessary to set it to reader-owned here,
                 * but it gives the spinners an early indication that the
                 * readers now have the lock.
                 */
                rwsem_set_reader_owned(sem);
        }

        /*
         * Grant an infinite number of read locks to the readers at the front
         * of the queue. We know that woken will be at least 1 as we accounted
         * for it above. Note that we increment the 'active part' of the count
         * by the number of readers before waking any processes up.
         */
        list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
                struct task_struct *tsk;

                if (waiter->type == RWSEM_WAITING_FOR_WRITE)
                        break;

                woken++;
                tsk = waiter->task;

                wake_q_add(wake_q, tsk);
                list_del(&waiter->list);
                /*
                 * Ensure that the last operation is setting the reader
                 * waiter to nil such that rwsem_down_read_failed() cannot
                 * race with do_exit() by always holding a reference count
                 * to the task to wake up.
                 */
                smp_store_release(&waiter->task, NULL);
        }

        adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
        if (list_empty(&sem->wait_list)) {
                /* hit end of list above */
                adjustment -= RWSEM_WAITING_BIAS;
        }

        if (adjustment)
                atomic_long_add(adjustment, &sem->count);
}
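
/*
 * Illustrative sketch, not part of the original source: the calling pattern
 * __rwsem_mark_wake() expects, as used by rwsem_wake() and the slow paths
 * below. Waiters are only marked under the wait_lock; the actual wakeups
 * are done by wake_up_q() after the lock has been dropped.
 */
#if 0
static void example_mark_and_wake(struct rw_semaphore *sem)
{
        DEFINE_WAKE_Q(wake_q);
        unsigned long flags;

        raw_spin_lock_irqsave(&sem->wait_lock, flags);
        if (!list_empty(&sem->wait_list))
                __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);

        wake_up_q(&wake_q);     /* wake the marked tasks outside the wait_lock */
}
#endif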

/*
 * Wait for the read lock to be granted
 */
__visible
struct rw_semaphore __sched *rwsem_down_read_failed(struct rw_semaphore *sem)
{
        long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
        struct rwsem_waiter waiter;
        DEFINE_WAKE_Q(wake_q);

        waiter.task = current;
        waiter.type = RWSEM_WAITING_FOR_READ;

        raw_spin_lock_irq(&sem->wait_lock);
        if (list_empty(&sem->wait_list))
                adjustment += RWSEM_WAITING_BIAS;
        list_add_tail(&waiter.list, &sem->wait_list);

        /* we're now waiting on the lock, but no longer actively locking */
        count = atomic_long_add_return(adjustment, &sem->count);

        /*
         * If there are no active locks, wake the front queued process(es).
         *
         * If there are no writers and we are first in the queue,
         * wake our own waiter to join the existing active readers!
         */
        if (count == RWSEM_WAITING_BIAS ||
            (count > RWSEM_WAITING_BIAS &&
             adjustment != -RWSEM_ACTIVE_READ_BIAS))
                __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

        raw_spin_unlock_irq(&sem->wait_lock);
        wake_up_q(&wake_q);

        /* wait to be given the lock */
        while (true) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!waiter.task)
                        break;
                schedule();
        }

        __set_current_state(TASK_RUNNING);
        return sem;
}
EXPORT_SYMBOL(rwsem_down_read_failed);

/*
 * This function must be called with the sem->wait_lock held to prevent
 * race conditions between checking the rwsem wait list and setting the
 * sem->count accordingly.
 */
static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
{
        /*
         * Avoid trying to acquire the write lock if count isn't
         * RWSEM_WAITING_BIAS.
         */
        if (count != RWSEM_WAITING_BIAS)
                return false;

        /*
         * Acquire the lock by trying to set it to ACTIVE_WRITE_BIAS. If there
         * are other tasks on the wait list, we need to add on WAITING_BIAS.
         */
        count = list_is_singular(&sem->wait_list) ?
                        RWSEM_ACTIVE_WRITE_BIAS :
                        RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;

        if (atomic_long_cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS, count)
                                                        == RWSEM_WAITING_BIAS) {
                rwsem_set_owner(sem);
                return true;
        }

        return false;
}
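
/*
 * Illustrative sketch, not part of the original source, using the 32-bit
 * values from the table at the top of this file. A successful
 * rwsem_try_write_lock() performs:
 *
 *      only this writer queued:
 *              cmpxchg 0xffff0000 (WAITING_BIAS)
 *                   -> 0xffff0001 (ACTIVE_WRITE_BIAS)
 *      other waiters still queued:
 *              cmpxchg 0xffff0000 (WAITING_BIAS)
 *                   -> 0xfffe0001 (ACTIVE_WRITE_BIAS + WAITING_BIAS)
 *
 * If any reader or writer became active in the meantime, the count is no
 * longer WAITING_BIAS, the cmpxchg fails and the caller keeps waiting.
 */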

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire the write lock before the writer has been put on the wait
 * queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
        long old, count = atomic_long_read(&sem->count);

        while (true) {
                if (!(count == 0 || count == RWSEM_WAITING_BIAS))
                        return false;

                old = atomic_long_cmpxchg_acquire(&sem->count, count,
                                      count + RWSEM_ACTIVE_WRITE_BIAS);
                if (old == count) {
                        rwsem_set_owner(sem);
                        return true;
                }

                count = old;
        }
}

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
        struct task_struct *owner;
        bool ret = true;

        if (need_resched())
                return false;

        rcu_read_lock();
        owner = READ_ONCE(sem->owner);
        if (!rwsem_owner_is_writer(owner)) {
                /*
                 * Don't spin if the rwsem is owned by readers.
                 */
                ret = !rwsem_owner_is_reader(owner);
                goto done;
        }

        /*
         * To guard against lock holder preemption, skip spinning if the
         * owner task is not on a CPU or its CPU is preempted.
         */
        ret = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
done:
        rcu_read_unlock();
        return ret;
}

/*
 * Return true only if we can still spin on the owner field of the rwsem.
 */
static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
{
        struct task_struct *owner = READ_ONCE(sem->owner);

        if (!rwsem_owner_is_writer(owner))
                goto out;

        rcu_read_lock();
        while (sem->owner == owner) {
                /*
                 * Ensure we emit the owner->on_cpu dereference _after_
                 * checking that sem->owner still matches owner. If that
                 * check fails, owner might point to free()d memory; if it
                 * still matches, the rcu_read_lock() ensures the memory
                 * stays valid.
                 */
                barrier();

                /*
                 * Abort spinning when a reschedule is needed, the owner is
                 * not running, or the owner's CPU is preempted.
                 */
                if (!owner->on_cpu || need_resched() ||
                                vcpu_is_preempted(task_cpu(owner))) {
                        rcu_read_unlock();
                        return false;
                }

                cpu_relax();
        }
        rcu_read_unlock();
out:
        /*
         * If there is a new owner or the owner is not set, we continue
         * spinning.
         */
        return !rwsem_owner_is_reader(READ_ONCE(sem->owner));
}

static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
        bool taken = false;

        preempt_disable();

        /* sem->wait_lock should not be held when doing optimistic spinning */
        if (!rwsem_can_spin_on_owner(sem))
                goto done;

        if (!osq_lock(&sem->osq))
                goto done;

        /*
         * Optimistically spin on the owner field and attempt to acquire the
         * lock whenever the owner changes. Spinning will be stopped when:
         *  1) the owning writer isn't running; or
         *  2) readers own the lock as we can't determine if they are
         *     actively running or not.
         */
        while (rwsem_spin_on_owner(sem)) {
                /*
                 * Try to acquire the lock
                 */
                if (rwsem_try_write_lock_unqueued(sem)) {
                        taken = true;
                        break;
                }

                /*
                 * When there's no owner, we might have preempted between the
                 * owner acquiring the lock and setting the owner field. If
                 * we're an RT task, that will live-lock because we won't let
                 * the owner complete.
                 */
                if (!sem->owner && (need_resched() || rt_task(current)))
                        break;

                /*
                 * The cpu_relax() call is a compiler barrier which forces
                 * everything in this loop to be re-loaded. We don't need
                 * memory barriers as we'll eventually observe the right
                 * values at the cost of a few extra spins.
                 */
                cpu_relax();
        }
        osq_unlock(&sem->osq);
done:
        preempt_enable();
        return taken;
}
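
/*
 * Illustrative sketch, not part of the original source: a stripped-down
 * version of rwsem_optimistic_spin() above, to make the osq_lock() /
 * osq_unlock() bracketing explicit. The OSQ admits only one task at a time
 * to spin on sem->owner; every other would-be spinner waits on its own
 * per-CPU OSQ node instead of hammering the shared count cacheline.
 */
#if 0
static bool example_optimistic_acquire(struct rw_semaphore *sem)
{
        bool taken = false;

        preempt_disable();                      /* the spinner must stay on this CPU */
        if (osq_lock(&sem->osq)) {              /* become the single active spinner */
                while (rwsem_spin_on_owner(sem)) {
                        if (rwsem_try_write_lock_unqueued(sem)) {
                                taken = true;
                                break;
                        }
                        cpu_relax();
                }
                osq_unlock(&sem->osq);
        }
        preempt_enable();
        return taken;
}
#endif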

/*
 * Return true if the rwsem has an active spinner
 */
static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
        return osq_is_locked(&sem->osq);
}

#else
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
        return false;
}

static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
        return false;
}
#endif

/*
 * Wait until we successfully acquire the write lock
 */
static inline struct rw_semaphore *
__rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
{
        long count;
        bool waiting = true; /* any queued threads before us */
        struct rwsem_waiter waiter;
        struct rw_semaphore *ret = sem;
        DEFINE_WAKE_Q(wake_q);

        /* undo write bias from down_write operation, stop active locking */
        count = atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, &sem->count);

        /* do optimistic spinning and steal lock if possible */
        if (rwsem_optimistic_spin(sem))
                return sem;

        /*
         * Optimistic spinning failed, proceed to the slowpath
         * and block until we can acquire the sem.
         */
        waiter.task = current;
        waiter.type = RWSEM_WAITING_FOR_WRITE;

        raw_spin_lock_irq(&sem->wait_lock);

        /* account for this before adding a new element to the list */
        if (list_empty(&sem->wait_list))
                waiting = false;

        list_add_tail(&waiter.list, &sem->wait_list);

        /* we're now waiting on the lock, but no longer actively locking */
        if (waiting) {
                count = atomic_long_read(&sem->count);

                /*
                 * If there were already threads queued before us and there
                 * are no active writers, the lock must be read owned; so we
                 * try to wake any readers that were queued ahead of us.
                 */
                if (count > RWSEM_WAITING_BIAS) {
                        __rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
                        /*
                         * The wakeup is normally called _after_ the wait_lock
                         * is released, but given that we are proactively waking
                         * readers we can deal with the wake_q overhead as it is
                         * similar to releasing and taking the wait_lock again
                         * for attempting rwsem_try_write_lock().
                         */
                        wake_up_q(&wake_q);

                        /*
                         * Reinitialize wake_q after use.
                         */
                        wake_q_init(&wake_q);
                }

        } else
                count = atomic_long_add_return(RWSEM_WAITING_BIAS, &sem->count);

        /* wait until we successfully acquire the lock */
        set_current_state(state);
        while (true) {
                if (rwsem_try_write_lock(count, sem))
                        break;
                raw_spin_unlock_irq(&sem->wait_lock);

                /* Block until there are no active lockers. */
                do {
                        if (signal_pending_state(state, current))
                                goto out_nolock;

                        schedule();
                        set_current_state(state);
                } while ((count = atomic_long_read(&sem->count)) & RWSEM_ACTIVE_MASK);

                raw_spin_lock_irq(&sem->wait_lock);
        }
        __set_current_state(TASK_RUNNING);
        list_del(&waiter.list);
        raw_spin_unlock_irq(&sem->wait_lock);

        return ret;

out_nolock:
        __set_current_state(TASK_RUNNING);
        raw_spin_lock_irq(&sem->wait_lock);
        list_del(&waiter.list);
        if (list_empty(&sem->wait_list))
                atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
        else
                __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
        raw_spin_unlock_irq(&sem->wait_lock);
        wake_up_q(&wake_q);

        return ERR_PTR(-EINTR);
}

__visible struct rw_semaphore * __sched
rwsem_down_write_failed(struct rw_semaphore *sem)
{
        return __rwsem_down_write_failed_common(sem, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed);

__visible struct rw_semaphore * __sched
rwsem_down_write_failed_killable(struct rw_semaphore *sem)
{
        return __rwsem_down_write_failed_common(sem, TASK_KILLABLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed_killable);
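
/*
 * Illustrative sketch, not part of the original source: the killable variant
 * above is what lets a hypothetical down_write_killable() caller back out
 * when a fatal signal arrives while it is queued on the rwsem.
 */
#if 0
static int example_write_killable(struct rw_semaphore *sem)
{
        if (down_write_killable(sem))
                return -EINTR;  /* the slowpath returned ERR_PTR(-EINTR) */

        /* ... modify shared state ... */

        up_write(sem);
        return 0;
}
#endif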

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
__visible
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
        unsigned long flags;
        DEFINE_WAKE_Q(wake_q);

        /*
         * If a spinner is present, it is not necessary to do the wakeup.
         * Try to do the wakeup only if the trylock succeeds, to minimize
         * spinlock contention which may introduce too much delay in the
         * unlock operation.
         *
         *    spinning writer                up_write/up_read caller
         *    ---------------                -----------------------
         * [S]   osq_unlock()                [L]   osq
         *       MB                                RMB
         * [RmW] rwsem_try_write_lock()      [RmW] spin_trylock(wait_lock)
         *
         * Here, it is important to make sure that there won't be a missed
         * wakeup while the rwsem is free and the only spinning writer goes
         * to sleep without taking the rwsem. Even when the spinning writer
         * is just going to break out of the waiting loop, it will still do
         * a trylock in rwsem_down_write_failed() before sleeping. IOW, if
         * rwsem_has_spinner() is true, it will guarantee at least one
         * trylock attempt on the rwsem later on.
         */
        if (rwsem_has_spinner(sem)) {
                /*
                 * The smp_rmb() here is to make sure that the spinner
                 * state is consulted before reading the wait_lock.
                 */
                smp_rmb();
                if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags))
                        return sem;
                goto locked;
        }
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
locked:

        if (!list_empty(&sem->wait_list))
                __rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
        wake_up_q(&wake_q);

        return sem;
}
EXPORT_SYMBOL(rwsem_wake);

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
__visible
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
        unsigned long flags;
        DEFINE_WAKE_Q(wake_q);

        raw_spin_lock_irqsave(&sem->wait_lock, flags);

        if (!list_empty(&sem->wait_list))
                __rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);

        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
        wake_up_q(&wake_q);

        return sem;
}
EXPORT_SYMBOL(rwsem_downgrade_wake);
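
/*
 * Illustrative sketch, not part of the original source: downgrade_write()
 * is the caller-visible operation behind rwsem_downgrade_wake(). It turns a
 * held write lock into a read lock without ever releasing the rwsem, waking
 * the readers queued behind the (former) writer.
 */
#if 0
static void example_downgrade(struct rw_semaphore *sem)
{
        down_write(sem);
        /* ... perform the exclusive part of the update ... */

        downgrade_write(sem);   /* slowpath: rwsem_downgrade_wake() */
        /* ... continue with read-only access; other readers may now enter ... */

        up_read(sem);
}
#endif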