kernel/locking/rwsem-xadd.c
// SPDX-License-Identifier: GPL-2.0
/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 *
 * Writer lock-stealing by Alex Shi <alex.shi@intel.com>
 * and Michel Lespinasse <walken@google.com>
 *
 * Optimistic spinning by Tim Chen <tim.c.chen@intel.com>
 * and Davidlohr Bueso <davidlohr@hp.com>. Based on mutexes.
 */
#include <linux/rwsem.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/osq_lock.h>

#include "rwsem.h"

/*
 * Guide to the rw_semaphore's count field for common values.
 * (32-bit case illustrated, similar for 64-bit)
 *
 * 0x0000000X	(1) X readers active or attempting lock, no writer waiting
 *		    X = #active_readers + #readers attempting to lock
 *		    (X*ACTIVE_BIAS)
 *
 * 0x00000000	rwsem is unlocked, and no one is waiting for the lock or
 *		attempting to read lock or write lock.
 *
 * 0xffff000X	(1) X readers active or attempting lock, with waiters for lock
 *		    X = #active readers + #readers attempting lock
 *		    (X*ACTIVE_BIAS + WAITING_BIAS)
 *		(2) 1 writer attempting lock, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *		(3) 1 writer active, no waiters for lock
 *		    X-1 = #active readers + #readers attempting lock
 *		    ((X-1)*ACTIVE_BIAS + ACTIVE_WRITE_BIAS)
 *
 * 0xffff0001	(1) 1 reader active or attempting lock, waiters for lock
 *		    (WAITING_BIAS + ACTIVE_BIAS)
 *		(2) 1 writer active or attempting lock, no waiters for lock
 *		    (ACTIVE_WRITE_BIAS)
 *
 * 0xffff0000	(1) There are writers or readers queued but none active
 *		    or in the process of attempting lock.
 *		    (WAITING_BIAS)
 *		Note: a writer can attempt to steal the lock for this count by
 *		adding ACTIVE_WRITE_BIAS in a cmpxchg and checking the old count
 *
 * 0xfffe0001	(1) 1 writer active or attempting lock, waiters on queue.
 *		    (ACTIVE_WRITE_BIAS + WAITING_BIAS)
 *
 * Note: Readers attempt to lock by adding ACTIVE_BIAS in down_read and checking
 *	 that the count becomes more than 0 for successful lock acquisition,
 *	 i.e. the case where there are only readers or nobody has the lock.
 *	 (1st and 2nd case above).
 *
 *	 Writers attempt to lock by adding ACTIVE_WRITE_BIAS in down_write and
 *	 checking that the count becomes ACTIVE_WRITE_BIAS for successful lock
 *	 acquisition (i.e. nobody else has the lock or attempts the lock).  If
 *	 unsuccessful, in rwsem_down_write_failed, we'll check to see if there
 *	 are only waiters but none active (5th case above), and attempt to
 *	 steal the lock.
 *
 */
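
/*
 * Worked example (an editorial illustration, assuming the 32-bit bias values
 * implied above: ACTIVE_BIAS = 0x00000001, WAITING_BIAS = 0xffff0000,
 * ACTIVE_WRITE_BIAS = WAITING_BIAS + ACTIVE_BIAS = 0xffff0001):
 *
 *	down_read() on an unlocked rwsem:
 *		0x00000000 + ACTIVE_BIAS = 0x00000001 > 0	-> acquired
 *
 *	down_read() while a writer holds the lock:
 *		0xffff0001 + ACTIVE_BIAS = 0xffff0002 < 0	-> slowpath
 *
 *	down_write() while another writer holds the lock:
 *		0xffff0001 + ACTIVE_WRITE_BIAS = 0xfffe0002 != ACTIVE_WRITE_BIAS
 *		-> slowpath, where the write bias is undone and, as the first
 *		waiter, WAITING_BIAS is added, leaving 0xfffe0001
 *		(writer active, waiters on queue).
 */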

/*
 * Initialize an rwsem:
 */
void __init_rwsem(struct rw_semaphore *sem, const char *name,
		  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/*
	 * Make sure we are not reinitializing a held semaphore:
	 */
	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
	lockdep_init_map(&sem->dep_map, name, key, 0);
#endif
	atomic_long_set(&sem->count, RWSEM_UNLOCKED_VALUE);
	raw_spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	sem->owner = NULL;
	osq_lock_init(&sem->osq);
#endif
}

EXPORT_SYMBOL(__init_rwsem);

enum rwsem_waiter_type {
	RWSEM_WAITING_FOR_WRITE,
	RWSEM_WAITING_FOR_READ
};

struct rwsem_waiter {
	struct list_head list;
	struct task_struct *task;
	enum rwsem_waiter_type type;
};

enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
	RWSEM_WAKE_READ_OWNED	/* Waker thread holds the read lock */
};

/*
 * handle the lock release when processes blocked on it can now run
 * - if we come here from up_xxxx(), then:
 *   - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
 *   - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
 *   - there must be someone on the queue
 * - the wait_lock must be held by the caller
 * - tasks are marked for wakeup, the caller must later invoke wake_up_q()
 *   to actually wake up the blocked task(s) and drop the reference count,
 *   preferably when the wait_lock is released
 * - woken process blocks are discarded from the list after having task zeroed
 * - writers are only marked woken if downgrading is false
 */
static void __rwsem_mark_wake(struct rw_semaphore *sem,
			      enum rwsem_wake_type wake_type,
			      struct wake_q_head *wake_q)
{
	struct rwsem_waiter *waiter, *tmp;
	long oldcount, woken = 0, adjustment = 0;

	/*
	 * Take a peek at the queue head waiter such that we can determine
	 * the wakeup(s) to perform.
	 */
	waiter = list_first_entry(&sem->wait_list, struct rwsem_waiter, list);

	if (waiter->type == RWSEM_WAITING_FOR_WRITE) {
		if (wake_type == RWSEM_WAKE_ANY) {
			/*
			 * Mark the writer at the front of the queue for
			 * wakeup. Until the task is actually awoken later
			 * by the caller, other writers are able to steal it.
			 * Readers, on the other hand, will block as they
			 * will notice the queued writer.
			 */
			wake_q_add(wake_q, waiter->task);
		}

		return;
	}

	/*
	 * Writers might steal the lock before we grant it to the next reader.
	 * We prefer to do the first reader grant before counting readers
	 * so we can bail out early if a writer stole the lock.
	 */
	if (wake_type != RWSEM_WAKE_READ_OWNED) {
		adjustment = RWSEM_ACTIVE_READ_BIAS;
 try_reader_grant:
		oldcount = atomic_long_fetch_add(adjustment, &sem->count);
		if (unlikely(oldcount < RWSEM_WAITING_BIAS)) {
			/*
			 * If the count is still less than RWSEM_WAITING_BIAS
			 * after removing the adjustment, it is assumed that
			 * a writer has stolen the lock. We have to undo our
			 * reader grant.
			 */
			if (atomic_long_add_return(-adjustment, &sem->count) <
			    RWSEM_WAITING_BIAS)
				return;

			/* Last active locker left. Retry waking readers. */
			goto try_reader_grant;
		}
		/*
		 * It is not really necessary to set it to reader-owned here,
		 * but it gives the spinners an early indication that the
		 * readers now have the lock.
		 */
		__rwsem_set_reader_owned(sem, waiter->task);
	}

	/*
	 * Grant read locks to all of the readers at the front of the queue.
	 * We know that woken will be at least 1 as we accounted for
	 * one above. Note we increment the 'active part' of the count by the
	 * number of readers before waking any processes up.
	 */
	list_for_each_entry_safe(waiter, tmp, &sem->wait_list, list) {
		struct task_struct *tsk;

		if (waiter->type == RWSEM_WAITING_FOR_WRITE)
			break;

		woken++;
		tsk = waiter->task;

		get_task_struct(tsk);
		list_del(&waiter->list);
		/*
		 * Ensure calling get_task_struct() before setting the reader
		 * waiter to nil such that rwsem_down_read_failed() cannot
		 * race with do_exit() by always holding a reference count
		 * to the task to wake up.
		 */
		smp_store_release(&waiter->task, NULL);
		/*
		 * Ensure issuing the wakeup (either by us or someone else)
		 * after setting the reader waiter to nil.
		 */
		wake_q_add_safe(wake_q, tsk);
	}

	adjustment = woken * RWSEM_ACTIVE_READ_BIAS - adjustment;
	if (list_empty(&sem->wait_list)) {
		/* hit end of list above */
		adjustment -= RWSEM_WAITING_BIAS;
	}

	if (adjustment)
		atomic_long_add(adjustment, &sem->count);
}
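
/*
 * Worked example for the adjustment above (editorial illustration, using the
 * same 32-bit bias values): suppose up_write() wakes two queued readers while
 * a writer still waits behind them.  One reader slot is granted up front
 * (adjustment = RWSEM_ACTIVE_READ_BIAS), the loop then counts woken = 2, so
 *
 *	adjustment = 2 * RWSEM_ACTIVE_READ_BIAS - RWSEM_ACTIVE_READ_BIAS = +1
 *
 * i.e. only the second reader's active bias still has to be added; the
 * waiting bias stays in place because the writer remains queued.  Had the
 * wait list been emptied, -RWSEM_WAITING_BIAS would also have been applied.
 */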

/*
 * Wait for the read lock to be granted
 */
static inline struct rw_semaphore __sched *
__rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
{
	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
	struct rwsem_waiter waiter;
	DEFINE_WAKE_Q(wake_q);

	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_READ;

	raw_spin_lock_irq(&sem->wait_lock);
	if (list_empty(&sem->wait_list)) {
		/*
		 * In case the wait queue is empty and the lock isn't owned
		 * by a writer, this reader can exit the slowpath and return
		 * immediately as its RWSEM_ACTIVE_READ_BIAS has already
		 * been set in the count.
		 */
		if (atomic_long_read(&sem->count) >= 0) {
			raw_spin_unlock_irq(&sem->wait_lock);
			return sem;
		}
		adjustment += RWSEM_WAITING_BIAS;
	}
	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	count = atomic_long_add_return(adjustment, &sem->count);

	/*
	 * If there are no active locks, wake the front queued process(es).
	 *
	 * If there are no writers and we are first in the queue,
	 * wake our own waiter to join the existing active readers !
	 */
	if (count == RWSEM_WAITING_BIAS ||
	    (count > RWSEM_WAITING_BIAS &&
	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);

	/* wait to be given the lock */
	while (true) {
		set_current_state(state);
		if (!waiter.task)
			break;
		if (signal_pending_state(state, current)) {
			raw_spin_lock_irq(&sem->wait_lock);
			if (waiter.task)
				goto out_nolock;
			raw_spin_unlock_irq(&sem->wait_lock);
			break;
		}
		schedule();
	}

	__set_current_state(TASK_RUNNING);
	return sem;
out_nolock:
	list_del(&waiter.list);
	if (list_empty(&sem->wait_list))
		atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
	raw_spin_unlock_irq(&sem->wait_lock);
	__set_current_state(TASK_RUNNING);
	return ERR_PTR(-EINTR);
}
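
/*
 * For context: the down_read() fast path that leads into the slowpath above
 * lives in the rwsem header rather than in this file.  A rough sketch (an
 * approximation, not a verbatim copy of that header) of the hand-off:
 *
 *	static inline void __down_read(struct rw_semaphore *sem)
 *	{
 *		if (unlikely(atomic_long_inc_return_acquire(&sem->count) <= 0))
 *			rwsem_down_read_failed(sem);
 *	}
 *
 * The reader optimistically adds its ACTIVE_READ_BIAS first; only when the
 * resulting count shows a writer does it fall back to the slowpath.
 */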

__visible struct rw_semaphore * __sched
rwsem_down_read_failed(struct rw_semaphore *sem)
{
	return __rwsem_down_read_failed_common(sem, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(rwsem_down_read_failed);

__visible struct rw_semaphore * __sched
rwsem_down_read_failed_killable(struct rw_semaphore *sem)
{
	return __rwsem_down_read_failed_common(sem, TASK_KILLABLE);
}
EXPORT_SYMBOL(rwsem_down_read_failed_killable);

/*
 * This function must be called with the sem->wait_lock held to prevent
 * race conditions between checking the rwsem wait list and setting the
 * sem->count accordingly.
 */
static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
{
	/*
	 * Avoid trying to acquire write lock if count isn't RWSEM_WAITING_BIAS.
	 */
	if (count != RWSEM_WAITING_BIAS)
		return false;

	/*
	 * Acquire the lock by trying to set it to ACTIVE_WRITE_BIAS. If there
	 * are other tasks on the wait list, we need to add on WAITING_BIAS.
	 */
	count = list_is_singular(&sem->wait_list) ?
			RWSEM_ACTIVE_WRITE_BIAS :
			RWSEM_ACTIVE_WRITE_BIAS + RWSEM_WAITING_BIAS;

	if (atomic_long_cmpxchg_acquire(&sem->count, RWSEM_WAITING_BIAS, count)
							== RWSEM_WAITING_BIAS) {
		rwsem_set_owner(sem);
		return true;
	}

	return false;
}
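
/*
 * Worked example (editorial illustration, 32-bit biases again): the cmpxchg
 * above only fires when count == RWSEM_WAITING_BIAS (0xffff0000, waiters
 * queued but nothing active).  If this writer is the only waiter, it installs
 * RWSEM_ACTIVE_WRITE_BIAS (0xffff0001, "1 writer active, no waiters");
 * otherwise it installs ACTIVE_WRITE_BIAS + WAITING_BIAS (0xfffe0001,
 * "1 writer active, waiters on queue") so the waiting bias is preserved for
 * the tasks still parked behind it.
 */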

#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
/*
 * Try to acquire write lock before the writer has been put on wait queue.
 */
static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
{
	long old, count = atomic_long_read(&sem->count);

	while (true) {
		if (!(count == 0 || count == RWSEM_WAITING_BIAS))
			return false;

		old = atomic_long_cmpxchg_acquire(&sem->count, count,
				      count + RWSEM_ACTIVE_WRITE_BIAS);
		if (old == count) {
			rwsem_set_owner(sem);
			return true;
		}

		count = old;
	}
}

static inline bool owner_on_cpu(struct task_struct *owner)
{
	/*
	 * Guard against lock holder preemption: skip spinning if the owner
	 * task is not currently running on a CPU, or if its CPU has been
	 * preempted (vcpu_is_preempted()).
	 */
	return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
}

static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner;
	bool ret = true;

	BUILD_BUG_ON(!rwsem_has_anonymous_owner(RWSEM_OWNER_UNKNOWN));

	if (need_resched())
		return false;

	rcu_read_lock();
	owner = READ_ONCE(sem->owner);
	if (owner) {
		ret = is_rwsem_owner_spinnable(owner) &&
		      owner_on_cpu(owner);
	}
	rcu_read_unlock();
	return ret;
}

/*
 * Return true only if we can still spin on the owner field of the rwsem.
 */
static noinline bool rwsem_spin_on_owner(struct rw_semaphore *sem)
{
	struct task_struct *owner = READ_ONCE(sem->owner);

	if (!is_rwsem_owner_spinnable(owner))
		return false;

	rcu_read_lock();
	while (owner && (READ_ONCE(sem->owner) == owner)) {
		/*
		 * Ensure we emit the owner->on_cpu dereference _after_
		 * checking that sem->owner still matches owner.  If that
		 * fails, owner might point to free()d memory; if it still
		 * matches, the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		/*
		 * Abort spinning when need_resched() is set, the owner is not
		 * running, or the owner's cpu is preempted.
		 */
		if (need_resched() || !owner_on_cpu(owner)) {
			rcu_read_unlock();
			return false;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	/*
	 * If there is a new owner or the owner is not set, we continue
	 * spinning.
	 */
	return is_rwsem_owner_spinnable(READ_ONCE(sem->owner));
}

static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	bool taken = false;

	preempt_disable();

	/* sem->wait_lock should not be held when doing optimistic spinning */
	if (!rwsem_can_spin_on_owner(sem))
		goto done;

	if (!osq_lock(&sem->osq))
		goto done;

	/*
	 * Optimistically spin on the owner field and attempt to acquire the
	 * lock whenever the owner changes. Spinning will be stopped when:
	 *  1) the owning writer isn't running; or
	 *  2) readers own the lock as we can't determine if they are
	 *     actively running or not.
	 */
	while (rwsem_spin_on_owner(sem)) {
		/*
		 * Try to acquire the lock
		 */
		if (rwsem_try_write_lock_unqueued(sem)) {
			taken = true;
			break;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task, that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!sem->owner && (need_resched() || rt_task(current)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}
	osq_unlock(&sem->osq);
done:
	preempt_enable();
	return taken;
}

/*
 * Return true if the rwsem has an active spinner
 */
static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
	return osq_is_locked(&sem->osq);
}

#else
static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
{
	return false;
}

static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
{
	return false;
}
#endif

/*
 * Wait until we successfully acquire the write lock
 */
static inline struct rw_semaphore *
__rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
{
	long count;
	bool waiting = true; /* any queued threads before us */
	struct rwsem_waiter waiter;
	struct rw_semaphore *ret = sem;
	DEFINE_WAKE_Q(wake_q);

	/* undo write bias from down_write operation, stop active locking */
	count = atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, &sem->count);

	/* do optimistic spinning and steal lock if possible */
	if (rwsem_optimistic_spin(sem))
		return sem;

	/*
	 * Optimistic spinning failed, proceed to the slowpath
	 * and block until we can acquire the sem.
	 */
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_WRITE;

	raw_spin_lock_irq(&sem->wait_lock);

	/* account for this before adding a new element to the list */
	if (list_empty(&sem->wait_list))
		waiting = false;

	list_add_tail(&waiter.list, &sem->wait_list);

	/* we're now waiting on the lock, but no longer actively locking */
	if (waiting) {
		count = atomic_long_read(&sem->count);

		/*
		 * If there were already threads queued before us and there are
		 * no active writers, the lock must be read owned; so we try to
		 * wake any readers that were queued ahead of us.
		 */
		if (count > RWSEM_WAITING_BIAS) {
			__rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
			/*
			 * The wakeup is normally called _after_ the wait_lock
			 * is released, but given that we are proactively waking
			 * readers we can deal with the wake_q overhead as it is
			 * similar to releasing and taking the wait_lock again
			 * for attempting rwsem_try_write_lock().
			 */
			wake_up_q(&wake_q);

			/*
			 * Reinitialize wake_q after use.
			 */
			wake_q_init(&wake_q);
		}

	} else
		count = atomic_long_add_return(RWSEM_WAITING_BIAS, &sem->count);

	/* wait until we successfully acquire the lock */
	set_current_state(state);
	while (true) {
		if (rwsem_try_write_lock(count, sem))
			break;
		raw_spin_unlock_irq(&sem->wait_lock);

		/* Block until there are no active lockers. */
		do {
			if (signal_pending_state(state, current))
				goto out_nolock;

			schedule();
			set_current_state(state);
		} while ((count = atomic_long_read(&sem->count)) & RWSEM_ACTIVE_MASK);

		raw_spin_lock_irq(&sem->wait_lock);
	}
	__set_current_state(TASK_RUNNING);
	list_del(&waiter.list);
	raw_spin_unlock_irq(&sem->wait_lock);

	return ret;

out_nolock:
	__set_current_state(TASK_RUNNING);
	raw_spin_lock_irq(&sem->wait_lock);
	list_del(&waiter.list);
	if (list_empty(&sem->wait_list))
		atomic_long_add(-RWSEM_WAITING_BIAS, &sem->count);
	else
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
	raw_spin_unlock_irq(&sem->wait_lock);
	wake_up_q(&wake_q);

	return ERR_PTR(-EINTR);
}

__visible struct rw_semaphore * __sched
rwsem_down_write_failed(struct rw_semaphore *sem)
{
	return __rwsem_down_write_failed_common(sem, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed);

__visible struct rw_semaphore * __sched
rwsem_down_write_failed_killable(struct rw_semaphore *sem)
{
	return __rwsem_down_write_failed_common(sem, TASK_KILLABLE);
}
EXPORT_SYMBOL(rwsem_down_write_failed_killable);
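
/*
 * As on the read side, the down_write() fast path lives in the rwsem header.
 * A rough sketch (an approximation, not a verbatim copy) of how it reaches
 * the slowpath above, matching the guide comment at the top of this file:
 *
 *	static inline void __down_write(struct rw_semaphore *sem)
 *	{
 *		long tmp;
 *
 *		tmp = atomic_long_add_return_acquire(RWSEM_ACTIVE_WRITE_BIAS,
 *						     &sem->count);
 *		if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
 *			rwsem_down_write_failed(sem);
 *	}
 *
 * The writer acquires the lock only if it was the sole party to touch the
 * count; any other value means readers, another writer or waiters are
 * present and the slowpath has to sort it out.
 */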

/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of count if we come here
 */
__visible
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	/*
	 * __rwsem_down_write_failed_common(sem)
	 *   rwsem_optimistic_spin(sem)
	 *     osq_unlock(sem->osq)
	 *   ...
	 *   atomic_long_add_return(&sem->count)
	 *
	 *      - VS -
	 *
	 *              __up_write()
	 *                if (atomic_long_sub_return_release(&sem->count) < 0)
	 *                  rwsem_wake(sem)
	 *                    osq_is_locked(&sem->osq)
	 *
	 * And __up_write() must observe !osq_is_locked() when it observes the
	 * atomic_long_add_return() in order to not miss a wakeup.
	 *
	 * This boils down to:
	 *
	 * [S.rel] X = 1                [RmW] r0 = (Y += 0)
	 *         MB                         RMB
	 * [RmW]   Y += 1               [L]   r1 = X
	 *
	 * exists (r0=1 /\ r1=0)
	 */
	smp_rmb();

	/*
	 * If a spinner is present, it is not necessary to do the wakeup.
	 * Try to do wakeup only if the trylock succeeds to minimize
	 * spinlock contention which may introduce too much delay in the
	 * unlock operation.
	 *
	 *    spinning writer           up_write/up_read caller
	 *    ---------------           -----------------------
	 * [S]   osq_unlock()           [L]   osq
	 *       MB                           RMB
	 * [RmW] rwsem_try_write_lock() [RmW] spin_trylock(wait_lock)
	 *
	 * Here, it is important to make sure that there won't be a missed
	 * wakeup while the rwsem is free and the only spinning writer goes
	 * to sleep without taking the rwsem. Even when the spinning writer
	 * is just going to break out of the waiting loop, it will still do
	 * a trylock in rwsem_down_write_failed() before sleeping. IOW, if
	 * rwsem_has_spinner() is true, it will guarantee at least one
	 * trylock attempt on the rwsem later on.
	 */
	if (rwsem_has_spinner(sem)) {
		/*
		 * The smp_rmb() here is to make sure that the spinner
		 * state is consulted before reading the wait_lock.
		 */
		smp_rmb();
		if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags))
			return sem;
		goto locked;
	}
	raw_spin_lock_irqsave(&sem->wait_lock, flags);
locked:

	if (!list_empty(&sem->wait_list))
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}
EXPORT_SYMBOL(rwsem_wake);

/*
 * downgrade a write lock into a read lock
 * - caller incremented waiting part of count and discovered it still negative
 * - just wake up any readers at the front of the queue
 */
__visible
struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
{
	unsigned long flags;
	DEFINE_WAKE_Q(wake_q);

	raw_spin_lock_irqsave(&sem->wait_lock, flags);

	if (!list_empty(&sem->wait_list))
		__rwsem_mark_wake(sem, RWSEM_WAKE_READ_OWNED, &wake_q);

	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
	wake_up_q(&wake_q);

	return sem;
}
EXPORT_SYMBOL(rwsem_downgrade_wake);