kernel/locking/qrwlock.c
/*
 * Queued read/write locks
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2014 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
#include <linux/smp.h>
#include <linux/bug.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <asm/qrwlock.h>

/*
 * This internal data structure is used for optimizing access to some of
 * the subfields within the atomic_t cnts.
 */
struct __qrwlock {
	union {
		atomic_t cnts;
		struct {
#ifdef __LITTLE_ENDIAN
			u8 wmode;	/* Writer mode   */
			u8 rcnts[3];	/* Reader counts */
#else
			u8 rcnts[3];	/* Reader counts */
			u8 wmode;	/* Writer mode   */
#endif
		};
	};
	arch_spinlock_t	lock;
};

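/*
 * Layout sketch of the lock word (constant values as defined in
 * include/asm-generic/qrwlock.h; check your tree):
 *
 *	bits  7-0 (wmode): 0x00 = no writer, _QW_WAITING (0x01) = a
 *			   writer is waiting, _QW_LOCKED (0xff) = a
 *			   writer holds the lock
 *	bits 31-8 (rcnts): reader count, adjusted in units of _QR_BIAS
 *			   (1 << _QR_SHIFT, i.e. 256)
 */
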
/**
 * rspin_until_writer_unlock - spin until the writer releases the lock
 * @lock : Pointer to queue rwlock structure
 * @cnts : Current queue rwlock lock value
 *
 * In interrupt context or at the head of the queue, the reader (whose
 * count has already been incremented by the caller) just spins until
 * the writer releases the lock.
 */
static __always_inline void
rspin_until_writer_unlock(struct qrwlock *lock, u32 cnts)
{
	while ((cnts & _QW_WMASK) == _QW_LOCKED) {
		cpu_relax();
		cnts = atomic_read_acquire(&lock->cnts);
	}
}
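/*
 * Note: the acquire in atomic_read_acquire() above pairs with the
 * release store done by queued_write_unlock(), so a reader that
 * observes the writer gone also observes the writer's critical section.
 */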

/**
 * queued_read_lock_slowpath - acquire read lock of a queue rwlock
 * @lock: Pointer to queue rwlock structure
 * @cnts: Current qrwlock lock value
 */
void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
{
	/*
	 * Readers come here when they cannot get the lock without waiting
	 */
	if (unlikely(in_interrupt())) {
		/*
		 * Readers in interrupt context will get the lock immediately
		 * if the writer is just waiting (not holding the lock yet).
		 * The rspin_until_writer_unlock() function returns immediately
		 * in this case. Otherwise, they will spin (with ACQUIRE
		 * semantics) until the lock is available without waiting in
		 * the queue.
		 */
		rspin_until_writer_unlock(lock, cnts);
		return;
	}
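	/*
	 * Undo the speculative reader-count increment that the
	 * queued_read_lock() fast path performed before calling in here.
	 */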
	atomic_sub(_QR_BIAS, &lock->cnts);

	/*
	 * Put the reader into the wait queue
	 */
	arch_spin_lock(&lock->wait_lock);

	/*
	 * The ACQUIRE semantics of the following spinning code ensure
	 * that accesses can't leak upwards out of our subsequent critical
	 * section in the case that the lock is currently held for write.
	 */
	cnts = atomic_fetch_add_acquire(_QR_BIAS, &lock->cnts);
	rspin_until_writer_unlock(lock, cnts);

	/*
	 * Signal the next one in queue to become queue head
	 */
	arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_read_lock_slowpath);
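
/*
 * For context: the read-lock fast path that funnels into the slowpath
 * above lives in include/asm-generic/qrwlock.h and looks roughly like
 * this (a sketch, not the verbatim header code):
 *
 *	static inline void queued_read_lock(struct qrwlock *lock)
 *	{
 *		u32 cnts;
 *
 *		cnts = atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
 *		if (likely(!(cnts & _QW_WMASK)))
 *			return;
 *
 *		// The slowpath will decrement the reader count, if necessary
 *		queued_read_lock_slowpath(lock, cnts);
 *	}
 */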

/**
 * queued_write_lock_slowpath - acquire write lock of a queue rwlock
 * @lock : Pointer to queue rwlock structure
 */
void queued_write_lock_slowpath(struct qrwlock *lock)
{
	u32 cnts;

	/* Put the writer into the wait queue */
	arch_spin_lock(&lock->wait_lock);

	/* Try to acquire the lock directly if no reader is present */
	if (!atomic_read(&lock->cnts) &&
	    (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0))
		goto unlock;

	/*
	 * Set the waiting flag to notify readers that a writer is pending,
	 * or wait for a previous writer to go away.
	 */
	for (;;) {
		struct __qrwlock *l = (struct __qrwlock *)lock;

		if (!READ_ONCE(l->wmode) &&
		    (cmpxchg_relaxed(&l->wmode, 0, _QW_WAITING) == 0))
			break;

		cpu_relax();
	}
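
	/*
	 * Note: the relaxed cmpxchg above only needs to publish the
	 * _QW_WAITING byte; the ACQUIRE ordering for the upcoming write
	 * critical section is provided by the successful acquire cmpxchg
	 * in the loop below.
	 */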

	/* When no more readers, set the locked flag */
	for (;;) {
		cnts = atomic_read(&lock->cnts);
		if ((cnts == _QW_WAITING) &&
		    (atomic_cmpxchg_acquire(&lock->cnts, _QW_WAITING,
					    _QW_LOCKED) == _QW_WAITING))
			break;

		cpu_relax();
	}
unlock:
	arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_write_lock_slowpath);
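
/*
 * For context: the corresponding write-lock fast path in
 * include/asm-generic/qrwlock.h looks roughly like this (a sketch, not
 * the verbatim header code):
 *
 *	static inline void queued_write_lock(struct qrwlock *lock)
 *	{
 *		// Optimize for the unfair lock case where the fair flag is 0
 *		if (atomic_cmpxchg_acquire(&lock->cnts, 0, _QW_LOCKED) == 0)
 *			return;
 *
 *		queued_write_lock_slowpath(lock);
 *	}
 */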