arch/s390/lib/spinlock.c
/*
 * Out of line spinlock code.
 *
 * Copyright IBM Corp. 2004, 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>

int spin_retry = -1;

static int __init spin_retry_init(void)
{
        if (spin_retry < 0)
                spin_retry = 1000;
        return 0;
}
early_initcall(spin_retry_init);

/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
        spin_retry = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("spin_retry=", spin_retry_setup);

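/*
 * The .long 0xb2fa00xx constants below encode the NIAI ("next
 * instruction access intent") instruction of the execution-hint
 * facility (zEC12 and newer). The immediate operand hints how the
 * next instruction will access its storage operand ("NIAI 4" before
 * a plain load, "NIAI 8" before a compare-and-swap), which reduces
 * cache-line bouncing between contending CPUs. The raw opcode is
 * used so the file also assembles with toolchains that do not know
 * the mnemonic; without CONFIG_HAVE_MARCH_ZEC12_FEATURES the hint
 * is omitted entirely.
 */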
static inline int arch_load_niai4(int *lock)
{
        int owner;

        asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
                "       .long   0xb2fa0040\n"   /* NIAI 4 */
#endif
                "       l       %0,%1\n"
                : "=d" (owner) : "Q" (*lock) : "memory");
        return owner;
}

static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
{
        int expected = old;

        asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
                "       .long   0xb2fa0080\n"   /* NIAI 8 */
#endif
                "       cs      %0,%3,%1\n"
                : "=d" (old), "=Q" (*lock)
                : "0" (old), "d" (new), "Q" (*lock)
                : "cc", "memory");
        return expected == old;
}

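/*
 * An unlocked arch_spinlock_t holds 0; a locked one holds the one's
 * complement of the owning CPU's number (SPINLOCK_LOCKVAL, set up via
 * arch_spin_lockval() as ~cpu). That is why ~owner below recovers the
 * CPU to which smp_yield_cpu() directs its hypervisor yield hint.
 */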
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
        int cpu = SPINLOCK_LOCKVAL;
        int owner, count;

        /* Pass the virtual CPU to the lock holder if it is not running */
        owner = arch_load_niai4(&lp->lock);
        if (owner && arch_vcpu_is_preempted(~owner))
                smp_yield_cpu(~owner);

        count = spin_retry;
        while (1) {
                owner = arch_load_niai4(&lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        if (arch_cmpxchg_niai8(&lp->lock, 0, cpu))
                                return;
                        continue;
                }
                if (count-- >= 0)
                        continue;
                count = spin_retry;
                /*
                 * For multiple layers of hypervisors, e.g. z/VM + LPAR,
                 * yield the CPU unconditionally. For LPAR rely on the
                 * sense running status.
                 */
                if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner))
                        smp_yield_cpu(~owner);
        }
}
EXPORT_SYMBOL(arch_spin_lock_wait);

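/*
 * Same slow path as arch_spin_lock_wait(), but for callers that took
 * the lock with interrupts disabled: interrupts are re-enabled (via
 * local_irq_restore(flags)) while spinning and disabled again right
 * before each acquisition attempt, so a contended lock does not keep
 * interrupts off for the whole wait.
 */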
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
        int cpu = SPINLOCK_LOCKVAL;
        int owner, count;

        local_irq_restore(flags);

        /* Pass the virtual CPU to the lock holder if it is not running */
        owner = arch_load_niai4(&lp->lock);
        if (owner && arch_vcpu_is_preempted(~owner))
                smp_yield_cpu(~owner);

        count = spin_retry;
        while (1) {
                owner = arch_load_niai4(&lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        local_irq_disable();
                        if (arch_cmpxchg_niai8(&lp->lock, 0, cpu))
                                return;
                        local_irq_restore(flags);
                        continue;
                }
                if (count-- >= 0)
                        continue;
                count = spin_retry;
                /*
                 * For multiple layers of hypervisors, e.g. z/VM + LPAR,
                 * yield the CPU unconditionally. For LPAR rely on the
                 * sense running status.
                 */
                if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner))
                        smp_yield_cpu(~owner);
        }
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);

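/*
 * Bounded out-of-line part of arch_spin_trylock(): poll the lock up to
 * spin_retry times without ever yielding, and give up if it stays busy.
 */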
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
        int cpu = SPINLOCK_LOCKVAL;
        int owner, count;

        for (count = spin_retry; count > 0; count--) {
                owner = READ_ONCE(lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
                                return 1;
                }
        }
        return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

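/*
 * rwlock layout: rw->lock is 0 when free, the bits 0x7fffffff hold the
 * reader count, and the sign bit 0x80000000 marks a writer. A negative
 * value therefore means "write-locked"; readers take the lock by
 * advancing the count from a non-negative value to value + 1.
 */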
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
        int count = spin_retry;
        int owner, old;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
        /*
         * Back out the reader count increment done by the interlocked-add
         * fast path (see arch_read_lock()) before spinning.
         */
        __RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
        owner = 0;
        while (1) {
                if (count-- <= 0) {
                        if (owner && arch_vcpu_is_preempted(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                old = READ_ONCE(rw->lock);
                owner = READ_ONCE(rw->owner);
                if (old < 0)
                        continue;
                if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
                        return;
        }
}
EXPORT_SYMBOL(_raw_read_lock_wait);

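/*
 * Bounded counterpart used by arch_read_trylock(): same acquisition
 * logic as above, but it gives up after spin_retry attempts instead
 * of yielding.
 */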
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
        int count = spin_retry;
        int old;

        while (count-- > 0) {
                old = READ_ONCE(rw->lock);
                if (old < 0)
                        continue;
                if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

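/*
 * On machines with the interlocked-access facility (z196 and newer)
 * the writer bit is set with an atomic load-and-or (__RAW_LOCK with
 * __RAW_OP_OR). Several contending writers may set the bit at the
 * same time; the one whose OR observed a non-negative previous value
 * (prev >= 0) owns the lock as soon as the reader count drains to
 * zero. The prev argument carries the result of the fast path's OR
 * (see arch_write_lock()).
 */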
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
{
        int count = spin_retry;
        int owner, old;

        owner = 0;
        while (1) {
                if (count-- <= 0) {
                        if (owner && arch_vcpu_is_preempted(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                old = READ_ONCE(rw->lock);
                owner = READ_ONCE(rw->owner);
                smp_mb();
                if (old >= 0) {
                        prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
                        old = prev;
                }
                if ((old & 0x7fffffff) == 0 && prev >= 0)
                        break;
        }
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

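/*
 * Pre-z196 fallback: without load-and-or the writer bit is set with a
 * compare-and-swap loop instead, and prev is tracked locally rather
 * than handed in by the fast path.
 */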
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
        int count = spin_retry;
        int owner, old, prev;

        prev = 0x80000000;
        owner = 0;
        while (1) {
                if (count-- <= 0) {
                        if (owner && arch_vcpu_is_preempted(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                old = READ_ONCE(rw->lock);
                owner = READ_ONCE(rw->owner);
                if (old >= 0 &&
                    __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
                        prev = old;
                else
                        smp_mb();
                if ((old & 0x7fffffff) == 0 && prev >= 0)
                        break;
        }
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

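/*
 * Bounded out-of-line part of arch_write_trylock(): a writer can only
 * take the lock when it is completely free, so any non-zero value
 * means retry or fail.
 */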
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
        int count = spin_retry;
        int old;

        while (count-- > 0) {
                old = READ_ONCE(rw->lock);
                if (old)
                        continue;
                if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);

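/*
 * Relax helper for the lock primitives: cpu is a lock value (i.e. the
 * one's complement of the holder's CPU number, or 0 if the lock was
 * free). Yield to the holder unless we run directly on an LPAR and
 * the holder is known to be running.
 */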
void arch_lock_relax(int cpu)
{
        if (!cpu)
                return;
        if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
                return;
        smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);