/*
 * Out of line spinlock code.
 *
 * Copyright IBM Corp. 2004, 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>

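/*
 * Number of loop iterations a spinning CPU performs before it considers
 * yielding to the lock owner. -1 means "auto": spin_retry_init() below
 * picks 10 on machines with the compare-and-delay facility and 1000
 * otherwise; the value can also be set with the spin_retry= parameter.
 */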
int spin_retry = -1;

static int __init spin_retry_init(void)
{
	if (spin_retry < 0)
		spin_retry = MACHINE_HAS_CAD ? 10 : 1000;
	return 0;
}
early_initcall(spin_retry_init);

/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);

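/*
 * Compare-and-delay: hint to the CPU that we are spinning on *lock and
 * that it may briefly stall while the lock word still matches the
 * expected value @old. Callers guard this with MACHINE_HAS_CAD, since
 * the instruction is not available on all machines.
 */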
static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
{
	asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
}

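/*
 * Spinlock slow path, entered once the inline compare-and-swap fast path
 * has failed. Retry the CAS whenever the lock becomes free, spin on the
 * lock word (with compare-and-delay where available), and yield to the
 * owning CPU when its virtual CPU appears to be preempted by the
 * hypervisor.
 */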
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count, first_diag;

	first_diag = 1;
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			continue;
		}
		/* First iteration: check if the lock owner is running. */
		if (first_diag && arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&lp->lock, owner);
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
		}
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);

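/*
 * Variant of arch_spin_lock_wait() for the _flags locking path: the
 * caller's saved interrupt state is restored while we spin, and
 * interrupts are disabled again right before the lock is actually taken.
 */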
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count, first_diag;

	local_irq_restore(flags);
	first_diag = 1;
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			local_irq_disable();
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return;
			local_irq_restore(flags);
			continue;
		}
		/* Check if the lock owner is running. */
		if (first_diag && arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
			continue;
		}
		/* Loop for a while on the lock value. */
		count = spin_retry;
		do {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&lp->lock, owner);
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
		}
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);

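/*
 * Bounded trylock: attempt the compare-and-swap up to spin_retry times
 * without ever yielding, returning 1 on success and 0 if the attempts
 * are exhausted.
 */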
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	unsigned int cpu = SPINLOCK_LOCKVAL;
	unsigned int owner;
	int count;

	for (count = spin_retry; count > 0; count--) {
		owner = READ_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
				return 1;
		} else if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&lp->lock, owner);
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

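/*
 * Read-lock slow path. With CONFIG_HAVE_MARCH_Z196_FEATURES the fast
 * path is expected to have bumped the reader count already, so the
 * __RAW_LOCK(-1) below backs that increment out first. Then wait until
 * no writer holds the lock (lock word non-negative) and add a reader
 * with a compare-and-swap, yielding to a preempted writer every
 * spin_retry iterations.
 */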
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	unsigned int owner, old;
	int count = spin_retry;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if ((int) old < 0) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);

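/*
 * Bounded read-trylock: retry up to spin_retry times while no writer is
 * active, returning 1 on success and 0 if the attempts are exhausted.
 */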
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if ((int) old < 0) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, old, old + 1))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

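/*
 * Write-lock slow path. Two implementations follow: machines with the
 * z196 interlocked-access facility (CONFIG_HAVE_MARCH_Z196_FEATURES) set
 * the writer bit 0x80000000 with an atomic OR and receive the previous
 * lock value in @prev from the fast path, while older machines fall back
 * to a compare-and-swap loop. In both cases the lock is taken once all
 * readers have drained and no other writer bit was set before ours.
 */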
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
{
	unsigned int owner, old;
	int count = spin_retry;

	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		smp_mb();
		if ((int) old >= 0) {
			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
			old = prev;
		}
		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
			break;
		if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&rw->lock, old);
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	unsigned int owner, old, prev;
	int count = spin_retry;

	prev = 0x80000000;
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		if ((int) old >= 0 &&
		    _raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
			prev = old;
		else
			smp_mb();
		if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
			break;
		if (MACHINE_HAS_CAD)
			_raw_compare_and_delay(&rw->lock, old);
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

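/*
 * Bounded write-trylock: up to spin_retry attempts to atomically move
 * the lock word from 0 (completely unlocked) to the writer bit
 * 0x80000000, returning 1 on success and 0 otherwise.
 */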
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (count-- > 0) {
		old = ACCESS_ONCE(rw->lock);
		if (old) {
			if (MACHINE_HAS_CAD)
				_raw_compare_and_delay(&rw->lock, old);
			continue;
		}
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);

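/*
 * Relax helper for contended locks: do nothing if the lock has no owner,
 * or on LPAR while the owning CPU is still running; otherwise yield the
 * CPU to the (possibly preempted) owner.
 */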
void arch_lock_relax(unsigned int cpu)
{
	if (!cpu)
		return;
	if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
		return;
	smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);