arch/s390/lib/spinlock.c
/*
 * Out of line spinlock code.
 *
 * Copyright IBM Corp. 2004, 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>

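/*
 * Number of loop iterations the out-of-line lock code spins before it
 * considers yielding the virtual CPU. A negative value means "not set";
 * the early initcall below then picks the default of 1000 unless the
 * spin_retry= kernel parameter overrides it.
 */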
int spin_retry = -1;

static int __init spin_retry_init(void)
{
        if (spin_retry < 0)
                spin_retry = 1000;
        return 0;
}
early_initcall(spin_retry_init);

/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
        spin_retry = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("spin_retry=", spin_retry_setup);

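/*
 * Load the lock word. On machines with the zEC12 facilities the load is
 * preceded by a NIAI (next-instruction-access-intent) hint, emitted as a
 * raw opcode (0xb2fa), marking the following lock access as read-only so
 * the CPU can avoid fetching the cache line exclusively.
 */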
static inline int arch_load_niai4(int *lock)
{
        int owner;

        asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
                "       .long   0xb2fa0040\n"  /* NIAI 4 */
#endif
                "       l       %0,%1\n"
                : "=d" (owner) : "Q" (*lock) : "memory");
        return owner;
}

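/*
 * Try to change the lock word from old to new with a compare-and-swap (cs).
 * On zEC12+ machines the instruction is preceded by a NIAI 8 hint that
 * announces the upcoming exclusive update of the lock word. Returns nonzero
 * if the swap succeeded, i.e. the lock still contained the expected old
 * value.
 */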
static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
{
        int expected = old;

        asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
                "       .long   0xb2fa0080\n"  /* NIAI 8 */
#endif
                "       cs      %0,%3,%1\n"
                : "=d" (old), "=Q" (*lock)
                : "0" (old), "d" (new), "Q" (*lock)
                : "cc", "memory");
        return expected == old;
}

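/*
 * Out-of-line slow path of arch_spin_lock(). The lock word contains the
 * owner's per-CPU lock value; ~owner recovers the owning CPU number used
 * for arch_vcpu_is_preempted() and smp_yield_cpu(). Spin on a hinted load,
 * attempt the compare-and-swap only while the lock looks free, and after
 * spin_retry unsuccessful rounds yield to the owner: unconditionally when
 * running under an additional hypervisor layer (e.g. z/VM), on LPAR only
 * if the owning virtual CPU is not currently running.
 */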
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
        int cpu = SPINLOCK_LOCKVAL;
        int owner, count;

        /* Pass the virtual CPU to the lock holder if it is not running */
        owner = arch_load_niai4(&lp->lock);
        if (owner && arch_vcpu_is_preempted(~owner))
                smp_yield_cpu(~owner);

        count = spin_retry;
        while (1) {
                owner = arch_load_niai4(&lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        if (arch_cmpxchg_niai8(&lp->lock, 0, cpu))
                                return;
                        continue;
                }
                if (count-- >= 0)
                        continue;
                count = spin_retry;
                /*
                 * For multiple layers of hypervisors, e.g. z/VM + LPAR
                 * yield the CPU unconditionally. For LPAR rely on the
                 * sense running status.
                 */
                if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner))
                        smp_yield_cpu(~owner);
        }
}
EXPORT_SYMBOL(arch_spin_lock_wait);

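/*
 * Same as arch_spin_lock_wait(), but used on the irqsave path: interrupts
 * are re-enabled (local_irq_restore(flags)) while spinning and disabled
 * again right before the compare-and-swap that actually takes the lock.
 */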
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
        int cpu = SPINLOCK_LOCKVAL;
        int owner, count;

        local_irq_restore(flags);

        /* Pass the virtual CPU to the lock holder if it is not running */
        owner = arch_load_niai4(&lp->lock);
        if (owner && arch_vcpu_is_preempted(~owner))
                smp_yield_cpu(~owner);

        count = spin_retry;
        while (1) {
                owner = arch_load_niai4(&lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        local_irq_disable();
                        if (arch_cmpxchg_niai8(&lp->lock, 0, cpu))
                                return;
                        local_irq_restore(flags);
                        continue;
                }
                if (count-- >= 0)
                        continue;
                count = spin_retry;
                /*
                 * For multiple layers of hypervisors, e.g. z/VM + LPAR
                 * yield the CPU unconditionally. For LPAR rely on the
                 * sense running status.
                 */
                if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner))
                        smp_yield_cpu(~owner);
        }
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);

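/*
 * Retry loop behind spin_trylock(): make up to spin_retry attempts to grab
 * the lock while it is free. Returns 1 on success, 0 if the lock remained
 * contended.
 */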
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
        int cpu = SPINLOCK_LOCKVAL;
        int owner, count;

        for (count = spin_retry; count > 0; count--) {
                owner = READ_ONCE(lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
                                return 1;
                }
        }
        return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

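/*
 * Slow path of read_lock(). The most significant bit of rw->lock is the
 * writer bit, the lower 31 bits count the readers. Spin until no writer
 * holds the lock, then take a reader reference by atomically incrementing
 * the count; every spin_retry rounds yield to the CPU recorded in rw->owner
 * if its virtual CPU is not running. On z196+ the interlocked add of -1
 * first backs out the optimistic reader increment done by the inline fast
 * path.
 */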
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
        int count = spin_retry;
        int owner, old;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
        __RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
        owner = 0;
        while (1) {
                if (count-- <= 0) {
                        if (owner && arch_vcpu_is_preempted(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                owner = ACCESS_ONCE(rw->owner);
                if (old < 0)
                        continue;
                if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
                        return;
        }
}
EXPORT_SYMBOL(_raw_read_lock_wait);

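/*
 * read_trylock() retry loop: up to spin_retry attempts to bump the reader
 * count, skipping iterations in which a writer holds the lock. Returns 1 on
 * success, 0 otherwise.
 */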
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
        int count = spin_retry;
        int old;

        while (count-- > 0) {
                old = ACCESS_ONCE(rw->lock);
                if (old < 0)
                        continue;
                if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

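/*
 * Slow path of write_lock() for machines with the interlocked-access
 * facility. prev is the previous lock value passed in by the caller, which
 * set the writer bit with an interlocked OR. Spin until the reader count
 * has drained and no other writer owned the bit before us (prev >= 0),
 * re-issuing the interlocked OR whenever the writer bit was released in
 * the meantime; yield to the CPU recorded in rw->owner if it is not
 * running.
 */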
void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
{
        int count = spin_retry;
        int owner, old;

        owner = 0;
        while (1) {
                if (count-- <= 0) {
                        if (owner && arch_vcpu_is_preempted(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                owner = ACCESS_ONCE(rw->owner);
                smp_mb();
                if (old >= 0) {
                        prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
                        old = prev;
                }
                if ((old & 0x7fffffff) == 0 && prev >= 0)
                        break;
        }
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

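/*
 * Variant for machines without the interlocked-access facility: the writer
 * bit is set with a compare-and-swap in this slow path. prev starts out as
 * "owned by another writer" and becomes the observed old value once we set
 * the bit ourselves; the loop completes when the reader count is zero and
 * we were the ones who set the writer bit.
 */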
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
        int count = spin_retry;
        int owner, old, prev;

        prev = 0x80000000;
        owner = 0;
        while (1) {
                if (count-- <= 0) {
                        if (owner && arch_vcpu_is_preempted(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                owner = ACCESS_ONCE(rw->owner);
                if (old >= 0 &&
                    __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
                        prev = old;
                else
                        smp_mb();
                if ((old & 0x7fffffff) == 0 && prev >= 0)
                        break;
        }
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

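/*
 * write_trylock() retry loop: up to spin_retry attempts to atomically set
 * the writer bit on a completely free lock. Returns 1 on success, 0 if the
 * lock stayed busy.
 */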
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
        int count = spin_retry;
        int old;

        while (count-- > 0) {
                old = ACCESS_ONCE(rw->lock);
                if (old)
                        continue;
                if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);

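/*
 * Relax helper called with a lock owner value while waiting for a lock.
 * Do nothing if there is no owner, or if we run on LPAR and the owning
 * virtual CPU is currently running; otherwise yield to that CPU.
 */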
void arch_lock_relax(int cpu)
{
        if (!cpu)
                return;
        if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
                return;
        smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);