arch/s390/lib/spinlock.c
/*
 * Out of line spinlock code.
 *
 * Copyright IBM Corp. 2004, 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>

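/*
 * Number of lock polling iterations before the slow paths below try
 * to yield the CPU. The default of -1 is replaced at boot by
 * spin_retry_init(): a small count when compare-and-delay is
 * available, a large one otherwise.
 */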
int spin_retry = -1;

static int __init spin_retry_init(void)
{
        if (spin_retry < 0)
                spin_retry = MACHINE_HAS_CAD ? 10 : 1000;
        return 0;
}
early_initcall(spin_retry_init);

/*
 * Parse the "spin_retry=" kernel command-line parameter, e.g. booting
 * with "spin_retry=2000" overrides the boot-time default above.
 */
static int __init spin_retry_setup(char *str)
{
        spin_retry = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("spin_retry=", spin_retry_setup);

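/*
 * COMPARE AND DELAY (CAD): stall the CPU for a short, model-dependent
 * interval as long as the lock word still contains the expected old
 * value, throttling the polling loops below. Encoded with .insn so
 * that assemblers without the cad mnemonic can still build this file.
 */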
static inline void _raw_compare_and_delay(unsigned int *lock, unsigned int old)
{
        asm(".insn rsy,0xeb0000000022,%0,0,%1" : : "d" (old), "Q" (*lock));
}

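/*
 * Heuristic for "has the hypervisor preempted the CPU that holds the
 * lock?": a CPU in enabled wait is idle rather than preempted, and a
 * vcpu that is still scheduled is running. Callers pass ~owner
 * because a lock word stores the bitwise complement of the owning
 * CPU number (SPINLOCK_LOCKVAL), which is nonzero even for CPU 0.
 */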
static inline int cpu_is_preempted(int cpu)
{
        if (test_cpu_flag_of(CIF_ENABLED_WAIT, cpu))
                return 0;
        if (smp_vcpu_scheduled(cpu))
                return 0;
        return 1;
}

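/*
 * Slow path of arch_spin_lock(), entered once the inline fast path
 * in spinlock.h has failed: retry a free lock immediately, give the
 * owner one directed-yield hint via smp_yield_cpu() on first
 * contention, then poll the lock word (with compare-and-delay where
 * available) and yield again only if the owner looks preempted.
 */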
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
        unsigned int cpu = SPINLOCK_LOCKVAL;
        unsigned int owner;
        int count, first_diag;

        first_diag = 1;
        while (1) {
                owner = ACCESS_ONCE(lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        if (_raw_compare_and_swap(&lp->lock, 0, cpu))
                                return;
                        continue;
                }
                /* First iteration: check if the lock owner is running. */
                if (first_diag && cpu_is_preempted(~owner)) {
                        smp_yield_cpu(~owner);
                        first_diag = 0;
                        continue;
                }
                /* Loop for a while on the lock value. */
                count = spin_retry;
                do {
                        if (MACHINE_HAS_CAD)
                                _raw_compare_and_delay(&lp->lock, owner);
                        owner = ACCESS_ONCE(lp->lock);
                } while (owner && count-- > 0);
                if (!owner)
                        continue;
                /*
                 * For multiple layers of hypervisors, e.g. z/VM + LPAR
                 * yield the CPU unconditionally. For LPAR rely on the
                 * sense running status.
                 */
                if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
                        smp_yield_cpu(~owner);
                        first_diag = 0;
                }
        }
}
EXPORT_SYMBOL(arch_spin_lock_wait);

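/*
 * As arch_spin_lock_wait(), but for spin_lock_irqsave(): the caller's
 * interrupt state is restored while busy waiting and interrupts are
 * re-disabled only around the actual acquisition attempt, so that
 * interrupt latency stays bounded while the lock is contended.
 */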
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
        unsigned int cpu = SPINLOCK_LOCKVAL;
        unsigned int owner;
        int count, first_diag;

        local_irq_restore(flags);
        first_diag = 1;
        while (1) {
                owner = ACCESS_ONCE(lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        local_irq_disable();
                        if (_raw_compare_and_swap(&lp->lock, 0, cpu))
                                return;
                        local_irq_restore(flags);
                        continue;
                }
                /* Check if the lock owner is running. */
                if (first_diag && cpu_is_preempted(~owner)) {
                        smp_yield_cpu(~owner);
                        first_diag = 0;
                        continue;
                }
                /* Loop for a while on the lock value. */
                count = spin_retry;
                do {
                        if (MACHINE_HAS_CAD)
                                _raw_compare_and_delay(&lp->lock, owner);
                        owner = ACCESS_ONCE(lp->lock);
                } while (owner && count-- > 0);
                if (!owner)
                        continue;
                /*
                 * For multiple layers of hypervisors, e.g. z/VM + LPAR
                 * yield the CPU unconditionally. For LPAR rely on the
                 * sense running status.
                 */
                if (!MACHINE_IS_LPAR || cpu_is_preempted(~owner)) {
                        smp_yield_cpu(~owner);
                        first_diag = 0;
                }
        }
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);

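/*
 * Bounded trylock: up to spin_retry acquisition attempts, used by
 * arch_spin_trylock() after its single inline attempt has failed.
 */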
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
        unsigned int cpu = SPINLOCK_LOCKVAL;
        unsigned int owner;
        int count;

        for (count = spin_retry; count > 0; count--) {
                owner = ACCESS_ONCE(lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        if (_raw_compare_and_swap(&lp->lock, 0, cpu))
                                return 1;
                } else if (MACHINE_HAS_CAD)
                        _raw_compare_and_delay(&lp->lock, owner);
        }
        return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

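/*
 * rwlock layout: bit 2^31 of ->lock means write-locked, the lower 31
 * bits count the readers, so e.g. 0x00000003 is three readers,
 * 0x80000000 one writer, and a negative (int) value means a writer
 * holds or is acquiring the lock. ->owner caches the lock value of
 * the writing CPU for the directed-yield checks.
 */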
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
        unsigned int owner, old;
        int count = spin_retry;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
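        /*
         * The interlocked-access fast path in spinlock.h has already
         * added this reader to the count; back that out before waiting
         * for the writer to leave.
         */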
        __RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
        owner = 0;
        while (1) {
                if (count-- <= 0) {
                        if (owner && cpu_is_preempted(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                owner = ACCESS_ONCE(rw->owner);
                if ((int) old < 0) {
                        if (MACHINE_HAS_CAD)
                                _raw_compare_and_delay(&rw->lock, old);
                        continue;
                }
                if (_raw_compare_and_swap(&rw->lock, old, old + 1))
                        return;
        }
}
EXPORT_SYMBOL(_raw_read_lock_wait);

int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
        unsigned int old;
        int count = spin_retry;

        while (count-- > 0) {
                old = ACCESS_ONCE(rw->lock);
                if ((int) old < 0) {
                        if (MACHINE_HAS_CAD)
                                _raw_compare_and_delay(&rw->lock, old);
                        continue;
                }
                if (_raw_compare_and_swap(&rw->lock, old, old + 1))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES

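/*
 * Write-lock slow path for machines with the z196 interlocked-access
 * instructions: the fast path has already set the write bit with an
 * interlocked OR and passed in the previous lock value as "prev".
 * Keep re-setting the write bit if it gets cleared and wait until all
 * readers are gone and no earlier writer had the bit set before us.
 */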
void _raw_write_lock_wait(arch_rwlock_t *rw, unsigned int prev)
{
        unsigned int owner, old;
        int count = spin_retry;

        owner = 0;
        while (1) {
                if (count-- <= 0) {
                        if (owner && cpu_is_preempted(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                owner = ACCESS_ONCE(rw->owner);
                smp_mb();
                if ((int) old >= 0) {
                        prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
                        old = prev;
                }
                if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
                        break;
                if (MACHINE_HAS_CAD)
                        _raw_compare_and_delay(&rw->lock, old);
        }
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

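/*
 * Pre-z196 variant without interlocked OR: grab the write bit with
 * compare-and-swap as soon as no other writer holds it, then wait
 * for the reader count in the lower bits to drain to zero.
 */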
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
        unsigned int owner, old, prev;
        int count = spin_retry;

        prev = 0x80000000;
        owner = 0;
        while (1) {
                if (count-- <= 0) {
                        if (owner && cpu_is_preempted(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                owner = ACCESS_ONCE(rw->owner);
                if ((int) old >= 0 &&
                    _raw_compare_and_swap(&rw->lock, old, old | 0x80000000))
                        prev = old;
                else
                        smp_mb();
                if ((old & 0x7fffffff) == 0 && (int) prev >= 0)
                        break;
                if (MACHINE_HAS_CAD)
                        _raw_compare_and_delay(&rw->lock, old);
        }
}
EXPORT_SYMBOL(_raw_write_lock_wait);

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
        unsigned int old;
        int count = spin_retry;

        while (count-- > 0) {
                old = ACCESS_ONCE(rw->lock);
                if (old) {
                        if (MACHINE_HAS_CAD)
                                _raw_compare_and_delay(&rw->lock, old);
                        continue;
                }
                if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);

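/*
 * Called from the arch_*_relax() macros in spinlock.h with the lock
 * value of the current owner (~CPU number, or 0 if unknown): give
 * the owner a directed-yield hint unless we run on LPAR and the
 * owner's CPU is not preempted.
 */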
void arch_lock_relax(unsigned int cpu)
{
        if (!cpu)
                return;
        if (MACHINE_IS_LPAR && !cpu_is_preempted(~cpu))
                return;
        smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);