/*
 * Out of line spinlock code.
 *
 * Copyright IBM Corp. 2004, 2006
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
#include <linux/types.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
int spin_retry = -1;

static int __init spin_retry_init(void)
{
	if (spin_retry < 0)
		spin_retry = 1000;
	return 0;
}
early_initcall(spin_retry_init);
/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
	spin_retry = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);
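/*
 * NIAI ("next instruction access intent") hints to the CPU how the next
 * instruction will access storage, which improves cache-line arbitration
 * on contended locks. It is emitted as a raw opcode (.long 0xb2fa00xx)
 * because older assemblers do not know the mnemonic.
 */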
static inline int arch_load_niai4(int *lock)
{
	int owner;

	asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
		"	.long	0xb2fa0040\n"	/* NIAI 4 */
#endif
		"	l	%0,%1\n"
		: "=d" (owner) : "Q" (*lock) : "memory");
	return owner;
}
static inline int arch_cmpxchg_niai8(int *lock, int old, int new)
{
	int expected = old;

	asm volatile(
#ifdef CONFIG_HAVE_MARCH_ZEC12_FEATURES
		"	.long	0xb2fa0080\n"	/* NIAI 8 */
#endif
		"	cs	%0,%3,%1\n"
		: "=d" (old), "=Q" (*lock)
		: "0" (old), "d" (new), "Q" (*lock)
		: "cc", "memory");
	return expected == old;
}
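/*
 * A held spinlock stores the bitwise complement of the owning CPU number
 * (SPINLOCK_LOCKVAL == ~cpu), so zero means "unlocked" and ~owner below
 * recovers the CPU number to pass to arch_vcpu_is_preempted() and
 * smp_yield_cpu().
 */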
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count;

	/* Pass the virtual CPU to the lock holder if it is not running */
	owner = arch_load_niai4(&lp->lock);
	if (owner && arch_vcpu_is_preempted(~owner))
		smp_yield_cpu(~owner);

	count = spin_retry;
	while (1) {
		owner = arch_load_niai4(&lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (arch_cmpxchg_niai8(&lp->lock, 0, cpu))
				return;
			continue;
		}
		if (count-- >= 0)
			continue;
		count = spin_retry;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner))
			smp_yield_cpu(~owner);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);
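/*
 * Variant used on the irqsave path: interrupts were disabled by the
 * caller, so re-enable them (by restoring the caller's flags) while
 * busy waiting and disable them again around each acquisition attempt.
 */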
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count;

	local_irq_restore(flags);

	/* Pass the virtual CPU to the lock holder if it is not running */
	owner = arch_load_niai4(&lp->lock);
	if (owner && arch_vcpu_is_preempted(~owner))
		smp_yield_cpu(~owner);

	count = spin_retry;
	while (1) {
		owner = arch_load_niai4(&lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			local_irq_disable();
			if (arch_cmpxchg_niai8(&lp->lock, 0, cpu))
				return;
			local_irq_restore(flags);
			continue;
		}
		if (count-- >= 0)
			continue;
		count = spin_retry;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner))
			smp_yield_cpu(~owner);
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
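/*
 * Bounded trylock: retry the compare-and-swap up to spin_retry times
 * without yielding; returns 1 on success, 0 if the lock stayed busy.
 */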
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
	int cpu = SPINLOCK_LOCKVAL;
	int owner, count;

	for (count = spin_retry; count > 0; count--) {
		owner = READ_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
				return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);
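/*
 * rwlock encoding: bit 31 of ->lock marks a writer (the value goes
 * negative), the lower 31 bits count the readers. ->owner records the
 * lock value of the writer, again as a complemented CPU number.
 */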
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int owner, old;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = READ_ONCE(rw->lock);
		owner = READ_ONCE(rw->owner);
		if (old < 0)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);
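/*
 * Bounded read trylock: bail out after spin_retry attempts instead of
 * waiting for a writer to go away.
 */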
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int old;

	while (count-- > 0) {
		old = READ_ONCE(rw->lock);
		if (old < 0)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);
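/*
 * Two implementations of the write-lock slow path: machines with the
 * interlocked-access facility (z196 and newer) set the writer bit with
 * load-and-or (__RAW_LOCK/__RAW_OP_OR), older machines fall back to a
 * compare-and-swap loop.
 */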
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
{
	int count = spin_retry;
	int owner, old;

	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = READ_ONCE(rw->lock);
		owner = READ_ONCE(rw->owner);
		smp_mb();
		if (old >= 0) {
			prev = __RAW_LOCK(&rw->lock, 0x80000000, __RAW_OP_OR);
			old = prev;
		}
		if ((old & 0x7fffffff) == 0 && prev >= 0)
			break;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */
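/* Fallback: set the writer bit with a compare-and-swap loop. */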
void _raw_write_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int owner, old, prev;

	prev = 0x80000000;
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = READ_ONCE(rw->lock);
		owner = READ_ONCE(rw->owner);
		if (old >= 0 &&
		    __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
			prev = old;
		else
			smp_mb();
		if ((old & 0x7fffffff) == 0 && prev >= 0)
			break;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */
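/*
 * Bounded write trylock: only succeeds if the lock is completely free
 * (no readers, no writer) within spin_retry attempts.
 */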
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int old;

	while (count-- > 0) {
		old = READ_ONCE(rw->lock);
		if (old)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);
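/*
 * Called from the arch_*_relax() macros with the current lock value,
 * i.e. the complemented CPU number of the holder, or zero if the lock
 * is free. Yield to the holder unless we run on LPAR and the holder's
 * virtual CPU is known to be running.
 */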
void arch_lock_relax(int cpu)
{
	if (!cpu)
		return;
	if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
		return;
	smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);