]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - arch/s390/lib/spinlock.c
UBUNTU: Ubuntu-4.13.0-45.50
[mirror_ubuntu-artful-kernel.git] / arch / s390 / lib / spinlock.c
CommitLineData
951f22d5 1/*
951f22d5
MS
2 * Out of line spinlock code.
3 *
a53c8fab 4 * Copyright IBM Corp. 2004, 2006
951f22d5
MS
5 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
6 */
7
8#include <linux/types.h>
d3217967 9#include <linux/export.h>
951f22d5
MS
10#include <linux/spinlock.h>
11#include <linux/init.h>
8b646bd7 12#include <linux/smp.h>
951f22d5
MS
13#include <asm/io.h>
14
2c72a44e
MS
15int spin_retry = -1;
16
17static int __init spin_retry_init(void)
18{
19 if (spin_retry < 0)
b13de4b7 20 spin_retry = 1000;
2c72a44e
MS
21 return 0;
22}
23early_initcall(spin_retry_init);
951f22d5
MS
24
25/**
26 * spin_retry= parameter
27 */
28static int __init spin_retry_setup(char *str)
29{
30 spin_retry = simple_strtoul(str, &str, 0);
31 return 1;
32}
33__setup("spin_retry=", spin_retry_setup);
34
/*
 * Out-of-line slow path of the spinlock acquire: spins (and, under a
 * hypervisor, yields to a preempted lock owner) until the lock is taken.
 * Never returns without holding the lock.
 */
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
	int cpu = SPINLOCK_LOCKVAL;	/* value written to lp->lock to mark us as owner */
	int owner, count, first_diag;

	/* Allow one directed-yield attempt before falling back to plain spinning. */
	first_diag = 1;
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
				return;
			continue;
		}
		/*
		 * First iteration: check if the lock owner is running.
		 * NOTE(review): ~owner presumably maps the stored lock value
		 * back to the owner's CPU number (i.e. SPINLOCK_LOCKVAL is the
		 * bitwise complement of the CPU id) - confirm against the
		 * SPINLOCK_LOCKVAL definition in the spinlock header.
		 */
		if (first_diag && arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
			continue;
		}
		/* Loop for a while on the lock value before yielding again. */
		count = spin_retry;
		do {
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
		}
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait);
951f22d5 74
/*
 * Slow path of the irq-saving spinlock acquire.  Same strategy as
 * arch_spin_lock_wait(), but the caller's saved interrupt state is
 * restored while busy waiting and interrupts are disabled again just
 * before each acquisition attempt; on success interrupts stay disabled.
 */
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
	int cpu = SPINLOCK_LOCKVAL;	/* value written to lp->lock to mark us as owner */
	int owner, count, first_diag;

	/* Re-open the interrupt window (to the caller's saved state) while waiting. */
	local_irq_restore(flags);
	first_diag = 1;
	while (1) {
		owner = ACCESS_ONCE(lp->lock);
		/* Try to get the lock if it is free. */
		if (!owner) {
			local_irq_disable();
			if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
				return;	/* lock taken, irqs left disabled */
			/* Lost the race: re-open the interrupt window and retry. */
			local_irq_restore(flags);
			continue;
		}
		/* Check if the lock owner is running (one directed yield only). */
		if (first_diag && arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
			continue;
		}
		/* Loop for a while on the lock value before yielding again. */
		count = spin_retry;
		do {
			owner = ACCESS_ONCE(lp->lock);
		} while (owner && count-- > 0);
		if (!owner)
			continue;
		/*
		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
		 * yield the CPU unconditionally. For LPAR rely on the
		 * sense running status.
		 */
		if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
			smp_yield_cpu(~owner);
			first_diag = 0;
		}
	}
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);
894cdde2 117
0199c4e6 118int arch_spin_trylock_retry(arch_spinlock_t *lp)
951f22d5 119{
02c503ff
MS
120 int cpu = SPINLOCK_LOCKVAL;
121 int owner, count;
951f22d5 122
2c72a44e 123 for (count = spin_retry; count > 0; count--) {
187b5f41 124 owner = READ_ONCE(lp->lock);
2c72a44e
MS
125 /* Try to get the lock if it is free. */
126 if (!owner) {
02c503ff 127 if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
2c72a44e 128 return 1;
b13de4b7 129 }
2c72a44e 130 }
951f22d5
MS
131 return 0;
132}
0199c4e6 133EXPORT_SYMBOL(arch_spin_trylock_retry);
951f22d5 134
/*
 * Reader slow path: wait until no writer holds the rwlock (lock word
 * non-negative), then atomically increment the reader count.
 */
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
	int count = spin_retry;
	int owner, old;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	/*
	 * NOTE(review): this -1 add presumably undoes a speculative reader
	 * increment done by the inline fast path before it called us -
	 * confirm against arch_read_lock() in the spinlock header.
	 */
	__RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			/* Retry budget exhausted: yield to a preempted owner. */
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		/* Negative lock word: a writer is involved, keep waiting. */
		if (old < 0)
			continue;
		if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);
159
fb3a6bbc 160int _raw_read_trylock_retry(arch_rwlock_t *rw)
951f22d5 161{
951f22d5 162 int count = spin_retry;
02c503ff 163 int old;
951f22d5
MS
164
165 while (count-- > 0) {
bae8f567 166 old = ACCESS_ONCE(rw->lock);
b13de4b7 167 if (old < 0)
96567161 168 continue;
02c503ff 169 if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
951f22d5
MS
170 return 1;
171 }
172 return 0;
173}
174EXPORT_SYMBOL(_raw_read_trylock_retry);
175
/*
 * Writer slow path: wait until the lock word is completely free (no
 * readers, no writer), then set the writer bit 0x80000000.  With the
 * z196 interlocked-access facility the fast path also passes in the
 * previous lock value.
 */
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
#else
void _raw_write_lock_wait(arch_rwlock_t *rw)
#endif
{
	int count = spin_retry;
	int owner, old;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	/*
	 * NOTE(review): prev > 0 appears to mean the fast path set the
	 * writer bit while readers were still active; this AND clears
	 * that bit again before we wait - confirm against
	 * arch_write_lock() in the spinlock header.
	 */
	if ((int) prev > 0)
		__RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
#endif
	owner = 0;
	while (1) {
		if (count-- <= 0) {
			/* Retry budget exhausted: yield to a preempted owner. */
			if (owner && arch_vcpu_is_preempted(~owner))
				smp_yield_cpu(~owner);
			count = spin_retry;
		}
		old = ACCESS_ONCE(rw->lock);
		owner = ACCESS_ONCE(rw->owner);
		/* Acquire only when the word is fully free (no readers/writer). */
		if (old == 0 && __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
			break;
		/* Full memory barrier between retry rounds. */
		smp_mb();
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
204
fb3a6bbc 205int _raw_write_trylock_retry(arch_rwlock_t *rw)
951f22d5
MS
206{
207 int count = spin_retry;
02c503ff 208 int old;
951f22d5
MS
209
210 while (count-- > 0) {
bae8f567 211 old = ACCESS_ONCE(rw->lock);
b13de4b7 212 if (old)
96567161 213 continue;
02c503ff 214 if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
951f22d5
MS
215 return 1;
216 }
217 return 0;
218}
219EXPORT_SYMBOL(_raw_write_trylock_retry);
d59b93da 220
02c503ff 221void arch_lock_relax(int cpu)
d59b93da
MS
222{
223 if (!cpu)
224 return;
760928c0 225 if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
d59b93da
MS
226 return;
227 smp_yield_cpu(~cpu);
228}
229EXPORT_SYMBOL(arch_lock_relax);