UBUNTU: SAUCE: (no-up) s390: fix rwlock implementation
arch/s390/lib/spinlock.c
/*
 *    Out of line spinlock code.
 *
 *    Copyright IBM Corp. 2004, 2006
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <asm/io.h>

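/*
 * Number of times to spin on a busy lock before yielding to the
 * hypervisor. spin_retry_init() applies the default of 1000 unless a
 * value was given on the kernel command line, e.g. "spin_retry=2000",
 * which is parsed by spin_retry_setup() below.
 */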
int spin_retry = -1;

static int __init spin_retry_init(void)
{
        if (spin_retry < 0)
                spin_retry = 1000;
        return 0;
}
early_initcall(spin_retry_init);

/**
 * spin_retry= parameter
 */
static int __init spin_retry_setup(char *str)
{
        spin_retry = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("spin_retry=", spin_retry_setup);

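/*
 * Out of line slow path of arch_spin_lock(), entered after the inline
 * compare-and-swap fast path found the lock taken. The lock word holds the
 * bitwise complement of the owning CPU number, which is why ~owner is
 * passed to arch_vcpu_is_preempted() and smp_yield_cpu(). On the first
 * contended pass a directed yield is given to a preempted owner; after
 * that the code spins on the lock word for up to spin_retry iterations
 * before yielding again.
 */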
void arch_spin_lock_wait(arch_spinlock_t *lp)
{
        int cpu = SPINLOCK_LOCKVAL;
        int owner, count, first_diag;

        first_diag = 1;
        while (1) {
                owner = ACCESS_ONCE(lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
                                return;
                        continue;
                }
                /* First iteration: check if the lock owner is running. */
                if (first_diag && arch_vcpu_is_preempted(~owner)) {
                        smp_yield_cpu(~owner);
                        first_diag = 0;
                        continue;
                }
                /* Loop for a while on the lock value. */
                count = spin_retry;
                do {
                        owner = ACCESS_ONCE(lp->lock);
                } while (owner && count-- > 0);
                if (!owner)
                        continue;
                /*
                 * For multiple layers of hypervisors, e.g. z/VM + LPAR
                 * yield the CPU unconditionally. For LPAR rely on the
                 * sense running status.
                 */
                if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
                        smp_yield_cpu(~owner);
                        first_diag = 0;
                }
        }
}
EXPORT_SYMBOL(arch_spin_lock_wait);

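/*
 * Same slow path as arch_spin_lock_wait(), for locks taken with interrupts
 * disabled and the saved flags available: the caller's interrupt state is
 * restored while busy waiting, and interrupts are disabled again just
 * before the compare-and-swap that takes the lock.
 */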
void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
{
        int cpu = SPINLOCK_LOCKVAL;
        int owner, count, first_diag;

        local_irq_restore(flags);
        first_diag = 1;
        while (1) {
                owner = ACCESS_ONCE(lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        local_irq_disable();
                        if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
                                return;
                        local_irq_restore(flags);
                        continue;
                }
                /* Check if the lock owner is running. */
                if (first_diag && arch_vcpu_is_preempted(~owner)) {
                        smp_yield_cpu(~owner);
                        first_diag = 0;
                        continue;
                }
                /* Loop for a while on the lock value. */
                count = spin_retry;
                do {
                        owner = ACCESS_ONCE(lp->lock);
                } while (owner && count-- > 0);
                if (!owner)
                        continue;
                /*
                 * For multiple layers of hypervisors, e.g. z/VM + LPAR
                 * yield the CPU unconditionally. For LPAR rely on the
                 * sense running status.
                 */
                if (!MACHINE_IS_LPAR || arch_vcpu_is_preempted(~owner)) {
                        smp_yield_cpu(~owner);
                        first_diag = 0;
                }
        }
}
EXPORT_SYMBOL(arch_spin_lock_wait_flags);

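/*
 * Out of line part of arch_spin_trylock(): retry the compare-and-swap up
 * to spin_retry times without yielding. Returns 1 on success, 0 if the
 * lock stayed busy.
 */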
int arch_spin_trylock_retry(arch_spinlock_t *lp)
{
        int cpu = SPINLOCK_LOCKVAL;
        int owner, count;

        for (count = spin_retry; count > 0; count--) {
                owner = READ_ONCE(lp->lock);
                /* Try to get the lock if it is free. */
                if (!owner) {
                        if (__atomic_cmpxchg_bool(&lp->lock, 0, cpu))
                                return 1;
                }
        }
        return 0;
}
EXPORT_SYMBOL(arch_spin_trylock_retry);

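/*
 * Reader slow path. A negative lock word (bit 31 set) means a writer holds
 * the lock; otherwise the word counts the readers. On machines with the
 * z196 interlocked-access facility the inline fast path is expected to
 * have already added 1 to the lock word, so that increment is backed out
 * here first; the loop then waits for the writer to go away and retries
 * the old -> old + 1 update.
 */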
void _raw_read_lock_wait(arch_rwlock_t *rw)
{
        int count = spin_retry;
        int owner, old;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
        __RAW_LOCK(&rw->lock, -1, __RAW_OP_ADD);
#endif
        owner = 0;
        while (1) {
                if (count-- <= 0) {
                        if (owner && arch_vcpu_is_preempted(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                owner = ACCESS_ONCE(rw->owner);
                if (old < 0)
                        continue;
                if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
                        return;
        }
}
EXPORT_SYMBOL(_raw_read_lock_wait);

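/*
 * Out of line part of read_trylock(): retry taking a reader reference
 * (old -> old + 1) up to spin_retry times, skipping attempts while a
 * writer (negative lock word) is active. Returns 1 on success, 0 when the
 * retries are exhausted.
 */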
int _raw_read_trylock_retry(arch_rwlock_t *rw)
{
        int count = spin_retry;
        int old;

        while (count-- > 0) {
                old = ACCESS_ONCE(rw->lock);
                if (old < 0)
                        continue;
                if (__atomic_cmpxchg_bool(&rw->lock, old, old + 1))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);

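/*
 * Writer slow path. The write lock is bit 31 (0x80000000) of the lock word
 * and can only be taken while the word is zero (no readers, no writer).
 * On z196 machines the inline fast path (in the spinlock header, not shown
 * here) is expected to have done an interlocked OR of the write bit and to
 * pass the lock value it observed in "prev"; if readers were still present
 * (prev > 0), that prematurely set write bit is cleared again here before
 * waiting for the word to drop to zero.
 */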
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
void _raw_write_lock_wait(arch_rwlock_t *rw, int prev)
#else
void _raw_write_lock_wait(arch_rwlock_t *rw)
#endif
{
        int count = spin_retry;
        int owner, old;

#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
        if ((int) prev > 0)
                __RAW_UNLOCK(&rw->lock, 0x7fffffff, __RAW_OP_AND);
#endif
        owner = 0;
        while (1) {
                if (count-- <= 0) {
                        if (owner && arch_vcpu_is_preempted(~owner))
                                smp_yield_cpu(~owner);
                        count = spin_retry;
                }
                old = ACCESS_ONCE(rw->lock);
                owner = ACCESS_ONCE(rw->owner);
                if (old == 0 && __atomic_cmpxchg_bool(&rw->lock, old, old | 0x80000000))
                        break;
                smp_mb();
        }
}
EXPORT_SYMBOL(_raw_write_lock_wait);

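/*
 * Out of line part of write_trylock(): retry setting the write bit
 * (0 -> 0x80000000) up to spin_retry times. Returns 1 on success, 0 if
 * readers or a writer remained active.
 */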
int _raw_write_trylock_retry(arch_rwlock_t *rw)
{
        int count = spin_retry;
        int old;

        while (count-- > 0) {
                old = ACCESS_ONCE(rw->lock);
                if (old)
                        continue;
                if (__atomic_cmpxchg_bool(&rw->lock, 0, 0x80000000))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);

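/*
 * Relax helper used by the lock wait loops: "cpu" is a raw lock value
 * (0 when the lock is free, otherwise the complemented number of the
 * owning CPU). Yield to the owner unless we run in an LPAR and the owner
 * is currently running.
 */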
void arch_lock_relax(int cpu)
{
        if (!cpu)
                return;
        if (MACHINE_IS_LPAR && !arch_vcpu_is_preempted(~cpu))
                return;
        smp_yield_cpu(~cpu);
}
EXPORT_SYMBOL(arch_lock_relax);