#ifndef _ASM_IA64_SPINLOCK_H
#define _ASM_IA64_SPINLOCK_H

/*
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * This file is used for SMP configurations only.
 */

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/bitops.h>

#include <linux/atomic.h>
#include <asm/intrinsics.h>
#include <asm/barrier.h>
#include <asm/processor.h>

#define arch_spin_lock_init(x)			((x)->lock = 0)

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail.  The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourselves to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 * The pad bits in the middle are used to prevent the next_ticket number
 * overflowing into the now_serving number.
 *
 *   31             17  16    15  14                    0
 *  +----------------------------------------------------+
 *  |  now_serving     | padding |   next_ticket         |
 *  +----------------------------------------------------+
 */

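/*
 * Worked example (derived from the layout above): an unlocked lock has
 * now_serving == next_ticket.  Starting from lock->lock == 0, the first
 * locker's fetchadd returns 0, so its ticket (0) matches now_serving (0)
 * and it owns the lock, leaving next_ticket == 1.  A second locker draws
 * ticket 1 while now_serving is still 0 and spins; the owner's unlock
 * advances now_serving to 1 (by adding 2 to the upper halfword, since
 * now_serving starts at bit 17), which releases the spinner.
 */
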
#define TICKET_SHIFT	17
#define TICKET_BITS	15
#define TICKET_MASK	((1 << TICKET_BITS) - 1)

static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
	int	*p = (int *)&lock->lock, ticket, serve;

	/* Atomically draw the next ticket, with acquire semantics. */
	ticket = ia64_fetchadd(1, p, acq);

	/* Uncontended fast path: our ticket is already being served. */
	if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
		return;

	ia64_invala();

	/* Spin until now_serving reaches our ticket. */
	for (;;) {
		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(p) : "memory");

		if (!(((serve >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
			return;
		cpu_relax();
	}
}

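/*
 * Try once: if a snapshot of the lock looks unlocked (ticket equals
 * now_serving), claim the next ticket with a cmpxchg.  Any concurrent
 * locker changes lock->lock, which makes the cmpxchg fail.
 */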
static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
	int tmp = READ_ONCE(lock->lock);

	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
		return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
	return 0;
}

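/*
 * Release: advance now_serving by one.  now_serving occupies bits 17..31
 * of the lock word, i.e. bits 1..15 of the upper halfword on this
 * little-endian configuration, so adding 2 there increments it and
 * "& ~1" keeps the padding bit below it clear.  ld2.bias hints that the
 * cache line should be acquired exclusively for the store that follows.
 */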
static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
	unsigned short	*p = (unsigned short *)&lock->lock + 1, tmp;

	asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
	WRITE_ONCE(*p, (tmp + 2) & ~1);
}

static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
	long tmp = READ_ONCE(lock->lock);

	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
}

static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
	long tmp = READ_ONCE(lock->lock);

	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}

static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return !(((lock.lock >> TICKET_SHIFT) ^ lock.lock) & TICKET_MASK);
}

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}
#define arch_spin_is_contended	arch_spin_is_contended

static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
						 unsigned long flags)
{
	arch_spin_lock(lock);
}
#define arch_spin_lock_flags	arch_spin_lock_flags

#ifdef ASM_SUPPORTED

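/*
 * Read lock: fetchadd4.acq increments the reader count; a negative
 * result means a writer holds bit 31.  In that case the increment is
 * backed out with fetchadd4.rel and we spin with plain loads until the
 * word goes non-negative.  If psr.i was set in %1 (flags), interrupts
 * are re-enabled (ssm psr.i) while spinning and masked again
 * (rsm psr.i) before retrying.
 */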
static __always_inline void
arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
	__asm__ __volatile__ (
		"tbit.nz p6, p0 = %1,%2\n"
		"br.few 3f\n"
		"1:\n"
		"fetchadd4.rel r2 = [%0], -1;;\n"
		"(p6) ssm psr.i\n"
		"2:\n"
		"hint @pause\n"
		"ld4 r2 = [%0];;\n"
		"cmp4.lt p7,p0 = r2, r0\n"
		"(p7) br.cond.spnt.few 2b\n"
		"(p6) rsm psr.i\n"
		";;\n"
		"3:\n"
		"fetchadd4.acq r2 = [%0], 1;;\n"
		"cmp4.lt p7,p0 = r2, r0\n"
		"(p7) br.cond.spnt.few 1b\n"
		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
		: "p6", "p7", "r2", "memory");
}

#define arch_read_lock_flags arch_read_lock_flags
#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)

#else /* !ASM_SUPPORTED */

#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)

#define arch_read_lock(rw)								\
do {											\
	arch_rwlock_t *__read_lock_ptr = (rw);						\
											\
	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) {		\
		ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);			\
		while (*(volatile int *)__read_lock_ptr < 0)				\
			cpu_relax();							\
	}										\
} while (0)

#endif /* !ASM_SUPPORTED */

#define arch_read_unlock(rw)					\
do {								\
	arch_rwlock_t *__read_lock_ptr = (rw);			\
	ia64_fetchadd(-1, (int *) __read_lock_ptr, rel);	\
} while (0)

#ifdef ASM_SUPPORTED

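/*
 * Write lock: attempt to cmpxchg the whole word from 0 to r29, which
 * has only bit 31 set (built with "dep r29 = -1, r0, 31, 1").  On
 * failure, spin with plain loads until the word reads as zero,
 * optionally re-enabling interrupts (ssm/rsm psr.i) while waiting,
 * then retry the cmpxchg.
 */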
static __always_inline void
arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
	__asm__ __volatile__ (
		"tbit.nz p6, p0 = %1, %2\n"
		"mov ar.ccv = r0\n"
		"dep r29 = -1, r0, 31, 1\n"
		"br.few 3f;;\n"
		"1:\n"
		"(p6) ssm psr.i\n"
		"2:\n"
		"hint @pause\n"
		"ld4 r2 = [%0];;\n"
		"cmp4.eq p0,p7 = r0, r2\n"
		"(p7) br.cond.spnt.few 2b\n"
		"(p6) rsm psr.i\n"
		";;\n"
		"3:\n"
		"cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n"
		"cmp4.eq p0,p7 = r0, r2\n"
		"(p7) br.cond.spnt.few 1b;;\n"
		: : "r"(lock), "r"(flags), "i"(IA64_PSR_I_BIT)
		: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
}

#define arch_write_lock_flags arch_write_lock_flags
#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)

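/* Single cmpxchg attempt: succeeds only if the lock word was zero. */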
#define arch_write_trylock(rw)							\
({										\
	register long result;							\
										\
	__asm__ __volatile__ (							\
		"mov ar.ccv = r0\n"						\
		"dep r29 = -1, r0, 31, 1;;\n"					\
		"cmpxchg4.acq %0 = [%1], r29, ar.ccv\n"				\
		: "=r"(result) : "r"(rw) : "ar.ccv", "r29", "memory");		\
	(result == 0);								\
})

static inline void arch_write_unlock(arch_rwlock_t *x)
{
	u8 *y = (u8 *)x;
	barrier();
	/* Clear the write_lock byte (bit 31 lives in byte 3) with release semantics. */
	asm volatile ("st1.rel.nta [%0] = r0\n\t" :: "r"(y+3) : "memory");
}

#else /* !ASM_SUPPORTED */

#define arch_write_lock(l)								\
({											\
	__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);			\
	__u32 *ia64_write_lock_ptr = (__u32 *) (l);					\
	do {										\
		while (*ia64_write_lock_ptr)						\
			ia64_barrier();							\
		ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0);	\
	} while (ia64_val);								\
})

#define arch_write_trylock(rw)							\
({										\
	__u64 ia64_val;								\
	__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31, 1);				\
	ia64_val = ia64_cmpxchg4_acq((__u32 *)(rw), ia64_set_val, 0);		\
	(ia64_val == 0);							\
})

static inline void arch_write_unlock(arch_rwlock_t *x)
{
	barrier();
	x->write_lock = 0;
}

#endif /* !ASM_SUPPORTED */

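/*
 * Speculatively bump read_counter via a 32-bit cmpxchg of the whole
 * lock word.  Both the expected and the new image have write_lock
 * forced to zero, so the cmpxchg fails if a writer holds the lock or
 * if the word changed under us in the meantime.
 */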
static inline int arch_read_trylock(arch_rwlock_t *x)
{
	union {
		arch_rwlock_t lock;
		__u32 word;
	} old, new;
	old.lock = new.lock = *x;
	old.lock.write_lock = new.lock.write_lock = 0;
	++new.lock.read_counter;
	return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
}

#endif /* _ASM_IA64_SPINLOCK_H */