/* arch/sparc/include/asm/cmpxchg_64.h: 64-bit atomic xchg() and cmpxchg() definitions.
 *
 * Copyright (C) 1996, 1997, 2000 David S. Miller (davem@redhat.com)
 */

#ifndef __ARCH_SPARC64_CMPXCHG__
#define __ARCH_SPARC64_CMPXCHG__

/*
 * cas compares the 32-bit word at m with old; iff they are equal, new
 * is stored at m. Either way the prior contents of m come back in
 * new's register, so returning new yields the value seen in memory.
 */
static inline unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
	__asm__ __volatile__("cas [%2], %3, %0"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

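/*
 * Illustrative model of the cas operation used above (a sketch for
 * exposition only; in hardware this is a single atomic instruction):
 *
 *	atomically {
 *		ret = *m;
 *		if (ret == old)
 *			*m = new;
 *	}
 *	return ret;
 */
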
static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	mov		%0, %1\n"	/* stash val so we can retry with it */
"1:	lduw		[%4], %2\n"	/* load the current 32-bit value */
"	cas		[%4], %2, %0\n"	/* if *m is still %2, store val; %0 gets old *m */
"	cmp		%2, %0\n"	/* did the cas see the value we loaded? */
"	bne,a,pn	%%icc, 1b\n"	/* no: we lost a race, loop... */
"	 mov		%1, %0\n"	/* ...delay slot (only when taken): restore val */
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");

	return val;
}

static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	mov		%0, %1\n"	/* stash val so we can retry with it */
"1:	ldx		[%4], %2\n"	/* load the current 64-bit value */
"	casx		[%4], %2, %0\n"	/* if *m is still %2, store val; %0 gets old *m */
"	cmp		%2, %0\n"	/* did the casx see the value we loaded? */
"	bne,a,pn	%%xcc, 1b\n"	/* no: we lost a race, loop... */
"	 mov		%1, %0\n"	/* ...delay slot (only when taken): restore val */
	: "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
	: "0" (val), "r" (m)
	: "cc", "memory");

	return val;
}

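/*
 * C-level sketch of the two loops above (illustrative only): SPARC v9
 * has no plain atomic exchange, so xchg is built from compare-and-swap.
 *
 *	do {
 *		expected = *m;
 *	} while (cas(m, expected, val) != expected);
 *	return expected;
 */
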
#define xchg(ptr, x)	((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid xchg(). */
void __xchg_called_with_bad_pointer(void);

/*
 * Use the 4-byte cas instruction to implement a 2-byte xchg. The main
 * trick is computing the bit shift of the halfword we are interested
 * in; the XOR accounts for big-endian byte order within the word.
 */
static inline unsigned long
xchg16(__volatile__ unsigned short *m, unsigned short val)
{
	unsigned long maddr = (unsigned long)m;
	int bit_shift = (((unsigned long)m & 2) ^ 2) << 3;
	unsigned int mask = 0xffff << bit_shift;
	unsigned int *ptr = (unsigned int *) (maddr & ~2);
	unsigned int old32, new32, load32;

	/* Read the current value of the containing 32-bit word. */
	load32 = *ptr;

	do {
		old32 = load32;
		/* Splice val into its halfword lane, preserving the other. */
		new32 = (load32 & ~mask) | (val << bit_shift);
		load32 = __cmpxchg_u32(ptr, old32, new32);
	} while (load32 != old32);

	return (load32 & mask) >> bit_shift;
}

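/*
 * Worked example of the lane math above (hypothetical address, shown
 * for exposition only): for m = 0x1000 (offset 0 in its word),
 * (0 ^ 2) = 2 and bit_shift = 2 << 3 = 16, so mask = 0xffff0000 and
 * the halfword lives in the high half of the big-endian 32-bit word.
 * For m = 0x1002, (2 ^ 2) = 0 and bit_shift = 0, selecting the low
 * half. The XOR is what flips the offset into a big-endian shift.
 */
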
static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
				   int size)
{
	switch (size) {
	case 2:
		return xchg16(ptr, x);
	case 4:
		return xchg32(ptr, x);
	case 8:
		return xchg64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

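#if 0	/* Usage sketch only (hypothetical caller), not part of this header. */
static inline unsigned int example_consume_flags(volatile unsigned int *flags)
{
	/* Atomically take all pending flags, leaving zero behind. */
	return xchg(flags, 0U);
}
#endif
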
/*
 * Atomic compare and exchange. Compare OLD with MEM; if they are
 * identical, store NEW in MEM. Return the initial value in MEM.
 * Success is indicated by comparing RETURN with OLD.
 */

#include <asm-generic/cmpxchg-local.h>

static inline unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
	/* 64-bit counterpart of __cmpxchg_u32(), using casx. */
	__asm__ __volatile__("casx [%2], %3, %0"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

/*
 * Use the 4-byte cas instruction to implement a 1-byte cmpxchg. The
 * main trick is computing the bit shift of the byte we are interested
 * in; the XOR accounts for big-endian byte order within the word.
 */
static inline unsigned long
__cmpxchg_u8(volatile unsigned char *m, unsigned char old, unsigned char new)
{
	unsigned long maddr = (unsigned long)m;
	int bit_shift = (((unsigned long)m & 3) ^ 3) << 3;
	unsigned int mask = 0xff << bit_shift;
	unsigned int *ptr = (unsigned int *) (maddr & ~3);
	unsigned int old32, new32, load;
	unsigned int load32 = *ptr;

	do {
		/* Build full-word expected and replacement values. */
		new32 = (load32 & ~mask) | (new << bit_shift);
		old32 = (load32 & ~mask) | (old << bit_shift);
		load32 = __cmpxchg_u32(ptr, old32, new32);
		if (load32 == old32)
			return old;
		/* The word changed; retry only if our byte still matches. */
		load = (load32 & mask) >> bit_shift;
	} while (load == old);

	return load;
}

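/*
 * Why the retry condition above matters (illustrative trace with
 * hypothetical values): suppose our byte holds old, but a neighbouring
 * byte in the same word changes between the load and the cas. The cas
 * fails (load32 != old32), yet our byte still equals old, so we must
 * retry with the fresh word rather than report failure. Only when the
 * byte itself no longer equals old do we return the conflicting value.
 */
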
/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg(). */
void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 1:
		return __cmpxchg_u8(ptr, old, new);
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
				       (unsigned long)_n_, sizeof(*(ptr))); \
})

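#if 0	/* Usage sketch only (hypothetical caller), not part of this header. */
static inline int example_bounded_inc(volatile int *counter, int limit)
{
	int cur = *counter;
	int old;

	/* Classic cmpxchg loop: retry until the update applies cleanly. */
	while (cur < limit) {
		old = cmpxchg(counter, cur, cur + 1);
		if (old == cur)
			return 1;	/* we performed the increment */
		cur = old;		/* someone raced us; re-examine */
	}
	return 0;			/* limit already reached */
}
#endif
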
/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt the current CPU.
 * Always make them available. The 4- and 8-byte cases reuse the
 * fully-atomic __cmpxchg() above; other sizes fall back to the
 * generic local implementation.
 */

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	switch (size) {
	case 4:
	case 8:
		return __cmpxchg(ptr, old, new, size);
	default:
		return __cmpxchg_local_generic(ptr, old, new, size);
	}

	return old;
}

#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
					     (unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n)					\
({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
})
#define cmpxchg64(ptr, o, n)	cmpxchg64_local((ptr), (o), (n))

#endif /* __ARCH_SPARC64_CMPXCHG__ */