]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - arch/arm64/include/asm/percpu.h
arm64: percpu: Initialize ret in the default case
[mirror_ubuntu-bionic-kernel.git] / arch / arm64 / include / asm / percpu.h
CommitLineData
71586276
WD
1/*
2 * Copyright (C) 2013 ARM Ltd.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16#ifndef __ASM_PERCPU_H
17#define __ASM_PERCPU_H
18
53390852 19#include <asm/alternative.h>
a9ea0017
MR
20#include <asm/stack_pointer.h>
21
71586276
WD
/*
 * Install @off as this CPU's per-cpu base offset.
 *
 * The offset lives in TPIDR_EL1, or in TPIDR_EL2 when the kernel runs at
 * EL2 with VHE (the ALTERNATIVE patches the instruction at boot based on
 * ARM64_HAS_VIRT_HOST_EXTN).  The "memory" clobber stops the compiler
 * from reordering per-cpu accesses across the register write.
 */
static inline void set_my_cpu_offset(unsigned long off)
{
	asm volatile(ALTERNATIVE("msr tpidr_el1, %0",
				 "msr tpidr_el2, %0",
				 ARM64_HAS_VIRT_HOST_EXTN)
			:: "r" (off) : "memory");
}
29
/*
 * Read this CPU's per-cpu base offset back out of TPIDR_EL1 (or
 * TPIDR_EL2 under VHE, selected by the same ALTERNATIVE as above).
 */
static inline unsigned long __my_cpu_offset(void)
{
	unsigned long off;

	/*
	 * We want to allow caching the value, so avoid using volatile and
	 * instead use a fake stack read to hazard against barrier().
	 */
	asm(ALTERNATIVE("mrs %0, tpidr_el1",
			"mrs %0, tpidr_el2",
			ARM64_HAS_VIRT_HOST_EXTN)
		: "=r" (off) :
		"Q" (*(const unsigned long *)current_stack_pointer));

	return off;
}
#define __my_cpu_offset __my_cpu_offset()
47
f97fc810
SC
/*
 * PERCPU_OP(op, asm_op) generates __percpu_<op>(ptr, val, size):
 * an atomic read-modify-write of the 1/2/4/8-byte per-cpu variable at
 * @ptr using an exclusive load/store (ldxr/stxr) retry loop, returning
 * the new value.  These need only be atomic with respect to interrupts
 * on the local CPU, hence plain ldxr/stxr with no acquire/release or
 * barriers.  An unsupported @size trips BUILD_BUG(); the dead
 * "ret = 0" in that branch only silences -Wmaybe-uninitialized.
 */
#define PERCPU_OP(op, asm_op)						\
static inline unsigned long __percpu_##op(void *ptr,			\
			unsigned long val, int size)			\
{									\
	unsigned long loop, ret;					\
									\
	switch (size) {							\
	case 1:								\
		asm ("//__per_cpu_" #op "_1\n"				\
		"1:	ldxrb	  %w[ret], %[ptr]\n"			\
			#asm_op "	%w[ret], %w[ret], %w[val]\n"	\
		"	stxrb	  %w[loop], %w[ret], %[ptr]\n"		\
		"	cbnz	  %w[loop], 1b"				\
		: [loop] "=&r" (loop), [ret] "=&r" (ret),		\
		  [ptr] "+Q"(*(u8 *)ptr)				\
		: [val] "Ir" (val));					\
		break;							\
	case 2:								\
		asm ("//__per_cpu_" #op "_2\n"				\
		"1:	ldxrh	  %w[ret], %[ptr]\n"			\
			#asm_op "	%w[ret], %w[ret], %w[val]\n"	\
		"	stxrh	  %w[loop], %w[ret], %[ptr]\n"		\
		"	cbnz	  %w[loop], 1b"				\
		: [loop] "=&r" (loop), [ret] "=&r" (ret),		\
		  [ptr] "+Q"(*(u16 *)ptr)				\
		: [val] "Ir" (val));					\
		break;							\
	case 4:								\
		asm ("//__per_cpu_" #op "_4\n"				\
		"1:	ldxr	  %w[ret], %[ptr]\n"			\
			#asm_op "	%w[ret], %w[ret], %w[val]\n"	\
		"	stxr	  %w[loop], %w[ret], %[ptr]\n"		\
		"	cbnz	  %w[loop], 1b"				\
		: [loop] "=&r" (loop), [ret] "=&r" (ret),		\
		  [ptr] "+Q"(*(u32 *)ptr)				\
		: [val] "Ir" (val));					\
		break;							\
	case 8:								\
		asm ("//__per_cpu_" #op "_8\n"				\
		"1:	ldxr	  %[ret], %[ptr]\n"			\
			#asm_op "	%[ret], %[ret], %[val]\n"	\
		"	stxr	  %w[loop], %[ret], %[ptr]\n"		\
		"	cbnz	  %w[loop], 1b"				\
		: [loop] "=&r" (loop), [ret] "=&r" (ret),		\
		  [ptr] "+Q"(*(u64 *)ptr)				\
		: [val] "Ir" (val));					\
		break;							\
	default:							\
		ret = 0;						\
		BUILD_BUG();						\
	}								\
									\
	return ret;							\
}

PERCPU_OP(add, add)
PERCPU_OP(and, and)
PERCPU_OP(or, orr)
#undef PERCPU_OP
107
108static inline unsigned long __percpu_read(void *ptr, int size)
109{
110 unsigned long ret;
111
112 switch (size) {
113 case 1:
09433944 114 ret = READ_ONCE(*(u8 *)ptr);
f97fc810
SC
115 break;
116 case 2:
09433944 117 ret = READ_ONCE(*(u16 *)ptr);
f97fc810
SC
118 break;
119 case 4:
09433944 120 ret = READ_ONCE(*(u32 *)ptr);
f97fc810
SC
121 break;
122 case 8:
09433944 123 ret = READ_ONCE(*(u64 *)ptr);
f97fc810
SC
124 break;
125 default:
bdbee0c1 126 ret = 0;
f97fc810
SC
127 BUILD_BUG();
128 }
129
130 return ret;
131}
132
/*
 * Store @val, truncated to @size bytes, into the per-cpu variable at
 * @ptr.  WRITE_ONCE() prevents the compiler from tearing or duplicating
 * the store.  Any size other than 1/2/4/8 trips BUILD_BUG().
 */
static inline void __percpu_write(void *ptr, unsigned long val, int size)
{
	switch (size) {
	case 1:
		WRITE_ONCE(*(u8 *)ptr, (u8)val);
		break;
	case 2:
		WRITE_ONCE(*(u16 *)ptr, (u16)val);
		break;
	case 4:
		WRITE_ONCE(*(u32 *)ptr, (u32)val);
		break;
	case 8:
		WRITE_ONCE(*(u64 *)ptr, (u64)val);
		break;
	default:
		BUILD_BUG();
	}
}
152
/*
 * Atomically exchange the 1/2/4/8-byte per-cpu variable at @ptr with
 * @val, returning the old value.  Implemented as an exclusive
 * load/store (ldxr/stxr) retry loop; like the PERCPU_OP helpers above,
 * this only needs to be atomic against interrupts on the local CPU, so
 * no barriers or acquire/release semantics are used.  An unsupported
 * @size trips BUILD_BUG(); "ret = 0" in that dead branch just keeps
 * the compiler's uninitialized-variable warning quiet.
 */
static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
						int size)
{
	unsigned long ret, loop;

	switch (size) {
	case 1:
		asm ("//__percpu_xchg_1\n"
		"1:	ldxrb	%w[ret], %[ptr]\n"
		"	stxrb	%w[loop], %w[val], %[ptr]\n"
		"	cbnz	%w[loop], 1b"
		: [loop] "=&r"(loop), [ret] "=&r"(ret),
		  [ptr] "+Q"(*(u8 *)ptr)
		: [val] "r" (val));
		break;
	case 2:
		asm ("//__percpu_xchg_2\n"
		"1:	ldxrh	%w[ret], %[ptr]\n"
		"	stxrh	%w[loop], %w[val], %[ptr]\n"
		"	cbnz	%w[loop], 1b"
		: [loop] "=&r"(loop), [ret] "=&r"(ret),
		  [ptr] "+Q"(*(u16 *)ptr)
		: [val] "r" (val));
		break;
	case 4:
		asm ("//__percpu_xchg_4\n"
		"1:	ldxr	%w[ret], %[ptr]\n"
		"	stxr	%w[loop], %w[val], %[ptr]\n"
		"	cbnz	%w[loop], 1b"
		: [loop] "=&r"(loop), [ret] "=&r"(ret),
		  [ptr] "+Q"(*(u32 *)ptr)
		: [val] "r" (val));
		break;
	case 8:
		asm ("//__percpu_xchg_8\n"
		"1:	ldxr	%[ret], %[ptr]\n"
		"	stxr	%w[loop], %[val], %[ptr]\n"
		"	cbnz	%w[loop], 1b"
		: [loop] "=&r"(loop), [ret] "=&r"(ret),
		  [ptr] "+Q"(*(u64 *)ptr)
		: [val] "r" (val));
		break;
	default:
		ret = 0;
		BUILD_BUG();
	}

	return ret;
}
202
f3eab718
SC
/*
 * Read the current CPU's instance of per-cpu variable @pcp, evaluating
 * to a value of typeof(pcp).  Preemption is disabled around the access
 * so the task cannot migrate between resolving the per-cpu pointer and
 * loading from it; the _notrace variants keep this usable from the
 * function tracer.
 */
#define _percpu_read(pcp)						\
({									\
	typeof(pcp) __retval;						\
	preempt_disable_notrace();					\
	__retval = (typeof(pcp))__percpu_read(raw_cpu_ptr(&(pcp)), 	\
					      sizeof(pcp));		\
	preempt_enable_notrace();					\
	__retval;							\
})
212
/*
 * Write @val to the current CPU's instance of per-cpu variable @pcp.
 * Preemption is disabled around the access so the task cannot migrate
 * between resolving the per-cpu pointer and storing to it; the
 * _notrace variants keep this usable from the function tracer.
 *
 * Fix: the macro previously ended "} while(0) \" — the stray trailing
 * line-continuation pulled the following (blank) line into the macro
 * definition, a latent hazard if anything were ever added right after
 * it.  Drop the backslash (and use the idiomatic "while (0)" spacing).
 */
#define _percpu_write(pcp, val)						\
do {									\
	preempt_disable_notrace();					\
	__percpu_write(raw_cpu_ptr(&(pcp)), (unsigned long)(val), 	\
				sizeof(pcp));				\
	preempt_enable_notrace();					\
} while (0)
/*
 * Run read-modify-write helper @operation (one of the __percpu_*
 * functions above) on the current CPU's instance of @pcp with
 * preemption disabled, evaluating to the result cast back to
 * typeof(pcp).  Plain preempt_disable() here, unlike the _notrace
 * pairs used by _percpu_read()/_percpu_write().
 */
#define _pcp_protect(operation, pcp, val)			\
({								\
	typeof(pcp) __retval;					\
	preempt_disable();					\
	__retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)),	\
					  (val), sizeof(pcp));	\
	preempt_enable();					\
	__retval;						\
})
230
/*
 * Preemption-safe wrappers binding each __percpu_* helper to
 * _pcp_protect().  The ldxr/stxr loops return the *new* value, so
 * _percpu_add_return() is simply _percpu_add().
 */
#define _percpu_add(pcp, val) \
	_pcp_protect(__percpu_add, pcp, val)

#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)

#define _percpu_and(pcp, val) \
	_pcp_protect(__percpu_and, pcp, val)

#define _percpu_or(pcp, val) \
	_pcp_protect(__percpu_or, pcp, val)

#define _percpu_xchg(pcp, val) (typeof(pcp)) \
	_pcp_protect(__percpu_xchg, pcp, (unsigned long)(val))
f97fc810
SC
244
/*
 * Size-suffixed this_cpu_* operations expected by the generic per-cpu
 * framework.  All sizes map onto the same size-dispatching helpers
 * above; anything not defined here falls back to the asm-generic
 * implementations pulled in at the bottom of this file.
 */
#define this_cpu_add_1(pcp, val) _percpu_add(pcp, val)
#define this_cpu_add_2(pcp, val) _percpu_add(pcp, val)
#define this_cpu_add_4(pcp, val) _percpu_add(pcp, val)
#define this_cpu_add_8(pcp, val) _percpu_add(pcp, val)

#define this_cpu_add_return_1(pcp, val) _percpu_add_return(pcp, val)
#define this_cpu_add_return_2(pcp, val) _percpu_add_return(pcp, val)
#define this_cpu_add_return_4(pcp, val) _percpu_add_return(pcp, val)
#define this_cpu_add_return_8(pcp, val) _percpu_add_return(pcp, val)

#define this_cpu_and_1(pcp, val) _percpu_and(pcp, val)
#define this_cpu_and_2(pcp, val) _percpu_and(pcp, val)
#define this_cpu_and_4(pcp, val) _percpu_and(pcp, val)
#define this_cpu_and_8(pcp, val) _percpu_and(pcp, val)

#define this_cpu_or_1(pcp, val) _percpu_or(pcp, val)
#define this_cpu_or_2(pcp, val) _percpu_or(pcp, val)
#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val)
#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val)

#define this_cpu_read_1(pcp) _percpu_read(pcp)
#define this_cpu_read_2(pcp) _percpu_read(pcp)
#define this_cpu_read_4(pcp) _percpu_read(pcp)
#define this_cpu_read_8(pcp) _percpu_read(pcp)

#define this_cpu_write_1(pcp, val) _percpu_write(pcp, val)
#define this_cpu_write_2(pcp, val) _percpu_write(pcp, val)
#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val)
#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val)

#define this_cpu_xchg_1(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_2(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_8(pcp, val) _percpu_xchg(pcp, val)
279
71586276
WD
280#include <asm-generic/percpu.h>
281
282#endif /* __ASM_PERCPU_H */