/*
 * Contains CPU feature definitions
 *
 * Copyright (C) 2015 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "CPU features: " fmt

#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/sysreg.h>

unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);

#ifdef CONFIG_COMPAT
#define COMPAT_ELF_HWCAP_DEFAULT	\
				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
				 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
				 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
				 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);

#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)	\
	{							\
		.strict = STRICT,				\
		.type = TYPE,					\
		.shift = SHIFT,					\
		.width = WIDTH,					\
		.safe_val = SAFE_VAL,				\
	}

#define ARM64_FTR_END						\
	{							\
		.width = 0,					\
	}

static struct arm64_ftr_bits ftr_id_aa64isar0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 24, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),	/* RAZ */
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
	/* Linux doesn't care about the EL3 */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64PFR0_EL3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
	/* Linux shouldn't care about secure memory */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
	/*
	 * Differing PARange is fine as long as all peripherals and memory are mapped
	 * within the minimum PARange of all CPUs
	 */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_ctr[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),	/* RAO */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),	/* CWG */
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* ERG */
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1),	/* DminLine */
	/*
	 * Linux can handle differing I-cache policies. Userspace JITs will
	 * make use of *minLine
	 */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0),	/* L1Ip */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* IminLine */
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_mmfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0),	/* InnerShr */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 24, 4, 0),	/* FCSE */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* AuxReg */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 16, 4, 0),	/* TCM */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 12, 4, 0),	/* ShareLvl */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 4, 0),		/* OuterShr */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),		/* PMSA */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),		/* VMSA */
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_mvfr2[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 24, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),		/* FPMisc */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),		/* SIMDMisc */
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_dczid[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 5, 27, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 1, 1),		/* DZP */
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* BS */
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_isar5[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_RDM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 20, 4, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_CRC32_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_AES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SEVL_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_mmfr4[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 24, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),		/* ac2 */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),		/* RAZ */
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_id_pfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 16, 16, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 12, 4, 0),	/* State3 */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 4, 0),		/* State2 */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),		/* State1 */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),		/* State0 */
	ARM64_FTR_END,
};

/*
 * Common ftr bits for a 32bit register with all hidden, strict
 * attributes, with 4bit feature fields and a default safe value of
 * 0. Covers the following 32bit registers:
 * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
 */
static struct arm64_ftr_bits ftr_generic_32bits[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_generic[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 64, 0),
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_generic32[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 32, 0),
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_aa64raz[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 64, 0),
	ARM64_FTR_END,
};

#define ARM64_FTR_REG(id, table)		\
	{					\
		.sys_id = id,			\
		.name = #id,			\
		.ftr_bits = &((table)[0]),	\
	}

static struct arm64_ftr_reg arm64_ftr_regs[] = {

	/* Op1 = 0, CRn = 0, CRm = 1 */
	ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
	ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
	ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),

	/* Op1 = 0, CRn = 0, CRm = 2 */
	ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
	ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),

	/* Op1 = 0, CRn = 0, CRm = 3 */
	ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),

	/* Op1 = 0, CRn = 0, CRm = 4 */
	ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
	ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_aa64raz),

	/* Op1 = 0, CRn = 0, CRm = 5 */
	ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
	ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_generic),

	/* Op1 = 0, CRn = 0, CRm = 6 */
	ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
	ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_aa64raz),

	/* Op1 = 0, CRn = 0, CRm = 7 */
	ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
	ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),

	/* Op1 = 3, CRn = 0, CRm = 0 */
	ARM64_FTR_REG(SYS_CTR_EL0, ftr_ctr),
	ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),

	/* Op1 = 3, CRn = 14, CRm = 0 */
	ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_generic32),
};

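/* bsearch() comparator: compare a sys_id key against an arm64_ftr_reg entry. */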
static int search_cmp_ftr_reg(const void *id, const void *regp)
{
	return (int)(unsigned long)id - (int)((const struct arm64_ftr_reg *)regp)->sys_id;
}

/*
 * get_arm64_ftr_reg - Lookup a feature register entry using its
 * sys_reg() encoding. With the array arm64_ftr_regs sorted in the
 * ascending order of sys_id, we use binary search to find a matching
 * entry.
 *
 * returns - Upon success, matching ftr_reg entry for id.
 *         - NULL on failure. It is up to the caller to decide
 *           the impact of a failure.
 */
static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
{
	return bsearch((const void *)(unsigned long)sys_id,
			arm64_ftr_regs,
			ARRAY_SIZE(arm64_ftr_regs),
			sizeof(arm64_ftr_regs[0]),
			search_cmp_ftr_reg);
}

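/* Insert @ftr_val into the field described by @ftrp within the register value @reg. */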
static u64 arm64_ftr_set_value(struct arm64_ftr_bits *ftrp, s64 reg, s64 ftr_val)
{
	u64 mask = arm64_ftr_mask(ftrp);

	reg &= ~mask;
	reg |= (ftr_val << ftrp->shift) & mask;
	return reg;
}

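/*
 * Pick the system-wide safe value for a feature field, given the value seen
 * on a new CPU and the current system value: the lower of the two for
 * FTR_LOWER_SAFE, the higher for FTR_HIGHER_SAFE, and the predefined
 * safe_val when an FTR_EXACT field mismatches.
 */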
static s64 arm64_ftr_safe_value(struct arm64_ftr_bits *ftrp, s64 new, s64 cur)
{
	s64 ret = 0;

	switch (ftrp->type) {
	case FTR_EXACT:
		ret = ftrp->safe_val;
		break;
	case FTR_LOWER_SAFE:
		ret = new < cur ? new : cur;
		break;
	case FTR_HIGHER_SAFE:
		ret = new > cur ? new : cur;
		break;
	default:
		BUG();
	}

	return ret;
}

static int __init sort_cmp_ftr_regs(const void *a, const void *b)
{
	return ((const struct arm64_ftr_reg *)a)->sys_id -
		((const struct arm64_ftr_reg *)b)->sys_id;
}

static void __init swap_ftr_regs(void *a, void *b, int size)
{
	struct arm64_ftr_reg tmp = *(struct arm64_ftr_reg *)a;
	*(struct arm64_ftr_reg *)a = *(struct arm64_ftr_reg *)b;
	*(struct arm64_ftr_reg *)b = tmp;
}

static void __init sort_ftr_regs(void)
{
	/* Keep the array sorted so that we can do the binary search */
	sort(arm64_ftr_regs,
	     ARRAY_SIZE(arm64_ftr_regs),
	     sizeof(arm64_ftr_regs[0]),
	     sort_cmp_ftr_regs,
	     swap_ftr_regs);
}

/*
 * Initialise the CPU feature register from Boot CPU values.
 * Also initialises the strict_mask for the register.
 */
static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
{
	u64 val = 0;
	u64 strict_mask = ~0x0ULL;
	struct arm64_ftr_bits *ftrp;
	struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);

	BUG_ON(!reg);

	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
		s64 ftr_new = arm64_ftr_value(ftrp, new);

		val = arm64_ftr_set_value(ftrp, val, ftr_new);
		if (!ftrp->strict)
			strict_mask &= ~arm64_ftr_mask(ftrp);
	}
	reg->sys_val = val;
	reg->strict_mask = strict_mask;
}

void __init init_cpu_features(struct cpuinfo_arm64 *info)
{
	/* Before we start using the table, make sure it is sorted */
	sort_ftr_regs();

	init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
	init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
	init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
	init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
	init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
	init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
	init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
	init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
	init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
	init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
	init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
	init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
	init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
	init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
	init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
	init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
	init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
	init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
	init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
	init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
	init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
	init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
	init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
}

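/*
 * Fold the feature register value seen on a new CPU into the system-wide
 * value, field by field, replacing any mismatching field with its safe value.
 */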
static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
{
	struct arm64_ftr_bits *ftrp;

	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
		s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
		s64 ftr_new = arm64_ftr_value(ftrp, new);

		if (ftr_cur == ftr_new)
			continue;
		/* Find a safe value */
		ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
		reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
	}
}

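/*
 * Update the system-wide value of the register identified by @sys_id and
 * return non-zero if any of its strict fields differ from the boot CPU.
 */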
static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
{
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);

	BUG_ON(!regp);
	update_cpu_ftr_reg(regp, val);
	if ((boot & regp->strict_mask) == (val & regp->strict_mask))
		return 0;
	pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
			regp->name, boot, cpu, val);
	return 1;
}

/*
 * Update system wide CPU feature registers with the values from a
 * non-boot CPU. Also performs SANITY checks to make sure that there
 * aren't any insane variations from that of the boot CPU.
 */
void update_cpu_features(int cpu,
			 struct cpuinfo_arm64 *info,
			 struct cpuinfo_arm64 *boot)
{
	int taint = 0;

	/*
	 * The kernel can handle differing I-cache policies, but otherwise
	 * caches should look identical. Userspace JITs will make use of
	 * *minLine.
	 */
	taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
				      info->reg_ctr, boot->reg_ctr);

	/*
	 * Userspace may perform DC ZVA instructions. Mismatched block sizes
	 * could result in too much or too little memory being zeroed if a
	 * process is preempted and migrated between CPUs.
	 */
	taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
				      info->reg_dczid, boot->reg_dczid);

	/* If different, timekeeping will be broken (especially with KVM) */
	taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
				      info->reg_cntfrq, boot->reg_cntfrq);

	/*
	 * The kernel uses self-hosted debug features and expects CPUs to
	 * support identical debug features. We presently need CTX_CMPs, WRPs,
	 * and BRPs to be identical.
	 * ID_AA64DFR1 is currently RES0.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
				      info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
				      info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);
	/*
	 * Even in big.LITTLE, processors should be identical instruction-set
	 * wise.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
				      info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
				      info->reg_id_aa64isar1, boot->reg_id_aa64isar1);

	/*
	 * Differing PARange support is fine as long as all peripherals and
	 * memory are mapped within the minimum PARange of all CPUs.
	 * Linux should not care about secure memory.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
				      info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
				      info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);

	/*
	 * EL3 is not our concern.
	 * ID_AA64PFR1 is currently RES0.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
				      info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
				      info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);

	/*
	 * If we have AArch32, we care about 32-bit features for compat. These
	 * registers should be RES0 otherwise.
	 */
	taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
				      info->reg_id_dfr0, boot->reg_id_dfr0);
	taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
				      info->reg_id_isar0, boot->reg_id_isar0);
	taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
				      info->reg_id_isar1, boot->reg_id_isar1);
	taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
				      info->reg_id_isar2, boot->reg_id_isar2);
	taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
				      info->reg_id_isar3, boot->reg_id_isar3);
	taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
				      info->reg_id_isar4, boot->reg_id_isar4);
	taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
				      info->reg_id_isar5, boot->reg_id_isar5);

	/*
	 * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
	 * ACTLR formats could differ across CPUs and therefore would have to
	 * be trapped for virtualization anyway.
	 */
	taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
				      info->reg_id_mmfr0, boot->reg_id_mmfr0);
	taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
				      info->reg_id_mmfr1, boot->reg_id_mmfr1);
	taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
				      info->reg_id_mmfr2, boot->reg_id_mmfr2);
	taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
				      info->reg_id_mmfr3, boot->reg_id_mmfr3);
	taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
				      info->reg_id_pfr0, boot->reg_id_pfr0);
	taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
				      info->reg_id_pfr1, boot->reg_id_pfr1);
	taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
				      info->reg_mvfr0, boot->reg_mvfr0);
	taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
				      info->reg_mvfr1, boot->reg_mvfr1);
	taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
				      info->reg_mvfr2, boot->reg_mvfr2);

	/*
	 * Mismatched CPU features are a recipe for disaster. Don't even
	 * pretend to support them.
	 */
	WARN_TAINT_ONCE(taint, TAINT_CPU_OUT_OF_SPEC,
			"Unsupported CPU feature variation.\n");
}

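/*
 * Return the sanitised, system-wide value of a feature register. For
 * example (illustrative), a caller could query the sanitised ISA register
 * with read_system_reg(SYS_ID_AA64ISAR0_EL1).
 */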
u64 read_system_reg(u32 id)
{
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);

	/* We shouldn't get a request for an unsupported register */
	BUG_ON(!regp);
	return regp->sys_val;
}

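/* True if the field at @entry->field_pos in @reg meets the required minimum value. */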
static bool
feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
{
	int val = cpuid_feature_extract_field(reg, entry->field_pos);

	return val >= entry->min_field_value;
}

#define __ID_FEAT_CHK(reg)						\
static bool __maybe_unused						\
has_##reg##_feature(const struct arm64_cpu_capabilities *entry)		\
{									\
	u64 val;							\
									\
	val = read_cpuid(reg##_el1);					\
	return feature_matches(val, entry);				\
}

__ID_FEAT_CHK(id_aa64pfr0);
__ID_FEAT_CHK(id_aa64mmfr1);
__ID_FEAT_CHK(id_aa64isar0);

static const struct arm64_cpu_capabilities arm64_features[] = {
	{
		.desc = "GIC system register CPU interface",
		.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
		.matches = has_id_aa64pfr0_feature,
		.field_pos = 24,
		.min_field_value = 1,
	},
#ifdef CONFIG_ARM64_PAN
	{
		.desc = "Privileged Access Never",
		.capability = ARM64_HAS_PAN,
		.matches = has_id_aa64mmfr1_feature,
		.field_pos = 20,
		.min_field_value = 1,
		.enable = cpu_enable_pan,
	},
#endif /* CONFIG_ARM64_PAN */
#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
	{
		.desc = "LSE atomic instructions",
		.capability = ARM64_HAS_LSE_ATOMICS,
		.matches = has_id_aa64isar0_feature,
		.field_pos = 20,
		.min_field_value = 2,
	},
#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
	{},
};

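/*
 * Walk a capability table and record every capability whose matches()
 * callback reports that this CPU supports it, logging each one the first
 * time it is detected.
 */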
void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
			     const char *info)
{
	int i;

	for (i = 0; caps[i].desc; i++) {
		if (!caps[i].matches(&caps[i]))
			continue;

		if (!cpus_have_cap(caps[i].capability))
			pr_info("%s %s\n", info, caps[i].desc);
		cpus_set_cap(caps[i].capability);
	}
}

/*
 * Run through the enabled capabilities and invoke the enable() callback
 * for each one on this CPU.
 */
void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
{
	int i;

	for (i = 0; caps[i].desc; i++) {
		if (cpus_have_cap(caps[i].capability) && caps[i].enable)
			caps[i].enable();
	}
}

void check_local_cpu_features(void)
{
	update_cpu_capabilities(arm64_features, "detected feature:");
	enable_cpu_capabilities(arm64_features);
}

void __init setup_cpu_features(void)
{
	u64 features;
	s64 block;
	u32 cwg;
	int cls;

	/*
	 * Check for sane CTR_EL0.CWG value.
	 */
	cwg = cache_type_cwg();
	cls = cache_line_size();
	if (!cwg)
		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
			cls);
	if (L1_CACHE_BYTES < cls)
		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
			L1_CACHE_BYTES, cls);

	/*
	 * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
	 * The blocks we test below represent incremental functionality
	 * for non-negative values. Negative values are reserved.
	 */
	features = read_cpuid(ID_AA64ISAR0_EL1);
	block = cpuid_feature_extract_field(features, 4);
	if (block > 0) {
		switch (block) {
		default:
		case 2:
			elf_hwcap |= HWCAP_PMULL;
		case 1:
			elf_hwcap |= HWCAP_AES;
		case 0:
			break;
		}
	}

	if (cpuid_feature_extract_field(features, 8) > 0)
		elf_hwcap |= HWCAP_SHA1;

	if (cpuid_feature_extract_field(features, 12) > 0)
		elf_hwcap |= HWCAP_SHA2;

	if (cpuid_feature_extract_field(features, 16) > 0)
		elf_hwcap |= HWCAP_CRC32;

	block = cpuid_feature_extract_field(features, 20);
	if (block > 0) {
		switch (block) {
		default:
		case 2:
			elf_hwcap |= HWCAP_ATOMICS;
		case 1:
			/* RESERVED */
		case 0:
			break;
		}
	}

#ifdef CONFIG_COMPAT
	/*
	 * ID_ISAR5_EL1 carries similar information as above, but pertaining to
	 * the AArch32 32-bit execution state.
	 */
	features = read_cpuid(ID_ISAR5_EL1);
	block = cpuid_feature_extract_field(features, 4);
	if (block > 0) {
		switch (block) {
		default:
		case 2:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
		case 1:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
		case 0:
			break;
		}
	}

	if (cpuid_feature_extract_field(features, 8) > 0)
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;

	if (cpuid_feature_extract_field(features, 12) > 0)
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;

	if (cpuid_feature_extract_field(features, 16) > 0)
		compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
#endif
}