/*
 * Contains CPU feature definitions
 *
 * Copyright (C) 2015 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) "CPU features: " fmt

#include <linux/bsearch.h>
#include <linux/sort.h>
#include <linux/types.h>

#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/processor.h>
#include <asm/sysreg.h>
unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);
#ifdef CONFIG_COMPAT
#define COMPAT_ELF_HWCAP_DEFAULT	\
				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
				 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
				 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
				 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif
DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)	\
	{							\
		.strict = STRICT,				\
		.type = TYPE,					\
		.shift = SHIFT,					\
		.width = WIDTH,					\
		.safe_val = SAFE_VAL,				\
	}

#define ARM64_FTR_END						\
	{							\
		.width = 0,					\
	}
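/*
 * For illustration only: under the field layout assumed above, an entry
 * such as
 *
 *	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0)
 *
 * expands to
 *
 *	{ .strict = FTR_STRICT, .type = FTR_LOWER_SAFE,
 *	  .shift = 20, .width = 4, .safe_val = 0 }
 *
 * i.e. a 4-bit field at bits [23:20] whose mismatches must be strictly
 * checked across CPUs and whose safe fallback value is 0.
 */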
static struct arm64_ftr_bits ftr_id_aa64isar0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 24, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),	/* RAZ */
	ARM64_FTR_END,
};
static struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
	/* Linux doesn't care about EL3 */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64PFR0_EL3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
	ARM64_FTR_END,
};
static struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
	/* Linux shouldn't care about secure memory */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
	/*
	 * Differing PARange is fine as long as all peripherals and memory are
	 * mapped within the minimum PARange of all CPUs.
	 */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
	ARM64_FTR_END,
};
static struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
	ARM64_FTR_END,
};
static struct arm64_ftr_bits ftr_ctr[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),	/* RAO */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 3, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),	/* CWG */
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* ERG */
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1),	/* DminLine */
	/*
	 * Linux can handle differing I-cache policies. Userspace JITs will
	 * make use of *minLine.
	 */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, 0),	/* L1Ip */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* IminLine */
	ARM64_FTR_END,
};
static struct arm64_ftr_bits ftr_id_mmfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0),	/* InnerShr */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 24, 4, 0),	/* FCSE */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* AuxReg */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 16, 4, 0),	/* TCM */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 12, 4, 0),	/* ShareLvl */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 4, 0),	/* OuterShr */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),	/* PMSA */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),	/* VMSA */
	ARM64_FTR_END,
};
static struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
	ARM64_FTR_END,
};
static struct arm64_ftr_bits ftr_mvfr2[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 24, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),		/* FPMisc */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),		/* SIMDMisc */
	ARM64_FTR_END,
};
static struct arm64_ftr_bits ftr_dczid[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 5, 27, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 1, 1),		/* DZP */
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* BS */
	ARM64_FTR_END,
};
static struct arm64_ftr_bits ftr_id_isar5[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_RDM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 20, 4, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_CRC32_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_AES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SEVL_SHIFT, 4, 0),
	ARM64_FTR_END,
};
static struct arm64_ftr_bits ftr_id_mmfr4[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 24, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),		/* ac2 */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),		/* RAZ */
	ARM64_FTR_END,
};
static struct arm64_ftr_bits ftr_id_pfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 16, 16, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 12, 4, 0),	/* State3 */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 4, 0),		/* State2 */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),		/* State1 */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),		/* State0 */
	ARM64_FTR_END,
};
/*
 * Common ftr bits for a 32bit register with all hidden, strict
 * attributes, with 4bit feature fields and a default safe value of
 * 0. Covers the following 32bit registers:
 * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
 */
static struct arm64_ftr_bits ftr_generic_32bits[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
	ARM64_FTR_END,
};
static struct arm64_ftr_bits ftr_generic[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 64, 0),
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_generic32[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 32, 0),
	ARM64_FTR_END,
};

static struct arm64_ftr_bits ftr_aa64raz[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 64, 0),
	ARM64_FTR_END,
};
#define ARM64_FTR_REG(id, table)		\
	{					\
		.sys_id = id,			\
		.name = #id,			\
		.ftr_bits = &((table)[0]),	\
	}
static struct arm64_ftr_reg arm64_ftr_regs[] = {

	/* Op1 = 0, CRn = 0, CRm = 1 */
	ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
	ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
	ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),

	/* Op1 = 0, CRn = 0, CRm = 2 */
	ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
	ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),

	/* Op1 = 0, CRn = 0, CRm = 3 */
	ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),

	/* Op1 = 0, CRn = 0, CRm = 4 */
	ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
	ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_aa64raz),

	/* Op1 = 0, CRn = 0, CRm = 5 */
	ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
	ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_generic),

	/* Op1 = 0, CRn = 0, CRm = 6 */
	ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
	ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_aa64raz),

	/* Op1 = 0, CRn = 0, CRm = 7 */
	ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
	ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),

	/* Op1 = 3, CRn = 0, CRm = 0 */
	ARM64_FTR_REG(SYS_CTR_EL0, ftr_ctr),
	ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),

	/* Op1 = 3, CRn = 14, CRm = 0 */
	ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_generic32),
};
static int search_cmp_ftr_reg(const void *id, const void *regp)
{
	return (int)(unsigned long)id -
		(int)((const struct arm64_ftr_reg *)regp)->sys_id;
}
/*
 * get_arm64_ftr_reg - Lookup a feature register entry using its
 * sys_reg() encoding. With the array arm64_ftr_regs sorted in the
 * ascending order of sys_id, we use binary search to find a matching
 * entry.
 *
 * returns - Upon success, matching ftr_reg entry for id.
 *	   - NULL on failure. It is up to the caller to decide
 *	     the impact of a failure.
 */
static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
{
	return bsearch((const void *)(unsigned long)sys_id,
			arm64_ftr_regs,
			ARRAY_SIZE(arm64_ftr_regs),
			sizeof(arm64_ftr_regs[0]),
			search_cmp_ftr_reg);
}
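/*
 * Illustrative lookup: get_arm64_ftr_reg(SYS_CTR_EL0) binary-searches
 * the (sorted) arm64_ftr_regs array and returns the entry wired to
 * ftr_ctr; an encoding that is not in the table yields NULL.
 */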
static u64 arm64_ftr_set_value(struct arm64_ftr_bits *ftrp, s64 reg, s64 ftr_val)
{
	u64 mask = arm64_ftr_mask(ftrp);

	reg &= ~mask;
	reg |= (ftr_val << ftrp->shift) & mask;
	return reg;
}
static s64 arm64_ftr_safe_value(struct arm64_ftr_bits *ftrp, s64 new, s64 cur)
{
	s64 ret = 0;

	switch (ftrp->type) {
	case FTR_EXACT:
		ret = ftrp->safe_val;
		break;
	case FTR_LOWER_SAFE:
		ret = new < cur ? new : cur;
		break;
	case FTR_HIGHER_SAFE:
		ret = new > cur ? new : cur;
		break;
	default:
		BUG();
	}

	return ret;
}
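/*
 * Worked example (illustrative): for a FTR_LOWER_SAFE field such as
 * CTR_EL0.DminLine, arm64_ftr_safe_value(ftrp, 4, 6) returns 4, the
 * smaller and therefore safer value. For the FTR_HIGHER_SAFE CWG field
 * the larger value wins, and for FTR_EXACT any mismatch collapses to
 * ftrp->safe_val.
 */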
static int __init sort_cmp_ftr_regs(const void *a, const void *b)
{
	return ((const struct arm64_ftr_reg *)a)->sys_id -
		((const struct arm64_ftr_reg *)b)->sys_id;
}
static void __init swap_ftr_regs(void *a, void *b, int size)
{
	struct arm64_ftr_reg tmp = *(struct arm64_ftr_reg *)a;

	*(struct arm64_ftr_reg *)a = *(struct arm64_ftr_reg *)b;
	*(struct arm64_ftr_reg *)b = tmp;
}
static void __init sort_ftr_regs(void)
{
	/* Keep the array sorted so that we can do the binary search */
	sort(arm64_ftr_regs,
		ARRAY_SIZE(arm64_ftr_regs),
		sizeof(arm64_ftr_regs[0]),
		sort_cmp_ftr_regs,
		swap_ftr_regs);
}
/*
 * Initialise the CPU feature register from Boot CPU values.
 * Also initialises the strict_mask for the register.
 */
static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
{
	u64 val = 0;
	u64 strict_mask = ~0x0ULL;
	struct arm64_ftr_bits *ftrp;
	struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);

	BUG_ON(!reg);

	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
		s64 ftr_new = arm64_ftr_value(ftrp, new);

		val = arm64_ftr_set_value(ftrp, val, ftr_new);
		if (!ftrp->strict)
			strict_mask &= ~arm64_ftr_mask(ftrp);
	}
	reg->sys_val = val;
	reg->strict_mask = strict_mask;
}
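/*
 * Sketch of the resulting strict_mask (illustrative): all bits start
 * set and each FTR_NONSTRICT field clears its own bits. In
 * ftr_id_aa64pfr0 above, the EL3 nibble is FTR_NONSTRICT, so bits
 * [15:12] end up clear and a cross-CPU mismatch in EL3 support will
 * not trip the SANITY CHECK in check_update_ftr_reg().
 */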
void __init init_cpu_features(struct cpuinfo_arm64 *info)
{
	/* Before we start using the table, make sure it is sorted */
	sort_ftr_regs();

	init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
	init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
	init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
	init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
	init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
	init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
	init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
	init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
	init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
	init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
	init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
	init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
	init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
	init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
	init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
	init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
	init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
	init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
	init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
	init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
	init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
	init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
	init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
}
static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
{
	struct arm64_ftr_bits *ftrp;

	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
		s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
		s64 ftr_new = arm64_ftr_value(ftrp, new);

		if (ftr_cur == ftr_new)
			continue;

		/* Find a safe value */
		ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
		reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
	}
}
static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
{
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);

	BUG_ON(!regp);
	update_cpu_ftr_reg(regp, val);
	if ((boot & regp->strict_mask) == (val & regp->strict_mask))
		return 0;
	pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
			regp->name, boot, cpu, val);
	return 1;
}
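/*
 * Example flow (illustrative): if the boot CPU reported CTR_EL0.CWG = 4
 * and a secondary reports CWG = 5, update_cpu_ftr_reg() raises the
 * system-wide value to 5 (CWG is FTR_HIGHER_SAFE), but since the field
 * is also FTR_STRICT the mismatch makes this function return 1, which
 * taints the kernel via update_cpu_features() below.
 */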
/*
 * Update system wide CPU feature registers with the values from a
 * non-boot CPU. Also performs SANITY checks to make sure that there
 * aren't any insane variations from that of the boot CPU.
 */
void update_cpu_features(int cpu,
			 struct cpuinfo_arm64 *info,
			 struct cpuinfo_arm64 *boot)
{
	int taint = 0;
	/*
	 * The kernel can handle differing I-cache policies, but otherwise
	 * caches should look identical. Userspace JITs will make use of
	 * *minLine.
	 */
	taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
				      info->reg_ctr, boot->reg_ctr);

	/*
	 * Userspace may perform DC ZVA instructions. Mismatched block sizes
	 * could result in too much or too little memory being zeroed if a
	 * process is preempted and migrated between CPUs.
	 */
	taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
				      info->reg_dczid, boot->reg_dczid);

	/* If different, timekeeping will be broken (especially with KVM) */
	taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
				      info->reg_cntfrq, boot->reg_cntfrq);

	/*
	 * The kernel uses self-hosted debug features and expects CPUs to
	 * support identical debug features. We presently need CTX_CMPs, WRPs,
	 * and BRPs to be identical.
	 * ID_AA64DFR1 is currently RES0.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
				      info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
				      info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);

	/*
	 * Even in big.LITTLE, processors should be identical
	 * instruction-set wise.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
				      info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
				      info->reg_id_aa64isar1, boot->reg_id_aa64isar1);

	/*
	 * Differing PARange support is fine as long as all peripherals and
	 * memory are mapped within the minimum PARange of all CPUs.
	 * Linux should not care about secure memory.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
				      info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
				      info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);

	/*
	 * EL3 is not our concern.
	 * ID_AA64PFR1 is currently RES0.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
				      info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
				      info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);

	/*
	 * If we have AArch32, we care about 32-bit features for compat. These
	 * registers should be RES0 otherwise.
	 */
	taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
				      info->reg_id_dfr0, boot->reg_id_dfr0);
	taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
				      info->reg_id_isar0, boot->reg_id_isar0);
	taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
				      info->reg_id_isar1, boot->reg_id_isar1);
	taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
				      info->reg_id_isar2, boot->reg_id_isar2);
	taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
				      info->reg_id_isar3, boot->reg_id_isar3);
	taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
				      info->reg_id_isar4, boot->reg_id_isar4);
	taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
				      info->reg_id_isar5, boot->reg_id_isar5);

	/*
	 * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
	 * ACTLR formats could differ across CPUs and therefore would have to
	 * be trapped for virtualization anyway.
	 */
	taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
				      info->reg_id_mmfr0, boot->reg_id_mmfr0);
	taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
				      info->reg_id_mmfr1, boot->reg_id_mmfr1);
	taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
				      info->reg_id_mmfr2, boot->reg_id_mmfr2);
	taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
				      info->reg_id_mmfr3, boot->reg_id_mmfr3);
	taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
				      info->reg_id_pfr0, boot->reg_id_pfr0);
	taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
				      info->reg_id_pfr1, boot->reg_id_pfr1);
	taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
				      info->reg_mvfr0, boot->reg_mvfr0);
	taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
				      info->reg_mvfr1, boot->reg_mvfr1);
	taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
				      info->reg_mvfr2, boot->reg_mvfr2);

	/*
	 * Mismatched CPU features are a recipe for disaster. Don't even
	 * pretend to support them.
	 */
	WARN_TAINT_ONCE(taint, TAINT_CPU_OUT_OF_SPEC,
			"Unsupported CPU feature variation.\n");
}
u64 read_system_reg(u32 id)
{
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);

	/* We shouldn't get a request for an unsupported register */
	BUG_ON(!regp);
	return regp->sys_val;
}
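/*
 * Usage sketch (illustrative): callers read the system-wide sanitised
 * value instead of the local CPU's register, e.g.
 *
 *	u64 isar0 = read_system_reg(SYS_ID_AA64ISAR0_EL1);
 *
 * so a feature is only visible if every CPU seen so far supports it.
 */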
#include <linux/irqchip/arm-gic-v3.h>

static bool
feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
{
	int val = cpuid_feature_extract_field(reg, entry->field_pos);

	return val >= entry->min_field_value;
}
static bool
has_cpuid_feature(const struct arm64_cpu_capabilities *entry)
{
	u64 val;

	val = read_system_reg(entry->sys_reg);
	return feature_matches(val, entry);
}
static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry)
{
	bool has_sre;

	if (!has_cpuid_feature(entry))
		return false;

	has_sre = gic_enable_sre();
	if (!has_sre)
		pr_warn_once("%s present but disabled by higher exception level\n",
			     entry->desc);

	return has_sre;
}
static const struct arm64_cpu_capabilities arm64_features[] = {
	{
		.desc = "GIC system register CPU interface",
		.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
		.matches = has_useable_gicv3_cpuif,
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.field_pos = ID_AA64PFR0_GIC_SHIFT,
		.min_field_value = 1,
	},
#ifdef CONFIG_ARM64_PAN
	{
		.desc = "Privileged Access Never",
		.capability = ARM64_HAS_PAN,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64MMFR1_EL1,
		.field_pos = ID_AA64MMFR1_PAN_SHIFT,
		.min_field_value = 1,
		.enable = cpu_enable_pan,
	},
#endif /* CONFIG_ARM64_PAN */
#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
	{
		.desc = "LSE atomic instructions",
		.capability = ARM64_HAS_LSE_ATOMICS,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64ISAR0_EL1,
		.field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
		.min_field_value = 2,
	},
#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
	{},
};
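/*
 * Growing the table above means appending one more entry before the
 * empty terminator. A hypothetical sketch (the capability name and
 * field position below are made up purely for illustration):
 *
 *	{
 *		.desc = "Some new feature",
 *		.capability = ARM64_HAS_SOME_FEATURE,
 *		.matches = has_cpuid_feature,
 *		.sys_reg = SYS_ID_AA64ISAR0_EL1,
 *		.field_pos = 24,
 *		.min_field_value = 1,
 *	},
 */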
#define HWCAP_CAP(reg, field, min_value, type, cap)	\
	{						\
		.desc = #cap,				\
		.matches = has_cpuid_feature,		\
		.sys_reg = reg,				\
		.field_pos = field,			\
		.min_field_value = min_value,		\
		.hwcap_type = type,			\
		.hwcap = cap,				\
	}
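/*
 * For example (illustrative expansion), the HWCAP_AES entry below is
 * shorthand for a capability that sets HWCAP_AES in elf_hwcap when the
 * sanitised ID_AA64ISAR0_EL1.AES field is at least 1:
 *
 *	{ .desc = "HWCAP_AES", .matches = has_cpuid_feature,
 *	  .sys_reg = SYS_ID_AA64ISAR0_EL1,
 *	  .field_pos = ID_AA64ISAR0_AES_SHIFT, .min_field_value = 1,
 *	  .hwcap_type = CAP_HWCAP, .hwcap = HWCAP_AES }
 */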
static const struct arm64_cpu_capabilities arm64_hwcaps[] = {
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, 2, CAP_HWCAP, HWCAP_PMULL),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, 1, CAP_HWCAP, HWCAP_AES),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, 1, CAP_HWCAP, HWCAP_SHA1),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, 1, CAP_HWCAP, HWCAP_SHA2),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, 1, CAP_HWCAP, HWCAP_CRC32),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, 2, CAP_HWCAP, HWCAP_ATOMICS),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, 0, CAP_HWCAP, HWCAP_FP),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, 0, CAP_HWCAP, HWCAP_ASIMD),
#ifdef CONFIG_COMPAT
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
#endif
	{},
};
static void cap_set_hwcap(const struct arm64_cpu_capabilities *cap)
{
	switch (cap->hwcap_type) {
	case CAP_HWCAP:
		elf_hwcap |= cap->hwcap;
		break;
#ifdef CONFIG_COMPAT
	case CAP_COMPAT_HWCAP:
		compat_elf_hwcap |= (u32)cap->hwcap;
		break;
	case CAP_COMPAT_HWCAP2:
		compat_elf_hwcap2 |= (u32)cap->hwcap;
		break;
#endif
	default:
		WARN_ON(1);
		break;
	}
}
/* Check if we have a particular HWCAP enabled */
static bool cpus_have_hwcap(const struct arm64_cpu_capabilities *cap)
{
	bool rc;

	switch (cap->hwcap_type) {
	case CAP_HWCAP:
		rc = (elf_hwcap & cap->hwcap) != 0;
		break;
#ifdef CONFIG_COMPAT
	case CAP_COMPAT_HWCAP:
		rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
		break;
	case CAP_COMPAT_HWCAP2:
		rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
		break;
#endif
	default:
		WARN_ON(1);
		rc = false;
	}

	return rc;
}
static void setup_cpu_hwcaps(void)
{
	int i;
	const struct arm64_cpu_capabilities *hwcaps = arm64_hwcaps;

	for (i = 0; hwcaps[i].desc; i++)
		if (hwcaps[i].matches(&hwcaps[i]))
			cap_set_hwcap(&hwcaps[i]);
}
void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
			     const char *info)
{
	int i;

	for (i = 0; caps[i].desc; i++) {
		if (!caps[i].matches(&caps[i]))
			continue;

		if (!cpus_have_cap(caps[i].capability))
			pr_info("%s %s\n", info, caps[i].desc);
		cpus_set_cap(caps[i].capability);
	}
}
/*
 * Run through the enabled capabilities and call enable() for each one
 * on all active CPUs.
 */
static void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
{
	int i;

	for (i = 0; caps[i].desc; i++)
		if (caps[i].enable && cpus_have_cap(caps[i].capability))
			on_each_cpu(caps[i].enable, NULL, true);
}
#ifdef CONFIG_HOTPLUG_CPU

/*
 * Flag to indicate if we have computed the system wide
 * capabilities based on the boot time active CPUs. This
 * will be used to determine if a new booting CPU should
 * go through the verification process to make sure that it
 * supports the system capabilities, without using a hotplug
 * notifier.
 */
static bool sys_caps_initialised;

static inline void set_sys_caps_initialised(void)
{
	sys_caps_initialised = true;
}
/*
 * __raw_read_system_reg() - Used by a STARTING cpu before cpuinfo is populated.
 */
static u64 __raw_read_system_reg(u32 sys_id)
{
	switch (sys_id) {
	case SYS_ID_PFR0_EL1:		return (u64)read_cpuid(ID_PFR0_EL1);
	case SYS_ID_PFR1_EL1:		return (u64)read_cpuid(ID_PFR1_EL1);
	case SYS_ID_DFR0_EL1:		return (u64)read_cpuid(ID_DFR0_EL1);
	case SYS_ID_MMFR0_EL1:		return (u64)read_cpuid(ID_MMFR0_EL1);
	case SYS_ID_MMFR1_EL1:		return (u64)read_cpuid(ID_MMFR1_EL1);
	case SYS_ID_MMFR2_EL1:		return (u64)read_cpuid(ID_MMFR2_EL1);
	case SYS_ID_MMFR3_EL1:		return (u64)read_cpuid(ID_MMFR3_EL1);
	case SYS_ID_ISAR0_EL1:		return (u64)read_cpuid(ID_ISAR0_EL1);
	case SYS_ID_ISAR1_EL1:		return (u64)read_cpuid(ID_ISAR1_EL1);
	case SYS_ID_ISAR2_EL1:		return (u64)read_cpuid(ID_ISAR2_EL1);
	case SYS_ID_ISAR3_EL1:		return (u64)read_cpuid(ID_ISAR3_EL1);
	case SYS_ID_ISAR4_EL1:		return (u64)read_cpuid(ID_ISAR4_EL1);
	case SYS_ID_ISAR5_EL1:		return (u64)read_cpuid(ID_ISAR5_EL1);
	case SYS_MVFR0_EL1:		return (u64)read_cpuid(MVFR0_EL1);
	case SYS_MVFR1_EL1:		return (u64)read_cpuid(MVFR1_EL1);
	case SYS_MVFR2_EL1:		return (u64)read_cpuid(MVFR2_EL1);

	case SYS_ID_AA64PFR0_EL1:	return (u64)read_cpuid(ID_AA64PFR0_EL1);
	case SYS_ID_AA64PFR1_EL1:	return (u64)read_cpuid(ID_AA64PFR1_EL1);
	case SYS_ID_AA64DFR0_EL1:	return (u64)read_cpuid(ID_AA64DFR0_EL1);
	case SYS_ID_AA64DFR1_EL1:	return (u64)read_cpuid(ID_AA64DFR1_EL1);
	case SYS_ID_AA64MMFR0_EL1:	return (u64)read_cpuid(ID_AA64MMFR0_EL1);
	case SYS_ID_AA64MMFR1_EL1:	return (u64)read_cpuid(ID_AA64MMFR1_EL1);
	case SYS_ID_AA64ISAR0_EL1:	return (u64)read_cpuid(ID_AA64ISAR0_EL1);
	case SYS_ID_AA64ISAR1_EL1:	return (u64)read_cpuid(ID_AA64ISAR1_EL1);

	case SYS_CNTFRQ_EL0:		return (u64)read_cpuid(CNTFRQ_EL0);
	case SYS_CTR_EL0:		return (u64)read_cpuid(CTR_EL0);
	case SYS_DCZID_EL0:		return (u64)read_cpuid(DCZID_EL0);
	default:
		BUG();
		return 0;
	}
}
/*
 * Park the CPU which doesn't have the capability as advertised
 * by the system.
 */
static void fail_incapable_cpu(char *cap_type,
			       const struct arm64_cpu_capabilities *cap)
{
	int cpu = smp_processor_id();

	pr_crit("CPU%d: missing %s : %s\n", cpu, cap_type, cap->desc);
	/* Mark this CPU absent */
	set_cpu_present(cpu, 0);

	/* Check if we can park ourselves */
	if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_die)
		cpu_ops[cpu]->cpu_die(cpu);
	asm(
	"1:	wfe\n"
	"	wfi\n"
	"	b	1b");
}
/*
 * Run through the enabled system capabilities and call enable() for
 * each one on this CPU.
 * The capabilities were decided based on the available CPUs at the boot time.
 * Any new CPU should match the system wide status of the capability. If the
 * new CPU doesn't have a capability which the system now has enabled, we
 * cannot do anything to fix it up and could cause unexpected failures. So
 * we park the CPU.
 */
void verify_local_cpu_capabilities(void)
{
	int i;
	const struct arm64_cpu_capabilities *caps;

	/*
	 * If we haven't computed the system capabilities, there is nothing
	 * to verify.
	 */
	if (!sys_caps_initialised)
		return;

	caps = arm64_features;
	for (i = 0; caps[i].desc; i++) {
		if (!cpus_have_cap(caps[i].capability) || !caps[i].sys_reg)
			continue;
		/*
		 * If the new CPU misses an advertised feature, we cannot
		 * proceed further, park the cpu.
		 */
		if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i]))
			fail_incapable_cpu("arm64_features", &caps[i]);
		if (caps[i].enable)
			caps[i].enable(NULL);
	}

	for (i = 0, caps = arm64_hwcaps; caps[i].desc; i++) {
		if (!cpus_have_hwcap(&caps[i]))
			continue;
		if (!feature_matches(__raw_read_system_reg(caps[i].sys_reg), &caps[i]))
			fail_incapable_cpu("arm64_hwcaps", &caps[i]);
	}
}
#else	/* !CONFIG_HOTPLUG_CPU */

static inline void set_sys_caps_initialised(void)
{
}

#endif	/* CONFIG_HOTPLUG_CPU */
static void __init setup_feature_capabilities(void)
{
	update_cpu_capabilities(arm64_features, "detected feature:");
	enable_cpu_capabilities(arm64_features);
}
void __init setup_cpu_features(void)
{
	u32 cwg;
	int cls;

	/* Set the CPU feature capabilities */
	setup_feature_capabilities();
	setup_cpu_hwcaps();

	/* Advertise that we have computed the system capabilities */
	set_sys_caps_initialised();

	/*
	 * Check for sane CTR_EL0.CWG value.
	 */
	cwg = cache_type_cwg();
	cls = cache_line_size();
	if (!cwg)
		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
			cls);
	if (L1_CACHE_BYTES < cls)
		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
			L1_CACHE_BYTES, cls);
}