/*
 * Contains CPU feature definitions
 *
 * Copyright (C) 2015 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "CPU features: " fmt

#include <linux/bsearch.h>
#include <linux/cpumask.h>
#include <linux/sort.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);

#ifdef CONFIG_COMPAT
#define COMPAT_ELF_HWCAP_DEFAULT	\
				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
				 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
				 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
				 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcaps);

DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcap_keys);

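/*
 * Note (context, not from this file): capability checks elsewhere, e.g.
 * cpus_have_const_cap(ARM64_HAS_PAN), compile down to static branches
 * keyed off cpu_hwcap_keys[], which is why the key array is exported.
 */
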
#define __ARM64_FTR_BITS(SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	{						\
		.sign = SIGNED,				\
		.strict = STRICT,			\
		.type = TYPE,				\
		.shift = SHIFT,				\
		.width = WIDTH,				\
		.safe_val = SAFE_VAL,			\
	}

/* Define a feature with unsigned values */
#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	__ARM64_FTR_BITS(FTR_UNSIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)

/* Define a feature with a signed value */
#define S_ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	__ARM64_FTR_BITS(FTR_SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)

#define ARM64_FTR_END					\
	{						\
		.width = 0,				\
	}

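/*
 * For illustration only: ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0)
 * describes an unsigned, strictly-checked 4-bit field at bit 20 and expands
 * to:
 *
 *	{ .sign = FTR_UNSIGNED, .strict = FTR_STRICT, .type = FTR_LOWER_SAFE,
 *	  .shift = 20, .width = 4, .safe_val = 0 }
 */
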
/* meta feature for alternatives */
static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);

static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
	/* Linux doesn't care about EL3 */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64PFR0_EL3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
	/* Linux shouldn't care about secure memory */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
	/*
	 * Differing PARange is fine as long as all peripherals and memory are
	 * mapped within the minimum PARange of all CPUs
	 */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_ctr[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),	/* RAO */
	ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),	/* CWG */
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* ERG */
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1),	/* DminLine */
	/*
	 * Linux can handle differing I-cache policies. Userspace JITs will
	 * make use of *minLine.
	 * If we have differing I-cache policies, report it as the weakest - AIVIVT.
	 */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_AIVIVT),	/* L1Ip */
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* IminLine */
	ARM64_FTR_END,
};

struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
	.name		= "SYS_CTR_EL0",
	.ftr_bits	= ftr_ctr
};

static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0xf),	/* InnerShr */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 24, 4, 0),	/* FCSE */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* AuxReg */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 16, 4, 0),	/* TCM */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 12, 4, 0),	/* ShareLvl */
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 4, 0xf),	/* OuterShr */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),		/* PMSA */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),		/* VMSA */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 36, 28, 0),
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
	/*
	 * We can instantiate multiple PMU instances with different levels
	 * of support.
	 */
	S_ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_mvfr2[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),		/* FPMisc */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),		/* SIMDMisc */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_dczid[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 1, 1),		/* DZP */
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* BS */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_isar5[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_RDM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_CRC32_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_AES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SEVL_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),		/* ac2 */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_pfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 12, 4, 0),	/* State3 */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 4, 0),		/* State2 */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),		/* State1 */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),		/* State0 */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_dfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf),	/* PerfMon */
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
	ARM64_FTR_END,
};

/*
 * Common ftr bits for a 32bit register with all hidden, strict
 * attributes, with 4bit feature fields and a default safe value of
 * 0. Covers the following 32bit registers:
 * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
 */
static const struct arm64_ftr_bits ftr_generic_32bits[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_generic[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 64, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_generic32[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 32, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_aa64raz[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 64, 0),
	ARM64_FTR_END,
};

#define ARM64_FTR_REG(id, table) {		\
	.sys_id = id,				\
	.reg = &(struct arm64_ftr_reg){		\
		.name = #id,			\
		.ftr_bits = &((table)[0]),	\
	}}

static const struct __ftr_reg_entry {
	u32			sys_id;
	struct arm64_ftr_reg	*reg;
} arm64_ftr_regs[] = {

	/* Op1 = 0, CRn = 0, CRm = 1 */
	ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
	ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0),
	ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
	ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),

	/* Op1 = 0, CRn = 0, CRm = 2 */
	ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
	ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),

	/* Op1 = 0, CRn = 0, CRm = 3 */
	ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),

	/* Op1 = 0, CRn = 0, CRm = 4 */
	ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
	ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_aa64raz),

	/* Op1 = 0, CRn = 0, CRm = 5 */
	ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
	ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_generic),

	/* Op1 = 0, CRn = 0, CRm = 6 */
	ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
	ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_aa64raz),

	/* Op1 = 0, CRn = 0, CRm = 7 */
	ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
	ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
	ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),

	/* Op1 = 3, CRn = 0, CRm = 0 */
	{ SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
	ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),

	/* Op1 = 3, CRn = 14, CRm = 0 */
	ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_generic32),
};

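/*
 * bsearch() comparator: 'id' is the key (a sys_reg() encoding) and 'regp'
 * points at an arm64_ftr_regs[] entry.
 */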
static int search_cmp_ftr_reg(const void *id, const void *regp)
{
	return (int)(unsigned long)id - (int)((const struct __ftr_reg_entry *)regp)->sys_id;
}

/*
 * get_arm64_ftr_reg - Lookup a feature register entry using its
 * sys_reg() encoding. With the array arm64_ftr_regs sorted in the
 * ascending order of sys_id, we use binary search to find a matching
 * entry.
 *
 * returns - Upon success, matching ftr_reg entry for id.
 *	   - NULL on failure. It is up to the caller to decide
 *	     the impact of a failure.
 */
static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
{
	const struct __ftr_reg_entry *ret;

	ret = bsearch((const void *)(unsigned long)sys_id,
			arm64_ftr_regs,
			ARRAY_SIZE(arm64_ftr_regs),
			sizeof(arm64_ftr_regs[0]),
			search_cmp_ftr_reg);
	if (ret)
		return ret->reg;
	return NULL;
}

static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg,
			       s64 ftr_val)
{
	u64 mask = arm64_ftr_mask(ftrp);

	reg &= ~mask;
	reg |= (ftr_val << ftrp->shift) & mask;
	return reg;
}

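/*
 * Worked example (illustrative): arm64_ftr_mask() (see <asm/cpufeature.h>)
 * covers bits [shift + width - 1 : shift]. For shift = 4, width = 4 the
 * mask is 0xf0, so arm64_ftr_set_value(ftrp, 0x120, 0x7) yields 0x170.
 */
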
static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
				s64 cur)
{
	s64 ret = 0;

	switch (ftrp->type) {
	case FTR_EXACT:
		ret = ftrp->safe_val;
		break;
	case FTR_LOWER_SAFE:
		ret = new < cur ? new : cur;
		break;
	case FTR_HIGHER_SAFE:
		ret = new > cur ? new : cur;
		break;
	default:
		BUG();
	}

	return ret;
}

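/*
 * For example: for a FTR_LOWER_SAFE field such as CTR_EL0.IminLine, the
 * safe value of new = 4 vs cur = 5 is 4 - the smaller line size is safe to
 * advertise to everyone. For FTR_EXACT, any mismatch simply falls back to
 * the table's safe_val.
 */
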
static void __init sort_ftr_regs(void)
{
	int i;

	/* Check that the array is sorted so that we can do the binary search */
	for (i = 1; i < ARRAY_SIZE(arm64_ftr_regs); i++)
		BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
}

/*
 * Initialise the CPU feature register from Boot CPU values.
 * Also initialises the strict_mask for the register.
 * Any bits that are not covered by an arm64_ftr_bits entry are considered
 * RES0 for the system-wide value, and must strictly match.
 */
static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
{
	u64 val = 0;
	u64 strict_mask = ~0x0ULL;
	u64 valid_mask = 0;

	const struct arm64_ftr_bits *ftrp;
	struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);

	BUG_ON(!reg);

	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
		u64 ftr_mask = arm64_ftr_mask(ftrp);
		s64 ftr_new = arm64_ftr_value(ftrp, new);

		val = arm64_ftr_set_value(ftrp, val, ftr_new);

		valid_mask |= ftr_mask;
		if (!ftrp->strict)
			strict_mask &= ~ftr_mask;
	}

	val &= valid_mask;

	reg->sys_val = val;
	reg->strict_mask = strict_mask;
}

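/*
 * Illustrative: if a register's only FTR_NONSTRICT field occupies bits
 * [3:0], strict_mask ends up as ~0xfULL, so later sanity checks ignore
 * mismatches in that field but flag any other cross-CPU difference.
 */
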
void __init init_cpu_features(struct cpuinfo_arm64 *info)
{
	/* Before we start using the tables, make sure it is sorted */
	sort_ftr_regs();

	init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
	init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
	init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
	init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
	init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
	init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
	init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);

	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
		init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
		init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
		init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
		init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
		init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
		init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
		init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
		init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
		init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
		init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
		init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
		init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
		init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
		init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
		init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
		init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
	}
}

static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
{
	const struct arm64_ftr_bits *ftrp;

	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
		s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
		s64 ftr_new = arm64_ftr_value(ftrp, new);

		if (ftr_cur == ftr_new)
			continue;
		/* Find a safe value */
		ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
		reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
	}
}

static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
{
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);

	BUG_ON(!regp);
	update_cpu_ftr_reg(regp, val);
	if ((boot & regp->strict_mask) == (val & regp->strict_mask))
		return 0;
	pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
			regp->name, boot, cpu, val);
	return 1;
}

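/*
 * With the "CPU features: " pr_fmt prefix above, a strict-field mismatch is
 * logged as, e.g. (values illustrative):
 *
 *   CPU features: SANITY CHECK: Unexpected variation in SYS_CTR_EL0.
 *   Boot CPU: 0x000000008444c004, CPU1: 0x000000008444c000
 *
 * and the non-zero return value lets the caller taint the kernel.
 */
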
/*
 * Update system wide CPU feature registers with the values from a
 * non-boot CPU. Also performs SANITY checks to make sure that there
 * aren't any insane variations from that of the boot CPU.
 */
void update_cpu_features(int cpu,
			 struct cpuinfo_arm64 *info,
			 struct cpuinfo_arm64 *boot)
{
	int taint = 0;

	/*
	 * The kernel can handle differing I-cache policies, but otherwise
	 * caches should look identical. Userspace JITs will make use of
	 * *minLine.
	 */
	taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
				      info->reg_ctr, boot->reg_ctr);

	/*
	 * Userspace may perform DC ZVA instructions. Mismatched block sizes
	 * could result in too much or too little memory being zeroed if a
	 * process is preempted and migrated between CPUs.
	 */
	taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
				      info->reg_dczid, boot->reg_dczid);

	/* If different, timekeeping will be broken (especially with KVM) */
	taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
				      info->reg_cntfrq, boot->reg_cntfrq);

	/*
	 * The kernel uses self-hosted debug features and expects CPUs to
	 * support identical debug features. We presently need CTX_CMPs, WRPs,
	 * and BRPs to be identical.
	 * ID_AA64DFR1 is currently RES0.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
				      info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
				      info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);
	/*
	 * Even in big.LITTLE, processors should be identical instruction-set
	 * wise.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
				      info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
				      info->reg_id_aa64isar1, boot->reg_id_aa64isar1);

	/*
	 * Differing PARange support is fine as long as all peripherals and
	 * memory are mapped within the minimum PARange of all CPUs.
	 * Linux should not care about secure memory.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
				      info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
				      info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
				      info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);

	/*
	 * EL3 is not our concern.
	 * ID_AA64PFR1 is currently RES0.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
				      info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
				      info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);

	/*
	 * If we have AArch32, we care about 32-bit features for compat.
	 * If the system doesn't support AArch32, don't update them.
	 */
	if (id_aa64pfr0_32bit_el0(read_system_reg(SYS_ID_AA64PFR0_EL1)) &&
		id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {

		taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
					info->reg_id_dfr0, boot->reg_id_dfr0);
		taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
					info->reg_id_isar0, boot->reg_id_isar0);
		taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
					info->reg_id_isar1, boot->reg_id_isar1);
		taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
					info->reg_id_isar2, boot->reg_id_isar2);
		taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
					info->reg_id_isar3, boot->reg_id_isar3);
		taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
					info->reg_id_isar4, boot->reg_id_isar4);
		taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
					info->reg_id_isar5, boot->reg_id_isar5);

		/*
		 * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
		 * ACTLR formats could differ across CPUs and therefore would have to
		 * be trapped for virtualization anyway.
		 */
		taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
					info->reg_id_mmfr0, boot->reg_id_mmfr0);
		taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
					info->reg_id_mmfr1, boot->reg_id_mmfr1);
		taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
					info->reg_id_mmfr2, boot->reg_id_mmfr2);
		taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
					info->reg_id_mmfr3, boot->reg_id_mmfr3);
		taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
					info->reg_id_pfr0, boot->reg_id_pfr0);
		taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
					info->reg_id_pfr1, boot->reg_id_pfr1);
		taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
					info->reg_mvfr0, boot->reg_mvfr0);
		taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
					info->reg_mvfr1, boot->reg_mvfr1);
		taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
					info->reg_mvfr2, boot->reg_mvfr2);
	}

	/*
	 * Mismatched CPU features are a recipe for disaster. Don't even
	 * pretend to support them.
	 */
	WARN_TAINT_ONCE(taint, TAINT_CPU_OUT_OF_SPEC,
			"Unsupported CPU feature variation.\n");
}

u64 read_system_reg(u32 id)
{
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);

	/* We shouldn't get a request for an unsupported register */
	BUG_ON(!regp);
	return regp->sys_val;
}

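/*
 * Illustrative use (not from this file): callers test fields of the
 * sanitised value instead of reading the register directly, e.g.
 *
 *	u64 mmfr1 = read_system_reg(SYS_ID_AA64MMFR1_EL1);
 *	if (cpuid_feature_extract_unsigned_field(mmfr1, ID_AA64MMFR1_PAN_SHIFT))
 *		... PAN is usable on every CPU in the system ...
 */
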
/*
 * __raw_read_system_reg() - Used by a STARTING cpu before cpuinfo is populated.
 * Read the system register on the current CPU
 */
static u64 __raw_read_system_reg(u32 sys_id)
{
	switch (sys_id) {
	case SYS_ID_PFR0_EL1:		return read_cpuid(ID_PFR0_EL1);
	case SYS_ID_PFR1_EL1:		return read_cpuid(ID_PFR1_EL1);
	case SYS_ID_DFR0_EL1:		return read_cpuid(ID_DFR0_EL1);
	case SYS_ID_MMFR0_EL1:		return read_cpuid(ID_MMFR0_EL1);
	case SYS_ID_MMFR1_EL1:		return read_cpuid(ID_MMFR1_EL1);
	case SYS_ID_MMFR2_EL1:		return read_cpuid(ID_MMFR2_EL1);
	case SYS_ID_MMFR3_EL1:		return read_cpuid(ID_MMFR3_EL1);
	case SYS_ID_ISAR0_EL1:		return read_cpuid(ID_ISAR0_EL1);
	case SYS_ID_ISAR1_EL1:		return read_cpuid(ID_ISAR1_EL1);
	case SYS_ID_ISAR2_EL1:		return read_cpuid(ID_ISAR2_EL1);
	case SYS_ID_ISAR3_EL1:		return read_cpuid(ID_ISAR3_EL1);
	case SYS_ID_ISAR4_EL1:		return read_cpuid(ID_ISAR4_EL1);
	case SYS_ID_ISAR5_EL1:		return read_cpuid(ID_ISAR5_EL1);
	case SYS_MVFR0_EL1:		return read_cpuid(MVFR0_EL1);
	case SYS_MVFR1_EL1:		return read_cpuid(MVFR1_EL1);
	case SYS_MVFR2_EL1:		return read_cpuid(MVFR2_EL1);

	case SYS_ID_AA64PFR0_EL1:	return read_cpuid(ID_AA64PFR0_EL1);
	case SYS_ID_AA64PFR1_EL1:	return read_cpuid(ID_AA64PFR1_EL1);
	case SYS_ID_AA64DFR0_EL1:	return read_cpuid(ID_AA64DFR0_EL1);
	case SYS_ID_AA64DFR1_EL1:	return read_cpuid(ID_AA64DFR1_EL1);
	case SYS_ID_AA64MMFR0_EL1:	return read_cpuid(ID_AA64MMFR0_EL1);
	case SYS_ID_AA64MMFR1_EL1:	return read_cpuid(ID_AA64MMFR1_EL1);
	case SYS_ID_AA64MMFR2_EL1:	return read_cpuid(ID_AA64MMFR2_EL1);
	case SYS_ID_AA64ISAR0_EL1:	return read_cpuid(ID_AA64ISAR0_EL1);
	case SYS_ID_AA64ISAR1_EL1:	return read_cpuid(ID_AA64ISAR1_EL1);

	case SYS_CNTFRQ_EL0:		return read_cpuid(CNTFRQ_EL0);
	case SYS_CTR_EL0:		return read_cpuid(CTR_EL0);
	case SYS_DCZID_EL0:		return read_cpuid(DCZID_EL0);
	default:
		BUG();
		return 0;
	}
}

#include <linux/irqchip/arm-gic-v3.h>

static bool
feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
{
	int val = cpuid_feature_extract_field(reg, entry->field_pos, entry->sign);

	return val >= entry->min_field_value;
}

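/*
 * For example (illustrative): for the LSE capability below, reg is the
 * ID_AA64ISAR0_EL1 value, field_pos is ID_AA64ISAR0_ATOMICS_SHIFT and
 * min_field_value is 2, so an extracted field value >= 2 matches.
 */
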
static bool
has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
{
	u64 val;

	WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
	if (scope == SCOPE_SYSTEM)
		val = read_system_reg(entry->sys_reg);
	else
		val = __raw_read_system_reg(entry->sys_reg);

	return feature_matches(val, entry);
}

static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
{
	bool has_sre;

	if (!has_cpuid_feature(entry, scope))
		return false;

	has_sre = gic_enable_sre();
	if (!has_sre)
		pr_warn_once("%s present but disabled by higher exception level\n",
			     entry->desc);

	return has_sre;
}

static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
{
	u32 midr = read_cpuid_id();
	u32 rv_min, rv_max;

	/* Cavium ThunderX pass 1.x and 2.x */
	rv_min = 0;
	rv_max = (1 << MIDR_VARIANT_SHIFT) | MIDR_REVISION_MASK;

	return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, rv_min, rv_max);
}

static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
{
	return is_kernel_in_hyp_mode();
}

static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry,
			   int __unused)
{
	phys_addr_t idmap_addr = virt_to_phys(__hyp_idmap_text_start);

	/*
	 * Activate the lower HYP offset only if:
	 * - the idmap doesn't clash with it,
	 * - the kernel is not running at EL2.
	 */
	return idmap_addr > GENMASK(VA_BITS - 2, 0) && !is_kernel_in_hyp_mode();
}

static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused)
{
	u64 pfr0 = read_system_reg(SYS_ID_AA64PFR0_EL1);

	return cpuid_feature_extract_signed_field(pfr0,
					ID_AA64PFR0_FP_SHIFT) < 0;
}

static const struct arm64_cpu_capabilities arm64_features[] = {
	{
		.desc = "GIC system register CPU interface",
		.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
		.def_scope = SCOPE_SYSTEM,
		.matches = has_useable_gicv3_cpuif,
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.field_pos = ID_AA64PFR0_GIC_SHIFT,
		.sign = FTR_UNSIGNED,
		.min_field_value = 1,
	},
#ifdef CONFIG_ARM64_PAN
	{
		.desc = "Privileged Access Never",
		.capability = ARM64_HAS_PAN,
		.def_scope = SCOPE_SYSTEM,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64MMFR1_EL1,
		.field_pos = ID_AA64MMFR1_PAN_SHIFT,
		.sign = FTR_UNSIGNED,
		.min_field_value = 1,
		.enable = cpu_enable_pan,
	},
#endif /* CONFIG_ARM64_PAN */
#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
	{
		.desc = "LSE atomic instructions",
		.capability = ARM64_HAS_LSE_ATOMICS,
		.def_scope = SCOPE_SYSTEM,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64ISAR0_EL1,
		.field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
		.sign = FTR_UNSIGNED,
		.min_field_value = 2,
	},
#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
	{
		.desc = "Software prefetching using PRFM",
		.capability = ARM64_HAS_NO_HW_PREFETCH,
		.def_scope = SCOPE_SYSTEM,
		.matches = has_no_hw_prefetch,
	},
#ifdef CONFIG_ARM64_UAO
	{
		.desc = "User Access Override",
		.capability = ARM64_HAS_UAO,
		.def_scope = SCOPE_SYSTEM,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64MMFR2_EL1,
		.field_pos = ID_AA64MMFR2_UAO_SHIFT,
		.min_field_value = 1,
		/*
		 * We rely on stop_machine() calling uao_thread_switch() to set
		 * UAO immediately after patching.
		 */
	},
#endif /* CONFIG_ARM64_UAO */
#ifdef CONFIG_ARM64_PAN
	{
		.capability = ARM64_ALT_PAN_NOT_UAO,
		.def_scope = SCOPE_SYSTEM,
		.matches = cpufeature_pan_not_uao,
	},
#endif /* CONFIG_ARM64_PAN */
	{
		.desc = "Virtualization Host Extensions",
		.capability = ARM64_HAS_VIRT_HOST_EXTN,
		.def_scope = SCOPE_SYSTEM,
		.matches = runs_at_el2,
	},
	{
		.desc = "32-bit EL0 Support",
		.capability = ARM64_HAS_32BIT_EL0,
		.def_scope = SCOPE_SYSTEM,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64PFR0_EL0_SHIFT,
		.min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
	},
	{
		.desc = "Reduced HYP mapping offset",
		.capability = ARM64_HYP_OFFSET_LOW,
		.def_scope = SCOPE_SYSTEM,
		.matches = hyp_offset_low,
	},
	{
		/* FP/SIMD is not implemented */
		.capability = ARM64_HAS_NO_FPSIMD,
		.def_scope = SCOPE_SYSTEM,
		.min_field_value = 0,
		.matches = has_no_fpsimd,
	},
	{},
};

#define HWCAP_CAP(reg, field, s, min_value, type, cap)	\
	{						\
		.desc = #cap,				\
		.def_scope = SCOPE_SYSTEM,		\
		.matches = has_cpuid_feature,		\
		.sys_reg = reg,				\
		.field_pos = field,			\
		.sign = s,				\
		.min_field_value = min_value,		\
		.hwcap_type = type,			\
		.hwcap = cap,				\
	}

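/*
 * For illustration: HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT,
 * FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES) builds a capability that, once
 * matched by setup_elf_hwcaps() below, sets HWCAP_AES in elf_hwcap provided
 * the sanitised AES field is at least 1.
 */
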
static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
	{},
};

static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
#ifdef CONFIG_COMPAT
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
#endif
	{},
};

891
f3efb675 892static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
37b01d53
SP
893{
894 switch (cap->hwcap_type) {
895 case CAP_HWCAP:
896 elf_hwcap |= cap->hwcap;
897 break;
898#ifdef CONFIG_COMPAT
899 case CAP_COMPAT_HWCAP:
900 compat_elf_hwcap |= (u32)cap->hwcap;
901 break;
902 case CAP_COMPAT_HWCAP2:
903 compat_elf_hwcap2 |= (u32)cap->hwcap;
904 break;
905#endif
906 default:
907 WARN_ON(1);
908 break;
909 }
910}
911
/* Check if we have a particular HWCAP enabled */
static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
{
	bool rc;

	switch (cap->hwcap_type) {
	case CAP_HWCAP:
		rc = (elf_hwcap & cap->hwcap) != 0;
		break;
#ifdef CONFIG_COMPAT
	case CAP_COMPAT_HWCAP:
		rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
		break;
	case CAP_COMPAT_HWCAP2:
		rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
		break;
#endif
	default:
		WARN_ON(1);
		rc = false;
	}

	return rc;
}

static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
{
	for (; hwcaps->matches; hwcaps++)
		if (hwcaps->matches(hwcaps, hwcaps->def_scope))
			cap_set_elf_hwcap(hwcaps);
}

void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
			    const char *info)
{
	for (; caps->matches; caps++) {
		if (!caps->matches(caps, caps->def_scope))
			continue;

		if (!cpus_have_cap(caps->capability) && caps->desc)
			pr_info("%s %s\n", info, caps->desc);
		cpus_set_cap(caps->capability);
	}
}

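/*
 * For example (illustrative), a system with PAN on all CPUs logs, via the
 * pr_info() above and the "detected feature:" prefix passed in by
 * setup_feature_capabilities():
 *
 *   CPU features: detected feature: Privileged Access Never
 */
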
/*
 * Run through the enabled capabilities and enable() them on all active
 * CPUs
 */
void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
{
	for (; caps->matches; caps++)
		if (caps->enable && cpus_have_cap(caps->capability))
			/*
			 * Use stop_machine() as it schedules the work allowing
			 * us to modify PSTATE, instead of on_each_cpu() which
			 * uses an IPI, giving us a PSTATE that disappears when
			 * we return.
			 */
			stop_machine(caps->enable, NULL, cpu_online_mask);
}

/*
 * Flag to indicate if we have computed the system wide
 * capabilities based on the boot time active CPUs. This
 * will be used to determine if a new booting CPU should
 * go through the verification process to make sure that it
 * supports the system capabilities, without using a hotplug
 * notifier.
 */
static bool sys_caps_initialised;

static inline void set_sys_caps_initialised(void)
{
	sys_caps_initialised = true;
}

/*
 * Check for CPU features that are used in early boot
 * based on the Boot CPU value.
 */
static void check_early_cpu_features(void)
{
	verify_cpu_run_el();
	verify_cpu_asid_bits();
}

static void
verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
{
	for (; caps->matches; caps++)
		if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) {
			pr_crit("CPU%d: missing HWCAP: %s\n",
					smp_processor_id(), caps->desc);
			cpu_die_early();
		}
}

static void
verify_local_cpu_features(const struct arm64_cpu_capabilities *caps)
{
	for (; caps->matches; caps++) {
		if (!cpus_have_cap(caps->capability))
			continue;
		/*
		 * If the new CPU misses an advertised feature, we cannot
		 * proceed further; park the CPU.
		 */
		if (!caps->matches(caps, SCOPE_LOCAL_CPU)) {
			pr_crit("CPU%d: missing feature: %s\n",
					smp_processor_id(), caps->desc);
			cpu_die_early();
		}
		if (caps->enable)
			caps->enable(NULL);
	}
}

/*
 * Run through the enabled system capabilities and enable() them on this
 * CPU. The capabilities were decided based on the available CPUs at boot
 * time. Any new CPU should match the system wide status of the capability.
 * If the new CPU doesn't have a capability which the system now has
 * enabled, we cannot do anything to fix it up and could cause unexpected
 * failures. So we park the CPU.
 */
static void verify_local_cpu_capabilities(void)
{
	verify_local_cpu_errata_workarounds();
	verify_local_cpu_features(arm64_features);
	verify_local_elf_hwcaps(arm64_elf_hwcaps);
	if (system_supports_32bit_el0())
		verify_local_elf_hwcaps(compat_elf_hwcaps);
}

void check_local_cpu_capabilities(void)
{
	/*
	 * All secondary CPUs should conform to the early CPU features
	 * in use by the kernel based on boot CPU.
	 */
	check_early_cpu_features();

	/*
	 * If we haven't finalised the system capabilities, this CPU gets
	 * a chance to update the errata work arounds.
	 * Otherwise, this CPU should verify that it has all the system
	 * advertised capabilities.
	 */
	if (!sys_caps_initialised)
		update_cpu_errata_workarounds();
	else
		verify_local_cpu_capabilities();
}

static void __init setup_feature_capabilities(void)
{
	update_cpu_capabilities(arm64_features, "detected feature:");
	enable_cpu_capabilities(arm64_features);
}

/*
 * Check if the current CPU has a given feature capability.
 * Should be called from non-preemptible context.
 */
bool this_cpu_has_cap(unsigned int cap)
{
	const struct arm64_cpu_capabilities *caps;

	if (WARN_ON(preemptible()))
		return false;

	for (caps = arm64_features; caps->desc; caps++)
		if (caps->capability == cap && caps->matches)
			return caps->matches(caps, SCOPE_LOCAL_CPU);

	return false;
}

void __init setup_cpu_features(void)
{
	u32 cwg;
	int cls;

	/* Set the CPU feature capabilities */
	setup_feature_capabilities();
	enable_errata_workarounds();
	setup_elf_hwcaps(arm64_elf_hwcaps);

	if (system_supports_32bit_el0())
		setup_elf_hwcaps(compat_elf_hwcaps);

	/* Advertise that we have computed the system capabilities */
	set_sys_caps_initialised();

	/*
	 * Check for sane CTR_EL0.CWG value.
	 */
	cwg = cache_type_cwg();
	cls = cache_line_size();
	if (!cwg)
		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
			cls);
	if (L1_CACHE_BYTES < cls)
		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
			L1_CACHE_BYTES, cls);
}

static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
{
	return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
}