// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU feature definitions
 *
 * Copyright (C) 2015 ARM Ltd.
 */

#define pr_fmt(fmt) "CPU features: " fmt

#include <linux/bsearch.h>
#include <linux/cpumask.h>
#include <linux/crash_dump.h>
#include <linux/sort.h>
#include <linux/stop_machine.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/sysreg.h>
#include <asm/traps.h>
#include <asm/virt.h>

/* Kernel representation of AT_HWCAP and AT_HWCAP2 */
static unsigned long elf_hwcap __read_mostly;

#ifdef CONFIG_COMPAT
#define COMPAT_ELF_HWCAP_DEFAULT	\
				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\
				 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcaps);
static struct arm64_cpu_capabilities const __ro_after_init *cpu_hwcaps_ptrs[ARM64_NCAPS];

/* Need also bit for ARM64_CB_PATCH */
DECLARE_BITMAP(boot_capabilities, ARM64_NPATCHABLE);

bool arm64_use_ng_mappings = false;
EXPORT_SYMBOL(arm64_use_ng_mappings);

/*
 * Flag to indicate if we have computed the system wide
 * capabilities based on the boot time active CPUs. This
 * will be used to determine if a new booting CPU should
 * go through the verification process to make sure that it
 * supports the system capabilities, without using a hotplug
 * notifier. This is also used to decide if we could use
 * the fast path for checking constant CPU caps.
 */
DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
EXPORT_SYMBOL(arm64_const_caps_ready);
static inline void finalize_system_capabilities(void)
{
	static_branch_enable(&arm64_const_caps_ready);
}

static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p)
{
	/* file-wide pr_fmt adds "CPU features: " prefix */
	pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
	return 0;
}

static struct notifier_block cpu_hwcaps_notifier = {
	.notifier_call = dump_cpu_hwcaps
};

static int __init register_cpu_hwcaps_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &cpu_hwcaps_notifier);
	return 0;
}
__initcall(register_cpu_hwcaps_dumper);

DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcap_keys);

#define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	{ \
		.sign = SIGNED, \
		.visible = VISIBLE, \
		.strict = STRICT, \
		.type = TYPE, \
		.shift = SHIFT, \
		.width = WIDTH, \
		.safe_val = SAFE_VAL, \
	}

/* Define a feature with unsigned values */
#define ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	__ARM64_FTR_BITS(FTR_UNSIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)

/* Define a feature with a signed value */
#define S_ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	__ARM64_FTR_BITS(FTR_SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)

#define ARM64_FTR_END \
	{ \
		.width = 0, \
	}

/* meta feature for alternatives */
static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);

static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);

static bool __system_matches_cap(unsigned int n);

/*
 * NOTE: Any changes to the visibility of features should be kept in
 * sync with the documentation of the CPU feature register ABI.
 */
static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RNDR_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_I8MM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DGH_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_BF16_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SPECRES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_SB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FRINTTS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPI_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_GPA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_API_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_APA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
	S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
	/* Linux doesn't care about the EL3 */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_F64MM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_F32MM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_I8MM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SM4_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SHA3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BF16_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_BITPERM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_AES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
		       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_SVEVER_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
	/*
	 * We already refuse to boot CPUs that don't support our configured
	 * page size, so we can only detect mismatches for a page size other
	 * than the one we're currently using. Unfortunately, SoCs like this
	 * exist in the wild so, even though we don't like it, we'll have to go
	 * along with it and treat them as non-strict.
	 */
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),

	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
	/* Linux shouldn't care about secure memory */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
	/*
	 * Differing PARange is fine as long as all peripherals and memory are mapped
	 * within the minimum PARange of all CPUs
	 */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_E0PD_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_ctr[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_CWG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_ERG_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
	/*
	 * Linux can handle differing I-cache policies. Userspace JITs will
	 * make use of *minLine.
	 * If we have differing I-cache policies, report it as the weakest - VIPT.
	 */
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT), /* L1Ip */
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
	ARM64_FTR_END,
};

struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
	.name = "SYS_CTR_EL0",
	.ftr_bits = ftr_ctr
};

static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0xf),	/* InnerShr */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),	/* FCSE */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* AuxReg */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),	/* TCM */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),	/* ShareLvl */
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0xf),	/* OuterShr */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),	/* PMSA */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* VMSA */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 36, 28, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
	/*
	 * We can instantiate multiple PMU instances with different levels
	 * of support.
	 */
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_mvfr2[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),	/* FPMisc */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* SIMDMisc */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_dczid[] = {
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 4, 1, 1),		/* DZP */
	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* BS */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_isar5[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),	/* ac2 */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_isar6[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_I8MM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_BF16_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_SPECRES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_SB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_FHM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_DP_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_JSCVT_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_pfr0[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),	/* State3 */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),	/* State2 */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),	/* State1 */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* State0 */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_dfr0[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
	S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf),	/* PerfMon */
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_zcr[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
		ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_SIZE, 0),	/* LEN */
	ARM64_FTR_END,
};

/*
 * Common ftr bits for a 32bit register with all hidden, strict
 * attributes, with 4bit feature fields and a default safe value of
 * 0. Covers the following 32bit registers:
 * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
 */
static const struct arm64_ftr_bits ftr_generic_32bits[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
	ARM64_FTR_END,
};

/* Table for a single 32bit feature value */
static const struct arm64_ftr_bits ftr_single32[] = {
	ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 32, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_raz[] = {
	ARM64_FTR_END,
};

#define ARM64_FTR_REG(id, table) {		\
	.sys_id = id,				\
	.reg = &(struct arm64_ftr_reg){		\
		.name = #id,			\
		.ftr_bits = &((table)[0]),	\
	}}

static const struct __ftr_reg_entry {
	u32 sys_id;
	struct arm64_ftr_reg *reg;
} arm64_ftr_regs[] = {

	/* Op1 = 0, CRn = 0, CRm = 1 */
	ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
	ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0),
	ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
	ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),

	/* Op1 = 0, CRn = 0, CRm = 2 */
	ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
	ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),
	ARM64_FTR_REG(SYS_ID_ISAR6_EL1, ftr_id_isar6),

	/* Op1 = 0, CRn = 0, CRm = 3 */
	ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),

	/* Op1 = 0, CRn = 0, CRm = 4 */
	ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
	ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1),
	ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0),

	/* Op1 = 0, CRn = 0, CRm = 5 */
	ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
	ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_raz),

	/* Op1 = 0, CRn = 0, CRm = 6 */
	ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
	ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1),

	/* Op1 = 0, CRn = 0, CRm = 7 */
	ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
	ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
	ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),

	/* Op1 = 0, CRn = 1, CRm = 2 */
	ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),

	/* Op1 = 3, CRn = 0, CRm = 0 */
	{ SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
	ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),

	/* Op1 = 3, CRn = 14, CRm = 0 */
	ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_single32),
};

static int search_cmp_ftr_reg(const void *id, const void *regp)
{
	return (int)(unsigned long)id - (int)((const struct __ftr_reg_entry *)regp)->sys_id;
}

/*
 * get_arm64_ftr_reg - Lookup a feature register entry using its
 * sys_reg() encoding. With the array arm64_ftr_regs sorted in the
 * ascending order of sys_id, we use binary search to find a matching
 * entry.
 *
 * returns - Upon success, matching ftr_reg entry for id.
 *         - NULL on failure. It is up to the caller to decide
 *           the impact of a failure.
 */
static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
{
	const struct __ftr_reg_entry *ret;

	ret = bsearch((const void *)(unsigned long)sys_id,
			arm64_ftr_regs,
			ARRAY_SIZE(arm64_ftr_regs),
			sizeof(arm64_ftr_regs[0]),
			search_cmp_ftr_reg);
	if (ret)
		return ret->reg;
	return NULL;
}

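/* Return @reg with the field described by @ftrp replaced by @ftr_val. */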
static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg,
			       s64 ftr_val)
{
	u64 mask = arm64_ftr_mask(ftrp);

	reg &= ~mask;
	reg |= (ftr_val << ftrp->shift) & mask;
	return reg;
}

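/*
 * Combine a newly seen field value with the current system-wide value,
 * returning the "safe" result: the explicit safe value for FTR_EXACT
 * fields, otherwise the lower/higher of the two as the field type dictates.
 */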
static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
				s64 cur)
{
	s64 ret = 0;

	switch (ftrp->type) {
	case FTR_EXACT:
		ret = ftrp->safe_val;
		break;
	case FTR_LOWER_SAFE:
		ret = new < cur ? new : cur;
		break;
	case FTR_HIGHER_OR_ZERO_SAFE:
		if (!cur || !new)
			break;
		/* Fallthrough */
	case FTR_HIGHER_SAFE:
		ret = new > cur ? new : cur;
		break;
	default:
		BUG();
	}

	return ret;
}

static void __init sort_ftr_regs(void)
{
	int i;

	/* Check that the array is sorted so that we can do the binary search */
	for (i = 1; i < ARRAY_SIZE(arm64_ftr_regs); i++)
		BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
}

/*
 * Initialise the CPU feature register from Boot CPU values.
 * Also initialises the strict_mask for the register.
 * Any bits that are not covered by an arm64_ftr_bits entry are considered
 * RES0 for the system-wide value, and must strictly match.
 */
static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
{
	u64 val = 0;
	u64 strict_mask = ~0x0ULL;
	u64 user_mask = 0;
	u64 valid_mask = 0;

	const struct arm64_ftr_bits *ftrp;
	struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);

	BUG_ON(!reg);

	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
		u64 ftr_mask = arm64_ftr_mask(ftrp);
		s64 ftr_new = arm64_ftr_value(ftrp, new);

		val = arm64_ftr_set_value(ftrp, val, ftr_new);

		valid_mask |= ftr_mask;
		if (!ftrp->strict)
			strict_mask &= ~ftr_mask;
		if (ftrp->visible)
			user_mask |= ftr_mask;
		else
			reg->user_val = arm64_ftr_set_value(ftrp,
							    reg->user_val,
							    ftrp->safe_val);
	}

	val &= valid_mask;

	reg->sys_val = val;
	reg->strict_mask = strict_mask;
	reg->user_mask = user_mask;
}

extern const struct arm64_cpu_capabilities arm64_errata[];
static const struct arm64_cpu_capabilities arm64_features[];

static void __init
init_cpu_hwcaps_indirect_list_from_array(const struct arm64_cpu_capabilities *caps)
{
	for (; caps->matches; caps++) {
		if (WARN(caps->capability >= ARM64_NCAPS,
			"Invalid capability %d\n", caps->capability))
			continue;
		if (WARN(cpu_hwcaps_ptrs[caps->capability],
			"Duplicate entry for capability %d\n",
			caps->capability))
			continue;
		cpu_hwcaps_ptrs[caps->capability] = caps;
	}
}

static void __init init_cpu_hwcaps_indirect_list(void)
{
	init_cpu_hwcaps_indirect_list_from_array(arm64_features);
	init_cpu_hwcaps_indirect_list_from_array(arm64_errata);
}

static void __init setup_boot_cpu_capabilities(void);

void __init init_cpu_features(struct cpuinfo_arm64 *info)
{
	/* Before we start using the tables, make sure they are sorted */
	sort_ftr_regs();

	init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
	init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
	init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
	init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
	init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
	init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
	init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
	init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);

	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
		init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
		init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
		init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
		init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
		init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
		init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
		init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
		init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6);
		init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
		init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
		init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
		init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
		init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
		init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
		init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
		init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
		init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
	}

	if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
		init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
		sve_init_vq_map();
	}

	/*
	 * Initialize the indirect array of CPU hwcaps capabilities pointers
	 * before we handle the boot CPU below.
	 */
	init_cpu_hwcaps_indirect_list();

	/*
	 * Detect and enable early CPU capabilities based on the boot CPU,
	 * after we have initialised the CPU feature infrastructure.
	 */
	setup_boot_cpu_capabilities();
}

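/*
 * Fold the feature fields of @new into @reg->sys_val, replacing any
 * mismatched field with its safe value.
 */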
static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
{
	const struct arm64_ftr_bits *ftrp;

	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
		s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
		s64 ftr_new = arm64_ftr_value(ftrp, new);

		if (ftr_cur == ftr_new)
			continue;
		/* Find a safe value */
		ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
		reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
	}

}

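/*
 * Update the system-wide value for @sys_id with @val and return non-zero
 * if any strictly-checked field differs from the boot CPU's value @boot.
 */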
static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
{
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);

	BUG_ON(!regp);
	update_cpu_ftr_reg(regp, val);
	if ((boot & regp->strict_mask) == (val & regp->strict_mask))
		return 0;
	pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
			regp->name, boot, cpu, val);
	return 1;
}

/*
 * Update system wide CPU feature registers with the values from a
 * non-boot CPU. Also performs SANITY checks to make sure that there
 * aren't any insane variations from that of the boot CPU.
 */
void update_cpu_features(int cpu,
			 struct cpuinfo_arm64 *info,
			 struct cpuinfo_arm64 *boot)
{
	int taint = 0;

	/*
	 * The kernel can handle differing I-cache policies, but otherwise
	 * caches should look identical. Userspace JITs will make use of
	 * *minLine.
	 */
	taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
				      info->reg_ctr, boot->reg_ctr);

	/*
	 * Userspace may perform DC ZVA instructions. Mismatched block sizes
	 * could result in too much or too little memory being zeroed if a
	 * process is preempted and migrated between CPUs.
	 */
	taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
				      info->reg_dczid, boot->reg_dczid);

	/* If different, timekeeping will be broken (especially with KVM) */
	taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
				      info->reg_cntfrq, boot->reg_cntfrq);

	/*
	 * The kernel uses self-hosted debug features and expects CPUs to
	 * support identical debug features. We presently need CTX_CMPs, WRPs,
	 * and BRPs to be identical.
	 * ID_AA64DFR1 is currently RES0.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
				      info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
				      info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);
	/*
	 * Even in big.LITTLE, processors should be identical instruction-set
	 * wise.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
				      info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
				      info->reg_id_aa64isar1, boot->reg_id_aa64isar1);

	/*
	 * Differing PARange support is fine as long as all peripherals and
	 * memory are mapped within the minimum PARange of all CPUs.
	 * Linux should not care about secure memory.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
				      info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
				      info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
				      info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);

	/*
	 * EL3 is not our concern.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
				      info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
				      info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);

	taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu,
				      info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0);

	/*
	 * If we have AArch32, we care about 32-bit features for compat.
	 * If the system doesn't support AArch32, don't update them.
	 */
	if (id_aa64pfr0_32bit_el0(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
		id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {

		taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
					info->reg_id_dfr0, boot->reg_id_dfr0);
		taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
					info->reg_id_isar0, boot->reg_id_isar0);
		taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
					info->reg_id_isar1, boot->reg_id_isar1);
		taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
					info->reg_id_isar2, boot->reg_id_isar2);
		taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
					info->reg_id_isar3, boot->reg_id_isar3);
		taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
					info->reg_id_isar4, boot->reg_id_isar4);
		taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
					info->reg_id_isar5, boot->reg_id_isar5);
		taint |= check_update_ftr_reg(SYS_ID_ISAR6_EL1, cpu,
					info->reg_id_isar6, boot->reg_id_isar6);

		/*
		 * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
		 * ACTLR formats could differ across CPUs and therefore would have to
		 * be trapped for virtualization anyway.
		 */
		taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
					info->reg_id_mmfr0, boot->reg_id_mmfr0);
		taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
					info->reg_id_mmfr1, boot->reg_id_mmfr1);
		taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
					info->reg_id_mmfr2, boot->reg_id_mmfr2);
		taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
					info->reg_id_mmfr3, boot->reg_id_mmfr3);
		taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
					info->reg_id_pfr0, boot->reg_id_pfr0);
		taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
					info->reg_id_pfr1, boot->reg_id_pfr1);
		taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
					info->reg_mvfr0, boot->reg_mvfr0);
		taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
					info->reg_mvfr1, boot->reg_mvfr1);
		taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
					info->reg_mvfr2, boot->reg_mvfr2);
	}

	if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
		taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
					info->reg_zcr, boot->reg_zcr);

		/* Probe vector lengths, unless we already gave up on SVE */
		if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
		    !system_capabilities_finalized())
			sve_update_vq_map();
	}

	/*
	 * Mismatched CPU features are a recipe for disaster. Don't even
	 * pretend to support them.
	 */
	if (taint) {
		pr_warn_once("Unsupported CPU feature variation detected.\n");
		add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
	}
}

u64 read_sanitised_ftr_reg(u32 id)
{
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);

	/* We shouldn't get a request for an unsupported register */
	BUG_ON(!regp);
	return regp->sys_val;
}

#define read_sysreg_case(r)	\
	case r:		return read_sysreg_s(r)

/*
 * __read_sysreg_by_encoding() - Used by a STARTING cpu before cpuinfo is populated.
 * Read the system register on the current CPU
 */
static u64 __read_sysreg_by_encoding(u32 sys_id)
{
	switch (sys_id) {
	read_sysreg_case(SYS_ID_PFR0_EL1);
	read_sysreg_case(SYS_ID_PFR1_EL1);
	read_sysreg_case(SYS_ID_DFR0_EL1);
	read_sysreg_case(SYS_ID_MMFR0_EL1);
	read_sysreg_case(SYS_ID_MMFR1_EL1);
	read_sysreg_case(SYS_ID_MMFR2_EL1);
	read_sysreg_case(SYS_ID_MMFR3_EL1);
	read_sysreg_case(SYS_ID_ISAR0_EL1);
	read_sysreg_case(SYS_ID_ISAR1_EL1);
	read_sysreg_case(SYS_ID_ISAR2_EL1);
	read_sysreg_case(SYS_ID_ISAR3_EL1);
	read_sysreg_case(SYS_ID_ISAR4_EL1);
	read_sysreg_case(SYS_ID_ISAR5_EL1);
	read_sysreg_case(SYS_ID_ISAR6_EL1);
	read_sysreg_case(SYS_MVFR0_EL1);
	read_sysreg_case(SYS_MVFR1_EL1);
	read_sysreg_case(SYS_MVFR2_EL1);

	read_sysreg_case(SYS_ID_AA64PFR0_EL1);
	read_sysreg_case(SYS_ID_AA64PFR1_EL1);
	read_sysreg_case(SYS_ID_AA64ZFR0_EL1);
	read_sysreg_case(SYS_ID_AA64DFR0_EL1);
	read_sysreg_case(SYS_ID_AA64DFR1_EL1);
	read_sysreg_case(SYS_ID_AA64MMFR0_EL1);
	read_sysreg_case(SYS_ID_AA64MMFR1_EL1);
	read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
	read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
	read_sysreg_case(SYS_ID_AA64ISAR1_EL1);

	read_sysreg_case(SYS_CNTFRQ_EL0);
	read_sysreg_case(SYS_CTR_EL0);
	read_sysreg_case(SYS_DCZID_EL0);

	default:
		BUG();
		return 0;
	}
}

892
963fcd40
MZ
893#include <linux/irqchip/arm-gic-v3.h>
894
18ffa046
JM
895static bool
896feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
897{
28c5dcb2 898 int val = cpuid_feature_extract_field(reg, entry->field_pos, entry->sign);
18ffa046
JM
899
900 return val >= entry->min_field_value;
901}
902
da8d02d1 903static bool
92406f0c 904has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
da8d02d1
SP
905{
906 u64 val;
94a9e04a 907
92406f0c
SP
908 WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
909 if (scope == SCOPE_SYSTEM)
46823dd1 910 val = read_sanitised_ftr_reg(entry->sys_reg);
92406f0c 911 else
46823dd1 912 val = __read_sysreg_by_encoding(entry->sys_reg);
92406f0c 913
da8d02d1
SP
914 return feature_matches(val, entry);
915}
338d4f49 916
static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
{
	bool has_sre;

	if (!has_cpuid_feature(entry, scope))
		return false;

	has_sre = gic_enable_sre();
	if (!has_sre)
		pr_warn_once("%s present but disabled by higher exception level\n",
			     entry->desc);

	return has_sre;
}

static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
{
	u32 midr = read_cpuid_id();

	/* Cavium ThunderX pass 1.x and 2.x */
	return midr_is_cpu_model_range(midr, MIDR_THUNDERX,
		MIDR_CPU_VAR_REV(0, 0),
		MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
}

static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused)
{
	u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

	return cpuid_feature_extract_signed_field(pfr0,
					ID_AA64PFR0_FP_SHIFT) < 0;
}

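/* CTR_EL0.IDC == 1: no D-cache clean to PoU is required for I/D coherence. */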
static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 ctr;

	if (scope == SCOPE_SYSTEM)
		ctr = arm64_ftr_reg_ctrel0.sys_val;
	else
		ctr = read_cpuid_effective_cachetype();

	return ctr & BIT(CTR_IDC_SHIFT);
}

static void cpu_emulate_effective_ctr(const struct arm64_cpu_capabilities *__unused)
{
	/*
	 * If the CPU exposes raw CTR_EL0.IDC = 0, while effectively
	 * CTR_EL0.IDC = 1 (from CLIDR values), we need to trap accesses
	 * to the CTR_EL0 on this CPU and emulate it with the real/safe
	 * value.
	 */
	if (!(read_cpuid_cachetype() & BIT(CTR_IDC_SHIFT)))
		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}

static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 ctr;

	if (scope == SCOPE_SYSTEM)
		ctr = arm64_ftr_reg_ctrel0.sys_val;
	else
		ctr = read_cpuid_cachetype();

	return ctr & BIT(CTR_DIC_SHIFT);
}

static bool __maybe_unused
has_useable_cnp(const struct arm64_cpu_capabilities *entry, int scope)
{
	/*
	 * Kdump isn't guaranteed to power-off all secondary CPUs, CNP
	 * may share TLB entries with a CPU stuck in the crashed
	 * kernel.
	 */
	if (is_kdump_kernel())
		return false;

	return has_cpuid_feature(entry, scope);
}

/*
 * This check is triggered during the early boot before the cpufeature
 * is initialised. Checking the status on the local CPU allows the boot
 * CPU to detect the need for non-global mappings and thus avoid a
 * pagetable re-write after all the CPUs are booted. This check will be
 * anyway run on individual CPUs, allowing us to get the consistent
 * state once the SMP CPUs are up and thus make the switch to non-global
 * mappings if required.
 */
bool kaslr_requires_kpti(void)
{
	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		return false;

	/*
	 * E0PD does a similar job to KPTI so can be used instead
	 * where available.
	 */
	if (IS_ENABLED(CONFIG_ARM64_E0PD)) {
		u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);
		if (cpuid_feature_extract_unsigned_field(mmfr2,
						ID_AA64MMFR2_E0PD_SHIFT))
			return false;
	}

	/*
	 * Systems affected by Cavium erratum 27456 are incompatible
	 * with KPTI.
	 */
	if (IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) {
		extern const struct midr_range cavium_erratum_27456_cpus[];

		if (is_midr_in_range_list(read_cpuid_id(),
					  cavium_erratum_27456_cpus))
			return false;
	}

	return kaslr_offset() > 0;
}

static bool __meltdown_safe = true;
static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */

static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	/* List of CPUs that are not vulnerable and don't need KPTI */
	static const struct midr_range kpti_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
		MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
		MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
		{ /* sentinel */ }
	};
	char const *str = "kpti command line option";
	bool meltdown_safe;

	meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);

	/* Defer to CPU feature registers */
	if (has_cpuid_feature(entry, scope))
		meltdown_safe = true;

	if (!meltdown_safe)
		__meltdown_safe = false;

	/*
	 * For reasons that aren't entirely clear, enabling KPTI on Cavium
	 * ThunderX leads to apparent I-cache corruption of kernel text, which
	 * ends as well as you might imagine. Don't even try.
	 */
	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
		str = "ARM64_WORKAROUND_CAVIUM_27456";
		__kpti_forced = -1;
	}

	/* Useful for KASLR robustness */
	if (kaslr_requires_kpti()) {
		if (!__kpti_forced) {
			str = "KASLR";
			__kpti_forced = 1;
		}
	}

	if (cpu_mitigations_off() && !__kpti_forced) {
		str = "mitigations=off";
		__kpti_forced = -1;
	}

	if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
		pr_info_once("kernel page table isolation disabled by kernel configuration\n");
		return false;
	}

	/* Forced? */
	if (__kpti_forced) {
		pr_info_once("kernel page table isolation forced %s by %s\n",
			     __kpti_forced > 0 ? "ON" : "OFF", str);
		return __kpti_forced > 0;
	}

	return !meltdown_safe;
}

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static void
kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
{
	typedef void (kpti_remap_fn)(int, int, phys_addr_t);
	extern kpti_remap_fn idmap_kpti_install_ng_mappings;
	kpti_remap_fn *remap_fn;

	int cpu = smp_processor_id();

	/*
	 * We don't need to rewrite the page-tables if either we've done
	 * it already or we have KASLR enabled and therefore have not
	 * created any global mappings at all.
	 */
	if (arm64_use_ng_mappings)
		return;

	remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);

	cpu_install_idmap();
	remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
	cpu_uninstall_idmap();

	if (!cpu)
		arm64_use_ng_mappings = true;

	return;
}
#else
static void
kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
{
}
#endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */

static int __init parse_kpti(char *str)
{
	bool enabled;
	int ret = strtobool(str, &enabled);

	if (ret)
		return ret;

	__kpti_forced = enabled ? 1 : -1;
	return 0;
}
early_param("kpti", parse_kpti);

#ifdef CONFIG_ARM64_HW_AFDBM
static inline void __cpu_enable_hw_dbm(void)
{
	u64 tcr = read_sysreg(tcr_el1) | TCR_HD;

	write_sysreg(tcr, tcr_el1);
	isb();
}

static bool cpu_has_broken_dbm(void)
{
	/* List of CPUs which have broken DBM support. */
	static const struct midr_range cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_1024718
		MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0),  // A55 r0p0 - r1p0
#endif
		{},
	};

	return is_midr_in_range_list(read_cpuid_id(), cpus);
}

static bool cpu_can_use_dbm(const struct arm64_cpu_capabilities *cap)
{
	return has_cpuid_feature(cap, SCOPE_LOCAL_CPU) &&
	       !cpu_has_broken_dbm();
}

static void cpu_enable_hw_dbm(struct arm64_cpu_capabilities const *cap)
{
	if (cpu_can_use_dbm(cap))
		__cpu_enable_hw_dbm();
}

static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap,
		       int __unused)
{
	static bool detected = false;
	/*
	 * DBM is a non-conflicting feature. i.e, the kernel can safely
	 * run a mix of CPUs with and without the feature. So, we
	 * unconditionally enable the capability to allow any late CPU
	 * to use the feature. We only enable the control bits on the
	 * CPU, if it actually supports it.
	 *
	 * We have to make sure we print the "feature" detection only
	 * when at least one CPU actually uses it. So check if this CPU
	 * can actually use it and print the message exactly once.
	 *
	 * This is safe as all CPUs (including secondary CPUs - due to the
	 * LOCAL_CPU scope - and the hotplugged CPUs - via verification)
	 * go through the "matches" check exactly once. Also if a CPU
	 * matches the criteria, it is guaranteed that the CPU will turn
	 * the DBM on, as the capability is unconditionally enabled.
	 */
	if (!detected && cpu_can_use_dbm(cap)) {
		detected = true;
		pr_info("detected: Hardware dirty bit management\n");
	}

	return true;
}

#endif

#ifdef CONFIG_ARM64_VHE
static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
{
	return is_kernel_in_hyp_mode();
}

static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
{
	/*
	 * Copy register values that aren't redirected by hardware.
	 *
	 * Before code patching, we only set tpidr_el1, all CPUs need to copy
	 * this value to tpidr_el2 before we patch the code. Once we've done
	 * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
	 * do anything here.
	 */
	if (!alternative_is_applied(ARM64_HAS_VIRT_HOST_EXTN))
		write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
}
#endif

static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
{
	u64 val = read_sysreg_s(SYS_CLIDR_EL1);

	/* Check that CLIDR_EL1.LOU{U,IS} are both 0 */
	WARN_ON(val & (7 << 27 | 7 << 21));
}

#ifdef CONFIG_ARM64_SSBD
static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
{
	if (user_mode(regs))
		return 1;

	if (instr & BIT(PSTATE_Imm_shift))
		regs->pstate |= PSR_SSBS_BIT;
	else
		regs->pstate &= ~PSR_SSBS_BIT;

	arm64_skip_faulting_instruction(regs, 4);
	return 0;
}

static struct undef_hook ssbs_emulation_hook = {
	.instr_mask	= ~(1U << PSTATE_Imm_shift),
	.instr_val	= 0xd500401f | PSTATE_SSBS,
	.fn		= ssbs_emulation_handler,
};

static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
{
	static bool undef_hook_registered = false;
	static DEFINE_RAW_SPINLOCK(hook_lock);

	raw_spin_lock(&hook_lock);
	if (!undef_hook_registered) {
		register_undef_hook(&ssbs_emulation_hook);
		undef_hook_registered = true;
	}
	raw_spin_unlock(&hook_lock);

	if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
		arm64_set_ssbd_mitigation(false);
	} else {
		arm64_set_ssbd_mitigation(true);
	}
}
#endif /* CONFIG_ARM64_SSBD */

#ifdef CONFIG_ARM64_PAN
static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
{
	/*
	 * We modify PSTATE. This won't work from irq context as the PSTATE
	 * is discarded once we return from the exception.
	 */
	WARN_ON_ONCE(in_interrupt());

	sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0);
	asm(SET_PSTATE_PAN(1));
}
#endif /* CONFIG_ARM64_PAN */

#ifdef CONFIG_ARM64_RAS_EXTN
static void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
{
	/* Firmware may have left a deferred SError in this register. */
	write_sysreg_s(0, SYS_DISR_EL1);
}
#endif /* CONFIG_ARM64_RAS_EXTN */

#ifdef CONFIG_ARM64_PTR_AUTH
static void cpu_enable_address_auth(struct arm64_cpu_capabilities const *cap)
{
	sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ENIA | SCTLR_ELx_ENIB |
				       SCTLR_ELx_ENDA | SCTLR_ELx_ENDB);
}

static bool has_address_auth(const struct arm64_cpu_capabilities *entry,
			     int __unused)
{
	return __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_ARCH) ||
	       __system_matches_cap(ARM64_HAS_ADDRESS_AUTH_IMP_DEF);
}

static bool has_generic_auth(const struct arm64_cpu_capabilities *entry,
			     int __unused)
{
	return __system_matches_cap(ARM64_HAS_GENERIC_AUTH_ARCH) ||
	       __system_matches_cap(ARM64_HAS_GENERIC_AUTH_IMP_DEF);
}
#endif /* CONFIG_ARM64_PTR_AUTH */

#ifdef CONFIG_ARM64_E0PD
static void cpu_enable_e0pd(struct arm64_cpu_capabilities const *cap)
{
	if (this_cpu_has_cap(ARM64_HAS_E0PD))
		sysreg_clear_set(tcr_el1, 0, TCR_E0PD1);
}
#endif /* CONFIG_ARM64_E0PD */

#ifdef CONFIG_ARM64_PSEUDO_NMI
static bool enable_pseudo_nmi;

static int __init early_enable_pseudo_nmi(char *p)
{
	return strtobool(p, &enable_pseudo_nmi);
}
early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi);

static bool can_use_gic_priorities(const struct arm64_cpu_capabilities *entry,
				   int scope)
{
	return enable_pseudo_nmi && has_useable_gicv3_cpuif(entry, scope);
}
#endif

static const struct arm64_cpu_capabilities arm64_features[] = {
	{
		.desc = "GIC system register CPU interface",
		.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
		.matches = has_useable_gicv3_cpuif,
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.field_pos = ID_AA64PFR0_GIC_SHIFT,
		.sign = FTR_UNSIGNED,
		.min_field_value = 1,
	},
#ifdef CONFIG_ARM64_PAN
	{
		.desc = "Privileged Access Never",
		.capability = ARM64_HAS_PAN,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64MMFR1_EL1,
		.field_pos = ID_AA64MMFR1_PAN_SHIFT,
		.sign = FTR_UNSIGNED,
		.min_field_value = 1,
		.cpu_enable = cpu_enable_pan,
	},
#endif /* CONFIG_ARM64_PAN */
#ifdef CONFIG_ARM64_LSE_ATOMICS
	{
		.desc = "LSE atomic instructions",
		.capability = ARM64_HAS_LSE_ATOMICS,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64ISAR0_EL1,
		.field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
		.sign = FTR_UNSIGNED,
		.min_field_value = 2,
	},
#endif /* CONFIG_ARM64_LSE_ATOMICS */
	{
		.desc = "Software prefetching using PRFM",
		.capability = ARM64_HAS_NO_HW_PREFETCH,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		.matches = has_no_hw_prefetch,
	},
#ifdef CONFIG_ARM64_UAO
	{
		.desc = "User Access Override",
		.capability = ARM64_HAS_UAO,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64MMFR2_EL1,
		.field_pos = ID_AA64MMFR2_UAO_SHIFT,
		.min_field_value = 1,
		/*
		 * We rely on stop_machine() calling uao_thread_switch() to set
		 * UAO immediately after patching.
		 */
	},
#endif /* CONFIG_ARM64_UAO */
#ifdef CONFIG_ARM64_PAN
	{
		.capability = ARM64_ALT_PAN_NOT_UAO,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = cpufeature_pan_not_uao,
	},
#endif /* CONFIG_ARM64_PAN */
#ifdef CONFIG_ARM64_VHE
	{
		.desc = "Virtualization Host Extensions",
		.capability = ARM64_HAS_VIRT_HOST_EXTN,
		.type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
		.matches = runs_at_el2,
		.cpu_enable = cpu_copy_el2regs,
	},
#endif /* CONFIG_ARM64_VHE */
	{
		.desc = "32-bit EL0 Support",
		.capability = ARM64_HAS_32BIT_EL0,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64PFR0_EL0_SHIFT,
		.min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
	},
	{
		.desc = "Kernel page table isolation (KPTI)",
		.capability = ARM64_UNMAP_KERNEL_AT_EL0,
		.type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
		/*
		 * The ID feature fields below are used to indicate that
		 * the CPU doesn't need KPTI. See unmap_kernel_at_el0 for
		 * more details.
		 */
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.field_pos = ID_AA64PFR0_CSV3_SHIFT,
		.min_field_value = 1,
		.matches = unmap_kernel_at_el0,
		.cpu_enable = kpti_install_ng_mappings,
	},
	{
		/* FP/SIMD is not implemented */
		.capability = ARM64_HAS_NO_FPSIMD,
		.type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
		.min_field_value = 0,
		.matches = has_no_fpsimd,
	},
#ifdef CONFIG_ARM64_PMEM
	{
		.desc = "Data cache clean to Point of Persistence",
		.capability = ARM64_HAS_DCPOP,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64ISAR1_EL1,
		.field_pos = ID_AA64ISAR1_DPB_SHIFT,
		.min_field_value = 1,
	},
	{
		.desc = "Data cache clean to Point of Deep Persistence",
		.capability = ARM64_HAS_DCPODP,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64ISAR1_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64ISAR1_DPB_SHIFT,
		.min_field_value = 2,
	},
#endif
#ifdef CONFIG_ARM64_SVE
	{
		.desc = "Scalable Vector Extension",
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.capability = ARM64_SVE,
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64PFR0_SVE_SHIFT,
1499 .field_pos = ID_AA64PFR0_SVE_SHIFT,
1500 .min_field_value = ID_AA64PFR0_SVE,
1501 .matches = has_cpuid_feature,
c0cda3b8 1502 .cpu_enable = sve_kernel_enable,
43994d82
DM
1503 },
1504#endif /* CONFIG_ARM64_SVE */
64c02720
XX
1505#ifdef CONFIG_ARM64_RAS_EXTN
1506 {
1507 .desc = "RAS Extension Support",
1508 .capability = ARM64_HAS_RAS_EXTN,
5b4747c5 1509 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
64c02720
XX
1510 .matches = has_cpuid_feature,
1511 .sys_reg = SYS_ID_AA64PFR0_EL1,
1512 .sign = FTR_UNSIGNED,
1513 .field_pos = ID_AA64PFR0_RAS_SHIFT,
1514 .min_field_value = ID_AA64PFR0_RAS_V1,
c0cda3b8 1515 .cpu_enable = cpu_clear_disr,
64c02720
XX
1516 },
1517#endif /* CONFIG_ARM64_RAS_EXTN */
6ae4b6e0
SD
1518 {
1519 .desc = "Data cache clean to the PoU not required for I/D coherence",
1520 .capability = ARM64_HAS_CACHE_IDC,
5b4747c5 1521 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
6ae4b6e0 1522 .matches = has_cache_idc,
1602df02 1523 .cpu_enable = cpu_emulate_effective_ctr,
6ae4b6e0
SD
1524 },
1525 {
1526 .desc = "Instruction cache invalidation not required for I/D coherence",
1527 .capability = ARM64_HAS_CACHE_DIC,
5b4747c5 1528 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
6ae4b6e0
SD
1529 .matches = has_cache_dic,
1530 },
e48d53a9
MZ
1531 {
1532 .desc = "Stage-2 Force Write-Back",
1533 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1534 .capability = ARM64_HAS_STAGE2_FWB,
1535 .sys_reg = SYS_ID_AA64MMFR2_EL1,
1536 .sign = FTR_UNSIGNED,
1537 .field_pos = ID_AA64MMFR2_FWB_SHIFT,
1538 .min_field_value = 1,
1539 .matches = has_cpuid_feature,
1540 .cpu_enable = cpu_has_fwb,
1541 },
05abb595
SP
1542#ifdef CONFIG_ARM64_HW_AFDBM
1543 {
1544 /*
1545 * Since we always turn this on, we don't want the user to
1546 * think that the feature is available when it may not be.
1547 * So hide the description.
1548 *
1549 * .desc = "Hardware pagetable Dirty Bit Management",
1550 *
1551 */
1552 .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
1553 .capability = ARM64_HW_DBM,
1554 .sys_reg = SYS_ID_AA64MMFR1_EL1,
1555 .sign = FTR_UNSIGNED,
1556 .field_pos = ID_AA64MMFR1_HADBS_SHIFT,
1557 .min_field_value = 2,
1558 .matches = has_hw_dbm,
1559 .cpu_enable = cpu_enable_hw_dbm,
1560 },
1561#endif
86d0dd34
AB
1562 {
1563 .desc = "CRC32 instructions",
1564 .capability = ARM64_HAS_CRC32,
1565 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1566 .matches = has_cpuid_feature,
1567 .sys_reg = SYS_ID_AA64ISAR0_EL1,
1568 .field_pos = ID_AA64ISAR0_CRC32_SHIFT,
1569 .min_field_value = 1,
1570 },
4f9f4964 1571#ifdef CONFIG_ARM64_SSBD
d71be2b6
WD
1572 {
1573 .desc = "Speculative Store Bypassing Safe (SSBS)",
1574 .capability = ARM64_SSBS,
1575 .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
1576 .matches = has_cpuid_feature,
1577 .sys_reg = SYS_ID_AA64PFR1_EL1,
1578 .field_pos = ID_AA64PFR1_SSBS_SHIFT,
1579 .sign = FTR_UNSIGNED,
1580 .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
8f04e8e6 1581 .cpu_enable = cpu_enable_ssbs,
d71be2b6 1582 },
5ffdfaed
VM
1583#endif
1584#ifdef CONFIG_ARM64_CNP
1585 {
1586 .desc = "Common not Private translations",
1587 .capability = ARM64_HAS_CNP,
1588 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1589 .matches = has_useable_cnp,
1590 .sys_reg = SYS_ID_AA64MMFR2_EL1,
1591 .sign = FTR_UNSIGNED,
1592 .field_pos = ID_AA64MMFR2_CNP_SHIFT,
1593 .min_field_value = 1,
1594 .cpu_enable = cpu_enable_cnp,
1595 },
8f04e8e6 1596#endif
bd4fb6d2
WD
1597 {
1598 .desc = "Speculation barrier (SB)",
1599 .capability = ARM64_HAS_SB,
1600 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1601 .matches = has_cpuid_feature,
1602 .sys_reg = SYS_ID_AA64ISAR1_EL1,
1603 .field_pos = ID_AA64ISAR1_SB_SHIFT,
1604 .sign = FTR_UNSIGNED,
1605 .min_field_value = 1,
1606 },
6984eb47
MR
1607#ifdef CONFIG_ARM64_PTR_AUTH
1608 {
1609 .desc = "Address authentication (architected algorithm)",
1610 .capability = ARM64_HAS_ADDRESS_AUTH_ARCH,
1611 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1612 .sys_reg = SYS_ID_AA64ISAR1_EL1,
1613 .sign = FTR_UNSIGNED,
1614 .field_pos = ID_AA64ISAR1_APA_SHIFT,
1615 .min_field_value = ID_AA64ISAR1_APA_ARCHITECTED,
1616 .matches = has_cpuid_feature,
1617 },
1618 {
1619 .desc = "Address authentication (IMP DEF algorithm)",
1620 .capability = ARM64_HAS_ADDRESS_AUTH_IMP_DEF,
1621 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1622 .sys_reg = SYS_ID_AA64ISAR1_EL1,
1623 .sign = FTR_UNSIGNED,
1624 .field_pos = ID_AA64ISAR1_API_SHIFT,
1625 .min_field_value = ID_AA64ISAR1_API_IMP_DEF,
1626 .matches = has_cpuid_feature,
cfef06bd
KM
1627 },
1628 {
1629 .capability = ARM64_HAS_ADDRESS_AUTH,
1630 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1631 .matches = has_address_auth,
75031975 1632 .cpu_enable = cpu_enable_address_auth,
6984eb47
MR
1633 },
1634 {
1635 .desc = "Generic authentication (architected algorithm)",
1636 .capability = ARM64_HAS_GENERIC_AUTH_ARCH,
1637 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1638 .sys_reg = SYS_ID_AA64ISAR1_EL1,
1639 .sign = FTR_UNSIGNED,
1640 .field_pos = ID_AA64ISAR1_GPA_SHIFT,
1641 .min_field_value = ID_AA64ISAR1_GPA_ARCHITECTED,
1642 .matches = has_cpuid_feature,
1643 },
1644 {
1645 .desc = "Generic authentication (IMP DEF algorithm)",
1646 .capability = ARM64_HAS_GENERIC_AUTH_IMP_DEF,
1647 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1648 .sys_reg = SYS_ID_AA64ISAR1_EL1,
1649 .sign = FTR_UNSIGNED,
1650 .field_pos = ID_AA64ISAR1_GPI_SHIFT,
1651 .min_field_value = ID_AA64ISAR1_GPI_IMP_DEF,
1652 .matches = has_cpuid_feature,
1653 },
cfef06bd
KM
1654 {
1655 .capability = ARM64_HAS_GENERIC_AUTH,
1656 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1657 .matches = has_generic_auth,
1658 },
6984eb47 1659#endif /* CONFIG_ARM64_PTR_AUTH */
b90d2b22
JT
1660#ifdef CONFIG_ARM64_PSEUDO_NMI
1661 {
1662 /*
1663 * Depends on having a usable GICv3 CPU interface
1664 */
1665 .desc = "IRQ priority masking",
1666 .capability = ARM64_HAS_IRQ_PRIO_MASKING,
1667 .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
1668 .matches = can_use_gic_priorities,
1669 .sys_reg = SYS_ID_AA64PFR0_EL1,
1670 .field_pos = ID_AA64PFR0_GIC_SHIFT,
1671 .sign = FTR_UNSIGNED,
1672 .min_field_value = 1,
1673 },
3e6c69a0
MB
1674#endif
1675#ifdef CONFIG_ARM64_E0PD
1676 {
1677 .desc = "E0PD",
1678 .capability = ARM64_HAS_E0PD,
1679 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1680 .sys_reg = SYS_ID_AA64MMFR2_EL1,
1681 .sign = FTR_UNSIGNED,
1682 .field_pos = ID_AA64MMFR2_E0PD_SHIFT,
1683 .matches = has_cpuid_feature,
1684 .min_field_value = 1,
1685 .cpu_enable = cpu_enable_e0pd,
1686 },
bc206065 1687#endif
1a50ec0b
RH
1688#ifdef CONFIG_ARCH_RANDOM
1689 {
1690 .desc = "Random Number Generator",
1691 .capability = ARM64_HAS_RNG,
1692 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1693 .matches = has_cpuid_feature,
1694 .sys_reg = SYS_ID_AA64ISAR0_EL1,
1695 .field_pos = ID_AA64ISAR0_RNDR_SHIFT,
1696 .sign = FTR_UNSIGNED,
1697 .min_field_value = 1,
1698 },
b90d2b22 1699#endif
359b7064
MZ
1700 {},
1701};
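/*
 * Illustrative sketch: once an entry from the table above has been
 * detected and enabled, generic kernel code typically tests for it via
 * cpus_have_const_cap(), which becomes a patched static branch once the
 * system capabilities are finalised, e.g.
 *
 *	if (cpus_have_const_cap(ARM64_HAS_PAN))
 *		...;	// PAN is available system-wide
 */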
1702
1e013d06
WD
1703#define HWCAP_CPUID_MATCH(reg, field, s, min_value) \
1704 .matches = has_cpuid_feature, \
1705 .sys_reg = reg, \
1706 .field_pos = field, \
1707 .sign = s, \
1708 .min_field_value = min_value,
1709
1710#define __HWCAP_CAP(name, cap_type, cap) \
1711 .desc = name, \
1712 .type = ARM64_CPUCAP_SYSTEM_FEATURE, \
1713 .hwcap_type = cap_type, \
1714 .hwcap = cap, \
1715
1716#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap) \
1717 { \
1718 __HWCAP_CAP(#cap, cap_type, cap) \
1719 HWCAP_CPUID_MATCH(reg, field, s, min_value) \
37b01d53
SP
1720 }
1721
1e013d06
WD
1722#define HWCAP_MULTI_CAP(list, cap_type, cap) \
1723 { \
1724 __HWCAP_CAP(#cap, cap_type, cap) \
1725 .matches = cpucap_multi_entry_cap_matches, \
1726 .match_list = list, \
1727 }
1728
7559950a
SP
1729#define HWCAP_CAP_MATCH(match, cap_type, cap) \
1730 { \
1731 __HWCAP_CAP(#cap, cap_type, cap) \
1732 .matches = match, \
1733 }
1734
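/*
 * For illustration, an entry such as
 *
 *	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT,
 *		  FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AES)
 *
 * expands via the macros above to roughly:
 *
 *	{
 *		.desc = "KERNEL_HWCAP_AES",
 *		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
 *		.hwcap_type = CAP_HWCAP,
 *		.hwcap = KERNEL_HWCAP_AES,
 *		.matches = has_cpuid_feature,
 *		.sys_reg = SYS_ID_AA64ISAR0_EL1,
 *		.field_pos = ID_AA64ISAR0_AES_SHIFT,
 *		.sign = FTR_UNSIGNED,
 *		.min_field_value = 1,
 *	}
 */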
1e013d06
WD
1735#ifdef CONFIG_ARM64_PTR_AUTH
1736static const struct arm64_cpu_capabilities ptr_auth_hwcap_addr_matches[] = {
1737 {
1738 HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_APA_SHIFT,
1739 FTR_UNSIGNED, ID_AA64ISAR1_APA_ARCHITECTED)
1740 },
1741 {
1742 HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_API_SHIFT,
1743 FTR_UNSIGNED, ID_AA64ISAR1_API_IMP_DEF)
1744 },
1745 {},
1746};
1747
1748static const struct arm64_cpu_capabilities ptr_auth_hwcap_gen_matches[] = {
1749 {
1750 HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPA_SHIFT,
1751 FTR_UNSIGNED, ID_AA64ISAR1_GPA_ARCHITECTED)
1752 },
1753 {
1754 HWCAP_CPUID_MATCH(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_GPI_SHIFT,
1755 FTR_UNSIGNED, ID_AA64ISAR1_GPI_IMP_DEF)
1756 },
1757 {},
1758};
1759#endif
1760
f3efb675 1761static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
aaba098f
AM
1762 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_PMULL),
1763 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_AES),
1764 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA1),
1765 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA2),
1766 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_SHA512),
1767 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_CRC32),
1768 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ATOMICS),
1769 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDRDM),
1770 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SHA3),
1771 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM3),
1772 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SM4),
1773 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDDP),
1774 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDFHM),
1775 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FLAGM),
12019374 1776 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_FLAGM2),
1a50ec0b 1777 HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RNDR_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_RNG),
aaba098f
AM
1778 HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_FP),
1779 HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FPHP),
1780 HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, KERNEL_HWCAP_ASIMD),
1781 HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_ASIMDHP),
1782 HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DIT),
1783 HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DCPOP),
671db581 1784 HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_DCPODP),
aaba098f
AM
1785 HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_JSCVT),
1786 HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FCMA),
1787 HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_LRCPC),
1788 HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, KERNEL_HWCAP_ILRCPC),
ca9503fc 1789 HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FRINTTS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_FRINT),
aaba098f 1790 HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_SB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_SB),
d4209d8b
SP
1791 HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_BF16_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_BF16),
1792 HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DGH_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_DGH),
1793 HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_I8MM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_I8MM),
aaba098f 1794 HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, KERNEL_HWCAP_USCAT),
43994d82 1795#ifdef CONFIG_ARM64_SVE
aaba098f 1796 HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, KERNEL_HWCAP_SVE),
06a916fe
DM
1797 HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SVEVER_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SVEVER_SVE2, CAP_HWCAP, KERNEL_HWCAP_SVE2),
1798 HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES, CAP_HWCAP, KERNEL_HWCAP_SVEAES),
1799 HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_AES_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_AES_PMULL, CAP_HWCAP, KERNEL_HWCAP_SVEPMULL),
1800 HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BITPERM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_BITPERM, CAP_HWCAP, KERNEL_HWCAP_SVEBITPERM),
d4209d8b 1801 HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_BF16_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_BF16, CAP_HWCAP, KERNEL_HWCAP_SVEBF16),
06a916fe
DM
1802 HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SHA3_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SHA3, CAP_HWCAP, KERNEL_HWCAP_SVESHA3),
1803 HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_SM4_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_SM4, CAP_HWCAP, KERNEL_HWCAP_SVESM4),
d4209d8b
SP
1804 HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_I8MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_I8MM, CAP_HWCAP, KERNEL_HWCAP_SVEI8MM),
1805 HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_F32MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_F32MM, CAP_HWCAP, KERNEL_HWCAP_SVEF32MM),
1806 HWCAP_CAP(SYS_ID_AA64ZFR0_EL1, ID_AA64ZFR0_F64MM_SHIFT, FTR_UNSIGNED, ID_AA64ZFR0_F64MM, CAP_HWCAP, KERNEL_HWCAP_SVEF64MM),
43994d82 1807#endif
aaba098f 1808 HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, KERNEL_HWCAP_SSBS),
75031975 1809#ifdef CONFIG_ARM64_PTR_AUTH
aaba098f
AM
1810 HWCAP_MULTI_CAP(ptr_auth_hwcap_addr_matches, CAP_HWCAP, KERNEL_HWCAP_PACA),
1811 HWCAP_MULTI_CAP(ptr_auth_hwcap_gen_matches, CAP_HWCAP, KERNEL_HWCAP_PACG),
75031975 1812#endif
75283501
SP
1813 {},
1814};
1815
7559950a
SP
1816#ifdef CONFIG_COMPAT
1817static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope)
1818{
1819 /*
1820 * Check that all of MVFR1_EL1.{SIMDSP, SIMDInt, SIMDLS} are available,
1821 * in line with the arm32 check in vfp_init(). We keep the check
1822 * future-proof by simply requiring these values to be non-zero.
1823 */
1824 u32 mvfr1;
1825
1826 WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
1827 if (scope == SCOPE_SYSTEM)
1828 mvfr1 = read_sanitised_ftr_reg(SYS_MVFR1_EL1);
1829 else
1830 mvfr1 = read_sysreg_s(SYS_MVFR1_EL1);
1831
1832 return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDSP_SHIFT) &&
1833 cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDINT_SHIFT) &&
1834 cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDLS_SHIFT);
1835}
1836#endif
1837
75283501 1838static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
37b01d53 1839#ifdef CONFIG_COMPAT
7559950a
SP
1840 HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON),
1841 HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_SIMDFMAC_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4),
1842 /* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggy-back on this to detect the presence of VFP support. */
1843 HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP),
1844 HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3),
ff96f7bc
SP
1845 HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
1846 HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
1847 HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
1848 HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
1849 HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
37b01d53
SP
1850#endif
1851 {},
1852};
1853
f3efb675 1854static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
37b01d53
SP
1855{
1856 switch (cap->hwcap_type) {
1857 case CAP_HWCAP:
aaba098f 1858 cpu_set_feature(cap->hwcap);
37b01d53
SP
1859 break;
1860#ifdef CONFIG_COMPAT
1861 case CAP_COMPAT_HWCAP:
1862 compat_elf_hwcap |= (u32)cap->hwcap;
1863 break;
1864 case CAP_COMPAT_HWCAP2:
1865 compat_elf_hwcap2 |= (u32)cap->hwcap;
1866 break;
1867#endif
1868 default:
1869 WARN_ON(1);
1870 break;
1871 }
1872}
1873
1874/* Check if we have a particular HWCAP enabled */
f3efb675 1875static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
37b01d53
SP
1876{
1877 bool rc;
1878
1879 switch (cap->hwcap_type) {
1880 case CAP_HWCAP:
aaba098f 1881 rc = cpu_have_feature(cap->hwcap);
37b01d53
SP
1882 break;
1883#ifdef CONFIG_COMPAT
1884 case CAP_COMPAT_HWCAP:
1885 rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
1886 break;
1887 case CAP_COMPAT_HWCAP2:
1888 rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
1889 break;
1890#endif
1891 default:
1892 WARN_ON(1);
1893 rc = false;
1894 }
1895
1896 return rc;
1897}
1898
75283501 1899static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
37b01d53 1900{
77c97b4e 1901 /* We support emulation of accesses to CPU ID feature registers */
aaba098f 1902 cpu_set_named_feature(CPUID);
75283501 1903 for (; hwcaps->matches; hwcaps++)
143ba05d 1904 if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps)))
75283501 1905 cap_set_elf_hwcap(hwcaps);
37b01d53
SP
1906}
1907
606f8e7b 1908static void update_cpu_capabilities(u16 scope_mask)
67948af4 1909{
606f8e7b 1910 int i;
67948af4
SP
1911 const struct arm64_cpu_capabilities *caps;
1912
cce360b5 1913 scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
606f8e7b
SP
1914 for (i = 0; i < ARM64_NCAPS; i++) {
1915 caps = cpu_hwcaps_ptrs[i];
1916 if (!caps || !(caps->type & scope_mask) ||
1917 cpus_have_cap(caps->capability) ||
cce360b5 1918 !caps->matches(caps, cpucap_default_scope(caps)))
359b7064
MZ
1919 continue;
1920
606f8e7b
SP
1921 if (caps->desc)
1922 pr_info("detected: %s\n", caps->desc);
75283501 1923 cpus_set_cap(caps->capability);
0ceb0d56
DT
1924
1925 if ((scope_mask & SCOPE_BOOT_CPU) && (caps->type & SCOPE_BOOT_CPU))
1926 set_bit(caps->capability, boot_capabilities);
359b7064 1927 }
ce8b602c
SP
1928}
1929
0b587c84
SP
1930/*
1931 * Enable all the available capabilities on this CPU. The capabilities
1932 * with BOOT_CPU scope are handled separately and hence skipped here.
1933 */
1934static int cpu_enable_non_boot_scope_capabilities(void *__unused)
ed478b3f 1935{
0b587c84
SP
1936 int i;
1937 u16 non_boot_scope = SCOPE_ALL & ~SCOPE_BOOT_CPU;
ed478b3f 1938
0b587c84
SP
1939 for_each_available_cap(i) {
1940 const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[i];
1941
1942 if (WARN_ON(!cap))
1943 continue;
c0cda3b8 1944
0b587c84
SP
1945 if (!(cap->type & non_boot_scope))
1946 continue;
1947
1948 if (cap->cpu_enable)
1949 cap->cpu_enable(cap);
1950 }
c0cda3b8
DM
1951 return 0;
1952}
1953
ce8b602c 1954/*
dbb4e152
SP
1955 * Run through the enabled capabilities and enable each of them on all
1956 * active CPUs.
ce8b602c 1957 */
0b587c84 1958static void __init enable_cpu_capabilities(u16 scope_mask)
ce8b602c 1959{
0b587c84
SP
1960 int i;
1961 const struct arm64_cpu_capabilities *caps;
1962 bool boot_scope;
1963
cce360b5 1964 scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
0b587c84 1965 boot_scope = !!(scope_mask & SCOPE_BOOT_CPU);
63a1e1c9 1966
0b587c84
SP
1967 for (i = 0; i < ARM64_NCAPS; i++) {
1968 unsigned int num;
1969
1970 caps = cpu_hwcaps_ptrs[i];
1971 if (!caps || !(caps->type & scope_mask))
1972 continue;
1973 num = caps->capability;
1974 if (!cpus_have_cap(num))
63a1e1c9
MR
1975 continue;
1976
1977 /* Ensure cpus_have_const_cap(num) works */
1978 static_branch_enable(&cpu_hwcap_keys[num]);
1979
0b587c84 1980 if (boot_scope && caps->cpu_enable)
2a6dcb2b 1981 /*
fd9d63da
SP
1982 * Capabilities with SCOPE_BOOT_CPU scope are finalised
1983 * before any secondary CPU boots. Thus, each secondary
1984 * will enable the capability as appropriate via
1985 * check_local_cpu_capabilities(). The only exception is
1986 * the boot CPU, for which the capability must be
1987 * enabled here. This approach avoids costly
1988 * stop_machine() calls for this case.
2a6dcb2b 1989 */
0b587c84 1990 caps->cpu_enable(caps);
63a1e1c9 1991 }
dbb4e152 1992
0b587c84
SP
1993 /*
1994 * For all non-boot scope capabilities, use stop_machine(), which
1995 * schedules the work in a context where PSTATE can be safely modified,
1996 * instead of on_each_cpu(), which uses an IPI and so would give us a
1997 * PSTATE change that disappears once we return from the interrupt.
1998 */
1999 if (!boot_scope)
2000 stop_machine(cpu_enable_non_boot_scope_capabilities,
2001 NULL, cpu_online_mask);
ed478b3f
SP
2002}
2003
eaac4d83
SP
2004/*
2005 * Run through the list of capabilities to check for conflicts.
2006 * If the system has already detected a capability, take necessary
2007 * action on this CPU.
2008 *
2009 * Returns "false" on conflicts.
2010 */
606f8e7b 2011static bool verify_local_cpu_caps(u16 scope_mask)
eaac4d83 2012{
606f8e7b 2013 int i;
eaac4d83 2014 bool cpu_has_cap, system_has_cap;
606f8e7b 2015 const struct arm64_cpu_capabilities *caps;
eaac4d83 2016
cce360b5
SP
2017 scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
2018
606f8e7b
SP
2019 for (i = 0; i < ARM64_NCAPS; i++) {
2020 caps = cpu_hwcaps_ptrs[i];
2021 if (!caps || !(caps->type & scope_mask))
cce360b5
SP
2022 continue;
2023
ba7d9233 2024 cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU);
eaac4d83
SP
2025 system_has_cap = cpus_have_cap(caps->capability);
2026
2027 if (system_has_cap) {
2028 /*
2029 * Check if the new CPU misses an advertised feature,
2030 * which is not safe to miss.
2031 */
2032 if (!cpu_has_cap && !cpucap_late_cpu_optional(caps))
2033 break;
2034 /*
2035 * We have to issue cpu_enable() irrespective of
2036 * whether the CPU has it or not, as it is enabled
2037 * system wide. It is up to the callback to take
2038 * appropriate action on this CPU.
2039 */
2040 if (caps->cpu_enable)
2041 caps->cpu_enable(caps);
2042 } else {
2043 /*
2044 * Check if the CPU has this capability if it isn't
2045 * safe to have when the system doesn't.
2046 */
2047 if (cpu_has_cap && !cpucap_late_cpu_permitted(caps))
2048 break;
2049 }
2050 }
2051
606f8e7b 2052 if (i < ARM64_NCAPS) {
eaac4d83
SP
2053 pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
2054 smp_processor_id(), caps->capability,
2055 caps->desc, system_has_cap, cpu_has_cap);
2056 return false;
2057 }
2058
2059 return true;
2060}
2061
dbb4e152 2062/*
13f417f3
SP
2063 * Check for CPU features that are used in early boot
2064 * based on the Boot CPU value.
dbb4e152 2065 */
13f417f3 2066static void check_early_cpu_features(void)
dbb4e152 2067{
13f417f3 2068 verify_cpu_asid_bits();
fd9d63da
SP
2069 /*
2070 * Early features are used by the kernel already. If there
2071 * is a conflict, we cannot proceed further.
2072 */
2073 if (!verify_local_cpu_caps(SCOPE_BOOT_CPU))
2074 cpu_panic_kernel();
dbb4e152 2075}
1c076303 2076
75283501
SP
2077static void
2078verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
2079{
2080
92406f0c
SP
2081 for (; caps->matches; caps++)
2082 if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) {
75283501
SP
2083 pr_crit("CPU%d: missing HWCAP: %s\n",
2084 smp_processor_id(), caps->desc);
2085 cpu_die_early();
2086 }
75283501
SP
2087}
2088
2e0f2478
DM
2089static void verify_sve_features(void)
2090{
2091 u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
2092 u64 zcr = read_zcr_features();
2093
2094 unsigned int safe_len = safe_zcr & ZCR_ELx_LEN_MASK;
2095 unsigned int len = zcr & ZCR_ELx_LEN_MASK;
2096
2097 if (len < safe_len || sve_verify_vq_map()) {
d06b76be 2098 pr_crit("CPU%d: SVE: vector length support mismatch\n",
2e0f2478
DM
2099 smp_processor_id());
2100 cpu_die_early();
2101 }
2102
2103 /* Add checks on other ZCR bits here if necessary */
2104}
2105
1e89baed 2106
dbb4e152
SP
2107/*
2108 * Run through the enabled system capabilities and enable each of them
2109 * on this CPU. The capabilities were decided based on the CPUs available
2110 * at boot time. Any new CPU must match the system-wide status of each
2111 * capability. If a new CPU lacks a capability which the system has
2112 * already enabled, we cannot do anything to fix it up and it could cause
2113 * unexpected failures. So we park the CPU.
2114 */
c47a1900 2115static void verify_local_cpu_capabilities(void)
dbb4e152 2116{
fd9d63da
SP
2117 /*
2118 * The capabilities with SCOPE_BOOT_CPU are checked from
2119 * check_early_cpu_features(), as they need to be verified
2120 * on all secondary CPUs.
2121 */
2122 if (!verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU))
600b9c91 2123 cpu_die_early();
ed478b3f 2124
c47a1900 2125 verify_local_elf_hwcaps(arm64_elf_hwcaps);
2e0f2478 2126
c47a1900
SP
2127 if (system_supports_32bit_el0())
2128 verify_local_elf_hwcaps(compat_elf_hwcaps);
2e0f2478
DM
2129
2130 if (system_supports_sve())
2131 verify_sve_features();
c47a1900 2132}
dbb4e152 2133
c47a1900
SP
2134void check_local_cpu_capabilities(void)
2135{
2136 /*
2137 * All secondary CPUs should conform to the early CPU features
2138 * in use by the kernel based on boot CPU.
2139 */
13f417f3
SP
2140 check_early_cpu_features();
2141
dbb4e152 2142 /*
c47a1900 2143 * If we haven't finalised the system capabilities, this CPU gets
fbd890b9 2144 * a chance to update the errata workarounds and local features.
c47a1900
SP
2145 * Otherwise, this CPU should verify that it has all the system
2146 * advertised capabilities.
dbb4e152 2147 */
b51c6ac2 2148 if (!system_capabilities_finalized())
ed478b3f
SP
2149 update_cpu_capabilities(SCOPE_LOCAL_CPU);
2150 else
c47a1900 2151 verify_local_cpu_capabilities();
359b7064
MZ
2152}
2153
fd9d63da
SP
2154static void __init setup_boot_cpu_capabilities(void)
2155{
2156 /* Detect capabilities with either SCOPE_BOOT_CPU or SCOPE_LOCAL_CPU */
2157 update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU);
2158 /* Enable the SCOPE_BOOT_CPU capabilities alone right away */
2159 enable_cpu_capabilities(SCOPE_BOOT_CPU);
2160}
2161
f7bfc14a 2162bool this_cpu_has_cap(unsigned int n)
8f413758 2163{
f7bfc14a
SP
2164 if (!WARN_ON(preemptible()) && n < ARM64_NCAPS) {
2165 const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n];
2166
2167 if (cap)
2168 return cap->matches(cap, SCOPE_LOCAL_CPU);
2169 }
2170
2171 return false;
8f413758
MZ
2172}
2173
3ff047f6
ADK
2174/*
2175 * This helper function is used in a narrow window when:
2176 * - the system-wide safe registers have been set up from all the SMP CPUs, and
2177 * - the SYSTEM_FEATURE cpu_hwcaps may not yet have been set.
2178 * In all other cases cpus_have_{const_}cap() should be used.
2179 */
2180static bool __system_matches_cap(unsigned int n)
2181{
2182 if (n < ARM64_NCAPS) {
2183 const struct arm64_cpu_capabilities *cap = cpu_hwcaps_ptrs[n];
2184
2185 if (cap)
2186 return cap->matches(cap, SCOPE_SYSTEM);
2187 }
2188 return false;
2189}
2190
aec0bff7
AM
2191void cpu_set_feature(unsigned int num)
2192{
2193 WARN_ON(num >= MAX_CPU_FEATURES);
2194 elf_hwcap |= BIT(num);
2195}
2196EXPORT_SYMBOL_GPL(cpu_set_feature);
2197
2198bool cpu_have_feature(unsigned int num)
2199{
2200 WARN_ON(num >= MAX_CPU_FEATURES);
2201 return elf_hwcap & BIT(num);
2202}
2203EXPORT_SYMBOL_GPL(cpu_have_feature);
2204
2205unsigned long cpu_get_elf_hwcap(void)
2206{
2207 /*
2208 * We currently only populate the first 32 bits of AT_HWCAP. Please
2209 * note that for userspace compatibility we guarantee that bits 62
2210 * and 63 will always be returned as 0.
2211 */
2212 return lower_32_bits(elf_hwcap);
2213}
2214
2215unsigned long cpu_get_elf_hwcap2(void)
2216{
2217 return upper_32_bits(elf_hwcap);
2218}
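/*
 * Userspace sketch (illustrative): the hwcap bits exported above reach
 * applications through the auxiliary vector, e.g.
 *
 *	#include <sys/auxv.h>
 *	#include <asm/hwcap.h>
 *
 *	if (getauxval(AT_HWCAP) & HWCAP_ASIMD)
 *		...;	// Advanced SIMD is usable from EL0
 *	if (getauxval(AT_HWCAP2) & HWCAP2_SVE2)
 *		...;	// SVE2 is usable from EL0
 */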
2219
ed478b3f
SP
2220static void __init setup_system_capabilities(void)
2221{
2222 /*
2223 * We have finalised the system-wide safe feature
2224 * registers, so finalise the capabilities that depend
fd9d63da
SP
2225 * on them. Also enable all the available capabilities
2226 * that are not already enabled.
ed478b3f
SP
2227 */
2228 update_cpu_capabilities(SCOPE_SYSTEM);
fd9d63da 2229 enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
ed478b3f
SP
2230}
2231
9cdf8ec4 2232void __init setup_cpu_features(void)
359b7064 2233{
9cdf8ec4 2234 u32 cwg;
9cdf8ec4 2235
ed478b3f 2236 setup_system_capabilities();
75283501 2237 setup_elf_hwcaps(arm64_elf_hwcaps);
643d703d
SP
2238
2239 if (system_supports_32bit_el0())
2240 setup_elf_hwcaps(compat_elf_hwcaps);
dbb4e152 2241
2e6f549f
KC
2242 if (system_uses_ttbr0_pan())
2243 pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
2244
2e0f2478 2245 sve_setup();
94b07c1f 2246 minsigstksz_setup();
2e0f2478 2247
dbb4e152 2248 /* Advertise that we have computed the system capabilities */
b51c6ac2 2249 finalize_system_capabilities();
dbb4e152 2250
9cdf8ec4
SP
2251 /*
2252 * Check for sane CTR_EL0.CWG value.
2253 */
2254 cwg = cache_type_cwg();
9cdf8ec4 2255 if (!cwg)
ebc7e21e
CM
2256 pr_warn("No Cache Writeback Granule information, assuming %d\n",
2257 ARCH_DMA_MINALIGN);
359b7064 2258}
70544196
JM
2259
2260static bool __maybe_unused
92406f0c 2261cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
70544196 2262{
3ff047f6 2263 return (__system_matches_cap(ARM64_HAS_PAN) && !__system_matches_cap(ARM64_HAS_UAO));
70544196 2264}
77c97b4e 2265
5ffdfaed
VM
2266static void __maybe_unused cpu_enable_cnp(struct arm64_cpu_capabilities const *cap)
2267{
2268 cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
2269}
2270
77c97b4e
SP
2271/*
2272 * We emulate only the following system register space.
2273 * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 4 - 7]
2274 * See Table C5-6 System instruction encodings for System register accesses,
2275 * ARMv8 ARM (ARM DDI 0487A.f) for more details.
2276 */
2277static inline bool __attribute_const__ is_emulated(u32 id)
2278{
2279 return (sys_reg_Op0(id) == 0x3 &&
2280 sys_reg_CRn(id) == 0x0 &&
2281 sys_reg_Op1(id) == 0x0 &&
2282 (sys_reg_CRm(id) == 0 ||
2283 ((sys_reg_CRm(id) >= 4) && (sys_reg_CRm(id) <= 7))));
2284}
2285
2286/*
2287 * With CRm == 0, reg should be one of:
2288 * MIDR_EL1, MPIDR_EL1 or REVIDR_EL1.
2289 */
2290static inline int emulate_id_reg(u32 id, u64 *valp)
2291{
2292 switch (id) {
2293 case SYS_MIDR_EL1:
2294 *valp = read_cpuid_id();
2295 break;
2296 case SYS_MPIDR_EL1:
2297 *valp = SYS_MPIDR_SAFE_VAL;
2298 break;
2299 case SYS_REVIDR_EL1:
2300 /* IMPLEMENTATION DEFINED values are emulated with 0 */
2301 *valp = 0;
2302 break;
2303 default:
2304 return -EINVAL;
2305 }
2306
2307 return 0;
2308}
2309
2310static int emulate_sys_reg(u32 id, u64 *valp)
2311{
2312 struct arm64_ftr_reg *regp;
2313
2314 if (!is_emulated(id))
2315 return -EINVAL;
2316
2317 if (sys_reg_CRm(id) == 0)
2318 return emulate_id_reg(id, valp);
2319
2320 regp = get_arm64_ftr_reg(id);
2321 if (regp)
2322 *valp = arm64_ftr_reg_user_value(regp);
2323 else
2324 /*
2325 * The untracked registers are either IMPLEMENTATION DEFINED
2326 * (e.g., ID_AFR0_EL1) or reserved RAZ.
2327 */
2328 *valp = 0;
2329 return 0;
2330}
2331
520ad988 2332int do_emulate_mrs(struct pt_regs *regs, u32 sys_reg, u32 rt)
77c97b4e
SP
2333{
2334 int rc;
77c97b4e
SP
2335 u64 val;
2336
77c97b4e
SP
2337 rc = emulate_sys_reg(sys_reg, &val);
2338 if (!rc) {
520ad988 2339 pt_regs_write_reg(regs, rt, val);
6436beee 2340 arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
77c97b4e 2341 }
77c97b4e
SP
2342 return rc;
2343}
2344
520ad988
AK
2345static int emulate_mrs(struct pt_regs *regs, u32 insn)
2346{
2347 u32 sys_reg, rt;
2348
2349 /*
2350 * sys_reg values are defined as they are used in the mrs/msr instructions.
2351 * Shift the immediate value to get the encoding.
2352 */
2353 sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5;
2354 rt = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
2355 return do_emulate_mrs(regs, sys_reg, rt);
2356}
2357
77c97b4e
SP
2358static struct undef_hook mrs_hook = {
2359 .instr_mask = 0xfff00000,
2360 .instr_val = 0xd5300000,
d64567f6 2361 .pstate_mask = PSR_AA32_MODE_MASK,
77c97b4e
SP
2362 .pstate_val = PSR_MODE_EL0t,
2363 .fn = emulate_mrs,
2364};
2365
2366static int __init enable_mrs_emulation(void)
2367{
2368 register_undef_hook(&mrs_hook);
2369 return 0;
2370}
2371
c0d8832e 2372core_initcall(enable_mrs_emulation);
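/*
 * Userspace sketch (illustrative): with the undef hook registered above,
 * an EL0 read of an emulated ID register traps and is handled by
 * do_emulate_mrs(), which returns the sanitised value, e.g.
 *
 *	unsigned long isar0;
 *	asm("mrs %0, ID_AA64ISAR0_EL1" : "=r" (isar0));
 */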
1b3ccf4b
JL
2373
2374ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
2375 char *buf)
2376{
2377 if (__meltdown_safe)
2378 return sprintf(buf, "Not affected\n");
2379
2380 if (arm64_kernel_unmapped_at_el0())
2381 return sprintf(buf, "Mitigation: PTI\n");
2382
2383 return sprintf(buf, "Vulnerable\n");
2384}
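/*
 * Note: this handler backs the generic "meltdown" attribute under
 * /sys/devices/system/cpu/vulnerabilities/, so reading that file reports
 * one of the three strings above.
 */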