]>
Commit | Line | Data |
---|---|---|
caab277b | 1 | // SPDX-License-Identifier: GPL-2.0-only |
e116a375 AP |
2 | /* |
3 | * Contains CPU specific errata definitions | |
4 | * | |
5 | * Copyright (C) 2014 ARM Ltd. | |
e116a375 AP |
6 | */ |
7 | ||
94a5d879 | 8 | #include <linux/arm-smccc.h> |
e116a375 | 9 | #include <linux/types.h> |
a111b7c0 | 10 | #include <linux/cpu.h> |
e116a375 AP |
11 | #include <asm/cpu.h> |
12 | #include <asm/cputype.h> | |
13 | #include <asm/cpufeature.h> | |
4db61fef | 14 | #include <asm/kvm_asm.h> |
93916beb | 15 | #include <asm/smp_plat.h> |
e116a375 | 16 | |
301bcfac | 17 | static bool __maybe_unused |
92406f0c | 18 | is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope) |
301bcfac | 19 | { |
e8002e02 AB |
20 | const struct arm64_midr_revidr *fix; |
21 | u32 midr = read_cpuid_id(), revidr; | |
22 | ||
92406f0c | 23 | WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); |
1df31050 | 24 | if (!is_midr_in_range(midr, &entry->midr_range)) |
e8002e02 AB |
25 | return false; |
26 | ||
27 | midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK; | |
28 | revidr = read_cpuid(REVIDR_EL1); | |
29 | for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++) | |
30 | if (midr == fix->midr_rv && (revidr & fix->revidr_mask)) | |
31 | return false; | |
32 | ||
33 | return true; | |
301bcfac AP |
34 | } |
35 | ||
be5b2998 SP |
36 | static bool __maybe_unused |
37 | is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry, | |
38 | int scope) | |
301bcfac | 39 | { |
92406f0c | 40 | WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); |
be5b2998 | 41 | return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list); |
301bcfac AP |
42 | } |
43 | ||
bb487118 SB |
44 | static bool __maybe_unused |
45 | is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope) | |
46 | { | |
47 | u32 model; | |
48 | ||
49 | WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); | |
50 | ||
51 | model = read_cpuid_id(); | |
52 | model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) | | |
53 | MIDR_ARCHITECTURE_MASK; | |
54 | ||
1df31050 | 55 | return model == entry->midr_range.model; |
bb487118 SB |
56 | } |
57 | ||
/*
 * Detect a CPU whose CTR_EL0 (cache type register) disagrees with the
 * system-wide sanitised copy, so userspace CTR_EL0 accesses can be
 * trapped and emulated for it.
 */
static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	/* Only the strictly-checked CTR_EL0 fields participate */
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
	u64 ctr_raw, ctr_real;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * We want to make sure that all the CPUs in the system expose
	 * a consistent CTR_EL0 to make sure that applications behaves
	 * correctly with migration.
	 *
	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0 :
	 *
	 * 1) It is safe if the system doesn't support IDC, as CPU anyway
	 *    reports IDC = 0, consistent with the rest.
	 *
	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
	 *
	 * So, we need to make sure either the raw CTR_EL0 or the effective
	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
	 */
	ctr_raw = read_cpuid_cachetype() & mask;
	ctr_real = read_cpuid_effective_cachetype() & mask;

	/* Mismatch only if neither the raw nor effective view agrees */
	return (ctr_real != sys) && (ctr_raw != sys);
}
89 | ||
c0cda3b8 | 90 | static void |
05460849 | 91 | cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap) |
116c81f4 | 92 | { |
4afe8e79 | 93 | u64 mask = arm64_ftr_reg_ctrel0.strict_mask; |
05460849 | 94 | bool enable_uct_trap = false; |
4afe8e79 SP |
95 | |
96 | /* Trap CTR_EL0 access on this CPU, only if it has a mismatch */ | |
97 | if ((read_cpuid_cachetype() & mask) != | |
98 | (arm64_ftr_reg_ctrel0.sys_val & mask)) | |
05460849 JM |
99 | enable_uct_trap = true; |
100 | ||
101 | /* ... or if the system is affected by an erratum */ | |
102 | if (cap->capability == ARM64_WORKAROUND_1542419) | |
103 | enable_uct_trap = true; | |
104 | ||
105 | if (enable_uct_trap) | |
4afe8e79 | 106 | sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0); |
116c81f4 SP |
107 | } |
108 | ||
/* Per-CPU flag read by entry code: non-zero when the SSBD firmware
 * callback must be invoked on kernel entry/exit for this CPU. */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

/* Global SSBD mitigation mode; defaults to dynamic (kernel-managed). */
int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
/* Stays true only while every CPU seen so far is known-invulnerable. */
static bool __ssb_safe = true;

/* Maps "ssbd=" command-line values to mitigation states. */
static const struct ssbd_options {
	const char	*str;
	int		state;
} ssbd_options[] = {
	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
	{ "kernel",	ARM64_SSBD_KERNEL, },
};
122 | ||
123 | static int __init ssbd_cfg(char *buf) | |
124 | { | |
125 | int i; | |
126 | ||
127 | if (!buf || !buf[0]) | |
128 | return -EINVAL; | |
129 | ||
130 | for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) { | |
131 | int len = strlen(ssbd_options[i].str); | |
132 | ||
133 | if (strncmp(buf, ssbd_options[i].str, len)) | |
134 | continue; | |
135 | ||
136 | ssbd_state = ssbd_options[i].state; | |
137 | return 0; | |
138 | } | |
139 | ||
140 | return -EINVAL; | |
141 | } | |
142 | early_param("ssbd", ssbd_cfg); | |
143 | ||
/*
 * Alternatives callback: patch the single placeholder instruction with
 * the SMCCC conduit instruction (HVC or SMC) discovered from firmware.
 * If no conduit is available the instruction is left untouched.
 */
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr,
				       int nr_inst)
{
	u32 insn;

	/* This callback only knows how to patch exactly one instruction */
	BUG_ON(nr_inst != 1);

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case SMCCC_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		/* No conduit: leave the original instruction in place */
		return;
	}

	/* Instruction stream is little-endian regardless of data endianness */
	*updptr = cpu_to_le32(insn);
}
a725e3dd | 165 | |
/*
 * Alternatives callback for the ARCH_WORKAROUND_2 (SSBD) handling hooks:
 * NOP out the branch that skips the mitigation code when the SSBD state
 * is dynamic (kernel-managed), i.e. when the state may be flipped at
 * runtime.
 */
void __init arm64_enable_wa2_handling(struct alt_instr *alt,
				      __le32 *origptr, __le32 *updptr,
				      int nr_inst)
{
	BUG_ON(nr_inst != 1);
	/*
	 * Only allow mitigation on EL1 entry/exit and guest
	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
	 * be flipped.
	 */
	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}
179 | ||
/*
 * Turn the Speculative Store Bypass mitigation on (state == true) or
 * off for the calling CPU, preferring the architected PSTATE.SSBS
 * control over the SMCCC ARCH_WORKAROUND_2 firmware call.
 */
void arm64_set_ssbd_mitigation(bool state)
{
	int conduit;

	if (this_cpu_has_cap(ARM64_SSBS)) {
		/*
		 * SSBS semantics are inverted w.r.t. "mitigation on":
		 * SSBS == 0 means speculative store bypass is disallowed.
		 */
		if (state)
			asm volatile(SET_PSTATE_SSBS(0));
		else
			asm volatile(SET_PSTATE_SSBS(1));
		return;
	}

	/* No SSBS: ask firmware to flip the workaround for this CPU */
	conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, state,
				       NULL);

	/* Callers should only get here when a conduit was detected */
	WARN_ON_ONCE(conduit == SMCCC_CONDUIT_NONE);
}
197 | ||
198 | static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry, | |
199 | int scope) | |
200 | { | |
201 | struct arm_smccc_res res; | |
a43ae4df MZ |
202 | bool required = true; |
203 | s32 val; | |
526e065d | 204 | bool this_cpu_safe = false; |
ce4d5ca2 | 205 | int conduit; |
a725e3dd MZ |
206 | |
207 | WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible()); | |
208 | ||
a111b7c0 JP |
209 | if (cpu_mitigations_off()) |
210 | ssbd_state = ARM64_SSBD_FORCE_DISABLE; | |
211 | ||
eb337cdf WD |
212 | /* delay setting __ssb_safe until we get a firmware response */ |
213 | if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list)) | |
214 | this_cpu_safe = true; | |
215 | ||
8f04e8e6 | 216 | if (this_cpu_has_cap(ARM64_SSBS)) { |
eb337cdf WD |
217 | if (!this_cpu_safe) |
218 | __ssb_safe = false; | |
8f04e8e6 WD |
219 | required = false; |
220 | goto out_printmsg; | |
221 | } | |
222 | ||
ce4d5ca2 SP |
223 | conduit = arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID, |
224 | ARM_SMCCC_ARCH_WORKAROUND_2, &res); | |
a725e3dd | 225 | |
ce4d5ca2 | 226 | if (conduit == SMCCC_CONDUIT_NONE) { |
a43ae4df | 227 | ssbd_state = ARM64_SSBD_UNKNOWN; |
526e065d JL |
228 | if (!this_cpu_safe) |
229 | __ssb_safe = false; | |
a43ae4df | 230 | return false; |
a725e3dd MZ |
231 | } |
232 | ||
a43ae4df MZ |
233 | val = (s32)res.a0; |
234 | ||
235 | switch (val) { | |
236 | case SMCCC_RET_NOT_SUPPORTED: | |
237 | ssbd_state = ARM64_SSBD_UNKNOWN; | |
526e065d JL |
238 | if (!this_cpu_safe) |
239 | __ssb_safe = false; | |
a43ae4df MZ |
240 | return false; |
241 | ||
526e065d | 242 | /* machines with mixed mitigation requirements must not return this */ |
a43ae4df MZ |
243 | case SMCCC_RET_NOT_REQUIRED: |
244 | pr_info_once("%s mitigation not required\n", entry->desc); | |
245 | ssbd_state = ARM64_SSBD_MITIGATED; | |
246 | return false; | |
247 | ||
248 | case SMCCC_RET_SUCCESS: | |
526e065d | 249 | __ssb_safe = false; |
a43ae4df MZ |
250 | required = true; |
251 | break; | |
252 | ||
253 | case 1: /* Mitigation not required on this CPU */ | |
254 | required = false; | |
255 | break; | |
256 | ||
257 | default: | |
258 | WARN_ON(1); | |
526e065d JL |
259 | if (!this_cpu_safe) |
260 | __ssb_safe = false; | |
a43ae4df MZ |
261 | return false; |
262 | } | |
263 | ||
264 | switch (ssbd_state) { | |
265 | case ARM64_SSBD_FORCE_DISABLE: | |
a43ae4df MZ |
266 | arm64_set_ssbd_mitigation(false); |
267 | required = false; | |
268 | break; | |
269 | ||
270 | case ARM64_SSBD_KERNEL: | |
271 | if (required) { | |
272 | __this_cpu_write(arm64_ssbd_callback_required, 1); | |
273 | arm64_set_ssbd_mitigation(true); | |
274 | } | |
275 | break; | |
276 | ||
277 | case ARM64_SSBD_FORCE_ENABLE: | |
a725e3dd | 278 | arm64_set_ssbd_mitigation(true); |
a43ae4df MZ |
279 | required = true; |
280 | break; | |
281 | ||
282 | default: | |
283 | WARN_ON(1); | |
284 | break; | |
a725e3dd MZ |
285 | } |
286 | ||
8f04e8e6 WD |
287 | out_printmsg: |
288 | switch (ssbd_state) { | |
289 | case ARM64_SSBD_FORCE_DISABLE: | |
290 | pr_info_once("%s disabled from command-line\n", entry->desc); | |
291 | break; | |
292 | ||
293 | case ARM64_SSBD_FORCE_ENABLE: | |
294 | pr_info_once("%s forced from command-line\n", entry->desc); | |
295 | break; | |
296 | } | |
297 | ||
a43ae4df | 298 | return required; |
a725e3dd | 299 | } |
8e290624 | 300 | |
39533e12 MZ |
301 | static void cpu_enable_ssbd_mitigation(const struct arm64_cpu_capabilities *cap) |
302 | { | |
303 | if (ssbd_state != ARM64_SSBD_FORCE_DISABLE) | |
304 | cap->matches(cap, SCOPE_LOCAL_CPU); | |
305 | } | |
306 | ||
/* known invulnerable cores */
static const struct midr_range arm64_ssb_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
	MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
	MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
	{},	/* sentinel */
};

#ifdef CONFIG_ARM64_ERRATUM_1463225
/* Set while this CPU is inside the erratum-1463225 workaround window */
DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

/*
 * Erratum 1463225 matcher: affected MIDR AND the kernel running at EL2
 * (VHE).  NOTE(review): the is_kernel_in_hyp_mode() condition is taken
 * from the code as-is; confirm against the erratum notice if changing.
 */
static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
}
#endif
328 | ||
/*
 * Trap EL0 cache maintenance instructions on this CPU by clearing
 * SCTLR_EL1.UCI, so the kernel can emulate them safely.
 */
static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}
334 | ||
/*
 * Helper macros for populating arm64_cpu_capabilities entries that are
 * matched purely by MIDR.  The CAP_* variants set .matches and the midr
 * fields; the ERRATA_* variants additionally set the capability type to
 * a local-CPU erratum.
 */
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)				\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_ALL_VERSIONS(model)

/* Attach a fixed_revs list naming a revision where the erratum is fixed */
#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)	\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)	\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given a model */
#define ERRATA_MIDR_ALL_VERSIONS(model)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,		\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with same work around */
#define ERRATA_MIDR_RANGE_LIST(midr_list)		\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,		\
	CAP_MIDR_RANGE_LIST(midr_list)
371 | ||
/* ThunderX2 family: Broadcom Vulcan and its Cavium rebrand */
static const __maybe_unused struct midr_range tx2_family_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	{},
};

/*
 * TX2 erratum 219 (TVM trapping) matcher: only needed on an affected
 * part, when KVM could run guests, and when SMT siblings exist (some
 * CPU has a non-zero affinity level 0).
 */
static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
			 int scope)
{
	int i;

	if (!is_affected_midr_range_list(entry, scope) ||
	    !is_hyp_mode_available())
		return false;

	/* Look for any CPU with a non-zero thread id (Aff0) */
	for_each_possible_cpu(i) {
		if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
			return true;
	}

	return false;
}

/*
 * Neoverse-N1 erratum 1542419 matcher: any N1 revision that also
 * reports CTR_EL0.DIC (no I-cache invalidation needed for coherence).
 */
static bool __maybe_unused
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	u32 midr = read_cpuid_id();
	bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT);
	const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(midr, &range) && has_dic;
}
8892b718 | 407 | |
#ifdef CONFIG_RANDOMIZE_BASE

/* Cortex-A57/A72: used below for EL2 vector hardening */
static const struct midr_range ca57_a72[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};

#endif

#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
/* CPUs that need TLBI operations repeated (multi-entry match list) */
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
	},
	{
		/* Kryo needs the looser implementor/partnum match */
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
	{
		/* Cortex-A76 r0p0 - r3p0 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
	},
#endif
	{},
};
#endif
437 | ||
#ifdef CONFIG_CAVIUM_ERRATUM_27456
/* Non-static: also referenced outside this file */
const struct midr_range cavium_erratum_27456_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
	/* Cavium ThunderX, T81 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	/* Cavium ThunderX, T83 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
/* Falkor v1 by exact MIDR; Kryo via the relaxed is_kryo_midr() match */
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
/* A53 revisions needing "dc cvac" promoted to clean+invalidate */
static const struct midr_range workaround_clean_cache[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef	CONFIG_ARM64_ERRATUM_819472
	/* Cortex-A53 r0p[01] : ARM errata 819472 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
	{},
};
#endif
488 | ||
#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 */
static const struct midr_range erratum_1418040_list[] = {
	/* Cortex-A76 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Neoverse-N1 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_845719
static const struct midr_range erratum_845719_list[] = {
	/* Cortex-A53 r0p[01234] */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	/* Brahma-B53 r0p[0] */
	MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_843419
/* Multi-entry list: A53 entry carries a fixed-revision exception */
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
	{
		/* Cortex-A53 r0p[01234] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		/* r0p4 with REVIDR bit 8 set is not affected */
		MIDR_FIXED(0x4, BIT(8)),
	},
	{
		/* Brahma-B53 r0p[0] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
static const struct midr_range erratum_speculative_at_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1165522
	/* Cortex A76 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1319367
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1530923
	/* Cortex A55 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
	/* Kryo4xx Silver (rdpe => r1p0) */
	MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1463225
static const struct midr_range erratum_1463225[] = {
	/* Cortex-A76 r0p0 - r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif
561 | ||
/*
 * Master table of arm64 CPU erratum workarounds, consumed by the
 * cpufeature/cpucaps machinery.  Terminated by an empty entry.
 */
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
	{
		.desc = "ARM errata 826319, 827319, 824069, or 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = erratum_843419_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
	},
#endif
	{
		/* Unconditional: applies whenever CTR_EL0 values differ */
		.desc = "Mismatched cache type (CTR_EL0)",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = qcom_erratum_1003_list,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
	{
		.desc = "Qualcomm erratum 1009, or ARM erratum 1286807",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = arm64_repeat_tlbi_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.desc = "Spectre-v2",
		.capability = ARM64_SPECTRE_V2,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v2,
		.cpu_enable = spectre_v2_enable_mitigation,
	},
#ifdef CONFIG_RANDOMIZE_BASE
	{
		.desc = "EL2 vector hardening",
		.capability = ARM64_HARDEN_EL2_VECTORS,
		ERRATA_MIDR_RANGE_LIST(ca57_a72),
	},
#endif
	{
		.desc = "Speculative Store Bypass Disable",
		.capability = ARM64_SSBD,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_ssbd_mitigation,
		.cpu_enable = cpu_enable_ssbd_mitigation,
		.midr_range_list = arm64_ssb_cpus,
	},
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
		.desc = "ARM erratum 1418040",
		.capability = ARM64_WORKAROUND_1418040,
		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
		/* late-onlining CPUs are permitted for this workaround */
		.type = (ARM64_CPUCAP_SCOPE_LOCAL_CPU |
			 ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
	{
		.desc = "ARM errata 1165522, 1319367, or 1530923",
		.capability = ARM64_WORKAROUND_SPECULATIVE_AT,
		ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
	{
		.desc = "ARM erratum 1463225",
		.capability = ARM64_WORKAROUND_1463225,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_cortex_a76_erratum_1463225,
		.midr_range_list = erratum_1463225,
	},
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
	{
		.desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
		.matches = needs_tx2_tvm_workaround,
	},
	{
		.desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1542419
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1542419 (kernel portion)",
		.capability = ARM64_WORKAROUND_1542419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_neoverse_n1_erratum_1542419,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#endif
	{
	}
};
3891ebcc | 735 | |
/*
 * sysfs hook: /sys/devices/system/cpu/vulnerabilities/spec_store_bypass.
 * Reports "Not affected" only if every CPU probed so far was on the
 * known-invulnerable list (__ssb_safe still true).
 */
ssize_t cpu_show_spec_store_bypass(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	if (__ssb_safe)
		return sprintf(buf, "Not affected\n");

	switch (ssbd_state) {
	case ARM64_SSBD_KERNEL:
	case ARM64_SSBD_FORCE_ENABLE:
		return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
	}

	/* Unknown/disabled/unmitigated states all report vulnerable */
	return sprintf(buf, "Vulnerable\n");
}