// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 */
7 | ||
94a5d879 AB |
8 | #include <linux/arm-smccc.h> |
9 | #include <linux/psci.h> | |
e116a375 | 10 | #include <linux/types.h> |
a111b7c0 | 11 | #include <linux/cpu.h> |
e116a375 AP |
12 | #include <asm/cpu.h> |
13 | #include <asm/cputype.h> | |
14 | #include <asm/cpufeature.h> | |
452d06d2 | 15 | #include <asm/smp_plat.h> |
e116a375 | 16 | |
/*
 * Match CPUs whose MIDR falls within entry->midr_range, unless this exact
 * variant/revision is listed in entry->fixed_revs with the corresponding
 * REVIDR bit set (meaning the erratum is fixed on this part).
 * Runs per CPU (SCOPE_LOCAL_CPU) with preemption disabled, as it reads
 * this CPU's ID registers.
 */
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_midr_revidr *fix;
	u32 midr = read_cpuid_id(), revidr;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	if (!is_midr_in_range(midr, &entry->midr_range))
		return false;

	/* A matching fixed_revs entry with its REVIDR bit set => not affected */
	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	revidr = read_cpuid(REVIDR_EL1);
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;

	return true;
}
35 | ||
/* Match if this CPU's MIDR appears in the entry's terminated range list. */
static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}
43 | ||
/*
 * Match Qualcomm Kryo parts: compare only the implementer, architecture
 * and the top nibble of the part number, so every Kryo variant matches
 * a single entry->midr_range.model value.
 */
static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}
57 | ||
/*
 * Detect a CPU whose CTR_EL0 differs, in the strictly-checked fields, from
 * the system-wide sanitised copy — considering both the raw and the
 * effective (IDC-adjusted) value.
 */
static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
	u64 ctr_raw, ctr_real;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * We want to make sure that all the CPUs in the system expose
	 * a consistent CTR_EL0 to make sure that applications behaves
	 * correctly with migration.
	 *
	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0 :
	 *
	 * 1) It is safe if the system doesn't support IDC, as CPU anyway
	 *    reports IDC = 0, consistent with the rest.
	 *
	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
	 *
	 * So, we need to make sure either the raw CTR_EL0 or the effective
	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
	 */
	ctr_raw = read_cpuid_cachetype() & mask;
	ctr_real = read_cpuid_effective_cachetype() & mask;

	return (ctr_real != sys) && (ctr_raw != sys);
}
89 | ||
/*
 * cpu_enable callback: make EL0 CTR_EL0 reads trap on this CPU (by
 * clearing SCTLR_EL1.UCT), but only when the raw CTR_EL0 really differs
 * from the system-wide value.
 */
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;

	/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
	if ((read_cpuid_cachetype() & mask) !=
	    (arm64_ftr_reg_ctrel0.sys_val & mask))
		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}
100 | ||
/* Last EL2 hardened-vector slot handed out; -1 until the first allocation. */
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/* Per-CPU branch-predictor hardening state (callback and hyp vector slot). */
DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
107 | ||
#ifdef CONFIG_KVM_INDIRECT_VECTORS
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];

/*
 * Copy the hardening sequence into each 0x80-byte vector entry of the
 * given 2K hyp-vector slot, then flush so the I-side sees the new code.
 */
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}
123 | ||
/*
 * Record @fn as this CPU's branch-predictor hardening callback and, when
 * hyp vectors are supplied, share (or allocate and populate) an EL2
 * vector slot containing the workaround sequence.
 */
static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	static DEFINE_RAW_SPINLOCK(bp_lock);
	int cpu, slot = -1;

	/*
	 * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
	 * we're a guest. Skip the hyp-vectors work.
	 */
	if (!hyp_vecs_start) {
		__this_cpu_write(bp_hardening_data.fn, fn);
		return;
	}

	raw_spin_lock(&bp_lock);
	/* Reuse the slot of any CPU that already installed the same callback */
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	raw_spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_1_smc_start		NULL
#define __smccc_workaround_1_smc_end		NULL

/* !CONFIG_KVM_INDIRECT_VECTORS: only the per-CPU callback is recorded. */
static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM_INDIRECT_VECTORS */
0f15adbb | 169 | |
b092201e MZ |
170 | #include <uapi/linux/psci.h> |
171 | #include <linux/arm-smccc.h> | |
aa6acde6 WD |
172 | #include <linux/psci.h> |
173 | ||
/* Invoke the Spectre-v2 firmware workaround through the SMC conduit. */
static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

/* Invoke the Spectre-v2 firmware workaround through the HVC conduit. */
static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
183 | ||
/*
 * Overwrite the return-address (link-stack) predictor by executing 16
 * branch-and-link instructions, then restore the saved link register.
 */
static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	asm volatile("mov %0, x30 \n"
		     ".rept 16 \n"
		     "bl . + 4 \n"
		     ".endr \n"
		     "mov x30, %0 \n"
		     : "=&r" (tmp));
}
195 | ||
/* Set when "nospectre_v2" appears on the kernel command line. */
static bool __nospectre_v2;
static int __init parse_nospectre_v2(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_nospectre_v2);
203 | ||
/*
 * Probe firmware for ARM_SMCCC_ARCH_WORKAROUND_1 and install the matching
 * branch-predictor hardening callback.
 *
 * Return:
 * -1: No workaround
 *  0: No workaround required
 *  1: Workaround installed
 */
static int detect_harden_bp_fw(void)
{
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;
	u32 midr = read_cpuid_id();

	/* SMCCC 1.0 lacks the ARCH_FEATURES discovery call */
	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return -1;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		switch ((int)res.a0) {
		case 1:
			/* Firmware says we're just fine */
			return 0;
		case 0:
			cb = call_hvc_arch_workaround_1;
			/* This is a guest, no need to patch KVM vectors */
			smccc_start = NULL;
			smccc_end = NULL;
			break;
		default:
			return -1;
		}
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		switch ((int)res.a0) {
		case 1:
			/* Firmware says we're just fine */
			return 0;
		case 0:
			cb = call_smc_arch_workaround_1;
			smccc_start = __smccc_workaround_1_smc_start;
			smccc_end = __smccc_workaround_1_smc_end;
			break;
		default:
			return -1;
		}
		break;

	default:
		return -1;
	}

	/* Falkor parts use link-stack sanitization instead of the SMCCC call */
	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
		cb = qcom_link_stack_sanitization;

	if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
		install_bp_hardening_cb(cb, smccc_start, smccc_end);

	return 1;
}
0f15adbb | 268 | |
/* Per-CPU flag consulted on kernel entry/exit for the WA2 firmware call. */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

/* System-wide SSBD policy; may be overridden by "ssbd=" or detection below. */
int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
/* Cleared as soon as any booted CPU is found vulnerable to SSB. */
static bool __ssb_safe = true;

/* Accepted values for the "ssbd=" command-line parameter. */
static const struct ssbd_options {
	const char *str;
	int state;
} ssbd_options[] = {
	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
	{ "kernel",	ARM64_SSBD_KERNEL, },
};
282 | ||
283 | static int __init ssbd_cfg(char *buf) | |
284 | { | |
285 | int i; | |
286 | ||
287 | if (!buf || !buf[0]) | |
288 | return -EINVAL; | |
289 | ||
290 | for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) { | |
291 | int len = strlen(ssbd_options[i].str); | |
292 | ||
293 | if (strncmp(buf, ssbd_options[i].str, len)) | |
294 | continue; | |
295 | ||
296 | ssbd_state = ssbd_options[i].state; | |
297 | return 0; | |
298 | } | |
299 | ||
300 | return -EINVAL; | |
301 | } | |
302 | early_param("ssbd", ssbd_cfg); | |
303 | ||
/*
 * Alternative-instruction callback: patch the single instruction at
 * @updptr to the HVC or SMC encoding matching the PSCI conduit in use.
 * Does nothing (leaves the original instruction) when no conduit is set.
 */
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr,
				       int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1);

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case PSCI_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}
a725e3dd | 325 | |
/*
 * Alternative-instruction callback: replace the patched instruction with
 * a NOP when the SSBD state is kernel-managed (dynamic).
 */
void __init arm64_enable_wa2_handling(struct alt_instr *alt,
				      __le32 *origptr, __le32 *updptr,
				      int nr_inst)
{
	BUG_ON(nr_inst != 1);
	/*
	 * Only allow mitigation on EL1 entry/exit and guest
	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
	 * be flipped.
	 */
	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}
339 | ||
/* Enable (@state true) or disable the SSB mitigation on this CPU. */
void arm64_set_ssbd_mitigation(bool state)
{
	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
		pr_info_once("SSBD disabled by kernel configuration\n");
		return;
	}

	/* Prefer the architected PSTATE.SSBS control when the CPU has it */
	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (state)
			asm volatile(SET_PSTATE_SSBS(0));
		else
			asm volatile(SET_PSTATE_SSBS(1));
		return;
	}

	/* Otherwise ask firmware via ARCH_WORKAROUND_2 */
	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}
}
369 | ||
/*
 * Per-CPU detection of the Speculative Store Bypass vulnerability.
 * As a side effect this applies the configured policy (ssbd_state) on the
 * calling CPU and updates the global __ssb_safe used for sysfs reporting.
 * Returns true when this CPU needs the mitigation.
 */
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
				    int scope)
{
	struct arm_smccc_res res;
	bool required = true;
	s32 val;
	bool this_cpu_safe = false;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/* "mitigations=off" overrides any ssbd= setting */
	if (cpu_mitigations_off())
		ssbd_state = ARM64_SSBD_FORCE_DISABLE;

	/* delay setting __ssb_safe until we get a firmware response */
	if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
		this_cpu_safe = true;

	/* CPUs with SSBS control the mitigation themselves; no firmware call */
	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (!this_cpu_safe)
			__ssb_safe = false;
		required = false;
		goto out_printmsg;
	}

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	/* Query firmware for ARCH_WORKAROUND_2 support */
	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	default:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	val = (s32)res.a0;

	switch (val) {
	case SMCCC_RET_NOT_SUPPORTED:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;

	/* machines with mixed mitigation requirements must not return this */
	case SMCCC_RET_NOT_REQUIRED:
		pr_info_once("%s mitigation not required\n", entry->desc);
		ssbd_state = ARM64_SSBD_MITIGATED;
		return false;

	case SMCCC_RET_SUCCESS:
		__ssb_safe = false;
		required = true;
		break;

	case 1:	/* Mitigation not required on this CPU */
		required = false;
		break;

	default:
		WARN_ON(1);
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	/* Apply the policy on this CPU */
	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		arm64_set_ssbd_mitigation(false);
		required = false;
		break;

	case ARM64_SSBD_KERNEL:
		if (required) {
			__this_cpu_write(arm64_ssbd_callback_required, 1);
			arm64_set_ssbd_mitigation(true);
		}
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		arm64_set_ssbd_mitigation(true);
		required = true;
		break;

	default:
		WARN_ON(1);
		break;
	}

out_printmsg:
	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		pr_info_once("%s disabled from command-line\n", entry->desc);
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		pr_info_once("%s forced from command-line\n", entry->desc);
		break;
	}

	return required;
}
8e290624 | 486 | |
/* known invulnerable cores (not affected by Speculative Store Bypass) */
static const struct midr_range arm64_ssb_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
	{},
};
495 | ||
#ifdef CONFIG_ARM64_ERRATUM_1463225
/* Per-CPU state used by the Cortex-A76 erratum 1463225 workaround. */
DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

/*
 * Erratum 1463225 matcher: Cortex-A76 r0p0..r3p1, and only relevant when
 * the kernel itself runs in hyp mode (VHE).
 */
static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	u32 midr = read_cpuid_id();
	/* Cortex-A76 r0p0 - r3p1 */
	struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
}
#endif
511 | ||
/* cpu_enable callback: trap EL0 cache maintenance (clear SCTLR_EL1.UCI). */
static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}
517 | ||
/*
 * Helper macros for building arm64_cpu_capabilities entries keyed on
 * MIDR values. CAP_* set only the matcher fields; ERRATA_* additionally
 * mark the entry as a local-CPU erratum.
 */
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
	.matches = is_affected_midr_range, \
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model) \
	.matches = is_affected_midr_range, \
	.midr_range = MIDR_ALL_VERSIONS(model)

/* Revisions known to be fixed, identified by MIDR value plus REVIDR bits */
#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list) \
	.matches = is_affected_midr_range_list, \
	.midr_range_list = list

/* Errata affecting a range of revisions of given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max) \
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev) \
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given a model */
#define ERRATA_MIDR_ALL_VERSIONS(model) \
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with same work around */
#define ERRATA_MIDR_RANGE_LIST(midr_list) \
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, \
	CAP_MIDR_RANGE_LIST(midr_list)
554 | ||
/* Track overall mitigation state. We are only mitigated if all cores are ok */
static bool __hardenbp_enab = true;	/* false once any CPU lacks a usable mitigation */
static bool __spectrev2_safe = true;	/* false once any CPU is found affected */
558 | ||
c118bbb5 AP |
559 | int get_spectre_v2_workaround_state(void) |
560 | { | |
561 | if (__spectrev2_safe) | |
562 | return ARM64_BP_HARDEN_NOT_REQUIRED; | |
563 | ||
564 | if (!__hardenbp_enab) | |
565 | return ARM64_BP_HARDEN_UNKNOWN; | |
566 | ||
567 | return ARM64_BP_HARDEN_WA_NEEDED; | |
568 | } | |
569 | ||
/*
 * List of CPUs that do not need any Spectre-v2 mitigation at all.
 */
static const struct midr_range spectre_v2_safe_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
	{ /* sentinel */ }
};
580 | ||
/*
 * Track overall bp hardening for all heterogeneous cores in the machine.
 * We are only considered "safe" if all booted cores are known safe.
 * Returns true when this CPU needs (and received) the hardening.
 */
static bool __maybe_unused
check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
{
	int need_wa;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/* If the CPU has CSV2 set, we're safe */
	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
						 ID_AA64PFR0_CSV2_SHIFT))
		return false;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return false;

	/* Fallback to firmware detection */
	need_wa = detect_harden_bp_fw();
	if (!need_wa)
		return false;

	__spectrev2_safe = false;

	if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
		pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
		__hardenbp_enab = false;
		return false;
	}

	/* forced off */
	if (__nospectre_v2 || cpu_mitigations_off()) {
		pr_info_once("spectrev2 mitigation disabled by command line option\n");
		__hardenbp_enab = false;
		return false;
	}

	/* need_wa < 0: firmware lacks WORKAROUND_1, so we stay vulnerable */
	if (need_wa < 0) {
		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
		__hardenbp_enab = false;
	}

	return (need_wa > 0);
}
06f1494f | 628 | |
/* Broadcom Vulcan / Cavium ThunderX2 family (same silicon lineage). */
static const __maybe_unused struct midr_range tx2_family_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	{},
};

/*
 * TX2 erratum 219 TVM matcher: requires a TX2-family CPU, an available
 * hyp mode, and at least one CPU with a non-zero MPIDR affinity level 0.
 */
static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
			 int scope)
{
	int i;

	if (!is_affected_midr_range_list(entry, scope) ||
	    !is_hyp_mode_available())
		return false;

	for_each_possible_cpu(i) {
		if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
			return true;
	}

	return false;
}
652 | ||
#ifdef CONFIG_HARDEN_EL2_VECTORS

/* CPUs whose EL2 vectors need the ARM64_HARDEN_EL2_VECTORS treatment. */
static const struct midr_range arm64_harden_el2_vectors[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};

#endif
662 | ||
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
/* Parts that need TLB invalidation repeated (Falkor e1009, A76 e1286807). */
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
	{
		/* Cortex-A76 r0p0 - r3p0 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
	},
#endif
	{},
};
#endif
682 | ||
#ifdef CONFIG_CAVIUM_ERRATUM_27456
/* Non-static: this list is referenced outside this file. */
const struct midr_range cavium_erratum_27456_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
	/* Cavium ThunderX, T81 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	/* Cavium ThunderX, T83 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	{},
};
#endif
704 | ||
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
/* Falkor v1 r0p0 plus all Kryo parts matched by is_kryo_midr(). */
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
	{},
};
#endif
717 | ||
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef	CONFIG_ARM64_ERRATUM_819472
	/* Cortex-A53 r0p[01] : ARM errata 819472 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
	{},
};
#endif
733 | ||
#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 */
static const struct midr_range erratum_1418040_list[] = {
	/* Cortex-A76 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Neoverse-N1 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
	{},
};
#endif
747 | ||
#ifdef CONFIG_ARM64_ERRATUM_845719
static const struct midr_range erratum_845719_list[] = {
	/* Cortex-A53 r0p[01234] */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	/* Brahma-B53 r0p[0] */
	MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	{},
};
#endif
757 | ||
#ifdef CONFIG_ARM64_ERRATUM_843419
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
	{
		/* Cortex-A53 r0p[01234] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		/* r0p4 parts with REVIDR bit 8 set have the erratum fixed */
		MIDR_FIXED(0x4, BIT(8)),
	},
	{
		/* Brahma-B53 r0p[0] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	},
	{},
};
#endif
774 | ||
c9460dcb SP |
775 | const struct arm64_cpu_capabilities arm64_errata[] = { |
776 | #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE | |
c0a01b84 | 777 | { |
c9460dcb | 778 | .desc = "ARM errata 826319, 827319, 824069, 819472", |
c0a01b84 | 779 | .capability = ARM64_WORKAROUND_CLEAN_CACHE, |
c9460dcb | 780 | ERRATA_MIDR_RANGE_LIST(workaround_clean_cache), |
c0cda3b8 | 781 | .cpu_enable = cpu_enable_cache_maint_trap, |
c0a01b84 AP |
782 | }, |
783 | #endif | |
784 | #ifdef CONFIG_ARM64_ERRATUM_832075 | |
301bcfac | 785 | { |
5afaa1fc AP |
786 | /* Cortex-A57 r0p0 - r1p2 */ |
787 | .desc = "ARM erratum 832075", | |
788 | .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE, | |
5e7951ce SP |
789 | ERRATA_MIDR_RANGE(MIDR_CORTEX_A57, |
790 | 0, 0, | |
791 | 1, 2), | |
5afaa1fc | 792 | }, |
905e8c5d | 793 | #endif |
498cd5c3 MZ |
794 | #ifdef CONFIG_ARM64_ERRATUM_834220 |
795 | { | |
796 | /* Cortex-A57 r0p0 - r1p2 */ | |
797 | .desc = "ARM erratum 834220", | |
798 | .capability = ARM64_WORKAROUND_834220, | |
5e7951ce SP |
799 | ERRATA_MIDR_RANGE(MIDR_CORTEX_A57, |
800 | 0, 0, | |
801 | 1, 2), | |
498cd5c3 MZ |
802 | }, |
803 | #endif | |
ca79acca AB |
804 | #ifdef CONFIG_ARM64_ERRATUM_843419 |
805 | { | |
ca79acca AB |
806 | .desc = "ARM erratum 843419", |
807 | .capability = ARM64_WORKAROUND_843419, | |
1794f7bc FF |
808 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, |
809 | .matches = cpucap_multi_entry_cap_matches, | |
810 | .match_list = erratum_843419_list, | |
498cd5c3 MZ |
811 | }, |
812 | #endif | |
905e8c5d WD |
813 | #ifdef CONFIG_ARM64_ERRATUM_845719 |
814 | { | |
905e8c5d WD |
815 | .desc = "ARM erratum 845719", |
816 | .capability = ARM64_WORKAROUND_845719, | |
0d70632f | 817 | ERRATA_MIDR_RANGE_LIST(erratum_845719_list), |
905e8c5d | 818 | }, |
6d4e11c5 RR |
819 | #endif |
820 | #ifdef CONFIG_CAVIUM_ERRATUM_23154 | |
821 | { | |
822 | /* Cavium ThunderX, pass 1.x */ | |
823 | .desc = "Cavium erratum 23154", | |
824 | .capability = ARM64_WORKAROUND_CAVIUM_23154, | |
5e7951ce | 825 | ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1), |
6d4e11c5 | 826 | }, |
104a0c02 AP |
827 | #endif |
828 | #ifdef CONFIG_CAVIUM_ERRATUM_27456 | |
829 | { | |
47c459be GK |
830 | .desc = "Cavium erratum 27456", |
831 | .capability = ARM64_WORKAROUND_CAVIUM_27456, | |
f58cdf7e | 832 | ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus), |
47c459be | 833 | }, |
690a3415 DD |
834 | #endif |
835 | #ifdef CONFIG_CAVIUM_ERRATUM_30115 | |
836 | { | |
690a3415 DD |
837 | .desc = "Cavium erratum 30115", |
838 | .capability = ARM64_WORKAROUND_CAVIUM_30115, | |
f58cdf7e | 839 | ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus), |
690a3415 | 840 | }, |
c0a01b84 | 841 | #endif |
116c81f4 | 842 | { |
880f7cc4 | 843 | .desc = "Mismatched cache type (CTR_EL0)", |
314d53d2 SP |
844 | .capability = ARM64_MISMATCHED_CACHE_TYPE, |
845 | .matches = has_mismatched_cache_type, | |
5b4747c5 | 846 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, |
c0cda3b8 | 847 | .cpu_enable = cpu_enable_trap_ctr_access, |
116c81f4 | 848 | }, |
38fd94b0 CC |
849 | #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003 |
850 | { | |
a3dcea2c | 851 | .desc = "Qualcomm Technologies Falkor/Kryo erratum 1003", |
bb487118 | 852 | .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003, |
d40fce49 | 853 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, |
1e013d06 | 854 | .matches = cpucap_multi_entry_cap_matches, |
a3dcea2c | 855 | .match_list = qcom_erratum_1003_list, |
bb487118 | 856 | }, |
38fd94b0 | 857 | #endif |
ce8c80c5 | 858 | #ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI |
d9ff80f8 | 859 | { |
ce8c80c5 | 860 | .desc = "Qualcomm erratum 1009, ARM erratum 1286807", |
d9ff80f8 | 861 | .capability = ARM64_WORKAROUND_REPEAT_TLBI, |
93ae0855 BA |
862 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, |
863 | .matches = cpucap_multi_entry_cap_matches, | |
864 | .match_list = arm64_repeat_tlbi_list, | |
d9ff80f8 | 865 | }, |
eeb1efbc MZ |
866 | #endif |
867 | #ifdef CONFIG_ARM64_ERRATUM_858921 | |
868 | { | |
869 | /* Cortex-A73 all versions */ | |
870 | .desc = "ARM erratum 858921", | |
871 | .capability = ARM64_WORKAROUND_858921, | |
5e7951ce | 872 | ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73), |
eeb1efbc | 873 | }, |
aa6acde6 | 874 | #endif |
aa6acde6 WD |
875 | { |
876 | .capability = ARM64_HARDEN_BRANCH_PREDICTOR, | |
73f38166 MZ |
877 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, |
878 | .matches = check_branch_predictor, | |
f3d795d9 | 879 | }, |
4b472ffd MZ |
880 | #ifdef CONFIG_HARDEN_EL2_VECTORS |
881 | { | |
8892b718 | 882 | .desc = "EL2 vector hardening", |
4b472ffd | 883 | .capability = ARM64_HARDEN_EL2_VECTORS, |
8892b718 | 884 | ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors), |
4b472ffd | 885 | }, |
a725e3dd | 886 | #endif |
a725e3dd MZ |
887 | { |
888 | .desc = "Speculative Store Bypass Disable", | |
889 | .capability = ARM64_SSBD, | |
890 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, | |
891 | .matches = has_ssbd_mitigation, | |
526e065d | 892 | .midr_range_list = arm64_ssb_cpus, |
a725e3dd | 893 | }, |
a5325089 | 894 | #ifdef CONFIG_ARM64_ERRATUM_1418040 |
95b861a4 | 895 | { |
a5325089 MZ |
896 | .desc = "ARM erratum 1418040", |
897 | .capability = ARM64_WORKAROUND_1418040, | |
898 | ERRATA_MIDR_RANGE_LIST(erratum_1418040_list), | |
95b861a4 | 899 | }, |
8b2cca9a MZ |
900 | #endif |
901 | #ifdef CONFIG_ARM64_ERRATUM_1165522 | |
902 | { | |
903 | /* Cortex-A76 r0p0 to r2p0 */ | |
904 | .desc = "ARM erratum 1165522", | |
905 | .capability = ARM64_WORKAROUND_1165522, | |
906 | ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0), | |
907 | }, | |
969f5ea6 WD |
908 | #endif |
909 | #ifdef CONFIG_ARM64_ERRATUM_1463225 | |
910 | { | |
911 | .desc = "ARM erratum 1463225", | |
912 | .capability = ARM64_WORKAROUND_1463225, | |
913 | .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM, | |
914 | .matches = has_cortex_a76_erratum_1463225, | |
915 | }, | |
76dc07b8 MZ |
916 | { |
917 | .desc = "Cavium ThunderX2 erratum 219 (PRFM removal)", | |
918 | .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM, | |
919 | ERRATA_MIDR_RANGE_LIST(tx2_family_cpus), | |
920 | }, | |
452d06d2 MZ |
921 | #endif |
922 | #ifdef CONFIG_CAVIUM_TX2_ERRATUM_219 | |
923 | { | |
924 | .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)", | |
925 | .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM, | |
926 | ERRATA_MIDR_RANGE_LIST(tx2_family_cpus), | |
927 | .matches = needs_tx2_tvm_workaround, | |
928 | }, | |
d9ff80f8 | 929 | #endif |
5afaa1fc | 930 | { |
301bcfac | 931 | } |
e116a375 | 932 | }; |
/* sysfs hook: report the Spectre-v1 mitigation state. */
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	static const char msg[] = "Mitigation: __user pointer sanitization\n";

	return sprintf(buf, "%s", msg);
}
/* sysfs hook: report the machine-wide Spectre-v2 mitigation state. */
ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	switch (get_spectre_v2_workaround_state()) {
	case ARM64_BP_HARDEN_NOT_REQUIRED:
		return sprintf(buf, "Not affected\n");
	case ARM64_BP_HARDEN_WA_NEEDED:
		return sprintf(buf, "Mitigation: Branch predictor hardening\n");
	case ARM64_BP_HARDEN_UNKNOWN:
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}
/* sysfs hook: report the Speculative Store Bypass mitigation state. */
ssize_t cpu_show_spec_store_bypass(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	/* __ssb_safe stays true only while every booted CPU is known safe */
	if (__ssb_safe)
		return sprintf(buf, "Not affected\n");

	switch (ssbd_state) {
	case ARM64_SSBD_KERNEL:
	case ARM64_SSBD_FORCE_ENABLE:
		if (IS_ENABLED(CONFIG_ARM64_SSBD))
			return sprintf(buf,
			    "Mitigation: Speculative Store Bypass disabled via prctl\n");
	}

	/* All other states (UNKNOWN, FORCE_DISABLE, ...) report vulnerable */
	return sprintf(buf, "Vulnerable\n");
}