/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>

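/*
 * Match a CPU model and a [min, max] variant/revision window against
 * the local CPU's MIDR. Local scope only, hence the WARN_ON below.
 */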
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model,
				       entry->midr_range_min,
				       entry->midr_range_max);
}

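/*
 * Report a CPU whose CTR_EL0 disagrees, in any strictly-checked field,
 * with the system-wide value established at boot.
 */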
static bool
has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
		(arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
}

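/*
 * Clearing SCTLR_EL1.UCT traps EL0 reads of CTR_EL0 to EL1, so the
 * kernel can hand userspace the sanitised system-wide value instead.
 */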
static int cpu_enable_trap_ctr_access(void *__unused)
{
	/* Clear SCTLR_EL1.UCT */
	config_sctlr_el1(SCTLR_EL1_UCT, 0);
	return 0;
}

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

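/*
 * Per-CPU hardening state: the invalidation callback and, under KVM,
 * the index of the 2K hyp vector slot that invokes it.
 */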
DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM
extern char __psci_hyp_bp_inval_start[], __psci_hyp_bp_inval_end[];
extern char __qcom_hyp_sanitize_link_stack_start[];
extern char __qcom_hyp_sanitize_link_stack_end[];

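/*
 * Copy the hardening sequence into each 0x80-byte vector entry of the
 * chosen 2K hyp vector slot, then flush the I-cache so instruction
 * fetch sees the updated vectors.
 */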
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

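/*
 * Point this CPU at the hardening callback @fn. Vector slots are
 * shared: if another CPU already installed the same callback, reuse
 * its slot; otherwise claim the next free slot and populate it.
 */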
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	static int last_slot = -1;
	static DEFINE_SPINLOCK(bp_lock);
	int cpu, slot = -1;

	spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		last_slot++;
		BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
			/ SZ_2K) <= last_slot);
		slot = last_slot;
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	spin_unlock(&bp_lock);
}
#else
#define __psci_hyp_bp_inval_start		NULL
#define __psci_hyp_bp_inval_end			NULL
#define __qcom_hyp_sanitize_link_stack_start	NULL
#define __qcom_hyp_sanitize_link_stack_end	NULL

static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM */

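/*
 * Install the callback only if this erratum entry matches the local
 * CPU and ID_AA64PFR0_EL1.CSV2 is zero, i.e. the CPU does not already
 * advertise immunity to branch-target injection.
 */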
static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
				    bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	u64 pfr0;

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return;

	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return;

	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
}

#include <linux/psci.h>

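/*
 * Use PSCI get_version as the hardening callback: on firmware carrying
 * the Spectre-v2 mitigation, any call into EL3 also invalidates the
 * branch predictor state.
 */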
static int enable_psci_bp_hardening(void *data)
{
	const struct arm64_cpu_capabilities *entry = data;

	if (psci_ops.get_version)
		install_bp_hardening_cb(entry,
				       (bp_hardening_cb_t)psci_ops.get_version,
				       __psci_hyp_bp_inval_start,
				       __psci_hyp_bp_inval_end);

	return 0;
}

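/*
 * Overwrite the CPU's return-address (link) stack by executing 16
 * dummy branch-and-link instructions to the next instruction, then
 * restore the real x30 from the temporary.
 */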
static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

static int qcom_enable_link_stack_sanitization(void *data)
{
	const struct arm64_cpu_capabilities *entry = data;

	install_bp_hardening_cb(entry, qcom_link_stack_sanitization,
				__qcom_hyp_sanitize_link_stack_start,
				__qcom_hyp_sanitize_link_stack_end);

	return 0;
}
#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */

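/*
 * Convenience initialisers for the entries below. For example,
 * MIDR_RANGE(MIDR_CORTEX_A57, MIDR_CPU_VAR_REV(0, 0),
 * MIDR_CPU_VAR_REV(1, 2)) matches Cortex-A57 r0p0 through r1p2,
 * checked on the local CPU only (SCOPE_LOCAL_CPU).
 */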
#define MIDR_RANGE(model, min, max) \
	.def_scope = SCOPE_LOCAL_CPU, \
	.matches = is_affected_midr_range, \
	.midr_model = model, \
	.midr_range_min = min, \
	.midr_range_max = max

#define MIDR_ALL_VERSIONS(model) \
	.def_scope = SCOPE_LOCAL_CPU, \
	.matches = is_affected_midr_range, \
	.midr_model = model, \
	.midr_range_min = 0, \
	.midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)

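/*
 * Master list of known errata. Each entry is matched against the local
 * CPU; the empty terminator (a NULL .matches) ends the walk below.
 */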
const struct arm64_cpu_capabilities arm64_errata[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	{
	/* Cortex-A53 r0p[012] */
		.desc = "ARM errata 826319, 827319, 824069",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
		.enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	{
	/* Cortex-A53 r0p[01] */
		.desc = "ARM errata 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
		.enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		MIDR_RANGE(MIDR_CORTEX_A57,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(1, 2)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		MIDR_RANGE(MIDR_CORTEX_A57,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(1, 2)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
	/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
	/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		MIDR_RANGE(MIDR_THUNDERX,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(1, 1)),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		MIDR_RANGE(MIDR_THUNDERX, 0x00,
			   (1 << MIDR_VARIANT_SHIFT) | 2),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x02),
	},
	{
	/* Cavium ThunderX, T83 pass 1.0 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		MIDR_RANGE(MIDR_THUNDERX_83XX, 0x00, 0x00),
	},
#endif
	{
		.desc = "Mismatched cache line size",
		.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
		.matches = has_mismatched_cache_line_size,
		.def_scope = SCOPE_LOCAL_CPU,
		.enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		MIDR_RANGE(MIDR_QCOM_FALKOR_V1,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(0, 0)),
	},
#endif
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		.desc = "Qualcomm Technologies Falkor erratum 1009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		MIDR_RANGE(MIDR_QCOM_FALKOR_V1,
			   MIDR_CPU_VAR_REV(0, 0),
			   MIDR_CPU_VAR_REV(0, 0)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
	/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
		.enable = enable_psci_bp_hardening,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
		.enable = enable_psci_bp_hardening,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		.enable = enable_psci_bp_hardening,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
		.enable = enable_psci_bp_hardening,
	},
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
		.enable = qcom_enable_link_stack_sanitization,
	},
	{
		.capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
	},
#endif
	{
	}
};

/*
 * The CPU errata workarounds are detected and applied at boot time and
 * the related information is freed soon after. If a new CPU requires a
 * workaround that was not detected at boot, fail that CPU.
 */
void verify_local_cpu_errata_workarounds(void)
{
	const struct arm64_cpu_capabilities *caps = arm64_errata;

	for (; caps->matches; caps++)
		if (!cpus_have_cap(caps->capability) &&
			caps->matches(caps, SCOPE_LOCAL_CPU)) {
			pr_crit("CPU%d: Requires work around for %s, not detected"
					" at boot time\n",
				smp_processor_id(),
				caps->desc ? : "an erratum");
			cpu_die_early();
		}
}

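/*
 * Detection path: record every erratum whose matcher fires on the
 * current CPUs, logging "enabling workaround for" per capability found.
 */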
void update_cpu_errata_workarounds(void)
{
	update_cpu_capabilities(arm64_errata, "enabling workaround for");
}

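/*
 * Called once at boot to run each detected workaround's ->enable hook
 * (e.g. cpu_enable_trap_ctr_access above).
 */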
void __init enable_errata_workarounds(void)
{
	enable_cpu_capabilities(arm64_errata);
}