/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>

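/*
 * Match helper for MIDR-based errata: returns true when the running CPU's
 * MIDR identifies the model named in @entry and its revision falls inside
 * the [midr_range_min, midr_range_max] window.  Only meaningful with
 * SCOPE_LOCAL_CPU and preemption disabled, hence the WARN_ON() below.
 */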
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model,
                                       entry->midr_range_min,
                                       entry->midr_range_max);
}

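/*
 * Detect a CPU whose CTR_EL0 (cache type register) disagrees with the
 * system-wide sanitised value in the strict fields, i.e. a mismatched
 * cache line size between CPUs.
 */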
static bool
has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
                               int scope)
{
        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
                (arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
}

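/*
 * Enable method for the mismatched cache line size workaround: clearing
 * SCTLR_EL1.UCT makes EL0 reads of CTR_EL0 trap to EL1, so the kernel can
 * present a uniform cache-type value to userspace.
 */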
static int cpu_enable_trap_ctr_access(void *__unused)
{
        /* Clear SCTLR_EL1.UCT */
        config_sctlr_el1(SCTLR_EL1_UCT, 0);
        return 0;
}

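/*
 * Convenience initialiser for MIDR-matched errata entries: fills in the
 * matching scope, the match function and the affected model/revision range.
 */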
#define MIDR_RANGE(model, min, max) \
        .def_scope = SCOPE_LOCAL_CPU, \
        .matches = is_affected_midr_range, \
        .midr_model = model, \
        .midr_range_min = min, \
        .midr_range_max = max

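/*
 * Table of CPU errata workarounds applied at boot.  Most entries are guarded
 * by their CONFIG_* option; the table is terminated by an empty entry.
 */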
const struct arm64_cpu_capabilities arm64_errata[] = {
#if     defined(CONFIG_ARM64_ERRATUM_826319) || \
        defined(CONFIG_ARM64_ERRATUM_827319) || \
        defined(CONFIG_ARM64_ERRATUM_824069)
        {
        /* Cortex-A53 r0p[012] */
                .desc = "ARM errata 826319, 827319, 824069",
                .capability = ARM64_WORKAROUND_CLEAN_CACHE,
                MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
                .enable = cpu_enable_cache_maint_trap,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
        {
        /* Cortex-A53 r0p[01] */
                .desc = "ARM errata 819472",
                .capability = ARM64_WORKAROUND_CLEAN_CACHE,
                MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
                .enable = cpu_enable_cache_maint_trap,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
        {
        /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 832075",
                .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
                MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
                           (1 << MIDR_VARIANT_SHIFT) | 2),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
        {
        /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 834220",
                .capability = ARM64_WORKAROUND_834220,
                MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
                           (1 << MIDR_VARIANT_SHIFT) | 2),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
        {
        /* Cortex-A53 r0p[01234] */
                .desc = "ARM erratum 845719",
                .capability = ARM64_WORKAROUND_845719,
                MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
        {
        /* Cavium ThunderX, pass 1.x */
                .desc = "Cavium erratum 23154",
                .capability = ARM64_WORKAROUND_CAVIUM_23154,
                MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
        {
        /* Cavium ThunderX, T88 pass 1.x - 2.1 */
                .desc = "Cavium erratum 27456",
                .capability = ARM64_WORKAROUND_CAVIUM_27456,
                MIDR_RANGE(MIDR_THUNDERX, 0x00,
                           (1 << MIDR_VARIANT_SHIFT) | 1),
        },
        {
        /* Cavium ThunderX, T81 pass 1.0 */
                .desc = "Cavium erratum 27456",
                .capability = ARM64_WORKAROUND_CAVIUM_27456,
                MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
        },
#endif
        {
                .desc = "Mismatched cache line size",
                .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
                .matches = has_mismatched_cache_line_size,
                .def_scope = SCOPE_LOCAL_CPU,
                .enable = cpu_enable_trap_ctr_access,
        },
        {
        }
};

/*
 * The CPU errata workarounds are detected and applied at boot time and the
 * related information is freed soon after.  If a late-onlined CPU requires
 * a workaround that was not detected at boot, fail that CPU.
 */
void verify_local_cpu_errata_workarounds(void)
{
        const struct arm64_cpu_capabilities *caps = arm64_errata;

        for (; caps->matches; caps++)
                if (!cpus_have_cap(caps->capability) &&
                        caps->matches(caps, SCOPE_LOCAL_CPU)) {
                        pr_crit("CPU%d: Requires work around for %s, not detected"
                                        " at boot time\n",
                                smp_processor_id(),
                                caps->desc ? : "an erratum");
                        cpu_die_early();
                }
}

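/*
 * Scan the errata table for workarounds that apply to the calling CPU and
 * mark the corresponding capabilities as detected.
 */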
void update_cpu_errata_workarounds(void)
{
        update_cpu_capabilities(arm64_errata, "enabling workaround for");
}

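/*
 * Run the ->enable() callback of each detected erratum workaround, e.g.
 * cpu_enable_trap_ctr_access() for the mismatched cache line size entry.
 */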
void __init enable_errata_workarounds(void)
{
        enable_cpu_capabilities(arm64_errata);
}