]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - arch/arm/mm/cache-tauros2.c
Merge remote-tracking branches 'asoc/fix/sgtl5000', 'asoc/fix/topology' and 'asoc...
[mirror_ubuntu-bionic-kernel.git] / arch / arm / mm / cache-tauros2.c
1 /*
2 * arch/arm/mm/cache-tauros2.c - Tauros2 L2 cache controller support
3 *
4 * Copyright (C) 2008 Marvell Semiconductor
5 *
6 * This file is licensed under the terms of the GNU General Public
7 * License version 2. This program is licensed "as is" without any
8 * warranty of any kind, whether express or implied.
9 *
10 * References:
11 * - PJ1 CPU Core Datasheet,
12 * Document ID MV-S104837-01, Rev 0.7, January 24 2008.
13 * - PJ4 CPU Core Datasheet,
14 * Document ID MV-S105190-00, Rev 0.7, March 14 2008.
15 */
16
17 #include <linux/init.h>
18 #include <linux/of.h>
19 #include <linux/of_address.h>
20 #include <asm/cacheflush.h>
21 #include <asm/cp15.h>
22 #include <asm/cputype.h>
23 #include <asm/hardware/cache-tauros2.h>
24
25
26 /*
27 * When Tauros2 is used on a CPU that supports the v7 hierarchical
28 * cache operations, the cache handling code in proc-v7.S takes care
29 * of everything, including handling DMA coherency.
30 *
31 * So, we only need to register outer cache operations here if we're
32 * being used on a pre-v7 CPU, and we only need to build support for
33 * outer cache operations into the kernel image if the kernel has been
34 * configured to support a pre-v7 CPU.
35 */
36 #ifdef CONFIG_CPU_32v5
37 /*
38 * Low-level cache maintenance operations.
39 */
/*
 * Clean (write back) one L2 cache line by physical address
 * (CP15 c7, c11, 3 with opcode_1 = 1).
 */
static inline void tauros2_clean_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c11, 3" : : "r" (addr));
}
44
/*
 * Clean and invalidate one L2 cache line by physical address
 * (CP15 c7, c15, 3 with opcode_1 = 1).
 */
static inline void tauros2_clean_inv_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c15, 3" : : "r" (addr));
}
49
/*
 * Invalidate (discard) one L2 cache line by physical address
 * (CP15 c7, c7, 3 with opcode_1 = 1).  Dirty data in the line is lost.
 */
static inline void tauros2_inv_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c7, 3" : : "r" (addr));
}
54
55
56 /*
57 * Linux primitives.
58 *
59 * Note that the end addresses passed to Linux primitives are
60 * noninclusive.
61 */
62 #define CACHE_LINE_SIZE 32
63
64 static void tauros2_inv_range(unsigned long start, unsigned long end)
65 {
66 /*
67 * Clean and invalidate partial first cache line.
68 */
69 if (start & (CACHE_LINE_SIZE - 1)) {
70 tauros2_clean_inv_pa(start & ~(CACHE_LINE_SIZE - 1));
71 start = (start | (CACHE_LINE_SIZE - 1)) + 1;
72 }
73
74 /*
75 * Clean and invalidate partial last cache line.
76 */
77 if (end & (CACHE_LINE_SIZE - 1)) {
78 tauros2_clean_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
79 end &= ~(CACHE_LINE_SIZE - 1);
80 }
81
82 /*
83 * Invalidate all full cache lines between 'start' and 'end'.
84 */
85 while (start < end) {
86 tauros2_inv_pa(start);
87 start += CACHE_LINE_SIZE;
88 }
89
90 dsb();
91 }
92
93 static void tauros2_clean_range(unsigned long start, unsigned long end)
94 {
95 start &= ~(CACHE_LINE_SIZE - 1);
96 while (start < end) {
97 tauros2_clean_pa(start);
98 start += CACHE_LINE_SIZE;
99 }
100
101 dsb();
102 }
103
104 static void tauros2_flush_range(unsigned long start, unsigned long end)
105 {
106 start &= ~(CACHE_LINE_SIZE - 1);
107 while (start < end) {
108 tauros2_clean_inv_pa(start);
109 start += CACHE_LINE_SIZE;
110 }
111
112 dsb();
113 }
114
/*
 * Turn the L2 cache off (outer_cache.disable hook): clean the whole
 * cache so no dirty data is lost, then clear bit 26 of the CP15
 * control register, which on these CPUs is the L2 enable bit.
 */
static void tauros2_disable(void)
{
	__asm__ __volatile__ (
	"mcr p15, 1, %0, c7, c11, 0 @L2 Cache Clean All\n\t"
	"mrc p15, 0, %0, c1, c0, 0\n\t"
	"bic %0, %0, #(1 << 26)\n\t"
	"mcr p15, 0, %0, c1, c0, 0 @Disable L2 Cache\n\t"
	: : "r" (0x0));
}
124
/*
 * Re-enable the L2 after suspend (outer_cache.resume hook): invalidate
 * the whole cache (its contents are stale after power loss), then set
 * bit 26 of the CP15 control register to switch the L2 back on.
 */
static void tauros2_resume(void)
{
	__asm__ __volatile__ (
	"mcr p15, 1, %0, c7, c7, 0 @L2 Cache Invalidate All\n\t"
	"mrc p15, 0, %0, c1, c0, 0\n\t"
	"orr %0, %0, #(1 << 26)\n\t"
	"mcr p15, 0, %0, c1, c0, 0 @Enable L2 Cache\n\t"
	: : "r" (0x0));
}
134 #endif
135
/* Read the Marvell CPU Extra Features register (CP15 c15, c1, 0). */
static inline u32 __init read_extra_features(void)
{
	u32 u;

	__asm__("mrc p15, 1, %0, c15, c1, 0" : "=r" (u));

	return u;
}
144
/* Write the Marvell CPU Extra Features register (CP15 c15, c1, 0). */
static inline void __init write_extra_features(u32 u)
{
	__asm__("mcr p15, 1, %0, c15, c1, 0" : : "r" (u));
}
149
150 static inline int __init cpuid_scheme(void)
151 {
152 return !!((processor_id & 0x000f0000) == 0x000f0000);
153 }
154
/* Read the Memory Model Feature Register 3 (CP15 c0, c1, 7). */
static inline u32 __init read_mmfr3(void)
{
	u32 mmfr3;

	__asm__("mrc p15, 0, %0, c0, c1, 7\n" : "=r" (mmfr3));

	return mmfr3;
}
163
/* Read the Auxiliary Control Register (CP15 c1, c0, 1). */
static inline u32 __init read_actlr(void)
{
	u32 actlr;

	__asm__("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));

	return actlr;
}
172
/* Write the Auxiliary Control Register (CP15 c1, c0, 1). */
static inline void __init write_actlr(u32 actlr)
{
	__asm__("mcr p15, 0, %0, c1, c0, 1\n" : : "r" (actlr));
}
177
178 static void enable_extra_feature(unsigned int features)
179 {
180 u32 u;
181
182 u = read_extra_features();
183
184 if (features & CACHE_TAUROS2_PREFETCH_ON)
185 u &= ~0x01000000;
186 else
187 u |= 0x01000000;
188 pr_info("Tauros2: %s L2 prefetch.\n",
189 (features & CACHE_TAUROS2_PREFETCH_ON)
190 ? "Enabling" : "Disabling");
191
192 if (features & CACHE_TAUROS2_LINEFILL_BURST8)
193 u |= 0x00100000;
194 else
195 u &= ~0x00100000;
196 pr_info("Tauros2: %s line fill burt8.\n",
197 (features & CACHE_TAUROS2_LINEFILL_BURST8)
198 ? "Enabling" : "Disabling");
199
200 write_extra_features(u);
201 }
202
/*
 * Detect which CPU personality Tauros2 is paired with, make sure the
 * L2 is enabled, and (for pre-v7 CPUs only) hook up the outer cache
 * operations.  Logs a critical message and bails out if neither the
 * ARMv5 nor the ARMv7 path recognises the CPU.
 */
static void __init tauros2_internal_init(unsigned int features)
{
	char *mode = NULL;

	enable_extra_feature(features);

#ifdef CONFIG_CPU_32v5
	/* Marvell implementer (0x56), primary part number 5. */
	if ((processor_id & 0xff0f0000) == 0x56050000) {
		u32 feat;

		/*
		 * v5 CPUs with Tauros2 have the L2 cache enable bit
		 * located in the CPU Extra Features register.
		 */
		feat = read_extra_features();
		if (!(feat & 0x00400000)) {
			pr_info("Tauros2: Enabling L2 cache.\n");
			write_extra_features(feat | 0x00400000);
		}

		mode = "ARMv5";
		outer_cache.inv_range = tauros2_inv_range;
		outer_cache.clean_range = tauros2_clean_range;
		outer_cache.flush_range = tauros2_flush_range;
		outer_cache.disable = tauros2_disable;
		outer_cache.resume = tauros2_resume;
	}
#endif

#ifdef CONFIG_CPU_32v7
	/*
	 * Check whether this CPU has support for the v7 hierarchical
	 * cache ops. (PJ4 is in its v7 personality mode if the MMFR3
	 * register indicates support for the v7 hierarchical cache
	 * ops.)
	 *
	 * (Although strictly speaking there may exist CPUs that
	 * implement the v7 cache ops but are only ARMv6 CPUs (due to
	 * not complying with all of the other ARMv7 requirements),
	 * there are no real-life examples of Tauros2 being used on
	 * such CPUs as of yet.)
	 */
	if (cpuid_scheme() && (read_mmfr3() & 0xf) == 1) {
		u32 actlr;

		/*
		 * When Tauros2 is used in an ARMv7 system, the L2
		 * enable bit is located in the Auxiliary System Control
		 * Register (which is the only register allowed by the
		 * ARMv7 spec to contain fine-grained cache control bits).
		 */
		actlr = read_actlr();
		if (!(actlr & 0x00000002)) {
			pr_info("Tauros2: Enabling L2 cache.\n");
			write_actlr(actlr | 0x00000002);
		}

		mode = "ARMv7";
		/* No outer_cache hooks: proc-v7.S handles the L2 here. */
	}
#endif

	if (mode == NULL) {
		pr_crit("Tauros2: Unable to detect CPU mode.\n");
		return;
	}

	pr_info("Tauros2: L2 cache support initialised "
		"in %s mode.\n", mode);
}
272
273 #ifdef CONFIG_OF
/* Device tree match table used by tauros2_init() to find the cache node. */
static const struct of_device_id tauros2_ids[] __initconst = {
	{ .compatible = "marvell,tauros2-cache"},
	{}
};
278 #endif
279
280 void __init tauros2_init(unsigned int features)
281 {
282 #ifdef CONFIG_OF
283 struct device_node *node;
284 int ret;
285 unsigned int f;
286
287 node = of_find_matching_node(NULL, tauros2_ids);
288 if (!node) {
289 pr_info("Not found marvell,tauros2-cache, disable it\n");
290 return;
291 }
292
293 ret = of_property_read_u32(node, "marvell,tauros2-cache-features", &f);
294 if (ret) {
295 pr_info("Not found marvell,tauros-cache-features property, "
296 "disable extra features\n");
297 features = 0;
298 } else
299 features = f;
300 #endif
301 tauros2_internal_init(features);
302 }