/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"

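/*
 * Per-variant initialisation data: num_lock is the number of lockdown
 * register sets cleared by l2c_unlock(), of_parse is an optional DT
 * parser, enable performs the low-level controller enable, fixup lets a
 * variant adjust the outer_cache methods (e.g. for errata) based on the
 * cache ID, save records registers needed for resume, and outer_cache
 * is the method table installed as the global outer_cache.
 */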
struct l2c_init_data {
	unsigned num_lock;
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*enable)(void __iomem *, u32, unsigned);
	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
	void (*save)(void __iomem *);
	struct outer_cache_fns outer_cache;
};

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

/*
 * Common code for all cache controllers.
 */
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

/*
 * This should only be called when we have a requirement that the
 * register be written due to a work-around, as platforms running
 * in non-secure mode may not be able to access this register.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	outer_cache.set_debug(val);
}

static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	l2c_wait_mask(reg, l2x0_way_mask);
}

static inline void l2c_unlock(void __iomem *base, unsigned num)
{
	unsigned i;

	for (i = 0; i < num; i++) {
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

/*
 * Enable the L2 cache controller.  This function must only be
 * called when the cache controller is known to be disabled.
 */
static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned long flags;

	/* Only write the aux register if it needs changing */
	if (readl_relaxed(base + L2X0_AUX_CTRL) != aux)
		writel_relaxed(aux, base + L2X0_AUX_CTRL);

	l2c_unlock(base, num_lock);

	local_irq_save(flags);
	__l2c_op_way(base + L2X0_INV_WAY);
	writel_relaxed(0, base + sync_reg_offset);
	l2c_wait_mask(base + sync_reg_offset, 1);
	local_irq_restore(flags);

	writel_relaxed(L2X0_CTRL_EN, base + L2X0_CTRL);
}

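/*
 * Flush and disable the cache controller: write back all dirty data,
 * clear the enable bit, then barrier so the disabling write has taken
 * effect before we return.
 */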
static void l2c_disable(void)
{
	void __iomem *base = l2x0_base;

	outer_cache.flush_all();
	writel_relaxed(0, base + L2X0_CTRL);
	dsb(st);
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	l2c_wait_mask
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug)
		l2c_set_debug(l2x0_base, val);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

/*
 * L2C-210 specific code.
 *
 * The L2C-2x0 PA, set/way and sync operations are atomic, but we must
 * ensure that no background operation is running.  The way operations
 * are all background tasks.
 *
 * While a background operation is in progress, any new operation is
 * ignored (unspecified whether this causes an error.)  Thankfully, not
 * used on SMP.
 *
 * Never has a different sync register other than L2X0_CACHE_SYNC, but
 * we use sync_reg_offset here so we can share some of this with L2C-310.
 */
static void __l2c210_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + sync_reg_offset);
}

static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end)
{
	while (start < end) {
		writel_relaxed(start, reg);
		start += CACHE_LINE_SIZE;
	}
}

static void l2c210_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_all(void)
{
	void __iomem *base = l2x0_base;

	BUG_ON(!irqs_disabled());

	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	__l2c210_cache_sync(base);
}

static void l2c210_sync(void)
{
	__l2c210_cache_sync(l2x0_base);
}

static void l2c210_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1);
}

static const struct l2c_init_data l2c210_data __initconst = {
	.num_lock = 1,
	.enable = l2c_enable,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c210_resume,
	},
};

/*
 * L2C-220 specific code.
 *
 * All operations are background operations: they have to be waited for.
 * Conflicting requests generate a slave error (which will cause an
 * imprecise abort.)  Never uses sync_reg_offset, so we hard-code the
 * sync register here.
 *
 * However, we can re-use the l2c210_resume call.
 */
static inline void __l2c220_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
	l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
}

static void l2c220_op_way(void __iomem *base, unsigned reg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + reg);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

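/*
 * Operate on a physical address range one cache line at a time, waiting
 * for each background operation to complete.  The range is processed in
 * blocks of at most 4096 bytes, and l2x0_lock is dropped and re-taken
 * between blocks; the (possibly updated) IRQ flags are returned to the
 * caller.
 */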
static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end, unsigned long flags)
{
	raw_spinlock_t *lock = &l2x0_lock;

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2c_wait_mask(reg, 1);
			writel_relaxed(start, reg);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}

	return flags;
}

static void l2c220_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
			writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
		}
	}

	flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_all(void)
{
	l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
}

static void l2c220_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c220_cache_sync(l2x0_base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static const struct l2c_init_data l2c220_data = {
	.num_lock = 1,
	.enable = l2c_enable,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c210_resume,
	},
};

/*
 * L2C-310 specific code.
 *
 * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
 * and the way operations are all background tasks.  However, issuing an
 * operation while a background operation is in progress results in a
 * SLVERR response.  We can reuse:
 *
 *	__l2c210_cache_sync (using sync_reg_offset)
 *	l2c210_sync
 *	l2c210_inv_range (if 588369 is not applicable)
 *	l2c210_clean_range
 *	l2c210_flush_range (if 588369 is not applicable)
 *	l2c210_flush_all (if 727915 is not applicable)
 *
 * Errata:
 * 588369: PL310 R0P0->R1P0, fixed R2P0.
 *	Affects: all clean+invalidate operations
 *	clean and invalidate skips the invalidate step, so we need to issue
 *	separate operations.  We also require the above debug workaround
 *	enclosing this code fragment on affected parts.  On unaffected parts,
 *	we must not use this workaround without the debug register writes
 *	to avoid exposing a problem similar to 727915.
 *
 * 727915: PL310 R2P0->R3P0, fixed R3P1.
 *	Affects: clean+invalidate by way
 *	clean and invalidate by way runs in the background, and a store can
 *	hit the line between the clean operation and invalidate operation,
 *	resulting in the store being lost.
 *
 * 753970: PL310 R3P0, fixed R3P1.
 *	Affects: sync
 *	prevents merging writes after the sync operation, until another L2C
 *	operation is performed (or a number of other conditions.)
 *
 * 769419: PL310 R0P0->R3P1, fixed R3P2.
 *	Affects: store buffer
 *	store buffer is not automatically drained.
 */
static void l2c310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}

static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		unsigned long flags;

		/* Erratum 588369 for both clean+invalidate operations */
		raw_spin_lock_irqsave(&l2x0_lock, flags);
		l2c_set_debug(base, 0x03);

		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(end, base + L2X0_INV_LINE_PA);
		}

		l2c_set_debug(base, 0x00);
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
{
	raw_spinlock_t *lock = &l2x0_lock;
	unsigned long flags;
	void __iomem *base = l2x0_base;

	raw_spin_lock_irqsave(lock, flags);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		l2c_set_debug(base, 0x03);
		while (start < blk_end) {
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}
		l2c_set_debug(base, 0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}
	raw_spin_unlock_irqrestore(lock, flags);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_all_erratum(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	l2c_set_debug(base, 0x03);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	l2c_set_debug(base, 0x00);
	__l2c210_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

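/*
 * Save the L2C-310 configuration registers (latencies, address filter
 * and, where the RTL revision provides them, prefetch and power control)
 * so that l2c310_resume() can reprogram the controller after power-down.
 */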
static void __init l2c310_save(void __iomem *base)
{
	unsigned revision;

	l2x0_saved_regs.tag_latency = readl_relaxed(base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(base +
		L2X0_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

	/* From r2p0, there is Prefetch offset/control register */
	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
							L2X0_PREFETCH_CTRL);

	/* From r3p0, there is Power control register */
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
						L2X0_POWER_CTRL);
}

static void l2c310_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		unsigned revision;

		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			       base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			       base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			       base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			       base + L2X0_ADDR_FILTER_START);

		revision = readl_relaxed(base + L2X0_CACHE_ID) &
				L2X0_CACHE_ID_RTL_MASK;

		if (revision >= L310_CACHE_ID_RTL_R2P0)
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				       base + L2X0_PREFETCH_CTRL);
		if (revision >= L310_CACHE_ID_RTL_R3P0)
			writel_relaxed(l2x0_saved_regs.pwr_ctrl,
				       base + L2X0_POWER_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
	}
}

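/*
 * Select errata workarounds according to the RTL revision in the cache
 * ID and the enabled PL310_ERRATA_* config options, patching the
 * outer_cache method table before it is installed.
 */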
static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
	const char *errata[4];
	unsigned n = 0;

	/* For compatibility */
	if (revision <= L310_CACHE_ID_RTL_R3P0)
		fns->set_debug = l2c310_set_debug;

	if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
	    revision < L310_CACHE_ID_RTL_R2P0 &&
	    /* For bcm compatibility */
	    fns->inv_range == l2c210_inv_range) {
		fns->inv_range = l2c310_inv_range_erratum;
		fns->flush_range = l2c310_flush_range_erratum;
		errata[n++] = "588369";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
	    revision >= L310_CACHE_ID_RTL_R2P0 &&
	    revision < L310_CACHE_ID_RTL_R3P1) {
		fns->flush_all = l2c310_flush_all_erratum;
		errata[n++] = "727915";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
	    revision == L310_CACHE_ID_RTL_R3P0) {
		sync_reg_offset = L2X0_DUMMY_REG;
		errata[n++] = "753970";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
		errata[n++] = "769419";

	if (n) {
		unsigned i;

		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
		for (i = 0; i < n; i++)
			pr_cont(" %s", errata[i]);
		pr_cont(" enabled\n");
	}
}

static const struct l2c_init_data l2c310_init_fns __initconst = {
	.num_lock = 8,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.set_debug = l2c310_set_debug,
		.resume = l2c310_resume,
	},
};

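/*
 * Common initialisation: fold aux_val/aux_mask into the auxiliary
 * control value, determine the number of ways and the way size from the
 * cache ID and auxiliary control register, run the variant fixup hook,
 * enable the controller if it is not already enabled, and install the
 * outer_cache methods.
 */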
static void __init __l2c_init(const struct l2c_init_data *data,
	u32 aux_val, u32 aux_mask, u32 cache_id)
{
	struct outer_cache_fns fns;
	u32 aux;
	u32 way_size = 0;
	int ways;
	int way_size_shift = L2X0_WAY_SIZE_SHIFT;
	const char *type;

	/*
	 * It is strange to save the register state before initialisation,
	 * but hey, this is what the DT implementations decided to do.
	 */
	if (data->save)
		data->save(l2x0_base);

	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
		break;

	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;

	case AURORA_CACHE_ID:
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
		way_size_shift = AURORA_WAY_SIZE_SHIFT;
		type = "Aurora";
		break;

	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache Size =  Way size * Number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + way_size_shift);

	l2x0_size = ways * way_size * SZ_1K;

	fns = data->outer_cache;
	if (data->fixup)
		data->fixup(l2x0_base, cache_id, &fns);

	/*
	 * Check if l2x0 controller is already enabled.  If we are booting
	 * in non-secure mode accessing the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		data->enable(l2x0_base, aux, data->num_lock);

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	/* Save the value for resuming. */
	l2x0_saved_regs.aux_ctrl = aux;

	outer_cache = fns;

	pr_info("%s cache controller enabled, %d ways, %d kB\n",
		type, ways, l2x0_size >> 10);
	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
		type, cache_id, aux);
}

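/*
 * Entry point for non-DT platforms: identify the controller from its
 * cache ID register and hand the matching init data to __l2c_init().
 */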
void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	default:
	case L2X0_CACHE_ID_PART_L210:
		data = &l2c210_data;
		break;

	case L2X0_CACHE_ID_PART_L220:
		data = &l2c220_data;
		break;

	case L2X0_CACHE_ID_PART_L310:
		data = &l2c310_init_fns;
		break;
	}

	__l2c_init(data, aux_val, aux_mask, cache_id);
}

#ifdef CONFIG_OF
static int l2_wt_override;

/*
 * Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree.
 */
static u32 cache_id_part_number_from_dt;

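/*
 * Parse the L2x0 "arm,tag-latency", "arm,data-latency" and
 * "arm,dirty-latency" properties and fold them into the auxiliary
 * control value/mask handed to __l2c_init().
 */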
static void __init l2x0_of_parse(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_l2c210_data __initconst = {
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c210_resume,
	},
};

static const struct l2c_init_data of_l2c220_data __initconst = {
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c210_resume,
	},
};

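/*
 * Parse the L2C-310 "arm,tag-latency", "arm,data-latency" and
 * "arm,filter-ranges" properties.  Unlike l2x0_of_parse(), these are
 * written straight to the latency and address filter registers rather
 * than folded into the auxiliary control value.
 */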
static void __init l2c310_of_parse(const struct device_node *np,
	u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}

static const struct l2c_init_data of_l2c310_data __initconst = {
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.set_debug = l2c310_set_debug,
		.resume = l2c310_resume,
	},
};

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}

929/*
930 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
931 * and range operations only do a TLB lookup on the start address.
932 */
933static void aurora_pa_range(unsigned long start, unsigned long end,
934 unsigned long offset)
935{
936 unsigned long flags;
937
938 raw_spin_lock_irqsave(&l2x0_lock, flags);
8a3a180d
GC
939 writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
940 writel_relaxed(end, l2x0_base + offset);
b8db6b88
GC
941 raw_spin_unlock_irqrestore(&l2x0_lock, flags);
942
943 cache_sync();
944}
945
946static void aurora_inv_range(unsigned long start, unsigned long end)
947{
948 /*
949 * round start and end adresses up to cache line size
950 */
951 start &= ~(CACHE_LINE_SIZE - 1);
952 end = ALIGN(end, CACHE_LINE_SIZE);
953
954 /*
955 * Invalidate all full cache lines between 'start' and 'end'.
956 */
957 while (start < end) {
958 unsigned long range_end = calc_range_end(start, end);
959 aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
960 AURORA_INVAL_RANGE_REG);
961 start = range_end;
962 }
963}
964
static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}

static void aurora_save(void __iomem *base)
{
	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}

static void aurora_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL);
		writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL);
	}
}

/*
 * For Aurora cache in no outer mode, enable via the CP15 coprocessor
 * broadcasting of cache commands to L2.
 */
static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
	unsigned num_lock)
{
	u32 u;

	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));

	isb();

	l2c_enable(base, aux, num_lock);
}

static void __init aurora_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	sync_reg_offset = AURORA_SYNC_REG;
}

static void __init aurora_of_parse(const struct device_node *np,
				   u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			     &cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = l2c_enable,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = aurora_resume,
	},
};

static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = aurora_enable_no_outer,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.resume = aurora_resume,
	},
};

/*
 * For certain Broadcom SoCs, depending on the address range, different
 * offsets need to be added to the address before passing it to L2 for
 * invalidation/clean/flush
 *
 * Section Address Range              Offset        EMI
 *   1     0x00000000 - 0x3FFFFFFF    0x80000000    VC
 *   2     0x40000000 - 0xBFFFFFFF    0x40000000    SYS
 *   3     0xC0000000 - 0xFFFFFFFF    0x80000000    VC
 *
 * When the start and end addresses have crossed two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, if we need to invalidate addresses starting at 0xBFFF0000
 * and ending at 0xC0001000, we need to invalidate 1) 0xBFFF0000 - 0xBFFFFFFF
 * and 2) 0xC0000000 - 0xC0001000
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer some
 * performance hit, but keep in mind the cross section case is very rare
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid use
 * case
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2. Because of
 * that the code does not need to handle section 1 at all.
 *
 */
#define BCM_SYS_EMI_START_ADDR		0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR	0xC0000000UL

#define BCM_SYS_EMI_OFFSET		0x40000000UL
#define BCM_VC_EMI_OFFSET		0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}

static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_inv_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_clean_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		outer_cache.flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_flush_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

/* Broadcom L2C-310s are based on ARM's r3p2 or later, and require no fixups */
static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c_enable,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};

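/*
 * Tauros3 suspend/resume support: only the auxiliary 2 and prefetch
 * control registers need saving and restoring on top of the auxiliary
 * control value saved by the common code.
 */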
static void __init tauros3_save(void __iomem *base)
{
	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L2X0_PREFETCH_CTRL);
}

static void tauros3_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
			       base + TAUROS3_AUX2_CTRL);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       base + L2X0_PREFETCH_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
	}
}

static const struct l2c_init_data of_tauros3_data __initconst = {
	.num_lock = 8,
	.enable = l2c_enable,
	.save = tauros3_save,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = tauros3_resume,
	},
};

#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2c210_data),
	L2C_ID("arm,l220-cache", of_l2c220_data),
	L2C_ID("arm,pl310-cache", of_l2c310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};

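/*
 * Find the matching L2 cache controller node, map its registers and
 * initialise the controller.  DT properties are only parsed while the
 * controller is still disabled, since its configuration cannot be
 * changed once it is enabled; for Aurora the cache ID may come from the
 * "cache-id-part" property rather than the hardware register.
 */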
int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	__l2c_init(data, aux_val, aux_mask, cache_id);

	return 0;
}
#endif