/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"

struct l2c_init_data {
	unsigned num_lock;
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*enable)(void __iomem *, u32, unsigned);
	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
	void (*save)(void __iomem *);
	struct outer_cache_fns outer_cache;
};

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

/*
 * Common code for all cache controllers.
 */
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

/*
 * This should only be called when we have a requirement that the
 * register be written due to a work-around, as platforms running
 * in non-secure mode may not be able to access this register.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	outer_cache.set_debug(val);
}

static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	l2c_wait_mask(reg, l2x0_way_mask);
}

static inline void l2c_unlock(void __iomem *base, unsigned num)
{
	unsigned i;

	for (i = 0; i < num; i++) {
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}
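
/*
 * Illustration: 'num' comes from l2c_init_data::num_lock below (e.g. 8
 * for the L2C-310 variants, 4 for Aurora), so l2c_unlock(base, 8)
 * clears the eight D/I lockdown register pairs, which sit
 * L2X0_LOCKDOWN_STRIDE bytes apart.
 */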

/*
 * Enable the L2 cache controller.  This function must only be
 * called when the cache controller is known to be disabled.
 */
static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned long flags;

	/* Only write the aux register if it needs changing */
	if (readl_relaxed(base + L2X0_AUX_CTRL) != aux)
		writel_relaxed(aux, base + L2X0_AUX_CTRL);

	l2c_unlock(base, num_lock);

	local_irq_save(flags);
	__l2c_op_way(base + L2X0_INV_WAY);
	writel_relaxed(0, base + sync_reg_offset);
	l2c_wait_mask(base + sync_reg_offset, 1);
	local_irq_restore(flags);

	writel_relaxed(L2X0_CTRL_EN, base + L2X0_CTRL);
}

static void l2c_disable(void)
{
	void __iomem *base = l2x0_base;

	outer_cache.flush_all();
	writel_relaxed(0, base + L2X0_CTRL);
	dsb(st);
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	l2c_wait_mask
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug)
		l2c_set_debug(l2x0_base, val);
}

static void pl310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define pl310_set_debug	NULL
#endif

#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_WAY);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a nono */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
	__l2c_op_way(l2x0_base + L2X0_INV_WAY);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_unlock(u32 cache_id)
{
	int lockregs;

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		lockregs = 8;
		break;
	default:
		/* L210 and unknown types */
		lockregs = 1;
		break;
	}

	l2c_unlock(l2x0_base, lockregs);
}

static void l2x0_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	/* l2x0 controller is disabled */
	writel_relaxed(aux, base + L2X0_AUX_CTRL);

	/* Make sure that I&D is not locked down when starting */
	l2x0_unlock(readl_relaxed(base + L2X0_CACHE_ID));

	l2x0_inv_all();

	/* enable L2X0 */
	writel_relaxed(L2X0_CTRL_EN, base + L2X0_CTRL);
}

static const struct l2c_init_data l2x0_init_fns __initconst = {
	.enable = l2x0_enable,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
	},
};

/*
 * L2C-310 specific code.
 *
 * Errata:
 * 588369: PL310 R0P0->R1P0, fixed R2P0.
 *	Affects: all clean+invalidate operations
 *	clean and invalidate skips the invalidate step, so we need to issue
 *	separate operations.  We also require the above debug workaround
 *	enclosing this code fragment on affected parts.  On unaffected parts,
 *	we must not use this workaround without the debug register writes
 *	to avoid exposing a problem similar to 727915.
 *
 * 727915: PL310 R2P0->R3P0, fixed R3P1.
 *	Affects: clean+invalidate by way
 *	clean and invalidate by way runs in the background, and a store can
 *	hit the line between the clean operation and invalidate operation,
 *	resulting in the store being lost.
 *
 * 753970: PL310 R3P0, fixed R3P1.
 *	Affects: sync
 *	prevents merging writes after the sync operation, until another L2C
 *	operation is performed (or a number of other conditions.)
 *
 * 769419: PL310 R0P0->R3P1, fixed R3P2.
 *	Affects: store buffer
 *	store buffer is not automatically drained.
 */
static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
	const char *errata[4];
	unsigned n = 0;

	if (revision <= L310_CACHE_ID_RTL_R3P0)
		fns->set_debug = pl310_set_debug;

	if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
	    revision == L310_CACHE_ID_RTL_R3P0) {
		sync_reg_offset = L2X0_DUMMY_REG;
		errata[n++] = "753970";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
		errata[n++] = "769419";

	if (n) {
		unsigned i;

		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
		for (i = 0; i < n; i++)
			pr_cont(" %s", errata[i]);
		pr_cont(" enabled\n");
	}
}
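
/*
 * Example boot log from the above, with CONFIG_PL310_ERRATA_753970 and
 * CONFIG_PL310_ERRATA_769419 both enabled on an r3p0 part:
 *
 *	L2C-310 errata 753970 769419 enabled
 */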

static const struct l2c_init_data l2c310_init_fns __initconst = {
	.num_lock = 8,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
	},
};

static void __init __l2c_init(const struct l2c_init_data *data,
	u32 aux_val, u32 aux_mask, u32 cache_id)
{
	struct outer_cache_fns fns;
	u32 aux;
	u32 way_size = 0;
	int ways;
	int way_size_shift = L2X0_WAY_SIZE_SHIFT;
	const char *type;

	/*
	 * It is strange to save the register state before initialisation,
	 * but hey, this is what the DT implementations decided to do.
	 */
	if (data->save)
		data->save(l2x0_base);

	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
		break;

	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;

	case AURORA_CACHE_ID:
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
		way_size_shift = AURORA_WAY_SIZE_SHIFT;
		type = "Aurora";
		break;

	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache Size =  Way size * Number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + way_size_shift);

	l2x0_size = ways * way_size * SZ_1K;
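
	/*
	 * Worked example (illustrative numbers): an 8-way L2C-310 whose
	 * AUX_CTRL way-size field reads 3 gives, with the default
	 * L2X0_WAY_SIZE_SHIFT of 3, way_size = 1 << (3 + 3) = 64, hence
	 * l2x0_size = 8 * 64 * SZ_1K = 512 KiB.
	 */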

	fns = data->outer_cache;
	if (data->fixup)
		data->fixup(l2x0_base, cache_id, &fns);

	/*
	 * Check if l2x0 controller is already enabled.  If we are booting
	 * in non-secure mode accessing the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		data->enable(l2x0_base, aux, data->num_lock);

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	/* Save the value for resuming. */
	l2x0_saved_regs.aux_ctrl = aux;

	outer_cache = fns;

	pr_info("%s cache controller enabled, %d ways, %d kB\n",
		type, ways, l2x0_size >> 10);
	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
		type, cache_id, aux);
}

void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	default:
		data = &l2x0_init_fns;
		break;

	case L2X0_CACHE_ID_PART_L310:
		data = &l2c310_init_fns;
		break;
	}

	__l2c_init(data, aux_val, aux_mask, cache_id);
}
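
/*
 * Non-DT platforms typically call l2x0_init() from their ->init_machine
 * hook with an ioremapped controller base; aux_val/aux_mask are merged
 * into the hardware AUX_CTRL value by __l2c_init() above.  For instance
 * (hypothetical values):
 *
 *	l2x0_init(l2c_base, 0x00400000, 0xc20f0fff);
 */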

#ifdef CONFIG_OF
static int l2_wt_override;

/* Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree */
static u32 cache_id_part_number_from_dt;

static void __init l2x0_of_parse(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}
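
/*
 * A device tree node consumed by l2x0_of_parse() might look like this
 * (illustrative values; latencies are in cycles):
 *
 *	L2: cache-controller@fff12000 {
 *		compatible = "arm,l220-cache";
 *		reg = <0xfff12000 0x1000>;
 *		arm,tag-latency = <2>;
 *		arm,data-latency = <2 2>;
 *		arm,dirty-latency = <3>;
 *	};
 */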

static void l2x0_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore aux ctrl and enable l2 */
		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));

		writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
			       L2X0_AUX_CTRL);

		l2x0_inv_all();

		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
	}
}

static const struct l2c_init_data of_l2x0_data __initconst = {
	.of_parse = l2x0_of_parse,
	.enable = l2x0_enable,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = l2x0_resume,
	},
};

static void __init pl310_of_parse(const struct device_node *np,
				  u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}
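
/*
 * Illustrative PL310 node for the parser above: the tag/data latencies
 * are <read write setup> triplets and arm,filter-ranges is
 * <start length> (values hypothetical):
 *
 *	L2: cache-controller@fffef000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xfffef000 0x1000>;
 *		arm,tag-latency = <1 1 1>;
 *		arm,data-latency = <2 1 1>;
 *		arm,filter-ranges = <0x80000000 0x40000000>;
 *	};
 */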

static void __init pl310_save(void __iomem *base)
{
	u32 l2x0_revision = readl_relaxed(base + L2X0_CACHE_ID) &
		L2X0_CACHE_ID_RTL_MASK;

	l2x0_saved_regs.tag_latency = readl_relaxed(base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(base +
		L2X0_ADDR_FILTER_START);

	if (l2x0_revision >= L310_CACHE_ID_RTL_R2P0) {
		/*
		 * From r2p0, there is Prefetch offset/control register
		 */
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
			L2X0_PREFETCH_CTRL);
		/*
		 * From r3p0, there is Power control register
		 */
		if (l2x0_revision >= L310_CACHE_ID_RTL_R3P0)
			l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
				L2X0_POWER_CTRL);
	}
}

static void pl310_resume(void)
{
	u32 l2x0_revision;

	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			       l2x0_base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			       l2x0_base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			       l2x0_base + L2X0_ADDR_FILTER_START);

		l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

		if (l2x0_revision >= L310_CACHE_ID_RTL_R2P0) {
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				       l2x0_base + L2X0_PREFETCH_CTRL);
			if (l2x0_revision >= L310_CACHE_ID_RTL_R3P0)
				writel_relaxed(l2x0_saved_regs.pwr_ctrl,
					       l2x0_base + L2X0_POWER_CTRL);
		}
	}

	l2x0_resume();
}

static const struct l2c_init_data of_pl310_data __initconst = {
	.num_lock = 8,
	.of_parse = pl310_of_parse,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = pl310_save,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = pl310_resume,
	},
};

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}
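
/*
 * Worked example (assuming MAX_RANGE_SIZE is no larger than a page):
 * for start = 0x1000fe0 and end = 0x1002000, the end is clipped to the
 * page boundary at 0x1001000, so the callers below loop, issuing one
 * aurora_pa_range() call per chunk.
 */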

/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			    unsigned long offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
	writel_relaxed(end, l2x0_base + offset);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	cache_sync();
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * Round the start address down and the end address up
	 * to the cache line size.
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
				AURORA_INVAL_RANGE_REG);
		start = range_end;
	}
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}

static void aurora_save(void __iomem *base)
{
	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}

static void aurora_resume(void)
{
	if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl,
			       l2x0_base + L2X0_AUX_CTRL);
		writel_relaxed(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL);
	}
}

/*
 * For Aurora cache in no outer mode, enable via the CP15 coprocessor
 * broadcasting of cache commands to L2.
 */
static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
	unsigned num_lock)
{
	u32 u;

	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));

	isb();

	l2c_enable(base, aux, num_lock);
}

static void __init aurora_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	sync_reg_offset = AURORA_SYNC_REG;
}

static void __init aurora_of_parse(const struct device_node *np,
				   u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			     &cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = l2c_enable,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = aurora_resume,
	},
};

static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = aurora_enable_no_outer,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.resume = aurora_resume,
	},
};

/*
 * For certain Broadcom SoCs, depending on the address range, different offsets
 * need to be added to the address before passing it to L2 for
 * invalidation/clean/flush
 *
 * Section   Address Range              Offset        EMI
 *   1       0x00000000 - 0x3FFFFFFF    0x80000000    VC
 *   2       0x40000000 - 0xBFFFFFFF    0x40000000    SYS
 *   3       0xC0000000 - 0xFFFFFFFF    0x80000000    VC
 *
 * When the start and end addresses fall in two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, to invalidate a range starting at 0xBFFF0000 and ending
 * at 0xC0001000, we invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and
 * 2) 0xC0000000 - 0xC0001000 separately.
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer some
 * performance hit, but keep in mind the cross section case is very rare
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid use
 * case
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2. Because of
 * that the code does not need to handle section 1 at all.
 *
 */
#define BCM_SYS_EMI_START_ADDR		0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR	0xC0000000UL

#define BCM_SYS_EMI_OFFSET		0x40000000UL
#define BCM_VC_EMI_OFFSET		0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}
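
/*
 * Worked example: a section 2 address such as 0x50000000 maps to
 * 0x50000000 + BCM_SYS_EMI_OFFSET = 0x90000000, while a section 3
 * address such as 0xC0001000 gets BCM_VC_EMI_OFFSET added and wraps
 * modulo 2^32 to 0x40001000.
 */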

static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_inv_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_clean_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_flush_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.num_lock = 8,
	.of_parse = pl310_of_parse,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = pl310_save,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = pl310_resume,
	},
};

static void __init tauros3_save(void __iomem *base)
{
	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L2X0_PREFETCH_CTRL);
}

static void tauros3_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
			       l2x0_base + TAUROS3_AUX2_CTRL);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       l2x0_base + L2X0_PREFETCH_CTRL);
	}

	l2x0_resume();
}

static const struct l2c_init_data of_tauros3_data __initconst = {
	.num_lock = 8,
	.enable = l2c_enable,
	.save = tauros3_save,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = tauros3_resume,
	},
};

#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2x0_data),
	L2C_ID("arm,l220-cache", of_l2x0_data),
	L2C_ID("arm,pl310-cache", of_pl310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};

int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	__l2c_init(data, aux_val, aux_mask, cache_id);

	return 0;
}
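
/*
 * DT platforms usually call this from their ->init_machine hook, e.g.
 * l2x0_of_init(0, ~0UL) to keep the AUX_CTRL value already programmed
 * by the bootloader, modified only by the properties parsed above.
 */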
#endif