/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */

/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
                                   int retry, int wait)
{
	preempt_disable();

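	/*
	 * MT SMP and SMTC kernels share the primary caches between the
	 * virtual CPUs, so the cross-CPU call is compiled out there and
	 * the direct call below is all that is needed.
	 */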
#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	smp_call_function(func, info, retry, wait);
#endif
	func(info);
	preempt_enable();
}

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

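/*
 * The low nibble of the PRId register is the minor revision, so the
 * 0xfffffff0 mask above matches any R4600 V1.x respectively V2.x part.
 */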
#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)

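/*
 * Each blast routine below is selected once at boot, after the cache
 * geometry has been probed, so the hot flush paths run a variant
 * specialized for the detected line size instead of re-testing it on
 * every call.
 */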
static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static void __init r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
}

static void (*r4k_blast_dcache_page_indexed)(unsigned long addr);

static void __init r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page_indexed = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
}

static void (*r4k_blast_dcache)(void);

static void __init r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order)						\
	__asm__ __volatile__(						\
		"b\t1f\n\t"						\
		".align\t" #order "\n\t"				\
		"1:\n\t"						\
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)

static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}

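/*
 * cache32_unroll32() touches 32 lines of 32 bytes, i.e. one 1kB chunk
 * per call.  The TX49 workaround below therefore aligns itself to the
 * chunk size and invalidates the icache in two passes: code running in
 * an even chunk only invalidates odd chunks and vice versa, so it never
 * invalidates the chunk it is currently executing from.
 */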
static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk.  blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk.  blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (*r4k_blast_icache_page)(unsigned long addr);

static void __init r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
}


static void (*r4k_blast_icache_page_indexed)(unsigned long addr);

static void __init r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page_indexed = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

static void (*r4k_blast_icache)(void);

static void __init r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
}

static void (*r4k_blast_scache_page)(unsigned long addr);

static void __init r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}

static void (*r4k_blast_scache_page_indexed)(unsigned long addr);

static void __init r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (*r4k_blast_scache)(void);

static void __init r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}

static inline void local_r4k___flush_cache_all(void * args)
{
#if defined(CONFIG_CPU_LOONGSON2)
	r4k_blast_scache();
	return;
#endif
	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_type()) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		r4k_blast_scache();
	}
}

static void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
}

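/*
 * An mm has been active on a CPU iff it has been handed an ASID there,
 * so checking the per-CPU contexts tells us whether any cache on the
 * system can still hold data for this mm.
 */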
static inline int has_valid_asid(const struct mm_struct *mm)
{
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
	int i;

	for_each_online_cpu(i)
		if (cpu_context(i, mm))
			return 1;

	return 0;
#else
	return cpu_context(smp_processor_id(), mm);
#endif
}

static inline void local_r4k_flush_cache_range(void * args)
{
	struct vm_area_struct *vma = args;

	if (!(has_valid_asid(vma->vm_mm)))
		return;

	r4k_blast_dcache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
}

static inline void local_r4k_flush_cache_mm(void * args)
{
	struct mm_struct *mm = args;

	if (!has_valid_asid(mm))
		return;

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts if
	 * we only flush the primary caches, while R10000 and R12000 behave
	 * sanely.  R4000SC and R4400SC indexed S-cache ops also invalidate
	 * the primary caches, so we can bail out early.
	 */
	if (current_cpu_type() == CPU_R4000SC ||
	    current_cpu_type() == CPU_R4000MC ||
	    current_cpu_type() == CPU_R4400SC ||
	    current_cpu_type() == CPU_R4400MC) {
		r4k_blast_scache();
		return;
	}

	r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
}

struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	struct page *page = pfn_to_page(fcp_args->pfn);
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	void *vaddr;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (!has_valid_asid(mm))
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_present(*ptep)))
		return;

	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
		vaddr = NULL;
	else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * another ASID than the current one.
		 */
		if (cpu_has_dc_aliases)
			vaddr = kmap_coherent(page, addr);
		else
			vaddr = kmap_atomic(page, KM_USER0);
		addr = (unsigned long)vaddr;
	}

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		r4k_blast_dcache_page(addr);
	}
	if (exec) {
		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			r4k_blast_icache_page(addr);
	}

	if (vaddr) {
		if (cpu_has_dc_aliases)
			kunmap_coherent();
		else
			kunmap_atomic(vaddr, KM_USER0);
	}
}

static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;

	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}

static inline void local_r4k_flush_data_cache_page(void * addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
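	/*
	 * smp_call_function() may not be used from atomic context, so
	 * fall back to a purely local flush there.
	 */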
	if (in_atomic())
		local_r4k_flush_data_cache_page((void *)addr);
	else
		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
				1, 1);
}

struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

static inline void local_r4k_flush_icache_range(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;

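	/*
	 * Unless the icache can fill straight from the dcache, write the
	 * dcache back first so the icache refill picks up the newly
	 * written instructions rather than stale memory contents.
	 */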
	if (!cpu_has_ic_fills_f_dc) {
		if (end - start >= dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			protected_blast_dcache_range(start, end);
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else
		protected_blast_icache_range(start, end);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
	instruction_hazard();
}

#ifdef CONFIG_DMA_NONCOHERENT

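/*
 * For noncoherent DMA the wback_inv variant pushes dirty lines out to
 * memory before a device reads it; the inv variant discards cached
 * lines so stale data cannot shadow what a device has just written.
 */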
static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly
	 */
	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_wback_inv(addr, size);
}

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_inv(addr, size);
}
#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void * arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	if (dc_lsize)
		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	if (ic_lsize)
		protected_flush_icache_line(addr & ~(ic_lsize - 1));
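	/*
	 * MIPS4K_ICACHE_REFILL_WAR: additionally invalidate the icache
	 * line holding the code right after this sequence, so an icache
	 * refill already in flight cannot resurrect stale instructions
	 * (see asm/war.h).
	 */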
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la	$at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla	$at,1f\n\t"
#endif
			"cache	%0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
}

static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}

static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}

static char *way_string[] __initdata = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};

static void __init probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;

	switch (c->cputype) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_VR4133:
		write_c0_config(config & ~VR41_CONF_P4K);
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config |= 0x00400000U;
			if (c->processor_id == 0x0c80U)
				config |= VR41_CONF_BP;
			write_c0_config(config);
		} else
			c->options |= MIPS_CPU_CACHE_CDEX_P;

		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);
		break;

	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_RM7000:
		rm7k_erratum31();

	case CPU_RM9000:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
		c->options |= MIPS_CPU_CACHE_CDEX_P;
#endif
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_LOONGSON2:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		if (prid & 0x3)
			c->icache.ways = 4;
		else
			c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		if (prid & 0x3)
			c->dcache.ways = 4;
		else
			c->dcache.ways = 2;
		c->dcache.waybit = 0;
		break;

	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU.
		 * Let's probe the I-cache first ...
		 */
		config1 = read_c0_config1();

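		/*
		 * Config1 encodes the I-cache geometry as IL (line size is
		 * 2 << IL bytes, IL = 0 means no I-cache), IS (64 << IS
		 * sets) and IA (IA + 1 ways); the D-cache fields below use
		 * the same encoding.
		 */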
		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
		              c->icache.ways *
		              c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & 0x8)		/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = lsize;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
		              c->dcache.ways *
		              c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}

	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping his hardware in the "bad"
	 * configuration.
	 */
	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = c->icache.linesz ?
		icache_size / (c->icache.linesz * c->icache.ways) : 0;
	c->dcache.sets = c->dcache.linesz ?
		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;

	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're
	 * 32kB 2-way virtually indexed, so they would normally suffer from
	 * aliases, but magic in the hardware deals with that for us so we
	 * don't need to take care ourselves.
	 */
	switch (c->cputype) {
	case CPU_20KC:
	case CPU_25KF:
	case CPU_SB1:
	case CPU_SB1A:
		c->dcache.flags |= MIPS_CACHE_PINDEX;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		break;

	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
		if ((read_c0_config7() & (1 << 16))) {
			/* effectively physically indexed dcache,
			   thus no virtual aliases. */
			c->dcache.flags |= MIPS_CACHE_PINDEX;
			break;
		}
	default:
		if (c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}

	switch (c->cputype) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_AU1000:
	case CPU_AU1500:
	case CPU_AU1100:
	case CPU_AU1550:
	case CPU_AU1200:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}

#ifdef CONFIG_CPU_LOONGSON2
	/*
	 * LOONGSON2 has a 4-way icache, but when using an indexed cache op
	 * one op will act on all 4 ways
	 */
	c->icache.ways = 1;
#endif

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       cpu_has_vtag_icache ? "VIVT" : "VIPT",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
	       dcache_size >> 10, way_string[c->dcache.ways],
	       (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
	       (c->dcache.flags & MIPS_CACHE_ALIASES) ?
			"cache aliases" : "no aliases",
	       c->dcache.linesz);
}

/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
static int __init probe_scache(void)
{
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;
	int tmp;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &_stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

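	/*
	 * Index ops wrap modulo the cache size, so the first power-of-2
	 * offset whose tag load returns the zero tag planted at 'begin'
	 * aliases back to the first line; that offset is the S-cache
	 * size.
	 */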
	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	tmp = 0;
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->dcache.waybit = 0;	/* does not matter */

	return 1;
}

#if defined(CONFIG_CPU_LOONGSON2)
static void __init loongson2_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	scache_size = 512*1024;
	c->scache.linesz = 32;
	c->scache.ways = 4;
	c->scache.waybit = 0;
	c->scache.waysize = scache_size / (c->scache.ways);
	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
		scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
#endif

extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);

static void __init setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have an S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (c->cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
	case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

#if defined(CONFIG_CPU_LOONGSON2)
	case CPU_LOONGSON2:
		loongson2_sc_init();
		return;
#endif

	default:
		if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
		    c->isa_level == MIPS_CPU_ISA_M32R2 ||
		    c->isa_level == MIPS_CPU_ISA_M64R1 ||
		    c->isa_level == MIPS_CPU_ISA_M64R2) {
#ifdef CONFIG_MIPS_CPU_SCACHE
			if (mips_sc_init()) {
				scache_size = c->scache.ways * c->scache.sets *
					      c->scache.linesz;
				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
				       scache_size >> 10,
				       way_string[c->scache.ways], c->scache.linesz);
			}
#else
			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
			return;
		}
		sc_present = 0;
	}

	if (!sc_present)
		return;

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write only (and read as 0) on the
	 * early revisions of Alchemy SOCs.  It disables the bus
	 * transaction overlapping and needs to be set to fix various
	 * errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * The Au1100 errata documentation is actually silent about this
	 * bit, so we set it just in case for those revisions that require
	 * it to be set according to arch/mips/au1000/common/cputable.c
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}

static void __init coherency_setup(void)
{
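	/* Set the kseg0 cacheability (Config.K0) to the platform default. */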
	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

	/*
	 * c0_config.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cachable
	 * coherent update on write will be used.  Not all processors have
	 * this bit; some wire it to zero, others like Toshiba had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	/*
	 * We need to catch the early Alchemy SOCs with
	 * the write-only c0_config.od bit and set it back to one...
	 */
	case CPU_AU1000: /* rev. DA, HA, HB */
	case CPU_AU1100: /* rev. AB, BA, BC ?? */
	case CPU_AU1500: /* rev. AB */
		au1x00_fixup_config_od();
		break;
	}
}

void __init r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	extern char __weak except_vec2_generic;
	extern char __weak except_vec2_sb1;
	struct cpuinfo_mips *c = &current_cpu_data;

	switch (c->cputype) {
	case CPU_SB1:
	case CPU_SB1A:
		set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
		break;

	default:
		set_uncached_handler(0x100, &except_vec2_generic, 0x80);
		break;
	}

	probe_pcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	if (c->dcache.linesz)
		shm_align_mask = max_t(unsigned long,
				       c->dcache.sets * c->dcache.linesz - 1,
				       PAGE_SIZE - 1);
	else
		shm_align_mask = PAGE_SIZE - 1;
	flush_cache_all		= cache_noop;
	__flush_cache_all	= r4k___flush_cache_all;
	flush_cache_mm		= r4k_flush_cache_mm;
	flush_cache_page	= r4k_flush_cache_page;
	flush_cache_range	= r4k_flush_cache_range;

	flush_cache_sigtramp	= r4k_flush_cache_sigtramp;
	flush_icache_all	= r4k_flush_icache_all;
	local_flush_data_cache_page	= local_r4k_flush_data_cache_page;
	flush_data_cache_page	= r4k_flush_data_cache_page;
	flush_icache_range	= r4k_flush_icache_range;

#ifdef CONFIG_DMA_NONCOHERENT
	_dma_cache_wback_inv	= r4k_dma_cache_wback_inv;
	_dma_cache_wback	= r4k_dma_cache_wback_inv;
	_dma_cache_inv		= r4k_dma_cache_inv;
#endif

	build_clear_page();
	build_copy_page();
	local_r4k___flush_cache_all(NULL);
	coherency_setup();
}