arch/mips/mm/c-r4k.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */

/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
				   int retry, int wait)
{
	preempt_disable();

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	smp_call_function(func, info, retry, wait);
#endif
	func(info);
	preempt_enable();
}

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;
/*
 * Dummy cache handling routines for machines without board caches.
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

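/*
 * Note on the workaround below (the details live in asm/war.h): on R4600
 * v2.x cores a Hit cache op apparently only operates correctly once the
 * internal refill buffer has drained, which the dummy uncached load from
 * CKSEG1 forces; R4600 v1.x cores instead get a few nops to space the
 * cache op out in the pipeline.
 */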
#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)

static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

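/*
 * The blast function pointers below are filled in once at boot, based on
 * the probed cache line size, so the hot flush paths go through a single
 * indirect call instead of re-testing the cache geometry on every
 * invocation.
 */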
static void __init r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
}

static void (*r4k_blast_dcache_page_indexed)(unsigned long addr);

static void __init r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page_indexed = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
}

static void (*r4k_blast_dcache)(void);

static void __init r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order)						\
	__asm__ __volatile__(						\
		"b\t1f\n\t"						\
		".align\t" #order "\n\t"				\
		"1:\n\t"						\
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)

static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}

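/*
 * TX49XX_ICACHE_INDEX_INV_WAR: indexed invalidation of the I-cache chunk
 * we are currently executing from is apparently unsafe on TX49 cores, so
 * the loops below run from an even-aligned 1kB chunk while blasting the
 * odd chunks and vice versa (hence the CACHE32_UNROLL32_ALIGN* markers
 * in front of each loop pair).
 */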
static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in an even chunk; blast the odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in an odd chunk; blast the even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
			       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in an even chunk; blast the odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in an odd chunk; blast the even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (*r4k_blast_icache_page)(unsigned long addr);

static void __init r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
}

static void (*r4k_blast_icache_page_indexed)(unsigned long addr);

static void __init r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page_indexed = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

static void (*r4k_blast_icache)(void);

static void __init r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
}

static void (*r4k_blast_scache_page)(unsigned long addr);

static void __init r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}

static void (*r4k_blast_scache_page_indexed)(unsigned long addr);

static void __init r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (*r4k_blast_scache)(void);

static void __init r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}

static inline void local_r4k___flush_cache_all(void *args)
{
#if defined(CONFIG_CPU_LOONGSON2)
	r4k_blast_scache();
	return;
#endif
	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_type()) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		r4k_blast_scache();
	}
}

static void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1);
}

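/*
 * On SMP/SMTC kernels the mm may hold a live ASID on any online CPU, so
 * all of them have to be checked; on UP the current CPU's context is
 * sufficient.
 */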
static inline int has_valid_asid(const struct mm_struct *mm)
{
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
	int i;

	for_each_online_cpu(i)
		if (cpu_context(i, mm))
			return 1;

	return 0;
#else
	return cpu_context(smp_processor_id(), mm);
#endif
}

static inline void local_r4k_flush_cache_range(void *args)
{
	struct vm_area_struct *vma = args;

	if (!(has_valid_asid(vma->vm_mm)))
		return;

	r4k_blast_dcache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1);
}

static inline void local_r4k_flush_cache_mm(void *args)
{
	struct mm_struct *mm = args;

	if (!has_valid_asid(mm))
		return;

	/*
	 * Kludge alert.  For obscure reasons R4000SC and R4400SC go nuts
	 * if we only flush the primary caches, while R10000 and R12000
	 * behave sanely.  R4000SC and R4400SC indexed S-cache ops also
	 * invalidate the primary caches, so we can bail out early.
	 */
	if (current_cpu_type() == CPU_R4000SC ||
	    current_cpu_type() == CPU_R4000MC ||
	    current_cpu_type() == CPU_R4400SC ||
	    current_cpu_type() == CPU_R4400MC) {
		r4k_blast_scache();
		return;
	}

	r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1);
}

struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	struct page *page = pfn_to_page(fcp_args->pfn);
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	void *vaddr;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have
	 * gotten this page into the cache.
	 */
	if (!has_valid_asid(mm))
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))
		return;

	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
		vaddr = NULL;
	else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * an ASID other than the current one.
		 */
		if (cpu_has_dc_aliases)
			vaddr = kmap_coherent(page, addr);
		else
			vaddr = kmap_atomic(page, KM_USER0);
		addr = (unsigned long)vaddr;
	}

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		r4k_blast_dcache_page(addr);
		if (exec && !cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
	}
	if (exec) {
		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			r4k_blast_icache_page(addr);
	}

	if (vaddr) {
		if (cpu_has_dc_aliases)
			kunmap_coherent();
		else
			kunmap_atomic(vaddr, KM_USER0);
	}
}

static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;

	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1);
}

static inline void local_r4k_flush_data_cache_page(void *addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	if (in_atomic())
		local_r4k_flush_data_cache_page((void *)addr);
	else
		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
				1, 1);
}

struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

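/*
 * When the I-cache does not fill straight from the D-cache, freshly
 * written instructions have to be pushed out of the D-cache (and, if
 * stores aren't snooped remotely, out of the S-cache) before the I-cache
 * lines are invalidated, so the instruction fetch path sees them.
 */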
static inline void local_r4k_flush_icache_range(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;

	if (!cpu_has_ic_fills_f_dc) {
		if (end - start >= dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			protected_blast_dcache_range(start, end);
		}

		if (!cpu_icache_snoops_remote_store && scache_size) {
			if (end - start > scache_size)
				r4k_blast_scache();
			else
				protected_blast_scache_range(start, end);
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else
		protected_blast_icache_range(start, end);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1);
	instruction_hazard();
}

#ifdef CONFIG_DMA_NONCOHERENT

static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly.
	 */
	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_wback_inv(addr, size);
}

static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	if (size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_inv(addr, size);
}
#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case.  Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void *arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	if (dc_lsize)
		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	if (ic_lsize)
		protected_flush_icache_line(addr & ~(ic_lsize - 1));
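	/*
	 * MIPS4K_ICACHE_REFILL_WAR (see asm/war.h): the inline asm below
	 * issues a Hit_Invalidate_I on the line holding the instructions
	 * that follow (label 1), plus nops, apparently to guard against a
	 * stale I-cache refill on affected 4K cores.
	 */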
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la	$at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla	$at,1f\n\t"
#endif
			"cache	%0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1);
}

static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}

static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}

static char *way_string[] __initdata = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};

static void __init probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;

	switch (c->cputype) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_VR4133:
		write_c0_config(config & ~VR41_CONF_P4K);
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config |= 0x00400000U;
			if (c->processor_id == 0x0c80U)
				config |= VR41_CONF_BP;
			write_c0_config(config);
		} else
			c->options |= MIPS_CPU_CACHE_CDEX_P;

		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);
		break;

	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_RM7000:
		rm7k_erratum31();

	case CPU_RM9000:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
		c->options |= MIPS_CPU_CACHE_CDEX_P;
#endif
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_LOONGSON2:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		if (prid & 0x3)
			c->icache.ways = 4;
		else
			c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		if (prid & 0x3)
			c->dcache.ways = 4;
		else
			c->dcache.ways = 2;
		c->dcache.waybit = 0;
		break;

	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * So we seem to be a MIPS32 or MIPS64 CPU; let's probe
		 * the I-cache first ...
		 */
		config1 = read_c0_config1();

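		/*
		 * Config1 register layout (MIPS32/MIPS64 PRA): bits 24:22
		 * hold the I-cache sets per way, 21:19 the line size and
		 * 18:16 the associativity; bits 15:13, 12:10 and 9:7 hold
		 * the same fields for the D-cache.  A line size field of
		 * zero means no cache.
		 */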
		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
			      c->icache.ways *
			      c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & 0x8)		/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = lsize;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
			      c->dcache.ways *
			      c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}

	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5.  With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration.  The case is rather theoretical anyway;
	 * presumably no vendor is shipping his hardware in the "bad"
	 * configuration.
	 */
	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = c->icache.linesz ?
		icache_size / (c->icache.linesz * c->icache.ways) : 0;
	c->dcache.sets = c->dcache.linesz ?
		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;

	/*
	 * R10000 and R12000 P-caches are odd in a positive way.  They're
	 * 32kB 2-way virtually indexed caches, so normally they'd suffer
	 * from aliases, but magic in the hardware deals with that for us
	 * so we don't need to take care ourselves.
	 */
	switch (c->cputype) {
	case CPU_20KC:
	case CPU_25KF:
	case CPU_SB1:
	case CPU_SB1A:
		c->dcache.flags |= MIPS_CACHE_PINDEX;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		break;

	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
		if ((read_c0_config7() & (1 << 16))) {
			/* effectively physically indexed dcache,
			   thus no virtual aliases. */
			c->dcache.flags |= MIPS_CACHE_PINDEX;
			break;
		}
	default:
		if (c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}

	switch (c->cputype) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_AU1000:
	case CPU_AU1500:
	case CPU_AU1100:
	case CPU_AU1550:
	case CPU_AU1200:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}

#ifdef CONFIG_CPU_LOONGSON2
	/*
	 * LOONGSON2 has a 4-way icache, but when using an indexed cache
	 * op a single op acts on all 4 ways at once.
	 */
	c->icache.ways = 1;
#endif

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       cpu_has_vtag_icache ? "VIVT" : "VIPT",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
	       dcache_size >> 10, way_string[c->dcache.ways],
	       (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
	       (c->dcache.flags & MIPS_CACHE_ALIASES) ?
			"cache aliases" : "no aliases",
	       c->dcache.linesz);
}

/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly.  You have
 * been warned.
 */
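/*
 * In outline: the probe seeds valid tags at power-of-two strides over a
 * 4MB window, plants a zero (invalid) tag at the base index, then reads
 * tags back at growing power-of-two offsets; the first offset whose tag
 * reads back as zero has wrapped around onto the base line, so that
 * offset is the S-cache size.
 */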
static int __init probe_scache(void)
{
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;
	int tmp;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &_stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this.  Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	tmp = 0;
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->dcache.waybit = 0;	/* does not matter */

	return 1;
}

#if defined(CONFIG_CPU_LOONGSON2)
static void __init loongson2_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	scache_size = 512*1024;
	c->scache.linesz = 32;
	c->scache.ways = 4;
	c->scache.waybit = 0;
	c->scache.waysize = scache_size / (c->scache.ways);
	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
		scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
#endif

extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);

static void __init setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors.  Other
	 * processors don't have an S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (c->cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
	case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

#if defined(CONFIG_CPU_LOONGSON2)
	case CPU_LOONGSON2:
		loongson2_sc_init();
		return;
#endif

	default:
		if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
		    c->isa_level == MIPS_CPU_ISA_M32R2 ||
		    c->isa_level == MIPS_CPU_ISA_M64R1 ||
		    c->isa_level == MIPS_CPU_ISA_M64R2) {
#ifdef CONFIG_MIPS_CPU_SCACHE
			if (mips_sc_init()) {
				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
				       scache_size >> 10,
				       way_string[c->scache.ways], c->scache.linesz);
			}
#else
			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
			return;
		}
		sc_present = 0;
	}

	if (!sc_present)
		return;

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write only (and read as 0)
	 * on the early revisions of Alchemy SOCs.  It disables the bus
	 * transaction overlapping and needs to be set to fix various errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * The Au1100 errata documentation is actually silent about this
	 * bit, so we set it just in case for those revisions that require
	 * it to be set according to arch/mips/au1000/common/cputable.c.
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}

static void __init coherency_setup(void)
{
	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cacheable
	 * coherent update on write will be used.  Not all processors have
	 * this bit; some wire it to zero, others (like Toshiba) had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	/*
	 * We need to catch the early Alchemy SOCs with
	 * the write-only c0_config.od bit and set it back to one...
	 */
	case CPU_AU1000: /* rev. DA, HA, HB */
	case CPU_AU1100: /* rev. AB, BA, BC ?? */
	case CPU_AU1500: /* rev. AB */
		au1x00_fixup_config_od();
		break;
	}
}

void __init r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	extern char __weak except_vec2_generic;
	extern char __weak except_vec2_sb1;
	struct cpuinfo_mips *c = &current_cpu_data;

	switch (c->cputype) {
	case CPU_SB1:
	case CPU_SB1A:
		set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
		break;

	default:
		set_uncached_handler(0x100, &except_vec2_generic, 0x80);
		break;
	}

	probe_pcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
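	/*
	 * shm_align_mask makes shared mappings line up on D-cache
	 * way-size (sets * linesz) boundaries, so different virtual
	 * addresses of the same page hit the same cache index and
	 * cannot alias.
	 */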
	if (c->dcache.linesz)
		shm_align_mask = max_t(unsigned long,
				       c->dcache.sets * c->dcache.linesz - 1,
				       PAGE_SIZE - 1);
	else
		shm_align_mask = PAGE_SIZE - 1;
	flush_cache_all = cache_noop;
	__flush_cache_all = r4k___flush_cache_all;
	flush_cache_mm = r4k_flush_cache_mm;
	flush_cache_page = r4k_flush_cache_page;
	flush_cache_range = r4k_flush_cache_range;

	flush_cache_sigtramp = r4k_flush_cache_sigtramp;
	flush_icache_all = r4k_flush_icache_all;
	local_flush_data_cache_page = local_r4k_flush_data_cache_page;
	flush_data_cache_page = r4k_flush_data_cache_page;
	flush_icache_range = r4k_flush_icache_range;

#ifdef CONFIG_DMA_NONCOHERENT
	_dma_cache_wback_inv = r4k_dma_cache_wback_inv;
	_dma_cache_wback = r4k_dma_cache_wback_inv;
	_dma_cache_inv = r4k_dma_cache_inv;
#endif

	build_clear_page();
	build_copy_page();
	local_r4k___flush_cache_all(NULL);
	coherency_setup();
}