/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bitops.h>

#include <asm/bcache.h>
#include <asm/bootinfo.h>
#include <asm/cache.h>
#include <asm/cacheops.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/r4kcache.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */


/*
 * Special Variant of smp_call_function for use by cache functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info,
                                   int wait)
{
	preempt_disable();

#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
	smp_call_function(func, info, wait);
#endif
	func(info);
	preempt_enable();
}

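/*
 * Indexed cache ops act only on the local core's caches. On CMP
 * (multi-core) systems other cores may hold their own copies of a
 * line, so a whole-cache indexed blast is no substitute for flushing
 * an address range; hence index cacheops are not "safe" there.
 */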
#if defined(CONFIG_MIPS_CMP)
#define cpu_has_safe_index_cacheops 0
#else
#define cpu_has_safe_index_cacheops 1
#endif

/*
 * Must die.
 */
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
static unsigned long scache_size __read_mostly;

/*
 * Dummy cache handling routines for machines without board caches
 */
static void cache_noop(void) {}

static struct bcache_ops no_sc_ops = {
	.bc_enable = (void *)cache_noop,
	.bc_disable = (void *)cache_noop,
	.bc_wback_inv = (void *)cache_noop,
	.bc_inv = (void *)cache_noop
};

struct bcache_ops *bcops = &no_sc_ops;

#define cpu_is_r4600_v1_x()	((read_c0_prid() & 0xfffffff0) == 0x00002010)
#define cpu_is_r4600_v2_x()	((read_c0_prid() & 0xfffffff0) == 0x00002020)

#define R4600_HIT_CACHEOP_WAR_IMPL					\
do {									\
	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())		\
		*(volatile unsigned long *)CKSEG1;			\
	if (R4600_V1_HIT_CACHEOP_WAR)					\
		__asm__ __volatile__("nop;nop;nop;nop");		\
} while (0)

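/*
 * The blast_* helpers from <asm/r4kcache.h> are specialised per cache
 * line size, so each operation is dispatched through a function
 * pointer that the *_setup() routines below bind once, at boot, to
 * the variant matching the probed geometry.
 */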
static void (*r4k_blast_dcache_page)(unsigned long addr);

static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
{
	R4600_HIT_CACHEOP_WAR_IMPL;
	blast_dcache32_page(addr);
}

static void __cpuinit r4k_blast_dcache_page_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page = blast_dcache16_page;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
}

static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache_page_indexed = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
	else if (dc_lsize == 32)
		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
}

static void (* r4k_blast_dcache)(void);

static void __cpuinit r4k_blast_dcache_setup(void)
{
	unsigned long dc_lsize = cpu_dcache_line_size();

	if (dc_lsize == 0)
		r4k_blast_dcache = (void *)cache_noop;
	else if (dc_lsize == 16)
		r4k_blast_dcache = blast_dcache16;
	else if (dc_lsize == 32)
		r4k_blast_dcache = blast_dcache32;
}

/* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
#define JUMP_TO_ALIGN(order) \
	__asm__ __volatile__( \
		"b\t1f\n\t" \
		".align\t" #order "\n\t" \
		"1:\n\t" \
		)
#define CACHE32_UNROLL32_ALIGN	JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */
#define CACHE32_UNROLL32_ALIGN2	JUMP_TO_ALIGN(11)

static inline void blast_r4600_v1_icache32(void)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32();
	local_irq_restore(flags);
}

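/*
 * TX49 workaround: index-invalidating the icache chunk that holds the
 * currently executing loop is unsafe, so the loop body is aligned to a
 * 1kB boundary and run twice -- code in an even 1kB chunk blasts the
 * odd chunks, then code in an odd chunk blasts the even ones.
 */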
static inline void tx49_blast_icache32(void)
{
	unsigned long start = INDEX_BASE;
	unsigned long end = start + current_cpu_data.icache.waysize;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk. blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk. blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page)
{
	unsigned long flags;

	local_irq_save(flags);
	blast_icache32_page_indexed(page);
	local_irq_restore(flags);
}

static inline void tx49_blast_icache32_page_indexed(unsigned long page)
{
	unsigned long indexmask = current_cpu_data.icache.waysize - 1;
	unsigned long start = INDEX_BASE + (page & indexmask);
	unsigned long end = start + PAGE_SIZE;
	unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit;
	unsigned long ws_end = current_cpu_data.icache.ways <<
	                       current_cpu_data.icache.waybit;
	unsigned long ws, addr;

	CACHE32_UNROLL32_ALIGN2;
	/* I'm in even chunk. blast odd chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start + 0x400; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
	CACHE32_UNROLL32_ALIGN;
	/* I'm in odd chunk. blast even chunks */
	for (ws = 0; ws < ws_end; ws += ws_inc)
		for (addr = start; addr < end; addr += 0x400 * 2)
			cache32_unroll32(addr|ws, Index_Invalidate_I);
}

static void (* r4k_blast_icache_page)(unsigned long addr);

static void __cpuinit r4k_blast_icache_page_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page = blast_icache16_page;
	else if (ic_lsize == 32)
		r4k_blast_icache_page = blast_icache32_page;
	else if (ic_lsize == 64)
		r4k_blast_icache_page = blast_icache64_page;
}


static void (* r4k_blast_icache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_icache_page_indexed_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache_page_indexed = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache_page_indexed =
				blast_icache32_r4600_v1_page_indexed;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache_page_indexed =
				tx49_blast_icache32_page_indexed;
		else
			r4k_blast_icache_page_indexed =
				blast_icache32_page_indexed;
	} else if (ic_lsize == 64)
		r4k_blast_icache_page_indexed = blast_icache64_page_indexed;
}

static void (* r4k_blast_icache)(void);

static void __cpuinit r4k_blast_icache_setup(void)
{
	unsigned long ic_lsize = cpu_icache_line_size();

	if (ic_lsize == 0)
		r4k_blast_icache = (void *)cache_noop;
	else if (ic_lsize == 16)
		r4k_blast_icache = blast_icache16;
	else if (ic_lsize == 32) {
		if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
			r4k_blast_icache = blast_r4600_v1_icache32;
		else if (TX49XX_ICACHE_INDEX_INV_WAR)
			r4k_blast_icache = tx49_blast_icache32;
		else
			r4k_blast_icache = blast_icache32;
	} else if (ic_lsize == 64)
		r4k_blast_icache = blast_icache64;
}

static void (* r4k_blast_scache_page)(unsigned long addr);

static void __cpuinit r4k_blast_scache_page_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page = blast_scache16_page;
	else if (sc_lsize == 32)
		r4k_blast_scache_page = blast_scache32_page;
	else if (sc_lsize == 64)
		r4k_blast_scache_page = blast_scache64_page;
	else if (sc_lsize == 128)
		r4k_blast_scache_page = blast_scache128_page;
}

static void (* r4k_blast_scache_page_indexed)(unsigned long addr);

static void __cpuinit r4k_blast_scache_page_indexed_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache_page_indexed = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
	else if (sc_lsize == 32)
		r4k_blast_scache_page_indexed = blast_scache32_page_indexed;
	else if (sc_lsize == 64)
		r4k_blast_scache_page_indexed = blast_scache64_page_indexed;
	else if (sc_lsize == 128)
		r4k_blast_scache_page_indexed = blast_scache128_page_indexed;
}

static void (* r4k_blast_scache)(void);

static void __cpuinit r4k_blast_scache_setup(void)
{
	unsigned long sc_lsize = cpu_scache_line_size();

	if (scache_size == 0)
		r4k_blast_scache = (void *)cache_noop;
	else if (sc_lsize == 16)
		r4k_blast_scache = blast_scache16;
	else if (sc_lsize == 32)
		r4k_blast_scache = blast_scache32;
	else if (sc_lsize == 64)
		r4k_blast_scache = blast_scache64;
	else if (sc_lsize == 128)
		r4k_blast_scache = blast_scache128;
}

static inline void local_r4k___flush_cache_all(void * args)
{
#if defined(CONFIG_CPU_LOONGSON2)
	r4k_blast_scache();
	return;
#endif
	r4k_blast_dcache();
	r4k_blast_icache();

	switch (current_cpu_type()) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		r4k_blast_scache();
	}
}

static void r4k___flush_cache_all(void)
{
	r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1);
}

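/*
 * An mm that has never run on any CPU has no ASID assigned, and thus
 * cannot have brought anything into the caches yet.
 */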
static inline int has_valid_asid(const struct mm_struct *mm)
{
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
	int i;

	for_each_online_cpu(i)
		if (cpu_context(i, mm))
			return 1;

	return 0;
#else
	return cpu_context(smp_processor_id(), mm);
#endif
}

static void r4k__flush_cache_vmap(void)
{
	r4k_blast_dcache();
}

static void r4k__flush_cache_vunmap(void)
{
	r4k_blast_dcache();
}

static inline void local_r4k_flush_cache_range(void * args)
{
	struct vm_area_struct *vma = args;
	int exec = vma->vm_flags & VM_EXEC;

	if (!(has_valid_asid(vma->vm_mm)))
		return;

	r4k_blast_dcache();
	if (exec)
		r4k_blast_icache();
}

static void r4k_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	int exec = vma->vm_flags & VM_EXEC;

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
		r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1);
}

static inline void local_r4k_flush_cache_mm(void * args)
{
	struct mm_struct *mm = args;

	if (!has_valid_asid(mm))
		return;

	/*
	 * Kludge alert. For obscure reasons R4000SC and R4400SC go nuts if we
	 * only flush the primary caches but R10000 and R12000 behave sanely ...
	 * R4000SC and R4400SC indexed S-cache ops also invalidate primary
	 * caches, so we can bail out early.
	 */
	if (current_cpu_type() == CPU_R4000SC ||
	    current_cpu_type() == CPU_R4000MC ||
	    current_cpu_type() == CPU_R4400SC ||
	    current_cpu_type() == CPU_R4400MC) {
		r4k_blast_scache();
		return;
	}

	r4k_blast_dcache();
}

static void r4k_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1);
}

struct flush_cache_page_args {
	struct vm_area_struct *vma;
	unsigned long addr;
	unsigned long pfn;
};

static inline void local_r4k_flush_cache_page(void *args)
{
	struct flush_cache_page_args *fcp_args = args;
	struct vm_area_struct *vma = fcp_args->vma;
	unsigned long addr = fcp_args->addr;
	struct page *page = pfn_to_page(fcp_args->pfn);
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	int map_coherent = 0;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	void *vaddr;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (!has_valid_asid(mm))
		return;

	addr &= PAGE_MASK;
	pgdp = pgd_offset(mm, addr);
	pudp = pud_offset(pgdp, addr);
	pmdp = pmd_offset(pudp, addr);
	ptep = pte_offset(pmdp, addr);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_present(*ptep)))
		return;

	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
		vaddr = NULL;
	else {
		/*
		 * Use kmap_coherent or kmap_atomic to do flushes for
		 * another ASID than the current one.
		 */
		map_coherent = (cpu_has_dc_aliases &&
			page_mapped(page) && !Page_dcache_dirty(page));
		if (map_coherent)
			vaddr = kmap_coherent(page, addr);
		else
			vaddr = kmap_atomic(page, KM_USER0);
		addr = (unsigned long)vaddr;
	}

	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
		r4k_blast_dcache_page(addr);
		if (exec && !cpu_icache_snoops_remote_store)
			r4k_blast_scache_page(addr);
	}
	if (exec) {
		if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
			int cpu = smp_processor_id();

			if (cpu_context(cpu, mm) != 0)
				drop_mmu_context(mm, cpu);
		} else
			r4k_blast_icache_page(addr);
	}

	if (vaddr) {
		if (map_coherent)
			kunmap_coherent();
		else
			kunmap_atomic(vaddr, KM_USER0);
	}
}

static void r4k_flush_cache_page(struct vm_area_struct *vma,
	unsigned long addr, unsigned long pfn)
{
	struct flush_cache_page_args args;

	args.vma = vma;
	args.addr = addr;
	args.pfn = pfn;

	r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1);
}

static inline void local_r4k_flush_data_cache_page(void * addr)
{
	r4k_blast_dcache_page((unsigned long) addr);
}

static void r4k_flush_data_cache_page(unsigned long addr)
{
	if (in_atomic())
		local_r4k_flush_data_cache_page((void *)addr);
	else
		r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr,
				1);
}

struct flush_icache_range_args {
	unsigned long start;
	unsigned long end;
};

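/*
 * Write back the dcache over the range first so that icache refills
 * see the new instructions, then invalidate the icache. Whole-cache
 * blasts are cheaper once the range exceeds the cache size.
 */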
static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	if (!cpu_has_ic_fills_f_dc) {
		if (end - start >= dcache_size) {
			r4k_blast_dcache();
		} else {
			R4600_HIT_CACHEOP_WAR_IMPL;
			protected_blast_dcache_range(start, end);
		}
	}

	if (end - start > icache_size)
		r4k_blast_icache();
	else
		protected_blast_icache_range(start, end);
}

static inline void local_r4k_flush_icache_range_ipi(void *args)
{
	struct flush_icache_range_args *fir_args = args;
	unsigned long start = fir_args->start;
	unsigned long end = fir_args->end;

	local_r4k_flush_icache_range(start, end);
}

static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
	struct flush_icache_range_args args;

	args.start = start;
	args.end = end;

	r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args, 1);
	instruction_hazard();
}

#ifdef CONFIG_DMA_NONCOHERENT

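/*
 * Prepare a buffer for DMA: write dirty lines back to memory and
 * invalidate them, so the device reads current data and the CPU
 * refetches from memory after the transfer.
 */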
static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else
			blast_scache_range(addr, addr + size);
		return;
	}

	/*
	 * Either no secondary cache or the available caches don't have the
	 * subset property so we have to flush the primary caches
	 * explicitly
	 */
	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		R4600_HIT_CACHEOP_WAR_IMPL;
		blast_dcache_range(addr, addr + size);
	}

	bc_wback_inv(addr, size);
}

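/*
 * Invalidate a buffer after DMA from a device. Partial lines at the
 * ends of the range are written back first so that invalidation does
 * not destroy adjacent data sharing those cache lines.
 */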
static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	if (cpu_has_inclusive_pcaches) {
		if (size >= scache_size)
			r4k_blast_scache();
		else {
			unsigned long lsize = cpu_scache_line_size();
			unsigned long almask = ~(lsize - 1);

			/*
			 * There is no clearly documented alignment requirement
			 * for the cache instruction on MIPS processors, and
			 * some processors, among them the QED RM5200 and
			 * RM7000, will throw an address error for cache hit
			 * ops with insufficient alignment. Solved by aligning
			 * the address to the cache line size.
			 */
			cache_op(Hit_Writeback_Inv_SD, addr & almask);
			cache_op(Hit_Writeback_Inv_SD,
				 (addr + size - 1) & almask);
			blast_inv_scache_range(addr, addr + size);
		}
		return;
	}

	if (cpu_has_safe_index_cacheops && size >= dcache_size) {
		r4k_blast_dcache();
	} else {
		unsigned long lsize = cpu_dcache_line_size();
		unsigned long almask = ~(lsize - 1);

		R4600_HIT_CACHEOP_WAR_IMPL;
		cache_op(Hit_Writeback_Inv_D, addr & almask);
		cache_op(Hit_Writeback_Inv_D, (addr + size - 1) & almask);
		blast_inv_dcache_range(addr, addr + size);
	}

	bc_inv(addr, size);
}
#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * While we're protected against bad userland addresses we don't care
 * very much about what happens in that case. Usually a segmentation
 * fault will dump the process later on anyway ...
 */
static void local_r4k_flush_cache_sigtramp(void * arg)
{
	unsigned long ic_lsize = cpu_icache_line_size();
	unsigned long dc_lsize = cpu_dcache_line_size();
	unsigned long sc_lsize = cpu_scache_line_size();
	unsigned long addr = (unsigned long) arg;

	R4600_HIT_CACHEOP_WAR_IMPL;
	if (dc_lsize)
		protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
	if (!cpu_icache_snoops_remote_store && scache_size)
		protected_writeback_scache_line(addr & ~(sc_lsize - 1));
	if (ic_lsize)
		protected_flush_icache_line(addr & ~(ic_lsize - 1));
	if (MIPS4K_ICACHE_REFILL_WAR) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noat\n\t"
			".set mips3\n\t"
#ifdef CONFIG_32BIT
			"la	$at,1f\n\t"
#endif
#ifdef CONFIG_64BIT
			"dla	$at,1f\n\t"
#endif
			"cache	%0,($at)\n\t"
			"nop; nop; nop\n"
			"1:\n\t"
			".set pop"
			:
			: "i" (Hit_Invalidate_I));
	}
	if (MIPS_CACHE_SYNC_WAR)
		__asm__ __volatile__ ("sync");
}

static void r4k_flush_cache_sigtramp(unsigned long addr)
{
	r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1);
}

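/*
 * Only a virtually tagged icache can hold stale entries for a new
 * context; physically tagged icaches need no action here.
 */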
static void r4k_flush_icache_all(void)
{
	if (cpu_has_vtag_icache)
		r4k_blast_icache();
}

static inline void rm7k_erratum31(void)
{
	const unsigned long ic_lsize = 32;
	unsigned long addr;

	/* RM7000 erratum #31. The icache is screwed at startup. */
	write_c0_taglo(0);
	write_c0_taghi(0);

	for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) {
		__asm__ __volatile__ (
			".set push\n\t"
			".set noreorder\n\t"
			".set mips3\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			"cache\t%2, 0(%0)\n\t"
			"cache\t%2, 0x1000(%0)\n\t"
			"cache\t%2, 0x2000(%0)\n\t"
			"cache\t%2, 0x3000(%0)\n\t"
			"cache\t%1, 0(%0)\n\t"
			"cache\t%1, 0x1000(%0)\n\t"
			"cache\t%1, 0x2000(%0)\n\t"
			"cache\t%1, 0x3000(%0)\n\t"
			".set pop\n"
			:
			: "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill));
	}
}

static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
	"3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
};

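/*
 * Decode the primary cache geometry (size, line size, ways) from the
 * CP0 Config/Config1 registers, with per-family special cases and
 * workarounds applied along the way.
 */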
static void __cpuinit probe_pcache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	unsigned int prid = read_c0_prid();
	unsigned long config1;
	unsigned int lsize;

	switch (c->cputype) {
	case CPU_R4600:			/* QED style two way caches? */
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R5432:
	case CPU_R5500:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH;
		break;

	case CPU_TX49XX:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
	case CPU_R4300:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29));
		c->icache.linesz = 64;
		c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26));
		c->dcache.linesz = 32;
		c->dcache.ways = 2;
		c->dcache.waybit = 0;

		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_VR4133:
		write_c0_config(config & ~VR41_CONF_P4K);
	case CPU_VR4131:
		/* Workaround for cache instruction bug of VR4131 */
		if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U ||
		    c->processor_id == 0x0c82U) {
			config |= 0x00400000U;
			if (c->processor_id == 0x0c80U)
				config |= VR41_CONF_BP;
			write_c0_config(config);
		} else
			c->options |= MIPS_CPU_CACHE_CDEX_P;

		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 2;
		c->icache.waybit = __ffs(icache_size/2);

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 2;
		c->dcache.waybit = __ffs(dcache_size/2);
		break;

	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		icache_size = 1 << (10 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 1;
		c->icache.waybit = 0;	/* doesn't matter */

		dcache_size = 1 << (10 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 1;
		c->dcache.waybit = 0;	/* does not matter */

		c->options |= MIPS_CPU_CACHE_CDEX_P;
		break;

	case CPU_RM7000:
		rm7k_erratum31();

	case CPU_RM9000:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		c->icache.ways = 4;
		c->icache.waybit = __ffs(icache_size / c->icache.ways);

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		c->dcache.ways = 4;
		c->dcache.waybit = __ffs(dcache_size / c->dcache.ways);

#if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR)
		c->options |= MIPS_CPU_CACHE_CDEX_P;
#endif
		c->options |= MIPS_CPU_PREFETCH;
		break;

	case CPU_LOONGSON2:
		icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
		c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
		if (prid & 0x3)
			c->icache.ways = 4;
		else
			c->icache.ways = 2;
		c->icache.waybit = 0;

		dcache_size = 1 << (12 + ((config & CONF_DC) >> 6));
		c->dcache.linesz = 16 << ((config & CONF_DB) >> 4);
		if (prid & 0x3)
			c->dcache.ways = 4;
		else
			c->dcache.ways = 2;
		c->dcache.waybit = 0;
		break;

	default:
		if (!(config & MIPS_CONF_M))
			panic("Don't know how to probe P-caches on this cpu.");

		/*
		 * We seem to be a MIPS32 or MIPS64 CPU, so let's probe the
		 * I-cache ...
		 */
		config1 = read_c0_config1();

		if ((lsize = ((config1 >> 19) & 7)))
			c->icache.linesz = 2 << lsize;
		else
			c->icache.linesz = lsize;
		c->icache.sets = 64 << ((config1 >> 22) & 7);
		c->icache.ways = 1 + ((config1 >> 16) & 7);

		icache_size = c->icache.sets *
		              c->icache.ways *
		              c->icache.linesz;
		c->icache.waybit = __ffs(icache_size/c->icache.ways);

		if (config & 0x8)		/* VI bit */
			c->icache.flags |= MIPS_CACHE_VTAG;

		/*
		 * Now probe the MIPS32 / MIPS64 data cache.
		 */
		c->dcache.flags = 0;

		if ((lsize = ((config1 >> 10) & 7)))
			c->dcache.linesz = 2 << lsize;
		else
			c->dcache.linesz = lsize;
		c->dcache.sets = 64 << ((config1 >> 13) & 7);
		c->dcache.ways = 1 + ((config1 >> 7) & 7);

		dcache_size = c->dcache.sets *
		              c->dcache.ways *
		              c->dcache.linesz;
		c->dcache.waybit = __ffs(dcache_size/c->dcache.ways);

		c->options |= MIPS_CPU_PREFETCH;
		break;
	}

	/*
	 * Processor configuration sanity check for the R4000SC erratum
	 * #5. With page sizes larger than 32kB there is no possibility
	 * to get a VCE exception anymore so we don't care about this
	 * misconfiguration. The case is rather theoretical anyway;
	 * presumably no vendor is shipping its hardware in the "bad"
	 * configuration.
	 */
	if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 &&
	    !(config & CONF_SC) && c->icache.linesz != 16 &&
	    PAGE_SIZE <= 0x8000)
		panic("Improper R4000SC processor configuration detected");

	/* compute a couple of other cache variables */
	c->icache.waysize = icache_size / c->icache.ways;
	c->dcache.waysize = dcache_size / c->dcache.ways;

	c->icache.sets = c->icache.linesz ?
		icache_size / (c->icache.linesz * c->icache.ways) : 0;
	c->dcache.sets = c->dcache.linesz ?
		dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;

	/*
	 * R10000 and R12000 P-caches are odd in a positive way. They're 32kB
	 * 2-way virtually indexed, so they'd normally suffer from aliases,
	 * but magic in the hardware deals with that for us so we don't need
	 * to take care ourselves.
	 */
	switch (c->cputype) {
	case CPU_20KC:
	case CPU_25KF:
	case CPU_SB1:
	case CPU_SB1A:
		c->dcache.flags |= MIPS_CACHE_PINDEX;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		break;

	case CPU_24K:
	case CPU_34K:
	case CPU_74K:
	case CPU_1004K:
		if ((read_c0_config7() & (1 << 16))) {
			/* effectively physically indexed dcache,
			   thus no virtual aliases. */
			c->dcache.flags |= MIPS_CACHE_PINDEX;
			break;
		}
	default:
		if (c->dcache.waysize > PAGE_SIZE)
			c->dcache.flags |= MIPS_CACHE_ALIASES;
	}

	switch (c->cputype) {
	case CPU_20KC:
		/*
		 * Some older 20Kc chips don't have the 'VI' bit in
		 * the config register.
		 */
		c->icache.flags |= MIPS_CACHE_VTAG;
		break;

	case CPU_ALCHEMY:
		c->icache.flags |= MIPS_CACHE_IC_F_DC;
		break;
	}

#ifdef CONFIG_CPU_LOONGSON2
	/*
	 * LOONGSON2 has 4 way icache, but when using indexed cache op,
	 * one op will act on all 4 ways
	 */
	c->icache.ways = 1;
#endif

	printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
	       icache_size >> 10,
	       cpu_has_vtag_icache ? "VIVT" : "VIPT",
	       way_string[c->icache.ways], c->icache.linesz);

	printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
	       dcache_size >> 10, way_string[c->dcache.ways],
	       (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT",
	       (c->dcache.flags & MIPS_CACHE_ALIASES) ?
			"cache aliases" : "no aliases",
	       c->dcache.linesz);
}

/*
 * If you even _breathe_ on this function, look at the gcc output and make sure
 * it does not pop things on and off the stack for the cache sizing loop that
 * executes in KSEG1 space or else you will crash and burn badly. You have
 * been warned.
 */
static int __cpuinit probe_scache(void)
{
	unsigned long flags, addr, begin, end, pow2;
	unsigned int config = read_c0_config();
	struct cpuinfo_mips *c = &current_cpu_data;
	int tmp;

	if (config & CONF_SC)
		return 0;

	begin = (unsigned long) &_stext;
	begin &= ~((4 * 1024 * 1024) - 1);
	end = begin + (4 * 1024 * 1024);

	/*
	 * This is such a bitch, you'd think they would make it easy to do
	 * this. Away you daemons of stupidity!
	 */
	local_irq_save(flags);

	/* Fill each size-multiple cache line with a valid tag. */
	pow2 = (64 * 1024);
	for (addr = begin; addr < end; addr = (begin + pow2)) {
		unsigned long *p = (unsigned long *) addr;
		__asm__ __volatile__("nop" : : "r" (*p)); /* whee... */
		pow2 <<= 1;
	}

	/* Load first line with zero (therefore invalid) tag. */
	write_c0_taglo(0);
	write_c0_taghi(0);
	__asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */
	cache_op(Index_Store_Tag_I, begin);
	cache_op(Index_Store_Tag_D, begin);
	cache_op(Index_Store_Tag_SD, begin);

	/* Now search for the wrap around point. */
	pow2 = (128 * 1024);
	tmp = 0;
	for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) {
		cache_op(Index_Load_Tag_SD, addr);
		__asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */
		if (!read_c0_taglo())
			break;
		pow2 <<= 1;
	}
	local_irq_restore(flags);
	addr -= begin;

	scache_size = addr;
	c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
	c->scache.ways = 1;
	c->scache.waybit = 0;		/* does not matter */

	return 1;
}

#if defined(CONFIG_CPU_LOONGSON2)
static void __init loongson2_sc_init(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;

	scache_size = 512*1024;
	c->scache.linesz = 32;
	c->scache.ways = 4;
	c->scache.waybit = 0;
	c->scache.waysize = scache_size / (c->scache.ways);
	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);
	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}
#endif

extern int r5k_sc_init(void);
extern int rm7k_sc_init(void);
extern int mips_sc_init(void);

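/*
 * Probe or configure the secondary cache, dispatching to the
 * board/CPU specific initialisers where the S-cache is managed
 * outside this file.
 */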
static void __cpuinit setup_scache(void)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int config = read_c0_config();
	int sc_present = 0;

	/*
	 * Do the probing thing on R4000SC and R4400SC processors. Other
	 * processors don't have an S-cache that would be relevant to the
	 * Linux memory management.
	 */
	switch (c->cputype) {
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		sc_present = run_uncached(probe_scache);
		if (sc_present)
			c->options |= MIPS_CPU_CACHE_CDEX_S;
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
		scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16);
		c->scache.linesz = 64 << ((config >> 13) & 1);
		c->scache.ways = 2;
		c->scache.waybit = 0;
		sc_present = 1;
		break;

	case CPU_R5000:
	case CPU_NEVADA:
#ifdef CONFIG_R5000_CPU_SCACHE
		r5k_sc_init();
#endif
		return;

	case CPU_RM7000:
	case CPU_RM9000:
#ifdef CONFIG_RM7000_CPU_SCACHE
		rm7k_sc_init();
#endif
		return;

#if defined(CONFIG_CPU_LOONGSON2)
	case CPU_LOONGSON2:
		loongson2_sc_init();
		return;
#endif

	default:
		if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
		    c->isa_level == MIPS_CPU_ISA_M32R2 ||
		    c->isa_level == MIPS_CPU_ISA_M64R1 ||
		    c->isa_level == MIPS_CPU_ISA_M64R2) {
#ifdef CONFIG_MIPS_CPU_SCACHE
			if (mips_sc_init()) {
				scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
				printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n",
				       scache_size >> 10,
				       way_string[c->scache.ways], c->scache.linesz);
			}
#else
			if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT))
				panic("Dunno how to handle MIPS32 / MIPS64 second level cache");
#endif
			return;
		}
		sc_present = 0;
	}

	if (!sc_present)
		return;

	/* compute a couple of other cache variables */
	c->scache.waysize = scache_size / c->scache.ways;

	c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways);

	printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
	       scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);

	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
}

void au1x00_fixup_config_od(void)
{
	/*
	 * c0_config.od (bit 19) was write only (and read as 0) on the
	 * early revisions of Alchemy SOCs. It disables the bus
	 * transaction overlapping and needs to be set to fix various
	 * errata.
	 */
	switch (read_c0_prid()) {
	case 0x00030100: /* Au1000 DA */
	case 0x00030201: /* Au1000 HA */
	case 0x00030202: /* Au1000 HB */
	case 0x01030200: /* Au1500 AB */
	/*
	 * The Au1100 errata are actually silent about this bit, so we set
	 * it just in case for those revisions that require it to be set
	 * according to the (now gone) cpu table.
	 */
	case 0x02030200: /* Au1100 AB */
	case 0x02030201: /* Au1100 BA */
	case 0x02030202: /* Au1100 BC */
		set_c0_config(1 << 19);
		break;
	}
}

/* CP0 hazard avoidance. */
#define NXP_BARRIER()							\
	 __asm__ __volatile__(						\
	".set noreorder\n\t"						\
	"nop; nop; nop; nop; nop; nop;\n\t"				\
	".set reorder\n\t")

static void nxp_pr4450_fixup_config(void)
{
	unsigned long config0;

	config0 = read_c0_config();

	/* clear all three cache coherency fields */
	config0 &= ~(0x7 | (7 << 25) | (7 << 28));
	config0 |= (((_page_cachable_default >> _CACHE_SHIFT) <<  0) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 25) |
		    ((_page_cachable_default >> _CACHE_SHIFT) << 28));
	write_c0_config(config0);
	NXP_BARRIER();
}

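/*
 * "cca=" on the kernel command line overrides the cache coherency
 * algorithm that coherency_setup() programs into c0_config.
 */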
static int __cpuinitdata cca = -1;

static int __init cca_setup(char *str)
{
	get_option(&str, &cca);

	return 1;
}

__setup("cca=", cca_setup);

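/*
 * Program the default cache coherency attribute and apply per-CPU
 * coherency quirks (CONF_CU wiring, Alchemy OD bit, NXP PR4450).
 */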
static void __cpuinit coherency_setup(void)
{
	if (cca < 0 || cca > 7)
		cca = read_c0_config() & CONF_CM_CMASK;
	_page_cachable_default = cca << _CACHE_SHIFT;

	pr_debug("Using cache attribute %d\n", cca);
	change_c0_config(CONF_CM_CMASK, cca);

	/*
	 * c0_status.cu=0 specifies that updates by the sc instruction use
	 * the coherency mode specified by the TLB; 1 means cachable
	 * coherent update on write will be used. Not all processors have
	 * this bit; some wire it to zero, others like Toshiba had the
	 * silly idea of putting something else there ...
	 */
	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		clear_c0_config(CONF_CU);
		break;
	/*
	 * We need to catch the early Alchemy SOCs with
	 * the write-only c0_config.od bit and set it back to one on:
	 * Au1000 rev DA, HA, HB; Au1100 AB, BA, BC, Au1500 AB
	 */
	case CPU_ALCHEMY:
		au1x00_fixup_config_od();
		break;

	case PRID_IMP_PR4450:
		nxp_pr4450_fixup_config();
		break;
	}
}

#if defined(CONFIG_DMA_NONCOHERENT)

static int __cpuinitdata coherentio;

static int __init setcoherentio(char *str)
{
	coherentio = 1;

	return 1;
}

__setup("coherentio", setcoherentio);
#endif

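/*
 * Top-level cache initialisation: install the parity handler, probe
 * both cache levels, bind the blast helpers, and hook this file's
 * implementations into the generic MIPS cache-flush function pointers.
 */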
void __cpuinit r4k_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	extern char __weak except_vec2_generic;
	extern char __weak except_vec2_sb1;
	struct cpuinfo_mips *c = &current_cpu_data;

	switch (c->cputype) {
	case CPU_SB1:
	case CPU_SB1A:
		set_uncached_handler(0x100, &except_vec2_sb1, 0x80);
		break;

	default:
		set_uncached_handler(0x100, &except_vec2_generic, 0x80);
		break;
	}

	probe_pcache();
	setup_scache();

	r4k_blast_dcache_page_setup();
	r4k_blast_dcache_page_indexed_setup();
	r4k_blast_dcache_setup();
	r4k_blast_icache_page_setup();
	r4k_blast_icache_page_indexed_setup();
	r4k_blast_icache_setup();
	r4k_blast_scache_page_setup();
	r4k_blast_scache_page_indexed_setup();
	r4k_blast_scache_setup();

	/*
	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
	 * This code supports virtually indexed processors and will be
	 * unnecessarily inefficient on physically indexed processors.
	 */
	if (c->dcache.linesz)
		shm_align_mask = max_t(unsigned long,
				       c->dcache.sets * c->dcache.linesz - 1,
				       PAGE_SIZE - 1);
	else
		shm_align_mask = PAGE_SIZE - 1;

	__flush_cache_vmap = r4k__flush_cache_vmap;
	__flush_cache_vunmap = r4k__flush_cache_vunmap;

	flush_cache_all = cache_noop;
	__flush_cache_all = r4k___flush_cache_all;
	flush_cache_mm = r4k_flush_cache_mm;
	flush_cache_page = r4k_flush_cache_page;
	flush_cache_range = r4k_flush_cache_range;

	flush_cache_sigtramp = r4k_flush_cache_sigtramp;
	flush_icache_all = r4k_flush_icache_all;
	local_flush_data_cache_page = local_r4k_flush_data_cache_page;
	flush_data_cache_page = r4k_flush_data_cache_page;
	flush_icache_range = r4k_flush_icache_range;
	local_flush_icache_range = local_r4k_flush_icache_range;

#if defined(CONFIG_DMA_NONCOHERENT)
	if (coherentio) {
		_dma_cache_wback_inv = (void *)cache_noop;
		_dma_cache_wback = (void *)cache_noop;
		_dma_cache_inv = (void *)cache_noop;
	} else {
		_dma_cache_wback_inv = r4k_dma_cache_wback_inv;
		_dma_cache_wback = r4k_dma_cache_wback_inv;
		_dma_cache_inv = r4k_dma_cache_inv;
	}
#endif

	build_clear_page();
	build_copy_page();
#if !defined(CONFIG_MIPS_CMP)
	local_r4k___flush_cache_all(NULL);
#endif
	coherency_setup();
}