/*
 * r2300.c: R2000 and R3000 specific mmu/cache code.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 *
 * with a lot of changes to make this thing work for R3000s
 * Tx39XX R4k style caches added. HK
 * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
 * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/cacheops.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/isadep.h>
#include <asm/io.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>

/* For R3000 cores with R4000 style caches */
static unsigned long icache_size, dcache_size;	/* Size in bytes */

#include <asm/r4kcache.h>

extern int r3k_have_wired_reg;	/* in r3k-tlb.c */

/* This sequence is required to ensure icache is disabled immediately */
#define TX39_STOP_STREAMING() \
__asm__ __volatile__( \
	".set push\n\t" \
	".set noreorder\n\t" \
	"b 1f\n\t" \
	"nop\n\t" \
	"1:\n\t" \
	".set pop" \
	)

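/*
 * The taken branch plus nop above presumably drains the TX39's
 * instruction streaming/prefetch logic, so that the write clearing
 * TX39_CONF_ICE takes effect before any cache operation that follows.
 */
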
/* TX39H-style cache flush routines. */
static void tx39h_flush_icache_all(void)
{
	unsigned long start = KSEG0;
	unsigned long end = (start + icache_size);
	unsigned long flags, config;

	/* disable icache (set ICE#) */
	local_irq_save(flags);
	config = read_c0_conf();
	write_c0_conf(config & ~TX39_CONF_ICE);
	TX39_STOP_STREAMING();

	/* invalidate icache */
	while (start < end) {
		cache16_unroll32(start, Index_Invalidate_I);
		start += 0x200;
	}

	write_c0_conf(config);
	local_irq_restore(flags);
}

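/*
 * This handler is installed only for the TX3912 (TX39/H core), whose
 * data cache is write-through (see tx39_cache_init() below).  With no
 * dirty lines to write back, a DMA "writeback and invalidate" reduces
 * to a plain per-line Hit_Invalidate_D over the range.
 */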
static void tx39h_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	unsigned long end, a;
	unsigned long dc_lsize = current_cpu_data.dcache.linesz;

	/* Catch bad driver code */
	BUG_ON(size == 0);

	iob();
	a = addr & ~(dc_lsize - 1);
	end = (addr + size - 1) & ~(dc_lsize - 1);
	while (1) {
		invalidate_dcache_line(a);	/* Hit_Invalidate_D */
		if (a == end) break;
		a += dc_lsize;
	}
}


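/*
 * The blast_dcache16*() helpers from <asm/r4kcache.h> work on 16-byte
 * lines.  The TX3912 data cache uses 4-byte lines (see
 * tx39_probe_cache()), which is presumably why tx39_blast_dcache_page()
 * skips it; the TX3912 is handled by the TX39H routines above instead.
 */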
/* TX39H2,TX39H3 */
static inline void tx39_blast_dcache_page(unsigned long addr)
{
	if (current_cpu_data.cputype != CPU_TX3912)
		blast_dcache16_page(addr);
}

static inline void tx39_blast_dcache_page_indexed(unsigned long addr)
{
	blast_dcache16_page_indexed(addr);
}

static inline void tx39_blast_dcache(void)
{
	blast_dcache16();
}

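/*
 * The icache blast helpers below all follow the same pattern: disable
 * the icache by clearing TX39_CONF_ICE, stop instruction streaming,
 * perform the invalidation, then restore the previous CONF value, with
 * interrupts off for the whole sequence.
 */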
static inline void tx39_blast_icache_page(unsigned long addr)
{
	unsigned long flags, config;
	/* disable icache (set ICE#) */
	local_irq_save(flags);
	config = read_c0_conf();
	write_c0_conf(config & ~TX39_CONF_ICE);
	TX39_STOP_STREAMING();
	blast_icache16_page(addr);
	write_c0_conf(config);
	local_irq_restore(flags);
}

static inline void tx39_blast_icache_page_indexed(unsigned long addr)
{
	unsigned long flags, config;
	/* disable icache (set ICE#) */
	local_irq_save(flags);
	config = read_c0_conf();
	write_c0_conf(config & ~TX39_CONF_ICE);
	TX39_STOP_STREAMING();
	blast_icache16_page_indexed(addr);
	write_c0_conf(config);
	local_irq_restore(flags);
}

static inline void tx39_blast_icache(void)
{
	unsigned long flags, config;
	/* disable icache (set ICE#) */
	local_irq_save(flags);
	config = read_c0_conf();
	write_c0_conf(config & ~TX39_CONF_ICE);
	TX39_STOP_STREAMING();
	blast_icache16();
	write_c0_conf(config);
	local_irq_restore(flags);
}

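/*
 * flush_cache_all only has work to do when the data cache can alias
 * virtually; __flush_cache_all is the unconditional variant and always
 * blasts both caches.
 */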
static inline void tx39_flush_cache_all(void)
{
	if (!cpu_has_dc_aliases)
		return;

	tx39_blast_dcache();
	tx39_blast_icache();
}

static inline void tx39___flush_cache_all(void)
{
	tx39_blast_dcache();
	tx39_blast_icache();
}

static void tx39_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	if (cpu_context(smp_processor_id(), mm) != 0) {
		tx39_flush_cache_all();
	}
}

static void tx39_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	int exec;

	if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
		return;

	exec = vma->vm_flags & VM_EXEC;
	if (cpu_has_dc_aliases || exec)
		tx39_blast_dcache();
	if (exec)
		tx39_blast_icache();
}

static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
{
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have gotten
	 * this page into the cache.
	 */
	if (cpu_context(smp_processor_id(), mm) == 0)
		return;

	page &= PAGE_MASK;
	pgdp = pgd_offset(mm, page);
	pudp = pud_offset(pgdp, page);
	pmdp = pmd_offset(pudp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))
		return;

	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't flush too much of the cache.
	 */
	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
		if (cpu_has_dc_aliases || exec)
			tx39_blast_dcache_page(page);
		if (exec)
			tx39_blast_icache_page(page);

		return;
	}

	/*
	 * Do indexed flush, too much work to get the (possible) TLB refills
	 * to work correctly.
	 */
	page = (KSEG0 + (page & (dcache_size - 1)));
	if (cpu_has_dc_aliases || exec)
		tx39_blast_dcache_page_indexed(page);
	if (exec)
		tx39_blast_icache_page_indexed(page);
}

static void tx39_flush_data_cache_page(unsigned long addr)
{
	tx39_blast_dcache_page(addr);
}

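/*
 * For an icache range flush the affected dcache lines are written back
 * first (Hit_Writeback_Inv_D) so the icache refills pick up current
 * data, then the icache lines are invalidated.  Ranges larger than a
 * whole cache fall back to blasting that cache, which is presumably
 * cheaper than iterating line by line.
 */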
static void tx39_flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long dc_lsize = current_cpu_data.dcache.linesz;
	unsigned long addr, aend;

	if (end - start > dcache_size)
		tx39_blast_dcache();
	else {
		addr = start & ~(dc_lsize - 1);
		aend = (end - 1) & ~(dc_lsize - 1);

		while (1) {
			/* Hit_Writeback_Inv_D */
			protected_writeback_dcache_line(addr);
			if (addr == aend)
				break;
			addr += dc_lsize;
		}
	}

	if (end - start > icache_size)
		tx39_blast_icache();
	else {
		unsigned long flags, config;
		addr = start & ~(dc_lsize - 1);
		aend = (end - 1) & ~(dc_lsize - 1);
		/* disable icache (set ICE#) */
		local_irq_save(flags);
		config = read_c0_conf();
		write_c0_conf(config & ~TX39_CONF_ICE);
		TX39_STOP_STREAMING();
		while (1) {
			/* Hit_Invalidate_I */
			protected_flush_icache_line(addr);
			if (addr == aend)
				break;
			addr += dc_lsize;
		}
		write_c0_conf(config);
		local_irq_restore(flags);
	}
}

/*
 * Ok, this seriously sucks.  We use this to flush a user page but don't
 * know the virtual address, so we have to blast away the whole icache
 * which is significantly more expensive than the real thing.  On the
 * other hand we at least know the kernel address of the page so we can
 * flush it selectively.
 */
static void tx39_flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	unsigned long addr;
	/*
	 * If the page isn't executable, no icache flush is needed.
	 */
	if (!(vma->vm_flags & VM_EXEC))
		return;


	addr = (unsigned long) page_address(page);
	tx39_blast_dcache_page(addr);

	/*
	 * We're not sure of the virtual address(es) involved here, so
	 * we have to flush the entire I-cache.
	 */
	tx39_blast_icache();
}

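/*
 * DMA cache maintenance: page-aligned, page-multiple ranges are handled
 * page by page, ranges larger than the data cache blast the whole
 * cache, and anything else is walked line by line.
 */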
static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
		end = addr + size;
		do {
			tx39_blast_dcache_page(addr);
			addr += PAGE_SIZE;
		} while (addr != end);
	} else if (size > dcache_size) {
		tx39_blast_dcache();
	} else {
		unsigned long dc_lsize = current_cpu_data.dcache.linesz;
		a = addr & ~(dc_lsize - 1);
		end = (addr + size - 1) & ~(dc_lsize - 1);
		while (1) {
			flush_dcache_line(a);	/* Hit_Writeback_Inv_D */
			if (a == end) break;
			a += dc_lsize;
		}
	}
}

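/*
 * The invalidate-only variant uses Hit_Invalidate_D in the per-line
 * case, discarding cached data for the range without writing it back,
 * as is usual before DMA from a device into memory.
 */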
static void tx39_dma_cache_inv(unsigned long addr, unsigned long size)
{
	unsigned long end, a;

	if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
		end = addr + size;
		do {
			tx39_blast_dcache_page(addr);
			addr += PAGE_SIZE;
		} while (addr != end);
	} else if (size > dcache_size) {
		tx39_blast_dcache();
	} else {
		unsigned long dc_lsize = current_cpu_data.dcache.linesz;
		a = addr & ~(dc_lsize - 1);
		end = (addr + size - 1) & ~(dc_lsize - 1);
		while (1) {
			invalidate_dcache_line(a);	/* Hit_Invalidate_D */
			if (a == end) break;
			a += dc_lsize;
		}
	}
}

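/*
 * A signal trampoline is written by the kernel through a cacheable
 * mapping, so its dcache line is written back and the corresponding
 * icache line invalidated before the trampoline is executed.
 */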
static void tx39_flush_cache_sigtramp(unsigned long addr)
{
	unsigned long ic_lsize = current_cpu_data.icache.linesz;
	unsigned long dc_lsize = current_cpu_data.dcache.linesz;
	unsigned long config;
	unsigned long flags;

	protected_writeback_dcache_line(addr & ~(dc_lsize - 1));

	/* disable icache (set ICE#) */
	local_irq_save(flags);
	config = read_c0_conf();
	write_c0_conf(config & ~TX39_CONF_ICE);
	TX39_STOP_STREAMING();
	protected_flush_icache_line(addr & ~(ic_lsize - 1));
	write_c0_conf(config);
	local_irq_restore(flags);
}

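/*
 * Cache sizes are decoded from the CONF register: the ICS and DCS
 * fields give the size as 1 << (10 + field) bytes, i.e. 1KB scaled up
 * by a power of two.
 */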
static __init void tx39_probe_cache(void)
{
	unsigned long config;

	config = read_c0_conf();

	icache_size = 1 << (10 + ((config & TX39_CONF_ICS_MASK) >>
				  TX39_CONF_ICS_SHIFT));
	dcache_size = 1 << (10 + ((config & TX39_CONF_DCS_MASK) >>
				  TX39_CONF_DCS_SHIFT));

	current_cpu_data.icache.linesz = 16;
	switch (current_cpu_data.cputype) {
	case CPU_TX3912:
		current_cpu_data.icache.ways = 1;
		current_cpu_data.dcache.ways = 1;
		current_cpu_data.dcache.linesz = 4;
		break;

	case CPU_TX3927:
		current_cpu_data.icache.ways = 2;
		current_cpu_data.dcache.ways = 2;
		current_cpu_data.dcache.linesz = 16;
		break;

	case CPU_TX3922:
	default:
		current_cpu_data.icache.ways = 1;
		current_cpu_data.dcache.ways = 1;
		current_cpu_data.dcache.linesz = 16;
		break;
	}
}

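/*
 * Select the cache handling routines for the detected TX39 variant.
 * TX39_CONF_WBON is cleared up front, presumably putting the data
 * cache into write-through mode; board-specific code may turn
 * write-back on again later (see the comment in the TX39/H2,H3 case).
 */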
void __init tx39_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	unsigned long config;

	config = read_c0_conf();
	config &= ~TX39_CONF_WBON;
	write_c0_conf(config);

	tx39_probe_cache();

	switch (current_cpu_data.cputype) {
	case CPU_TX3912:
		/* TX39/H core (writethru direct-map cache) */
		flush_cache_all = tx39h_flush_icache_all;
		__flush_cache_all = tx39h_flush_icache_all;
		flush_cache_mm = (void *) tx39h_flush_icache_all;
		flush_cache_range = (void *) tx39h_flush_icache_all;
		flush_cache_page = (void *) tx39h_flush_icache_all;
		flush_icache_page = (void *) tx39h_flush_icache_all;
		flush_icache_range = (void *) tx39h_flush_icache_all;

		flush_cache_sigtramp = (void *) tx39h_flush_icache_all;
		flush_data_cache_page = (void *) tx39h_flush_icache_all;

		_dma_cache_wback_inv = tx39h_dma_cache_wback_inv;

		shm_align_mask = PAGE_SIZE - 1;

		break;

	case CPU_TX3922:
	case CPU_TX3927:
	default:
		/* TX39/H2,H3 core (writeback 2way-set-associative cache) */
		r3k_have_wired_reg = 1;
		write_c0_wired(0);	/* set 8 on reset... */
		/* board-dependent init code may set WBON */

		flush_cache_all = tx39_flush_cache_all;
		__flush_cache_all = tx39___flush_cache_all;
		flush_cache_mm = tx39_flush_cache_mm;
		flush_cache_range = tx39_flush_cache_range;
		flush_cache_page = tx39_flush_cache_page;
		flush_icache_page = tx39_flush_icache_page;
		flush_icache_range = tx39_flush_icache_range;

		flush_cache_sigtramp = tx39_flush_cache_sigtramp;
		flush_data_cache_page = tx39_flush_data_cache_page;

		_dma_cache_wback_inv = tx39_dma_cache_wback_inv;
		_dma_cache_wback = tx39_dma_cache_wback_inv;
		_dma_cache_inv = tx39_dma_cache_inv;

		shm_align_mask = max_t(unsigned long,
				       (dcache_size / current_cpu_data.dcache.ways) - 1,
				       PAGE_SIZE - 1);

		break;
	}

	current_cpu_data.icache.waysize = icache_size / current_cpu_data.icache.ways;
	current_cpu_data.dcache.waysize = dcache_size / current_cpu_data.dcache.ways;

	current_cpu_data.icache.sets =
		current_cpu_data.icache.waysize / current_cpu_data.icache.linesz;
	current_cpu_data.dcache.sets =
		current_cpu_data.dcache.waysize / current_cpu_data.dcache.linesz;

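	/*
	 * If one cache way is larger than a page, two virtual addresses
	 * mapping the same physical page can land in different cache
	 * lines, so the data cache is flagged as virtually aliasing.
	 * The shm_align_mask choice above follows the same reasoning.
	 */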
	if (current_cpu_data.dcache.waysize > PAGE_SIZE)
		current_cpu_data.dcache.flags |= MIPS_CACHE_ALIASES;

	current_cpu_data.icache.waybit = 0;
	current_cpu_data.dcache.waybit = 0;

	printk("Primary instruction cache %ldkB, linesize %d bytes\n",
		icache_size >> 10, current_cpu_data.icache.linesz);
	printk("Primary data cache %ldkB, linesize %d bytes\n",
		dcache_size >> 10, current_cpu_data.dcache.linesz);

	build_clear_page();
	build_copy_page();
	tx39h_flush_icache_all();
}