/*
 * TLB Management (flush/create/diagnostics) for ARC700
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: Aug 2011
 *  -Reintroduce duplicate PD fixup - some customer chips still have the issue
 *
 * vineetg: May 2011
 *  -No need to flush_cache_page( ) for each call to update_mmu_cache()
 *   some of the LMBench tests improved amazingly
 *	= page-fault thrice as fast (75 usec to 28 usec)
 *	= mmap twice as fast (9.6 msec to 4.6 msec),
 *	= fork (5.3 msec to 3.7 msec)
 *
 * vineetg: April 2011 :
 *  -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore,
 *	helps avoid a shift when preparing PD0 from PTE
 *
 * vineetg: April 2011 : Preparing for MMU V3
 *  -MMU v2/v3 BCRs decoded differently
 *  -Remove TLB_SIZE hardcoding as it's variable now: 256 or 512
 *  -tlb_entry_erase( ) can be void
 *  -local_flush_tlb_range( ):
 *	= need not "ceil" @end
 *	= walks MMU only if range spans < 32 entries, as opposed to 256
 *
 * Vineetg: Sept 10th 2008
 *  -Changes related to MMU v2 (Rel 4.8)
 *
 * Vineetg: Aug 29th 2008
 *  -In TLB Flush operations (Metal Fix MMU) there is an explicit command to
 *   flush Micro-TLBs. If the TLB Index Reg is invalid prior to the TLBIVUTLB
 *   cmd, it fails. Thus it needs to be loaded with ANY valid value before
 *   invoking the TLBIVUTLB cmd
 *
 * Vineetg: Aug 21st 2008:
 *  -Reduced the duration of IRQ lockouts in TLB Flush routines
 *  -Multiple copies of TLB erase code separated into a "single" function
 *  -In TLB Flush routines, interrupt disabling moved UP to retrieve ASID
 *   in interrupt-safe region.
 *
 * Vineetg: April 23rd Bug #93131
 *    Problem: tlb_flush_kernel_range() doesn't do anything if the range to
 *             flush is more than the size of TLB itself.
 *
 * Rahul Trivedi : Codito Technologies 2004
 */

#include <linux/module.h>
#include <asm/arcregs.h>
#include <asm/setup.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Need for ARC MMU v2
 *
 * ARC700 MMU-v1 had a Joint-TLB for Code and Data and is 2 way set-assoc.
 * For a memcpy operation with 3 players (src/dst/code) such that all 3 pages
 * map into same set, there would be contention for the 2 ways causing severe
 * Thrashing.
 *
 * Although the J-TLB is 2 way set assoc, ARC700 caches J-TLB entries into
 * uTLBs which have much higher associativity: u-D-TLB is 8 ways, u-I-TLB is
 * 4 ways. Given this, the thrashing problem should never happen because once
 * the 3 J-TLB entries are created (even though the 3rd will knock out one of
 * the prev two), the u-D-TLB and u-I-TLB will have what is required to
 * accomplish the memcpy.
 *
 * Yet we still see the Thrashing because a J-TLB Write causes a flush of the
 * u-TLBs. This is a simple design for keeping them in sync. So what do we do?
 * The solution which James came up with was pretty neat. It utilised the
 * associativity of the uTLBs by not invalidating always, but only when
 * absolutely necessary.
 *
 * - Existing TLB commands work as before
 * - New command (TLBWriteNI) for TLB write without clearing uTLBs
 * - New command (TLBIVUTLB) to invalidate uTLBs.
 *
 * The uTLBs need only be invalidated when pages are being removed from the
 * OS page table. If a 'victim' TLB entry is being overwritten in the main TLB
 * as a result of a miss, the removed entry is still allowed to exist in the
 * uTLBs as it is still valid and present in the OS page table. This allows the
 * full associativity of the uTLBs to hide the limited associativity of the
 * main TLB.
 *
 * During a miss handler, the new "TLBWriteNI" command is used to load
 * entries without clearing the uTLBs (see the sketch right after this
 * comment).
 *
 * When the OS page table is updated, TLB entries that may be associated with a
 * removed page are removed (flushed) from the TLB using TLBWrite. In this
 * circumstance, the uTLBs must also be cleared. This is done by using the
 * existing TLBWrite command. An explicit IVUTLB is also required for those
 * corner cases when TLBWrite was not executed at all because the corresponding
 * J-TLB entry got evicted/replaced.
 */
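
/*
 * Illustrative sketch of the miss-path refill described above (the real
 * fast-path handler is in assembly, not in this file). Assuming PD0/PD1
 * have already been composed, TLBWriteNI commits the entry without
 * touching the uTLBs:
 *
 *	write_aux_reg(ARC_REG_TLBPD0, pd0);
 *	write_aux_reg(ARC_REG_TLBPD1, pd1);
 *	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
 */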

/* A copy of the ASID from the PID reg is kept in asid_cache */
int asid_cache = FIRST_ASID;

/* ASID to mm struct mapping. We have one extra entry corresponding to
 * NO_ASID to save us a compare when clearing the mm entry for old asid
 * see get_new_mmu_context (asm-arc/mmu_context.h)
 */
struct mm_struct *asid_mm_map[NUM_ASID + 1];
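
/*
 * Rough sketch of why the spare slot helps (the real code lives in
 * asm-arc/mmu_context.h): retiring an mm's old ASID can be done
 * unconditionally, since even NO_ASID indexes a (dummy) slot:
 *
 *	asid_mm_map[mm->context.asid] = NULL;	(no NO_ASID compare needed)
 */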

/*
 * Utility Routine to erase a J-TLB entry
 * The procedure is to look it up in the MMU. If found, ERASE it by
 * issuing a TlbWrite CMD with PD0 = PD1 = 0
 */

static void __tlb_entry_erase(void)
{
	write_aux_reg(ARC_REG_TLBPD1, 0);
	write_aux_reg(ARC_REG_TLBPD0, 0);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	unsigned int idx;

	/* Locate the TLB entry for this vaddr + ASID */
	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	/* No error means entry found, zero it out */
	if (likely(!(idx & TLB_LKUP_ERR))) {
		__tlb_entry_erase();
	} else {		/* Some sort of Error */

		/* Duplicate entry error */
		if (idx & 0x1) {
			/* TODO we need to handle this case too */
			pr_emerg("unhandled Duplicate flush for %x\n",
				 vaddr_n_asid);
		}
		/* else entry not found so nothing to do */
	}
}
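
/*
 * Typical call, as made by the flush routines below - PD0 is the
 * page-aligned vaddr OR'ed with the 8-bit ASID:
 *
 *	tlb_entry_erase((vaddr & PAGE_MASK) | (asid & 0xff));
 */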

/****************************************************************************
 * ARC700 MMU caches recently used J-TLB entries (RAM) as uTLBs (FLOPs)
 *
 * New IVUTLB cmd in MMU v2 explicitly invalidates the uTLB
 *
 * utlb_invalidate ( )
 *  -For v2 MMU calls Flush uTLB Cmd
 *  -For v1 MMU does nothing (except for Metal Fix v1 MMU)
 *	This is because in v1 TLBWrite itself invalidates the uTLBs
 ***************************************************************************/

static void utlb_invalidate(void)
{
#if (CONFIG_ARC_MMU_VER >= 2)

#if (CONFIG_ARC_MMU_VER < 3)
	/* MMU v2 introduced the uTLB Flush command.
	 * There was however an obscure hardware bug, where uTLB flush would
	 * fail when a prior probe for J-TLB (both totally unrelated) would
	 * return lkup err - because the entry didn't exist in MMU.
	 * The Workaround was to set the Index reg with some valid value,
	 * prior to the flush. This was fixed in MMU v3 hence not needed
	 * any more
	 */
	unsigned int idx;

	/* make sure INDEX Reg is valid */
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	/* If not write some dummy val */
	if (unlikely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBINDEX, 0xa);
#endif

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
#endif

}

/*
 * Unconditionally (without lookup) erase the entire MMU contents
 */

noinline void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned int entry;
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;

	local_irq_save(flags);

	/* Load PD0 and PD1 with template for a Blank Entry */
	write_aux_reg(ARC_REG_TLBPD1, 0);
	write_aux_reg(ARC_REG_TLBPD0, 0);

	for (entry = 0; entry < mmu->num_tlb; entry++) {
		/* write this entry to the TLB */
		write_aux_reg(ARC_REG_TLBINDEX, entry);
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
	}

	utlb_invalidate();

	local_irq_restore(flags);
}

/*
 * Flush the entire MM for userland. The fastest way is to move to Next ASID
 */
noinline void local_flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * Small optimisation courtesy IA64
	 * flush_mm called during fork, exit, munmap etc, multiple times as
	 * well. Only for fork( ) do we need to move parent to a new MMU ctxt,
	 * all other cases are NOPs, hence this check.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	/*
	 * Workaround for Android weirdism:
	 * A binder VMA could end up in a task such that vma->mm != tsk->mm
	 * old code would cause h/w - s/w ASID to get out of sync
	 */
	if (current->mm != mm)
		destroy_context(mm);
	else
		get_new_mmu_context(mm);
}

/*
 * Flush a Range of TLB entries for userland.
 * @start is inclusive, while @end is exclusive
 * Difference between this and Kernel Range Flush is
 *  -Here the fastest way (if range is too large) is to move to next ASID
 *      without doing any explicit Shootdown
 *  -In case of kernel Flush, entry has to be shot down explicitly
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	unsigned long flags;
	unsigned int asid;

	/* If range @start to @end is more than 32 TLB entries deep,
	 * it's better to move to a new ASID rather than searching for
	 * individual entries and then shooting them down
	 *
	 * The calc above is rough, doesn't account for unaligned parts,
	 * since this is heuristics based anyways (see the note after this
	 * function for the actual numbers)
	 */
	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_mm(vma->vm_mm);
		return;
	}

	/*
	 * @start moved to page start: this alone suffices for checking
	 * loop end condition below, w/o need for aligning @end to end
	 * e.g. 2000 to 4001 will anyhow loop twice
	 */
	start &= PAGE_MASK;

	local_irq_save(flags);
	asid = vma->vm_mm->context.asid;

	if (asid != NO_ASID) {
		while (start < end) {
			tlb_entry_erase(start | (asid & 0xff));
			start += PAGE_SIZE;
		}
	}

	utlb_invalidate();

	local_irq_restore(flags);
}
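
/*
 * Back-of-envelope for the 32-entry cutoff above: with 8k pages (a common
 * config here; PAGE_SIZE is build-time selectable), PAGE_SIZE * 32 = 256k.
 * So a sub-256k range is erased page-by-page, while anything larger just
 * takes a new ASID via local_flush_tlb_mm().
 */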

/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
 *  @start, @end interpreted as kvaddr
 * Interestingly, shared TLB entries can also be flushed using just
 * @start,@end alone (interpreted as user vaddr), although technically SASID
 * is also needed. However our smart TLBProbe lookup takes care of that.
 */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;

	/* exactly same as above, except for TLB entry not taking ASID */

	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_all();
		return;
	}

	start &= PAGE_MASK;

	local_irq_save(flags);
	while (start < end) {
		tlb_entry_erase(start);
		start += PAGE_SIZE;
	}

	utlb_invalidate();

	local_irq_restore(flags);
}

/*
 * Delete TLB entry in MMU for a given page (user virtual address)
 * NOTE One TLB entry contains translation for a single PAGE
 */

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	unsigned long flags;

	/* Note that it is critical that interrupts are DISABLED between
	 * checking the ASID and using it to flush the TLB entry
	 */
	local_irq_save(flags);

	if (vma->vm_mm->context.asid != NO_ASID) {
		tlb_entry_erase((page & PAGE_MASK) |
				(vma->vm_mm->context.asid & 0xff));
		utlb_invalidate();
	}

	local_irq_restore(flags);
}

/*
 * Routine to create a TLB entry
 */
void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	unsigned long flags;
	unsigned int idx, asid_or_sasid;
	unsigned long pd0_flags;

	/*
	 * create_tlb() assumes that current->mm == vma->vm_mm, since
	 * -the ASID for the TLB entry is fetched from the MMU ASID reg
	 *  (valid for curr tsk)
	 * -it completes the lazy write to the SASID reg (again valid for
	 *  curr tsk)
	 *
	 * Removing the assumption involves
	 * -Using vma->vm_mm->context{ASID,SASID}, as opposed to the MMU reg.
	 * -Fixing the TLB paranoid debug code to not trigger false negatives.
	 * -More importantly it makes this handler inconsistent with fast-path
	 *  TLB Refill handler which always deals with "current"
	 *
	 * Let's see the use cases when current->mm != vma->vm_mm and we land
	 * here:
	 *  1. execve->copy_strings()->__get_user_pages->handle_mm_fault
	 *     Here VM wants to pre-install a TLB entry for user stack while
	 *     current->mm still points to pre-execve mm (hence the condition).
	 *     However the stack vaddr is soon relocated (randomization) and
	 *     move_page_tables() tries to undo that TLB entry.
	 *     Thus not creating TLB entry is not any worse.
	 *
	 *  2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
	 *     breakpoint in debugged task. Not creating a TLB now is not
	 *     performance critical.
	 *
	 * Both the cases above are not good enough for code churn.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	tlb_paranoid_check(vma->vm_mm->context.asid, address);

	address &= PAGE_MASK;

	/* update this PTE credentials */
	pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);

	/* Create HW TLB entry Flags (in PD0) from PTE Flags */
#if (CONFIG_ARC_MMU_VER <= 2)
	pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0) >> 1);
#else
	pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0));
#endif

	/* ASID for this task */
	asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;

	write_aux_reg(ARC_REG_TLBPD0, address | pd0_flags | asid_or_sasid);

	/* Load remaining info in PD1 (Page Frame Addr and Kx/Kw/Kr Flags) */
	write_aux_reg(ARC_REG_TLBPD1, (pte_val(*ptep) & PTE_BITS_IN_PD1));

	/* First verify if entry for this vaddr+ASID already exists */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	/*
	 * If Not already present get a free slot from MMU.
	 * Otherwise, Probe would have located the entry and set INDEX Reg
	 * with existing location. This will cause Write CMD to over-write
	 * existing entry with new PD0 and PD1
	 */
	if (likely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);

	/*
	 * Commit the Entry to MMU
	 * It doesn't sound safe to use the TLBWriteNI cmd here
	 * which doesn't flush uTLBs. I'd rather be safe than sorry.
	 */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);

	local_irq_restore(flags);
}

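/*
 * Note on the "#if (CONFIG_ARC_MMU_VER <= 2)" shift in create_tlb() above:
 * in MMU v1/v2 the PTE flag bits sit one position off from their PD0 slots,
 * hence the ">> 1"; MMU v3 re-laid-out PD0 so the PTE bits drop in
 * unshifted (see the April 2011 changelog at the top of this file).
 */
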
/*
 * Called at the end of pagefault, for a userspace mapped page
 *  -pre-install the corresponding TLB entry into MMU
 *  -Finalize the delayed D-cache flush of kernel mapping of page due to
 *	flush_dcache_page(), copy_user_page()
 *
 * Note that flush (when done) involves both WBACK - so physical page is
 * in sync as well as INV - so any non-congruent aliases don't remain
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
		      pte_t *ptep)
{
	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
	unsigned long paddr = pte_val(*ptep) & PAGE_MASK;

	create_tlb(vma, vaddr, ptep);

	/*
	 * Exec page : Independent of aliasing/page-color considerations,
	 *	       since icache doesn't snoop dcache on ARC, any dirty
	 *	       K-mapping of a code page needs to be wback+inv so that
	 *	       icache fetch by userspace sees code correctly.
	 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
	 *	       so userspace sees the right data.
	 *	       (Avoids the flush for Non-exec + congruent mapping case)
	 */
	if ((vma->vm_flags & VM_EXEC) ||
	    addr_not_cache_congruent(paddr, vaddr)) {
		struct page *page = pfn_to_page(pte_pfn(*ptep));

		int dirty = test_and_clear_bit(PG_arch_1, &page->flags);
		if (dirty) {
			/* wback + inv dcache lines */
			__flush_dcache_page(paddr, paddr);

			/* invalidate any existing icache lines */
			if (vma->vm_flags & VM_EXEC)
				__inv_icache_page(paddr, vaddr);
		}
	}
}

/* Read the Cache Build Configuration Registers, Decode them and save into
 * the cpuinfo structure for later use.
 * No Validation is done here, simply read/convert the BCRs
 */
void __cpuinit read_decode_mmu_bcr(void)
{
	unsigned int tmp;
	struct bcr_mmu_1_2 *mmu2;	/* encoded MMU2 attr */
	struct bcr_mmu_3 *mmu3;		/* encoded MMU3 attr */
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;

	tmp = read_aux_reg(ARC_REG_MMU_BCR);
	mmu->ver = (tmp >> 24);

	if (mmu->ver <= 2) {
		mmu2 = (struct bcr_mmu_1_2 *)&tmp;
		mmu->pg_sz = PAGE_SIZE;
		mmu->sets = 1 << mmu2->sets;
		mmu->ways = 1 << mmu2->ways;
		mmu->u_dtlb = mmu2->u_dtlb;
		mmu->u_itlb = mmu2->u_itlb;
	} else {
		mmu3 = (struct bcr_mmu_3 *)&tmp;
		mmu->pg_sz = 512 << mmu3->pg_sz;
		mmu->sets = 1 << mmu3->sets;
		mmu->ways = 1 << mmu3->ways;
		mmu->u_dtlb = mmu3->u_dtlb;
		mmu->u_itlb = mmu3->u_itlb;
	}

	mmu->num_tlb = mmu->sets * mmu->ways;
}
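
/*
 * Worked example of the v3 page-size decode above: pg_sz is a log2-style
 * field with a 512 byte base, so e.g. mmu3->pg_sz == 4 gives
 * 512 << 4 = 8k pages.
 */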

char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;

	n += scnprintf(buf + n, len - n, "ARC700 MMU [v%x]\t: %dk PAGE, ",
		       p_mmu->ver, TO_KB(p_mmu->pg_sz));

	n += scnprintf(buf + n, len - n,
		       "J-TLB %d (%dx%d), uDTLB %d, uITLB %d, %s\n",
		       p_mmu->num_tlb, p_mmu->sets, p_mmu->ways,
		       p_mmu->u_dtlb, p_mmu->u_itlb,
		       IS_ENABLED(CONFIG_ARC_MMU_SASID) ? "SASID" : "");

	return buf;
}
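
/*
 * Illustrative boot-log line this produces (example values for a v2 MMU,
 * per the geometry notes near the top of this file):
 *
 *	ARC700 MMU [v2]	: 8k PAGE, J-TLB 256 (128x2), uDTLB 8, uITLB 4,
 */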

void __cpuinit arc_mmu_init(void)
{
	char str[256];
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;

	printk(arc_mmu_mumbojumbo(0, str, sizeof(str)));

	/* For efficiency's sake, the kernel is compile-time built for one MMU
	 * ver. This must match the hardware it is running on.
	 * Linux built for MMU V2, if run on MMU V1 will break down because V1
	 * hardware doesn't understand cmds such as WriteNI, or IVUTLB.
	 * On the other hand, Linux built for V1 if run on MMU V2 will do
	 * un-needed workarounds to prevent memcpy thrashing.
	 * Similarly MMU V3 has new features which won't work on older MMUs.
	 */
	if (mmu->ver != CONFIG_ARC_MMU_VER) {
		panic("MMU ver %d doesn't match kernel built for %d...\n",
		      mmu->ver, CONFIG_ARC_MMU_VER);
	}

	if (mmu->pg_sz != PAGE_SIZE)
		panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));

	/*
	 * ASID mgmt data structures are compile time init
	 * asid_cache = FIRST_ASID and asid_mm_map[] all zeroes
	 */

	local_flush_tlb_all();

	/* Enable the MMU */
	write_aux_reg(ARC_REG_PID, MMU_ENABLE);

	/* In smp we use this reg for interrupt 1 scratch */
#ifndef CONFIG_SMP
	/* swapper_pg_dir is the pgd for the kernel, used by vmalloc */
	write_aux_reg(ARC_REG_SCRATCH_DATA0, swapper_pg_dir);
#endif
}

/*
 * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
 * The mapping is Column-first.
 *		---------------------	-----------
 *		|way0|way1|way2|way3|	|way0|way1|
 *		---------------------	-----------
 * [set0]	|  0 |  1 |  2 |  3 |	|  0 |  1 |
 * [set1]	|  4 |  5 |  6 |  7 |	|  2 |  3 |
 * ~		~			~
 * [set127]	| 508| 509| 510| 511|	| 254| 255|
 *		---------------------	-----------
 * For normal operations we don't (must not) care how the above works since
 * MMU cmd getIndex(vaddr) abstracts that out.
 * However for walking WAYS of a SET, we need to know this
 */
#define SET_WAY_TO_IDX(mmu, set, way)  ((set) * mmu->ways + (way))
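
/*
 * e.g. in the 4-way layout above, (set1, way2) maps to linear index
 * SET_WAY_TO_IDX(mmu, 1, 2) == 1 * 4 + 2 == 6, matching the table.
 */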

/* Handling of Duplicate PD (TLB entry) in MMU.
 * -Could be due to buggy customer tapeouts or obscure kernel bugs
 * -MMU complains not at the time of duplicate PD installation, but at the
 *	time of lookup matching multiple ways.
 * -Ideally these should never happen - but if they do - workaround by deleting
 *	the duplicate one.
 * -Knob to be verbose about it. (TODO: hook them up to debugfs)
 */
volatile int dup_pd_verbose = 1;	/* Be silent abt it or complain (default) */

void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
			  struct pt_regs *regs)
{
	int set, way, n;
	unsigned int pd0[4], pd1[4];	/* assume max 4 ways */
	unsigned long flags, is_valid;
	struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;

	local_irq_save(flags);

	/* re-enable the MMU */
	write_aux_reg(ARC_REG_PID, MMU_ENABLE | read_aux_reg(ARC_REG_PID));

	/* loop thru all sets of TLB */
	for (set = 0; set < mmu->sets; set++) {

		/* read out all the ways of current set */
		for (way = 0, is_valid = 0; way < mmu->ways; way++) {
			write_aux_reg(ARC_REG_TLBINDEX,
				      SET_WAY_TO_IDX(mmu, set, way));
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
			pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
			pd1[way] = read_aux_reg(ARC_REG_TLBPD1);
			is_valid |= pd0[way] & _PAGE_PRESENT;
		}

		/* If all the WAYS in SET are empty, skip to next SET */
		if (!is_valid)
			continue;

		/* Scan the set for duplicate ways: needs a nested loop */
		for (way = 0; way < mmu->ways; way++) {
			if (!pd0[way])
				continue;

			for (n = way + 1; n < mmu->ways; n++) {
				if ((pd0[way] & PAGE_MASK) ==
				    (pd0[n] & PAGE_MASK)) {

					if (dup_pd_verbose) {
						pr_info("Duplicate PD's @[%d:%d]/[%d:%d]\n",
							set, way, set, n);
						pr_info("TLBPD0[%u]: %08x\n",
							way, pd0[way]);
					}

					/*
					 * clear entry @way and not @n. This is
					 * critical to our optimised loop
					 */
					pd0[way] = pd1[way] = 0;
					write_aux_reg(ARC_REG_TLBINDEX,
						SET_WAY_TO_IDX(mmu, set, way));
					__tlb_entry_erase();
				}
			}
		}
	}

	local_irq_restore(flags);
}

/***********************************************************************
 * Diagnostic Routines
 *  -Called from Low Level TLB Handlers if things don't look good
 **********************************************************************/

#ifdef CONFIG_ARC_DBG_TLB_PARANOIA

/*
 * Low Level ASM TLB handler calls this if it finds that HW and SW ASIDs
 * don't match
 */
void print_asid_mismatch(int is_fast_path)
{
	int pid_sw, pid_hw;
	pid_sw = current->active_mm->context.asid;
	pid_hw = read_aux_reg(ARC_REG_PID) & 0xff;

	pr_emerg("ASID Mismatch in %s Path Handler: sw-pid=0x%x hw-pid=0x%x\n",
		 is_fast_path ? "Fast" : "Slow", pid_sw, pid_hw);

	__asm__ __volatile__("flag 1");
}

void tlb_paranoid_check(unsigned int pid_sw, unsigned long addr)
{
	unsigned int pid_hw;

	pid_hw = read_aux_reg(ARC_REG_PID) & 0xff;

	if (addr < 0x70000000 && ((pid_hw != pid_sw) || (pid_sw == NO_ASID)))
		print_asid_mismatch(0);
}
#endif