// SPDX-License-Identifier: GPL-2.0
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>
int split_tlb __ro_after_init;
int dcache_stride __ro_after_init;
int icache_stride __ro_after_init;
EXPORT_SYMBOL(dcache_stride);
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);
/* On some machines (i.e., ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software. We need a spinlock around all TLB flushes to ensure
 * this.
 */
DEFINE_SPINLOCK(pa_tlb_flush_lock);
/* Swapper page setup lock. */
DEFINE_SPINLOCK(pa_swapper_pg_lock);
#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
int pa_serialize_tlb_flushes __ro_after_init;
#endif
struct pdc_cache_info cache_info __ro_after_init;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __ro_after_init;
#endif
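
/* Run the per-CPU local flush helpers on every online CPU. */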
#ifdef CONFIG_SMP
void flush_data_cache(void)
{
	on_each_cpu(flush_data_cache_local, NULL, 1);
}

void flush_instruction_cache(void)
{
	on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif
void flush_cache_all_local(void)
{
	flush_instruction_cache_local(NULL);
	flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);
/* Virtual address of pfn. */
#define pfn_va(pfn)	__va(PFN_PHYS(pfn))
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	unsigned long pfn = pte_pfn(*ptep);
	struct page *page;

	/* We don't have pte special.  As a result, we can be called with
	   an invalid pfn and we don't need to flush the kernel dcache page.
	   This occurs with FireGL card in C8000. */
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (page_mapping_file(page) &&
	    test_bit(PG_dcache_dirty, &page->flags)) {
		flush_kernel_dcache_page_addr(pfn_va(pfn));
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(pfn_va(pfn));
}
void show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024 );
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT":"WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D":""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
	);
#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size==0) {
		seq_printf(m, "BTLB\t\t: not supported\n" );
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}
void __init parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");
#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("IT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.it_sp_base,
		cache_info.it_sp_stride,
		cache_info.it_sp_count,
		cache_info.it_loop,
		cache_info.it_off_base,
		cache_info.it_off_stride,
		cache_info.it_off_count);

	printk("DT  base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n",
		cache_info.dt_sp_base,
		cache_info.dt_sp_stride,
		cache_info.dt_sp_count,
		cache_info.dt_loop,
		cache_info.dt_off_base,
		cache_info.dt_off_stride,
		cache_info.dt_off_count);

	printk("ic_conf = 0x%lx  alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk("	wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_sr);

	printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_sr);
#endif
	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
				"Will flush I/D separately (could be optimized).\n");
		split_tlb = 1;
	}
238 /* "New and Improved" version from Jim Hull
239 * (1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
240 * The following CAFL_STRIDE is an optimized version, see
241 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
242 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
244 #define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
245 dcache_stride
= CAFL_STRIDE(cache_info
.dc_conf
);
246 icache_stride
= CAFL_STRIDE(cache_info
.ic_conf
);
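
	/* For example, with hypothetical PDC values cc_line = 2, cc_block = 1
	 * and cc_shift = 3, CAFL_STRIDE evaluates to 2 << (3 + 1 + 3) = 256,
	 * i.e. a 256-byte stride between lines in the flush loops. */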
#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
#endif
	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}
void __init disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits;

	switch (boot_cpu_data.cpu_type) {
	case pcx: /* We shouldn't get this far.  setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2: /* pcxl2 doesn't support space register hashing */
		return;

	default: /* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}
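
/* Flush a user page through the special data cache flush alias; for
 * executable mappings the instruction cache is flushed as well. */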
static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	preempt_disable();
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}
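
/* Like __flush_cache_page(), but purge the data cache: the lines are
 * invalidated without being written back to memory. */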
static inline void
__purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	preempt_disable();
	purge_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
	preempt_enable();
}
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);
	struct vm_area_struct *mpnt;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page(page);

	if (!mapping)
		return;

	pgoff = page->index;

	/* We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent */

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;

		/* The TLB is the engine of coherence on parisc: The
		 * CPU is entitled to speculate any page with a TLB
		 * mapping, so here we kill the mapping then flush the
		 * page along a special flush only alias mapping.
		 * This guarantees that the page is no-longer in the
		 * cache for any process and nor may it be
		 * speculatively read in (until the user or kernel
		 * specifically accesses it, of course) */

		flush_tlb_page(mpnt, addr);
		if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
				      != (addr & (SHM_COLOUR - 1))) {
			__flush_cache_page(mpnt, addr, page_to_phys(page));
			if (parisc_requires_coherency() && old_addr)
				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
					old_addr, addr, mpnt->vm_file);
			old_addr = addr;
		}
	}
	flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);
/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);
#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD;

#define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */
static unsigned long parisc_tlb_flush_threshold __ro_after_init = ~0UL;
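
/* Time a whole-cache flush against a range flush of the kernel image,
 * using the cr16 interval timer (mfctl(16)), and derive the break-even
 * thresholds above which a full cache/TLB flush is cheaper. */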
void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size;
	unsigned long threshold;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = L1_CACHE_ALIGN(size * alltime / rangetime);
	if (threshold > cache_info.dc_size)
		threshold = cache_info.dc_size;
	if (threshold)
		parisc_cache_flush_threshold = threshold;
	printk(KERN_INFO "Cache flush threshold set to %lu KiB\n",
		parisc_cache_flush_threshold/1024);

	/* calculate TLB flush threshold */

	/* On SMP machines, skip the TLB measure of kernel text which
	 * has been mapped as huge pages. */
	if (num_online_cpus() > 1 && !parisc_requires_coherency()) {
		threshold = max(cache_info.it_size, cache_info.dt_size);
		threshold *= PAGE_SIZE;
		threshold /= num_online_cpus();
		goto set_tlb_threshold;
	}

	size = (unsigned long)_end - (unsigned long)_text;
	rangetime = mfctl(16);
	flush_tlb_kernel_range((unsigned long)_text, (unsigned long)_end);
	rangetime = mfctl(16) - rangetime;

	alltime = mfctl(16);
	flush_tlb_all();
	alltime = mfctl(16) - alltime;

	printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime);
	printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n",
		threshold/1024);

set_tlb_threshold:
	if (threshold > FLUSH_TLB_THRESHOLD)
		parisc_tlb_flush_threshold = threshold;
	else
		parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD;
	printk(KERN_INFO "TLB flush threshold set to %lu KiB\n",
		parisc_tlb_flush_threshold/1024);
}
extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);
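
/* Flush the kernel mapping of a page and purge its kernel TLB entry,
 * so the CPU cannot speculatively pull the page back into the cache. */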
void flush_kernel_dcache_page_addr(void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb_kernel(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
	struct page *pg)
{
	/* Copy using kernel mapping.  No coherency is needed (all in
	   kunmap) for the `to' page.  However, the `from' page needs to
	   be flushed through a mapping equivalent to the user mapping
	   before it can be accessed through the kernel mapping. */
	preempt_disable();
	flush_dcache_page_asm(__pa(vfrom), vaddr);
	copy_page_asm(vto, vfrom);
	preempt_enable();
}
EXPORT_SYMBOL(copy_user_page);
/* __flush_tlb_range()
 *
 * returns 1 if all TLBs were flushed.
 */
int __flush_tlb_range(unsigned long sid, unsigned long start,
		      unsigned long end)
{
	unsigned long flags;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_tlb_flush_threshold) {
		flush_tlb_all();
		return 1;
	}

	/* Purge TLB entries for small ranges using the pdtlb and
	   pitlb instructions.  These instructions execute locally
	   but cause a purge request to be broadcast to other TLBs. */
	while (start < end) {
		purge_tlb_start(flags);
		mtsp(sid, 1);
		pdtlb(start);
		pitlb(start);
		purge_tlb_end(flags);
		start += PAGE_SIZE;
	}
	return 0;
}
static void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}
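
/* Total size of all VMAs in an mm; used to decide whether a full cache
 * flush is cheaper than flushing the mm page by page. */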
static inline unsigned long mm_total_size(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	unsigned long usize = 0;

	for (vma = mm->mmap; vma; vma = vma->vm_next)
		usize += vma->vm_end - vma->vm_start;
	return usize;
}
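
/* Walk the page table for a user address and return a pointer to its
 * pte, or NULL if no mapping exists at some level of the walk. */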
static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
	pte_t *ptep = NULL;

	if (!pgd_none(*pgd)) {
		p4d_t *p4d = p4d_offset(pgd, addr);
		if (!p4d_none(*p4d)) {
			pud_t *pud = pud_offset(p4d, addr);
			if (!pud_none(*pud)) {
				pmd_t *pmd = pmd_offset(pud, addr);
				if (!pmd_none(*pmd))
					ptep = pte_offset_map(pmd, addr);
			}
		}
	}
	return ptep;
}
void flush_cache_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	pgd_t *pgd;

	/* Flushing the whole cache on each cpu takes forever on
	   rp3440, etc.  So, avoid it if the mm isn't too big. */
	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    mm_total_size(mm) >= parisc_cache_flush_threshold) {
		if (mm->context)
			flush_tlb_all();
		flush_cache_all();
		return;
	}

	if (mm->context == mfsp(3)) {
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			flush_user_dcache_range_asm(vma->vm_start, vma->vm_end);
			if (vma->vm_flags & VM_EXEC)
				flush_user_icache_range_asm(vma->vm_start, vma->vm_end);
			flush_tlb_range(vma, vma->vm_start, vma->vm_end);
		}
		return;
	}

	pgd = mm->pgd;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long addr;

		for (addr = vma->vm_start; addr < vma->vm_end;
		     addr += PAGE_SIZE) {
			unsigned long pfn;
			pte_t *ptep = get_ptep(pgd, addr);
			if (!ptep)
				continue;
			pfn = pte_pfn(*ptep);
			if (!pfn_valid(pfn))
				continue;
			if (unlikely(mm->context)) {
				flush_tlb_page(vma, addr);
				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
			} else {
				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
			}
		}
	}
}
void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	pgd_t *pgd;
	unsigned long addr;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    end - start >= parisc_cache_flush_threshold) {
		if (vma->vm_mm->context)
			flush_tlb_range(vma, start, end);
		flush_cache_all();
		return;
	}

	if (vma->vm_mm->context == mfsp(3)) {
		flush_user_dcache_range_asm(start, end);
		if (vma->vm_flags & VM_EXEC)
			flush_user_icache_range_asm(start, end);
		flush_tlb_range(vma, start, end);
		return;
	}

	pgd = vma->vm_mm->pgd;
	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
		unsigned long pfn;
		pte_t *ptep = get_ptep(pgd, addr);
		if (!ptep)
			continue;
		pfn = pte_pfn(*ptep);
		if (pfn_valid(pfn)) {
			if (unlikely(vma->vm_mm->context)) {
				flush_tlb_page(vma, addr);
				__flush_cache_page(vma, addr, PFN_PHYS(pfn));
			} else {
				__purge_cache_page(vma, addr, PFN_PHYS(pfn));
			}
		}
	}
}
void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	if (pfn_valid(pfn)) {
		if (likely(vma->vm_mm->context)) {
			flush_tlb_page(vma, vmaddr);
			__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
		} else {
			__purge_cache_page(vma, vmaddr, PFN_PHYS(pfn));
		}
	}
}
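
/* Make the kernel mapping of a vmap range coherent: large ranges get a
 * full data cache flush, small ranges are flushed line by line. */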
void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	flush_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(flush_kernel_vmap_range);
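
/* As above, but the cached data is stale and may be discarded, so the
 * small-range path purges the lines instead of writing them back. */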
void invalidate_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;
	unsigned long end = start + size;

	if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) &&
	    (unsigned long)size >= parisc_cache_flush_threshold) {
		flush_tlb_kernel_range(start, end);
		flush_data_cache();
		return;
	}

	purge_kernel_dcache_range_asm(start, end);
	flush_tlb_kernel_range(start, end);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);