// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU does not use a hash table to store virtual to
 * physical translations (i.e., SW loaded TLBs or Book3E compliant processors;
 * this does -not- include 603 however, which shares the implementation with
 * hash based processors)
 *
 * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/code-patching.h>
#include <asm/cputhreads.h>
#include <asm/hugetlb.h>

#include <mm/mmu_decl.h>
/*
 * This struct lists the SW-supported page sizes. The hardware MMU may support
 * other sizes not listed here. The .ind field is only used on MMUs that have
 * indirect page table entries.
 */
#if defined(CONFIG_PPC_BOOK3E_MMU) || defined(CONFIG_PPC_8xx)
#ifdef CONFIG_PPC_FSL_BOOK3E
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_2M] = {
		.shift	= 21,
		.enc	= BOOK3E_PAGESZ_2M,
	},
	[MMU_PAGE_4M] = {
		.shift	= 22,
		.enc	= BOOK3E_PAGESZ_4M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_64M] = {
		.shift	= 26,
		.enc	= BOOK3E_PAGESZ_64M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
#elif defined(CONFIG_PPC_8xx)
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	/* we only manage 4k and 16k pages as normal pages */
#ifdef CONFIG_PPC_4K_PAGES
	[MMU_PAGE_4K] = { .shift = 12, },
#else
	[MMU_PAGE_16K] = { .shift = 14, },
#endif
};
#else
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.ind	= 20,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_16K] = {
		.shift	= 14,
		.enc	= BOOK3E_PAGESZ_16K,
	},
	[MMU_PAGE_64K] = {
		.shift	= 16,
		.ind	= 28,
		.enc	= BOOK3E_PAGESZ_64K,
	},
	[MMU_PAGE_1M] = {
		.shift	= 20,
		.enc	= BOOK3E_PAGESZ_1M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.ind	= 36,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
#endif /* CONFIG_PPC_FSL_BOOK3E */

static inline int mmu_get_tsize(int psize)
{
	return mmu_psize_defs[psize].enc;
}
#else
static inline int mmu_get_tsize(int psize)
{
	/* This isn't used on !Book3E for now */
	return 0;
}
#endif /* CONFIG_PPC_BOOK3E_MMU */
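
/*
 * Example: mmu_get_tsize(MMU_PAGE_4K) returns BOOK3E_PAGESZ_4K, the
 * hardware page size encoding that the flush helpers below hand to
 * _tlbil_va() and _tlbivax_bcast() as their "tsize" argument.
 */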

/* The variables below are currently only used on 64-bit Book3E
 * though this will probably be made common with other nohash
 * implementations at some point
 */
#ifdef CONFIG_PPC64

int mmu_linear_psize;		/* Page size used for the linear mapping */
int mmu_pte_psize;		/* Page size used for PTE pages */
int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
int book3e_htw_mode;		/* HW tablewalk?  Value is PPC_HTW_* */
unsigned long linear_map_top;	/* Top of linear mapping */

/*
 * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
 * exceptions.  This is used for bolted and e6500 TLB miss handlers which
 * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
 * this is set to zero.
 */
int extlb_level_exc;

#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
DEFINE_PER_CPU(int, next_tlbcam_idx);
EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
#endif

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */
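
/*
 * Example: after clearing a PTE for a user page at "addr" in "vma",
 * flush_tlb_page(vma, addr) removes any stale TLB entry on every CPU
 * that has run the mm, while local_flush_tlb_page(vma, addr) only
 * cleans the current processor and is only safe when no other CPU can
 * hold a translation for that mm.
 */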

/*
 * These are the base non-SMP variants of page and mm flushing
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_pid(pid);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_tlb_mm);

void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			    int tsize, int ind)
{
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_va(vmaddr, pid, tsize, ind);
	preempt_enable();
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			       mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(local_flush_tlb_page);

/*
 * And here are the SMP non-local implementations
 */
#ifdef CONFIG_SMP

static DEFINE_RAW_SPINLOCK(tlbivax_lock);

struct tlb_flush_param {
	unsigned long addr;
	unsigned int pid;
	unsigned int tsize;
	unsigned int ind;
};

static void do_flush_tlb_mm_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_pid(p ? p->pid : 0);
}

static void do_flush_tlb_page_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);
}
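
/*
 * Note that the tlb_flush_param passed to both IPI handlers above lives
 * on the caller's stack. This is only safe because the callers below
 * invoke smp_call_function_many() with wait=1, so the sender blocks
 * until every target CPU has run the handler and is done with "p".
 */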

/* Note on invalidations and PID:
 *
 * We snapshot the PID with preempt disabled. At this point, it can still
 * change either because:
 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 * - we are invalidating some target that isn't currently running here
 *   and is concurrently acquiring a new PID on another CPU
 * - some other CPU is re-acquiring a lost PID for this mm
 *
 * However, this shouldn't be a problem as we only guarantee
 * invalidation of TLB entries present prior to this call, so we
 * don't care about the PID changing, and invalidating a stale PID
 * is generally harmless.
 */
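
/*
 * Concrete example of a harmless stale-PID flush: we snapshot PID 5 for
 * an mm, the context is stolen and PID 5 is later handed to another mm,
 * then our invalidation lands. The new owner merely loses a few TLB
 * entries and refills them; no stale translation is ever left behind,
 * which is all the API guarantees.
 */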

void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
	if (!mm_is_core_local(mm)) {
		struct tlb_flush_param p = { .pid = pid };

		/* Ignores smp_processor_id() even if set. */
		smp_call_function_many(mm_cpumask(mm),
				       do_flush_tlb_mm_ipi, &p, 1);
	}
	_tlbil_pid(pid);
 no_context:
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
		      int tsize, int ind)
{
	struct cpumask *cpu_mask;
	unsigned int pid;

	/*
	 * This function as well as __local_flush_tlb_page() must only be called
	 * for user contexts.
	 */
	if (WARN_ON(!mm))
		return;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	cpu_mask = mm_cpumask(mm);
	if (!mm_is_core_local(mm)) {
		/* If broadcast tlbivax is supported, use it */
		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);

			if (lock)
				raw_spin_lock(&tlbivax_lock);
			_tlbivax_bcast(vmaddr, pid, tsize, ind);
			if (lock)
				raw_spin_unlock(&tlbivax_lock);
			goto bail;
		} else {
			struct tlb_flush_param p = {
				.pid = pid,
				.addr = vmaddr,
				.tsize = tsize,
				.ind = ind,
			};

			/* Ignores smp_processor_id() even if set in cpu_mask */
			smp_call_function_many(cpu_mask,
					       do_flush_tlb_page_ipi, &p, 1);
		}
	}
	_tlbil_va(vmaddr, pid, tsize, ind);
 bail:
	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (vma && is_vm_hugetlb_page(vma))
		flush_hugetlb_page(vma, vmaddr);
#endif

	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			 mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(flush_tlb_page);

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_47x
void __init early_init_mmu_47x(void)
{
#ifdef CONFIG_SMP
	unsigned long root = of_get_flat_dt_root();

	if (of_get_flat_dt_prop(root, "cooperative-partition", NULL))
		mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_PPC_47x */

/*
 * Flush kernel TLB entries in the given range
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	preempt_disable();
	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
	_tlbil_pid(0);
	preempt_enable();
#else
	_tlbil_pid(0);
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);

/*
 * Currently, for range flushing, we just do a full mm flush. This should
 * be optimized based on a threshold on the size of the range, since
 * some implementations can stack multiple tlbivax before a tlbsync, but
 * for now, we keep it that way
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	if (end - start == PAGE_SIZE && !(start & ~PAGE_MASK))
		flush_tlb_page(vma, start);
	else
		flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

void tlb_flush(struct mmu_gather *tlb)
{
	flush_tlb_mm(tlb->mm);
}

/*
 * Below are functions specific to the 64-bit variant of Book3E though that
 * may change in the future
 */

#ifdef CONFIG_PPC64

/*
 * Handling of virtual linear page tables or indirect TLB entries
 * flushing when PTE pages are freed
 */
void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	int tsize = mmu_psize_defs[mmu_pte_psize].enc;

	if (book3e_htw_mode != PPC_HTW_NONE) {
		unsigned long start = address & PMD_MASK;
		unsigned long end = address + PMD_SIZE;
		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;

		/* This isn't the most optimal, ideally we would factor out the
		 * whole preempt & CPU mask mucking around, or even the IPI but
		 * it will do for now
		 */
		while (start < end) {
			__flush_tlb_page(tlb->mm, start, tsize, 1);
			start += size;
		}
	} else {
		unsigned long rmask = 0xf000000000000000ul;
		unsigned long rid = (address & rmask) | 0x1000000000000000ul;
		unsigned long vpte = address & ~rmask;

		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
		vpte |= rid;
		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
	}
}
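
/*
 * Worked example for the else-branch above, assuming 4K pages
 * (PAGE_SHIFT = 12): each 8-byte PTE maps 2^12 bytes, so the linear page
 * table address of a PTE is its mapped address shifted right by
 * PAGE_SHIFT - 3 = 9. The & ~0xffful then rounds down to the start of
 * the 4K PTE page, and OR-ing in "rid" re-tags the top nibble of the
 * address (0x0 -> 0x1 for user, 0x8 -> 0x9 for kernel) so that the
 * invalidation targets the virtual linear page table region rather than
 * the original mapping itself.
 */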

static void setup_page_sizes(void)
{
	unsigned int tlb0cfg;
	unsigned int tlb0ps;
	unsigned int eptcfg;
	int i, psize;
#ifdef CONFIG_PPC_FSL_BOOK3E
	unsigned int mmucfg = mfspr(SPRN_MMUCFG);
	int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);

	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
		unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
		unsigned int min_pg, max_pg;

		min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
		max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def;
			unsigned int shift;

			def = &mmu_psize_defs[psize];
			shift = def->shift;

			if (shift == 0 || shift & 1)
				continue;

			/* adjust to be in terms of 4^shift Kb */
			shift = (shift - 10) >> 1;

			if ((shift >= min_pg) && (shift <= max_pg))
				def->flags |= MMU_PAGE_SIZE_DIRECT;
		}

		goto out;
	}
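
	/*
	 * Example of the 4^n Kb conversion above: a 4M page has shift = 22,
	 * so (22 - 10) >> 1 = 6 and 4^6 KB = 4096 KB = 4M. Odd shifts are
	 * skipped beforehand since they cannot be expressed as a power of 4
	 * times 1 KB.
	 */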

	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
		u32 tlb1cfg, tlb1ps;

		tlb0cfg = mfspr(SPRN_TLB0CFG);
		tlb1cfg = mfspr(SPRN_TLB1CFG);
		tlb1ps = mfspr(SPRN_TLB1PS);
		eptcfg = mfspr(SPRN_EPTCFG);

		if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
			book3e_htw_mode = PPC_HTW_E6500;

		/*
		 * We expect 4K subpage size and unrestricted indirect size.
		 * The lack of a restriction on indirect size is a Freescale
		 * extension, indicated by PSn = 0 but SPSn != 0.
		 */
		if (eptcfg != 2)
			book3e_htw_mode = PPC_HTW_NONE;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (!def->shift)
				continue;

			if (tlb1ps & (1U << (def->shift - 10))) {
				def->flags |= MMU_PAGE_SIZE_DIRECT;

				if (book3e_htw_mode && psize == MMU_PAGE_2M)
					def->flags |= MMU_PAGE_SIZE_INDIRECT;
			}
		}

		goto out;
	}
#endif
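
	/*
	 * The TLBnPS registers read above and below are bitmaps of supported
	 * page sizes: bit n set means 2^(n + 10) byte pages are available,
	 * so bit 2 is 4K and bit 11 is 2M, hence the (def->shift - 10) tests.
	 */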

	tlb0cfg = mfspr(SPRN_TLB0CFG);
	tlb0ps = mfspr(SPRN_TLB0PS);
	eptcfg = mfspr(SPRN_EPTCFG);

	/* Look for supported direct sizes */
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];

		if (tlb0ps & (1U << (def->shift - 10)))
			def->flags |= MMU_PAGE_SIZE_DIRECT;
	}

	/* Indirect page sizes supported ? */
	if ((tlb0cfg & TLBnCFG_IND) == 0 ||
	    (tlb0cfg & TLBnCFG_PT) == 0)
		goto out;

	book3e_htw_mode = PPC_HTW_IBM;

	/* Now, we only deal with one IND page size for each
	 * direct size. Hopefully all implementations today are
	 * unambiguous, but we might want to be careful in the
	 * future.
	 */
	for (i = 0; i < 3; i++) {
		unsigned int ps, sps;

		sps = eptcfg & 0x1f;
		eptcfg >>= 5;
		ps = eptcfg & 0x1f;
		eptcfg >>= 5;
		if (!ps || !sps)
			continue;

		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (ps == (def->shift - 10))
				def->flags |= MMU_PAGE_SIZE_INDIRECT;
			if (sps == (def->shift - 10))
				def->ind = ps + 10;
		}
	}

out:
	/* Cleanup array and print summary */
	pr_info("MMU: Supported page sizes\n");

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];
		const char *__page_type_names[] = {
			"unsupported",
			"direct",
			"indirect",
			"direct & indirect"
		};

		if (def->flags == 0) {
			def->shift = 0;
			continue;
		}

		pr_info("  %8ld KB as %s\n", 1ul << (def->shift - 10),
			__page_type_names[def->flags & 0x3]);
	}
}

static void setup_mmu_htw(void)
{
	/*
	 * If we want to use HW tablewalk, enable it by patching the TLB miss
	 * handlers to branch to the one dedicated to it.
	 */

	switch (book3e_htw_mode) {
	case PPC_HTW_IBM:
		patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
		break;
#ifdef CONFIG_PPC_FSL_BOOK3E
	case PPC_HTW_E6500:
		extlb_level_exc = EX_TLB_SIZE;
		patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
		break;
#endif
	}
	pr_info("MMU: Book3E HW tablewalk %s\n",
		book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
}

/*
 * Early initialization of the MMU TLB code
 */
static void early_init_this_mmu(void)
{
	unsigned int mas4;

	/* Set MAS4 based on page table setting */

	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
	switch (book3e_htw_mode) {
	case PPC_HTW_E6500:
		mas4 |= MAS4_INDD;
		mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
		mas4 |= MAS4_TLBSELD(1);
		mmu_pte_psize = MMU_PAGE_2M;
		break;

	case PPC_HTW_IBM:
		mas4 |= MAS4_INDD;
		mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_1M;
		break;

	case PPC_HTW_NONE:
		mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = mmu_virtual_psize;
		break;
	}
	mtspr(SPRN_MAS4, mas4);
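
	/*
	 * MAS4 holds the defaults the hardware loads into the MAS registers
	 * on a TLB miss, so the TSIZED/INDD/TLBSELD values chosen above
	 * decide what kind of entry (page size, indirect or not, which TLB
	 * array) a miss handler starts from.
	 */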

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned int num_cams;
		int __maybe_unused cpu = smp_processor_id();
		bool map = true;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;

		/*
		 * Only do the mapping once per core, or else the
		 * transient mapping would cause problems.
		 */
#ifdef CONFIG_SMP
		if (hweight32(get_tensr()) > 1)
			map = false;
#endif

		if (map)
			linear_map_top = map_mem_in_cams(linear_map_top,
							 num_cams, false);
	}
#endif

	/* A sync won't hurt us after mucking around with
	 * the MMU configuration
	 */
	mb();
}

static void __init early_init_mmu_global(void)
{
	/* XXX This will have to be decided at runtime, but right
	 * now our boot and TLB miss code hard wires it. Ideally
	 * we should find out a suitable page size and patch the
	 * TLB miss code (either that or use the PACA to store
	 * the value we want)
	 */
	mmu_linear_psize = MMU_PAGE_1G;

	/* XXX This should be decided at runtime based on supported
	 * page sizes in the TLB, but for now let's assume 16M is
	 * always there and a good fit (which it probably is)
	 *
	 * Freescale booke only supports 4K pages in TLB0, so use that.
	 */
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		mmu_vmemmap_psize = MMU_PAGE_4K;
	else
		mmu_vmemmap_psize = MMU_PAGE_16M;

	/* XXX This code only checks for TLB 0 capabilities and doesn't
	 *     check what page size combos are supported by the HW. It
	 *     also doesn't handle the case where a separate array holds
	 *     the IND entries from the array loaded by the PT.
	 */
	/* Look for supported page sizes */
	setup_page_sizes();

	/* Look for HW tablewalk support */
	setup_mmu_htw();

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		if (book3e_htw_mode == PPC_HTW_NONE) {
			extlb_level_exc = EX_TLB_SIZE;
			patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
			patch_exception(0x1e0,
				exc_instruction_tlb_miss_bolted_book3e);
		}
	}
#endif

	/* Set the global containing the top of the linear mapping
	 * for use by the TLB miss code
	 */
	linear_map_top = memblock_end_of_DRAM();
}

static void __init early_mmu_set_memory_limit(void)
{
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		/*
		 * Limit memory so we don't have linear faults.
		 * Unlike memblock_set_current_limit, which limits
		 * memory available during early boot, this permanently
		 * reduces the memory available to Linux.  We need to
		 * do this because highmem is not supported on 64-bit.
		 */
		memblock_enforce_memory_limit(linear_map_top);
	}
#endif

	memblock_set_current_limit(linear_map_top);
}

/* boot cpu only */
void __init early_init_mmu(void)
{
	early_init_mmu_global();
	early_init_this_mmu();
	early_mmu_set_memory_limit();
}

void early_init_mmu_secondary(void)
{
	early_init_this_mmu();
}

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* On non-FSL Embedded 64-bit, we adjust the RMA size to match
	 * the bolted TLB entry. We know for now that only 1G
	 * entries are supported though that may eventually
	 * change.
	 *
	 * On FSL Embedded 64-bit, usually all RAM is bolted, but with
	 * unusual memory sizes it's possible for some RAM to not be mapped
	 * (such RAM is not used at all by Linux, since we don't support
	 * highmem on 64-bit).  We limit ppc64_rma_size to what would be
	 * mappable if this memblock is the only one.  Additional memblocks
	 * can only increase, not decrease, the amount that ends up getting
	 * mapped.  We still limit max to 1G even if we'll eventually map
	 * more.  This is due to what the early init code is set up to do.
	 *
	 * We crop it to the size of the first MEMBLOCK to
	 * avoid going over total available memory just in case...
	 */
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (early_mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned long linear_sz;
		unsigned int num_cams;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;

		linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
					    true);

		ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
	} else
#endif
		ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

	/* Finally limit subsequent allocations */
	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
}
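
/*
 * The 0x40000000 clamps above cap ppc64_rma_size at 1G, matching the
 * single bolted 1G entry assumption spelled out in the comment at the
 * top of setup_initial_memory_limit().
 */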
#else /* ! CONFIG_PPC64 */
void __init early_init_mmu(void)
{
#ifdef CONFIG_PPC_47x
	early_init_mmu_47x();
#endif

#ifdef CONFIG_PPC_MM_SLICES
	mm_ctx_set_slb_addr_limit(&init_mm.context, SLB_ADDR_LIMIT_DEFAULT);
#endif
}
#endif /* CONFIG_PPC64 */