/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU does not use a hash table to store virtual to
 * physical translations (i.e. SW loaded TLBs or Book3E compliant processors;
 * this does -not- include 603, however, which shares the implementation with
 * hash based processors)
 *
 * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/code-patching.h>
#include <asm/hugetlb.h>

#include "mmu_decl.h"
/*
 * This struct lists the sw-supported page sizes. The hardware MMU may support
 * other sizes not listed here. The .ind field is only used on MMUs that have
 * indirect page table entries.
 */
#ifdef CONFIG_PPC_BOOK3E_MMU
#ifdef CONFIG_FSL_BOOKE
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_4M] = {
		.shift	= 22,
		.enc	= BOOK3E_PAGESZ_4M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_64M] = {
		.shift	= 26,
		.enc	= BOOK3E_PAGESZ_64M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
#else
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_16K] = {
		.shift	= 14,
		.enc	= BOOK3E_PAGESZ_16K,
	},
	[MMU_PAGE_64K] = {
		.shift	= 16,
		.enc	= BOOK3E_PAGESZ_64K,
	},
	[MMU_PAGE_1M] = {
		.shift	= 20,
		.enc	= BOOK3E_PAGESZ_1M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
#endif /* CONFIG_FSL_BOOKE */
static inline int mmu_get_tsize(int psize)
{
	return mmu_psize_defs[psize].enc;
}
#else /* CONFIG_PPC_BOOK3E_MMU */
static inline int mmu_get_tsize(int psize)
{
	/* This isn't used on !Book3E for now */
	return 0;
}
#endif /* CONFIG_PPC_BOOK3E_MMU */
/* The variables below are currently only used on 64-bit Book3E
 * though this will probably be made common with other nohash
 * implementations at some point
 */
#ifdef CONFIG_PPC64

int mmu_linear_psize;		/* Page size used for the linear mapping */
int mmu_pte_psize;		/* Page size used for PTE pages */
int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
int book3e_htw_enabled;		/* Is HW tablewalk enabled ? */
unsigned long linear_map_top;	/* Top of linear mapping */

#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC_FSL_BOOK3E
/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
DEFINE_PER_CPU(int, next_tlbcam_idx);
EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
#endif
/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 *
 *  These are the base non-SMP variants of page and mm flushing
 */
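/*
 * For illustration, assuming a 4K base page size (mmu_virtual_psize ==
 * MMU_PAGE_4K), a single-page flush on the local CPU boils down to roughly:
 *
 *	pid = vma->vm_mm->context.id;
 *	if (pid != MMU_NO_CONTEXT)
 *		_tlbil_va(vmaddr, pid, BOOK3E_PAGESZ_4K, 0);
 *
 * i.e. the PID of the address space plus the hardware page size encoding
 * taken from mmu_psize_defs[] are what actually reach the TLB invalidate.
 */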
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_pid(pid);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_tlb_mm);
void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			    int tsize, int ind)
{
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_va(vmaddr, pid, tsize, ind);
	preempt_enable();
}
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			       mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(local_flush_tlb_page);
/*
 * And here are the SMP non-local implementations
 */
#ifdef CONFIG_SMP

static DEFINE_RAW_SPINLOCK(tlbivax_lock);

static int mm_is_core_local(struct mm_struct *mm)
{
	return cpumask_subset(mm_cpumask(mm),
			      topology_thread_cpumask(smp_processor_id()));
}
struct tlb_flush_param {
	unsigned long addr;
	unsigned int pid;
	unsigned int tsize;
	unsigned int ind;
};

static void do_flush_tlb_mm_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_pid(p ? p->pid : 0);
}

static void do_flush_tlb_page_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);
}
/* Note on invalidations and PID:
 *
 * We snapshot the PID with preempt disabled. At this point, it can still
 * change either because:
 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 * - we are invalidating some target that isn't currently running here
 *   and is concurrently acquiring a new PID on another CPU
 * - some other CPU is re-acquiring a lost PID for this mm
 *
 * However, this shouldn't be a problem as we only guarantee
 * invalidation of TLB entries present prior to this call, so we
 * don't care about the PID changing, and invalidating a stale PID
 * is generally harmless.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
	if (!mm_is_core_local(mm)) {
		struct tlb_flush_param p = { .pid = pid };
		/* Ignores smp_processor_id() even if set. */
		smp_call_function_many(mm_cpumask(mm),
				       do_flush_tlb_mm_ipi, &p, 1);
	}
	_tlbil_pid(pid);
 no_context:
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);
void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
		      int tsize, int ind)
{
	struct cpumask *cpu_mask;
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	cpu_mask = mm_cpumask(mm);
	if (!mm_is_core_local(mm)) {
		/* If broadcast tlbivax is supported, use it */
		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
			if (lock)
				raw_spin_lock(&tlbivax_lock);
			_tlbivax_bcast(vmaddr, pid, tsize, ind);
			if (lock)
				raw_spin_unlock(&tlbivax_lock);
			goto bail;
		} else {
			struct tlb_flush_param p = {
				.pid = pid,
				.addr = vmaddr,
				.tsize = tsize,
				.ind = ind,
			};
			/* Ignores smp_processor_id() even if set in cpu_mask */
			smp_call_function_many(cpu_mask,
					       do_flush_tlb_page_ipi, &p, 1);
		}
	}
	_tlbil_va(vmaddr, pid, tsize, ind);
 bail:
	preempt_enable();
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (is_vm_hugetlb_page(vma))
		flush_hugetlb_page(vma, vmaddr);
#endif

	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			 mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(flush_tlb_page);
#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_47x
void __init early_init_mmu_47x(void)
{
#ifdef CONFIG_SMP
	unsigned long root = of_get_flat_dt_root();
	if (of_get_flat_dt_prop(root, "cooperative-partition", NULL))
		mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_PPC_47x */
/*
 * Flush kernel TLB entries in the given range
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	preempt_disable();
	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
	_tlbil_pid(0);
	preempt_enable();
#else
	_tlbil_pid(0);
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);
/*
 * Currently, for range flushing, we just do a full mm flush. This should
 * be optimized based on a threshold on the size of the range, since
 * some implementations can stack multiple tlbivax before a tlbsync, but
 * for now, we keep it that way
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);
void tlb_flush(struct mmu_gather *tlb)
{
	flush_tlb_mm(tlb->mm);
}
#ifdef CONFIG_PPC64

/*
 * Below are functions specific to the 64-bit variant of Book3E though that
 * may change in the future
 */

/*
 * Handling of virtual linear page tables or indirect TLB entries
 * flushing when PTE pages are freed
 */
void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	int tsize = mmu_psize_defs[mmu_pte_psize].enc;

	if (book3e_htw_enabled) {
		unsigned long start = address & PMD_MASK;
		unsigned long end = address + PMD_SIZE;
		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;

		/* This isn't the most optimal, ideally we would factor out the
		 * preempt & CPU mask mucking around (or even the IPI), but it
		 * will do for now
		 */
		while (start < end) {
			__flush_tlb_page(tlb->mm, start, tsize, 1);
			start += size;
		}
	} else {
		unsigned long rmask = 0xf000000000000000ul;
		unsigned long rid = (address & rmask) | 0x1000000000000000ul;
		unsigned long vpte = address & ~rmask;

#ifdef CONFIG_PPC_64K_PAGES
		vpte = (vpte >> (PAGE_SHIFT - 4)) & ~0xfffful;
#else
		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
#endif
		vpte |= rid;
		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
	}
}
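/*
 * Worked example for the non-HTW path above, assuming 4K pages (PAGE_SHIFT
 * == 12 and 8-byte PTEs): shifting the address right by PAGE_SHIFT - 3 == 9
 * turns it into the byte offset of its PTE within a linearly laid out page
 * table, and masking with ~0xffful rounds that offset down to the 4K page of
 * PTEs containing it. OR-ing in "rid" keeps the top four address bits and
 * sets the extra region bit used for the virtual linear page table, so the
 * single __flush_tlb_page() call drops the TLB entry covering that whole
 * page of PTEs.
 */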
static void setup_page_sizes(void)
{
	unsigned int tlb0cfg;
	unsigned int tlb0ps;
	unsigned int eptcfg;
	int i, psize;

#ifdef CONFIG_PPC_FSL_BOOK3E
	unsigned int mmucfg = mfspr(SPRN_MMUCFG);

	if (((mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) &&
	    (mmu_has_feature(MMU_FTR_TYPE_FSL_E))) {
		unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
		unsigned int min_pg, max_pg;

		min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
		max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def;
			unsigned int shift;

			def = &mmu_psize_defs[psize];
			shift = def->shift;

			if (shift == 0)
				continue;

			/* adjust to be in terms of 4^shift Kb */
			shift = (shift - 10) >> 1;

			if ((shift >= min_pg) && (shift <= max_pg))
				def->flags |= MMU_PAGE_SIZE_DIRECT;
		}

		goto no_indirect;
	}
#endif
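	/*
	 * Worked example for the "4^shift Kb" adjustment above: a 4K page has
	 * def->shift == 12, so (12 - 10) >> 1 == 1, i.e. 4^1 KB = 4KB; a 16M
	 * page has def->shift == 24, so (24 - 10) >> 1 == 7, i.e. 4^7 KB =
	 * 16MB. The result is directly comparable with the MINSIZE/MAXSIZE
	 * fields of TLB1CFG, which use that same power-of-4 unit.
	 */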
	tlb0cfg = mfspr(SPRN_TLB0CFG);
	tlb0ps = mfspr(SPRN_TLB0PS);
	eptcfg = mfspr(SPRN_EPTCFG);

	/* Look for supported direct sizes */
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];

		if (tlb0ps & (1U << (def->shift - 10)))
			def->flags |= MMU_PAGE_SIZE_DIRECT;
	}
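	/*
	 * For example, a 4K page size (def->shift == 12) corresponds to bit
	 * 12 - 10 == 2 of TLB0PS, and a 1M page size (shift 20) to bit 10;
	 * each set bit in TLB0PS advertises one supported direct page size.
	 */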
	/* Indirect page sizes supported ? */
	if ((tlb0cfg & TLBnCFG_IND) == 0)
		goto no_indirect;

	/* Now, we only deal with one IND page size for each
	 * direct size. Hopefully all implementations today are
	 * unambiguous, but we might want to be careful in the
	 * future.
	 */
	for (i = 0; i < 3; i++) {
		unsigned int ps, sps;

		sps = eptcfg & 0x1f;
		eptcfg >>= 5;
		ps = eptcfg & 0x1f;
		eptcfg >>= 5;
		if (!ps || !sps)
			continue;
		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (ps == (def->shift - 10))
				def->flags |= MMU_PAGE_SIZE_INDIRECT;
			if (sps == (def->shift - 10))
				def->ind = ps + 10;
		}
	}
 no_indirect:
	/* Cleanup array and print summary */
	pr_info("MMU: Supported page sizes\n");
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];
		const char *__page_type_names[] = {
			"unsupported",
			"direct",
			"indirect",
			"direct & indirect"
		};
		if (def->flags == 0) {
			def->shift = 0;
			continue;
		}
		pr_info("  %8ld KB as %s\n", 1ul << (def->shift - 10),
			__page_type_names[def->flags & 0x3]);
	}
}
static void __patch_exception(int exc, unsigned long addr)
{
	extern unsigned int interrupt_base_book3e;
	unsigned int *ibase = &interrupt_base_book3e;

	/* Our exception vectors start with a NOP and -then- a branch
	 * to deal with single stepping from userspace which stops on
	 * the second instruction. Thus we need to patch the second
	 * instruction of the exception, not the first one
	 */

	patch_branch(ibase + (exc / 4) + 1, addr, 0);
}

#define patch_exception(exc, name) do { \
	extern unsigned int name; \
	__patch_exception((exc), (unsigned long)&name); \
} while (0)
static void setup_mmu_htw(void)
{
	/* Check if HW tablewalk is present, and if yes, enable it by:
	 *
	 * - patching the TLB miss handlers to branch to the
	 *   one dedicated to it
	 *
	 * - setting the global book3e_htw_enabled
	 */
	unsigned int tlb0cfg = mfspr(SPRN_TLB0CFG);

	if ((tlb0cfg & TLBnCFG_IND) &&
	    (tlb0cfg & TLBnCFG_PT)) {
		patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
		book3e_htw_enabled = 1;
	}
	pr_info("MMU: Book3E HW tablewalk %s\n",
		book3e_htw_enabled ? "enabled" : "not supported");
}
/*
 * Early initialization of the MMU TLB code
 */
static void __early_init_mmu(int boot_cpu)
{
	unsigned int mas4;

	/* XXX This will have to be decided at runtime, but right
	 * now our boot and TLB miss code hard wires it. Ideally
	 * we should find out a suitable page size and patch the
	 * TLB miss code (either that or use the PACA to store
	 * the value we want)
	 */
	mmu_linear_psize = MMU_PAGE_1G;

	/* XXX This should be decided at runtime based on supported
	 * page sizes in the TLB, but for now let's assume 16M is
	 * always there and a good fit (which it probably is)
	 */
	mmu_vmemmap_psize = MMU_PAGE_16M;

	/* XXX This code only checks for TLB 0 capabilities and doesn't
	 *     check what page size combos are supported by the HW. It
	 *     also doesn't handle the case where a separate array holds
	 *     the IND entries from the array loaded by the PT.
	 */
	if (boot_cpu) {
		/* Look for supported page sizes */
		setup_page_sizes();

		/* Look for HW tablewalk support */
		setup_mmu_htw();
	}

	/* Set MAS4 based on page table setting */
	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
	if (book3e_htw_enabled) {
		mas4 |= MAS4_INDD;
#ifdef CONFIG_PPC_64K_PAGES
		mas4 |= BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_256M;
#else
		mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_1M;
#endif
	} else {
#ifdef CONFIG_PPC_64K_PAGES
		mas4 |= BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT;
#else
		mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
#endif
		mmu_pte_psize = mmu_virtual_psize;
	}

	mtspr(SPRN_MAS4, mas4);
	/* Set the global containing the top of the linear mapping
	 * for use by the TLB miss code
	 */
	linear_map_top = memblock_end_of_DRAM();

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned int num_cams;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;
		linear_map_top = map_mem_in_cams(linear_map_top, num_cams);

		/* limit memory so we don't have linear faults */
		memblock_enforce_memory_limit(linear_map_top);

		patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_bolted_book3e);
	}
#endif

	/* A sync won't hurt us after mucking around with
	 * the MMU configuration
	 */
	mb();

	memblock_set_current_limit(linear_map_top);
}
void __init early_init_mmu(void)
{
	__early_init_mmu(1);
}

void __cpuinit early_init_mmu_secondary(void)
{
	__early_init_mmu(0);
}
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* On non-FSL Embedded 64-bit, we adjust the RMA size to match
	 * the bolted TLB entry. We know for now that only 1G
	 * entries are supported though that may eventually
	 * change.
	 *
	 * On FSL Embedded 64-bit, we adjust the RMA size to match the
	 * first bolted TLB entry size. We still limit max to 1G even if
	 * the TLB could cover more. This is due to what the early init
	 * code is set up to do.
	 *
	 * We crop it to the size of the first MEMBLOCK to
	 * avoid going over total available memory just in case...
	 */
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned long linear_sz;
		linear_sz = calc_cam_sz(first_memblock_size, PAGE_OFFSET,
					first_memblock_base);
		ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
	} else
#endif
		ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

	/* Finally limit subsequent allocations */
	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
}
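/*
 * For example, assuming a first memblock of 2GB starting at physical 0 on a
 * non-FSL Embedded 64-bit machine, ppc64_rma_size is clamped to 0x40000000
 * (1GB) above, so early allocations are limited to the first gigabyte even
 * though more memory is present.
 */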
#else /* ! CONFIG_PPC64 */
void __init early_init_mmu(void)
{
#ifdef CONFIG_PPC_47x
	early_init_mmu_47x();
#endif
}
#endif /* CONFIG_PPC64 */