// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for initializing the MMU
 * on the 8xx series of chips.
 *
 * Derived from arch/powerpc/mm/40x_mmu.c:
 */

#include <linux/memblock.h>
#include <linux/mmu_context.h>
#include <asm/fixmap.h>
#include <asm/code-patching.h>

#include <mm/mmu_decl.h>

#define IMMR_SIZE	(FIX_IMMR_SIZE << PAGE_SHIFT)

extern int __map_without_ltlbs;

static unsigned long block_mapped_ram;
/*
 * Return PA for this VA if it is in an area mapped with LTLBs.
 * Otherwise, returns 0
 */
phys_addr_t v_block_mapped(unsigned long va)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (__map_without_ltlbs)
		return 0;
	if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)
		return p + va - VIRT_IMMR_BASE;
	if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)
		return __pa(va);
	return 0;
}
/*
 * Return VA for a given PA mapped with LTLBs or 0 if not mapped
 */
unsigned long p_block_mapped(phys_addr_t pa)
{
	unsigned long p = PHYS_IMMR_BASE;

	if (__map_without_ltlbs)
		return 0;
	if (pa >= p && pa < p + IMMR_SIZE)
		return VIRT_IMMR_BASE + pa - p;
	if (pa < block_mapped_ram)
		return (unsigned long)__va(pa);
	return 0;
}
#define LARGE_PAGE_SIZE_8M	(1<<23)

/*
 * MMU_init_hw does the chip-specific initialization of the MMU hardware.
 */
void __init MMU_init_hw(void)
{
	/* PIN up to the 3 first 8Mb after IMMR in DTLB table */
	if (IS_ENABLED(CONFIG_PIN_TLB_DATA)) {
		unsigned long ctr = mfspr(SPRN_MD_CTR) & 0xfe000000;
		unsigned long flags = 0xf0 | MD_SPS16K | _PAGE_SH | _PAGE_DIRTY;
		int i = IS_ENABLED(CONFIG_PIN_TLB_IMMR) ? 29 : 28;
		unsigned long addr = 0;
		unsigned long mem = total_lowmem;
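
		/*
		 * Each pass of the loop below pins one 8M page in the DTLB:
		 * MD_CTR selects the entry to load, MD_EPN gives the
		 * effective address, MD_TWC the page size and validity, and
		 * MD_RPN the physical address plus protection flags. For
		 * instance, with CONFIG_PIN_TLB_IMMR set, the first pass
		 * loads entry 29 to map __va(0) -> 0. (Descriptive comment
		 * added; see the MPC8xx reference manual for SPR layouts.)
		 */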
		for (; i < 32 && mem >= LARGE_PAGE_SIZE_8M; i++) {
			mtspr(SPRN_MD_CTR, ctr | (i << 8));
			mtspr(SPRN_MD_EPN, (unsigned long)__va(addr) | MD_EVALID);
			mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID);
			mtspr(SPRN_MD_RPN, addr | flags | _PAGE_PRESENT);
			addr += LARGE_PAGE_SIZE_8M;
			mem -= LARGE_PAGE_SIZE_8M;
		}
	}
}
static void __init mmu_mapin_immr(void)
{
	unsigned long p = PHYS_IMMR_BASE;
	unsigned long v = VIRT_IMMR_BASE;
	int offset;
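
	/*
	 * The IMMR area is mapped with normal-sized pages so it can stay
	 * cache-inhibited and guarded (PAGE_KERNEL_NCG) while the rest of
	 * lowmem uses 8M block mappings. (Descriptive comment added.)
	 */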
	for (offset = 0; offset < IMMR_SIZE; offset += PAGE_SIZE)
		map_kernel_page(v + offset, p + offset, PAGE_KERNEL_NCG);
}
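
/*
 * Patch the 16-bit immediate of the instruction at @site so the TLB
 * miss handlers compare faulting addresses against __va(@mapped);
 * addresses above that limit fall back to a page-table walk.
 * (Descriptive comment added.)
 */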
static void mmu_patch_cmp_limit(s32 *site, unsigned long mapped)
{
	modify_instruction_site(site, 0xffff, (unsigned long)__va(mapped) >> 16);
}
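
/*
 * Rewrite the instruction at @site so its immediate field becomes the
 * upper 16 bits of @simm, keeping the opcode and register fields
 * intact. (Descriptive comment added.)
 */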
static void mmu_patch_addis(s32 *site, long simm)
{
	unsigned int instr = *(unsigned int *)patch_site_addr(site);

	instr &= 0xffff0000;
	instr |= ((unsigned long)simm) >> 16;
	patch_instruction_site(site, instr);
}
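
/*
 * Map as much of lowmem as possible with 8M block mappings (LTLBs) and
 * patch the TLB miss handlers to match; returns the amount of RAM
 * covered by block mappings. (Descriptive comment added.)
 */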
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
	unsigned long mapped;

	if (__map_without_ltlbs) {
		mapped = 0;
		mmu_mapin_immr();
		if (!IS_ENABLED(CONFIG_PIN_TLB_IMMR))
			patch_instruction_site(&patch__dtlbmiss_immr_jmp, PPC_INST_NOP);
		if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
			mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, 0);
	} else {
		mapped = top & ~(LARGE_PAGE_SIZE_8M - 1);
		if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
			mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top,
					    _ALIGN(__pa(_einittext), 8 << 20));
	}

	mmu_patch_cmp_limit(&patch__dtlbmiss_linmem_top, mapped);
	mmu_patch_cmp_limit(&patch__fixupdar_linmem_top, mapped);
	/* If the size of RAM is not an exact power of two, we may not
	 * have covered RAM in its entirety with 8 MiB
	 * pages. Consequently, restrict the top end of RAM currently
	 * allocable so that calls to the MEMBLOCK to allocate PTEs for "tail"
	 * coverage with normal-sized pages (or other reasons) do not
	 * attempt to allocate outside the allowed range.
	 */
	memblock_set_current_limit(mapped);

	block_mapped_ram = mapped;

	return mapped;
}
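
/*
 * Once initmem is freed, shrink the executable limits so that only
 * text below _etext keeps executable block mappings. (Descriptive
 * comment added.)
 */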
void mmu_mark_initmem_nx(void)
{
	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) && CONFIG_ETEXT_SHIFT < 23)
		mmu_patch_addis(&patch__itlbmiss_linmem_top8,
				-((long)_etext & ~(LARGE_PAGE_SIZE_8M - 1)));
	if (!IS_ENABLED(CONFIG_PIN_TLB_TEXT))
		mmu_patch_cmp_limit(&patch__itlbmiss_linmem_top, __pa(_etext));
}
#ifdef CONFIG_STRICT_KERNEL_RWX
void mmu_mark_rodata_ro(void)
{
	if (CONFIG_DATA_SHIFT < 23)
		mmu_patch_addis(&patch__dtlbmiss_romem_top8,
				-__pa(((unsigned long)_sinittext) &
				      ~(LARGE_PAGE_SIZE_8M - 1)));
	mmu_patch_addis(&patch__dtlbmiss_romem_top, -__pa(_sinittext));
}
#endif
void __init setup_initial_memory_limit(phys_addr_t first_memblock_base,
				       phys_addr_t first_memblock_size)
{
	/* We don't currently support the first MEMBLOCK not mapping 0
	 * physical on those processors
	 */
	BUG_ON(first_memblock_base != 0);

	/* 8xx can only access 32MB at the moment */
	memblock_set_current_limit(min_t(u64, first_memblock_size, 0x02000000));
}
/*
 * Set up to use a given MMU context.
 * id is context number, pgd is PGD pointer.
 *
 * We place the physical address of the new task page directory loaded
 * into the MMU base register, and set the ASID compare register with
 * the new "context."
 */
void set_context(unsigned long id, pgd_t *pgd)
{
	s16 offset = (s16)(__pa(swapper_pg_dir));

	/* Context switch the PTE pointer for the Abatron BDI2000.
	 * The PGDIR is passed as second argument.
	 */
	if (IS_ENABLED(CONFIG_BDI_SWITCH))
		abatron_pteptrs[1] = pgd;

	/* Register M_TWB will contain base address of level 1 table minus the
	 * lower part of the kernel PGDIR base address, so that all accesses to
	 * level 1 table are done relative to lower part of kernel PGDIR base
	 * address.
	 */
	mtspr(SPRN_M_TWB, __pa(pgd) - offset);

	/* Update context */
	mtspr(SPRN_M_CASID, id - 1);
	/* sync */
	mb();
}
void flush_instruction_cache(void)
{
	isync();
	mtspr(SPRN_IC_CST, IDC_INVALL);
	isync();
}
#ifdef CONFIG_PPC_KUEP
void __init setup_kuep(bool disabled)
{
	if (disabled)
		return;

	pr_info("Activating Kernel Userspace Execution Prevention\n");

	mtspr(SPRN_MI_AP, MI_APG_KUEP);
}
#endif
#ifdef CONFIG_PPC_KUAP
void __init setup_kuap(bool disabled)
{
	pr_info("Activating Kernel Userspace Access Protection\n");

	if (disabled)
		pr_warn("KUAP cannot be disabled yet on 8xx when compiled in\n");

	mtspr(SPRN_MD_AP, MD_APG_KUAP);
}
#endif