// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU substantially follows the
 * architecture specification.  This includes the 6xx, 7xx, 7xxx,
 * and 8260 implementations but excludes the 8xx and 4xx.
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *   Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>

#include <asm/mmu.h>
#include <asm/machdep.h>
#include <asm/code-patching.h>
#include <asm/sections.h>

#include <mm/mmu_decl.h>
u8 __initdata early_hash[SZ_256K] __aligned(SZ_256K) = {0};

static struct hash_pte __initdata *Hash = (struct hash_pte *)early_hash;
static unsigned long __initdata Hash_size, Hash_mask;
static unsigned int __initdata hash_mb, hash_mb2;
unsigned long __initdata _SDR1;

struct ppc_bat BATS[8][2];	/* 8 pairs of IBAT, DBAT */
static struct batrange {	/* stores address ranges mapped by BATs */
        unsigned long start;
        unsigned long limit;
        phys_addr_t phys;
} bat_addrs[8];

#ifdef CONFIG_SMP
unsigned long mmu_hash_lock;
#endif
/*
 * Return PA for this VA if it is mapped by a BAT, or 0
 */
phys_addr_t v_block_mapped(unsigned long va)
{
        int b;

        for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
                if (va >= bat_addrs[b].start && va < bat_addrs[b].limit)
                        return bat_addrs[b].phys + (va - bat_addrs[b].start);
        return 0;
}
/*
 * Return VA for a given PA or 0 if not mapped
 */
unsigned long p_block_mapped(phys_addr_t pa)
{
        int b;

        for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
                if (pa >= bat_addrs[b].phys &&
                    pa < (bat_addrs[b].limit - bat_addrs[b].start) + bat_addrs[b].phys)
                        return bat_addrs[b].start + (pa - bat_addrs[b].phys);
        return 0;
}
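/*
 * Worked example (illustrative, values assumed, not from the original
 * source): if a BAT maps virtual 0xc0000000 to physical 0x00000000 with
 * a 16MB limit, then v_block_mapped(0xc0123000) returns 0x00123000 and
 * p_block_mapped(0x00123000) returns 0xc0123000.
 */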
static int find_free_bat(void)
{
        int b;
        int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;

        for (b = 0; b < n; b++) {
                struct ppc_bat *bat = BATS[b];

                /* The pair is free if the DBAT valid bits (Vs/Vp) are clear */
                if (!(bat[1].batu & 3))
                        return b;
        }
        return -1;
}
/*
 * This function calculates the size of the largest block usable to map the
 * beginning of an area based on the start address and size of that area:
 * - max block size is 256M on 6xx.
 * - base address must be aligned to the block size. So the maximum block size
 *   is identified by the lowest bit set to 1 in the base address (for instance
 *   if base is 0x16000000, max size is 0x02000000).
 * - block size has to be a power of two. This is calculated by finding the
 *   highest bit set to 1.
 */
static unsigned int block_size(unsigned long base, unsigned long top)
{
        unsigned int max_size = SZ_256M;
        unsigned int base_shift = (ffs(base) - 1) & 31;
        unsigned int block_shift = (fls(top - base) - 1) & 31;

        return min3(max_size, 1U << base_shift, 1U << block_shift);
}
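/*
 * Worked example (illustrative, values assumed): for base = 0x16000000
 * and top = 0x1b000000, ffs(base) - 1 = 25 so alignment allows at most
 * 1 << 25 = 0x02000000, and fls(0x05000000) - 1 = 26 allows at most
 * 1 << 26 = 0x04000000; min3() therefore returns 0x02000000 (32MB).
 */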
/*
 * Set up one of the IBAT (block address translation) register pairs.
 * The parameters are not checked; in particular size must be a power
 * of 2 between 128k and 256M.
 */
static void setibat(int index, unsigned long virt, phys_addr_t phys,
                    unsigned int size, pgprot_t prot)
{
        unsigned int bl = (size >> 17) - 1;
        int wimgxpp;
        struct ppc_bat *bat = BATS[index];
        unsigned long flags = pgprot_val(prot);

        if (!cpu_has_feature(CPU_FTR_NEED_COHERENT))
                flags &= ~_PAGE_COHERENT;

        wimgxpp = (flags & _PAGE_COHERENT) | (_PAGE_EXEC ? BPP_RX : BPP_XX);
        bat[0].batu = virt | (bl << 2) | 2;	/* Vs=1, Vp=0 */
        bat[0].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
        if (flags & _PAGE_USER)
                bat[0].batu |= 1;	/* Vp = 1 */
}
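/*
 * Illustrative encoding example (not from the original source): for a
 * 256MB block, bl = (0x10000000 >> 17) - 1 = 0x7ff, so the upper BAT
 * word carries an 11-bit BL field of 0x7ff in bits 2-12 alongside the
 * Vs/Vp valid bits.
 */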
static void clearibat(int index)
{
        struct ppc_bat *bat = BATS[index];

        bat[0].batu = 0;
        bat[0].batl = 0;
}
static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long top)
{
        int idx;

        while ((idx = find_free_bat()) != -1 && base != top) {
                unsigned int size = block_size(base, top);

                if (size < 128 << 10)
                        break;
                setbat(idx, PAGE_OFFSET + base, base, size, PAGE_KERNEL_X);
                base += size;
        }

        return base;
}
unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
{
        unsigned long done;
        unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;

        if (debug_pagealloc_enabled_or_kfence() || __map_without_bats) {
                pr_debug_once("Read-Write memory mapped without BATs\n");
                if (base >= border)
                        return base;
                if (top >= border)
                        top = border;
        }

        if (!strict_kernel_rwx_enabled() || base >= border || top <= border)
                return __mmu_mapin_ram(base, top);

        done = __mmu_mapin_ram(base, border);
        if (done != border)
                return done;

        return __mmu_mapin_ram(border, top);
}
static bool is_module_segment(unsigned long addr)
{
        if (!IS_ENABLED(CONFIG_MODULES))
                return false;
        if (addr < ALIGN_DOWN(MODULES_VADDR, SZ_256M))
                return false;
        if (addr > ALIGN(MODULES_END, SZ_256M) - 1)
                return false;
        return true;
}
void mmu_mark_initmem_nx(void)
{
        int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
        int i;
        unsigned long base = (unsigned long)_stext - PAGE_OFFSET;
        unsigned long top = ALIGN((unsigned long)_etext - PAGE_OFFSET, SZ_128K);
        unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;
        unsigned long size;

        for (i = 0; i < nb - 1 && base < top;) {
                size = block_size(base, top);
                setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
                base += size;
        }
        if (base < top) {
                size = block_size(base, top);
                if ((top - base) > size) {
                        size <<= 1;
                        if (strict_kernel_rwx_enabled() && base + size > border)
                                pr_warn("Some RW data is getting mapped X. "
                                        "Adjust CONFIG_DATA_SHIFT to avoid that.\n");
                }
                setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
                base += size;
        }
        for (; i < nb; i++)
                clearibat(i);

        update_bats();

        for (i = TASK_SIZE >> 28; i < 16; i++) {
                /* Do not set NX on VM space for modules */
                if (is_module_segment(i << 28))
                        continue;

                mtsr(mfsr(i << 28) | 0x10000000, i << 28);
        }
}
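/*
 * Illustrative note (assumption, not from the original source): 0x10000000
 * is the N (no-execute) bit in the segment register image, so the loop
 * above marks every 256MB kernel segment from TASK_SIZE upwards
 * no-execute, except the segments reserved for module space.
 */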
void mmu_mark_rodata_ro(void)
{
        int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
        int i;

        for (i = 0; i < nb; i++) {
                struct ppc_bat *bat = BATS[i];

                if (bat_addrs[i].start < (unsigned long)__init_begin)
                        bat[1].batl = (bat[1].batl & ~BPP_RW) | BPP_RX;
        }

        update_bats();
}
/*
 * Set up one of the I/D BAT (block address translation) register pairs.
 * The parameters are not checked; in particular size must be a power
 * of 2 between 128k and 256M.
 * On 603+, only set IBAT when _PAGE_EXEC is set.
 */
void __init setbat(int index, unsigned long virt, phys_addr_t phys,
                   unsigned int size, pgprot_t prot)
{
        unsigned int bl;
        int wimgxpp;
        struct ppc_bat *bat;
        unsigned long flags = pgprot_val(prot);

        if (index == -1)
                index = find_free_bat();
        if (index == -1) {
                pr_err("%s: no BAT available for mapping 0x%llx\n", __func__,
                       (unsigned long long)phys);
                return;
        }
        bat = BATS[index];

        if ((flags & _PAGE_NO_CACHE) ||
            (cpu_has_feature(CPU_FTR_NEED_COHERENT) == 0))
                flags &= ~_PAGE_COHERENT;

        bl = (size >> 17) - 1;
        /* Do the DBAT first */
        wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
                           | _PAGE_COHERENT | _PAGE_GUARDED);
        wimgxpp |= (flags & _PAGE_RW) ? BPP_RW : BPP_RX;
        bat[1].batu = virt | (bl << 2) | 2;	/* Vs=1, Vp=0 */
        bat[1].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
        if (flags & _PAGE_USER)
                bat[1].batu |= 1;	/* Vp = 1 */
        if (flags & _PAGE_GUARDED) {
                /* G bit must be zero in IBATs */
                flags &= ~_PAGE_EXEC;
        }
        if (flags & _PAGE_EXEC)
                bat[0] = bat[1];	/* IBAT mirrors the DBAT */
        else
                bat[0].batu = bat[0].batl = 0;

        bat_addrs[index].start = virt;
        bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1;
        bat_addrs[index].phys = phys;
}
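/*
 * Hypothetical usage sketch (not from the original source): a platform
 * could identity-map a 16MB uncached, guarded IO region and let
 * find_free_bat() pick the slot by passing index == -1:
 *
 *	setbat(-1, 0xff000000, 0xff000000, SZ_16M, PAGE_KERNEL_NCG);
 */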
/*
 * Preload a translation in the hash table
 */
static void hash_preload(struct mm_struct *mm, unsigned long ea)
{
        pmd_t *pmd;

        if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
                return;
        pmd = pmd_off(mm, ea);
        if (!pmd_none(*pmd))
                add_hash_page(mm->context.id, ea, pmd_val(*pmd));
}
/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                      pte_t *ptep)
{
        if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
                return;
        /*
         * We don't need to worry about _PAGE_PRESENT here because we are
         * called with either mm->page_table_lock held or ptl lock held
         */

        /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
        if (!pte_young(*ptep) || address >= TASK_SIZE)
                return;

        /* We have to test for regs NULL since init will get here first thing at boot */
        if (!current->thread.regs)
                return;

        /* We also avoid filling the hash if not coming from a fault (0x300: DSI, 0x400: ISI) */
        if (TRAP(current->thread.regs) != 0x300 && TRAP(current->thread.regs) != 0x400)
                return;

        hash_preload(vma->vm_mm, address);
}
/*
 * Initialize the hash table and patch the instructions in hashtable.S.
 */
void __init MMU_init_hw(void)
{
        unsigned int n_hpteg, lg_n_hpteg;

        if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
                return;

        if (ppc_md.progress)
                ppc_md.progress("hash:enter", 0x105);

#define LG_HPTEG_SIZE	6	/* 64 bytes per HPTEG */
#define SDR1_LOW_BITS	((n_hpteg - 1) >> 10)
#define MIN_N_HPTEG	1024	/* min 64kB hash table */

        /*
         * Allow 1 HPTE (1/8 HPTEG) for each page of memory.
         * This is less than the recommended amount, but then
         * Linux ain't AIX.
         */
        n_hpteg = total_memory / (PAGE_SIZE * 8);
        if (n_hpteg < MIN_N_HPTEG)
                n_hpteg = MIN_N_HPTEG;
        lg_n_hpteg = __ilog2(n_hpteg);
        if (n_hpteg & (n_hpteg - 1)) {
                ++lg_n_hpteg;	/* round up if not power of 2 */
                n_hpteg = 1 << lg_n_hpteg;
        }
        Hash_size = n_hpteg << LG_HPTEG_SIZE;
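        /*
         * Worked example (illustrative, values assumed): with 256MB of RAM
         * and 4kB pages, n_hpteg = SZ_256M / (4096 * 8) = 8192, which is
         * already a power of 2, so lg_n_hpteg = 13 and
         * Hash_size = 8192 << 6 = 512kB.
         */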
        /*
         * Find some memory for the hash table.
         */
        if (ppc_md.progress)
                ppc_md.progress("hash:find piece", 0x322);
        Hash = memblock_alloc(Hash_size, Hash_size);
        if (!Hash)
                panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                      __func__, Hash_size, Hash_size);
        _SDR1 = __pa(Hash) | SDR1_LOW_BITS;
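        /*
         * Continuing the illustrative example above: SDR1_LOW_BITS =
         * (8192 - 1) >> 10 = 7, so _SDR1 combines the 512kB-aligned
         * physical base of the hash table with an HTABMASK of 7.
         */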
386 pr_info("Total memory = %lldMB; using %ldkB for hash table\n",
387 (unsigned long long)(total_memory
>> 20), Hash_size
>> 10);
390 Hash_mask
= n_hpteg
- 1;
391 hash_mb2
= hash_mb
= 32 - LG_HPTEG_SIZE
- lg_n_hpteg
;
393 hash_mb2
= 16 - LG_HPTEG_SIZE
;
void __init MMU_init_hw_patch(void)
{
        unsigned int hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
        unsigned int hash = (unsigned int)Hash - PAGE_OFFSET;

        if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
                return;

        if (ppc_md.progress)
                ppc_md.progress("hash:patch", 0x345);
        if (ppc_md.progress)
                ppc_md.progress("hash:done", 0x205);

        /* WARNING: Make sure nothing can trigger a KASAN check past this point */

        /*
         * Patch up the instructions in hashtable.S:create_hpte
         */
        modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
        modify_instruction_site(&patch__hash_page_A1, 0x7c0, hash_mb << 6);
        modify_instruction_site(&patch__hash_page_A2, 0x7c0, hash_mb2 << 6);
        modify_instruction_site(&patch__hash_page_B, 0xffff, hmask);
        modify_instruction_site(&patch__hash_page_C, 0xffff, hmask);

        /*
         * Patch up the instructions in hashtable.S:flush_hash_page
         */
        modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16);
        modify_instruction_site(&patch__flush_hash_A1, 0x7c0, hash_mb << 6);
        modify_instruction_site(&patch__flush_hash_A2, 0x7c0, hash_mb2 << 6);
        modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask);
}
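/*
 * Worked example (illustrative): for the 512kB hash table sketched in
 * MMU_init_hw(), hash_mb = hash_mb2 = 32 - 6 - 13 = 13 and
 * hmask = 8191 >> 10 = 7, which matches SDR1_LOW_BITS: the patched
 * hash computation must agree with what SDR1 programs into the MMU.
 */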
void setup_initial_memory_limit(phys_addr_t first_memblock_base,
                                phys_addr_t first_memblock_size)
{
        /*
         * We don't currently support the first MEMBLOCK not mapping 0
         * physical on those processors
         */
        BUG_ON(first_memblock_base != 0);

        memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_256M));
}
void __init print_system_hash_info(void)
{
        pr_info("Hash_size         = 0x%lx\n", Hash_size);
        if (Hash_mask)
                pr_info("Hash_mask         = 0x%lx\n", Hash_mask);
}
void __init early_init_mmu(void)
{
}