// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines setting up the linux page tables.
 *  -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *  Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */
16 | ||
14cf11af PM |
17 | #include <linux/kernel.h> |
18 | #include <linux/module.h> | |
19 | #include <linux/types.h> | |
20 | #include <linux/mm.h> | |
21 | #include <linux/vmalloc.h> | |
22 | #include <linux/init.h> | |
23 | #include <linux/highmem.h> | |
95f72d1e | 24 | #include <linux/memblock.h> |
5a0e3ad6 | 25 | #include <linux/slab.h> |
c988cfd3 | 26 | #include <linux/set_memory.h> |
14cf11af | 27 | |
14cf11af | 28 | #include <asm/pgalloc.h> |
2c419bde | 29 | #include <asm/fixmap.h> |
ae3a197e | 30 | #include <asm/setup.h> |
95902e6c | 31 | #include <asm/sections.h> |
925ac141 | 32 | #include <asm/early_ioremap.h> |
14cf11af | 33 | |
9d9f2ccc | 34 | #include <mm/mmu_decl.h> |
14cf11af | 35 | |
/* Linker-provided section boundary symbols (kernel text and init text). */
extern char etext[], _stext[], _sinittext[], _einittext[];
14cf11af | 37 | |
925ac141 CL |
/* Static backing store for the fixmap PTEs, usable before memblock is up. */
static u8 early_fixmap_pagetable[FIXMAP_PTE_SIZE] __page_aligned_data;

/*
 * Wire up the fixmap region's page tables early in boot so that
 * early_ioremap()/fixmap accesses work before the normal page-table
 * allocators are available.
 */
notrace void __init early_ioremap_init(void)
{
	unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE);
	pte_t *ptep = (pte_t *)early_fixmap_pagetable;
	pmd_t *pmdp = pmd_off_k(addr);

	/*
	 * Walk one PGDIR_SIZE chunk at a time up to FIXADDR_TOP, pointing
	 * each kernel PMD at the corresponding slice of the static PTE
	 * table.  The (s32) cast makes the termination test robust when
	 * FIXADDR_TOP is near the top of the address space and
	 * "FIXADDR_TOP - addr" would otherwise wrap as unsigned.
	 */
	for (; (s32)(FIXADDR_TOP - addr) > 0;
	     addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)
		pmd_populate_kernel(&init_mm, pmdp, ptep);

	early_ioremap_setup();
}
52 | ||
4a6d8cf9 CL |
53 | static void __init *early_alloc_pgtable(unsigned long size) |
54 | { | |
55 | void *ptr = memblock_alloc(size, size); | |
56 | ||
57 | if (!ptr) | |
58 | panic("%s: Failed to allocate %lu bytes align=0x%lx\n", | |
59 | __func__, size, size); | |
60 | ||
61 | return ptr; | |
62 | } | |
63 | ||
34536d78 | 64 | pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va) |
4a6d8cf9 CL |
65 | { |
66 | if (pmd_none(*pmdp)) { | |
67 | pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE); | |
68 | ||
69 | pmd_populate_kernel(&init_mm, pmdp, ptep); | |
70 | } | |
71 | return pte_offset_kernel(pmdp, va); | |
72 | } | |
73 | ||
74 | ||
75 | int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot) | |
14cf11af PM |
76 | { |
77 | pmd_t *pd; | |
78 | pte_t *pg; | |
79 | int err = -ENOMEM; | |
80 | ||
14cf11af | 81 | /* Use upper 10 bits of VA to index the first level map */ |
e05c7b1f | 82 | pd = pmd_off_k(va); |
14cf11af | 83 | /* Use middle 10 bits of VA to index the second-level map */ |
4a6d8cf9 CL |
84 | if (likely(slab_is_available())) |
85 | pg = pte_alloc_kernel(pd, va); | |
86 | else | |
87 | pg = early_pte_alloc_kernel(pd, va); | |
b84bf098 | 88 | if (pg) { |
14cf11af | 89 | err = 0; |
3be4e699 BH |
90 | /* The PTE should never be already set nor present in the |
91 | * hash table | |
92 | */ | |
26973fa5 | 93 | BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot)); |
c766ee72 | 94 | set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot)); |
14cf11af | 95 | } |
47ce8af4 | 96 | smp_wmb(); |
14cf11af PM |
97 | return err; |
98 | } | |
99 | ||
100 | /* | |
de32400d | 101 | * Map in a chunk of physical memory starting at start. |
14cf11af | 102 | */ |
86b19520 | 103 | static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top) |
14cf11af | 104 | { |
c766ee72 | 105 | unsigned long v, s; |
99c62dd7 | 106 | phys_addr_t p; |
ee4f2ea4 | 107 | int ktext; |
14cf11af | 108 | |
de32400d | 109 | s = offset; |
ccdcef72 | 110 | v = PAGE_OFFSET + s; |
99c62dd7 | 111 | p = memstart_addr + s; |
de32400d | 112 | for (; s < top; s += PAGE_SIZE) { |
060ef9d8 CL |
113 | ktext = ((char *)v >= _stext && (char *)v < etext) || |
114 | ((char *)v >= _sinittext && (char *)v < _einittext); | |
c766ee72 | 115 | map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL); |
14cf11af PM |
116 | v += PAGE_SIZE; |
117 | p += PAGE_SIZE; | |
118 | } | |
119 | } | |
120 | ||
de32400d AH |
/*
 * Create the linear mapping for all lowmem RAM.
 *
 * Iterates every memblock memory range, clamps it to total_lowmem, lets
 * the MMU-specific code (mmu_mapin_ram) cover what it can with large/block
 * mappings, then maps the remainder page by page.
 */
void __init mapin_ram(void)
{
	phys_addr_t base, end;
	u64 i;

	for_each_mem_range(i, &base, &end) {
		/* Only the lowmem portion gets a linear mapping. */
		phys_addr_t top = min(end, total_lowmem);

		if (base >= top)
			continue;
		/*
		 * mmu_mapin_ram() returns the first address it did NOT
		 * cover with block mappings; map the rest with 4K pages.
		 */
		base = mmu_mapin_ram(base, top);
		__mapin_ram_chunk(base, top);
	}
}
135 | ||
3184cc4b CL |
136 | void mark_initmem_nx(void) |
137 | { | |
3184cc4b CL |
138 | unsigned long numpages = PFN_UP((unsigned long)_einittext) - |
139 | PFN_DOWN((unsigned long)_sinittext); | |
140 | ||
caaa0246 | 141 | if (v_block_mapped((unsigned long)_sinittext)) { |
63b2bc61 | 142 | mmu_mark_initmem_nx(); |
caaa0246 CL |
143 | } else { |
144 | set_memory_nx((unsigned long)_sinittext, numpages); | |
145 | set_memory_rw((unsigned long)_sinittext, numpages); | |
146 | } | |
3184cc4b | 147 | } |
88df6e90 | 148 | |
95902e6c CL |
#ifdef CONFIG_STRICT_KERNEL_RWX
/*
 * Enforce read-only protection on kernel text and rodata after boot.
 * Called from the generic init path once init is done.
 */
void mark_rodata_ro(void)
{
	unsigned long numpages;

	/* Block-mapped text is handled by the MMU-specific code. */
	if (v_block_mapped((unsigned long)_stext + 1)) {
		mmu_mark_rodata_ro();
		ptdump_check_wx();
		return;
	}

	/*
	 * mark .text and .rodata as read only. Use __init_begin rather than
	 * __end_rodata to cover NOTES and EXCEPTION_TABLE.
	 */
	numpages = PFN_UP((unsigned long)__init_begin) -
		   PFN_DOWN((unsigned long)_stext);

	set_memory_ro((unsigned long)_stext, numpages);

	// mark_initmem_nx() should have already run by now
	ptdump_check_wx();
}
#endif
173 | ||
#if defined(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) && defined(CONFIG_DEBUG_PAGEALLOC)
/*
 * DEBUG_PAGEALLOC hook: make @numpages pages starting at @page present
 * (enable) or not-present (disable) so stray accesses to freed pages
 * fault immediately.  Highmem pages have no permanent kernel mapping
 * and are left alone.
 */
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long addr;

	if (PageHighMem(page))
		return;

	addr = (unsigned long)page_address(page);
	if (enable)
		set_memory_p(addr, numpages);
	else
		set_memory_np(addr, numpages);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */