// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines setting up the linux page tables.
 *  -- paulus
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/set_memory.h>

#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/early_ioremap.h>

#include <mm/mmu_decl.h>

extern char etext[], _stext[], _sinittext[], _einittext[];

static u8 early_fixmap_pagetable[FIXMAP_PTE_SIZE] __page_aligned_data;

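/*
 * Cover the fixmap address range with the statically allocated PTE
 * pages above, so that early_ioremap() and the fixmap are usable
 * before the page allocator is up. The signed comparison in the loop
 * condition terminates the walk correctly even though FIXADDR_TOP is
 * not PGDIR aligned.
 */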
notrace void __init early_ioremap_init(void)
{
	unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE);
	pte_t *ptep = (pte_t *)early_fixmap_pagetable;
	pmd_t *pmdp = pmd_off_k(addr);

	for (; (s32)(FIXADDR_TOP - addr) > 0;
	     addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)
		pmd_populate_kernel(&init_mm, pmdp, ptep);

	early_ioremap_setup();
}

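/*
 * Boot-time page table allocator: grab a naturally aligned block from
 * memblock. There is no way to recover from an allocation failure
 * this early in boot, so panic rather than return NULL.
 */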
static void __init *early_alloc_pgtable(unsigned long size)
{
	void *ptr = memblock_alloc(size, size);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, size, size);

	return ptr;
}

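/*
 * Boot-time counterpart of pte_alloc_kernel(): populate an empty PMD
 * entry with a PTE fragment taken from memblock instead of the slab,
 * then return the PTE entry for va.
 */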
pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
{
	if (pmd_none(*pmdp)) {
		pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);

		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	return pte_offset_kernel(pmdp, va);
}

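/*
 * Map a single page at virtual address va to physical address pa in
 * the kernel page tables. Uses the slab-backed pte_alloc_kernel()
 * once the slab allocator is available, and the memblock-backed
 * variant before that, so it is safe at any point during boot.
 */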
int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_off_k(va);
	/* Use middle 10 bits of VA to index the second-level map */
	if (likely(slab_is_available()))
		pg = pte_alloc_kernel(pd, va);
	else
		pg = early_pte_alloc_kernel(pd, va);
	if (pg) {
		err = 0;
		/* The PTE should never be already set nor present in the
		 * hash table
		 */
		BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot));
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));
	}
	smp_wmb();
	return err;
}

/*
 * Map in the chunk of physical memory between 'offset' and 'top'
 * bytes from the start of RAM. Kernel text and init text are mapped
 * with PAGE_KERNEL_TEXT so they stay executable; everything else
 * gets PAGE_KERNEL.
 */
static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
	unsigned long v, s;
	phys_addr_t p;
	int ktext;

	s = offset;
	v = PAGE_OFFSET + s;
	p = memstart_addr + s;
	for (; s < top; s += PAGE_SIZE) {
		ktext = ((char *)v >= _stext && (char *)v < etext) ||
			((char *)v >= _sinittext && (char *)v < _einittext);
		map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

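/*
 * Map all of lowmem into the kernel linear mapping. Each memblock
 * range is first offered to the MMU-specific code, which may cover
 * part of it with block mappings; whatever remains is mapped page
 * by page.
 */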
void __init mapin_ram(void)
{
	phys_addr_t base, end;
	u64 i;

	for_each_mem_range(i, &base, &end) {
		phys_addr_t top = min(end, total_lowmem);

		if (base >= top)
			continue;
		base = mmu_mapin_ram(base, top);
		__mapin_ram_chunk(base, top);
	}
}

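/*
 * Strip execute permission from the init text once it is no longer
 * needed, and make it writable again so the pages can be repurposed
 * once they are freed. Block-mapped init text cannot be changed with
 * set_memory_*() and is handled by the MMU-specific code instead.
 */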
void mark_initmem_nx(void)
{
	unsigned long numpages = PFN_UP((unsigned long)_einittext) -
				 PFN_DOWN((unsigned long)_sinittext);

	if (v_block_mapped((unsigned long)_sinittext)) {
		mmu_mark_initmem_nx();
	} else {
		set_memory_nx((unsigned long)_sinittext, numpages);
		set_memory_rw((unsigned long)_sinittext, numpages);
	}
}

#ifdef CONFIG_STRICT_KERNEL_RWX
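/*
 * Write-protect kernel text and rodata once boot is far enough along
 * that they no longer need to be written. Block-mapped text is
 * delegated to the MMU-specific code.
 */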
void mark_rodata_ro(void)
{
	unsigned long numpages;

	if (v_block_mapped((unsigned long)_stext + 1)) {
		mmu_mark_rodata_ro();
		ptdump_check_wx();
		return;
	}

	/*
	 * mark .text and .rodata as read only. Use __init_begin rather than
	 * __end_rodata to cover NOTES and EXCEPTION_TABLE.
	 */
	numpages = PFN_UP((unsigned long)__init_begin) -
		   PFN_DOWN((unsigned long)_stext);

	set_memory_ro((unsigned long)_stext, numpages);

	// mark_initmem_nx() should have already run by now
	ptdump_check_wx();
}
#endif

#if defined(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) && defined(CONFIG_DEBUG_PAGEALLOC)
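/*
 * DEBUG_PAGEALLOC hook: unmap pages from the kernel linear mapping
 * when they are freed and restore them on allocation, so that stray
 * accesses fault immediately. Highmem pages have no permanent linear
 * mapping and are skipped.
 */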
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long addr = (unsigned long)page_address(page);

	if (PageHighMem(page))
		return;

	if (enable)
		set_memory_p(addr, numpages);
	else
		set_memory_np(addr, numpages);
}
#endif /* CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC && CONFIG_DEBUG_PAGEALLOC */