/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

#include "mm.h"

/*
 * empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

pgprot_t pgprot_default;
EXPORT_SYMBOL(pgprot_default);

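/*
 * Attributes used for kernel section (block) mappings; set up in
 * init_mem_pgprot() below.
 */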
static pmdval_t prot_sect_kernel;

struct cachepolicy {
	const char	policy[16];
	u64		mair;
	u64		tcr;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy	= "uncached",
		.mair	= 0x44,			/* inner, outer non-cacheable */
		.tcr	= TCR_IRGN_NC | TCR_ORGN_NC,
	}, {
		.policy	= "writethrough",
		.mair	= 0xaa,			/* inner, outer write-through, read-allocate */
		.tcr	= TCR_IRGN_WT | TCR_ORGN_WT,
	}, {
		.policy	= "writeback",
		.mair	= 0xee,			/* inner, outer write-back, read-allocate */
		.tcr	= TCR_IRGN_WBnWA | TCR_ORGN_WBnWA,
	}
};

/*
 * These are useful for identifying cache coherency problems by allowing the
 * cache (or the cache and the write buffer) to be turned off. It changes the
 * Normal memory caching attributes in the MAIR_EL1 register.
 */
static int __init early_cachepolicy(char *p)
{
	int i;
	u64 tmp;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0)
			break;
	}
	if (i == ARRAY_SIZE(cache_policies)) {
		pr_err("ERROR: unknown or unsupported cache policy: %s\n", p);
		return 0;
	}

	flush_cache_all();

	/*
	 * Modify MT_NORMAL attributes in MAIR_EL1.
	 */
	asm volatile(
	"	mrs	%0, mair_el1\n"
	"	bfi	%0, %1, #%2, #8\n"
	"	msr	mair_el1, %0\n"
	"	isb\n"
	: "=&r" (tmp)
	: "r" (cache_policies[i].mair), "i" (MT_NORMAL * 8));

	/*
	 * Modify TCR PTW cacheability attributes.
	 */
	asm volatile(
	"	mrs	%0, tcr_el1\n"
	"	bic	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	msr	tcr_el1, %0\n"
	"	isb\n"
	: "=&r" (tmp)
	: "r" (cache_policies[i].tcr), "r" (TCR_IRGN_MASK | TCR_ORGN_MASK));

	flush_cache_all();

	return 0;
}
early_param("cachepolicy", early_cachepolicy);
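
/*
 * The policy is selected with the early "cachepolicy=" kernel parameter,
 * e.g. "cachepolicy=writethrough"; unrecognised values are rejected by
 * early_cachepolicy() above.
 */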

/*
 * Set up the default page and section attributes used for the kernel
 * mappings and fold them into the user protection_map[] entries.
 */
static void __init init_mem_pgprot(void)
{
	pteval_t default_pgprot;
	int i;

	default_pgprot = PTE_ATTRINDX(MT_NORMAL);
	prot_sect_kernel = PMD_TYPE_SECT | PMD_SECT_AF | PMD_ATTRINDX(MT_NORMAL);

#ifdef CONFIG_SMP
	/*
	 * Mark memory with the "shared" attribute for SMP systems
	 */
	default_pgprot |= PTE_SHARED;
	prot_sect_kernel |= PMD_SECT_S;
#endif

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | default_pgprot);
	}

	pgprot_default = __pgprot(PTE_TYPE_PAGE | PTE_AF | default_pgprot);
}

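/*
 * Choose the protection for user mappings of physical memory (e.g. the
 * /dev/mem mmap() path): Device memory when the pfn has no struct page,
 * Normal non-cacheable for O_SYNC mappings of RAM.
 */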
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

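/*
 * Allocate a zeroed, naturally aligned block for early page tables directly
 * from memblock, below the current memblock limit so that it is already
 * covered by the initial kernel mapping.
 */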
static void __init *early_alloc(unsigned long sz)
{
	void *ptr = __va(memblock_alloc(sz, sz));
	memset(ptr, 0, sz);
	return ptr;
}

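/*
 * Populate a PTE table for [addr, end), allocating the table first if the
 * pmd entry is still empty, and map the range with PAGE_KERNEL_EXEC.
 */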
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
		__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

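/*
 * Populate a PMD range, using section mappings whenever the virtual and
 * physical addresses (and the range end) are all section-aligned, and
 * falling back to alloc_init_pte() otherwise.
 */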
static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
				  unsigned long end, phys_addr_t phys)
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_bad(*pud)) {
		pmd = early_alloc(PTRS_PER_PMD * sizeof(pmd_t));
		pud_populate(&init_mm, pud, pmd);
	}

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0)
			set_pmd(pmd, __pmd(phys | prot_sect_kernel));
		else
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys));
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
}

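/*
 * Walk the PUD entries covering [addr, end) and hand each sub-range to
 * alloc_init_pmd().
 */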
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
				  unsigned long end, unsigned long phys)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		alloc_init_pmd(pud, addr, next, phys);
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

/*
 * Create the page directory entries and any necessary page tables for the
 * mapping specified by 'phys', 'virt' and 'size'.
 */
static void __init create_mapping(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size)
{
	unsigned long addr, length, end, next;
	pgd_t *pgd;

	if (virt < VMALLOC_START) {
		pr_warning("BUG: not creating mapping for 0x%016llx at 0x%016lx - outside kernel range\n",
			   phys, virt);
		return;
	}

	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgd, addr, next, phys);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}

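/*
 * Usage note: map_mem() below builds the kernel linear mapping by calling
 *	create_mapping(start, __phys_to_virt(start), end - start);
 * for each memblock memory region.
 */
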
#ifdef CONFIG_EARLY_PRINTK
/*
 * Create an early I/O mapping using the pgd/pmd entries already populated
 * in head.S as this function is called too early to allocate any memory. The
 * mapping size is 2MB with 4KB pages or 64KB with 64KB pages.
 */
void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
{
	unsigned long size, mask;
	bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * There are no early pte entries with the !ARM64_64K_PAGES
	 * configuration, so use section (pmd) mappings.
	 */
	size = page64k ? PAGE_SIZE : SECTION_SIZE;
	mask = ~(size - 1);

	pgd = pgd_offset_k(virt);
	pud = pud_offset(pgd, virt);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, virt);

	if (page64k) {
		if (pmd_none(*pmd))
			return NULL;
		pte = pte_offset_kernel(pmd, virt);
		set_pte(pte, __pte((phys & mask) | PROT_DEVICE_nGnRE));
	} else {
		set_pmd(pmd, __pmd((phys & mask) | PROT_SECT_DEVICE_nGnRE));
	}

	return (void __iomem *)((virt & mask) + (phys & ~mask));
}
#endif

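/*
 * Map all memblock memory regions into the kernel linear mapping.
 */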
static void __init map_mem(void)
{
	struct memblock_region *reg;
	phys_addr_t limit;

	/*
	 * Temporarily limit the memblock range. We need to do this as
	 * create_mapping requires puds, pmds and ptes to be allocated from
	 * memory addressable from the initial direct kernel mapping.
	 *
	 * The initial direct kernel mapping, located at swapper_pg_dir,
	 * gives us PGDIR_SIZE memory starting from PHYS_OFFSET (which must be
	 * aligned to 2MB as per Documentation/arm64/booting.txt).
	 */
	limit = PHYS_OFFSET + PGDIR_SIZE;
	memblock_set_current_limit(limit);

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;

#ifndef CONFIG_ARM64_64K_PAGES
		/*
		 * For the first memory bank align the start address and
		 * current memblock limit to prevent create_mapping() from
		 * allocating pte page tables from unmapped memory.
		 * When 64K pages are enabled, the pte page table for the
		 * first PGDIR_SIZE is already present in swapper_pg_dir.
		 */
		if (start < limit)
			start = ALIGN(start, PMD_SIZE);
		if (end < limit) {
			limit = end & PMD_MASK;
			memblock_set_current_limit(limit);
		}
#endif

		create_mapping(start, __phys_to_virt(start), end - start);
	}

	/* Limit no longer required. */
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	void *zero_page;

	init_mem_pgprot();
	map_mem();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state.
	 */
	flush_cache_all();
	flush_tlb_all();

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	flush_tlb_all();
}

/*
 * Enable the identity mapping to allow the MMU to be disabled.
 */
void setup_mm_for_reboot(void)
{
	cpu_switch_mm(idmap_pg_dir, &init_mm);
	flush_tlb_all();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
#ifdef CONFIG_ARM64_64K_PAGES
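/*
 * With 64K pages the vmemmap is simply backed by base pages.
 */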
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !CONFIG_ARM64_64K_PAGES */
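/*
 * With 4K pages, back the vmemmap with section mappings, allocating the
 * PMD_SIZE backing blocks with vmemmap_alloc_block_buf().
 */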
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | prot_sect_kernel));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* CONFIG_ARM64_64K_PAGES */
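
/*
 * Nothing to tear down here yet: unmapping the vmemmap (used on memory hot
 * remove) is not implemented for this architecture at this point.
 */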
void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */