arm64/mm: add explicit struct_mm argument to __create_mapping()
arch/arm64/mm/mmu.c
/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

struct cachepolicy {
	const char	policy[16];
	u64		mair;
	u64		tcr;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.mair		= 0x44,			/* inner, outer non-cacheable */
		.tcr		= TCR_IRGN_NC | TCR_ORGN_NC,
	}, {
		.policy		= "writethrough",
		.mair		= 0xaa,			/* inner, outer write-through, read-allocate */
		.tcr		= TCR_IRGN_WT | TCR_ORGN_WT,
	}, {
		.policy		= "writeback",
		.mair		= 0xee,			/* inner, outer write-back, read-allocate */
		.tcr		= TCR_IRGN_WBnWA | TCR_ORGN_WBnWA,
	}
};

/*
 * These policies are useful for identifying cache coherency problems by
 * allowing the cache, or the cache and write buffer, to be turned off.
 * They change the Normal memory caching attributes in the MAIR_EL1 register.
 */
static int __init early_cachepolicy(char *p)
{
	int i;
	u64 tmp;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0)
			break;
	}
	if (i == ARRAY_SIZE(cache_policies)) {
		pr_err("ERROR: unknown or unsupported cache policy: %s\n", p);
		return 0;
	}

	flush_cache_all();

	/*
	 * Modify MT_NORMAL attributes in MAIR_EL1.
	 */
	asm volatile(
	"	mrs	%0, mair_el1\n"
	"	bfi	%0, %1, %2, #8\n"
	"	msr	mair_el1, %0\n"
	"	isb\n"
	: "=&r" (tmp)
	: "r" (cache_policies[i].mair), "i" (MT_NORMAL * 8));

	/*
	 * Modify TCR PTW cacheability attributes.
	 */
	asm volatile(
	"	mrs	%0, tcr_el1\n"
	"	bic	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	msr	tcr_el1, %0\n"
	"	isb\n"
	: "=&r" (tmp)
	: "r" (cache_policies[i].tcr), "r" (TCR_IRGN_MASK | TCR_ORGN_MASK));

	flush_cache_all();

	return 0;
}
early_param("cachepolicy", early_cachepolicy);
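/* Selected at boot via the "cachepolicy=" option, e.g. "cachepolicy=writeback". */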

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

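/*
 * Grab a zeroed, size-aligned chunk straight from memblock for early
 * page-table pages. This is only safe while allocations are limited to
 * memory covered by the initial kernel mapping (see map_mem() below).
 */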
static void __init *early_alloc(unsigned long sz)
{
	void *ptr = __va(memblock_alloc(sz, sz));
	memset(ptr, 0, sz);
	return ptr;
}

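/*
 * Populate the ptes for [addr, end) under *pmd at page granularity,
 * allocating the pte table first if the pmd is still empty.
 */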
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  pgprot_t prot)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
		__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

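/*
 * Populate the pmds for [addr, end). Chunks whose virtual and physical
 * addresses are both section-aligned are mapped with pmd section entries;
 * everything else falls back to page mappings via alloc_init_pte().
 */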
static void __init alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
				  unsigned long addr, unsigned long end,
				  phys_addr_t phys, int map_io)
{
	pmd_t *pmd;
	unsigned long next;
	pmdval_t prot_sect;
	pgprot_t prot_pte;

	if (map_io) {
		prot_sect = PROT_SECT_DEVICE_nGnRE;
		prot_pte = __pgprot(PROT_DEVICE_nGnRE);
	} else {
		prot_sect = PROT_SECT_NORMAL_EXEC;
		prot_pte = PAGE_KERNEL_EXEC;
	}

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_bad(*pud)) {
		pmd = early_alloc(PTRS_PER_PMD * sizeof(pmd_t));
		pud_populate(mm, pud, pmd);
	}

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
			pmd_t old_pmd = *pmd;
			set_pmd(pmd, __pmd(phys | prot_sect));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd))
				flush_tlb_all();
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot_pte);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
}

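/*
 * Populate the puds for [addr, end). With the 4K granule, suitably
 * aligned normal memory is mapped with 1GB pud blocks; a pmd table left
 * over from swapper_pg_dir that such a block supersedes is freed back
 * to memblock.
 */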
static void __init alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
				  unsigned long addr, unsigned long end,
				  phys_addr_t phys, int map_io)
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pud = early_alloc(PTRS_PER_PUD * sizeof(pud_t));
		pgd_populate(mm, pgd, pud);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (!map_io && (PAGE_SHIFT == 12) &&
		    ((addr | next | phys) & ~PUD_MASK) == 0) {
			pud_t old_pud = *pud;
			set_pud(pud, __pud(phys | PROT_SECT_NORMAL_EXEC));

			/*
			 * If we have an old value for a pud, it will
			 * be pointing to a pmd table that we no longer
			 * need (from swapper_pg_dir).
			 *
			 * Look up the old pmd table and free it.
			 */
			if (!pud_none(old_pud)) {
				phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
				memblock_free(table, PAGE_SIZE);
				flush_tlb_all();
			}
		} else {
			alloc_init_pmd(mm, pud, addr, next, phys, map_io);
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

/*
 * Create the page directory entries and any necessary page tables for the
 * requested virtual to physical mapping.
 */
static void __init __create_mapping(struct mm_struct *mm, pgd_t *pgd,
				    phys_addr_t phys, unsigned long virt,
				    phys_addr_t size, int map_io)
{
	unsigned long addr, length, end, next;

	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(mm, pgd, addr, next, phys, map_io);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}

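/*
 * Map a physical range at a kernel virtual address in init_mm. Requests
 * below VMALLOC_START are outside the kernel address range and are
 * refused.
 */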
static void __init create_mapping(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
			 size, 0);
}

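/*
 * Install an identity (virt == phys) mapping in idmap_pg_dir, provided
 * the address falls within the pgds that idmap_pg_dir covers.
 */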
void __init create_id_mapping(phys_addr_t addr, phys_addr_t size, int map_io)
{
	if ((addr >> PGDIR_SHIFT) >= ARRAY_SIZE(idmap_pg_dir)) {
		pr_warn("BUG: not creating id mapping for %pa\n", &addr);
		return;
	}
	__create_mapping(&init_mm, &idmap_pg_dir[pgd_index(addr)],
			 addr, addr, size, map_io);
}

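/*
 * Create the linear mapping for all memblock memory regions.
 */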
static void __init map_mem(void)
{
	struct memblock_region *reg;
	phys_addr_t limit;

	/*
	 * Temporarily limit the memblock range. We need to do this as
	 * create_mapping requires puds, pmds and ptes to be allocated from
	 * memory addressable from the initial direct kernel mapping.
	 *
	 * The initial direct kernel mapping, located at swapper_pg_dir, gives
	 * us PUD_SIZE (4K pages) or PMD_SIZE (64K pages) memory starting from
	 * PHYS_OFFSET (which must be aligned to 2MB as per
	 * Documentation/arm64/booting.txt).
	 */
	if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
		limit = PHYS_OFFSET + PMD_SIZE;
	else
		limit = PHYS_OFFSET + PUD_SIZE;
	memblock_set_current_limit(limit);

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;

#ifndef CONFIG_ARM64_64K_PAGES
		/*
		 * For the first memory bank align the start address and
		 * current memblock limit to prevent create_mapping() from
		 * allocating pte page tables from unmapped memory.
		 * When 64K pages are enabled, the pte page table for the
		 * first PGDIR_SIZE is already present in swapper_pg_dir.
		 */
		if (start < limit)
			start = ALIGN(start, PMD_SIZE);
		if (end < limit) {
			limit = end & PMD_MASK;
			memblock_set_current_limit(limit);
		}
#endif

		create_mapping(start, __phys_to_virt(start), end - start);
	}

	/* Limit no longer required. */
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	void *zero_page;

	map_mem();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state.
	 */
	flush_cache_all();
	flush_tlb_all();

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	flush_tlb_all();
}

/*
 * Enable the identity mapping so that the MMU can be disabled.
 */
void setup_mm_for_reboot(void)
{
	cpu_switch_mm(idmap_pg_dir, &init_mm);
	flush_tlb_all();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_sect(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_sect(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
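/*
 * With 64K pages the vmemmap is backed by base pages; with 4K pages it
 * is backed by 2MB pmd sections allocated a block at a time.
 */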
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#ifdef CONFIG_ARM64_64K_PAGES
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !CONFIG_ARM64_64K_PAGES */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* CONFIG_ARM64_64K_PAGES */
void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_ARM64_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_ARM64_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif

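/*
 * The helpers below walk the statically allocated fixmap tables
 * (bm_pte/bm_pmd/bm_pud above) down to the entry covering a fixmap
 * address, and BUG() if an intermediate level is missing or bad.
 */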
static inline pud_t * fixmap_pud(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);

	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

	return pud_offset(pgd, addr);
}

static inline pmd_t * fixmap_pmd(unsigned long addr)
{
	pud_t *pud = fixmap_pud(addr);

	BUG_ON(pud_none(*pud) || pud_bad(*pud));

	return pmd_offset(pud, addr);
}

static inline pte_t * fixmap_pte(unsigned long addr)
{
	pmd_t *pmd = fixmap_pmd(addr);

	BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

	return pte_offset_kernel(pmd, addr);
}

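/*
 * Wire the static bm_* tables into init_mm so that fixmap slots, in
 * particular the FIX_BTMAP boot-time ioremap window, are usable before
 * the normal page-table allocators are available.
 */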
void __init early_fixmap_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = FIXADDR_START;

	pgd = pgd_offset_k(addr);
	pgd_populate(&init_mm, pgd, bm_pud);
	pud = pud_offset(pgd, addr);
	pud_populate(&init_mm, pud, bm_pmd);
	pmd = pmd_offset(pud, addr);
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	     || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmd %p != %p, %p\n",
			pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
}

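/*
 * Point a fixmap slot at a physical address, or clear it again when
 * called with an empty pgprot; clearing also flushes the TLB for the
 * affected page.
 */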
void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	pte = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
	}
}