/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

#include "mm.h"

/*
 * The empty_zero_page is a special page that is used for zero-initialized
 * data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

pgprot_t pgprot_default;
EXPORT_SYMBOL(pgprot_default);

static pmdval_t prot_sect_kernel;

struct cachepolicy {
	const char	policy[16];
	u64		mair;
	u64		tcr;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy	= "uncached",
		.mair	= 0x44,	/* inner, outer non-cacheable */
		.tcr	= TCR_IRGN_NC | TCR_ORGN_NC,
	}, {
		.policy	= "writethrough",
		.mair	= 0xaa,	/* inner, outer write-through, read-allocate */
		.tcr	= TCR_IRGN_WT | TCR_ORGN_WT,
	}, {
		.policy	= "writeback",
		.mair	= 0xee,	/* inner, outer write-back, read-allocate */
		.tcr	= TCR_IRGN_WBnWA | TCR_ORGN_WBnWA,
	}
};

/*
 * The "cachepolicy" early parameter is useful for identifying cache
 * coherency problems: it allows the cache, or the cache and the write
 * buffer, to be turned off by changing the Normal memory caching attributes
 * in the MAIR_EL1 register.
 */
static int __init early_cachepolicy(char *p)
{
	int i;
	u64 tmp;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0)
			break;
	}
	if (i == ARRAY_SIZE(cache_policies)) {
		pr_err("ERROR: unknown or unsupported cache policy: %s\n", p);
		return 0;
	}

	flush_cache_all();

	/*
	 * Modify MT_NORMAL attributes in MAIR_EL1.
	 */
	asm volatile(
	"	mrs	%0, mair_el1\n"
	"	bfi	%0, %1, #%2, #8\n"
	"	msr	mair_el1, %0\n"
	"	isb\n"
	: "=&r" (tmp)
	: "r" (cache_policies[i].mair), "i" (MT_NORMAL * 8));

	/*
	 * Modify TCR PTW cacheability attributes.
	 */
	asm volatile(
	"	mrs	%0, tcr_el1\n"
	"	bic	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	msr	tcr_el1, %0\n"
	"	isb\n"
	: "=&r" (tmp)
	: "r" (cache_policies[i].tcr), "r" (TCR_IRGN_MASK | TCR_ORGN_MASK));

	flush_cache_all();

	return 0;
}
early_param("cachepolicy", early_cachepolicy);

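/*
 * Example usage: booting with "cachepolicy=uncached" on the kernel command
 * line selects the first policy in the table above.
 */
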
/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init init_mem_pgprot(void)
{
	pteval_t default_pgprot;
	int i;

	default_pgprot = PTE_ATTRINDX(MT_NORMAL);
	prot_sect_kernel = PMD_TYPE_SECT | PMD_SECT_AF | PMD_ATTRINDX(MT_NORMAL);

#ifdef CONFIG_SMP
	/*
	 * Mark memory with the "shared" attribute for SMP systems
	 */
	default_pgprot |= PTE_SHARED;
	prot_sect_kernel |= PMD_SECT_S;
#endif

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | default_pgprot);
	}

	pgprot_default = __pgprot(PTE_TYPE_PAGE | PTE_AF | default_pgprot);
}

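/*
 * Select the pgprot for user mappings of physical memory (e.g. /dev/mem):
 * addresses without a valid struct page are mapped uncached, O_SYNC mappings
 * become write-combining, and anything else keeps the caller's attributes.
 */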
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

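/*
 * Allocate a naturally aligned, zeroed block from memblock for early page
 * tables, before the normal allocators are available.
 */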
static void __init *early_alloc(unsigned long sz)
{
	void *ptr = __va(memblock_alloc(sz, sz));
	memset(ptr, 0, sz);
	return ptr;
}

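/*
 * Populate the pte level for [addr, end) with contiguous physical pages
 * starting at pfn, allocating the pte table first if the pmd entry is empty.
 */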
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t));
		__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

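/*
 * Populate the pmd level for [addr, end), using section mappings where the
 * virtual and physical addresses are section-aligned and pte tables
 * otherwise.
 */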
static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
				  unsigned long end, phys_addr_t phys)
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_bad(*pud)) {
		pmd = early_alloc(PTRS_PER_PMD * sizeof(pmd_t));
		pud_populate(&init_mm, pud, pmd);
	}

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
			pmd_t old_pmd = *pmd;
			set_pmd(pmd, __pmd(phys | prot_sect_kernel));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd))
				flush_tlb_all();
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys));
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
}

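/*
 * Populate the pud level for [addr, end), delegating each sub-range to
 * alloc_init_pmd().
 */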
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
				  unsigned long end, unsigned long phys)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		alloc_init_pmd(pud, addr, next, phys);
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

/*
 * Create the page directory entries and any necessary page tables for the
 * mapping specified by the (phys, virt, size) arguments.
 */
static void __init create_mapping(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size)
{
	unsigned long addr, length, end, next;
	pgd_t *pgd;

	if (virt < VMALLOC_START) {
		pr_warning("BUG: not creating mapping for 0x%016llx at 0x%016lx - outside kernel range\n",
			   phys, virt);
		return;
	}

	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgd, addr, next, phys);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}

#ifdef CONFIG_EARLY_PRINTK
/*
 * Create an early I/O mapping using the pgd/pmd entries already populated
 * in head.S, as this function is called too early to allocate any memory.
 * The mapping size is 2MB with 4KB pages or 64KB with 64KB pages.
 */
void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
{
	unsigned long size, mask;
	bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/*
	 * There are no early pte entries with the !ARM64_64K_PAGES
	 * configuration, so use section (pmd) mappings instead.
	 */
	size = page64k ? PAGE_SIZE : SECTION_SIZE;
	mask = ~(size - 1);

	pgd = pgd_offset_k(virt);
	pud = pud_offset(pgd, virt);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, virt);

	if (page64k) {
		if (pmd_none(*pmd))
			return NULL;
		pte = pte_offset_kernel(pmd, virt);
		set_pte(pte, __pte((phys & mask) | PROT_DEVICE_nGnRE));
	} else {
		set_pmd(pmd, __pmd((phys & mask) | PROT_SECT_DEVICE_nGnRE));
	}

	return (void __iomem *)((virt & mask) + (phys & ~mask));
}
#endif

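/*
 * Map all memblock memory regions into the kernel linear mapping.
 */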
static void __init map_mem(void)
{
	struct memblock_region *reg;
	phys_addr_t limit;

	/*
	 * Temporarily limit the memblock range. We need to do this as
	 * create_mapping requires puds, pmds and ptes to be allocated from
	 * memory addressable from the initial direct kernel mapping.
	 *
	 * The initial direct kernel mapping, located at swapper_pg_dir,
	 * gives us PGDIR_SIZE memory starting from PHYS_OFFSET (which must be
	 * aligned to 2MB as per Documentation/arm64/booting.txt).
	 */
	limit = PHYS_OFFSET + PGDIR_SIZE;
	memblock_set_current_limit(limit);

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;

#ifndef CONFIG_ARM64_64K_PAGES
		/*
		 * For the first memory bank align the start address and
		 * current memblock limit to prevent create_mapping() from
		 * allocating pte page tables from unmapped memory.
		 * When 64K pages are enabled, the pte page table for the
		 * first PGDIR_SIZE is already present in swapper_pg_dir.
		 */
		if (start < limit)
			start = ALIGN(start, PMD_SIZE);
		if (end < limit) {
			limit = end & PMD_MASK;
			memblock_set_current_limit(limit);
		}
#endif

		create_mapping(start, __phys_to_virt(start), end - start);
	}

	/* Limit no longer required. */
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	void *zero_page;

	init_mem_pgprot();
	map_mem();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state.
	 */
	flush_cache_all();
	flush_tlb_all();

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to the zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	flush_tlb_all();
}

/*
 * Enable the identity mapping to allow the MMU to be disabled.
 */
void setup_mm_for_reboot(void)
{
	cpu_switch_mm(idmap_pg_dir, &init_mm);
	flush_tlb_all();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
#ifdef CONFIG_ARM64_64K_PAGES
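/*
 * With 64K pages, the vmemmap is backed by individual base pages.
 */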
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !CONFIG_ARM64_64K_PAGES */
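/*
 * With 4K pages, the vmemmap is backed by PMD_SIZE block allocations mapped
 * with section entries, one section at a time.
 */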
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | prot_sect_kernel));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* CONFIG_ARM64_64K_PAGES */
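/*
 * No-op: the vmemmap backing store is never freed, as memory hot-remove is
 * not supported.
 */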
void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */