/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

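/*
 * TCR_EL1.T0SZ value used while the identity map is installed; it defaults
 * to covering VA_BITS of address space.
 */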
u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

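/*
 * Allocate a zeroed page from memblock for use as an early page table. The
 * page is cleared through the fixmap (the linear map may not cover it yet)
 * and its physical address is returned.
 */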
static phys_addr_t __init early_pgtable_alloc(void)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	BUG_ON(!phys);

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
	 * any level of table.
	 */
	ptr = pte_set_fixmap(phys);

	memset(ptr, 0, PAGE_SIZE);

	/*
	 * Implicit barriers also ensure the zeroed page is visible to the page
	 * table walker
	 */
	pte_clear_fixmap();

	return phys;
}

/*
 * remap a PMD into pages
 */
static void split_pmd(pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = pmd_pfn(*pmd);
	int i = 0;

	do {
		/*
		 * Need to have the least restrictive permissions available
		 * permissions will be fixed up later
		 */
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
		pfn++;
	} while (pte++, i++, i < PTRS_PER_PTE);
}

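/*
 * Populate the pte level below *pmd for [addr, end), allocating (or splitting
 * an existing section into) a pte table first if necessary. pgtable_alloc()
 * is only required when a new table page actually has to be allocated.
 */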
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
			   unsigned long end, unsigned long pfn,
			   pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
{
	pte_t *pte;

	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
		phys_addr_t pte_phys;
		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc();
		pte = pte_set_fixmap(pte_phys);
		if (pmd_sect(*pmd))
			split_pmd(pmd, pte);
		__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
		flush_tlb_all();
		pte_clear_fixmap();
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_set_fixmap_offset(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}

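/*
 * Remap a 1GB pud block as a table of pmd sections covering the same range
 * with the same attributes.
 */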
static void split_pud(pud_t *old_pud, pmd_t *pmd)
{
	unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
	pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
	int i = 0;

	do {
		set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
		addr += PMD_SIZE;
	} while (pmd++, i++, i < PTRS_PER_PMD);
}

static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
		phys_addr_t pmd_phys;
		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc();
		pmd = pmd_set_fixmap(pmd_phys);
		if (pud_sect(*pud)) {
			/*
			 * need to have the 1G of mappings continue to be
			 * present
			 */
			split_pud(pud, pmd);
		}
		__pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
		flush_tlb_all();
		pmd_clear_fixmap();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_set_fixmap_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
			pmd_t old_pmd = *pmd;
			set_pmd(pmd, __pmd(phys |
					   pgprot_val(mk_sect_prot(prot))));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
					phys_addr_t table = pmd_page_paddr(old_pmd);
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, pgtable_alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);

	pmd_clear_fixmap();
}

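/*
 * A 1GB block mapping can only be used with the 4K granule, and only when
 * the virtual range and the physical address are all aligned to PUD_SIZE.
 */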
static inline bool use_1G_block(unsigned long addr, unsigned long next,
				unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}

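/*
 * Fill in the pud level for [addr, end), using 1GB blocks where possible and
 * falling back to alloc_init_pmd() otherwise.
 */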
static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		phys_addr_t pud_phys;
		BUG_ON(!pgtable_alloc);
		pud_phys = pgtable_alloc();
		__pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_set_fixmap_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys)) {
			pud_t old_pud = *pud;
			set_pud(pud, __pud(phys |
					   pgprot_val(mk_sect_prot(prot))));

			/*
			 * If we have an old value for a pud, it will
			 * be pointing to a pmd table that we no longer
			 * need (from swapper_pg_dir).
			 *
			 * Look up the old pmd table and free it.
			 */
			if (!pud_none(old_pud)) {
				flush_tlb_all();
				if (pud_table(old_pud)) {
					phys_addr_t table = pud_page_paddr(old_pud);
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pmd(pud, addr, next, phys, prot,
				       pgtable_alloc);
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);

	pud_clear_fixmap();
}

/*
 * Create the page directory entries and any necessary page tables for the
 * mapping specified by 'phys', 'virt' and 'size'.
 */
static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt,
		     phys_addr_t size, pgprot_t prot,
		     phys_addr_t (*pgtable_alloc)(void))
{
	unsigned long addr, length, end, next;

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}

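/*
 * Page table allocator used once the core memory allocators are up, i.e. for
 * mappings created after paging_init() (see create_pgd_mapping() and
 * create_mapping_late() below).
 */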
static phys_addr_t late_pgtable_alloc(void)
{
	void *ptr = (void *)__get_free_page(PGALLOC_GFP);
	BUG_ON(!ptr);

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return __pa(ptr);
}

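/*
 * Look up the pgd entry covering 'virt' in 'pgdir' and hand off to init_pgd()
 * to build the rest of the mapping.
 */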
static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*alloc)(void))
{
	init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc);
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
					  phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
			     NULL);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot)
{
	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
			     late_pgtable_alloc);
}

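/*
 * Used after paging_init() to update permissions on existing kernel mappings,
 * e.g. by mark_rodata_ro() and fixup_init() below.
 */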
static void create_mapping_late(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
			     late_pgtable_alloc);
}

static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
{
	unsigned long kernel_start = __pa(_stext);
	unsigned long kernel_end = __pa(_end);

	/*
	 * The kernel itself is mapped at page granularity. Map all other
	 * memory, making sure we don't overwrite the existing kernel mappings.
	 */

	/* No overlap with the kernel. */
	if (end < kernel_start || start >= kernel_end) {
		__create_pgd_mapping(pgd, start, __phys_to_virt(start),
				     end - start, PAGE_KERNEL,
				     early_pgtable_alloc);
		return;
	}

	/*
	 * This block overlaps the kernel mapping. Map the portion(s) which
	 * don't overlap.
	 */
	if (start < kernel_start)
		__create_pgd_mapping(pgd, start,
				     __phys_to_virt(start),
				     kernel_start - start, PAGE_KERNEL,
				     early_pgtable_alloc);
	if (kernel_end < end)
		__create_pgd_mapping(pgd, kernel_end,
				     __phys_to_virt(kernel_end),
				     end - kernel_end, PAGE_KERNEL,
				     early_pgtable_alloc);
}

static void __init map_mem(pgd_t *pgd)
{
	struct memblock_region *reg;

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;
		if (memblock_is_nomap(reg))
			continue;

		__map_memblock(pgd, start, end);
	}
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
	create_mapping_late(__pa(_stext), (unsigned long)_stext,
			    (unsigned long)_etext - (unsigned long)_stext,
			    PAGE_KERNEL_ROX);
}
#endif

void fixup_init(void)
{
	create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
			    (unsigned long)__init_end - (unsigned long)__init_begin,
			    PAGE_KERNEL);
}

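/*
 * Map a single [va_start, va_end) chunk of the kernel image with the given
 * permissions; both the physical start and the size must be page aligned.
 */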
static void __init map_kernel_chunk(pgd_t *pgd, void *va_start, void *va_end,
				    pgprot_t prot)
{
	phys_addr_t pa_start = __pa(va_start);
	unsigned long size = va_end - va_start;

	BUG_ON(!PAGE_ALIGNED(pa_start));
	BUG_ON(!PAGE_ALIGNED(size));

	__create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
			     early_pgtable_alloc);
}

/*
 * Create fine-grained mappings for the kernel.
 */
static void __init map_kernel(pgd_t *pgd)
{
	map_kernel_chunk(pgd, _stext, _etext, PAGE_KERNEL_EXEC);
	map_kernel_chunk(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC);
	map_kernel_chunk(pgd, _data, _end, PAGE_KERNEL);

	/*
	 * The fixmap falls in a separate pgd to the kernel, and doesn't live
	 * in the carveout for the swapper_pg_dir. We can simply re-use the
	 * existing dir for the fixmap.
	 */
	set_pgd(pgd_offset_raw(pgd, FIXADDR_START), *pgd_offset_k(FIXADDR_START));

	kasan_copy_shadow(pgd);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	phys_addr_t pgd_phys = early_pgtable_alloc();
	pgd_t *pgd = pgd_set_fixmap(pgd_phys);

	map_kernel(pgd);
	map_mem(pgd);

	/*
	 * We want to reuse the original swapper_pg_dir so we don't have to
	 * communicate the new address to non-coherent secondaries in
	 * secondary_entry, and so cpu_switch_mm can generate the address with
	 * adrp+add rather than a load from some global variable.
	 *
	 * To do this we need to go via a temporary pgd.
	 */
	cpu_replace_ttbr1(__va(pgd_phys));
	memcpy(swapper_pg_dir, pgd, PAGE_SIZE);
	cpu_replace_ttbr1(swapper_pg_dir);

	pgd_clear_fixmap();
	memblock_free(pgd_phys, PAGE_SIZE);

	/*
	 * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
	 * allocated with it.
	 */
	memblock_free(__pa(swapper_pg_dir) + PAGE_SIZE,
		      SWAPPER_DIR_SIZE - PAGE_SIZE);

	bootmem_init();
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_sect(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_sect(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */

static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif

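/*
 * Helpers to walk the kernel page tables down to the entry covering a fixmap
 * address. The fixmap uses the statically allocated bm_pud/bm_pmd/bm_pte
 * tables above, so these walks never need to allocate.
 */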
static inline pud_t *fixmap_pud(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);

	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

	return pud_offset(pgd, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
	pud_t *pud = fixmap_pud(addr);

	BUG_ON(pud_none(*pud) || pud_bad(*pud));

	return pmd_offset(pud, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
	pmd_t *pmd = fixmap_pmd(addr);

	BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

	return pte_offset_kernel(pmd, addr);
}

void __init early_fixmap_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = FIXADDR_START;

	pgd = pgd_offset_k(addr);
	pgd_populate(&init_mm, pgd, bm_pud);
	pud = pud_offset(pgd, addr);
	pud_populate(&init_mm, pud, bm_pmd);
	pmd = pmd_offset(pud, addr);
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	     || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmd %p != %p, %p\n",
			pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
	}
}

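/*
 * Install the mapping for a single fixmap slot, or tear it down (and flush
 * the TLB) when pgprot_val(flags) is zero.
 */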
void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	pte = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
	}
}

void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	pgprot_t prot = PAGE_KERNEL_RO;
	int size, offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
	 * at least 8 bytes so that we can always access the size field of the
	 * FDT header after mapping the first chunk, double check here if that
	 * is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping_noalloc() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

	offset = dt_phys % SWAPPER_BLOCK_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
			       dt_virt_base, SWAPPER_BLOCK_SIZE, prot);

	if (fdt_check_header(dt_virt) != 0)
		return NULL;

	size = fdt_totalsize(dt_virt);
	if (size > MAX_FDT_SIZE)
		return NULL;

	if (offset + size > SWAPPER_BLOCK_SIZE)
		create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
				       round_up(offset + size, SWAPPER_BLOCK_SIZE), prot);

	memblock_reserve(dt_phys, size);

	return dt_virt;
}