/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>

#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

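/*
 * T0SZ value to program into TCR_EL1 while running on the ID map; head.S
 * may widen this beyond TCR_T0SZ(VA_BITS) when the idmap lies outside the
 * VA range covered by VA_BITS.
 */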
u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

/*
 * empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

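/*
 * Early page-table allocator: returns naturally aligned, zeroed memory
 * straight from memblock, for use before the core allocators are up.
 */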
static void __init *early_alloc(unsigned long sz)
{
	void *ptr = __va(memblock_alloc(sz, sz));
	BUG_ON(!ptr);
	memset(ptr, 0, sz);
	return ptr;
}

/*
 * remap a PMD into pages
 */
static void split_pmd(pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = pmd_pfn(*pmd);
	int i = 0;

	do {
		/*
		 * Need to have the least restrictive permissions available;
		 * permissions will be fixed up later.
		 */
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
		pfn++;
	} while (pte++, i++, i < PTRS_PER_PTE);
}

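/*
 * Populate the PTE level for [addr, end): allocate a pte table if necessary
 * (splitting any existing section mapping) and install page mappings
 * starting at pfn.
 */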
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
			   unsigned long end, unsigned long pfn,
			   pgprot_t prot,
			   void *(*alloc)(unsigned long size))
{
	pte_t *pte;

	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
		pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
		if (pmd_sect(*pmd))
			split_pmd(pmd, pte);
		__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
		flush_tlb_all();
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

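/*
 * remap a PUD into PMDs, preserving the output address and attributes of
 * the original block mapping
 */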
void split_pud(pud_t *old_pud, pmd_t *pmd)
{
	unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
	pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
	int i = 0;

	do {
		set_pmd(pmd, __pmd(addr | prot));
		addr += PMD_SIZE;
	} while (pmd++, i++, i < PTRS_PER_PMD);
}

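/*
 * Populate the PMD level for [addr, end), using section mappings where the
 * addresses and size allow it and falling back to ptes otherwise. Any
 * existing PUD-level section mapping is first split into pmds.
 */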
static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
			   unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   void *(*alloc)(unsigned long size))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
		pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
		if (pud_sect(*pud)) {
			/*
			 * The existing 1G mapping must remain present
			 * while we repopulate it at the pmd level.
			 */
			split_pud(pud, pmd);
		}
		pud_populate(mm, pud, pmd);
		flush_tlb_all();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
			pmd_t old_pmd = *pmd;
			set_pmd(pmd, __pmd(phys |
					   pgprot_val(mk_sect_prot(prot))));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
					phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
}

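/*
 * Only use a 1GB (PUD-level) block with the 4K granule, and only when the
 * virtual range and its physical backing are all 1GB aligned.
 */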
static inline bool use_1G_block(unsigned long addr, unsigned long next,
				unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}

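/*
 * Populate the PUD level for [addr, end), putting down 1GB blocks where
 * use_1G_block() allows and descending to the PMD level otherwise.
 */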
static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
			   unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   void *(*alloc)(unsigned long size))
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pud = alloc(PTRS_PER_PUD * sizeof(pud_t));
		pgd_populate(mm, pgd, pud);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys)) {
			pud_t old_pud = *pud;
			set_pud(pud, __pud(phys |
					   pgprot_val(mk_sect_prot(prot))));

			/*
			 * If we have an old value for a pud, it will
			 * be pointing to a pmd table that we no longer
			 * need (from swapper_pg_dir).
			 *
			 * Look up the old pmd table and free it.
			 */
			if (!pud_none(old_pud)) {
				flush_tlb_all();
				if (pud_table(old_pud)) {
					phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc);
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

/*
 * Create the page directory entries and any necessary page tables for the
 * mapping of [phys, phys + size) at virtual address virt.
 */
static void __create_mapping(struct mm_struct *mm, pgd_t *pgd,
			     phys_addr_t phys, unsigned long virt,
			     phys_addr_t size, pgprot_t prot,
			     void *(*alloc)(unsigned long size))
{
	unsigned long addr, length, end, next;

	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}

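/*
 * Page-table allocator for mappings created after boot, once the page
 * allocator is available; a single page suffices for any table level.
 */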
static void *late_alloc(unsigned long size)
{
	void *ptr;

	BUG_ON(size > PAGE_SIZE);
	ptr = (void *)__get_free_page(PGALLOC_GFP);
	BUG_ON(!ptr);
	return ptr;
}

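/*
 * Early wrapper around __create_mapping(): operates on init_mm and pulls
 * table pages from memblock via early_alloc().
 */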
static void __ref create_mapping(phys_addr_t phys, unsigned long virt,
				 phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
			 size, prot, early_alloc);
}

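/*
 * Install a mapping in a caller-supplied set of page tables rather than
 * init_mm, allocating intermediate tables with late_alloc().
 */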
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot)
{
	__create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
			 late_alloc);
}

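/*
 * Like create_mapping(), but for use once the page allocator is up:
 * intermediate tables come from late_alloc().
 */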
static void create_mapping_late(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	return __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK),
				phys, virt, size, prot, late_alloc);
}

#ifdef CONFIG_DEBUG_RODATA
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	/*
	 * Set up the executable regions using the existing section mappings
	 * for now. This will get more fine grained later once all memory
	 * is mapped.
	 */
	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);

	if (end < kernel_x_start) {
		create_mapping(start, __phys_to_virt(start),
			       end - start, PAGE_KERNEL);
	} else if (start >= kernel_x_end) {
		create_mapping(start, __phys_to_virt(start),
			       end - start, PAGE_KERNEL);
	} else {
		if (start < kernel_x_start)
			create_mapping(start, __phys_to_virt(start),
				       kernel_x_start - start,
				       PAGE_KERNEL);
		create_mapping(kernel_x_start,
			       __phys_to_virt(kernel_x_start),
			       kernel_x_end - kernel_x_start,
			       PAGE_KERNEL_EXEC);
		if (kernel_x_end < end)
			create_mapping(kernel_x_end,
				       __phys_to_virt(kernel_x_end),
				       end - kernel_x_end,
				       PAGE_KERNEL);
	}
}
#else
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	create_mapping(start, __phys_to_virt(start), end - start,
		       PAGE_KERNEL_EXEC);
}
#endif

static void __init map_mem(void)
{
	struct memblock_region *reg;
	phys_addr_t limit;

	/*
	 * Temporarily limit the memblock range. We need to do this as
	 * create_mapping requires puds, pmds and ptes to be allocated from
	 * memory addressable from the initial direct kernel mapping.
	 *
	 * The initial direct kernel mapping, located at swapper_pg_dir, gives
	 * us PUD_SIZE (4K pages) or PMD_SIZE (64K pages) memory starting from
	 * PHYS_OFFSET (which must be aligned to 2MB as per
	 * Documentation/arm64/booting.txt).
	 */
	if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
		limit = PHYS_OFFSET + PMD_SIZE;
	else
		limit = PHYS_OFFSET + PUD_SIZE;
	memblock_set_current_limit(limit);

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;

#ifndef CONFIG_ARM64_64K_PAGES
		/*
		 * For the first memory bank align the start address and
		 * current memblock limit to prevent create_mapping() from
		 * allocating pte page tables from unmapped memory. When
		 * 64K pages are enabled, the pte page table for the first
		 * PGDIR_SIZE is already present in swapper_pg_dir.
		 */
		if (start < limit)
			start = ALIGN(start, PMD_SIZE);
		if (end < limit) {
			limit = end & PMD_MASK;
			memblock_set_current_limit(limit);
		}
#endif
		__map_memblock(start, end);
	}

	/* Limit no longer required. */
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}

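/*
 * With DEBUG_RODATA, __map_memblock() mapped the kernel text with section
 * granularity; remap the unaligned edges around _stext and __init_end with
 * page granularity so the surrounding data is not left executable.
 */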
void __init fixup_executable(void)
{
#ifdef CONFIG_DEBUG_RODATA
	/* now that we are actually fully mapped, make the start/end more fine grained */
	if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
		unsigned long aligned_start = round_down(__pa(_stext),
							 SECTION_SIZE);

		create_mapping(aligned_start, __phys_to_virt(aligned_start),
			       __pa(_stext) - aligned_start,
			       PAGE_KERNEL);
	}

	if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) {
		unsigned long aligned_end = round_up(__pa(__init_end),
						     SECTION_SIZE);
		create_mapping(__pa(__init_end), (unsigned long)__init_end,
			       aligned_end - __pa(__init_end),
			       PAGE_KERNEL);
	}
#endif
}

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
	create_mapping_late(__pa(_stext), (unsigned long)_stext,
			    (unsigned long)_etext - (unsigned long)_stext,
			    PAGE_KERNEL_EXEC | PTE_RDONLY);
}
#endif

void fixup_init(void)
{
	create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
			    (unsigned long)__init_end - (unsigned long)__init_begin,
			    PAGE_KERNEL);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	void *zero_page;

	map_mem();
	fixup_executable();

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	flush_tlb_all();
	cpu_set_default_tcr_t0sz();
}

/*
 * Enable the identity mapping to allow the MMU disabling.
 */
void setup_mm_for_reboot(void)
{
	cpu_set_reserved_ttbr0();
	flush_tlb_all();
	cpu_set_idmap_tcr_t0sz();
	cpu_switch_mm(idmap_pg_dir, &init_mm);
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_sect(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_sect(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#ifdef CONFIG_ARM64_64K_PAGES
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !CONFIG_ARM64_64K_PAGES */
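/*
 * With 4K pages, back the vmemmap with 2MB section mappings, allocating
 * the struct page storage in PMD_SIZE blocks.
 */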
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* CONFIG_ARM64_64K_PAGES */
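/*
 * No-op: the vmemmap is never torn down here (memory hot-remove is not
 * supported).
 */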
void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */

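/*
 * Statically allocated page tables backing the fixmap region, so that
 * early_fixmap_init() can run before any page-table allocator exists.
 * Levels folded away by the current CONFIG_PGTABLE_LEVELS are omitted.
 */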
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif

static inline pud_t *fixmap_pud(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);

	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

	return pud_offset(pgd, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
	pud_t *pud = fixmap_pud(addr);

	BUG_ON(pud_none(*pud) || pud_bad(*pud));

	return pmd_offset(pud, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
	pmd_t *pmd = fixmap_pmd(addr);

	BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

	return pte_offset_kernel(pmd, addr);
}

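/*
 * Wire the statically allocated bm_* tables into swapper_pg_dir for the
 * fixmap region. Only the table levels are hooked up here; the individual
 * slots are mapped later through __set_fixmap().
 */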
void __init early_fixmap_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = FIXADDR_START;

	pgd = pgd_offset_k(addr);
	pgd_populate(&init_mm, pgd, bm_pud);
	pud = pud_offset(pgd, addr);
	pud_populate(&init_mm, pud, bm_pmd);
	pmd = pmd_offset(pud, addr);
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	     || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmd %p != %p, %p\n",
			pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
	}
}

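/*
 * Map a single fixmap slot to a physical address, or unmap it again when
 * called with an empty pgprot.
 */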
void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	pte = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}

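/*
 * Map the FDT through the fixmap region so that it remains accessible after
 * paging_init(). Returns the new virtual address of the FDT, or NULL if the
 * physical address is missing, insufficiently aligned, or does not point at
 * a valid FDT header.
 */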
void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	pgprot_t prot = PAGE_KERNEL | PTE_RDONLY;
	int granularity, size, offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
	 * at least 8 bytes so that we can always access the size field of the
	 * FDT header after mapping the first chunk, double check here if that
	 * is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	if (IS_ENABLED(CONFIG_ARM64_64K_PAGES)) {
		BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> PMD_SHIFT !=
			     __fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT);

		granularity = PAGE_SIZE;
	} else {
		BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> PUD_SHIFT !=
			     __fix_to_virt(FIX_BTMAP_BEGIN) >> PUD_SHIFT);

		granularity = PMD_SIZE;
	}

	offset = dt_phys % granularity;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping(round_down(dt_phys, granularity), dt_virt_base,
		       granularity, prot);

	if (fdt_check_header(dt_virt) != 0)
		return NULL;

	size = fdt_totalsize(dt_virt);
	if (size > MAX_FDT_SIZE)
		return NULL;

	if (offset + size > granularity)
		create_mapping(round_down(dt_phys, granularity), dt_virt_base,
			       round_up(offset + size, granularity), prot);

	memblock_reserve(dt_phys, size);

	return dt_virt;
}