/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cache.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>

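/*
 * T0SZ value programmed into TCR_EL1 for the identity map in TTBR0.
 * head.S lowers this value (widening the idmap VA range) when system
 * RAM is located above the range covered by the default VA_BITS.
 */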
u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);

/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

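/*
 * Statically allocated page-table pages backing the fixmap region.
 * Living in .bss, they can be wired up by early_fixmap_init() before
 * any memory allocator is available.
 */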
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

static phys_addr_t __init early_pgtable_alloc(void)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
	 * any level of table.
	 */
	ptr = pte_set_fixmap(phys);

	memset(ptr, 0, PAGE_SIZE);

	/*
	 * Implicit barriers also ensure the zeroed page is visible to the page
	 * table walker
	 */
	pte_clear_fixmap();

	return phys;
}

static bool pgattr_change_is_safe(u64 old, u64 new)
{
	/*
	 * The following mapping attributes may be updated in live
	 * kernel mappings without the need for break-before-make.
	 */
	static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE;

	return old == 0 || new == 0 || ((old ^ new) & ~mask) == 0;
}
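
/*
 * For example, flipping a live mapping between read-only and writable
 * only toggles PTE_RDONLY/PTE_WRITE and is accepted above, whereas
 * changing the memory type (attribute index) bits of a live entry would
 * require a full break-before-make sequence and is rejected.
 */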

static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
			   unsigned long end, unsigned long pfn,
			   pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
{
	pte_t *pte;

	BUG_ON(pmd_sect(*pmd));
	if (pmd_none(*pmd)) {
		phys_addr_t pte_phys;
		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc();
		pte = pte_set_fixmap(pte_phys);
		__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
		pte_clear_fixmap();
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_set_fixmap_offset(pmd, addr);
	do {
		pte_t old_pte = *pte;

		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;

		/*
		 * After the PTE entry has been populated once, we
		 * only allow updates to the permission attributes.
		 */
		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte), pte_val(*pte)));

	} while (pte++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}

static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void),
			   bool page_mappings_only)
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * No section mappings may be left at the pud level by this point;
	 * the BUG_ON below will catch any that remain.
	 */
	BUG_ON(pud_sect(*pud));
	if (pud_none(*pud)) {
		phys_addr_t pmd_phys;
		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc();
		pmd = pmd_set_fixmap(pmd_phys);
		__pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
		pmd_clear_fixmap();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_set_fixmap_offset(pud, addr);
	do {
		pmd_t old_pmd = *pmd;

		next = pmd_addr_end(addr, end);

		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
		    !page_mappings_only) {
			pmd_set_huge(pmd, phys, prot);

			/*
			 * After the PMD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
						      pmd_val(*pmd)));
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, pgtable_alloc);

			BUG_ON(pmd_val(old_pmd) != 0 &&
			       pmd_val(old_pmd) != pmd_val(*pmd));
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);

	pmd_clear_fixmap();
}

static inline bool use_1G_block(unsigned long addr, unsigned long next,
				unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}
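
/*
 * 1GB blocks only exist at level 1 of the 4KB granule (PAGE_SHIFT == 12);
 * the 16KB and 64KB granules have no level 1 block mappings, so they
 * always fall back to pmd- or pte-level mappings here.
 */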

static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void),
			   bool page_mappings_only)
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		phys_addr_t pud_phys;
		BUG_ON(!pgtable_alloc);
		pud_phys = pgtable_alloc();
		__pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_set_fixmap_offset(pgd, addr);
	do {
		pud_t old_pud = *pud;

		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys) && !page_mappings_only) {
			pud_set_huge(pud, phys, prot);

			/*
			 * After the PUD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
						      pud_val(*pud)));
		} else {
			alloc_init_pmd(pud, addr, next, phys, prot,
				       pgtable_alloc, page_mappings_only);

			BUG_ON(pud_val(old_pud) != 0 &&
			       pud_val(old_pud) != pud_val(*pud));
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);

	pud_clear_fixmap();
}

static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*pgtable_alloc)(void),
				 bool page_mappings_only)
{
	unsigned long addr, length, end, next;
	pgd_t *pgd = pgd_offset_raw(pgdir, virt);

	/*
	 * If the virtual and physical addresses don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc,
			       page_mappings_only);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}
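
/*
 * When page_mappings_only is set (e.g. under debug_pagealloc, which needs
 * to change permissions at page granularity later on), the walkers above
 * skip the block-mapping shortcuts and map everything with pte-level
 * entries.
 */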

static phys_addr_t pgd_pgtable_alloc(void)
{
	void *ptr = (void *)__get_free_page(PGALLOC_GFP);
	if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
		BUG();

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return __pa(ptr);
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
					  phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, false);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only)
{
	BUG_ON(mm == &init_mm);

	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
			     pgd_pgtable_alloc, page_mappings_only);
}

static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
			     NULL, debug_pagealloc_enabled());

	/* flush the TLBs after updating live kernel mappings */
	flush_tlb_kernel_range(virt, virt + size);
}
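
/*
 * No break-before-make is needed when updating permissions this way:
 * callers may only change attributes that pgattr_change_is_safe()
 * accepts (the walkers BUG otherwise), so flushing the TLBs after the
 * update is sufficient.
 */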

static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
{
	phys_addr_t kernel_start = __pa_symbol(_text);
	phys_addr_t kernel_end = __pa_symbol(__init_begin);

	/*
	 * Take care not to create a writable alias for the
	 * read-only text and rodata sections of the kernel image.
	 */

	/* No overlap with the kernel text/rodata */
	if (end < kernel_start || start >= kernel_end) {
		__create_pgd_mapping(pgd, start, __phys_to_virt(start),
				     end - start, PAGE_KERNEL,
				     early_pgtable_alloc,
				     debug_pagealloc_enabled());
		return;
	}

	/*
	 * This block overlaps the kernel text/rodata mappings.
	 * Map the portion(s) which don't overlap.
	 */
	if (start < kernel_start)
		__create_pgd_mapping(pgd, start,
				     __phys_to_virt(start),
				     kernel_start - start, PAGE_KERNEL,
				     early_pgtable_alloc,
				     debug_pagealloc_enabled());
	if (kernel_end < end)
		__create_pgd_mapping(pgd, kernel_end,
				     __phys_to_virt(kernel_end),
				     end - kernel_end, PAGE_KERNEL,
				     early_pgtable_alloc,
				     debug_pagealloc_enabled());

	/*
	 * Map the linear alias of the [_text, __init_begin) interval
	 * as non-executable now, and remove the write permission in
	 * mark_linear_text_alias_ro() below (which will be called after
	 * alternative patching has completed). This makes the contents
	 * of the region accessible to subsystems such as hibernate,
	 * but protects it from inadvertent modification or execution.
	 */
	__create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start),
			     kernel_end - kernel_start, PAGE_KERNEL,
			     early_pgtable_alloc, debug_pagealloc_enabled());
}

void __init mark_linear_text_alias_ro(void)
{
	/*
	 * Remove the write permissions from the linear alias of .text/.rodata
	 */
	update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text),
			    (unsigned long)__init_begin - (unsigned long)_text,
			    PAGE_KERNEL_RO);
}

static void __init map_mem(pgd_t *pgd)
{
	struct memblock_region *reg;

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;
		if (memblock_is_nomap(reg))
			continue;

		__map_memblock(pgd, start, end);
	}
}
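
/*
 * Regions marked MEMBLOCK_NOMAP (e.g. carved out by firmware) are left
 * out of the linear mapping on purpose and must be accessed via
 * ioremap() or the fixmap instead.
 */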

void mark_rodata_ro(void)
{
	unsigned long section_size;

	section_size = (unsigned long)_etext - (unsigned long)_text;
	update_mapping_prot(__pa_symbol(_text), (unsigned long)_text,
			    section_size, PAGE_KERNEL_ROX);
	/*
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
	update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
			    section_size, PAGE_KERNEL_RO);

	debug_checkwx();
}

static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
				      pgprot_t prot, struct vm_struct *vma)
{
	phys_addr_t pa_start = __pa_symbol(va_start);
	unsigned long size = va_end - va_start;

	BUG_ON(!PAGE_ALIGNED(pa_start));
	BUG_ON(!PAGE_ALIGNED(size));

	__create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
			     early_pgtable_alloc, debug_pagealloc_enabled());

	vma->addr = va_start;
	vma->phys_addr = pa_start;
	vma->size = size;
	vma->flags = VM_MAP;
	vma->caller = __builtin_return_address(0);

	vm_area_add_early(vma);
}

/*
 * Create fine-grained mappings for the kernel.
 */
static void __init map_kernel(pgd_t *pgd)
{
	static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_init, vmlinux_data;

	map_kernel_segment(pgd, _text, _etext, PAGE_KERNEL_EXEC, &vmlinux_text);
	map_kernel_segment(pgd, __start_rodata, __init_begin, PAGE_KERNEL, &vmlinux_rodata);
	map_kernel_segment(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC,
			   &vmlinux_init);
	map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data);

	if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
		/*
		 * The fixmap falls in a separate pgd to the kernel, and doesn't
		 * live in the carveout for the swapper_pg_dir. We can simply
		 * re-use the existing dir for the fixmap.
		 */
		set_pgd(pgd_offset_raw(pgd, FIXADDR_START),
			*pgd_offset_k(FIXADDR_START));
	} else if (CONFIG_PGTABLE_LEVELS > 3) {
		/*
		 * The fixmap shares its top level pgd entry with the kernel
		 * mapping. This can really only occur when we are running
		 * with 16k/4 levels, so we can simply reuse the pud level
		 * entry instead.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
			__pud(__pa_symbol(bm_pmd) | PUD_TYPE_TABLE));
		pud_clear_fixmap();
	} else {
		BUG();
	}

	kasan_copy_shadow(pgd);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	phys_addr_t pgd_phys = early_pgtable_alloc();
	pgd_t *pgd = pgd_set_fixmap(pgd_phys);

	map_kernel(pgd);
	map_mem(pgd);

	/*
	 * We want to reuse the original swapper_pg_dir so we don't have to
	 * communicate the new address to non-coherent secondaries in
	 * secondary_entry, and so cpu_switch_mm can generate the address with
	 * adrp+add rather than a load from some global variable.
	 *
	 * To do this we need to go via a temporary pgd.
	 */
	cpu_replace_ttbr1(__va(pgd_phys));
	memcpy(swapper_pg_dir, pgd, PGD_SIZE);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));

	pgd_clear_fixmap();
	memblock_free(pgd_phys, PAGE_SIZE);

	/*
	 * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
	 * allocated with it.
	 */
	memblock_free(__pa_symbol(swapper_pg_dir) + PAGE_SIZE,
		      SWAPPER_DIR_SIZE - PAGE_SIZE);
}
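
/*
 * The double cpu_replace_ttbr1() dance above exists because the live
 * boot tables cannot be rewritten in place without violating
 * break-before-make: the final tables are built in a scratch pgd, TTBR1
 * is switched over, and only then is the top level copied back so that
 * swapper_pg_dir remains the tables' well-known home.
 */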

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_sect(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_sect(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
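
/*
 * Each PMD_SIZE block mapped above covers the struct pages for
 * PMD_SIZE / sizeof(struct page) * PAGE_SIZE bytes of RAM, e.g. 128MB
 * per 2MB block with 4KB pages and a 64-byte struct page.
 */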
#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */

static inline pud_t * fixmap_pud(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);

	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

	return pud_offset_kimg(pgd, addr);
}

static inline pmd_t * fixmap_pmd(unsigned long addr)
{
	pud_t *pud = fixmap_pud(addr);

	BUG_ON(pud_none(*pud) || pud_bad(*pud));

	return pmd_offset_kimg(pud, addr);
}

static inline pte_t * fixmap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). This function is called too early to use
 * lm_alias so __p*d_populate functions must be used to populate with the
 * physical address from __pa_symbol.
 */
void __init early_fixmap_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = FIXADDR_START;

	pgd = pgd_offset_k(addr);
	if (CONFIG_PGTABLE_LEVELS > 3 &&
	    !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa_symbol(bm_pud))) {
		/*
		 * We only end up here if the kernel mapping and the fixmap
		 * share the top level pgd entry, which should only happen on
		 * 16k/4 levels configurations.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		pud = pud_offset_kimg(pgd, addr);
	} else {
		if (pgd_none(*pgd))
			__pgd_populate(pgd, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
		pud = fixmap_pud(addr);
	}
	if (pud_none(*pud))
		__pud_populate(pud, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
	pmd = fixmap_pmd(addr);
	__pmd_populate(pmd, __pa_symbol(bm_pte), PMD_TYPE_TABLE);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	     || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmd %p != %p, %p\n",
			pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
}

void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	pte = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
	}
}
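
/*
 * Note the asymmetry above: installing an entry over an invalid one
 * needs no TLB maintenance, since invalid entries are never cached in
 * the TLB, but clearing a live entry must be followed by a flush so
 * that stale translations cannot be fetched.
 */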

void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	int offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. Since we rely on MIN_FDT_ALIGN being at
	 * least 8 bytes, the magic and size fields of the FDT header are
	 * always accessible after mapping the first chunk; double check here
	 * that this is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping_noalloc() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

	offset = dt_phys % SWAPPER_BLOCK_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
			       dt_virt_base, SWAPPER_BLOCK_SIZE, prot);

	if (fdt_magic(dt_virt) != FDT_MAGIC)
		return NULL;

	*size = fdt_totalsize(dt_virt);
	if (*size > MAX_FDT_SIZE)
		return NULL;

	if (offset + *size > SWAPPER_BLOCK_SIZE)
		create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
				       round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);

	return dt_virt;
}

void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
	void *dt_virt;
	int size;

	dt_virt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
	if (!dt_virt)
		return NULL;

	memblock_reserve(dt_phys, size);
	return dt_virt;
}

int __init arch_ioremap_pud_supported(void)
{
	/* only 4k granule supports level 1 block mappings */
	return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
}

int __init arch_ioremap_pmd_supported(void)
{
	return 1;
}

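/*
 * The helpers below install and remove section (block) mappings
 * directly. mk_sect_prot() converts a page-level pgprot into a
 * section-level one by clearing the bit that would otherwise mark the
 * descriptor as a table entry.
 */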
int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
{
	BUG_ON(phys & ~PUD_MASK);
	set_pud(pud, __pud(phys | PUD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
	return 1;
}

int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
{
	BUG_ON(phys & ~PMD_MASK);
	set_pmd(pmd, __pmd(phys | PMD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
	return 1;
}

int pud_clear_huge(pud_t *pud)
{
	if (!pud_sect(*pud))
		return 0;
	pud_clear(pud);
	return 1;
}

int pmd_clear_huge(pmd_t *pmd)
{
	if (!pmd_sect(*pmd))
		return 0;
	pmd_clear(pmd);
	return 1;
}