/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/cache.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/barrier.h>
#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>
#include <asm/ptdump.h>

#define NO_BLOCK_MAPPINGS	BIT(0)
#define NO_CONT_MAPPINGS	BIT(1)

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);

u64 kimage_voffset __ro_after_init;
EXPORT_SYMBOL(kimage_voffset);

/*
 * empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

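/*
 * Statically allocated page tables that back the fixmap region before the
 * normal page table allocators are available; "bm" is short for "boot
 * mapping". They are wired up in early_fixmap_init().
 */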
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

static phys_addr_t __init early_pgtable_alloc(void)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
	 * slot will be free, so we can (ab)use the FIX_PTE slot to initialise
	 * any level of table.
	 */
	ptr = pte_set_fixmap(phys);

	memset(ptr, 0, PAGE_SIZE);

	/*
	 * Implicit barriers also ensure the zeroed page is visible to the page
	 * table walker
	 */
	pte_clear_fixmap();

	return phys;
}

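/*
 * Changing certain attributes of a live mapping normally requires
 * break-before-make (BBM): the old entry must be invalidated and the TLB
 * flushed before the new entry is written. The helper below whitelists the
 * attribute changes that the architecture permits without BBM.
 */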
static bool pgattr_change_is_safe(u64 old, u64 new)
{
	/*
	 * The following mapping attributes may be updated in live
	 * kernel mappings without the need for break-before-make.
	 */
	static const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE;

	/* creating or taking down mappings is always safe */
	if (old == 0 || new == 0)
		return true;

	/* live contiguous mappings may not be manipulated at all */
	if ((old | new) & PTE_CONT)
		return false;

	/*
	 * Transitioning from Global to Non-Global is safe without
	 * break-before-make, since setting the nG bit only narrows the
	 * scope of the mapping. This is relied upon when kernel mappings
	 * are made Non-Global for kpti.
	 */
	if (((old ^ new) == PTE_NG) && (new & PTE_NG))
		return true;

	return ((old ^ new) & ~mask) == 0;
}

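/*
 * Initialise the PTEs covering [addr, end), mapping the physical range
 * starting at 'phys' with protection 'prot'. The table page is accessed
 * through the fixmap, as it may not be mapped in the linear region yet.
 */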
static void init_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot)
{
	pte_t *pte;

	pte = pte_set_fixmap_offset(pmd, addr);
	do {
		pte_t old_pte = *pte;

		set_pte(pte, pfn_pte(__phys_to_pfn(phys), prot));

		/*
		 * After the PTE entry has been populated once, we
		 * only allow updates to the permission attributes.
		 */
		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte), pte_val(*pte)));

		phys += PAGE_SIZE;
	} while (pte++, addr += PAGE_SIZE, addr != end);

	pte_clear_fixmap();
}

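/*
 * Populate a pmd entry with a table of PTEs for [addr, end), using the
 * contiguous hint (PTE_CONT) for any sub-range whose VA, PA and size are
 * all suitably aligned, unless NO_CONT_MAPPINGS is set.
 */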
static void alloc_init_cont_pte(pmd_t *pmd, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(void),
				int flags)
{
	unsigned long next;

	BUG_ON(pmd_sect(*pmd));
	if (pmd_none(*pmd)) {
		phys_addr_t pte_phys;
		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc();
		__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
	}
	BUG_ON(pmd_bad(*pmd));

	do {
		pgprot_t __prot = prot;

		next = pte_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pte(pmd, addr, next, phys, __prot);

		phys += next - addr;
	} while (addr = next, addr != end);
}

static void init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot,
		     phys_addr_t (*pgtable_alloc)(void), int flags)
{
	unsigned long next;
	pmd_t *pmd;

	pmd = pmd_set_fixmap_offset(pud, addr);
	do {
		pmd_t old_pmd = *pmd;

		next = pmd_addr_end(addr, end);

		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pmd_set_huge(pmd, phys, prot);

			/*
			 * After the PMD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
						      pmd_val(*pmd)));
		} else {
			alloc_init_cont_pte(pmd, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pmd_val(old_pmd) != 0 &&
			       pmd_val(old_pmd) != pmd_val(*pmd));
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);

	pmd_clear_fixmap();
}

static void alloc_init_cont_pmd(pud_t *pud, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(void), int flags)
{
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud.
	 */
	BUG_ON(pud_sect(*pud));
	if (pud_none(*pud)) {
		phys_addr_t pmd_phys;
		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc();
		__pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
	}
	BUG_ON(pud_bad(*pud));

	do {
		pgprot_t __prot = prot;

		next = pmd_cont_addr_end(addr, end);

		/* use a contiguous mapping if the range is suitably aligned */
		if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) &&
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);

		init_pmd(pud, addr, next, phys, __prot, pgtable_alloc, flags);

		phys += next - addr;
	} while (addr = next, addr != end);
}

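/*
 * A 1GiB block mapping at the PUD level is only possible with the 4K
 * granule (PAGE_SHIFT == 12), and addr, next and phys must all be
 * PUD-aligned for the block to be usable.
 */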
static inline bool use_1G_block(unsigned long addr, unsigned long next,
				unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}

static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void),
			   int flags)
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		phys_addr_t pud_phys;
		BUG_ON(!pgtable_alloc);
		pud_phys = pgtable_alloc();
		__pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_set_fixmap_offset(pgd, addr);
	do {
		pud_t old_pud = *pud;

		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys) &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
			pud_set_huge(pud, phys, prot);

			/*
			 * After the PUD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
						      pud_val(*pud)));
		} else {
			alloc_init_cont_pmd(pud, addr, next, phys, prot,
					    pgtable_alloc, flags);

			BUG_ON(pud_val(old_pud) != 0 &&
			       pud_val(old_pud) != pud_val(*pud));
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);

	pud_clear_fixmap();
}

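/*
 * Create or update mappings covering [virt, virt + size) -> phys in the
 * page table rooted at 'pgdir'. 'pgtable_alloc' provides backing pages for
 * any new tables; it may be NULL when no allocation is permitted. For
 * example (hypothetical values), a 2MiB region of normal memory could be
 * mapped at early boot with:
 *
 *	__create_pgd_mapping(init_mm.pgd, phys, virt, SZ_2M, PAGE_KERNEL,
 *			     early_pgtable_alloc, 0);
 */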
static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*pgtable_alloc)(void),
				 int flags)
{
	unsigned long addr, length, end, next;
	pgd_t *pgd = pgd_offset_raw(pgdir, virt);

	/*
	 * If the virtual and physical address don't have the same offset
	 * within a page, we cannot map the region as the caller expects.
	 */
	if (WARN_ON((phys ^ virt) & ~PAGE_MASK))
		return;

	phys &= PAGE_MASK;
	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc,
			       flags);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}

static phys_addr_t pgd_pgtable_alloc(void)
{
	void *ptr = (void *)__get_free_page(PGALLOC_GFP);

	if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
		BUG();

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return __pa(ptr);
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
 * creation of new section or page entries.
 */
static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
					  phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot, bool page_mappings_only)
{
	int flags = 0;

	BUG_ON(mm == &init_mm);

	if (page_mappings_only)
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
			     pgd_pgtable_alloc, flags);
}

static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL,
			     NO_CONT_MAPPINGS);

	/* flush the TLBs after updating live kernel mappings */
	flush_tlb_kernel_range(virt, virt + size);
}

static void __init __map_memblock(pgd_t *pgd, phys_addr_t start,
				  phys_addr_t end, pgprot_t prot, int flags)
{
	__create_pgd_mapping(pgd, start, __phys_to_virt(start), end - start,
			     prot, early_pgtable_alloc, flags);
}

void __init mark_linear_text_alias_ro(void)
{
	/*
	 * Remove the write permissions from the linear alias of .text/.rodata
	 */
	update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text),
			    (unsigned long)__init_begin - (unsigned long)_text,
			    PAGE_KERNEL_RO);
}

static void __init map_mem(pgd_t *pgd)
{
	phys_addr_t kernel_start = __pa_symbol(_text);
	phys_addr_t kernel_end = __pa_symbol(__init_begin);
	struct memblock_region *reg;
	int flags = 0;

	if (debug_pagealloc_enabled())
		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;

	/*
	 * Take care not to create a writable alias for the read-only text
	 * and rodata sections of the kernel image: temporarily mark them as
	 * NOMAP so that the loop below skips them. They are mapped
	 * separately once the other memblocks have been dealt with.
	 */
	memblock_mark_nomap(kernel_start, kernel_end - kernel_start);
#ifdef CONFIG_KEXEC_CORE
	if (crashk_res.end)
		memblock_mark_nomap(crashk_res.start,
				    resource_size(&crashk_res));
#endif

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;
		if (memblock_is_nomap(reg))
			continue;

		__map_memblock(pgd, start, end, PAGE_KERNEL, flags);
	}

	/*
	 * Map the linear alias of the [_text, __init_begin) interval
	 * as non-executable now, and remove the write permission in
	 * mark_linear_text_alias_ro() below (which will be called after
	 * alternative patching has completed). This makes the contents
	 * of the region accessible to subsystems such as hibernate,
	 * but protects it from inadvertent modification or execution.
	 * Note that contiguous mappings cannot be remapped in this way,
	 * so we should avoid them here.
	 */
	__map_memblock(pgd, kernel_start, kernel_end,
		       PAGE_KERNEL, NO_CONT_MAPPINGS);
	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);

#ifdef CONFIG_KEXEC_CORE
	/*
	 * Use page-level mappings here so that we can shrink the region
	 * at page granularity and return unused memory to the buddy
	 * allocator through the /sys/kernel/kexec_crash_size interface.
	 */
	if (crashk_res.end) {
		__map_memblock(pgd, crashk_res.start, crashk_res.end + 1,
			       PAGE_KERNEL,
			       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
		memblock_clear_nomap(crashk_res.start,
				     resource_size(&crashk_res));
	}
#endif
}

void mark_rodata_ro(void)
{
	unsigned long section_size;

	/*
	 * Mark .rodata as read-only. Use __init_begin rather than
	 * __end_rodata so that the NOTES and EXCEPTION_TABLE sections are
	 * covered as well.
	 */
	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
	update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata,
			    section_size, PAGE_KERNEL_RO);

	debug_checkwx();
}

static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
				      pgprot_t prot, struct vm_struct *vma,
				      int flags, unsigned long vm_flags)
{
	phys_addr_t pa_start = __pa_symbol(va_start);
	unsigned long size = va_end - va_start;

	BUG_ON(!PAGE_ALIGNED(pa_start));
	BUG_ON(!PAGE_ALIGNED(size));

	__create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
			     early_pgtable_alloc, flags);

	if (!(vm_flags & VM_NO_GUARD))
		size += PAGE_SIZE;

	vma->addr	= va_start;
	vma->phys_addr	= pa_start;
	vma->size	= size;
	vma->flags	= VM_MAP | vm_flags;
	vma->caller	= __builtin_return_address(0);

	vm_area_add_early(vma);
}

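/*
 * Handle the "rodata=" kernel command line option: rodata=off leaves the
 * kernel text and rodata writable (see the uses of rodata_enabled below).
 */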
static int __init parse_rodata(char *arg)
{
	return strtobool(arg, &rodata_enabled);
}
early_param("rodata", parse_rodata);

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
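/*
 * With CONFIG_UNMAP_KERNEL_AT_EL0 (kpti), the kernel is unmapped while
 * running in userspace and only a small exception-entry trampoline stays
 * mapped, in its own page table (tramp_pg_dir).
 */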
static int __init map_entry_trampoline(void)
{
	extern char __entry_tramp_text_start[];

	pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;
	phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start);

	/* The trampoline is always mapped and can therefore be global */
	pgprot_val(prot) &= ~PTE_NG;

	/* Map only the text into the trampoline page table */
	memset(tramp_pg_dir, 0, PGD_SIZE);
	__create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, PAGE_SIZE,
			     prot, pgd_pgtable_alloc, 0);

	/* Map both the text and data into the kernel page table */
	__set_fixmap(FIX_ENTRY_TRAMP_TEXT, pa_start, prot);
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern char __entry_tramp_data_start[];

		__set_fixmap(FIX_ENTRY_TRAMP_DATA,
			     __pa_symbol(__entry_tramp_data_start),
			     PAGE_KERNEL_RO);
	}

	return 0;
}
core_initcall(map_entry_trampoline);
#endif

/*
 * Create fine-grained mappings for the kernel.
 */
static void __init map_kernel(pgd_t *pgd)
{
	static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
				vmlinux_initdata, vmlinux_data;

	/*
	 * External debuggers may need to write directly to the text
	 * mapping to install SW breakpoints. Allow this (only) when
	 * explicitly requested with rodata=off.
	 */
	pgprot_t text_prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC;

	/*
	 * Only rodata will be remapped with different permissions later on;
	 * all other segments are allowed to use contiguous mappings.
	 */
	map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0,
			   VM_NO_GUARD);
	map_kernel_segment(pgd, __start_rodata, __inittext_begin, PAGE_KERNEL,
			   &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
	map_kernel_segment(pgd, __inittext_begin, __inittext_end, text_prot,
			   &vmlinux_inittext, 0, VM_NO_GUARD);
	map_kernel_segment(pgd, __initdata_begin, __initdata_end, PAGE_KERNEL,
			   &vmlinux_initdata, 0, VM_NO_GUARD);
	map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);

	if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
		/*
		 * The fixmap falls in a separate pgd to the kernel, and doesn't
		 * live in the carveout for the swapper_pg_dir. We can simply
		 * re-use the existing dir for the fixmap.
		 */
		set_pgd(pgd_offset_raw(pgd, FIXADDR_START),
			*pgd_offset_k(FIXADDR_START));
	} else if (CONFIG_PGTABLE_LEVELS > 3) {
		/*
		 * The fixmap shares its top level pgd entry with the kernel
		 * mapping. This can really only occur when we are running
		 * with 16k/4 levels, so we can simply reuse the pud level
		 * entry instead.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		set_pud(pud_set_fixmap_offset(pgd, FIXADDR_START),
			__pud(__pa_symbol(bm_pmd) | PUD_TYPE_TABLE));
		pud_clear_fixmap();
	} else {
		BUG();
	}

	kasan_copy_shadow(pgd);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	phys_addr_t pgd_phys = early_pgtable_alloc();
	pgd_t *pgd = pgd_set_fixmap(pgd_phys);

	map_kernel(pgd);
	map_mem(pgd);

	/*
	 * We want to reuse the original swapper_pg_dir so we don't have to
	 * communicate the new address to non-coherent secondaries in
	 * secondary_entry, and so cpu_switch_mm can generate the address with
	 * adrp+add rather than a load from some global variable.
	 *
	 * To do this we need to go via a temporary pgd.
	 */
	cpu_replace_ttbr1(__va(pgd_phys));
	memcpy(swapper_pg_dir, pgd, PGD_SIZE);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));

	pgd_clear_fixmap();
	memblock_free(pgd_phys, PAGE_SIZE);

	/*
	 * We only reuse the PGD from the swapper_pg_dir, not the pud + pmd
	 * allocated with it.
	 */
	memblock_free(__pa_symbol(swapper_pg_dir) + PAGE_SIZE,
		      SWAPPER_DIR_SIZE - PAGE_SIZE);
}

/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_sect(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_sect(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
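/*
 * Populate the vmemmap (the virtually contiguous array of struct page):
 * with base pages when the swapper itself uses page mappings, and with
 * PMD-sized section mappings otherwise, which avoids one level of table.
 */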
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */

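/*
 * Walkers for the fixmap tables. These use the kernel-image (_kimg)
 * accessors, since they may be called before the linear map is available.
 */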
static inline pud_t *fixmap_pud(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);

	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

	return pud_offset_kimg(pgd, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
	pud_t *pud = fixmap_pud(addr);

	BUG_ON(pud_none(*pud) || pud_bad(*pud));

	return pmd_offset_kimg(pud, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

/*
 * The p*d_populate functions call virt_to_phys implicitly, so they can't be
 * used directly on kernel symbols (bm_p*d). This function is called too
 * early to use lm_alias, so the __p*d_populate functions must be used
 * instead, with the physical address taken from __pa_symbol.
 */
void __init early_fixmap_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = FIXADDR_START;

	pgd = pgd_offset_k(addr);
	if (CONFIG_PGTABLE_LEVELS > 3 &&
	    !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa_symbol(bm_pud))) {
		/*
		 * We only end up here if the kernel mapping and the fixmap
		 * share the top level pgd entry, which should only happen on
		 * 16k/4 levels configurations.
		 */
		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
		pud = pud_offset_kimg(pgd, addr);
	} else {
		if (pgd_none(*pgd))
			__pgd_populate(pgd, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
		pud = fixmap_pud(addr);
	}
	if (pud_none(*pud))
		__pud_populate(pud, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
	pmd = fixmap_pmd(addr);
	__pmd_populate(pmd, __pa_symbol(bm_pte), PMD_TYPE_TABLE);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	    || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmd %p != %p, %p\n",
			pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
}

/*
 * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
 * ever need to use IPIs for TLB broadcasting, then we're in trouble here.
 */
void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	pte = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}

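/*
 * Map the flattened device tree through the FIX_FDT fixmap slot so that it
 * can be inspected before the linear map is available. Returns the virtual
 * address of the FDT, or NULL if the blob is missing, misaligned, malformed
 * or too large.
 */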
void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	int offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. We rely on MIN_FDT_ALIGN being at least
	 * 8 bytes so that we can always access the magic and size fields of
	 * the FDT header after mapping the first chunk; double-check that
	 * here.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping_noalloc() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

	offset = dt_phys % SWAPPER_BLOCK_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
			       dt_virt_base, SWAPPER_BLOCK_SIZE, prot);

	if (fdt_magic(dt_virt) != FDT_MAGIC)
		return NULL;

	*size = fdt_totalsize(dt_virt);
	if (*size > MAX_FDT_SIZE)
		return NULL;

	if (offset + *size > SWAPPER_BLOCK_SIZE)
		create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
				       round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);

	return dt_virt;
}

void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
	void *dt_virt;
	int size;

	dt_virt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
	if (!dt_virt)
		return NULL;

	memblock_reserve(dt_phys, size);
	return dt_virt;
}

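/*
 * Support for huge ioremap() mappings: level 1 (PUD) blocks are only
 * available with the 4K granule, while level 2 (PMD) blocks exist on all
 * granules.
 */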
int __init arch_ioremap_pud_supported(void)
{
	/* only 4k granule supports level 1 block mappings */
	return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
}

int __init arch_ioremap_pmd_supported(void)
{
	return 1;
}

int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
{
	BUG_ON(phys & ~PUD_MASK);
	set_pud(pud, __pud(phys | PUD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
	return 1;
}

int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
{
	BUG_ON(phys & ~PMD_MASK);
	set_pmd(pmd, __pmd(phys | PMD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
	return 1;
}

int pud_clear_huge(pud_t *pud)
{
	if (!pud_sect(*pud))
		return 0;
	pud_clear(pud);
	return 1;
}

int pmd_clear_huge(pmd_t *pmd)
{
	if (!pmd_sect(*pmd))
		return 0;
	pmd_clear(pmd);
	return 1;
}