/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/assembler.h>
#include <asm/boot.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>
#include <asm/virt.h>

#define __PHYS_OFFSET	(KERNEL_START - TEXT_OFFSET)

#if (TEXT_OFFSET & 0xfff) != 0
#error TEXT_OFFSET must be at least 4KB aligned
#elif (PAGE_OFFSET & 0x1fffff) != 0
#error PAGE_OFFSET must be at least 2MB aligned
#elif TEXT_OFFSET > 0x1fffff
#error TEXT_OFFSET must be less than 2MB
#endif

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address to the FDT blob.
 *
 * This code is mostly position independent so you call this at
 * __pa(PAGE_OFFSET + TEXT_OFFSET).
 *
 * Note that the callee-saved registers are used for storing variables
 * that are useful before the MMU is enabled. The allocations are described
 * in the entry routines.
 */
	__HEAD
_head:
	/*
	 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
	 */
#ifdef CONFIG_EFI
	/*
	 * This add instruction has no meaningful effect except that
	 * its opcode forms the magic "MZ" signature required by UEFI.
	 */
	add	x13, x18, #0x16
	b	stext
#else
	b	stext				// branch to kernel start, magic
	.long	0				// reserved
#endif
	le64sym	_kernel_offset_le		// Image load offset from start of RAM, little-endian
	le64sym	_kernel_size_le			// Effective size of kernel image, little-endian
	le64sym	_kernel_flags_le		// Informative flags, little-endian
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.byte	0x41				// Magic number, "ARM\x64"
	.byte	0x52
	.byte	0x4d
	.byte	0x64
#ifdef CONFIG_EFI
	.long	pe_header - _head		// Offset to the PE header.
#else
	.word	0				// reserved
#endif

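/*
 * For reference, the 64-byte header laid out above corresponds roughly to
 * the following C view (a sketch based on Documentation/arm64/booting.txt;
 * the struct name is illustrative and this snippet is not part of the build):
 *
 *	struct arm64_image_header {
 *		__le32	code0;		// executable code (or "MZ" magic + code)
 *		__le32	code1;		// executable code
 *		__le64	text_offset;	// Image load offset from start of RAM
 *		__le64	image_size;	// effective Image size
 *		__le64	flags;		// kernel flags (endianness, page size, ...)
 *		__le64	res2;		// reserved
 *		__le64	res3;		// reserved
 *		__le64	res4;		// reserved
 *		__le32	magic;		// 0x644d5241, "ARM\x64"
 *		__le32	res5;		// reserved (offset to PE header when CONFIG_EFI)
 *	};
 */
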
#ifdef CONFIG_EFI
	.align 3
pe_header:
	.ascii	"PE"
	.short	0
coff_header:
	.short	0xaa64				// AArch64
	.short	2				// nr_sections
	.long	0				// TimeDateStamp
	.long	0				// PointerToSymbolTable
	.long	1				// NumberOfSymbols
	.short	section_table - optional_header	// SizeOfOptionalHeader
	.short	0x206				// Characteristics.
						// IMAGE_FILE_DEBUG_STRIPPED |
						// IMAGE_FILE_EXECUTABLE_IMAGE |
						// IMAGE_FILE_LINE_NUMS_STRIPPED
optional_header:
	.short	0x20b				// PE32+ format
	.byte	0x02				// MajorLinkerVersion
	.byte	0x14				// MinorLinkerVersion
	.long	_end - efi_header_end		// SizeOfCode
	.long	0				// SizeOfInitializedData
	.long	0				// SizeOfUninitializedData
	.long	__efistub_entry - _head		// AddressOfEntryPoint
	.long	efi_header_end - _head		// BaseOfCode

extra_header_fields:
	.quad	0				// ImageBase
	.long	0x1000				// SectionAlignment
	.long	PECOFF_FILE_ALIGNMENT		// FileAlignment
	.short	0				// MajorOperatingSystemVersion
	.short	0				// MinorOperatingSystemVersion
	.short	0				// MajorImageVersion
	.short	0				// MinorImageVersion
	.short	0				// MajorSubsystemVersion
	.short	0				// MinorSubsystemVersion
	.long	0				// Win32VersionValue

	.long	_end - _head			// SizeOfImage

	// Everything before the kernel image is considered part of the header
	.long	efi_header_end - _head		// SizeOfHeaders
	.long	0				// CheckSum
	.short	0xa				// Subsystem (EFI application)
	.short	0				// DllCharacteristics
	.quad	0				// SizeOfStackReserve
	.quad	0				// SizeOfStackCommit
	.quad	0				// SizeOfHeapReserve
	.quad	0				// SizeOfHeapCommit
	.long	0				// LoaderFlags
	.long	0x6				// NumberOfRvaAndSizes

	.quad	0				// ExportTable
	.quad	0				// ImportTable
	.quad	0				// ResourceTable
	.quad	0				// ExceptionTable
	.quad	0				// CertificationTable
	.quad	0				// BaseRelocationTable

	// Section table
section_table:

	/*
	 * The EFI application loader requires a relocation section
	 * because EFI applications must be relocatable. This is a
	 * dummy section as far as we are concerned.
	 */
	.ascii	".reloc"
	.byte	0
	.byte	0				// end of 0 padding of section name
	.long	0
	.long	0
	.long	0				// SizeOfRawData
	.long	0				// PointerToRawData
	.long	0				// PointerToRelocations
	.long	0				// PointerToLineNumbers
	.short	0				// NumberOfRelocations
	.short	0				// NumberOfLineNumbers
	.long	0x42100040			// Characteristics (section flags)


	.ascii	".text"
	.byte	0
	.byte	0
	.byte	0				// end of 0 padding of section name
	.long	_end - efi_header_end		// VirtualSize
	.long	efi_header_end - _head		// VirtualAddress
	.long	_edata - efi_header_end		// SizeOfRawData
	.long	efi_header_end - _head		// PointerToRawData

	.long	0				// PointerToRelocations (0 for executables)
	.long	0				// PointerToLineNumbers (0 for executables)
	.short	0				// NumberOfRelocations (0 for executables)
	.short	0				// NumberOfLineNumbers (0 for executables)
	.long	0xe0500020			// Characteristics (section flags)

	/*
	 * EFI will load .text onwards at the 4k section alignment
	 * described in the PE/COFF header. To ensure that instruction
	 * sequences using an adrp and a :lo12: immediate will function
	 * correctly at this alignment, we must ensure that .text is
	 * placed at a 4k boundary in the Image to begin with.
	 */
	.align 12
efi_header_end:
#endif

	__INIT

	/*
	 * The following callee saved general purpose registers are used on the
	 * primary lowlevel boot path:
	 *
	 *  Register   Scope                      Purpose
	 *  x21        stext() .. start_kernel()  FDT pointer passed at boot in x0
	 *  x23        stext() .. start_kernel()  physical misalignment/KASLR offset
	 *  x28        __create_page_tables()     callee preserved temp register
	 *  x19/x20    __primary_switch()         callee preserved temp registers
	 */
ENTRY(stext)
	bl	preserve_boot_args
	bl	el2_setup			// Drop to EL1, w0=cpu_boot_mode
	adrp	x23, __PHYS_OFFSET
	and	x23, x23, MIN_KIMG_ALIGN - 1	// KASLR offset, defaults to 0
	bl	set_cpu_boot_mode_flag
	bl	__create_page_tables
	/*
	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
	 * details.
	 * On return, the CPU will be ready for the MMU to be turned on and
	 * the TCR will have been set.
	 */
	bl	__cpu_setup			// initialise processor
	b	__primary_switch
ENDPROC(stext)

/*
 * Preserve the arguments passed by the bootloader in x0 .. x3
 */
preserve_boot_args:
	mov	x21, x0				// x21=FDT

	adr_l	x0, boot_args			// record the contents of
	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
	stp	x2, x3, [x0, #16]

	dmb	sy				// needed before dc ivac with
						// MMU off

	add	x1, x0, #0x20			// 4 x 8 bytes
	b	__inval_cache_range		// tail call
ENDPROC(preserve_boot_args)
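
/*
 * For reference, the values saved here are consumed later from C, roughly as
 * in the sketch below (simplified from arch/arm64/kernel/setup.c; the helper
 * name is hypothetical and the snippet is not part of this file's build):
 *
 *	u64 __cacheline_aligned boot_args[4];
 *
 *	static void __init verify_boot_args(void)
 *	{
 *		// The boot protocol requires x1..x3 to be zero at kernel entry.
 *		if (boot_args[1] || boot_args[2] || boot_args[3])
 *			pr_err("WARNING: x1-x3 nonzero, boot protocol violation?\n");
 *	}
 */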

/*
 * Macro to create a table entry to the next page.
 *
 *	tbl:	page table address
 *	virt:	virtual address
 *	shift:	#imm page table shift
 *	ptrs:	#imm pointers per table page
 *
 * Preserves:	virt
 * Corrupts:	tmp1, tmp2
 * Returns:	tbl -> next level table page address
 */
	.macro	create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
	lsr	\tmp1, \virt, #\shift
	and	\tmp1, \tmp1, #\ptrs - 1	// table index
	add	\tmp2, \tbl, #PAGE_SIZE
	orr	\tmp2, \tmp2, #PMD_TYPE_TABLE	// address of next table and entry type
	str	\tmp2, [\tbl, \tmp1, lsl #3]
	add	\tbl, \tbl, #PAGE_SIZE		// next level table page
	.endm
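
/*
 * In rough C terms, create_table_entry does the following (an illustrative
 * sketch only; the pointer types and helper name are invented, and the real
 * code runs on physical addresses with the MMU off):
 *
 *	static u64 *create_table_entry(u64 *tbl, unsigned long virt,
 *				       unsigned int shift, unsigned long ptrs)
 *	{
 *		unsigned long idx = (virt >> shift) & (ptrs - 1);	// table index
 *		u64 *next = tbl + PAGE_SIZE / sizeof(u64);		// next page holds the next level
 *
 *		tbl[idx] = (u64)next | PMD_TYPE_TABLE;			// table descriptor
 *		return next;						// "tbl" now points at the next level
 *	}
 */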

/*
 * Macro to populate the PGD (and possibly PUD) for the corresponding
 * block entry in the next level (tbl) for the given virtual address.
 *
 * Preserves:	tbl, next, virt
 * Corrupts:	tmp1, tmp2
 */
	.macro	create_pgd_entry, tbl, virt, tmp1, tmp2
	create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2
#if SWAPPER_PGTABLE_LEVELS > 3
	create_table_entry \tbl, \virt, PUD_SHIFT, PTRS_PER_PUD, \tmp1, \tmp2
#endif
#if SWAPPER_PGTABLE_LEVELS > 2
	create_table_entry \tbl, \virt, SWAPPER_TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2
#endif
	.endm

/*
 * Macro to populate block entries in the page table for the start..end
 * virtual range (inclusive).
 *
 * Preserves:	tbl, flags
 * Corrupts:	phys, start, end, pstate
 */
	.macro	create_block_map, tbl, flags, phys, start, end
	lsr	\phys, \phys, #SWAPPER_BLOCK_SHIFT
	lsr	\start, \start, #SWAPPER_BLOCK_SHIFT
	and	\start, \start, #PTRS_PER_PTE - 1	// table index
	orr	\phys, \flags, \phys, lsl #SWAPPER_BLOCK_SHIFT	// table entry
	lsr	\end, \end, #SWAPPER_BLOCK_SHIFT
	and	\end, \end, #PTRS_PER_PTE - 1		// table end index
9999:	str	\phys, [\tbl, \start, lsl #3]		// store the entry
	add	\start, \start, #1			// next entry
	add	\phys, \phys, #SWAPPER_BLOCK_SIZE	// next block
	cmp	\start, \end
	b.ls	9999b
	.endm
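
/*
 * Again in rough C (an illustrative sketch only, with hypothetical names):
 *
 *	unsigned long i    = (start >> SWAPPER_BLOCK_SHIFT) & (PTRS_PER_PTE - 1);
 *	unsigned long last = (end   >> SWAPPER_BLOCK_SHIFT) & (PTRS_PER_PTE - 1);
 *	u64 entry = ((phys >> SWAPPER_BLOCK_SHIFT) << SWAPPER_BLOCK_SHIFT) | flags;
 *
 *	for (; i <= last; i++) {		// the range is inclusive
 *		tbl[i] = entry;			// block descriptor
 *		entry += SWAPPER_BLOCK_SIZE;	// next block of physical memory
 *	}
 */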

/*
 * Setup the initial page tables. We only setup the barest amount which is
 * required to get the kernel running. The following sections are required:
 *   - identity mapping to enable the MMU (low address, TTBR0)
 *   - first few MB of the kernel linear mapping to jump to once the MMU has
 *     been enabled
 */
__create_page_tables:
	mov	x28, lr

	/*
	 * Invalidate the idmap and swapper page tables to avoid potential
	 * dirty cache lines being evicted.
	 */
	adrp	x0, idmap_pg_dir
	adrp	x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
	bl	__inval_cache_range

	/*
	 * Clear the idmap and swapper page tables.
	 */
	adrp	x0, idmap_pg_dir
	adrp	x6, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
1:	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	cmp	x0, x6
	b.lo	1b

	mov	x7, SWAPPER_MM_MMUFLAGS

	/*
	 * Create the identity mapping.
	 */
	adrp	x0, idmap_pg_dir
	adrp	x3, __idmap_text_start		// __pa(__idmap_text_start)

#ifndef CONFIG_ARM64_VA_BITS_48
#define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
#define EXTRA_PTRS	(1 << (48 - EXTRA_SHIFT))

	/*
	 * If VA_BITS < 48, it may be too small to allow for an ID mapping to be
	 * created that covers system RAM if that is located sufficiently high
	 * in the physical address space. So for the ID map, use an extended
	 * virtual range in that case, by configuring an additional translation
	 * level.
	 * First, we have to verify our assumption that the current value of
	 * VA_BITS was chosen such that all translation levels are fully
	 * utilised, and that lowering T0SZ will always result in an additional
	 * translation level to be configured.
	 */
#if VA_BITS != EXTRA_SHIFT
#error "Mismatch between VA_BITS and page size/number of translation levels"
#endif

	/*
	 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
	 * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
	 * this number conveniently equals the number of leading zeroes in
	 * the physical address of __idmap_text_end.
	 */
	adrp	x5, __idmap_text_end
	clz	x5, x5
	cmp	x5, TCR_T0SZ(VA_BITS)	// default T0SZ small enough?
	b.ge	1f			// .. then skip additional level

	adr_l	x6, idmap_t0sz
	str	x5, [x6]
	dmb	sy
	dc	ivac, x6		// Invalidate potentially stale cache line

	create_table_entry x0, x3, EXTRA_SHIFT, EXTRA_PTRS, x5, x6
1:
#endif
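
	/*
	 * A worked example of the T0SZ calculation above (the numbers are
	 * illustrative): with 4KB pages and VA_BITS == 39, the default is
	 * T0SZ == 64 - 39 == 25. If __idmap_text_end lives just above 512GB,
	 * its physical address needs 40 bits, so clz gives 64 - 40 == 24.
	 * Since 24 < 25, idmap_t0sz is lowered to 24 and the extra translation
	 * level installed by create_table_entry above becomes necessary.
	 */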

	create_pgd_entry x0, x3, x5, x6
	mov	x5, x3				// __pa(__idmap_text_start)
	adr_l	x6, __idmap_text_end		// __pa(__idmap_text_end)
	create_block_map x0, x7, x3, x5, x6

	/*
	 * Map the kernel image (starting with PHYS_OFFSET).
	 */
	adrp	x0, swapper_pg_dir
	mov_q	x5, KIMAGE_VADDR + TEXT_OFFSET	// compile time __va(_text)
	add	x5, x5, x23			// add KASLR displacement
	create_pgd_entry x0, x5, x3, x6
	adrp	x6, _end			// runtime __pa(_end)
	adrp	x3, _text			// runtime __pa(_text)
	sub	x6, x6, x3			// _end - _text
	add	x6, x6, x5			// runtime __va(_end)
	create_block_map x0, x7, x3, x5, x6

	/*
	 * Since the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate the idmap and swapper page
	 * tables again to remove any speculatively loaded cache lines.
	 */
	adrp	x0, idmap_pg_dir
	adrp	x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
	dmb	sy
	bl	__inval_cache_range

	ret	x28
ENDPROC(__create_page_tables)
	.ltorg

/*
 * The following fragment of code is executed with the MMU enabled.
 *
 *   x0 = __PHYS_OFFSET
 */
__primary_switched:
	adrp	x4, init_thread_union
	add	sp, x4, #THREAD_SIZE
	adr_l	x5, init_task
	msr	sp_el0, x5			// Save thread_info

	adr_l	x8, vectors			// load VBAR_EL1 with virtual
	msr	vbar_el1, x8			// vector table address
	isb

	stp	xzr, x30, [sp, #-16]!
	mov	x29, sp

	str_l	x21, __fdt_pointer, x5		// Save FDT pointer

	ldr_l	x4, kimage_vaddr		// Save the offset between
	sub	x4, x4, x0			// the kernel virtual and
	str_l	x4, kimage_voffset, x5		// physical mappings

	// Clear BSS
	adr_l	x0, __bss_start
	mov	x1, xzr
	adr_l	x2, __bss_stop
	sub	x2, x2, x0
	bl	__pi_memset
	dsb	ishst				// Make zero page visible to PTW

#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
#ifdef CONFIG_RANDOMIZE_BASE
	tst	x23, ~(MIN_KIMG_ALIGN - 1)	// already running randomized?
	b.ne	0f
	mov	x0, x21				// pass FDT address in x0
	mov	x1, x23				// pass modulo offset in x1
	bl	kaslr_early_init		// parse FDT for KASLR options
	cbz	x0, 0f				// KASLR disabled? just proceed
	orr	x23, x23, x0			// record KASLR offset
	ldp	x29, x30, [sp], #16		// we must enable KASLR, return
	ret					// to __primary_switch()
0:
#endif
	b	start_kernel
ENDPROC(__primary_switched)
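
/*
 * In rough C-like pseudocode, the CONFIG_RANDOMIZE_BASE path above amounts to
 * the following (an illustrative sketch only; the variable names are invented):
 *
 *	if (!(kaslr_offset & ~(MIN_KIMG_ALIGN - 1))) {		// first pass only
 *		u64 seed = kaslr_early_init(fdt, kaslr_offset);	// parse FDT for options
 *		if (seed) {
 *			kaslr_offset |= seed;
 *			return;	// back into __primary_switch() to remap and re-enter
 *		}
 *	}
 *	start_kernel();
 */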

/*
 * end early head section, begin head code that is also used for
 * hotplug and needs to have the same protections as the text region
 */
	.section ".idmap.text","ax"

ENTRY(kimage_vaddr)
	.quad	_text - TEXT_OFFSET

/*
 * If we're fortunate enough to boot at EL2, ensure that the world is
 * sane before dropping to EL1.
 *
 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if
 * booted in EL1 or EL2 respectively.
 */
ENTRY(el2_setup)
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.ne	1f
	mrs	x0, sctlr_el2
CPU_BE(	orr	x0, x0, #(1 << 25)	)	// Set the EE bit for EL2
CPU_LE(	bic	x0, x0, #(1 << 25)	)	// Clear the EE bit for EL2
	msr	sctlr_el2, x0
	b	2f
1:	mrs	x0, sctlr_el1
CPU_BE(	orr	x0, x0, #(3 << 24)	)	// Set the EE and E0E bits for EL1
CPU_LE(	bic	x0, x0, #(3 << 24)	)	// Clear the EE and E0E bits for EL1
	msr	sctlr_el1, x0
	mov	w0, #BOOT_CPU_MODE_EL1		// This cpu booted in EL1
	isb
	ret

2:
#ifdef CONFIG_ARM64_VHE
	/*
	 * Check for VHE being present. For the rest of the EL2 setup,
	 * x2 being non-zero indicates that we do have VHE, and that the
	 * kernel is intended to run at EL2.
	 */
	mrs	x2, id_aa64mmfr1_el1
	ubfx	x2, x2, #8, #4
#else
	mov	x2, xzr
#endif

	/* Hyp configuration. */
	mov	x0, #HCR_RW			// 64-bit EL1
	cbz	x2, set_hcr
	orr	x0, x0, #HCR_TGE		// Enable Host Extensions
	orr	x0, x0, #HCR_E2H
set_hcr:
	msr	hcr_el2, x0
	isb

	/*
	 * Allow Non-secure EL1 and EL0 to access physical timer and counter.
	 * This is not necessary for VHE, since the host kernel runs in EL2,
	 * and EL0 accesses are configured in the later stage of boot process.
	 * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout
	 * as CNTKCTL_EL1, and CNTKCTL_EL1 accessing instructions are redefined
	 * to access CNTHCTL_EL2. This allows the kernel designed to run at EL1
	 * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
	 * EL2.
	 */
	cbnz	x2, 1f
	mrs	x0, cnthctl_el2
	orr	x0, x0, #3			// Enable EL1 physical timers
	msr	cnthctl_el2, x0
1:
	msr	cntvoff_el2, xzr		// Clear virtual offset

#ifdef CONFIG_ARM_GIC_V3
	/* GICv3 system register access */
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #24, #4
	cmp	x0, #1
	b.ne	3f

	mrs_s	x0, ICC_SRE_EL2
	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
	orr	x0, x0, #ICC_SRE_EL2_ENABLE	// Set ICC_SRE_EL2.Enable==1
	msr_s	ICC_SRE_EL2, x0
	isb					// Make sure SRE is now set
	mrs_s	x0, ICC_SRE_EL2			// Read SRE back,
	tbz	x0, #0, 3f			// and check that it sticks
	msr_s	ICH_HCR_EL2, xzr		// Reset ICC_HCR_EL2 to defaults

3:
#endif

	/* Populate ID registers. */
	mrs	x0, midr_el1
	mrs	x1, mpidr_el1
	msr	vpidr_el2, x0
	msr	vmpidr_el2, x1

	/*
	 * When VHE is not in use, early init of EL2 and EL1 needs to be
	 * done here.
	 * When VHE _is_ in use, EL1 will not be used in the host and
	 * requires no configuration, and all non-hyp-specific EL2 setup
	 * will be done via the _EL1 system register aliases in __cpu_setup.
	 */
	cbnz	x2, 1f

	/* sctlr_el1 */
	mov	x0, #0x0800			// Set/clear RES{1,0} bits
CPU_BE(	movk	x0, #0x33d0, lsl #16	)	// Set EE and E0E on BE systems
CPU_LE(	movk	x0, #0x30d0, lsl #16	)	// Clear EE and E0E on LE systems
	msr	sctlr_el1, x0

	/* Coprocessor traps. */
	mov	x0, #0x33ff
	msr	cptr_el2, x0			// Disable copro. traps to EL2
1:

#ifdef CONFIG_COMPAT
	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
#endif

	/* EL2 debug */
	mrs	x0, id_aa64dfr0_el1		// Check ID_AA64DFR0_EL1 PMUVer
	sbfx	x0, x0, #8, #4
	cmp	x0, #1
	b.lt	4f				// Skip if no PMU present
	mrs	x0, pmcr_el0			// Disable debug access traps
	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
4:
	csel	x0, xzr, x0, lt			// all PMU counters from EL1
	msr	mdcr_el2, x0			// (if they exist)

	/* Stage-2 translation */
	msr	vttbr_el2, xzr

	cbz	x2, install_el2_stub

	mov	w0, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
	isb
	ret

install_el2_stub:
	/* Hypervisor stub */
	adrp	x0, __hyp_stub_vectors
	add	x0, x0, #:lo12:__hyp_stub_vectors
	msr	vbar_el2, x0

	/* spsr */
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, x0
	msr	elr_el2, lr
	mov	w0, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
	eret
ENDPROC(el2_setup)

/*
 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
 * in w0. See arch/arm64/include/asm/virt.h for more info.
 */
set_cpu_boot_mode_flag:
	adr_l	x1, __boot_cpu_mode
	cmp	w0, #BOOT_CPU_MODE_EL2
	b.ne	1f
	add	x1, x1, #4
1:	str	w0, [x1]			// This CPU has booted in EL1
	dmb	sy
	dc	ivac, x1			// Invalidate potentially stale cache line
	ret
ENDPROC(set_cpu_boot_mode_flag)

/*
 * These values are written with the MMU off, but read with the MMU on.
 * Writers will invalidate the corresponding address, discarding up to a
 * 'Cache Writeback Granule' (CWG) worth of data. The linker script ensures
 * sufficient alignment that the CWG doesn't overlap another section.
 */
	.pushsection ".mmuoff.data.write", "aw"
/*
 * We need to find out the CPU boot mode long after boot, so we need to
 * store it in a writable variable.
 *
 * This is not in .bss, because we set it sufficiently early that the boot-time
 * zeroing of .bss would clobber it.
 */
ENTRY(__boot_cpu_mode)
	.long	BOOT_CPU_MODE_EL2
	.long	BOOT_CPU_MODE_EL1
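/*
 * The C side sees this flag roughly as below (a simplified sketch of
 * arch/arm64/include/asm/virt.h, not authoritative):
 *
 *	extern u32 __boot_cpu_mode[2];	// word 0 seeded with EL2, word 1 with EL1
 *
 *	static inline bool is_hyp_mode_available(void)
 *	{
 *		// Both words read back as EL2 only if every CPU entered at EL2.
 *		return __boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 &&
 *		       __boot_cpu_mode[1] == BOOT_CPU_MODE_EL2;
 *	}
 *
 *	static inline bool is_hyp_mode_mismatched(void)
 *	{
 *		return __boot_cpu_mode[0] != __boot_cpu_mode[1];
 *	}
 */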
/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 */
ENTRY(__early_cpu_boot_status)
	.long	0

	.popsection

/*
 * This provides a "holding pen" for platforms to hold all secondary
 * cores until we're ready for them to initialise.
 */
ENTRY(secondary_holding_pen)
	bl	el2_setup			// Drop to EL1, w0=cpu_boot_mode
	bl	set_cpu_boot_mode_flag
	mrs	x0, mpidr_el1
	mov_q	x1, MPIDR_HWID_BITMASK
	and	x0, x0, x1
	adr_l	x3, secondary_holding_pen_release
pen:	ldr	x4, [x3]
	cmp	x4, x0
	b.eq	secondary_startup
	wfe
	b	pen
ENDPROC(secondary_holding_pen)

/*
 * Secondary entry point that jumps straight into the kernel. Only to
 * be used where CPUs are brought online dynamically by the kernel.
 */
ENTRY(secondary_entry)
	bl	el2_setup			// Drop to EL1
	bl	set_cpu_boot_mode_flag
	b	secondary_startup
ENDPROC(secondary_entry)

secondary_startup:
	/*
	 * Common entry point for secondary CPUs.
	 */
	bl	__cpu_setup			// initialise processor
	bl	__enable_mmu
	ldr	x8, =__secondary_switched
	br	x8
ENDPROC(secondary_startup)

__secondary_switched:
	adr_l	x5, vectors
	msr	vbar_el1, x5
	isb

	adr_l	x0, secondary_data
	ldr	x1, [x0, #CPU_BOOT_STACK]	// get secondary_data.stack
	mov	sp, x1
	ldr	x2, [x0, #CPU_BOOT_TASK]
	msr	sp_el0, x2
	mov	x29, #0
	b	secondary_start_kernel
ENDPROC(__secondary_switched)

/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 *
 * update_early_cpu_boot_status status, tmp1, tmp2
 *  - Corrupts tmp1, tmp2
 *  - Writes 'status' to __early_cpu_boot_status and makes sure
 *    it is committed to memory.
 */

	.macro	update_early_cpu_boot_status status, tmp1, tmp2
	mov	\tmp2, #\status
	adr_l	\tmp1, __early_cpu_boot_status
	str	\tmp2, [\tmp1]
	dmb	sy
	dc	ivac, \tmp1			// Invalidate potentially stale cache line
	.endm

/*
 * Enable the MMU.
 *
 *  x0  = SCTLR_EL1 value for turning on the MMU.
 *
 * Returns to the caller via x30/lr. This requires the caller to be covered
 * by the .idmap.text section.
 *
 * Checks if the selected granule size is supported by the CPU.
 * If it isn't, park the CPU.
 */
ENTRY(__enable_mmu)
	mrs	x1, ID_AA64MMFR0_EL1
	ubfx	x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
	b.ne	__no_granule_support
	update_early_cpu_boot_status 0, x1, x2
	adrp	x1, idmap_pg_dir
	adrp	x2, swapper_pg_dir
	msr	ttbr0_el1, x1			// load TTBR0
	msr	ttbr1_el1, x2			// load TTBR1
	isb
	msr	sctlr_el1, x0
	isb
	/*
	 * Invalidate the local I-cache so that any instructions fetched
	 * speculatively from the PoC are discarded, since they may have
	 * been dynamically patched at the PoU.
	 */
	ic	iallu
	dsb	nsh
	isb
	ret
ENDPROC(__enable_mmu)
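
/*
 * The granule check at the top of __enable_mmu corresponds roughly to the
 * following C (an illustrative sketch only; the real check has to run with
 * the MMU still off, which is why it is written in assembly):
 *
 *	u64 mmfr0 = read_sysreg(id_aa64mmfr0_el1);
 *	unsigned int tgran = (mmfr0 >> ID_AA64MMFR0_TGRAN_SHIFT) & 0xf;
 *
 *	if (tgran != ID_AA64MMFR0_TGRAN_SUPPORTED)
 *		park_cpu();	// hypothetical name for the __no_granule_support path
 */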

__no_granule_support:
	/* Indicate that this CPU can't boot and is stuck in the kernel */
	update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x1, x2
1:
	wfe
	wfi
	b	1b
ENDPROC(__no_granule_support)

#ifdef CONFIG_RELOCATABLE
__relocate_kernel:
	/*
	 * Iterate over each entry in the relocation table, and apply the
	 * relocations in place.
	 */
	ldr	w9, =__rela_offset		// offset to reloc table
	ldr	w10, =__rela_size		// size of reloc table

	mov_q	x11, KIMAGE_VADDR		// default virtual offset
	add	x11, x11, x23			// actual virtual offset
	add	x9, x9, x11			// __va(.rela)
	add	x10, x9, x10			// __va(.rela) + sizeof(.rela)

0:	cmp	x9, x10
	b.hs	1f
	ldp	x11, x12, [x9], #24
	ldr	x13, [x9, #-8]
	cmp	w12, #R_AARCH64_RELATIVE
	b.ne	0b
	add	x13, x13, x23			// relocate
	str	x13, [x11, x23]
	b	0b
1:	ret
ENDPROC(__relocate_kernel)
#endif
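
/*
 * __relocate_kernel is, in rough C terms (an illustrative sketch only):
 *
 *	Elf64_Rela *rela = (void *)(KIMAGE_VADDR + kaslr_offset + __rela_offset);
 *	Elf64_Rela *end  = (void *)((char *)rela + __rela_size);
 *
 *	for (; rela < end; rela++) {
 *		if (ELF64_R_TYPE(rela->r_info) != R_AARCH64_RELATIVE)
 *			continue;
 *		// r_offset is a link-time (KIMAGE_VADDR-based) address; shift
 *		// both the target location and the stored value by the offset.
 *		*(u64 *)(rela->r_offset + kaslr_offset) = rela->r_addend + kaslr_offset;
 *	}
 */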

__primary_switch:
#ifdef CONFIG_RANDOMIZE_BASE
	mov	x19, x0				// preserve new SCTLR_EL1 value
	mrs	x20, sctlr_el1			// preserve old SCTLR_EL1 value
#endif

	bl	__enable_mmu
#ifdef CONFIG_RELOCATABLE
	bl	__relocate_kernel
#ifdef CONFIG_RANDOMIZE_BASE
	ldr	x8, =__primary_switched
	adrp	x0, __PHYS_OFFSET
	blr	x8

	/*
	 * If we return here, we have a KASLR displacement in x23 which we need
	 * to take into account by discarding the current kernel mapping and
	 * creating a new one.
	 */
	msr	sctlr_el1, x20			// disable the MMU
	isb
	bl	__create_page_tables		// recreate kernel mapping

	tlbi	vmalle1				// Remove any stale TLB entries
	dsb	nsh

	msr	sctlr_el1, x19			// re-enable the MMU
	isb
	ic	iallu				// flush instructions fetched
	dsb	nsh				// via old mapping
	isb

	bl	__relocate_kernel
#endif
#endif
	ldr	x8, =__primary_switched
	adrp	x0, __PHYS_OFFSET
	br	x8
ENDPROC(__primary_switch)