/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/assembler.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>
#include <asm/virt.h>

#define __PHYS_OFFSET	(KERNEL_START - TEXT_OFFSET)

#if (TEXT_OFFSET & 0xfff) != 0
#error TEXT_OFFSET must be at least 4KB aligned
#elif (PAGE_OFFSET & 0x1fffff) != 0
#error PAGE_OFFSET must be at least 2MB aligned
#elif TEXT_OFFSET > 0x1fffff
#error TEXT_OFFSET must be less than 2MB
#endif

#define KERNEL_START	_text
#define KERNEL_END	_end

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address of the FDT blob.
 *
 * This code is mostly position independent, so you can call it at
 * __pa(PAGE_OFFSET + TEXT_OFFSET).
 *
 * Note that the callee-saved registers are used for storing variables
 * that are useful before the MMU is enabled. The allocations are
 * described in the entry routines.
 */
	__HEAD
_head:
	/*
	 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
	 */
#ifdef CONFIG_EFI
	/*
	 * This add instruction has no meaningful effect except that
	 * its opcode forms the magic "MZ" signature required by UEFI.
	 */
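	/*
	 * (Illustration: "add x13, x18, #0x16" encodes as 0x91005a4d,
	 * whose first two little-endian bytes are 0x4d 0x5a, i.e. the
	 * ASCII characters "MZ" that open a PE/COFF image.)
	 */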
	add	x13, x18, #0x16
	b	stext
#else
	b	stext				// branch to kernel start, magic
	.long	0				// reserved
#endif
	le64sym	_kernel_offset_le	// Image load offset from start of RAM, little-endian
	le64sym	_kernel_size_le		// Effective size of kernel image, little-endian
	le64sym	_kernel_flags_le	// Informative flags, little-endian
	.quad	0			// reserved
	.quad	0			// reserved
	.quad	0			// reserved
	.byte	0x41			// Magic number, "ARM\x64"
	.byte	0x52
	.byte	0x4d
	.byte	0x64
#ifdef CONFIG_EFI
	.long	pe_header - _head	// Offset to the PE header.
#else
	.word	0			// reserved
#endif

#ifdef CONFIG_EFI
	.globl	__efistub_stext_offset
	.set	__efistub_stext_offset, stext - _head
	.align	3
pe_header:
	.ascii	"PE"
	.short	0
coff_header:
	.short	0xaa64				// AArch64
	.short	2				// nr_sections
	.long	0				// TimeDateStamp
	.long	0				// PointerToSymbolTable
	.long	1				// NumberOfSymbols
	.short	section_table - optional_header	// SizeOfOptionalHeader
	.short	0x206				// Characteristics.
						// IMAGE_FILE_DEBUG_STRIPPED |
						// IMAGE_FILE_EXECUTABLE_IMAGE |
						// IMAGE_FILE_LINE_NUMS_STRIPPED
optional_header:
	.short	0x20b				// PE32+ format
	.byte	0x02				// MajorLinkerVersion
	.byte	0x14				// MinorLinkerVersion
	.long	_end - stext			// SizeOfCode
	.long	0				// SizeOfInitializedData
	.long	0				// SizeOfUninitializedData
	.long	__efistub_entry - _head		// AddressOfEntryPoint
	.long	__efistub_stext_offset		// BaseOfCode

extra_header_fields:
	.quad	0				// ImageBase
	.long	0x1000				// SectionAlignment
	.long	PECOFF_FILE_ALIGNMENT		// FileAlignment
	.short	0				// MajorOperatingSystemVersion
	.short	0				// MinorOperatingSystemVersion
	.short	0				// MajorImageVersion
	.short	0				// MinorImageVersion
	.short	0				// MajorSubsystemVersion
	.short	0				// MinorSubsystemVersion
	.long	0				// Win32VersionValue

	.long	_end - _head			// SizeOfImage

	// Everything before the kernel image is considered part of the header
	.long	__efistub_stext_offset		// SizeOfHeaders
	.long	0				// CheckSum
	.short	0xa				// Subsystem (EFI application)
	.short	0				// DllCharacteristics
	.quad	0				// SizeOfStackReserve
	.quad	0				// SizeOfStackCommit
	.quad	0				// SizeOfHeapReserve
	.quad	0				// SizeOfHeapCommit
	.long	0				// LoaderFlags
	.long	0x6				// NumberOfRvaAndSizes

	.quad	0				// ExportTable
	.quad	0				// ImportTable
	.quad	0				// ResourceTable
	.quad	0				// ExceptionTable
	.quad	0				// CertificationTable
	.quad	0				// BaseRelocationTable

	// Section table
section_table:

	/*
	 * The EFI application loader requires a relocation section
	 * because EFI applications must be relocatable. This is a
	 * dummy section as far as we are concerned.
	 */
	.ascii	".reloc"
	.byte	0
	.byte	0			// end of 0 padding of section name
	.long	0
	.long	0
	.long	0			// SizeOfRawData
	.long	0			// PointerToRawData
	.long	0			// PointerToRelocations
	.long	0			// PointerToLineNumbers
	.short	0			// NumberOfRelocations
	.short	0			// NumberOfLineNumbers
	.long	0x42100040		// Characteristics (section flags)


	.ascii	".text"
	.byte	0
	.byte	0
	.byte	0			// end of 0 padding of section name
	.long	_end - stext		// VirtualSize
	.long	__efistub_stext_offset	// VirtualAddress
	.long	_edata - stext		// SizeOfRawData
	.long	__efistub_stext_offset	// PointerToRawData

	.long	0		// PointerToRelocations (0 for executables)
	.long	0		// PointerToLineNumbers (0 for executables)
	.short	0		// NumberOfRelocations (0 for executables)
	.short	0		// NumberOfLineNumbers (0 for executables)
	.long	0xe0500020	// Characteristics (section flags)

	/*
	 * EFI will load stext onwards at the 4k section alignment
	 * described in the PE/COFF header. To ensure that instruction
	 * sequences using an adrp and a :lo12: immediate will function
	 * correctly at this alignment, we must ensure that stext is
	 * placed at a 4k boundary in the Image to begin with.
	 */
	.align 12
#endif

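/*
 * (A summary of the register conventions used below, inferred from the
 * entry routines: x21 = FDT pointer, x24 = __PHYS_OFFSET, x25/x26 =
 * idmap/swapper page table addresses loaded into TTBR0/TTBR1, x27 =
 * virtual address to jump to once the MMU is on, and w20 = CPU boot
 * mode, BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2.)
 */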
ENTRY(stext)
	bl	preserve_boot_args
	bl	el2_setup			// Drop to EL1, w20=cpu_boot_mode
	adrp	x24, __PHYS_OFFSET
	bl	set_cpu_boot_mode_flag
	bl	__create_page_tables		// x25=TTBR0, x26=TTBR1
	/*
	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
	 * details.
	 * On return, the CPU will be ready for the MMU to be turned on and
	 * the TCR will have been set.
	 */
	ldr	x27, 0f				// address to jump to after
						// MMU has been enabled
	adr_l	lr, __enable_mmu		// return (PIC) address
	b	__cpu_setup			// initialise processor
ENDPROC(stext)
	.align	3
0:	.quad	__mmap_switched - (_head - TEXT_OFFSET) + KIMAGE_VADDR
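/*
 * (The expression above resolves to the link-time virtual address of
 * __mmap_switched, but is phrased as an image-relative offset plus the
 * constant KIMAGE_VADDR so that no dynamic relocation is needed in
 * code that runs before relocations have been applied.)
 */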

/*
 * Preserve the arguments passed by the bootloader in x0 .. x3
 */
preserve_boot_args:
	mov	x21, x0				// x21=FDT

	adr_l	x0, boot_args			// record the contents of
	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
	stp	x2, x3, [x0, #16]

	dmb	sy				// needed before dc ivac with
						// MMU off
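	/*
	 * (The stores above were made with the MMU and D-cache off; the
	 * DMB orders them before the D-cache invalidation performed by
	 * __inval_cache_range, so that boot_args reads back correctly
	 * once the MMU and caches are enabled.)
	 */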

	add	x1, x0, #0x20			// 4 x 8 bytes
	b	__inval_cache_range		// tail call
ENDPROC(preserve_boot_args)

/*
 * Macro to create a table entry to the next page.
 *
 *	tbl:	page table address
 *	virt:	virtual address
 *	shift:	#imm page table shift
 *	ptrs:	#imm pointers per table page
 *
 * Preserves:	virt
 * Corrupts:	tmp1, tmp2
 * Returns:	tbl -> next level table page address
 */
	.macro	create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
	lsr	\tmp1, \virt, #\shift
	and	\tmp1, \tmp1, #\ptrs - 1	// table index
	add	\tmp2, \tbl, #PAGE_SIZE
	orr	\tmp2, \tmp2, #PMD_TYPE_TABLE	// address of next table and entry type
	str	\tmp2, [\tbl, \tmp1, lsl #3]
	add	\tbl, \tbl, #PAGE_SIZE		// next level table page
	.endm
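	/*
	 * (Illustrative use: "create_table_entry x0, x3, PGDIR_SHIFT,
	 * PTRS_PER_PGD, x5, x6" computes the PGD index of the address in
	 * x3, writes a table descriptor pointing at the page immediately
	 * after the one in x0, and advances x0 to that next-level table.)
	 */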

/*
 * Macro to populate the PGD (and possibly PUD) for the corresponding
 * block entry in the next level (tbl) for the given virtual address.
 *
 * Preserves:	tbl, next, virt
 * Corrupts:	tmp1, tmp2
 */
	.macro	create_pgd_entry, tbl, virt, tmp1, tmp2
	create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2
#if SWAPPER_PGTABLE_LEVELS > 3
	create_table_entry \tbl, \virt, PUD_SHIFT, PTRS_PER_PUD, \tmp1, \tmp2
#endif
#if SWAPPER_PGTABLE_LEVELS > 2
	create_table_entry \tbl, \virt, SWAPPER_TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2
#endif
	.endm

/*
 * Macro to populate block entries in the page table for the start..end
 * virtual range (inclusive).
 *
 * Preserves:	tbl, flags
 * Corrupts:	phys, start, end, pstate
 */
	.macro	create_block_map, tbl, flags, phys, start, end
	lsr	\phys, \phys, #SWAPPER_BLOCK_SHIFT
	lsr	\start, \start, #SWAPPER_BLOCK_SHIFT
	and	\start, \start, #PTRS_PER_PTE - 1	// table index
	orr	\phys, \flags, \phys, lsl #SWAPPER_BLOCK_SHIFT	// table entry
	lsr	\end, \end, #SWAPPER_BLOCK_SHIFT
	and	\end, \end, #PTRS_PER_PTE - 1		// table end index
9999:	str	\phys, [\tbl, \start, lsl #3]		// store the entry
	add	\start, \start, #1			// next entry
	add	\phys, \phys, #SWAPPER_BLOCK_SIZE	// next block
	cmp	\start, \end
	b.ls	9999b
	.endm

/*
 * Set up the initial page tables. We set up only the bare minimum that
 * is required to get the kernel running. The following sections are
 * required:
 *   - identity mapping to enable the MMU (low address, TTBR0)
 *   - first few MB of the kernel linear mapping to jump to once the MMU
 *     has been enabled
 */
__create_page_tables:
	adrp	x25, idmap_pg_dir
	adrp	x26, swapper_pg_dir
	mov	x27, lr

	/*
	 * Invalidate the idmap and swapper page tables to avoid potential
	 * dirty cache lines being evicted.
	 */
	mov	x0, x25
	add	x1, x26, #SWAPPER_DIR_SIZE
	bl	__inval_cache_range

	/*
	 * Clear the idmap and swapper page tables.
	 */
	mov	x0, x25
	add	x6, x26, #SWAPPER_DIR_SIZE
1:	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	cmp	x0, x6
	b.lo	1b

	ldr	x7, =SWAPPER_MM_MMUFLAGS

	/*
	 * Create the identity mapping.
	 */
	mov	x0, x25				// idmap_pg_dir
	adrp	x3, __idmap_text_start		// __pa(__idmap_text_start)

#ifndef CONFIG_ARM64_VA_BITS_48
#define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
#define EXTRA_PTRS	(1 << (48 - EXTRA_SHIFT))

	/*
	 * If VA_BITS < 48, it may be too small to allow for an ID mapping to be
	 * created that covers system RAM if that is located sufficiently high
	 * in the physical address space. So for the ID map, use an extended
	 * virtual range in that case, by configuring an additional translation
	 * level.
	 * First, we have to verify our assumption that the current value of
	 * VA_BITS was chosen such that all translation levels are fully
	 * utilised, and that lowering T0SZ will always result in an additional
	 * translation level to be configured.
	 */
#if VA_BITS != EXTRA_SHIFT
#error "Mismatch between VA_BITS and page size/number of translation levels"
#endif

	/*
	 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
	 * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
	 * this number conveniently equals the number of leading zeroes in
	 * the physical address of __idmap_text_end.
	 */
	adrp	x5, __idmap_text_end
	clz	x5, x5
	cmp	x5, TCR_T0SZ(VA_BITS)	// default T0SZ small enough?
	b.ge	1f			// .. then skip additional level

	adr_l	x6, idmap_t0sz
	str	x5, [x6]
	dmb	sy
	dc	ivac, x6		// Invalidate potentially stale cache line

	create_table_entry x0, x3, EXTRA_SHIFT, EXTRA_PTRS, x5, x6
1:
#endif

	create_pgd_entry x0, x3, x5, x6
	mov	x5, x3				// __pa(__idmap_text_start)
	adr_l	x6, __idmap_text_end		// __pa(__idmap_text_end)
	create_block_map x0, x7, x3, x5, x6

	/*
	 * Map the kernel image (starting with PHYS_OFFSET).
	 */
	mov	x0, x26				// swapper_pg_dir
	ldr	x5, =KIMAGE_VADDR
	create_pgd_entry x0, x5, x3, x6
	ldr	w6, kernel_img_size
	add	x6, x6, x5
	mov	x3, x24				// phys offset
	create_block_map x0, x7, x3, x5, x6

	/*
	 * Since the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate the idmap and swapper page
	 * tables again to remove any speculatively loaded cache lines.
	 */
	mov	x0, x25
	add	x1, x26, #SWAPPER_DIR_SIZE
	dmb	sy
	bl	__inval_cache_range

	mov	lr, x27
	ret
ENDPROC(__create_page_tables)

kernel_img_size:
	.long	_end - (_head - TEXT_OFFSET)
	.ltorg

/*
 * The following fragment of code is executed with the MMU enabled.
 */
	.set	initial_sp, init_thread_union + THREAD_START_SP
__mmap_switched:
	adr_l	x8, vectors			// load VBAR_EL1 with virtual
	msr	vbar_el1, x8			// vector table address
	isb

	// Clear BSS
	adr_l	x0, __bss_start
	mov	x1, xzr
	adr_l	x2, __bss_stop
	sub	x2, x2, x0
	bl	__pi_memset
	dsb	ishst				// Make zero page visible to PTW

#ifdef CONFIG_RELOCATABLE

	/*
	 * Iterate over each entry in the relocation table, and apply the
	 * relocations in place.
	 */
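	/*
	 * (Each Elf64_Rela entry is 24 bytes: r_offset, r_info and
	 * r_addend, loaded below into x11, x12 and x13 respectively.
	 * r_info carries the symbol index in its top 32 bits and the
	 * relocation type in its bottom 32 bits; for R_AARCH64_ABS64 the
	 * index is scaled by 24 -- sizeof(Elf64_Sym), computed as
	 * x + 2x -- to locate the symbol's st_value in the symbol table.)
	 */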
	adr_l	x8, __dynsym_start		// start of symbol table
	adr_l	x9, __reloc_start		// start of reloc table
	adr_l	x10, __reloc_end		// end of reloc table

0:	cmp	x9, x10
	b.hs	2f
	ldp	x11, x12, [x9], #24
	ldr	x13, [x9, #-8]
	cmp	w12, #R_AARCH64_RELATIVE
	b.ne	1f
	str	x13, [x11]
	b	0b

1:	cmp	w12, #R_AARCH64_ABS64
	b.ne	0b
	add	x12, x12, x12, lsl #1		// symtab offset: 24x top word
	add	x12, x8, x12, lsr #(32 - 3)	// ... shifted into bottom word
	ldr	x15, [x12, #8]			// Elf64_Sym::st_value
	add	x15, x13, x15
	str	x15, [x11]
	b	0b

2:
#endif

	adr_l	sp, initial_sp, x4
	mov	x4, sp
	and	x4, x4, #~(THREAD_SIZE - 1)
	msr	sp_el0, x4			// Save thread_info
	str_l	x21, __fdt_pointer, x5		// Save FDT pointer

	ldr	x4, =KIMAGE_VADDR		// Save the offset between
	sub	x4, x4, x24			// the kernel virtual and
	str_l	x4, kimage_voffset, x5		// physical mappings

	mov	x29, #0
#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
	b	start_kernel
ENDPROC(__mmap_switched)

/*
 * end early head section, begin head code that is also used for
 * hotplug and needs to have the same protections as the text region
 */
	.section ".text","ax"
/*
 * If we're fortunate enough to boot at EL2, ensure that the world is
 * sane before dropping to EL1.
 *
 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x20 if
 * booted in EL1 or EL2 respectively.
 */
ENTRY(el2_setup)
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.ne	1f
	mrs	x0, sctlr_el2
CPU_BE(	orr	x0, x0, #(1 << 25)	)	// Set the EE bit for EL2
CPU_LE(	bic	x0, x0, #(1 << 25)	)	// Clear the EE bit for EL2
	msr	sctlr_el2, x0
	b	2f
1:	mrs	x0, sctlr_el1
CPU_BE(	orr	x0, x0, #(3 << 24)	)	// Set the EE and E0E bits for EL1
CPU_LE(	bic	x0, x0, #(3 << 24)	)	// Clear the EE and E0E bits for EL1
	msr	sctlr_el1, x0
	mov	w20, #BOOT_CPU_MODE_EL1		// This cpu booted in EL1
	isb
	ret

	/* Hyp configuration. */
2:	mov	x0, #(1 << 31)			// 64-bit EL1
	msr	hcr_el2, x0
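	/*
	 * (Bit 31 here is HCR_EL2.RW, which makes EL1 execute in AArch64
	 * state; every other HCR_EL2-controlled trap is left disabled.)
	 */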

	/* Generic timers. */
	mrs	x0, cnthctl_el2
	orr	x0, x0, #3			// Enable EL1 physical timers
	msr	cnthctl_el2, x0
	msr	cntvoff_el2, xzr		// Clear virtual offset

#ifdef CONFIG_ARM_GIC_V3
	/* GICv3 system register access */
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #24, #4
	cmp	x0, #1
	b.ne	3f
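	/*
	 * (Bits [27:24] of ID_AA64PFR0_EL1 are the GIC field; the value 1
	 * indicates that the GICv3 system register interface is present.)
	 */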

	mrs_s	x0, ICC_SRE_EL2
	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
	orr	x0, x0, #ICC_SRE_EL2_ENABLE	// Set ICC_SRE_EL2.Enable==1
	msr_s	ICC_SRE_EL2, x0
	isb					// Make sure SRE is now set
	mrs_s	x0, ICC_SRE_EL2			// Read SRE back,
	tbz	x0, #0, 3f			// and check that it sticks
	msr_s	ICH_HCR_EL2, xzr		// Reset ICC_HCR_EL2 to defaults

3:
#endif

	/* Populate ID registers. */
	mrs	x0, midr_el1
	mrs	x1, mpidr_el1
	msr	vpidr_el2, x0
	msr	vmpidr_el2, x1

	/* sctlr_el1 */
	mov	x0, #0x0800			// Set/clear RES{1,0} bits
CPU_BE(	movk	x0, #0x33d0, lsl #16	)	// Set EE and E0E on BE systems
CPU_LE(	movk	x0, #0x30d0, lsl #16	)	// Clear EE and E0E on LE systems
	msr	sctlr_el1, x0
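	/*
	 * (The little-endian value 0x30d00800 sets what were, at the
	 * time, the RES1 bits of SCTLR_EL1 -- bits 11, 20, 22, 23, 28
	 * and 29 -- while leaving the MMU and caches disabled.)
	 */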

	/* Coprocessor traps. */
	mov	x0, #0x33ff
	msr	cptr_el2, x0			// Disable copro. traps to EL2
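	/*
	 * (0x33ff sets the RES1 bits of CPTR_EL2 and leaves TFP, bit 10,
	 * clear, so FP/SIMD accesses from lower ELs do not trap to EL2.)
	 */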

#ifdef CONFIG_COMPAT
	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
#endif

	/* EL2 debug */
	mrs	x0, id_aa64dfr0_el1		// Check ID_AA64DFR0_EL1 PMUVer
	sbfx	x0, x0, #8, #4
	cmp	x0, #1
	b.lt	4f				// Skip if no PMU present
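	/*
	 * (PMCR_EL0.N, bits [15:11], holds the number of event counters;
	 * copying it into MDCR_EL2.HPMN with every other bit clear lets
	 * EL1 access all of the counters without trapping to EL2.)
	 */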
	mrs	x0, pmcr_el0			// Disable debug access traps
	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
	msr	mdcr_el2, x0			// all PMU counters from EL1
4:

	/* Stage-2 translation */
	msr	vttbr_el2, xzr

	/* Hypervisor stub */
	adrp	x0, __hyp_stub_vectors
	add	x0, x0, #:lo12:__hyp_stub_vectors
	msr	vbar_el2, x0

	/* spsr */
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, x0
	msr	elr_el2, lr
	mov	w20, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
	eret
ENDPROC(el2_setup)

/*
 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
 * in x20. See arch/arm64/include/asm/virt.h for more info.
 */
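/*
 * (__boot_cpu_mode is a two-word array: CPUs entering in EL2 update
 * word 1 and CPUs entering in EL1 update word 0, so the kernel can
 * later tell whether every CPU booted in EL2.)
 */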
ENTRY(set_cpu_boot_mode_flag)
	adr_l	x1, __boot_cpu_mode
	cmp	w20, #BOOT_CPU_MODE_EL2
	b.ne	1f
	add	x1, x1, #4
1:	str	w20, [x1]			// This CPU has booted in EL1
	dmb	sy
	dc	ivac, x1			// Invalidate potentially stale cache line
	ret
ENDPROC(set_cpu_boot_mode_flag)

/*
 * We need to find out the CPU boot mode long after boot, so we need to
 * store it in a writable variable.
 *
 * This is not in .bss, because we set it sufficiently early that the boot-time
 * zeroing of .bss would clobber it.
 */
	.pushsection	.data..cacheline_aligned
	.align	L1_CACHE_SHIFT
ENTRY(__boot_cpu_mode)
	.long	BOOT_CPU_MODE_EL2
	.long	BOOT_CPU_MODE_EL1
	.popsection

/*
 * This provides a "holding pen" in which all secondary cores are held
 * until we're ready for them to initialise.
 */
ENTRY(secondary_holding_pen)
	bl	el2_setup			// Drop to EL1, w20=cpu_boot_mode
	bl	set_cpu_boot_mode_flag
	mrs	x0, mpidr_el1
	ldr	x1, =MPIDR_HWID_BITMASK
	and	x0, x0, x1
	adr_l	x3, secondary_holding_pen_release
pen:	ldr	x4, [x3]
	cmp	x4, x0
	b.eq	secondary_startup
	wfe
	b	pen
ENDPROC(secondary_holding_pen)

/*
 * Secondary entry point that jumps straight into the kernel. Only to
 * be used where CPUs are brought online dynamically by the kernel.
 */
ENTRY(secondary_entry)
	bl	el2_setup			// Drop to EL1
	bl	set_cpu_boot_mode_flag
	b	secondary_startup
ENDPROC(secondary_entry)

ENTRY(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 */
	adrp	x25, idmap_pg_dir
	adrp	x26, swapper_pg_dir
	bl	__cpu_setup			// initialise processor

	ldr	x8, =KIMAGE_VADDR
	ldr	w9, 0f
	sub	x27, x8, w9, sxtw		// address to jump to after enabling the MMU
	b	__enable_mmu
ENDPROC(secondary_startup)
0:	.long	(_text - TEXT_OFFSET) - __secondary_switched
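/*
 * (The literal above is the negated image offset of
 * __secondary_switched; subtracting it from KIMAGE_VADDR reconstructs
 * the routine's link-time virtual address without requiring a dynamic
 * relocation this early in boot.)
 */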

ENTRY(__secondary_switched)
	adr_l	x5, vectors
	msr	vbar_el1, x5
	isb

	ldr_l	x0, secondary_data		// get secondary_data.stack
	mov	sp, x0
	and	x0, x0, #~(THREAD_SIZE - 1)
	msr	sp_el0, x0			// save thread_info
	mov	x29, #0
	b	secondary_start_kernel
ENDPROC(__secondary_switched)

/*
 * Enable the MMU.
 *
 *  x0  = SCTLR_EL1 value for turning on the MMU.
 *  x27 = *virtual* address to jump to upon completion
 *
 * Other registers depend on the function called upon completion.
 *
 * Checks if the selected granule size is supported by the CPU.
 * If it isn't, park the CPU.
 */
	.section	".idmap.text", "ax"
__enable_mmu:
	mrs	x1, ID_AA64MMFR0_EL1
	ubfx	x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
	b.ne	__no_granule_support
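	/*
	 * (ID_AA64MMFR0_TGRAN_SHIFT is defined in <asm/sysreg.h> to pick
	 * the TGRAN4/TGRAN16/TGRAN64 field matching the configured page
	 * size, so a CPU whose MMU cannot implement the kernel's granule
	 * is parked rather than enabled.)
	 */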
	msr	ttbr0_el1, x25			// load TTBR0
	msr	ttbr1_el1, x26			// load TTBR1
	isb
	msr	sctlr_el1, x0
	isb
	/*
	 * Invalidate the local I-cache so that any instructions fetched
	 * speculatively from the PoC are discarded, since they may have
	 * been dynamically patched at the PoU.
	 */
	ic	iallu
	dsb	nsh
	isb
	br	x27
ENDPROC(__enable_mmu)

__no_granule_support:
	wfe
	b	__no_granule_support
ENDPROC(__no_granule_support)