/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/assembler.h>
#include <asm/boot.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/image.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>
#include <asm/virt.h>

#include "efi-header.S"

#define __PHYS_OFFSET	(KERNEL_START - TEXT_OFFSET)

#if (TEXT_OFFSET & 0xfff) != 0
#error TEXT_OFFSET must be at least 4KB aligned
#elif (PAGE_OFFSET & 0x1fffff) != 0
#error PAGE_OFFSET must be at least 2MB aligned
#elif TEXT_OFFSET > 0x1fffff
#error TEXT_OFFSET must be less than 2MB
#endif

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address of the FDT blob.
 *
 * This code is mostly position independent, so you can call this at
 * __pa(PAGE_OFFSET + TEXT_OFFSET).
 *
 * Note that the callee-saved registers are used for storing variables
 * that are useful before the MMU is enabled. The allocations are described
 * in the entry routines.
 */
	__HEAD
_head:
	/*
	 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
	 */
#ifdef CONFIG_EFI
	/*
	 * This add instruction has no meaningful effect except that
	 * its opcode forms the magic "MZ" signature required by UEFI.
	 */
	add	x13, x18, #0x16
	b	stext
#else
	b	stext				// branch to kernel start, magic
	.long	0				// reserved
#endif
	le64sym	_kernel_offset_le		// Image load offset from start of RAM, little-endian
	le64sym	_kernel_size_le			// Effective size of kernel image, little-endian
	le64sym	_kernel_flags_le		// Informative flags, little-endian
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.ascii	ARM64_IMAGE_MAGIC		// Magic number
#ifdef CONFIG_EFI
	.long	pe_header - _head		// Offset to the PE header.

pe_header:
	__EFI_PE_HEADER
#else
	.long	0				// reserved
#endif

	__INIT

/*
 * The following callee saved general purpose registers are used on the
 * primary lowlevel boot path:
 *
 *  Register   Scope                      Purpose
 *  x21        stext() .. start_kernel()  FDT pointer passed at boot in x0
 *  x23        stext() .. start_kernel()  physical misalignment/KASLR offset
 *  x28        __create_page_tables()     callee preserved temp register
 *  x19/x20    __primary_switch()         callee preserved temp registers
 *  x24        __primary_switch() .. relocate_kernel()
 *                                        current RELR displacement
 */
ENTRY(stext)
	bl	preserve_boot_args
	bl	el2_setup			// Drop to EL1, w0=cpu_boot_mode
	adrp	x23, __PHYS_OFFSET
	and	x23, x23, MIN_KIMG_ALIGN - 1	// KASLR offset, defaults to 0
	bl	set_cpu_boot_mode_flag
	bl	__create_page_tables
	/*
	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
	 * details.
	 * On return, the CPU will be ready for the MMU to be turned on and
	 * the TCR will have been set.
	 */
	bl	__cpu_setup			// initialise processor
	b	__primary_switch
ENDPROC(stext)

/*
 * Preserve the arguments passed by the bootloader in x0 .. x3
 */
preserve_boot_args:
	mov	x21, x0				// x21=FDT

	adr_l	x0, boot_args			// record the contents of
	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
	stp	x2, x3, [x0, #16]

	dmb	sy				// needed before dc ivac with
						// MMU off

	mov	x1, #0x20			// 4 x 8 bytes
	b	__inval_dcache_area		// tail call
ENDPROC(preserve_boot_args)

/*
 * Macro to create a table entry to the next page.
 *
 *	tbl:	page table address
 *	virt:	virtual address
 *	shift:	#imm page table shift
 *	ptrs:	#imm pointers per table page
 *
 * Preserves:	virt
 * Corrupts:	ptrs, tmp1, tmp2
 * Returns:	tbl -> next level table page address
 */
	.macro	create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
	add	\tmp1, \tbl, #PAGE_SIZE
	phys_to_pte \tmp2, \tmp1
	orr	\tmp2, \tmp2, #PMD_TYPE_TABLE	// address of next table and entry type
	lsr	\tmp1, \virt, #\shift
	sub	\ptrs, \ptrs, #1
	and	\tmp1, \tmp1, \ptrs		// table index
	str	\tmp2, [\tbl, \tmp1, lsl #3]
	add	\tbl, \tbl, #PAGE_SIZE		// next level table page
	.endm
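
/*
 * For example, __create_page_tables below extends the ID map's VA range
 * with:
 *	create_table_entry x0, x3, EXTRA_SHIFT, x4, x5, x6
 * which stores a descriptor for the page following the table at x0 into
 * the EXTRA_SHIFT-level slot for the VA in x3, then advances x0 to that
 * next-level page.
 */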

/*
 * Macro to populate page table entries; these entries can be pointers to
 * the next level, or last-level entries pointing to physical memory.
 *
 *	tbl:	page table address
 *	rtbl:	pointer to page table or physical memory
 *	index:	start index to write
 *	eindex:	end index to write - [index, eindex] written to
 *	flags:	flags to OR into each page table entry
 *	inc:	increment to rtbl between each entry
 *	tmp1:	temporary variable
 *
 * Preserves:	tbl, eindex, flags, inc
 * Corrupts:	index, tmp1
 * Returns:	rtbl
 */
	.macro populate_entries, tbl, rtbl, index, eindex, flags, inc, tmp1
.Lpe\@:	phys_to_pte \tmp1, \rtbl
	orr	\tmp1, \tmp1, \flags		// tmp1 = table entry
	str	\tmp1, [\tbl, \index, lsl #3]
	add	\rtbl, \rtbl, \inc		// rtbl = pa next level
	add	\index, \index, #1
	cmp	\index, \eindex
	b.ls	.Lpe\@
	.endm

/*
 * Compute indices of table entries from virtual address range. If multiple entries
 * were needed in the previous page table level then the next page table level is assumed
 * to be composed of multiple pages. (This effectively scales the end index.)
 *
 *	vstart:	virtual address of start of range
 *	vend:	virtual address of end of range - we map [vstart, vend]
 *	shift:	shift used to transform virtual address into index
 *	ptrs:	number of entries in page table
 *	istart:	index in table corresponding to vstart
 *	iend:	index in table corresponding to vend
 *	count:	On entry: how many extra entries were required in previous level, scales
 *		our end index.
 *		On exit: returns how many extra entries required for next page table level
 *
 * Preserves:	vstart, vend, shift, ptrs
 * Returns:	istart, iend, count
 */
	.macro compute_indices, vstart, vend, shift, ptrs, istart, iend, count
	lsr	\iend, \vend, \shift
	mov	\istart, \ptrs
	sub	\istart, \istart, #1
	and	\iend, \iend, \istart		// iend = (vend >> shift) & (ptrs - 1)
	mov	\istart, \ptrs
	mul	\istart, \istart, \count
	add	\iend, \iend, \istart		// iend += count * ptrs
						// our entries span multiple tables

	lsr	\istart, \vstart, \shift
	mov	\count, \ptrs
	sub	\count, \count, #1
	and	\istart, \istart, \count

	sub	\count, \iend, \istart
	.endm
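
/*
 * Worked example: with \ptrs == 512, if the previous level needed two
 * entries (count == 1 on entry), vstart's index in the first table page
 * is 5 and vend's index in the last is 7, then istart == 5,
 * iend == 7 + 1 * 512 == 519 and count == 514 on exit, i.e. the next
 * level will be built from 515 consecutive table pages.
 */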

/*
 * Map memory for specified virtual address range. Each level of page table needed supports
 * multiple entries. If a level requires n entries the next page table level is assumed to be
 * formed from n pages.
 *
 *	tbl:	location of page table
 *	rtbl:	address to be used for first level page table entry (typically tbl + PAGE_SIZE)
 *	vstart:	virtual address of start of range
 *	vend:	virtual address of end of range - we map [vstart, vend - 1]
 *	flags:	flags to use to map last level entries
 *	phys:	physical address corresponding to vstart - physical memory is contiguous
 *	pgds:	the number of pgd entries
 *
 * Temporaries:	istart, iend, tmp, count, sv - these need to be different registers
 * Preserves:	vstart, flags
 * Corrupts:	tbl, rtbl, vend, istart, iend, tmp, count, sv
 */
	.macro map_memory, tbl, rtbl, vstart, vend, flags, phys, pgds, istart, iend, tmp, count, sv
	sub	\vend, \vend, #1
	add	\rtbl, \tbl, #PAGE_SIZE
	mov	\sv, \rtbl
	mov	\count, #0
	compute_indices \vstart, \vend, #PGDIR_SHIFT, \pgds, \istart, \iend, \count
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	mov	\tbl, \sv
	mov	\sv, \rtbl

#if SWAPPER_PGTABLE_LEVELS > 3
	compute_indices \vstart, \vend, #PUD_SHIFT, #PTRS_PER_PUD, \istart, \iend, \count
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	mov	\tbl, \sv
	mov	\sv, \rtbl
#endif

#if SWAPPER_PGTABLE_LEVELS > 2
	compute_indices \vstart, \vend, #SWAPPER_TABLE_SHIFT, #PTRS_PER_PMD, \istart, \iend, \count
	populate_entries \tbl, \rtbl, \istart, \iend, #PMD_TYPE_TABLE, #PAGE_SIZE, \tmp
	mov	\tbl, \sv
#endif

	compute_indices \vstart, \vend, #SWAPPER_BLOCK_SHIFT, #PTRS_PER_PTE, \istart, \iend, \count
	bic	\count, \phys, #SWAPPER_BLOCK_SIZE - 1
	populate_entries \tbl, \count, \istart, \iend, \flags, #SWAPPER_BLOCK_SIZE, \tmp
	.endm
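
/*
 * For example, the identity mapping below is created with:
 *	map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14
 * where x3 is passed as both vstart and phys, since VA == PA for the
 * ID map.
 */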

/*
 * Set up the initial page tables. We only set up the bare minimum that is
 * required to get the kernel running. The following sections are required:
 *   - identity mapping to enable the MMU (low address, TTBR0)
 *   - first few MB of the kernel linear mapping to jump to once the MMU has
 *     been enabled
 */
__create_page_tables:
	mov	x28, lr

	/*
	 * Invalidate the init page tables to avoid potential dirty cache lines
	 * being evicted. Other page tables are allocated in rodata as part of
	 * the kernel image, and thus are clean to the PoC per the boot
	 * protocol.
	 */
	adrp	x0, init_pg_dir
	adrp	x1, init_pg_end
	sub	x1, x1, x0
	bl	__inval_dcache_area

	/*
	 * Clear the init page tables.
	 */
	adrp	x0, init_pg_dir
	adrp	x1, init_pg_end
	sub	x1, x1, x0
1:	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	subs	x1, x1, #64
	b.ne	1b

	mov	x7, SWAPPER_MM_MMUFLAGS

	/*
	 * Create the identity mapping.
	 */
	adrp	x0, idmap_pg_dir
	adrp	x3, __idmap_text_start		// __pa(__idmap_text_start)

#ifdef CONFIG_ARM64_VA_BITS_52
	mrs_s	x6, SYS_ID_AA64MMFR2_EL1
	and	x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
	mov	x5, #52
	cbnz	x6, 1f
#endif
	mov	x5, #VA_BITS_MIN
1:
	adr_l	x6, vabits_actual
	str	x5, [x6]
	dmb	sy
	dc	ivac, x6			// Invalidate potentially stale cache line

	/*
	 * VA_BITS may be too small to allow for an ID mapping to be created
	 * that covers system RAM if that is located sufficiently high in the
	 * physical address space. So for the ID map, use an extended virtual
	 * range in that case, and configure an additional translation level
	 * if needed.
	 *
	 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
	 * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
	 * this number conveniently equals the number of leading zeroes in
	 * the physical address of __idmap_text_end.
	 */
	adrp	x5, __idmap_text_end
	clz	x5, x5
	cmp	x5, TCR_T0SZ(VA_BITS_MIN)	// default T0SZ small enough?
	b.ge	1f				// .. then skip VA range extension

	adr_l	x6, idmap_t0sz
	str	x5, [x6]
	dmb	sy
	dc	ivac, x6			// Invalidate potentially stale cache line

#if (VA_BITS < 48)
#define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
#define EXTRA_PTRS	(1 << (PHYS_MASK_SHIFT - EXTRA_SHIFT))

	/*
	 * If VA_BITS < 48, we have to configure an additional table level.
	 * First, we have to verify our assumption that the current value of
	 * VA_BITS was chosen such that all translation levels are fully
	 * utilised, and that lowering T0SZ will always result in an additional
	 * translation level to be configured.
	 */
#if VA_BITS != EXTRA_SHIFT
#error "Mismatch between VA_BITS and page size/number of translation levels"
#endif

	mov	x4, EXTRA_PTRS
	create_table_entry x0, x3, EXTRA_SHIFT, x4, x5, x6
#else
	/*
	 * If VA_BITS == 48, we don't have to configure an additional
	 * translation level, but the top-level table has more entries.
	 */
	mov	x4, #1 << (PHYS_MASK_SHIFT - PGDIR_SHIFT)
	str_l	x4, idmap_ptrs_per_pgd, x5
#endif
1:
	ldr_l	x4, idmap_ptrs_per_pgd
	mov	x5, x3				// __pa(__idmap_text_start)
	adr_l	x6, __idmap_text_end		// __pa(__idmap_text_end)

	map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14

	/*
	 * Map the kernel image (starting with PHYS_OFFSET).
	 */
	adrp	x0, init_pg_dir
	mov_q	x5, KIMAGE_VADDR + TEXT_OFFSET	// compile time __va(_text)
	add	x5, x5, x23			// add KASLR displacement
	mov	x4, PTRS_PER_PGD
	adrp	x6, _end			// runtime __pa(_end)
	adrp	x3, _text			// runtime __pa(_text)
	sub	x6, x6, x3			// _end - _text
	add	x6, x6, x5			// runtime __va(_end)

	map_memory x0, x1, x5, x6, x7, x3, x4, x10, x11, x12, x13, x14

	/*
	 * Since the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate those tables again to
	 * remove any speculatively loaded cache lines.
	 */
	dmb	sy

	adrp	x0, idmap_pg_dir
	adrp	x1, idmap_pg_end
	sub	x1, x1, x0
	bl	__inval_dcache_area

	adrp	x0, init_pg_dir
	adrp	x1, init_pg_end
	sub	x1, x1, x0
	bl	__inval_dcache_area

	ret	x28
ENDPROC(__create_page_tables)
	.ltorg

/*
 * The following fragment of code is executed with the MMU enabled.
 *
 *   x0 = __PHYS_OFFSET
 */
__primary_switched:
	adrp	x4, init_thread_union
	add	sp, x4, #THREAD_SIZE
	adr_l	x5, init_task
	msr	sp_el0, x5			// Save thread_info

	adr_l	x8, vectors			// load VBAR_EL1 with virtual
	msr	vbar_el1, x8			// vector table address
	isb

	stp	xzr, x30, [sp, #-16]!
	mov	x29, sp

	str_l	x21, __fdt_pointer, x5		// Save FDT pointer

	ldr_l	x4, kimage_vaddr		// Save the offset between
	sub	x4, x4, x0			// the kernel virtual and
	str_l	x4, kimage_voffset, x5		// physical mappings

	// Clear BSS
	adr_l	x0, __bss_start
	mov	x1, xzr
	adr_l	x2, __bss_stop
	sub	x2, x2, x0
	bl	__pi_memset
	dsb	ishst				// Make zero page visible to PTW

#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
#ifdef CONFIG_RANDOMIZE_BASE
	tst	x23, ~(MIN_KIMG_ALIGN - 1)	// already running randomized?
	b.ne	0f
	mov	x0, x21				// pass FDT address in x0
	bl	kaslr_early_init		// parse FDT for KASLR options
	cbz	x0, 0f				// KASLR disabled? just proceed
	orr	x23, x23, x0			// record KASLR offset
	ldp	x29, x30, [sp], #16		// we must enable KASLR, return
	ret					// to __primary_switch()
0:
#endif
	add	sp, sp, #16
	mov	x29, #0
	mov	x30, #0
	b	start_kernel
ENDPROC(__primary_switched)

/*
 * end early head section, begin head code that is also used for
 * hotplug and needs to have the same protections as the text region
 */
	.section ".idmap.text","awx"

ENTRY(kimage_vaddr)
	.quad	_text - TEXT_OFFSET
EXPORT_SYMBOL(kimage_vaddr)

/*
 * If we're fortunate enough to boot at EL2, ensure that the world is
 * sane before dropping to EL1.
 *
 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w0 if
 * booted in EL1 or EL2 respectively.
 */
ENTRY(el2_setup)
	msr	SPsel, #1			// We want to use SP_EL{1,2}
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.eq	1f
	mov_q	x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
	msr	sctlr_el1, x0
	mov	w0, #BOOT_CPU_MODE_EL1		// This CPU booted in EL1
	isb
	ret

1:	mov_q	x0, (SCTLR_EL2_RES1 | ENDIAN_SET_EL2)
	msr	sctlr_el2, x0

#ifdef CONFIG_ARM64_VHE
	/*
	 * Check for VHE being present. For the rest of the EL2 setup,
	 * x2 being non-zero indicates that we do have VHE, and that the
	 * kernel is intended to run at EL2.
	 */
	mrs	x2, id_aa64mmfr1_el1
	ubfx	x2, x2, #ID_AA64MMFR1_VHE_SHIFT, #4
#else
	mov	x2, xzr
#endif

	/* Hyp configuration. */
	mov_q	x0, HCR_HOST_NVHE_FLAGS
	cbz	x2, set_hcr
	mov_q	x0, HCR_HOST_VHE_FLAGS
set_hcr:
	msr	hcr_el2, x0
	isb

	/*
	 * Allow Non-secure EL1 and EL0 to access physical timer and counter.
	 * This is not necessary for VHE, since the host kernel runs in EL2,
	 * and EL0 accesses are configured in the later stage of boot process.
	 * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout
	 * as CNTKCTL_EL1, and CNTKCTL_EL1 accessing instructions are redefined
	 * to access CNTHCTL_EL2. This allows the kernel designed to run at EL1
	 * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
	 * EL2.
	 */
	cbnz	x2, 1f
	mrs	x0, cnthctl_el2
	orr	x0, x0, #3			// Enable EL1 physical timers
	msr	cnthctl_el2, x0
1:
	msr	cntvoff_el2, xzr		// Clear virtual offset

#ifdef CONFIG_ARM_GIC_V3
	/* GICv3 system register access */
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #ID_AA64PFR0_GIC_SHIFT, #4
	cbz	x0, 3f

	mrs_s	x0, SYS_ICC_SRE_EL2
	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
	orr	x0, x0, #ICC_SRE_EL2_ENABLE	// Set ICC_SRE_EL2.Enable==1
	msr_s	SYS_ICC_SRE_EL2, x0
	isb					// Make sure SRE is now set
	mrs_s	x0, SYS_ICC_SRE_EL2		// Read SRE back,
	tbz	x0, #0, 3f			// and check that it sticks
	msr_s	SYS_ICH_HCR_EL2, xzr		// Reset ICC_HCR_EL2 to defaults

3:
#endif

	/* Populate ID registers. */
	mrs	x0, midr_el1
	mrs	x1, mpidr_el1
	msr	vpidr_el2, x0
	msr	vmpidr_el2, x1

#ifdef CONFIG_COMPAT
	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
#endif

	/* EL2 debug */
	mrs	x1, id_aa64dfr0_el1
	sbfx	x0, x1, #ID_AA64DFR0_PMUVER_SHIFT, #4
	cmp	x0, #1
	b.lt	4f				// Skip if no PMU present
	mrs	x0, pmcr_el0			// Disable debug access traps
	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
4:
	csel	x3, xzr, x0, lt			// all PMU counters from EL1

	/* Statistical profiling */
	ubfx	x0, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
	cbz	x0, 7f				// Skip if SPE not present
	cbnz	x2, 6f				// VHE?
	mrs_s	x4, SYS_PMBIDR_EL1		// If SPE available at EL2,
	and	x4, x4, #(1 << SYS_PMBIDR_EL1_P_SHIFT)
	cbnz	x4, 5f				// then permit sampling of physical
	mov	x4, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \
		      1 << SYS_PMSCR_EL2_PA_SHIFT)
	msr_s	SYS_PMSCR_EL2, x4		// addresses and physical counter
5:
	mov	x1, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
	orr	x3, x3, x1			// If we don't have VHE, then
	b	7f				// use EL1&0 translation.
6:						// For VHE, use EL2 translation
	orr	x3, x3, #MDCR_EL2_TPMS		// and disable access from EL1
7:
	msr	mdcr_el2, x3			// Configure debug traps

	/* LORegions */
	mrs	x1, id_aa64mmfr1_el1
	ubfx	x0, x1, #ID_AA64MMFR1_LOR_SHIFT, 4
	cbz	x0, 1f
	msr_s	SYS_LORC_EL1, xzr
1:

	/* Stage-2 translation */
	msr	vttbr_el2, xzr

	cbz	x2, install_el2_stub

	mov	w0, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
	isb
	ret

install_el2_stub:
	/*
	 * When VHE is not in use, early init of EL2 and EL1 needs to be
	 * done here.
	 * When VHE _is_ in use, EL1 will not be used in the host and
	 * requires no configuration, and all non-hyp-specific EL2 setup
	 * will be done via the _EL1 system register aliases in __cpu_setup.
	 */
	mov_q	x0, (SCTLR_EL1_RES1 | ENDIAN_SET_EL1)
	msr	sctlr_el1, x0

	/* Coprocessor traps. */
	mov	x0, #0x33ff
	msr	cptr_el2, x0			// Disable copro. traps to EL2
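	/*
	 * Note: 0x33ff is the CPTR_EL2 RES1 bits plus CPTR_EL2_TZ, so SVE
	 * stays trapped until the SVE check below clears TZ.
	 */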

	/* SVE register access */
	mrs	x1, id_aa64pfr0_el1
	ubfx	x1, x1, #ID_AA64PFR0_SVE_SHIFT, #4
	cbz	x1, 7f

	bic	x0, x0, #CPTR_EL2_TZ		// Also disable SVE traps
	msr	cptr_el2, x0			// Disable copro. traps to EL2
	isb
	mov	x1, #ZCR_ELx_LEN_MASK		// SVE: Enable full vector
	msr_s	SYS_ZCR_EL2, x1			// length for EL1.

	/* Hypervisor stub */
7:	adr_l	x0, __hyp_stub_vectors
	msr	vbar_el2, x0

	/* spsr */
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, x0
	msr	elr_el2, lr
	mov	w0, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
	eret
ENDPROC(el2_setup)

/*
 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
 * in w0. See arch/arm64/include/asm/virt.h for more info.
 */
set_cpu_boot_mode_flag:
	adr_l	x1, __boot_cpu_mode
	cmp	w0, #BOOT_CPU_MODE_EL2
	b.ne	1f
	add	x1, x1, #4
1:	str	w0, [x1]			// Record the CPU boot mode
	dmb	sy
	dc	ivac, x1			// Invalidate potentially stale cache line
	ret
ENDPROC(set_cpu_boot_mode_flag)

/*
 * These values are written with the MMU off, but read with the MMU on.
 * Writers will invalidate the corresponding address, discarding up to a
 * 'Cache Writeback Granule' (CWG) worth of data. The linker script ensures
 * sufficient alignment that the CWG doesn't overlap another section.
 */
	.pushsection ".mmuoff.data.write", "aw"
/*
 * We need to find out the CPU boot mode long after boot, so we need to
 * store it in a writable variable.
 *
 * This is not in .bss, because we set it sufficiently early that the boot-time
 * zeroing of .bss would clobber it.
 */
ENTRY(__boot_cpu_mode)
	.long	BOOT_CPU_MODE_EL2
	.long	BOOT_CPU_MODE_EL1
/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 */
ENTRY(__early_cpu_boot_status)
	.quad	0

	.popsection

/*
 * This provides a "holding pen" in which platforms hold all secondary
 * cores until we're ready for them to initialise.
 */
ENTRY(secondary_holding_pen)
	bl	el2_setup			// Drop to EL1, w0=cpu_boot_mode
	bl	set_cpu_boot_mode_flag
	mrs	x0, mpidr_el1
	mov_q	x1, MPIDR_HWID_BITMASK
	and	x0, x0, x1
	adr_l	x3, secondary_holding_pen_release
pen:	ldr	x4, [x3]
	cmp	x4, x0
	b.eq	secondary_startup
	wfe
	b	pen
ENDPROC(secondary_holding_pen)

/*
 * Secondary entry point that jumps straight into the kernel. Only to
 * be used where CPUs are brought online dynamically by the kernel.
 */
ENTRY(secondary_entry)
	bl	el2_setup			// Drop to EL1
	bl	set_cpu_boot_mode_flag
	b	secondary_startup
ENDPROC(secondary_entry)

secondary_startup:
	/*
	 * Common entry point for secondary CPUs.
	 */
	bl	__cpu_secondary_check52bitva
	bl	__cpu_setup			// initialise processor
	adrp	x1, swapper_pg_dir
	bl	__enable_mmu
	ldr	x8, =__secondary_switched
	br	x8
ENDPROC(secondary_startup)

__secondary_switched:
	adr_l	x5, vectors
	msr	vbar_el1, x5
	isb

	adr_l	x0, secondary_data
	ldr	x1, [x0, #CPU_BOOT_STACK]	// get secondary_data.stack
	cbz	x1, __secondary_too_slow
	mov	sp, x1
	ldr	x2, [x0, #CPU_BOOT_TASK]
	cbz	x2, __secondary_too_slow
	msr	sp_el0, x2
	mov	x29, #0
	mov	x30, #0
	b	secondary_start_kernel
ENDPROC(__secondary_switched)

__secondary_too_slow:
	wfe
	wfi
	b	__secondary_too_slow
ENDPROC(__secondary_too_slow)

/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 *
 * update_early_cpu_boot_status status, tmp1, tmp2
 *  - Corrupts tmp1, tmp2
 *  - Writes 'status' to __early_cpu_boot_status and makes sure
 *    it is committed to memory.
 */

	.macro	update_early_cpu_boot_status status, tmp1, tmp2
	mov	\tmp2, #\status
	adr_l	\tmp1, __early_cpu_boot_status
	str	\tmp2, [\tmp1]
	dmb	sy
	dc	ivac, \tmp1			// Invalidate potentially stale cache line
	.endm
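
/*
 * For example, __enable_mmu below reports success (status 0) before
 * loading the TTBRs with:
 *	update_early_cpu_boot_status 0, x2, x3
 */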

/*
 * Enable the MMU.
 *
 *  x0  = SCTLR_EL1 value for turning on the MMU.
 *  x1  = TTBR1_EL1 value
 *
 * Returns to the caller via x30/lr. This requires the caller to be covered
 * by the .idmap.text section.
 *
 * Checks if the selected granule size is supported by the CPU.
 * If it isn't, park the CPU.
 */
ENTRY(__enable_mmu)
	mrs	x2, ID_AA64MMFR0_EL1
	ubfx	x2, x2, #ID_AA64MMFR0_TGRAN_SHIFT, 4
	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
	b.ne	__no_granule_support
	update_early_cpu_boot_status 0, x2, x3
	adrp	x2, idmap_pg_dir
	phys_to_ttbr x1, x1
	phys_to_ttbr x2, x2
	msr	ttbr0_el1, x2			// load TTBR0
	offset_ttbr1 x1, x3
	msr	ttbr1_el1, x1			// load TTBR1
	isb
	msr	sctlr_el1, x0
	isb
	/*
	 * Invalidate the local I-cache so that any instructions fetched
	 * speculatively from the PoC are discarded, since they may have
	 * been dynamically patched at the PoU.
	 */
	ic	iallu
	dsb	nsh
	isb
	ret
ENDPROC(__enable_mmu)

ENTRY(__cpu_secondary_check52bitva)
#ifdef CONFIG_ARM64_VA_BITS_52
	ldr_l	x0, vabits_actual
	cmp	x0, #52
	b.ne	2f

	mrs_s	x0, SYS_ID_AA64MMFR2_EL1
	and	x0, x0, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
	cbnz	x0, 2f

	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_52_BIT_VA, x0, x1
1:	wfe
	wfi
	b	1b

#endif
2:	ret
ENDPROC(__cpu_secondary_check52bitva)

__no_granule_support:
	/* Indicate that this CPU can't boot and is stuck in the kernel */
	update_early_cpu_boot_status \
		CPU_STUCK_IN_KERNEL | CPU_STUCK_REASON_NO_GRAN, x1, x2
1:
	wfe
	wfi
	b	1b
ENDPROC(__no_granule_support)

#ifdef CONFIG_RELOCATABLE
__relocate_kernel:
	/*
	 * Iterate over each entry in the relocation table, and apply the
	 * relocations in place.
	 */
	ldr	w9, =__rela_offset		// offset to reloc table
	ldr	w10, =__rela_size		// size of reloc table

	mov_q	x11, KIMAGE_VADDR		// default virtual offset
	add	x11, x11, x23			// actual virtual offset
	add	x9, x9, x11			// __va(.rela)
	add	x10, x9, x10			// __va(.rela) + sizeof(.rela)

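	/*
	 * Each Elf64_Rela entry is 24 bytes: r_offset, r_info and r_addend.
	 * The ldp/ldr pair below loads all three fields per iteration.
	 */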
0:	cmp	x9, x10
	b.hs	1f
	ldp	x12, x13, [x9], #24
	ldr	x14, [x9, #-8]
	cmp	w13, #R_AARCH64_RELATIVE
	b.ne	0b
	add	x14, x14, x23			// relocate
	str	x14, [x12, x23]
	b	0b

1:
#ifdef CONFIG_RELR
	/*
	 * Apply RELR relocations.
	 *
	 * RELR is a compressed format for storing relative relocations. The
	 * encoded sequence of entries looks like:
	 * [ AAAAAAAA BBBBBBB1 BBBBBBB1 ... AAAAAAAA BBBBBBB1 ... ]
	 *
	 * i.e. start with an address, followed by any number of bitmaps. The
	 * address entry encodes 1 relocation. The subsequent bitmap entries
	 * encode up to 63 relocations each, at subsequent offsets following
	 * the last address entry.
	 *
	 * The bitmap entries must have 1 in the least significant bit. The
	 * assumption here is that an address cannot have 1 in lsb. Odd
	 * addresses are not supported. Any odd addresses are stored in the RELA
	 * section, which is handled above.
	 *
	 * Excluding the least significant bit in the bitmap, each non-zero
	 * bit in the bitmap represents a relocation to be applied to
	 * a corresponding machine word that follows the base address
	 * word. The second least significant bit represents the machine
	 * word immediately following the initial address, and each bit
	 * that follows represents the next word, in linear order. As such,
	 * a single bitmap can encode up to 63 relocations in a 64-bit object.
	 *
	 * In this implementation we store the address of the next RELR table
	 * entry in x9, the address being relocated by the current address or
	 * bitmap entry in x13 and the address being relocated by the current
	 * bit in x14.
	 *
	 * Because addends are stored in place in the binary, RELR relocations
	 * cannot be applied idempotently. We use x24 to keep track of the
	 * currently applied displacement so that we can correctly relocate if
	 * __relocate_kernel is called twice with non-zero displacements (i.e.
	 * if there is both a physical misalignment and a KASLR displacement).
	 */
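	/*
	 * For example, an address entry A relocates the word at A itself;
	 * a following bitmap entry of 0b0111 (the mandatory marker bit plus
	 * two set bits) then relocates the words at A + 8 and A + 16.
	 */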
	ldr	w9, =__relr_offset		// offset to reloc table
	ldr	w10, =__relr_size		// size of reloc table
	add	x9, x9, x11			// __va(.relr)
	add	x10, x9, x10			// __va(.relr) + sizeof(.relr)

	sub	x15, x23, x24			// delta from previous offset
	cbz	x15, 7f				// nothing to do if unchanged
	mov	x24, x23			// save new offset

2:	cmp	x9, x10
	b.hs	7f
	ldr	x11, [x9], #8
	tbnz	x11, #0, 3f			// branch to handle bitmaps
	add	x13, x11, x23
	ldr	x12, [x13]			// relocate address entry
	add	x12, x12, x15
	str	x12, [x13], #8			// adjust to start of bitmap
	b	2b

3:	mov	x14, x13
4:	lsr	x11, x11, #1
	cbz	x11, 6f
	tbz	x11, #0, 5f			// skip bit if not set
	ldr	x12, [x14]			// relocate bit
	add	x12, x12, x15
	str	x12, [x14]

5:	add	x14, x14, #8			// move to next bit's address
	b	4b

6:	/*
	 * Move to the next bitmap's address. 8 is the word size, and 63 is the
	 * number of significant bits in a bitmap entry.
	 */
	add	x13, x13, #(8 * 63)
	b	2b

7:
#endif
	ret

ENDPROC(__relocate_kernel)
#endif

__primary_switch:
#ifdef CONFIG_RANDOMIZE_BASE
	mov	x19, x0				// preserve new SCTLR_EL1 value
	mrs	x20, sctlr_el1			// preserve old SCTLR_EL1 value
#endif

	adrp	x1, init_pg_dir
	bl	__enable_mmu
#ifdef CONFIG_RELOCATABLE
#ifdef CONFIG_RELR
	mov	x24, #0				// no RELR displacement yet
#endif
	bl	__relocate_kernel
#ifdef CONFIG_RANDOMIZE_BASE
	ldr	x8, =__primary_switched
	adrp	x0, __PHYS_OFFSET
	blr	x8

	/*
	 * If we return here, we have a KASLR displacement in x23 which we need
	 * to take into account by discarding the current kernel mapping and
	 * creating a new one.
	 */
	pre_disable_mmu_workaround
	msr	sctlr_el1, x20			// disable the MMU
	isb
	bl	__create_page_tables		// recreate kernel mapping

	tlbi	vmalle1				// Remove any stale TLB entries
	dsb	nsh
	isb

	msr	sctlr_el1, x19			// re-enable the MMU
	isb
	ic	iallu				// flush instructions fetched
	dsb	nsh				// via old mapping
	isb

	bl	__relocate_kernel
#endif
#endif
	ldr	x8, =__primary_switched
	adrp	x0, __PHYS_OFFSET
	br	x8
ENDPROC(__primary_switch)