/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/virt.h>

/*
 * swapper_pg_dir is the virtual address of the initial page table. We place
 * the page tables 3 * PAGE_SIZE below KERNEL_RAM_VADDR. The idmap_pg_dir has
 * 2 pages and is placed below swapper_pg_dir.
 */
#define KERNEL_RAM_VADDR	(PAGE_OFFSET + TEXT_OFFSET)

#if (KERNEL_RAM_VADDR & 0xfffff) != 0x80000
#error KERNEL_RAM_VADDR must start at 0xXXX80000
#endif

#define SWAPPER_DIR_SIZE	(3 * PAGE_SIZE)
#define IDMAP_DIR_SIZE		(2 * PAGE_SIZE)

	.globl	swapper_pg_dir
	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - SWAPPER_DIR_SIZE

	.globl	idmap_pg_dir
	.equ	idmap_pg_dir, swapper_pg_dir - IDMAP_DIR_SIZE
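
/*
 * A worked example of the layout above (illustrative; assumes 4KB pages and
 * the default TEXT_OFFSET of 0x80000, which the #error above enforces):
 *   SWAPPER_DIR_SIZE = 3 * 4KB = 0x3000, IDMAP_DIR_SIZE = 2 * 4KB = 0x2000
 *   swapper_pg_dir = PAGE_OFFSET + 0x80000 - 0x3000 = PAGE_OFFSET + 0x7d000
 *   idmap_pg_dir   = swapper_pg_dir - 0x2000        = PAGE_OFFSET + 0x7b000
 * i.e. both table sets sit in the otherwise unused RAM just below the kernel.
 */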

	.macro	pgtbl, ttb0, ttb1, phys
	add	\ttb1, \phys, #TEXT_OFFSET - SWAPPER_DIR_SIZE
	sub	\ttb0, \ttb1, #IDMAP_DIR_SIZE
	.endm
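
/*
 * Note: with \phys = PHYS_OFFSET this macro computes
 *   \ttb1 = PHYS_OFFSET + TEXT_OFFSET - SWAPPER_DIR_SIZE = __pa(swapper_pg_dir)
 *   \ttb0 = \ttb1 - IDMAP_DIR_SIZE                       = __pa(idmap_pg_dir)
 * matching the .equ definitions above.
 */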

#ifdef CONFIG_ARM64_64K_PAGES
#define BLOCK_SHIFT	PAGE_SHIFT
#define BLOCK_SIZE	PAGE_SIZE
#else
#define BLOCK_SHIFT	SECTION_SHIFT
#define BLOCK_SIZE	SECTION_SIZE
#endif

#define KERNEL_START	KERNEL_RAM_VADDR
#define KERNEL_END	_end

/*
 * Initial memory map attributes.
 */
#ifndef CONFIG_SMP
#define PTE_FLAGS	PTE_TYPE_PAGE | PTE_AF
#define PMD_FLAGS	PMD_TYPE_SECT | PMD_SECT_AF
#else
#define PTE_FLAGS	PTE_TYPE_PAGE | PTE_AF | PTE_SHARED
#define PMD_FLAGS	PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S
#endif

#ifdef CONFIG_ARM64_64K_PAGES
#define MM_MMUFLAGS	PTE_ATTRINDX(MT_NORMAL) | PTE_FLAGS
#else
#define MM_MMUFLAGS	PMD_ATTRINDX(MT_NORMAL) | PMD_FLAGS
#endif
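
/*
 * Note on the flags above: PTE_AF/PMD_SECT_AF pre-set the hardware Access
 * Flag so the first access through these entries does not fault, and on SMP
 * the PTE_SHARED/PMD_SECT_S bits make the mappings inner shareable so they
 * are coherent between cores. MT_NORMAL selects the Normal-memory attribute
 * index in MAIR_EL1 (programmed by the CPU setup code in arch/arm64/mm/proc.S).
 */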

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address to the FDT blob.
 *
 * This code is mostly position independent, so you call it at
 * __pa(PAGE_OFFSET + TEXT_OFFSET), i.e. the physical address the image was
 * loaded at.
 *
 * Note that the callee-saved registers are used for storing variables
 * that are useful before the MMU is enabled. The allocations are described
 * in the entry routines.
 */
	__HEAD

	/*
	 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
	 */
	b	stext				// branch to kernel start, magic
	.long	0				// reserved
	.quad	TEXT_OFFSET			// Image load offset from start of RAM
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.byte	0x41				// Magic number, "ARM\x64"
	.byte	0x52
	.byte	0x4d
	.byte	0x64
	.word	0				// reserved
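
	/*
	 * The resulting 64-byte header layout (derived from the directives
	 * above; see also Documentation/arm64/booting.txt):
	 *   0x00         branch instruction (4 bytes) + 4 reserved bytes
	 *   0x08         image load offset from start of RAM (TEXT_OFFSET)
	 *   0x10 - 0x37  five reserved 64-bit words
	 *   0x38         magic "ARM\x64" (0x41 0x52 0x4d 0x64)
	 *   0x3c         reserved word
	 */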

ENTRY(stext)
	mov	x21, x0				// x21=FDT
	bl	el2_setup			// Drop to EL1, w20=cpu_boot_mode
	bl	__calc_phys_offset		// x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
	bl	set_cpu_boot_mode_flag
	mrs	x22, midr_el1			// x22=cpuid
	mov	x0, x22
	bl	lookup_processor_type
	mov	x23, x0				// x23=current cpu_table
	cbz	x23, __error_p			// invalid processor (x23=0)?
	bl	__vet_fdt
	bl	__create_page_tables		// x25=TTBR0, x26=TTBR1
	/*
	 * The following calls CPU specific code in a position independent
	 * manner. See arch/arm64/mm/proc.S for details. x23 = base of
	 * cpu_info structure selected by lookup_processor_type above.
	 * On return, the CPU will be ready for the MMU to be turned on and
	 * the TCR will have been set.
	 */
	ldr	x27, __switch_data		// address to jump to after
						// MMU has been enabled
	adr	lr, __enable_mmu		// return (PIC) address
	ldr	x12, [x23, #CPU_INFO_SETUP]
	add	x12, x12, x28			// __virt_to_phys
	br	x12				// initialise processor
ENDPROC(stext)
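
/*
 * Summary of the register allocations used across the boot path (all taken
 * from the code above and below):
 *   w20      CPU boot mode (BOOT_CPU_MODE_EL1/EL2)
 *   x21      FDT physical address
 *   x22      MIDR_EL1 (cpuid)
 *   x23      matched cpu_table entry
 *   x24      PHYS_OFFSET
 *   x25/x26  TTBR0/TTBR1 page table physical addresses
 *   x27      virtual address to jump to once the MMU is on
 *   x28      PHYS_OFFSET - PAGE_OFFSET (the __virt_to_phys delta)
 */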

/*
 * If we're fortunate enough to boot at EL2, ensure that the world is
 * sane before dropping to EL1.
 *
 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in w20 if
 * booted in EL1 or EL2 respectively.
 */
ENTRY(el2_setup)
	mrs	x0, CurrentEL
	cmp	x0, #PSR_MODE_EL2t
	ccmp	x0, #PSR_MODE_EL2h, #0x4, ne
	b.ne	1f
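	/*
	 * Note on the cmp/ccmp idiom above: ccmp performs its compare only
	 * when the first cmp result was "ne"; otherwise it sets NZCV to the
	 * immediate #0x4 (Z set, i.e. "equal"). The b.ne is therefore taken
	 * only when CurrentEL matched neither EL2 encoding.
	 */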
	mrs	x0, sctlr_el2
CPU_BE(	orr	x0, x0, #(1 << 25)	)	// Set the EE bit for EL2
CPU_LE(	bic	x0, x0, #(1 << 25)	)	// Clear the EE bit for EL2
	msr	sctlr_el2, x0
	b	2f
1:	mrs	x0, sctlr_el1
CPU_BE(	orr	x0, x0, #(3 << 24)	)	// Set the EE and E0E bits for EL1
CPU_LE(	bic	x0, x0, #(3 << 24)	)	// Clear the EE and E0E bits for EL1
	msr	sctlr_el1, x0
	mov	w20, #BOOT_CPU_MODE_EL1		// This cpu booted in EL1
	isb
	ret

	/* Hyp configuration. */
2:	mov	x0, #(1 << 31)			// 64-bit EL1
	msr	hcr_el2, x0

	/* Generic timers. */
	mrs	x0, cnthctl_el2
	orr	x0, x0, #3			// Enable EL1 physical timers
	msr	cnthctl_el2, x0
	msr	cntvoff_el2, xzr		// Clear virtual offset

	/* Populate ID registers. */
	mrs	x0, midr_el1
	mrs	x1, mpidr_el1
	msr	vpidr_el2, x0
	msr	vmpidr_el2, x1

	/* sctlr_el1 */
	mov	x0, #0x0800			// Set/clear RES{1,0} bits
CPU_BE(	movk	x0, #0x33d0, lsl #16	)	// Set EE and E0E on BE systems
CPU_LE(	movk	x0, #0x30d0, lsl #16	)	// Clear EE and E0E on LE systems
	msr	sctlr_el1, x0

	/* Coprocessor traps. */
	mov	x0, #0x33ff
	msr	cptr_el2, x0			// Disable copro. traps to EL2

#ifdef CONFIG_COMPAT
	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
#endif

	/* Stage-2 translation */
	msr	vttbr_el2, xzr

	/* Hypervisor stub */
	adr	x0, __hyp_stub_vectors
	msr	vbar_el2, x0

	/* spsr */
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, x0
	msr	elr_el2, lr
	mov	w20, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
	eret
ENDPROC(el2_setup)

/*
 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
 * in w20. See arch/arm64/include/asm/virt.h for more info.
 */
ENTRY(set_cpu_boot_mode_flag)
	ldr	x1, =__boot_cpu_mode		// Compute __boot_cpu_mode
	add	x1, x1, x28
	cmp	w20, #BOOT_CPU_MODE_EL2
	b.ne	1f
	add	x1, x1, #4
1:	dc	cvac, x1			// Clean potentially dirty cache line
	dsb	sy
	str	w20, [x1]			// Record the boot mode
	dc	civac, x1			// Clean&invalidate potentially stale cache line
	dsb	sy
	ret
ENDPROC(set_cpu_boot_mode_flag)
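
/*
 * Note: the store above is performed with the MMU (and hence the D-cache)
 * off, so it goes straight to memory. The preceding "dc cvac" cleans any
 * dirty line that could later be evicted on top of the store, and the
 * following "dc civac" discards the stale cached copy so that observers
 * running with caches on see the freshly written value.
 */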

/*
 * We need to find out the CPU boot mode long after boot, so we need to
 * store it in a writable variable.
 *
 * This is not in .bss, because we set it sufficiently early that the boot-time
 * zeroing of .bss would clobber it.
 */
	.pushsection	.data..cacheline_aligned
ENTRY(__boot_cpu_mode)
	.align	L1_CACHE_SHIFT
	.long	BOOT_CPU_MODE_EL2
	.long	0
	.popsection
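
/*
 * Note (based on set_cpu_boot_mode_flag above and asm/virt.h): the first
 * word records an EL1 boot (written at offset 0) and the second an EL2 boot
 * (offset 4). Given the initial values here, the virt.h helpers can tell
 * whether every CPU, or only some, entered the kernel at EL2.
 */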

	.align	3
2:	.quad	.
	.quad	PAGE_OFFSET

#ifdef CONFIG_SMP
	.align	3
1:	.quad	.
	.quad	secondary_holding_pen_release

	/*
	 * This provides a "holding pen" for platforms to hold all secondary
	 * cores until we're ready for them to initialise.
	 */
ENTRY(secondary_holding_pen)
	bl	el2_setup			// Drop to EL1, w20=cpu_boot_mode
	bl	__calc_phys_offset		// x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
	bl	set_cpu_boot_mode_flag
	mrs	x0, mpidr_el1
	ldr	x1, =MPIDR_HWID_BITMASK
	and	x0, x0, x1
	adr	x1, 1b
	ldp	x2, x3, [x1]
	sub	x1, x1, x2
	add	x3, x3, x1
pen:	ldr	x4, [x3]
	cmp	x4, x0
	b.eq	secondary_startup
	wfe
	b	pen
ENDPROC(secondary_holding_pen)
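
/*
 * Note on the pen protocol: each waiting CPU converts the virtual address of
 * secondary_holding_pen_release to a physical one via the literal pool at 1b,
 * then spins in wfe until the value stored there equals its own MPIDR
 * (masked by MPIDR_HWID_BITMASK). The bring-up code on the boot CPU is
 * expected to write the target core's hardware ID there and send an event,
 * releasing exactly that core to secondary_startup.
 */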

	/*
	 * Secondary entry point that jumps straight into the kernel. Only to
	 * be used where CPUs are brought online dynamically by the kernel.
	 */
ENTRY(secondary_entry)
	bl	el2_setup			// Drop to EL1
	bl	__calc_phys_offset		// x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
	bl	set_cpu_boot_mode_flag
	b	secondary_startup
ENDPROC(secondary_entry)

ENTRY(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 */
	mrs	x22, midr_el1			// x22=cpuid
	mov	x0, x22
	bl	lookup_processor_type
	mov	x23, x0				// x23=current cpu_table
	cbz	x23, __error_p			// invalid processor (x23=0)?

	pgtbl	x25, x26, x24			// x25=TTBR0, x26=TTBR1
	ldr	x12, [x23, #CPU_INFO_SETUP]
	add	x12, x12, x28			// __virt_to_phys
	blr	x12				// initialise processor

	ldr	x21, =secondary_data
	ldr	x27, =__secondary_switched	// address to jump to after enabling the MMU
	b	__enable_mmu
ENDPROC(secondary_startup)

ENTRY(__secondary_switched)
	ldr	x0, [x21]			// get secondary_data.stack
	mov	sp, x0
	mov	x29, #0
	b	secondary_start_kernel
ENDPROC(__secondary_switched)
#endif	/* CONFIG_SMP */

/*
 * Setup common bits before finally enabling the MMU. Essentially this is just
 * loading the page table pointer and vector base registers.
 *
 * On entry to this code, x0 must contain the SCTLR_EL1 value for turning on
 * the MMU.
 */
__enable_mmu:
	ldr	x5, =vectors
	msr	vbar_el1, x5
	msr	ttbr0_el1, x25			// load TTBR0
	msr	ttbr1_el1, x26			// load TTBR1
	isb
	b	__turn_mmu_on
ENDPROC(__enable_mmu)

/*
 * Enable the MMU. This completely changes the structure of the visible memory
 * space. You will not be able to trace execution through this.
 *
 *  x0  = system control register
 *  x27 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
 */
	.align	6
__turn_mmu_on:
	msr	sctlr_el1, x0
	isb
	br	x27
ENDPROC(__turn_mmu_on)
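
/*
 * Note: the write to SCTLR_EL1 turns translation on while the PC still holds
 * a physical address, which is why the identity mapping set up in
 * __create_page_tables must cover this code; only the final "br x27" moves
 * execution to a kernel virtual address. The .align 6 keeps this tiny
 * routine within a single 64-byte cache line.
 */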

/*
 * Calculate the start of physical memory.
 */
__calc_phys_offset:
	adr	x0, 1f
	ldp	x1, x2, [x0]
	sub	x28, x0, x1			// x28 = PHYS_OFFSET - PAGE_OFFSET
	add	x24, x2, x28			// x24 = PHYS_OFFSET
	ret
ENDPROC(__calc_phys_offset)

	.align 3
1:	.quad	.
	.quad	PAGE_OFFSET
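
/*
 * A worked example with made-up addresses: "adr x0, 1f" yields the run-time
 * (physical, MMU off) address of the literal, while the ".quad ." word holds
 * its link-time virtual address. If the literal links at 0xffffffc000081000
 * but runs at 0x80081000, then
 *   x28 = 0x80081000 - 0xffffffc000081000 (mod 2^64)
 *   x24 = PAGE_OFFSET + x28 = 0x80000000 = PHYS_OFFSET
 * (assuming PAGE_OFFSET = 0xffffffc000000000 and RAM starting at 0x80000000).
 */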

/*
 * Macro to populate the PGD with an entry pointing to the next-level table
 * (tbl) that covers the given virtual address.
 *
 * Preserves:	pgd, tbl, virt
 * Corrupts:	tmp1, tmp2
 */
	.macro	create_pgd_entry, pgd, tbl, virt, tmp1, tmp2
	lsr	\tmp1, \virt, #PGDIR_SHIFT
	and	\tmp1, \tmp1, #PTRS_PER_PGD - 1	// PGD index
	orr	\tmp2, \tbl, #3			// PGD entry table type
	str	\tmp2, [\pgd, \tmp1, lsl #3]
	.endm
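
/*
 * A worked example, assuming 4KB pages with a 39-bit VA space (PGDIR_SHIFT =
 * 30, PTRS_PER_PGD = 512): for \virt = PAGE_OFFSET = 0xffffffc000000000,
 *   index = (\virt >> 30) & 511 = 256
 * so the slot at \pgd + 256*8 receives \tbl | 3, a valid table descriptor
 * pointing at the next-level table.
 */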

/*
 * Macro to populate block entries in the page table for the start..end
 * virtual range (inclusive).
 *
 * Preserves:	tbl, flags
 * Corrupts:	phys, start, end, pstate
 */
	.macro	create_block_map, tbl, flags, phys, start, end
	lsr	\phys, \phys, #BLOCK_SHIFT
	lsr	\start, \start, #BLOCK_SHIFT
	and	\start, \start, #PTRS_PER_PTE - 1	// table index
	orr	\phys, \flags, \phys, lsl #BLOCK_SHIFT	// table entry
	lsr	\end, \end, #BLOCK_SHIFT
	and	\end, \end, #PTRS_PER_PTE - 1		// table end index
9999:	str	\phys, [\tbl, \start, lsl #3]		// store the entry
	add	\start, \start, #1			// next entry
	add	\phys, \phys, #BLOCK_SIZE		// next block
	cmp	\start, \end
	b.ls	9999b
	.endm
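
/*
 * A worked example, assuming 4KB pages (BLOCK_SHIFT = SECTION_SHIFT = 21,
 * i.e. 2MB blocks): each iteration stores one block descriptor
 *   \flags | (\phys & ~(2MB - 1))
 * at index (\start >> 21) & 511, then advances \phys by one 2MB block. The
 * b.ls (unsigned lower-or-same) makes the end index inclusive.
 */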

/*
 * Set up the initial page tables. We only set up the bare minimum required
 * to get the kernel running. The following sections are required:
 *   - identity mapping to enable the MMU (low address, TTBR0)
 *   - first few MB of the kernel linear mapping to jump to once the MMU has
 *     been enabled, including the FDT blob (TTBR1)
 *   - pgd entry for fixed mappings (TTBR1)
 */
__create_page_tables:
	pgtbl	x25, x26, x24			// idmap_pg_dir and swapper_pg_dir addresses
	mov	x27, lr

	/*
	 * Invalidate the idmap and swapper page tables to avoid potential
	 * dirty cache lines being evicted.
	 */
	mov	x0, x25
	add	x1, x26, #SWAPPER_DIR_SIZE
	bl	__inval_cache_range

	/*
	 * Clear the idmap and swapper page tables.
	 */
	mov	x0, x25
	add	x6, x26, #SWAPPER_DIR_SIZE
1:	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	cmp	x0, x6
	b.lo	1b

	ldr	x7, =MM_MMUFLAGS

	/*
	 * Create the identity mapping.
	 */
	add	x0, x25, #PAGE_SIZE		// section table address
	ldr	x3, =KERNEL_START
	add	x3, x3, x28			// __pa(KERNEL_START)
	create_pgd_entry x25, x0, x3, x5, x6
	ldr	x6, =KERNEL_END
	mov	x5, x3				// __pa(KERNEL_START)
	add	x6, x6, x28			// __pa(KERNEL_END)
	create_block_map x0, x7, x3, x5, x6

	/*
	 * Map the kernel image (starting with PHYS_OFFSET).
	 */
	add	x0, x26, #PAGE_SIZE		// section table address
	mov	x5, #PAGE_OFFSET
	create_pgd_entry x26, x0, x5, x3, x6
	ldr	x6, =KERNEL_END
	mov	x3, x24				// phys offset
	create_block_map x0, x7, x3, x5, x6

	/*
	 * Map the FDT blob (maximum 2MB; must be within 512MB of
	 * PHYS_OFFSET).
	 */
	mov	x3, x21				// FDT phys address
	and	x3, x3, #~((1 << 21) - 1)	// 2MB aligned
	mov	x6, #PAGE_OFFSET
	sub	x5, x3, x24			// subtract PHYS_OFFSET
	tst	x5, #~((1 << 29) - 1)		// within 512MB?
	csel	x21, xzr, x21, ne		// zero the FDT pointer
	b.ne	1f
	add	x5, x5, x6			// __va(FDT blob)
	add	x6, x5, #1 << 21		// 2MB for the FDT blob
	sub	x6, x6, #1			// inclusive range
	create_block_map x0, x7, x3, x5, x6
1:
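	/*
	 * A worked example of the FDT window check above, with made-up
	 * addresses: with PHYS_OFFSET = 0x80000000 and an FDT at 0x82345678,
	 *   x3 = 0x82345678 & ~0x1fffff = 0x82200000   (2MB-aligned base)
	 *   x5 = 0x82200000 - 0x80000000 = 0x02200000  (< 512MB, so accepted)
	 * and the 2MB from 0x82200000 is mapped at PAGE_OFFSET + 0x02200000.
	 * A blob further than 512MB from PHYS_OFFSET fails the tst, and x21
	 * is zeroed so later code sees no FDT.
	 */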
	/*
	 * Create the pgd entry for the fixed mappings.
	 */
	ldr	x5, =FIXADDR_TOP		// Fixed mapping virtual address
	add	x0, x26, #2 * PAGE_SIZE		// section table address
	create_pgd_entry x26, x0, x5, x6, x7

	/*
	 * Since the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate the idmap and swapper page
	 * tables again to remove any speculatively loaded cache lines.
	 */
	mov	x0, x25
	add	x1, x26, #SWAPPER_DIR_SIZE
	bl	__inval_cache_range

	mov	lr, x27
	ret
ENDPROC(__create_page_tables)
	.ltorg

	.align	3
	.type	__switch_data, %object
__switch_data:
	.quad	__mmap_switched
	.quad	__bss_start			// x6
	.quad	_end				// x7
	.quad	processor_id			// x4
	.quad	__fdt_pointer			// x5
	.quad	memstart_addr			// x6
	.quad	init_thread_union + THREAD_START_SP // sp

/*
 * The following fragment of code is executed with the MMU enabled and uses
 * absolute addresses; it is not position independent.
 */
__mmap_switched:
	adr	x3, __switch_data + 8

	ldp	x6, x7, [x3], #16
1:	cmp	x6, x7
	b.hs	2f
	str	xzr, [x6], #8			// Clear BSS
	b	1b
2:
	ldp	x4, x5, [x3], #16
	ldr	x6, [x3], #8
	ldr	x16, [x3]
	mov	sp, x16
	str	x22, [x4]			// Save processor ID
	str	x21, [x5]			// Save FDT pointer
	str	x24, [x6]			// Save PHYS_OFFSET
	mov	x29, #0
	b	start_kernel
ENDPROC(__mmap_switched)

/*
 * Exception handling. Something went wrong and we can't proceed. We ought to
 * tell the user, but since we don't have any guarantee that we're even
 * running on the right architecture, we do virtually nothing.
 */
__error_p:
ENDPROC(__error_p)

__error:
1:	nop
	b	1b
ENDPROC(__error)

/*
 * This function gets the processor ID in w0 and searches the cpu_table[] for
 * a match. It returns a pointer to the matching struct cpu_info, or zero if
 * none is found. The cpu_table[] must end with an empty (all zeros)
 * structure.
 *
 * This routine can be called from C code and it needs to work with the MMU
 * both disabled and enabled (the offset is calculated automatically).
 */
ENTRY(lookup_processor_type)
	adr	x1, __lookup_processor_type_data
	ldp	x2, x3, [x1]
	sub	x1, x1, x2			// get offset between VA and PA
	add	x3, x3, x1			// convert VA to PA
1:
	ldp	w5, w6, [x3]			// load cpu_id_val and cpu_id_mask
	cbz	w5, 2f				// end of list?
	and	w6, w6, w0
	cmp	w5, w6
	b.eq	3f
	add	x3, x3, #CPU_INFO_SZ
	b	1b
2:
	mov	x3, #0				// unknown processor
3:
	mov	x0, x3
	ret
ENDPROC(lookup_processor_type)
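
/*
 * Note: an entry matches when (midr & cpu_id_mask) == cpu_id_val, so a
 * cpu_table entry can mask out, say, the revision and variant fields of
 * MIDR_EL1 and cover every silicon revision of a part with a single entry.
 */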

	.align	3
	.type	__lookup_processor_type_data, %object
__lookup_processor_type_data:
	.quad	.
	.quad	cpu_table
	.size	__lookup_processor_type_data, . - __lookup_processor_type_data

/*
 * Determine validity of the x21 FDT pointer.
 * The dtb must be 8-byte aligned and live in the first 512MB of RAM, i.e.
 * within 512MB above PHYS_OFFSET.
 */
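/*
 * Note: the three checks below mirror that comment: the tst rejects pointers
 * that are not 8-byte aligned, and the two compares reject pointers below
 * PHYS_OFFSET (x24) or at/above PHYS_OFFSET + 512MB, matching the 512MB
 * window mapped for the blob in __create_page_tables. On any failure x21 is
 * zeroed so callers see no FDT.
 */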
__vet_fdt:
	tst	x21, #0x7
	b.ne	1f
	cmp	x21, x24
	b.lt	1f
	mov	x0, #(1 << 29)
	add	x0, x0, x24
	cmp	x21, x0
	b.ge	1f
	ret
1:
	mov	x21, #0
	ret
ENDPROC(__vet_fdt)