diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 85da0f599cd6ac174c52eb1a8b376d171686c718..2c6e598a94dc896b4f18c5b160001d00ad8aeb1f 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -25,6 +25,7 @@
 #include <linux/irqchip/arm-gic-v3.h>
 
 #include <asm/assembler.h>
+#include <asm/boot.h>
 #include <asm/ptrace.h>
 #include <asm/asm-offsets.h>
 #include <asm/cache.h>
@@ -51,9 +52,6 @@
 #error TEXT_OFFSET must be less than 2MB
 #endif
 
-#define KERNEL_START   _text
-#define KERNEL_END     _end
-
 /*
  * Kernel startup entry point.
  * ---------------------------
@@ -102,8 +100,6 @@ _head:
 #endif
 
 #ifdef CONFIG_EFI
-       .globl  __efistub_stext_offset
-       .set    __efistub_stext_offset, stext - _head
        .align 3
 pe_header:
        .ascii  "PE"
@@ -123,11 +119,11 @@ optional_header:
        .short  0x20b                           // PE32+ format
        .byte   0x02                            // MajorLinkerVersion
        .byte   0x14                            // MinorLinkerVersion
-       .long   _end - stext                    // SizeOfCode
+       .long   _end - efi_header_end           // SizeOfCode
        .long   0                               // SizeOfInitializedData
        .long   0                               // SizeOfUninitializedData
        .long   __efistub_entry - _head         // AddressOfEntryPoint
-       .long   __efistub_stext_offset          // BaseOfCode
+       .long   efi_header_end - _head          // BaseOfCode
 
 extra_header_fields:
        .quad   0                               // ImageBase
@@ -144,7 +140,7 @@ extra_header_fields:
        .long   _end - _head                    // SizeOfImage
 
        // Everything before the kernel image is considered part of the header
-       .long   __efistub_stext_offset          // SizeOfHeaders
+       .long   efi_header_end - _head          // SizeOfHeaders
        .long   0                               // CheckSum
        .short  0xa                             // Subsystem (EFI application)
        .short  0                               // DllCharacteristics
@@ -188,10 +184,10 @@ section_table:
        .byte   0
        .byte   0
        .byte   0                       // end of 0 padding of section name
-       .long   _end - stext            // VirtualSize
-       .long   __efistub_stext_offset  // VirtualAddress
-       .long   _edata - stext          // SizeOfRawData
-       .long   __efistub_stext_offset  // PointerToRawData
+       .long   _end - efi_header_end   // VirtualSize
+       .long   efi_header_end - _head  // VirtualAddress
+       .long   _edata - efi_header_end // SizeOfRawData
+       .long   efi_header_end - _head  // PointerToRawData
 
        .long   0               // PointerToRelocations (0 for executables)
        .long   0               // PointerToLineNumbers (0 for executables)
@@ -200,20 +196,23 @@ section_table:
        .long   0xe0500020      // Characteristics (section flags)
 
        /*
-        * EFI will load stext onwards at the 4k section alignment
+        * EFI will load .text onwards at the 4k section alignment
         * described in the PE/COFF header. To ensure that instruction
         * sequences using an adrp and a :lo12: immediate will function
-        * correctly at this alignment, we must ensure that stext is
+        * correctly at this alignment, we must ensure that .text is
         * placed at a 4k boundary in the Image to begin with.
         */
        .align 12
+efi_header_end:
 #endif
 
+       __INIT
+
 ENTRY(stext)
        bl      preserve_boot_args
        bl      el2_setup                       // Drop to EL1, w20=cpu_boot_mode
-       mov     x23, xzr                        // KASLR offset, defaults to 0
        adrp    x24, __PHYS_OFFSET
+       and     x23, x24, MIN_KIMG_ALIGN - 1    // KASLR offset, defaults to 0
        bl      set_cpu_boot_mode_flag
        bl      __create_page_tables            // x25=TTBR0, x26=TTBR1
        /*
@@ -222,13 +221,11 @@ ENTRY(stext)
         * On return, the CPU will be ready for the MMU to be turned on and
         * the TCR will have been set.
         */
-       ldr     x27, 0f                         // address to jump to after
+       bl      __cpu_setup                     // initialise processor
+       adr_l   x27, __primary_switch           // address to jump to after
                                                // MMU has been enabled
-       adr_l   lr, __enable_mmu                // return (PIC) address
-       b       __cpu_setup                     // initialise processor
+       b       __enable_mmu
 ENDPROC(stext)
-       .align  3
-0:     .quad   __mmap_switched - (_head - TEXT_OFFSET) + KIMAGE_VADDR
 
 /*
  * Preserve the arguments passed by the bootloader in x0 .. x3
@@ -338,7 +335,7 @@ __create_page_tables:
        cmp     x0, x6
        b.lo    1b
 
-       ldr     x7, =SWAPPER_MM_MMUFLAGS
+       mov     x7, SWAPPER_MM_MMUFLAGS
 
        /*
         * Create the identity mapping.
@@ -394,12 +391,13 @@ __create_page_tables:
         * Map the kernel image (starting with PHYS_OFFSET).
         */
        mov     x0, x26                         // swapper_pg_dir
-       ldr     x5, =KIMAGE_VADDR
+       mov_q   x5, KIMAGE_VADDR + TEXT_OFFSET  // compile time __va(_text)
        add     x5, x5, x23                     // add KASLR displacement
        create_pgd_entry x0, x5, x3, x6
-       ldr     w6, kernel_img_size
-       add     x6, x6, x5
-       mov     x3, x24                         // phys offset
+       adrp    x6, _end                        // runtime __pa(_end)
+       adrp    x3, _text                       // runtime __pa(_text)
+       sub     x6, x6, x3                      // _end - _text
+       add     x6, x6, x5                      // runtime __va(_end)
        create_block_map x0, x7, x3, x5, x6
 
        /*
@@ -414,16 +412,13 @@ __create_page_tables:
 
        ret     x28
 ENDPROC(__create_page_tables)
-
-kernel_img_size:
-       .long   _end - (_head - TEXT_OFFSET)
        .ltorg
 
 /*
  * The following fragment of code is executed with the MMU enabled.
  */
        .set    initial_sp, init_thread_union + THREAD_START_SP
-__mmap_switched:
+__primary_switched:
        mov     x28, lr                         // preserve LR
        adr_l   x8, vectors                     // load VBAR_EL1 with virtual
        msr     vbar_el1, x8                    // vector table address
@@ -437,44 +432,6 @@ __mmap_switched:
        bl      __pi_memset
        dsb     ishst                           // Make zero page visible to PTW
 
-#ifdef CONFIG_RELOCATABLE
-
-       /*
-        * Iterate over each entry in the relocation table, and apply the
-        * relocations in place.
-        */
-       adr_l   x8, __dynsym_start              // start of symbol table
-       adr_l   x9, __reloc_start               // start of reloc table
-       adr_l   x10, __reloc_end                // end of reloc table
-
-0:     cmp     x9, x10
-       b.hs    2f
-       ldp     x11, x12, [x9], #24
-       ldr     x13, [x9, #-8]
-       cmp     w12, #R_AARCH64_RELATIVE
-       b.ne    1f
-       add     x13, x13, x23                   // relocate
-       str     x13, [x11, x23]
-       b       0b
-
-1:     cmp     w12, #R_AARCH64_ABS64
-       b.ne    0b
-       add     x12, x12, x12, lsl #1           // symtab offset: 24x top word
-       add     x12, x8, x12, lsr #(32 - 3)     // ... shifted into bottom word
-       ldrsh   w14, [x12, #6]                  // Elf64_Sym::st_shndx
-       ldr     x15, [x12, #8]                  // Elf64_Sym::st_value
-       cmp     w14, #-0xf                      // SHN_ABS (0xfff1) ?
-       add     x14, x15, x23                   // relocate
-       csel    x15, x14, x15, ne
-       add     x15, x13, x15
-       str     x15, [x11, x23]
-       b       0b
-
-2:     adr_l   x8, kimage_vaddr                // make relocated kimage_vaddr
-       dc      cvac, x8                        // value visible to secondaries
-       dsb     sy                              // with MMU off
-#endif
-
        adr_l   sp, initial_sp, x4
        mov     x4, sp
        and     x4, x4, #~(THREAD_SIZE - 1)
@@ -490,17 +447,19 @@ __mmap_switched:
        bl      kasan_early_init
 #endif
 #ifdef CONFIG_RANDOMIZE_BASE
-       cbnz    x23, 0f                         // already running randomized?
+       tst     x23, ~(MIN_KIMG_ALIGN - 1)      // already running randomized?
+       b.ne    0f
        mov     x0, x21                         // pass FDT address in x0
+       mov     x1, x23                         // pass modulo offset in x1
        bl      kaslr_early_init                // parse FDT for KASLR options
        cbz     x0, 0f                          // KASLR disabled? just proceed
-       mov     x23, x0                         // record KASLR offset
+       orr     x23, x23, x0                    // record KASLR offset
        ret     x28                             // we must enable KASLR, return
                                                // to __enable_mmu()
 0:
 #endif
        b       start_kernel
-ENDPROC(__mmap_switched)
+ENDPROC(__primary_switched)
 
 /*
  * end early head section, begin head code that is also used for
@@ -650,7 +609,7 @@ ENDPROC(el2_setup)
  * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
  * in x20. See arch/arm64/include/asm/virt.h for more info.
  */
-ENTRY(set_cpu_boot_mode_flag)
+set_cpu_boot_mode_flag:
        adr_l   x1, __boot_cpu_mode
        cmp     w20, #BOOT_CPU_MODE_EL2
        b.ne    1f
@@ -683,7 +642,7 @@ ENTRY(secondary_holding_pen)
        bl      el2_setup                       // Drop to EL1, w20=cpu_boot_mode
        bl      set_cpu_boot_mode_flag
        mrs     x0, mpidr_el1
-       ldr     x1, =MPIDR_HWID_BITMASK
+       mov_q   x1, MPIDR_HWID_BITMASK
        and     x0, x0, x1
        adr_l   x3, secondary_holding_pen_release
 pen:   ldr     x4, [x3]
@@ -703,7 +662,7 @@ ENTRY(secondary_entry)
        b       secondary_startup
 ENDPROC(secondary_entry)
 
-ENTRY(secondary_startup)
+secondary_startup:
        /*
         * Common entry point for secondary CPUs.
         */
@@ -711,14 +670,11 @@ ENTRY(secondary_startup)
        adrp    x26, swapper_pg_dir
        bl      __cpu_setup                     // initialise processor
 
-       ldr     x8, kimage_vaddr
-       ldr     w9, 0f
-       sub     x27, x8, w9, sxtw               // address to jump to after enabling the MMU
+       adr_l   x27, __secondary_switch         // address to jump to after enabling the MMU
        b       __enable_mmu
 ENDPROC(secondary_startup)
-0:     .long   (_text - TEXT_OFFSET) - __secondary_switched
 
-ENTRY(__secondary_switched)
+__secondary_switched:
        adr_l   x5, vectors
        msr     vbar_el1, x5
        isb
@@ -768,7 +724,7 @@ ENTRY(__early_cpu_boot_status)
  * If it isn't, park the CPU
  */
        .section        ".idmap.text", "ax"
-__enable_mmu:
+ENTRY(__enable_mmu)
        mrs     x22, sctlr_el1                  // preserve old SCTLR_EL1 value
        mrs     x1, ID_AA64MMFR0_EL1
        ubfx    x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
@@ -806,7 +762,6 @@ __enable_mmu:
        ic      iallu                           // flush instructions fetched
        dsb     nsh                             // via old mapping
        isb
-       add     x27, x27, x23                   // relocated __mmap_switched
 #endif
        br      x27
 ENDPROC(__enable_mmu)
@@ -819,3 +774,53 @@ __no_granule_support:
        wfi
        b 1b
 ENDPROC(__no_granule_support)
+
+__primary_switch:
+#ifdef CONFIG_RELOCATABLE
+       /*
+        * Iterate over each entry in the relocation table, and apply the
+        * relocations in place.
+        */
+       ldr     w8, =__dynsym_offset            // offset to symbol table
+       ldr     w9, =__rela_offset              // offset to reloc table
+       ldr     w10, =__rela_size               // size of reloc table
+
+       mov_q   x11, KIMAGE_VADDR               // default virtual offset
+       add     x11, x11, x23                   // actual virtual offset
+       add     x8, x8, x11                     // __va(.dynsym)
+       add     x9, x9, x11                     // __va(.rela)
+       add     x10, x9, x10                    // __va(.rela) + sizeof(.rela)
+
+0:     cmp     x9, x10
+       b.hs    2f
+       ldp     x11, x12, [x9], #24
+       ldr     x13, [x9, #-8]
+       cmp     w12, #R_AARCH64_RELATIVE
+       b.ne    1f
+       add     x13, x13, x23                   // relocate
+       str     x13, [x11, x23]
+       b       0b
+
+1:     cmp     w12, #R_AARCH64_ABS64
+       b.ne    0b
+       add     x12, x12, x12, lsl #1           // symtab offset: 24x top word
+       add     x12, x8, x12, lsr #(32 - 3)     // ... shifted into bottom word
+       ldrsh   w14, [x12, #6]                  // Elf64_Sym::st_shndx
+       ldr     x15, [x12, #8]                  // Elf64_Sym::st_value
+       cmp     w14, #-0xf                      // SHN_ABS (0xfff1) ?
+       add     x14, x15, x23                   // relocate
+       csel    x15, x14, x15, ne
+       add     x15, x13, x15
+       str     x15, [x11, x23]
+       b       0b
+
+2:
+#endif
+       ldr     x8, =__primary_switched
+       br      x8
+ENDPROC(__primary_switch)
+
+__secondary_switch:
+       ldr     x8, =__secondary_switched
+       br      x8
+ENDPROC(__secondary_switch)