#------------------------------------------------------------------------------\r
#\r
# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>\r
-# Copyright (c) 2011 - 2014, ARM Limited. All rights reserved.\r
+# Copyright (c) 2011 - 2017, ARM Limited. All rights reserved.\r
+# Copyright (c) 2016, Linaro Limited. All rights reserved.\r
#\r
-# This program and the accompanying materials\r
-# are licensed and made available under the terms and conditions of the BSD License\r
-# which accompanies this distribution. The full text of the license may be found at\r
-# http://opensource.org/licenses/bsd-license.php\r
-#\r
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+# SPDX-License-Identifier: BSD-2-Clause-Patent\r
#\r
#------------------------------------------------------------------------------\r
\r
#include <Chipset/AArch64.h>\r
#include <AsmMacroIoLibV8.h>\r
\r
-.text\r
-.align 3\r
-\r
-GCC_ASM_EXPORT (ArmInvalidateInstructionCache)\r
-GCC_ASM_EXPORT (ArmInvalidateDataCacheEntryByMVA)\r
-GCC_ASM_EXPORT (ArmCleanDataCacheEntryByMVA)\r
-GCC_ASM_EXPORT (ArmCleanDataCacheEntryToPoUByMVA)\r
-GCC_ASM_EXPORT (ArmCleanInvalidateDataCacheEntryByMVA)\r
-GCC_ASM_EXPORT (ArmInvalidateDataCacheEntryBySetWay)\r
-GCC_ASM_EXPORT (ArmCleanDataCacheEntryBySetWay)\r
-GCC_ASM_EXPORT (ArmCleanInvalidateDataCacheEntryBySetWay)\r
-GCC_ASM_EXPORT (ArmEnableMmu)\r
-GCC_ASM_EXPORT (ArmDisableMmu)\r
-GCC_ASM_EXPORT (ArmDisableCachesAndMmu)\r
-GCC_ASM_EXPORT (ArmMmuEnabled)\r
-GCC_ASM_EXPORT (ArmEnableDataCache)\r
-GCC_ASM_EXPORT (ArmDisableDataCache)\r
-GCC_ASM_EXPORT (ArmEnableInstructionCache)\r
-GCC_ASM_EXPORT (ArmDisableInstructionCache)\r
-GCC_ASM_EXPORT (ArmDisableAlignmentCheck)\r
-GCC_ASM_EXPORT (ArmEnableAlignmentCheck)\r
-GCC_ASM_EXPORT (ArmEnableBranchPrediction)\r
-GCC_ASM_EXPORT (ArmDisableBranchPrediction)\r
-GCC_ASM_EXPORT (AArch64AllDataCachesOperation)\r
-GCC_ASM_EXPORT (ArmDataMemoryBarrier)\r
-GCC_ASM_EXPORT (ArmDataSynchronizationBarrier)\r
-GCC_ASM_EXPORT (ArmInstructionSynchronizationBarrier)\r
-GCC_ASM_EXPORT (ArmWriteVBar)\r
-GCC_ASM_EXPORT (ArmReadVBar)\r
-GCC_ASM_EXPORT (ArmEnableVFP)\r
-GCC_ASM_EXPORT (ArmCallWFI)\r
-GCC_ASM_EXPORT (ArmReadMpidr)\r
-GCC_ASM_EXPORT (ArmReadTpidrurw)\r
-GCC_ASM_EXPORT (ArmWriteTpidrurw)\r
-GCC_ASM_EXPORT (ArmIsArchTimerImplemented)\r
-GCC_ASM_EXPORT (ArmReadIdPfr0)\r
-GCC_ASM_EXPORT (ArmReadIdPfr1)\r
-GCC_ASM_EXPORT (ArmWriteHcr)\r
-GCC_ASM_EXPORT (ArmReadCurrentEL)\r
-\r
.set CTRL_M_BIT, (1 << 0)\r
.set CTRL_A_BIT, (1 << 1)\r
.set CTRL_C_BIT, (1 << 2)\r
+.set CTRL_SA_BIT, (1 << 3)\r
.set CTRL_I_BIT, (1 << 12)\r
.set CPACR_VFP_BITS, (3 << 20)\r
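+
+// Note: the CTRL_*_BIT values above are the SCTLR_ELx bit positions used by
+// the functions below: M (MMU enable), A (alignment check), C (data cache),
+// SA (stack alignment check) and I (instruction cache), per the ARMv8 ARM
+// definition of SCTLR_EL1/EL2/EL3.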
\r
-ASM_PFX(ArmInvalidateDataCacheEntryByMVA):\r
+ASM_FUNC(ArmInvalidateDataCacheEntryByMVA)\r
dc ivac, x0 // Invalidate single data cache line\r
ret\r
\r
\r
-ASM_PFX(ArmCleanDataCacheEntryByMVA):\r
+ASM_FUNC(ArmCleanDataCacheEntryByMVA)\r
dc cvac, x0 // Clean single data cache line\r
ret\r
\r
\r
-ASM_PFX(ArmCleanDataCacheEntryToPoUByMVA):\r
+ASM_FUNC(ArmCleanDataCacheEntryToPoUByMVA)\r
dc cvau, x0 // Clean single data cache line to PoU\r
ret\r
\r
+ASM_FUNC(ArmInvalidateInstructionCacheEntryToPoUByMVA)\r
+ ic ivau, x0 // Invalidate single instruction cache line to PoU\r
+ ret\r
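+
+// Note: for coherent code modification, this is typically paired with
+// ArmCleanDataCacheEntryToPoUByMVA on the same address plus DSB/ISB barriers,
+// so that instruction fetches observe the newly written code.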
\r
-ASM_PFX(ArmCleanInvalidateDataCacheEntryByMVA):\r
+\r
+ASM_FUNC(ArmCleanInvalidateDataCacheEntryByMVA)\r
dc civac, x0 // Clean and invalidate single data cache line\r
ret\r
\r
\r
-ASM_PFX(ArmInvalidateDataCacheEntryBySetWay):\r
+ASM_FUNC(ArmInvalidateDataCacheEntryBySetWay)\r
dc isw, x0 // Invalidate this line\r
ret\r
\r
\r
-ASM_PFX(ArmCleanInvalidateDataCacheEntryBySetWay):\r
+ASM_FUNC(ArmCleanInvalidateDataCacheEntryBySetWay)\r
dc cisw, x0 // Clean and Invalidate this line\r
ret\r
\r
\r
-ASM_PFX(ArmCleanDataCacheEntryBySetWay):\r
+ASM_FUNC(ArmCleanDataCacheEntryBySetWay)\r
dc csw, x0 // Clean this line\r
ret\r
\r
\r
-ASM_PFX(ArmInvalidateInstructionCache):\r
+ASM_FUNC(ArmInvalidateInstructionCache)\r
ic iallu // Invalidate entire instruction cache\r
dsb sy\r
isb\r
ret\r
\r
\r
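+// The EL1_OR_EL2_OR_EL3(reg) and EL1_OR_EL2(reg) macros used below come from
+// AsmMacroIoLibV8.h: they read CurrentEL into the scratch register and branch
+// to the numbered local label matching the current exception level (1: for
+// EL1, 2: for EL2, 3: for EL3), so each function repeats its SCTLR
+// read/modify/write once per EL and rejoins at the final numbered label.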
-ASM_PFX(ArmEnableMmu):\r
+ASM_FUNC(ArmEnableMmu)\r
EL1_OR_EL2_OR_EL3(x1)\r
1: mrs x0, sctlr_el1 // Read System control register EL1\r
   b 4f
2: mrs x0, sctlr_el2 // Read System control register EL2
   b 4f
3: mrs x0, sctlr_el3 // Read System control register EL3
4: orr x0, x0, #CTRL_M_BIT // Set MMU enable bit
   EL1_OR_EL2_OR_EL3(x1)
1: tlbi vmalle1 // Invalidate the TLB entries at EL1
   dsb nsh
   isb
   msr sctlr_el1, x0 // Write back control register
   b 4f
2: tlbi alle2 // Invalidate the TLB entries at EL2
   dsb nsh
   isb
   msr sctlr_el2, x0 // Write back control register
   b 4f
3: tlbi alle3 // Invalidate the TLB entries at EL3
   dsb nsh
   isb
   msr sctlr_el3, x0 // Write back control register
4: dsb sy
   isb
   ret
\r
\r
-ASM_PFX(ArmDisableMmu):\r
+ASM_FUNC(ArmDisableMmu)\r
EL1_OR_EL2_OR_EL3(x1)\r
1: mrs x0, sctlr_el1 // Read System Control Register EL1\r
   b 4f
2: mrs x0, sctlr_el2 // Read System Control Register EL2
   b 4f
3: mrs x0, sctlr_el3 // Read System Control Register EL3
4: bic x0, x0, #CTRL_M_BIT // Clear MMU enable bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr sctlr_el1, x0 // Write back control register
   tlbi vmalle1
   b 4f
2: msr sctlr_el2, x0 // Write back control register
   tlbi alle2
   b 4f
3: msr sctlr_el3, x0 // Write back control register
   tlbi alle3
4: dsb sy
   isb
   ret
\r
\r
-ASM_PFX(ArmDisableCachesAndMmu):\r
+ASM_FUNC(ArmDisableCachesAndMmu)\r
EL1_OR_EL2_OR_EL3(x1)\r
1: mrs x0, sctlr_el1 // Get control register EL1\r
   b 4f
2: mrs x0, sctlr_el2 // Get control register EL2
   b 4f
3: mrs x0, sctlr_el3 // Get control register EL3
4: mov x1, #~(CTRL_M_BIT | CTRL_C_BIT | CTRL_I_BIT) // Disable MMU, D & I caches
   and x0, x0, x1
   EL1_OR_EL2_OR_EL3(x1)
1: msr sctlr_el1, x0 // Write back control register
   b 4f
2: msr sctlr_el2, x0 // Write back control register
   b 4f
3: msr sctlr_el3, x0 // Write back control register
4: dsb sy
   isb
   ret
\r
\r
-ASM_PFX(ArmMmuEnabled):\r
+ASM_FUNC(ArmMmuEnabled)\r
EL1_OR_EL2_OR_EL3(x1)\r
1: mrs x0, sctlr_el1 // Get control register EL1\r
   b 4f
2: mrs x0, sctlr_el2 // Get control register EL2
   b 4f
3: mrs x0, sctlr_el3 // Get control register EL3
4: and x0, x0, #CTRL_M_BIT // Isolate the MMU enable bit
   ret
\r
\r
-ASM_PFX(ArmEnableDataCache):\r
+ASM_FUNC(ArmEnableDataCache)\r
EL1_OR_EL2_OR_EL3(x1)\r
1: mrs x0, sctlr_el1 // Get control register EL1\r
   b 4f
2: mrs x0, sctlr_el2 // Get control register EL2
   b 4f
3: mrs x0, sctlr_el3 // Get control register EL3
4: orr x0, x0, #CTRL_C_BIT // Set C (data cache) bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr sctlr_el1, x0 // Write back control register
   b 4f
2: msr sctlr_el2, x0 // Write back control register
   b 4f
3: msr sctlr_el3, x0 // Write back control register
4: dsb sy
   isb
   ret
\r
\r
-ASM_PFX(ArmDisableDataCache):\r
+ASM_FUNC(ArmDisableDataCache)\r
EL1_OR_EL2_OR_EL3(x1)\r
1: mrs x0, sctlr_el1 // Get control register EL1\r
   b 4f
2: mrs x0, sctlr_el2 // Get control register EL2
   b 4f
3: mrs x0, sctlr_el3 // Get control register EL3
4: bic x0, x0, #CTRL_C_BIT // Clear C (data cache) bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr sctlr_el1, x0 // Write back control register
   b 4f
2: msr sctlr_el2, x0 // Write back control register
   b 4f
3: msr sctlr_el3, x0 // Write back control register
4: dsb sy
   isb
   ret
\r
\r
-ASM_PFX(ArmEnableInstructionCache):\r
+ASM_FUNC(ArmEnableInstructionCache)\r
EL1_OR_EL2_OR_EL3(x1)\r
1: mrs x0, sctlr_el1 // Get control register EL1\r
   b 4f
2: mrs x0, sctlr_el2 // Get control register EL2
   b 4f
3: mrs x0, sctlr_el3 // Get control register EL3
4: orr x0, x0, #CTRL_I_BIT // Set I (instruction cache) bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr sctlr_el1, x0 // Write back control register
   b 4f
2: msr sctlr_el2, x0 // Write back control register
   b 4f
3: msr sctlr_el3, x0 // Write back control register
4: dsb sy
   isb
   ret
\r
\r
-ASM_PFX(ArmDisableInstructionCache):\r
+ASM_FUNC(ArmDisableInstructionCache)\r
EL1_OR_EL2_OR_EL3(x1)\r
1: mrs x0, sctlr_el1 // Get control register EL1\r
   b 4f
2: mrs x0, sctlr_el2 // Get control register EL2
   b 4f
3: mrs x0, sctlr_el3 // Get control register EL3
4: bic x0, x0, #CTRL_I_BIT // Clear I (instruction cache) bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr sctlr_el1, x0 // Write back control register
   b 4f
2: msr sctlr_el2, x0 // Write back control register
   b 4f
3: msr sctlr_el3, x0 // Write back control register
4: dsb sy
   isb
   ret
\r
\r
-ASM_PFX(ArmEnableAlignmentCheck):\r
+ASM_FUNC(ArmEnableAlignmentCheck)\r
EL1_OR_EL2(x1)\r
1: mrs x0, sctlr_el1 // Get control register EL1\r
   b 3f
2: mrs x0, sctlr_el2 // Get control register EL2
3: orr x0, x0, #CTRL_A_BIT // Set A (alignment check) bit
   EL1_OR_EL2(x1)
1: msr sctlr_el1, x0 // Write back control register
   b 3f
2: msr sctlr_el2, x0 // Write back control register
3: dsb sy
   isb
   ret
\r
\r
-ASM_PFX(ArmDisableAlignmentCheck):\r
+ASM_FUNC(ArmDisableAlignmentCheck)\r
EL1_OR_EL2_OR_EL3(x1)\r
1: mrs x0, sctlr_el1 // Get control register EL1\r
   b 4f
2: mrs x0, sctlr_el2 // Get control register EL2
   b 4f
3: mrs x0, sctlr_el3 // Get control register EL3
4: bic x0, x0, #CTRL_A_BIT // Clear A (alignment check) bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr sctlr_el1, x0 // Write back control register
   b 4f
2: msr sctlr_el2, x0 // Write back control register
   b 4f
3: msr sctlr_el3, x0 // Write back control register
4: dsb sy
   isb
   ret
\r
+ASM_FUNC(ArmEnableStackAlignmentCheck)\r
+ EL1_OR_EL2(x1)\r
+1: mrs x0, sctlr_el1 // Get control register EL1\r
+ b 3f\r
+2: mrs x0, sctlr_el2 // Get control register EL2\r
+3: orr x0, x0, #CTRL_SA_BIT // Set SA (stack alignment check) bit\r
+ EL1_OR_EL2(x1)\r
+1: msr sctlr_el1, x0 // Write back control register\r
+ b 3f\r
+2: msr sctlr_el2, x0 // Write back control register\r
+3: dsb sy\r
+ isb\r
+ ret\r
+\r
+\r
+ASM_FUNC(ArmDisableStackAlignmentCheck)\r
+ EL1_OR_EL2_OR_EL3(x1)\r
+1: mrs x0, sctlr_el1 // Get control register EL1\r
+ b 4f\r
+2: mrs x0, sctlr_el2 // Get control register EL2\r
+ b 4f\r
+3: mrs x0, sctlr_el3 // Get control register EL3\r
+4: bic x0, x0, #CTRL_SA_BIT // Clear SA (stack alignment check) bit\r
+ EL1_OR_EL2_OR_EL3(x1)\r
+1: msr sctlr_el1, x0 // Write back control register\r
+ b 4f\r
+2: msr sctlr_el2, x0 // Write back control register\r
+ b 4f\r
+3: msr sctlr_el3, x0 // Write back control register\r
+4: dsb sy\r
+ isb\r
+ ret\r
+\r
\r
// Branch prediction is always turned on in AArch64; otherwise it is implementation specific. Left in as a no-op for C compatibility for now.
-ASM_PFX(ArmEnableBranchPrediction):\r
+ASM_FUNC(ArmEnableBranchPrediction)\r
ret\r
\r
\r
// Branch prediction is always turned on in AArch64; otherwise it is implementation specific. Left in as a no-op for C compatibility for now.
-ASM_PFX(ArmDisableBranchPrediction):\r
+ASM_FUNC(ArmDisableBranchPrediction)\r
ret\r
\r
\r
-ASM_PFX(AArch64AllDataCachesOperation):\r
+ASM_FUNC(AArch64AllDataCachesOperation)\r
// We can use regs 0-7 and 9-15 without having to save/restore.
// Save the frame pointer and link register on the stack - the stack must always be quad-word aligned.
- str x30, [sp, #-16]!\r
+ stp x29, x30, [sp, #-16]!\r
+ mov x29, sp\r
mov x1, x0 // Save Function call in x1\r
mrs x6, clidr_el1 // Read EL1 CLIDR\r
   and x3, x6, #0x7000000 // Mask out all but Level of Coherency (LoC)
   lsr x3, x3, #23 // Left align cache level value - the level is shifted by 1
                   // to the left to be a multiplier of 2 (add a 0 to the level)

   cbz x3, L_Finished // No need to clean if LoC is 0
   mov x10, #0 // Start clean at cache level 0

Loop1:
   add x2, x10, x10, lsr #1 // Work out 3x cache level for cache info
   lsr x12, x6, x2 // bottom 3 bits are the Cache type for this level
   and x12, x12, #7 // get those 3 bits alone
   cmp x12, #2 // what cache is at this level?
   b.lt L_Skip // no cache or only instruction cache at this level
   msr csselr_el1, x10 // write the Cache Size selection register with current level (CSSELR)
   isb // isb to sync the change to the CacheSizeID reg
   mrs x12, ccsidr_el1 // reads current Cache Size ID register (CCSIDR)
   and x2, x12, #0x7 // extract the line length field
   add x2, x2, #4 // add 4 for the line length offset (log2 16 bytes)
   mov x4, #0x400
   sub x4, x4, #1
   and x4, x4, x12, lsr #3 // x4 is the max number of the way size (right aligned)
   clz w5, w4 // w5 is the bit position of the way size increment
   mov x7, #0x00008000
   sub x7, x7, #1
   and x7, x7, x12, lsr #13 // x7 is the max number of the index size (right aligned)

Loop2:
   mov x9, x4 // x9 working copy of the max way size (right aligned)

Loop3:
   lsl x11, x9, x5
   orr x0, x10, x11 // factor in the way number and cache level into x0
   lsl x11, x7, x2
   orr x0, x0, x11 // factor in the index number

   blr x1 // call the function

   subs x9, x9, #1 // decrement the way number
   b.ge Loop3
   subs x7, x7, #1 // decrement the index
   b.ge Loop2
L_Skip:
   add x10, x10, #2 // increment the cache number
   cmp x3, x10
   b.gt Loop1

L_Finished:
dsb sy\r
isb\r
- ldr x30, [sp], #0x10\r
+ ldp x29, x30, [sp], #0x10\r
ret\r
\r
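+// Note: the set/way/level word passed to the operation follows the DC x, SW
+// argument format: cache level in bits [3:1], set index shifted left by
+// log2(line length in bytes), way index shifted into the top bits - exactly
+// what Loop2/Loop3 above compose into x0 before calling the function.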
\r
-ASM_PFX(ArmDataMemoryBarrier):\r
+ASM_FUNC(ArmDataMemoryBarrier)\r
dmb sy\r
ret\r
\r
\r
-ASM_PFX(ArmDataSynchronizationBarrier):\r
+ASM_FUNC(ArmDataSynchronizationBarrier)\r
dsb sy\r
ret\r
\r
\r
-ASM_PFX(ArmInstructionSynchronizationBarrier):\r
+ASM_FUNC(ArmInstructionSynchronizationBarrier)\r
isb\r
ret\r
\r
\r
-ASM_PFX(ArmWriteVBar):\r
+ASM_FUNC(ArmWriteVBar)\r
EL1_OR_EL2_OR_EL3(x1)\r
1: msr vbar_el1, x0 // Set the Address of the EL1 Vector Table in the VBAR register\r
   b 4f
2: msr vbar_el2, x0 // Set the Address of the EL2 Vector Table in the VBAR register
   b 4f
3: msr vbar_el3, x0 // Set the Address of the EL3 Vector Table in the VBAR register
4: isb
ret\r
\r
-ASM_PFX(ArmReadVBar):\r
+ASM_FUNC(ArmReadVBar)\r
EL1_OR_EL2_OR_EL3(x1)\r
1: mrs x0, vbar_el1 // Get the Address of the EL1 Vector Table from the VBAR register
   ret
2: mrs x0, vbar_el2 // Get the Address of the EL2 Vector Table from the VBAR register
   ret
3: mrs x0, vbar_el3 // Get the Address of the EL3 Vector Table from the VBAR register
   ret
\r
\r
-ASM_PFX(ArmEnableVFP):\r
+ASM_FUNC(ArmEnableVFP)\r
// Check whether floating-point is implemented in the processor.\r
mov x1, x30 // Save LR\r
bl ArmReadIdPfr0 // Read EL1 Processor Feature Register (PFR0)\r
mov x30, x1 // Restore LR\r
- ands x0, x0, #AARCH64_PFR0_FP// Extract bits indicating VFP implementation\r
- cmp x0, #0 // VFP is implemented if '0'.\r
- b.ne 4f // Exit if VFP not implemented.\r
+ ubfx x0, x0, #16, #4 // Extract the FP bits 16:19\r
+ cmp x0, #0xF // Check if FP bits are '1111b',\r
+ // i.e. Floating Point not implemented\r
+ b.eq 4f // Exit when VFP is not implemented.\r
+\r
   // VFP is implemented.
   // Make sure VFP exceptions are not trapped (to any exception level).
   mrs x0, cpacr_el1 // Read EL1 Coprocessor Access Control Register (CPACR)
   orr x0, x0, #CPACR_VFP_BITS // Disable VFP traps to EL1
   msr cpacr_el1, x0 // Write back EL1 Coprocessor Access Control Register (CPACR)
   mov x1, #AARCH64_CPTR_TFP // TFP bit that traps VFP exceptions
   EL1_OR_EL2_OR_EL3(x2)
1: ret // Not configurable in EL1
2: mrs x0, cptr_el2 // Disable VFP traps to EL2
   bic x0, x0, x1
   msr cptr_el2, x0
   ret
3: mrs x0, cptr_el3 // Disable VFP traps to EL3
   bic x0, x0, x1
   msr cptr_el3, x0
4: ret
\r
\r
-ASM_PFX(ArmCallWFI):\r
+ASM_FUNC(ArmCallWFI)\r
wfi\r
ret\r
\r
\r
-ASM_PFX(ArmReadMpidr):\r
+ASM_FUNC(ArmReadMpidr)\r
mrs x0, mpidr_el1 // read EL1 MPIDR\r
ret\r
\r
\r
// Keep old function names for C compatibility for now. Change later?
-ASM_PFX(ArmReadTpidrurw):\r
+ASM_FUNC(ArmReadTpidrurw)\r
mrs x0, tpidr_el0 // read tpidr_el0 (v7 TPIDRURW) -> (v8 TPIDR_EL0)\r
ret\r
\r
\r
// Keep old function names for C compatibility for now. Change later?
-ASM_PFX(ArmWriteTpidrurw):\r
+ASM_FUNC(ArmWriteTpidrurw)\r
msr tpidr_el0, x0 // write tpidr_el0 (v7 TPIDRURW) -> (v8 TPIDR_EL0)\r
ret\r
\r
\r
// Arch timers are mandatory on AArch64\r
-ASM_PFX(ArmIsArchTimerImplemented):\r
+ASM_FUNC(ArmIsArchTimerImplemented)\r
mov x0, #1\r
ret\r
\r
\r
-ASM_PFX(ArmReadIdPfr0):\r
+ASM_FUNC(ArmReadIdPfr0)\r
mrs x0, id_aa64pfr0_el1 // Read ID_AA64PFR0 Register\r
ret\r
\r
// Used to set up the arch timer. Check if we have security extensions and permission to set things up.
// See: ArmPkg/Library/ArmArchTimerLib/AArch64/ArmArchTimerLib.c
// ID_AA64PFR1_EL1 is not fully defined yet, but keep this here for now; it should read as all zeros.
-ASM_PFX(ArmReadIdPfr1):\r
+ASM_FUNC(ArmReadIdPfr1)\r
   mrs x0, id_aa64pfr1_el1 // Read ID_AA64PFR1 Register
ret\r
\r
// VOID ArmWriteHcr(UINTN Hcr)\r
-ASM_PFX(ArmWriteHcr):\r
+ASM_FUNC(ArmWriteHcr)\r
msr hcr_el2, x0 // Write the passed HCR value\r
ret\r
\r
+// UINTN ArmReadHcr(VOID)\r
+ASM_FUNC(ArmReadHcr)\r
+ mrs x0, hcr_el2\r
+ ret\r
+\r
// UINTN ArmReadCurrentEL(VOID)\r
-ASM_PFX(ArmReadCurrentEL):\r
+ASM_FUNC(ArmReadCurrentEL)\r
mrs x0, CurrentEL\r
ret\r
\r
+// UINT32 ArmReadCntHctl(VOID)\r
+ASM_FUNC(ArmReadCntHctl)\r
+ mrs x0, cnthctl_el2\r
+ ret\r
+\r
+// VOID ArmWriteCntHctl(UINT32 CntHctl)\r
+ASM_FUNC(ArmWriteCntHctl)\r
+ msr cnthctl_el2, x0\r
+ ret\r
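+
+// Note: cnthctl_el2 is only accessible at EL2 or above, so the two helpers
+// above must only be called when ArmReadCurrentEL reports EL2 or higher.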
+\r
ASM_FUNCTION_REMOVE_IF_UNREFERENCED\r