gArmTokenSpaceGuid.PcdArmLinuxFdtMaxOffset|0x4000|UINT32|0x00000023\r
# The FDT blob must be loaded at a 64bit aligned address.\r
gArmTokenSpaceGuid.PcdArmLinuxFdtAlignment|0x8|UINT32|0x00000026\r
+\r
+[PcdsFixedAtBuild.AARCH64]\r
+ # By default we do transition to EL2 non-secure mode with Stack for EL2.\r
+ # Mode Description Bits\r
+ #  NS EL2 SP2 all interrupts disabled = 0x3c9
+ #  NS EL1 SP1 all interrupts disabled = 0x3c5
+ # Other modes include using SP0 or switching to Aarch32, but these are\r
+ # not currently supported.\r
+ gArmTokenSpaceGuid.PcdArmNonSecModeTransition|0x3c9|UINT32|0x0000003E\r
PLATFORM_VERSION = 0.1\r
DSC_SPECIFICATION = 0x00010005\r
OUTPUT_DIRECTORY = Build/Arm\r
- SUPPORTED_ARCHITECTURES = ARM\r
+ SUPPORTED_ARCHITECTURES = ARM|AARCH64\r
BUILD_TARGETS = DEBUG|RELEASE\r
SKUID_IDENTIFIER = DEFAULT\r
\r
DxeServicesTableLib|MdePkg/Library/DxeServicesTableLib/DxeServicesTableLib.inf\r
DefaultExceptionHandlerLib|ArmPkg/Library/DefaultExceptionHandlerLib/DefaultExceptionHandlerLib.inf\r
\r
- ArmLib|ArmPkg/Library/ArmLib/ArmV7/ArmV7Lib.inf\r
CpuLib|MdePkg/Library/BaseCpuLib/BaseCpuLib.inf\r
+ ArmGicLib|ArmPkg/Drivers/PL390Gic/PL390GicLib.inf\r
ArmSmcLib|ArmPkg/Library/ArmSmcLib/ArmSmcLib.inf\r
ArmDisassemblerLib|ArmPkg/Library/ArmDisassemblerLib/ArmDisassemblerLib.inf\r
DmaLib|ArmPkg/Library/ArmDmaLib/ArmDmaLib.inf\r
\r
IoLib|MdePkg/Library/BaseIoLibIntrinsic/BaseIoLibIntrinsic.inf\r
\r
+[LibraryClasses.ARM]\r
+ ArmLib|ArmPkg/Library/ArmLib/ArmV7/ArmV7Lib.inf\r
+\r
+[LibraryClasses.AARCH64]\r
+ ArmLib|ArmPkg/Library/ArmLib/AArch64/AArch64Lib.inf\r
+\r
[LibraryClasses.common.PEIM]\r
HobLib|MdePkg/Library/PeiHobLib/PeiHobLib.inf\r
PeimEntryPoint|MdePkg/Library/PeimEntryPoint/PeimEntryPoint.inf\r
[LibraryClasses.ARM]\r
NULL|ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf\r
\r
+[LibraryClasses.AARCH64]\r
+ NULL|ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf\r
+\r
[Components.common]\r
ArmPkg/Library/ArmCacheMaintenanceLib/ArmCacheMaintenanceLib.inf\r
ArmPkg/Library/ArmDisassemblerLib/ArmDisassemblerLib.inf\r
ArmPkg/Library/ArmDmaLib/ArmDmaLib.inf\r
-# ArmPkg/Library/ArmLib/Arm11/Arm11ArmLib.inf\r
-# ArmPkg/Library/ArmLib/Arm11/Arm11ArmLibPrePi.inf\r
-# ArmPkg/Library/ArmLib/Arm9/Arm9ArmLib.inf\r
-# ArmPkg/Library/ArmLib/Arm9/Arm9ArmLibPrePi.inf\r
- ArmPkg/Library/ArmLib/ArmV7/ArmV7Lib.inf\r
- ArmPkg/Library/ArmLib/ArmV7/ArmV7LibPrePi.inf\r
- ArmPkg/Library/ArmLib/ArmV7/ArmV7LibSec.inf\r
ArmPkg/Library/ArmLib/Null/NullArmLib.inf\r
ArmPkg/Library/BaseMemoryLibStm/BaseMemoryLibStm.inf\r
- ArmPkg/Library/BaseMemoryLibVstm/BaseMemoryLibVstm.inf\r
ArmPkg/Library/BasePeCoffLib/BasePeCoffLib.inf\r
ArmPkg/Library/BdsLib/BdsLib.inf\r
ArmPkg/Library/CompilerIntrinsicsLib/CompilerIntrinsicsLib.inf\r
ArmPkg/Library/SemihostLib/SemihostLib.inf\r
ArmPkg/Library/UncachedMemoryAllocationLib/UncachedMemoryAllocationLib.inf\r
\r
- ArmPkg/Drivers/ArmCpuLib/ArmCortexA8Lib/ArmCortexA8Lib.inf\r
- ArmPkg/Drivers/ArmCpuLib/ArmCortexA9Lib/ArmCortexA9Lib.inf\r
- ArmPkg/Drivers/ArmCpuLib/ArmCortexA15Lib/ArmCortexA15Lib.inf\r
ArmPkg/Drivers/CpuDxe/CpuDxe.inf\r
ArmPkg/Drivers/CpuPei/CpuPei.inf\r
ArmPkg/Drivers/PL390Gic/PL390GicDxe.inf\r
\r
ArmPkg/Application/LinuxLoader/LinuxAtagLoader.inf\r
ArmPkg/Application/LinuxLoader/LinuxFdtLoader.inf\r
+\r
+[Components.ARM]\r
+ ArmPkg/Library/BaseMemoryLibVstm/BaseMemoryLibVstm.inf\r
+\r
+ ArmPkg/Drivers/ArmCpuLib/ArmCortexA8Lib/ArmCortexA8Lib.inf\r
+ ArmPkg/Drivers/ArmCpuLib/ArmCortexA9Lib/ArmCortexA9Lib.inf\r
+ ArmPkg/Drivers/ArmCpuLib/ArmCortexA15Lib/ArmCortexA15Lib.inf\r
+\r
+# ArmPkg/Library/ArmLib/Arm11/Arm11ArmLib.inf\r
+# ArmPkg/Library/ArmLib/Arm11/Arm11ArmLibPrePi.inf\r
+# ArmPkg/Library/ArmLib/Arm9/Arm9ArmLib.inf\r
+# ArmPkg/Library/ArmLib/Arm9/Arm9ArmLibPrePi.inf\r
+ ArmPkg/Library/ArmLib/ArmV7/ArmV7LibSec.inf\r
+ ArmPkg/Library/ArmLib/ArmV7/ArmV7LibPrePi.inf\r
+\r
+[Components.AARCH64]\r
+ ArmPkg/Drivers/ArmCpuLib/ArmCortexAEMv8Lib/ArmCortexAEMv8Lib.inf\r
+ ArmPkg/Drivers/ArmCpuLib/ArmCortexA5xLib/ArmCortexA5xLib.inf\r
+\r
+ ArmPkg/Library/ArmLib/AArch64/AArch64LibSec.inf\r
+ ArmPkg/Library/ArmLib/AArch64/AArch64LibPrePi.inf\r
#include <Base.h>\r
#include <Library/ArmLib.h>\r
#include <Library/ArmCpuLib.h>\r
-#include <Library/ArmV7ArchTimerLib.h>\r
+#include <Library/ArmArchTimerLib.h>\r
#include <Library/DebugLib.h>\r
#include <Library/IoLib.h>\r
#include <Library/PcdLib.h>\r
--- /dev/null
+/** @file\r
+\r
+ Copyright (c) 2011-2013, ARM Limited. All rights reserved.\r
+\r
+ This program and the accompanying materials\r
+ are licensed and made available under the terms and conditions of the BSD License\r
+ which accompanies this distribution. The full text of the license may be found at\r
+ http://opensource.org/licenses/bsd-license.php\r
+\r
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include <Base.h>\r
+#include <Library/ArmLib.h>\r
+#include <Library/ArmCpuLib.h>\r
+#include <Library/ArmArchTimerLib.h>\r
+#include <Library/DebugLib.h>\r
+#include <Library/IoLib.h>\r
+#include <Library/PcdLib.h>\r
+\r
+#include <Chipset/ArmCortexA5x.h>\r
+\r
+/**
+  Per-core initialization for Cortex-A5x class CPUs.
+
+  Programs the Generic Timer counter frequency from PcdArmArchTimerFreqInHz
+  and, when running on an MP core, enables SMP coherency through the CPU
+  auxiliary control register.
+
+  @param  MpId  MPIDR value of the executing core (currently unused here).
+**/
+VOID
+ArmCpuSetup (
+  IN  UINTN         MpId
+  )
+{
+  // Check if Architectural Timer frequency is valid number (should not be 0)
+  ASSERT (PcdGet32 (PcdArmArchTimerFreqInHz));
+  // The Generic (architectural) Timer must be implemented on this CPU.
+  ASSERT (ArmIsArchTimerImplemented () != 0);
+
+  // Note: System Counter frequency can only be set in Secure privileged mode,
+  // if security extensions are implemented.
+  ArmArchTimerSetTimerFreq (PcdGet32 (PcdArmArchTimerFreqInHz));
+
+  if (ArmIsMpCore ()) {
+    // Turn on SMP coherency
+    ArmSetAuxCrBit (A5X_FEATURE_SMP);
+  }
+
+}
+\r
+/**
+  SMP non-secure setup hook for Cortex-A5x.
+
+  Intentionally empty: no additional non-secure SMP configuration is needed
+  by this CPU library.
+
+  @param  MpId  MPIDR value of the executing core (unused).
+**/
+VOID
+ArmCpuSetupSmpNonSecure (
+  IN  UINTN         MpId
+  )
+{
+}
--- /dev/null
+#/* @file\r
+# Copyright (c) 2011-2013, ARM Limited. All rights reserved.\r
+#\r
+# This program and the accompanying materials\r
+# are licensed and made available under the terms and conditions of the BSD License\r
+# which accompanies this distribution. The full text of the license may be found at\r
+# http://opensource.org/licenses/bsd-license.php\r
+#\r
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+#\r
+#*/\r
+\r
+[Defines]\r
+ INF_VERSION = 0x00010005\r
+ BASE_NAME = ArmCortexA5xLib\r
+ FILE_GUID = 08107938-85d8-4967-ba65-b673f708fcb2\r
+ MODULE_TYPE = BASE\r
+ VERSION_STRING = 1.0\r
+ LIBRARY_CLASS = ArmCpuLib\r
+\r
+[Packages]\r
+ MdePkg/MdePkg.dec\r
+ ArmPkg/ArmPkg.dec\r
+\r
+[LibraryClasses]\r
+ ArmLib\r
+ IoLib\r
+ PcdLib\r
+\r
+[Sources.common]\r
+ ArmCortexA5xLib.c\r
+\r
+[FixedPcd]\r
+ gArmTokenSpaceGuid.PcdArmArchTimerFreqInHz\r
--- /dev/null
+/** @file\r
+\r
+ Copyright (c) 2011 - 2013, ARM Limited. All rights reserved.\r
+\r
+ This program and the accompanying materials\r
+ are licensed and made available under the terms and conditions of the BSD License\r
+ which accompanies this distribution. The full text of the license may be found at\r
+ http://opensource.org/licenses/bsd-license.php\r
+\r
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include <Base.h>\r
+#include <Library/ArmLib.h>\r
+#include <Library/ArmCpuLib.h>\r
+#include <Library/ArmGicLib.h>\r
+#include <Library/ArmArchTimerLib.h>\r
+#include <Library/IoLib.h>\r
+#include <Library/PcdLib.h>\r
+\r
+#include <Chipset/ArmAemV8.h>\r
+\r
+/**
+  Per-core initialization for the ARMv8 AEM (Architecture Envelope Model).
+
+  Only programs the Generic Timer counter frequency from
+  PcdArmArchTimerFreqInHz; no other CPU-specific setup is required.
+
+  @param  MpId  MPIDR value of the executing core (unused here).
+**/
+VOID
+ArmCpuSetup (
+  IN  UINTN         MpId
+  )
+{
+  // Note: System Counter frequency can only be set in Secure privileged mode,
+  // if security extensions are implemented.
+  ArmArchTimerSetTimerFreq (PcdGet32 (PcdArmArchTimerFreqInHz));
+}
+\r
+\r
+/**
+  SMP non-secure setup hook for the ARMv8 AEM model.
+
+  @param  MpId  MPIDR value of the executing core (unused).
+**/
+VOID
+ArmCpuSetupSmpNonSecure (
+  IN  UINTN         MpId
+  )
+{
+  // Nothing to do
+}
--- /dev/null
+#/* @file\r
+# Copyright (c) 2011 - 2013, ARM Limited. All rights reserved.\r
+#\r
+# This program and the accompanying materials\r
+# are licensed and made available under the terms and conditions of the BSD License\r
+# which accompanies this distribution. The full text of the license may be found at\r
+# http://opensource.org/licenses/bsd-license.php\r
+#\r
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+#\r
+#*/\r
+\r
+[Defines]\r
+ INF_VERSION = 0x00010005\r
+ BASE_NAME = ArmCortexAEMv8Lib\r
+ FILE_GUID = 8ab5a7e3-86b1-4dd3-a092-09ee801e774b\r
+ MODULE_TYPE = BASE\r
+ VERSION_STRING = 1.0\r
+ LIBRARY_CLASS = ArmCpuLib\r
+\r
+[Packages]\r
+ MdePkg/MdePkg.dec\r
+ ArmPkg/ArmPkg.dec\r
+\r
+[LibraryClasses]\r
+ ArmLib\r
+ IoLib\r
+ PcdLib\r
+\r
+[Sources.common]\r
+ ArmCortexAEMv8Lib.c\r
+\r
+[FixedPcd]\r
+ gArmTokenSpaceGuid.PcdArmPrimaryCoreMask\r
+ gArmTokenSpaceGuid.PcdArmPrimaryCore\r
+\r
+ gArmTokenSpaceGuid.PcdArmArchTimerFreqInHz\r
--- /dev/null
+/** @file\r
+\r
+ Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+ Portions Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>\r
+\r
+ This program and the accompanying materials\r
+ are licensed and made available under the terms and conditions of the BSD License\r
+ which accompanies this distribution. The full text of the license may be found at\r
+ http://opensource.org/licenses/bsd-license.php\r
+\r
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include "CpuDxe.h"\r
+\r
+#include <Chipset/AArch64.h>\r
+\r
+VOID\r
+ExceptionHandlersStart (\r
+ VOID\r
+ );\r
+\r
+VOID\r
+ExceptionHandlersEnd (\r
+ VOID\r
+ );\r
+\r
+VOID\r
+CommonExceptionEntry (\r
+ VOID\r
+ );\r
+\r
+VOID\r
+AsmCommonExceptionEntry (\r
+ VOID\r
+ );\r
+\r
+\r
+EFI_EXCEPTION_CALLBACK gExceptionHandlers[MAX_ARM_EXCEPTION + 1];\r
+EFI_EXCEPTION_CALLBACK gDebuggerExceptionHandlers[MAX_ARM_EXCEPTION + 1];\r
+\r
+\r
+\r
+/**\r
+ This function registers and enables the handler specified by InterruptHandler for a processor\r
+ interrupt or exception type specified by InterruptType. If InterruptHandler is NULL, then the\r
+ handler for the processor interrupt or exception type specified by InterruptType is uninstalled.\r
+ The installed handler is called once for each processor interrupt or exception.\r
+\r
+ @param InterruptType A pointer to the processor's current interrupt state. Set to TRUE if interrupts\r
+ are enabled and FALSE if interrupts are disabled.\r
+ @param InterruptHandler A pointer to a function of type EFI_CPU_INTERRUPT_HANDLER that is called\r
+ when a processor interrupt occurs. If this parameter is NULL, then the handler\r
+ will be uninstalled.\r
+\r
+ @retval EFI_SUCCESS The handler for the processor interrupt was successfully installed or uninstalled.\r
+ @retval EFI_ALREADY_STARTED InterruptHandler is not NULL, and a handler for InterruptType was\r
+ previously installed.\r
+ @retval EFI_INVALID_PARAMETER InterruptHandler is NULL, and a handler for InterruptType was not\r
+ previously installed.\r
+ @retval EFI_UNSUPPORTED The interrupt specified by InterruptType is not supported.\r
+\r
+**/\r
+EFI_STATUS
+RegisterInterruptHandler (
+  IN EFI_EXCEPTION_TYPE             InterruptType,
+  IN EFI_CPU_INTERRUPT_HANDLER      InterruptHandler
+  )
+{
+  // Reject exception numbers beyond the dispatch table
+  // (gExceptionHandlers is declared with MAX_ARM_EXCEPTION + 1 entries).
+  if (InterruptType > MAX_ARM_EXCEPTION) {
+    return EFI_UNSUPPORTED;
+  }
+
+  // Refuse to overwrite an already-installed handler.
+  if ((InterruptHandler != NULL) && (gExceptionHandlers[InterruptType] != NULL)) {
+    return EFI_ALREADY_STARTED;
+  }
+
+  // NOTE(review): the description above promises EFI_INVALID_PARAMETER when
+  // uninstalling (InterruptHandler == NULL) with no handler installed, but
+  // this path returns EFI_SUCCESS in that case -- confirm intended behavior.
+  gExceptionHandlers[InterruptType] = InterruptHandler;
+
+  return EFI_SUCCESS;
+}
+\r
+\r
+\r
+// C-level exception dispatcher, called from the assembly vector stubs with
+// the exception type in x0 and a pointer to the saved context in x1.
+// Dispatches to a registered handler if one exists, otherwise falls back to
+// DefaultExceptionHandler (which dumps the CPU state).
+VOID
+EFIAPI
+CommonCExceptionHandler (
+  IN     EFI_EXCEPTION_TYPE           ExceptionType,
+  IN OUT EFI_SYSTEM_CONTEXT           SystemContext
+  )
+{
+  // NOTE(review): the bound here is MAX_AARCH64_EXCEPTION while the
+  // gExceptionHandlers table is sized MAX_ARM_EXCEPTION + 1; if those two
+  // constants ever differ this indexes out of bounds -- confirm they match
+  // or unify on a single constant.
+  if (ExceptionType <= MAX_AARCH64_EXCEPTION) {
+    if (gExceptionHandlers[ExceptionType]) {
+      // A handler is registered for this exception type: dispatch and return.
+      gExceptionHandlers[ExceptionType] (ExceptionType, SystemContext);
+      return;
+    }
+  } else {
+    DEBUG ((EFI_D_ERROR, "Unknown exception type %d from %016lx\n", ExceptionType, SystemContext.SystemContextAArch64->ELR));
+    ASSERT (FALSE);
+  }
+
+  // No registered handler (or unknown type on RELEASE builds where the
+  // ASSERT above is compiled out): use the default state-dumping handler.
+  DefaultExceptionHandler (ExceptionType, SystemContext);
+}
+\r
+\r
+\r
+/**
+  Install the AArch64 exception vector table and reset the handler
+  dispatch table used by CommonCExceptionHandler.
+
+  IRQ and FIQ are masked while VBAR is switched over; the previous mask
+  state is restored afterwards.
+
+  @param  Cpu  CPU Architectural Protocol instance, used to query and
+               toggle the interrupt (IRQ) enable state.
+
+  @retval EFI_SUCCESS  Vector table installed; otherwise the status
+                       returned by Cpu->EnableInterrupt() on restore.
+**/
+EFI_STATUS
+InitializeExceptions (
+  IN EFI_CPU_ARCH_PROTOCOL    *Cpu
+  )
+{
+  EFI_STATUS           Status;
+  BOOLEAN              IrqEnabled;
+  BOOLEAN              FiqEnabled;
+
+  Status = EFI_SUCCESS;
+  // Clear the whole dispatch table. The previous code passed
+  // sizeof (*gExceptionHandlers), which is the size of a SINGLE entry and
+  // left the rest of the array untouched.
+  ZeroMem (gExceptionHandlers, sizeof (gExceptionHandlers));
+
+  //
+  // Disable interrupts
+  //
+  Cpu->GetInterruptState (Cpu, &IrqEnabled);
+  Cpu->DisableInterrupt (Cpu);
+
+  //
+  // EFI does not use the FIQ, but a debugger might so we must disable
+  // as we take over the exception vectors.
+  //
+  FiqEnabled = ArmGetFiqState ();
+  ArmDisableFiq ();
+
+  // The AArch64 vector table must be 2KB aligned (bottom 11 bits zero);
+  // the .S file enforces this with ".align 11" on ExceptionHandlersStart.
+  //DEBUG ((EFI_D_ERROR, "vbar set addr: 0x%016lx\n",(UINTN)ExceptionHandlersStart));
+  //ASSERT(((UINTN)ExceptionHandlersStart & ((1 << 11)-1)) == 0);
+
+  // We do not copy the Exception Table at PcdGet32(PcdCpuVectorBaseAddress). We just set Vector Base Address to point into CpuDxe code.
+  ArmWriteVBar ((UINTN)ExceptionHandlersStart);
+
+  if (FiqEnabled) {
+    ArmEnableFiq ();
+  }
+
+  if (IrqEnabled) {
+    //
+    // Restore interrupt state
+    //
+    Status = Cpu->EnableInterrupt (Cpu);
+  }
+
+  return Status;
+}
--- /dev/null
+//\r
+// Copyright (c) 2011 - 2013 ARM LTD. All rights reserved.<BR>\r
+//\r
+// This program and the accompanying materials\r
+// are licensed and made available under the terms and conditions of the BSD License\r
+// which accompanies this distribution. The full text of the license may be found at\r
+// http://opensource.org/licenses/bsd-license.php\r
+//\r
+// THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+// WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+//\r
+//------------------------------------------------------------------------------\r
+\r
+#include <Library/PcdLib.h>\r
+#include <AsmMacroIoLibV8.h>\r
+\r
+/*\r
+ This is the stack constructed by the exception handler (low address to high address).\r
+ X0 to FAR makes up the EFI_SYSTEM_CONTEXT for AArch64.\r
+\r
+ UINT64 X0; 0x000\r
+ UINT64 X1; 0x008\r
+ UINT64 X2; 0x010\r
+ UINT64 X3; 0x018\r
+ UINT64 X4; 0x020\r
+ UINT64 X5; 0x028\r
+ UINT64 X6; 0x030\r
+ UINT64 X7; 0x038\r
+ UINT64 X8; 0x040\r
+ UINT64 X9; 0x048\r
+ UINT64 X10; 0x050\r
+ UINT64 X11; 0x058\r
+ UINT64 X12; 0x060\r
+ UINT64 X13; 0x068\r
+ UINT64 X14; 0x070\r
+ UINT64 X15; 0x078\r
+ UINT64 X16; 0x080\r
+ UINT64 X17; 0x088\r
+ UINT64 X18; 0x090\r
+ UINT64 X19; 0x098\r
+ UINT64 X20; 0x0a0\r
+ UINT64 X21; 0x0a8\r
+ UINT64 X22; 0x0b0\r
+ UINT64 X23; 0x0b8\r
+ UINT64 X24; 0x0c0\r
+ UINT64 X25; 0x0c8\r
+ UINT64 X26; 0x0d0\r
+ UINT64 X27; 0x0d8\r
+ UINT64 X28; 0x0e0\r
+ UINT64 FP; 0x0e8 // x29 - Frame Pointer\r
+ UINT64 LR; 0x0f0 // x30 - Link Register\r
+ UINT64 SP; 0x0f8 // x31 - Stack Pointer\r
+\r
+ // FP/SIMD Registers. 128bit if used as Q-regs.\r
+ UINT64 V0[2]; 0x100\r
+ UINT64 V1[2]; 0x110\r
+ UINT64 V2[2]; 0x120\r
+ UINT64 V3[2]; 0x130\r
+ UINT64 V4[2]; 0x140\r
+ UINT64 V5[2]; 0x150\r
+ UINT64 V6[2]; 0x160\r
+ UINT64 V7[2]; 0x170\r
+ UINT64 V8[2]; 0x180\r
+ UINT64 V9[2]; 0x190\r
+ UINT64 V10[2]; 0x1a0\r
+ UINT64 V11[2]; 0x1b0\r
+ UINT64 V12[2]; 0x1c0\r
+ UINT64 V13[2]; 0x1d0\r
+ UINT64 V14[2]; 0x1e0\r
+ UINT64 V15[2]; 0x1f0\r
+ UINT64 V16[2]; 0x200\r
+ UINT64 V17[2]; 0x210\r
+ UINT64 V18[2]; 0x220\r
+ UINT64 V19[2]; 0x230\r
+ UINT64 V20[2]; 0x240\r
+ UINT64 V21[2]; 0x250\r
+ UINT64 V22[2]; 0x260\r
+ UINT64 V23[2]; 0x270\r
+ UINT64 V24[2]; 0x280\r
+ UINT64 V25[2]; 0x290\r
+ UINT64 V26[2]; 0x2a0\r
+ UINT64 V27[2]; 0x2b0\r
+ UINT64 V28[2]; 0x2c0\r
+ UINT64 V29[2]; 0x2d0\r
+ UINT64 V30[2]; 0x2e0\r
+ UINT64 V31[2]; 0x2f0\r
+\r
+ // System Context\r
+ UINT64 ELR; 0x300 // Exception Link Register\r
+ UINT64 SPSR; 0x308 // Saved Processor Status Register\r
+ UINT64 FPSR; 0x310 // Floating Point Status Register\r
+  UINT64 ESR;    0x318   // EL1 Exception Syndrome Register
+  UINT64 FAR;    0x320   // EL1 Fault Address Register
+ UINT64 Padding;0x328 // Required for stack alignment\r
+*/\r
+\r
+ASM_GLOBAL ASM_PFX(ExceptionHandlersStart)\r
+ASM_GLOBAL ASM_PFX(ExceptionHandlersEnd)\r
+ASM_GLOBAL ASM_PFX(CommonExceptionEntry)\r
+ASM_GLOBAL ASM_PFX(AsmCommonExceptionEntry)\r
+ASM_GLOBAL ASM_PFX(CommonCExceptionHandler)\r
+\r
+.text\r
+.align 11\r
+\r
+#define GP_CONTEXT_SIZE (32 * 8)\r
+#define FP_CONTEXT_SIZE (32 * 16)\r
+#define SYS_CONTEXT_SIZE ( 6 * 8) // 5 SYS regs + Alignment requirement (ie: the stack must be aligned on 0x10)\r
+\r
+// Cannot str x31 directly\r
+#define ALL_GP_REGS \\r
+ REG_PAIR (x0, x1, 0x000, GP_CONTEXT_SIZE); \\r
+ REG_PAIR (x2, x3, 0x010, GP_CONTEXT_SIZE); \\r
+ REG_PAIR (x4, x5, 0x020, GP_CONTEXT_SIZE); \\r
+ REG_PAIR (x6, x7, 0x030, GP_CONTEXT_SIZE); \\r
+ REG_PAIR (x8, x9, 0x040, GP_CONTEXT_SIZE); \\r
+ REG_PAIR (x10, x11, 0x050, GP_CONTEXT_SIZE); \\r
+ REG_PAIR (x12, x13, 0x060, GP_CONTEXT_SIZE); \\r
+ REG_PAIR (x14, x15, 0x070, GP_CONTEXT_SIZE); \\r
+ REG_PAIR (x16, x17, 0x080, GP_CONTEXT_SIZE); \\r
+ REG_PAIR (x18, x19, 0x090, GP_CONTEXT_SIZE); \\r
+ REG_PAIR (x20, x21, 0x0a0, GP_CONTEXT_SIZE); \\r
+ REG_PAIR (x22, x23, 0x0b0, GP_CONTEXT_SIZE); \\r
+ REG_PAIR (x24, x25, 0x0c0, GP_CONTEXT_SIZE); \\r
+ REG_PAIR (x26, x27, 0x0d0, GP_CONTEXT_SIZE); \\r
+ REG_PAIR (x28, x29, 0x0e0, GP_CONTEXT_SIZE); \\r
+ REG_ONE (x30, 0x0f0, GP_CONTEXT_SIZE);\r
+\r
+// In order to save the SP we need to put it somewhere else first.
+// STR only works with XZR/WZR directly\r
+#define SAVE_SP \\r
+ add x1, sp, FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE; \\r
+ REG_ONE (x1, 0x0f8, GP_CONTEXT_SIZE);\r
+\r
+#define ALL_FP_REGS \\r
+ REG_PAIR (q0, q1, 0x000, FP_CONTEXT_SIZE); \\r
+ REG_PAIR (q2, q3, 0x020, FP_CONTEXT_SIZE); \\r
+ REG_PAIR (q4, q5, 0x040, FP_CONTEXT_SIZE); \\r
+ REG_PAIR (q6, q7, 0x060, FP_CONTEXT_SIZE); \\r
+ REG_PAIR (q8, q9, 0x080, FP_CONTEXT_SIZE); \\r
+ REG_PAIR (q10, q11, 0x0a0, FP_CONTEXT_SIZE); \\r
+ REG_PAIR (q12, q13, 0x0c0, FP_CONTEXT_SIZE); \\r
+ REG_PAIR (q14, q15, 0x0e0, FP_CONTEXT_SIZE); \\r
+ REG_PAIR (q16, q17, 0x100, FP_CONTEXT_SIZE); \\r
+ REG_PAIR (q18, q19, 0x120, FP_CONTEXT_SIZE); \\r
+ REG_PAIR (q20, q21, 0x140, FP_CONTEXT_SIZE); \\r
+ REG_PAIR (q22, q23, 0x160, FP_CONTEXT_SIZE); \\r
+ REG_PAIR (q24, q25, 0x180, FP_CONTEXT_SIZE); \\r
+ REG_PAIR (q26, q27, 0x1a0, FP_CONTEXT_SIZE); \\r
+ REG_PAIR (q28, q29, 0x1c0, FP_CONTEXT_SIZE); \\r
+ REG_PAIR (q30, q31, 0x1e0, FP_CONTEXT_SIZE);\r
+\r
+#define ALL_SYS_REGS \\r
+ REG_PAIR (x1, x2, 0x000, SYS_CONTEXT_SIZE); \\r
+ REG_PAIR (x3, x4, 0x010, SYS_CONTEXT_SIZE); \\r
+ REG_ONE (x5, 0x020, SYS_CONTEXT_SIZE);\r
+\r
+//\r
+// This code gets copied to the ARM vector table\r
+// VectorTableStart - VectorTableEnd gets copied\r
+//\r
+ASM_PFX(ExceptionHandlersStart):\r
+\r
+//\r
+// Current EL with SP0 : 0x0 - 0x180\r
+//\r
+.align 7\r
+ASM_PFX(SynchronousExceptionSP0):\r
+ b ASM_PFX(SynchronousExceptionEntry)\r
+\r
+.align 7\r
+ASM_PFX(IrqSP0):\r
+ b ASM_PFX(IrqEntry)\r
+\r
+.align 7\r
+ASM_PFX(FiqSP0):\r
+ b ASM_PFX(FiqEntry)\r
+\r
+.align 7\r
+ASM_PFX(SErrorSP0):\r
+ b ASM_PFX(SErrorEntry)\r
+\r
+//\r
+// Current EL with SPx: 0x200 - 0x380\r
+//\r
+.align 7\r
+ASM_PFX(SynchronousExceptionSPx):\r
+ b ASM_PFX(SynchronousExceptionEntry)\r
+\r
+.align 7\r
+ASM_PFX(IrqSPx):\r
+ b ASM_PFX(IrqEntry)\r
+\r
+.align 7\r
+ASM_PFX(FiqSPx):\r
+ b ASM_PFX(FiqEntry)\r
+\r
+.align 7\r
+ASM_PFX(SErrorSPx):\r
+ b ASM_PFX(SErrorEntry)\r
+\r
+//\r
+// Lower EL using AArch64 : 0x400 - 0x580\r
+//\r
+.align 7\r
+ASM_PFX(SynchronousExceptionA64):\r
+ b ASM_PFX(SynchronousExceptionEntry)\r
+\r
+.align 7\r
+ASM_PFX(IrqA64):\r
+ b ASM_PFX(IrqEntry)\r
+\r
+.align 7\r
+ASM_PFX(FiqA64):\r
+ b ASM_PFX(FiqEntry)\r
+\r
+.align 7\r
+ASM_PFX(SErrorA64):\r
+ b ASM_PFX(SErrorEntry)\r
+\r
+//\r
+// Lower EL using AArch32 : 0x0 - 0x180\r
+//\r
+.align 7\r
+ASM_PFX(SynchronousExceptionA32):\r
+ b ASM_PFX(SynchronousExceptionEntry)\r
+\r
+.align 7\r
+ASM_PFX(IrqA32):\r
+ b ASM_PFX(IrqEntry)\r
+\r
+.align 7\r
+ASM_PFX(FiqA32):\r
+ b ASM_PFX(FiqEntry)\r
+\r
+.align 7\r
+ASM_PFX(SErrorA32):\r
+ b ASM_PFX(SErrorEntry)\r
+\r
+\r
+#undef REG_PAIR\r
+#undef REG_ONE\r
+#define REG_PAIR(REG1, REG2, OFFSET, CONTEXT_SIZE) stp REG1, REG2, [sp, #(OFFSET-CONTEXT_SIZE)]\r
+#define REG_ONE(REG1, OFFSET, CONTEXT_SIZE) str REG1, [sp, #(OFFSET-CONTEXT_SIZE)]\r
+\r
+ASM_PFX(SynchronousExceptionEntry):\r
+ // Move the stackpointer so we can reach our structure with the str instruction.\r
+ sub sp, sp, FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE\r
+\r
+ // Save all the General regs before touching x0 and x1.\r
+ // This does not save r31(SP) as it is special. We do that later.\r
+ ALL_GP_REGS\r
+\r
+  // Record the type of exception that occurred.
+ mov x0, #EXCEPT_AARCH64_SYNCHRONOUS_EXCEPTIONS\r
+\r
+ // Jump to our general handler to deal with all the common parts and process the exception.\r
+ ldr x1, ASM_PFX(CommonExceptionEntry)\r
+ br x1\r
+\r
+ASM_PFX(IrqEntry):\r
+ sub sp, sp, FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE\r
+ ALL_GP_REGS\r
+ mov x0, #EXCEPT_AARCH64_IRQ\r
+ ldr x1, ASM_PFX(CommonExceptionEntry)\r
+ br x1\r
+\r
+ASM_PFX(FiqEntry):\r
+ sub sp, sp, FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE\r
+ ALL_GP_REGS\r
+ mov x0, #EXCEPT_AARCH64_FIQ\r
+ ldr x1, ASM_PFX(CommonExceptionEntry)\r
+ br x1\r
+\r
+ASM_PFX(SErrorEntry):\r
+ sub sp, sp, FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE\r
+ ALL_GP_REGS\r
+ mov x0, #EXCEPT_AARCH64_SERROR\r
+ ldr x1, ASM_PFX(CommonExceptionEntry)\r
+ br x1\r
+\r
+\r
+//\r
+// This gets patched by the C code that patches in the vector table\r
+//\r
+.align 3\r
+ASM_PFX(CommonExceptionEntry):\r
+ .dword ASM_PFX(AsmCommonExceptionEntry)\r
+\r
+ASM_PFX(ExceptionHandlersEnd):\r
+\r
+\r
+\r
+//\r
+// This code runs from CpuDxe driver loaded address. It is patched into\r
+// CommonExceptionEntry.\r
+//\r
+ASM_PFX(AsmCommonExceptionEntry):
+  /* NOTE:
+     We have to break up the save code because the immediate value to be used
+     with the SP is too big to do it all in one step so we need to shuffle the SP
+     along as we go. (we only have 9bits of immediate to work with) */
+
+  // Save the current Stack pointer before we start modifying it.
+  SAVE_SP
+
+  // Read the exception state registers from the EL at which we are running:
+  // EL1_OR_EL2 branches to label 1 (EL1) or label 2 (EL2) below.
+  EL1_OR_EL2(x1)
+1:mrs      x1, elr_el1   // Exception Link Register
+  mrs      x2, spsr_el1  // Saved Processor Status Register 32bit
+  mrs      x3, fpsr      // Floating point Status Register 32bit
+  mrs      x4, esr_el1   // EL1 Exception syndrome register 32bit
+  mrs      x5, far_el1   // EL1 Fault Address Register
+  b        3f
+
+2:mrs      x1, elr_el2   // Exception Link Register
+  mrs      x2, spsr_el2  // Saved Processor Status Register 32bit
+  mrs      x3, fpsr      // Floating point Status Register 32bit
+  mrs      x4, esr_el2   // EL2 Exception syndrome register 32bit
+  mrs      x5, far_el2   // EL2 Fault Address Register
+
+  // Adjust SP to save next set
+3:add      sp, sp, FP_CONTEXT_SIZE
+
+  // Push FP regs to Stack.
+  ALL_FP_REGS
+
+  // Adjust SP to save next set
+  add      sp, sp, SYS_CONTEXT_SIZE
+
+  // Save the SYS regs
+  ALL_SYS_REGS
+
+  // Point to top of struct after all regs saved
+  sub      sp, sp, GP_CONTEXT_SIZE + FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE
+
+  // x0 still holds the exception type.
+  // Set x1 to point to the top of our struct on the Stack
+  mov      x1, sp
+
+// CommonCExceptionHandler (
+//   IN     EFI_EXCEPTION_TYPE           ExceptionType,   R0
+//   IN OUT EFI_SYSTEM_CONTEXT           SystemContext    R1
+//   )
+
+  // Call the handler as defined above
+
+  // For now we spin in the handler if we received an abort of some kind.
+  // We do not try to recover.
+  bl       ASM_PFX(CommonCExceptionHandler) // Call exception handler
+
+
+// Defines for popping from stack
+
+#undef REG_PAIR
+#undef REG_ONE
+#define REG_PAIR(REG1, REG2, OFFSET, CONTEXT_SIZE) ldp REG1, REG2, [sp, #(OFFSET-CONTEXT_SIZE)]
+
+#define REG_ONE(REG1, OFFSET, CONTEXT_SIZE) ldr REG1, [sp, #(OFFSET-CONTEXT_SIZE)]
+
+
+  // pop all regs and return from exception.
+  add      sp, sp, GP_CONTEXT_SIZE
+  ALL_GP_REGS
+
+  // Adjust SP to pop next set
+  add      sp, sp, FP_CONTEXT_SIZE
+  // Pop FP regs to Stack.
+  ALL_FP_REGS
+
+  // Adjust SP to be where we started from when we came into the handler.
+  // The handler can not change the SP.
+  add      sp, sp, SYS_CONTEXT_SIZE
+
+  eret
+
+#undef REG_PAIR
+#undef REG_ONE
+
+dead:
+  b        dead
--- /dev/null
+/*++\r
+\r
+Copyright (c) 2009, Hewlett-Packard Company. All rights reserved.<BR>\r
+Portions copyright (c) 2010, Apple Inc. All rights reserved.<BR>\r
+Portions copyright (c) 2011-2013, ARM Ltd. All rights reserved.<BR>\r
+\r
+This program and the accompanying materials\r
+are licensed and made available under the terms and conditions of the BSD License\r
+which accompanies this distribution. The full text of the license may be found at\r
+http://opensource.org/licenses/bsd-license.php\r
+\r
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+\r
+--*/\r
+\r
+#include "CpuDxe.h"\r
+\r
+#define TT_ATTR_INDX_INVALID ((UINT32)~0)\r
+\r
+/**
+  Return the attribute-index bits of the first mapped page described by a
+  translation table, descending through table descriptors as needed.
+
+  @param  FirstLevelTableAddress  Translation table to inspect.
+  @param  TableLevel              Level of that table (0..3).
+
+  @return TT_ATTR_INDX bits of the first block/page entry found, or
+          TT_ATTR_INDX_INVALID if the first entry maps nothing.
+**/
+STATIC
+UINT64
+GetFirstPageAttribute (
+  IN  UINT64  *FirstLevelTableAddress,
+  IN  UINTN    TableLevel
+  )
+{
+  UINT64 FirstEntry;
+
+  // Get the first entry of the table
+  FirstEntry = *FirstLevelTableAddress;
+
+  if ((FirstEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
+    // Only valid for Levels 0, 1 and 2
+    ASSERT (TableLevel < 3);
+
+    // Get the attribute of the subsequent table
+    return GetFirstPageAttribute ((UINT64*)(FirstEntry & TT_ADDRESS_MASK_DESCRIPTION_TABLE), TableLevel + 1);
+  } else if (((FirstEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) ||
+             ((TableLevel == 3) && ((FirstEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY_LEVEL3)))
+  {
+    return FirstEntry & TT_ATTR_INDX_MASK;
+  } else {
+    return TT_ATTR_INDX_INVALID;
+  }
+}
+\r
+/**
+  Scan one level of translation table and push each run of identically
+  attributed entries into the GCD memory space map.
+
+  @param  TableAddress        Translation table to scan.
+  @param  EntryCount          Number of entries at this level.
+  @param  TableLevel          Level of this table (0..3).
+  @param  BaseAddress         Address mapped by the first entry of the table.
+  @param  PrevEntryAttribute  In/out: attribute of the currently open region,
+                              or TT_ATTR_INDX_INVALID when none is open.
+  @param  StartGcdRegion      In/out: start address of the open region.
+
+  @return First address after the range covered by this table.
+**/
+STATIC
+UINT64
+GetNextEntryAttribute (
+  IN     UINT64  *TableAddress,
+  IN     UINTN    EntryCount,
+  IN     UINTN    TableLevel,
+  IN     UINT64   BaseAddress,
+  IN OUT UINT32  *PrevEntryAttribute,
+  IN OUT UINT64  *StartGcdRegion
+  )
+{
+  UINTN                            Index;
+  UINT64                           Entry;
+  UINT32                           EntryAttribute;
+  UINT32                           EntryType;
+  EFI_STATUS                       Status;
+  UINTN                            NumberOfDescriptors;
+  EFI_GCD_MEMORY_SPACE_DESCRIPTOR *MemorySpaceMap;
+
+  // Get the memory space map from GCD
+  // NOTE(review): GetMemorySpaceMap allocates MemorySpaceMap and it is never
+  // freed here, and this function recurses per sub-table -- consider fetching
+  // the map once in the caller and freeing it there.
+  MemorySpaceMap = NULL;
+  Status = gDS->GetMemorySpaceMap (&NumberOfDescriptors, &MemorySpaceMap);
+  ASSERT_EFI_ERROR (Status);
+
+  // We cannot get more than 3-level page table
+  ASSERT (TableLevel <= 3);
+
+  // While the top level table might not contain TT_ENTRY_COUNT entries;
+  // the subsequent ones should be filled up
+  for (Index = 0; Index < EntryCount; Index++) {
+    Entry = TableAddress[Index];
+    EntryType = Entry & TT_TYPE_MASK;
+    EntryAttribute = Entry & TT_ATTR_INDX_MASK;
+
+    // Block entry (or level-3 page entry): this entry maps memory directly.
+    if ((EntryType == TT_TYPE_BLOCK_ENTRY) ||
+        ((TableLevel == 3) && (EntryType == TT_TYPE_BLOCK_ENTRY_LEVEL3))) {
+      if ((*PrevEntryAttribute == TT_ATTR_INDX_INVALID) || (EntryAttribute != *PrevEntryAttribute)) {
+        if (*PrevEntryAttribute != TT_ATTR_INDX_INVALID) {
+          // Close the previous region. Its attribute is *PrevEntryAttribute
+          // (EntryAttribute belongs to the NEW region), and its length is
+          // End - Start (the former "- 1" made every region one byte short).
+          SetGcdMemorySpaceAttributes (MemorySpaceMap, NumberOfDescriptors,
+              *StartGcdRegion,
+              (BaseAddress + (Index * TT_ADDRESS_AT_LEVEL(TableLevel))) - *StartGcdRegion,
+              PageAttributeToGcdAttribute (*PrevEntryAttribute));
+        }
+
+        // Start of the new region
+        *StartGcdRegion = BaseAddress + (Index * TT_ADDRESS_AT_LEVEL(TableLevel));
+        *PrevEntryAttribute = EntryAttribute;
+      } else {
+        continue;
+      }
+    } else if (EntryType == TT_TYPE_TABLE_ENTRY) {
+      // Table Entry type is only valid for Level 0, 1, 2
+      ASSERT (TableLevel < 3);
+
+      // Increase the level number and scan the sub-level table
+      GetNextEntryAttribute ((UINT64*)(Entry & TT_ADDRESS_MASK_DESCRIPTION_TABLE),
+                             TT_ENTRY_COUNT, TableLevel + 1,
+                             (BaseAddress + (Index * TT_ADDRESS_AT_LEVEL(TableLevel))),
+                             PrevEntryAttribute, StartGcdRegion);
+    } else {
+      // Invalid/unmapped entry: close any open region.
+      if (*PrevEntryAttribute != TT_ATTR_INDX_INVALID) {
+        // Update GCD with the last region, using the attribute of the region
+        // being closed (*PrevEntryAttribute), not the invalid entry's bits.
+        SetGcdMemorySpaceAttributes (MemorySpaceMap, NumberOfDescriptors,
+            *StartGcdRegion,
+            (BaseAddress + (Index * TT_ADDRESS_AT_LEVEL(TableLevel))) - *StartGcdRegion,
+            PageAttributeToGcdAttribute (*PrevEntryAttribute));
+
+        // Start of the new region
+        *StartGcdRegion = BaseAddress + (Index * TT_ADDRESS_AT_LEVEL(TableLevel));
+        *PrevEntryAttribute = TT_ATTR_INDX_INVALID;
+      }
+    }
+  }
+
+  return BaseAddress + (EntryCount * TT_ADDRESS_AT_LEVEL(TableLevel));
+}
+\r
+/**
+  Synchronize the GCD memory space attributes with the MMU configuration
+  that is already live: walks the TTBR0 translation tables and reports each
+  uniformly-attributed region to GCD.
+
+  @param  CpuProtocol  CPU Architectural Protocol instance (unused here).
+
+  @retval EFI_SUCCESS  GCD attributes updated.
+**/
+EFI_STATUS
+SyncCacheConfig (
+  IN  EFI_CPU_ARCH_PROTOCOL *CpuProtocol
+  )
+{
+  EFI_STATUS                          Status;
+  UINT32                              PageAttribute = 0;
+  UINT64                             *FirstLevelTableAddress;
+  UINTN                               TableLevel;
+  UINTN                               TableCount;
+  UINTN                               NumberOfDescriptors;
+  EFI_GCD_MEMORY_SPACE_DESCRIPTOR    *MemorySpaceMap;
+  UINTN                               Tcr;
+  UINTN                               T0SZ;
+  UINT64                              BaseAddressGcdRegion;
+  UINT64                              EndAddressGcdRegion;
+
+  // This code assumes MMU is enabled and filled with section translations
+  ASSERT (ArmMmuEnabled ());
+
+  //
+  // Get the memory space map from GCD
+  //
+  // NOTE(review): MemorySpaceMap is allocated by GetMemorySpaceMap and is
+  // never freed in this function -- confirm whether it should be freed.
+  MemorySpaceMap = NULL;
+  Status = gDS->GetMemorySpaceMap (&NumberOfDescriptors, &MemorySpaceMap);
+  ASSERT_EFI_ERROR (Status);
+
+  // The GCD implementation maintains its own copy of the state of memory space attributes. GCD needs
+  // to know what the initial memory space attributes are. The CPU Arch. Protocol does not provide a
+  // GetMemoryAttributes function for GCD to get this so we must resort to calling GCD (as if we were
+  // a client) to update its copy of the attributes. This is bad architecture and should be replaced
+  // with a way for GCD to query the CPU Arch. driver of the existing memory space attributes instead.
+
+  // Obtain page table base
+  FirstLevelTableAddress = (UINT64*)(ArmGetTTBR0BaseAddress ());
+
+  // Get Translation Control Register value
+  Tcr = ArmGetTCR ();
+  // Get Address Region Size
+  T0SZ = Tcr & TCR_T0SZ_MASK;
+
+  // Get the level of the first table for the indicated Address Region Size
+  GetRootTranslationTableInfo (T0SZ, &TableLevel, &TableCount);
+
+  // First Attribute of the Page Tables
+  // NOTE(review): GetFirstPageAttribute returns UINT64 but PageAttribute is
+  // UINT32; TT_ATTR_INDX values fit in 32 bits, but confirm the truncation
+  // (including of TT_ATTR_INDX_INVALID) is benign.
+  PageAttribute = GetFirstPageAttribute (FirstLevelTableAddress, TableLevel);
+
+  // We scan from the start of the memory map (ie: at the address 0x0)
+  BaseAddressGcdRegion = 0x0;
+  EndAddressGcdRegion = GetNextEntryAttribute (FirstLevelTableAddress,
+                                               TableCount, TableLevel,
+                                               BaseAddressGcdRegion,
+                                               &PageAttribute, &BaseAddressGcdRegion);
+
+  // Update GCD with the last region
+  SetGcdMemorySpaceAttributes (MemorySpaceMap, NumberOfDescriptors,
+      BaseAddressGcdRegion,
+      EndAddressGcdRegion - BaseAddressGcdRegion,
+      PageAttributeToGcdAttribute (PageAttribute));
+
+  return EFI_SUCCESS;
+}
IN EFI_PHYSICAL_ADDRESS VirtualMask\r
);\r
\r
+VOID\r
+GetRootTranslationTableInfo (\r
+ IN UINTN T0SZ,\r
+ OUT UINTN *TableLevel,\r
+ OUT UINTN *TableEntryCount\r
+ );\r
+\r
EFI_STATUS\r
SetGcdMemorySpaceAttributes (\r
- IN EFI_GCD_MEMORY_SPACE_DESCRIPTOR *MemorySpaceMap,\r
+ IN EFI_GCD_MEMORY_SPACE_DESCRIPTOR *MemorySpaceMap,\r
IN UINTN NumberOfDescriptors,\r
IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
IN UINT64 Length,\r
ArmV6/ExceptionSupport.asm | RVCT\r
ArmV6/ExceptionSupport.S | GCC\r
\r
+[Sources.AARCH64]\r
+ AArch64/Mmu.c\r
+ AArch64/Exception.c\r
+ AArch64/ExceptionSupport.S | GCC\r
\r
[Packages]\r
ArmPkg/ArmPkg.dec\r
#include <Library/UefiLib.h>\r
#include <Library/PcdLib.h>\r
#include <Library/IoLib.h>\r
-#include <Library/ArmV7ArchTimerLib.h>\r
+#include <Library/ArmArchTimerLib.h>\r
\r
#include <Protocol/Timer.h>\r
#include <Protocol/HardwareInterrupt.h>\r
# Support a Semi Host file system over a debuggers JTAG\r
#\r
# Copyright (c) 2009, Apple Inc. All rights reserved.<BR>\r
+# Portions copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.\r
+#\r
# This program and the accompanying materials \r
# are licensed and made available under the terms and conditions of the BSD License \r
# which accompanies this distribution. The full text of the license may be found at \r
\r
ENTRY_POINT = SemihostFsEntryPoint\r
\r
-[Sources.ARM]\r
+[Sources.ARM, Sources.AARCH64]\r
Arm/SemihostFs.c\r
\r
[Packages]\r
--- /dev/null
+/** @file\r
+ Macros to work around lack of Apple support for LDR register, =expr\r
+\r
+ Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+ Portions copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>\r
+\r
+ This program and the accompanying materials\r
+ are licensed and made available under the terms and conditions of the BSD License\r
+ which accompanies this distribution. The full text of the license may be found at\r
+ http://opensource.org/licenses/bsd-license.php\r
+\r
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+\r
+#ifndef __MACRO_IO_LIBV8_H__\r
+#define __MACRO_IO_LIBV8_H__\r
+\r
+#if defined (__GNUC__)\r
+\r
+#define MmioWrite32(Address, Data) \\r
+ ldr x1, =Address ; \\r
+ ldr x0, =Data ; \\r
+ str x0, [x1]\r
+\r
+#define MmioOr32(Address, OrData) \\r
+ ldr x1, =Address ; \\r
+ ldr x2, =OrData ; \\r
+ ldr x0, [x1] ; \\r
+ orr x0, x0, x2 ; \\r
+ str x0, [x1]\r
+\r
+#define MmioAnd32(Address, AndData) \\r
+ ldr x1, =Address ; \\r
+ ldr x2, =AndData ; \\r
+ ldr x0, [x1] ; \\r
+ and x0, x0, x2 ; \\r
+ str x0, [x1]\r
+\r
+#define MmioAndThenOr32(Address, AndData, OrData) \\r
+ ldr x1, =Address ; \\r
+ ldr x0, [x1] ; \\r
+ ldr x2, =AndData ; \\r
+ and x0, x0, x2 ; \\r
+ ldr x2, =OrData ; \\r
+ orr x0, x0, x2 ; \\r
+ str x0, [x1]\r
+\r
+#define MmioWriteFromReg32(Address, Reg) \\r
+ ldr x1, =Address ; \\r
+ str Reg, [x1]\r
+\r
+#define MmioRead32(Address) \\r
+ ldr x1, =Address ; \\r
+ ldr x0, [x1]\r
+\r
+#define MmioReadToReg32(Address, Reg) \\r
+ ldr x1, =Address ; \\r
+ ldr Reg, [x1]\r
+\r
+#define LoadConstant(Data) \\r
+ ldr x0, =Data\r
+\r
+#define LoadConstantToReg(Data, Reg) \\r
+ ldr Reg, =Data\r
+\r
+#define SetPrimaryStack(StackTop, GlobalSize, Tmp, Tmp1) \\r
+ ands Tmp, GlobalSize, #15 ; \\r
+ mov Tmp1, #16 ; \\r
+ sub Tmp1, Tmp1, Tmp ; \\r
+ csel Tmp, Tmp1, Tmp, ne ; \\r
+ add GlobalSize, GlobalSize, Tmp ; \\r
+ sub sp, StackTop, GlobalSize ; \\r
+ ; \\r
+ mov Tmp, sp ; \\r
+ mov GlobalSize, #0x0 ; \\r
+_SetPrimaryStackInitGlobals: ; \\r
+ cmp Tmp, StackTop ; \\r
+ b.eq _SetPrimaryStackEnd ; \\r
+ str GlobalSize, [Tmp], #8 ; \\r
+ b _SetPrimaryStackInitGlobals ; \\r
+_SetPrimaryStackEnd:\r
+\r
+// Initialize the Global Variable with '0'\r
+#define InitializePrimaryStack(GlobalSize, Tmp1, Tmp2) \\r
+ and Tmp1, GlobalSize, #15 ; \\r
+ mov Tmp2, #16 ; \\r
+ sub Tmp2, Tmp2, Tmp1 ; \\r
+ add GlobalSize, GlobalSize, Tmp2 ; \\r
+ ; \\r
+ mov Tmp1, sp ; \\r
+ sub sp, sp, GlobalSize ; \\r
+ mov GlobalSize, #0x0 ; \\r
+_InitializePrimaryStackLoop: ; \\r
+ mov Tmp2, sp ; \\r
+ cmp Tmp1, Tmp2 ; \\r
+ bls _InitializePrimaryStackEnd ; \\r
+ str GlobalSize, [Tmp1, #-8]! ; \\r
+ b _InitializePrimaryStackLoop ; \\r
+_InitializePrimaryStackEnd:\r
+\r
+// CurrentEL : 0xC = EL3; 8 = EL2; 4 = EL1\r
+// This only selects between EL1 and EL2, else we die.\r
+// Provide the Macro with a safe temp xreg to use.\r
+#define EL1_OR_EL2(SAFE_XREG) \\r
+ mrs SAFE_XREG, CurrentEL ;\\r
+ cmp SAFE_XREG, #0x4 ;\\r
+ b.eq 1f ;\\r
+ cmp SAFE_XREG, #0x8 ;\\r
+ b.eq 2f ;\\r
+ b dead ;// We should never get here.\r
+\r
+// CurrentEL : 0xC = EL3; 8 = EL2; 4 = EL1\r
+// This only selects between EL1 and EL2 and EL3, else we die.\r
+// Provide the Macro with a safe temp xreg to use.\r
+#define EL1_OR_EL2_OR_EL3(SAFE_XREG) \\r
+ mrs SAFE_XREG, CurrentEL ;\\r
+ cmp SAFE_XREG, #0x4 ;\\r
+ b.eq 1f ;\\r
+ cmp SAFE_XREG, #0x8 ;\\r
+ b.eq 2f ;\\r
+ cmp SAFE_XREG, #0xC ;\\r
+ b.eq 3f ;\\r
+ b dead ;// We should never get here.\r
+\r
+#else\r
+\r
+//\r
+// Use ARM assembly macros, from armasm\r
+//\r
+// Less magic in the macros if ldr reg, =expr works\r
+//\r
+\r
+// returns _Data in X0 and _Address in X1\r
+\r
+\r
+\r
+#define MmioWrite32(Address, Data) MmioWrite32Macro Address, Data\r
+\r
+\r
+\r
+\r
+// returns Data in X0 and Address in X1, and OrData in X2\r
+#define MmioOr32(Address, OrData) MmioOr32Macro Address, OrData\r
+\r
+\r
+// returns _Data in X0 and _Address in X1, and _OrData in X2\r
+\r
+\r
+#define MmioAnd32(Address, AndData) MmioAnd32Macro Address, AndData\r
+\r
+// returns result in X0, _Address in X1, and _OrData in X2\r
+\r
+\r
+#define MmioAndThenOr32(Address, AndData, OrData) MmioAndThenOr32Macro Address, AndData, OrData\r
+\r
+\r
+// returns _Data in _Reg and _Address in X1\r
+\r
+\r
+#define MmioWriteFromReg32(Address, Reg) MmioWriteFromReg32Macro Address, Reg\r
+\r
+// returns _Data in X0 and _Address in X1\r
+\r
+\r
+#define MmioRead32(Address) MmioRead32Macro Address\r
+\r
+// returns _Data in Reg and _Address in X1\r
+\r
+\r
+#define MmioReadToReg32(Address, Reg) MmioReadToReg32Macro Address, Reg\r
+\r
+\r
+// load X0 with _Data\r
+\r
+\r
+#define LoadConstant(Data) LoadConstantMacro Data\r
+\r
+// load _Reg with _Data\r
+\r
+\r
+#define LoadConstantToReg(Data, Reg) LoadConstantToRegMacro Data, Reg\r
+\r
+// conditional load testing eq flag\r
+#define LoadConstantToRegIfEq(Data, Reg) LoadConstantToRegIfEqMacro Data, Reg\r
+\r
+#define SetPrimaryStack(StackTop,GlobalSize,Tmp, Tmp1) SetPrimaryStack StackTop, GlobalSize, Tmp, Tmp1\r
+\r
+#define InitializePrimaryStack(GlobalSize, Tmp1, Tmp2) InitializePrimaryStack GlobalSize, Tmp1, Tmp2\r
+\r
+#define EL1_OR_EL2(SAFE_XREG) EL1_OR_EL2 SAFE_XREG\r
+\r
+#define EL1_OR_EL2_OR_EL3(SAFE_XREG) EL1_OR_EL2_OR_EL3 SAFE_XREG\r
+\r
+#endif\r
+\r
+#endif // __MACRO_IO_LIBV8_H__\r
+\r
--- /dev/null
+/** @file\r
+\r
+ Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+ Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>\r
+\r
+ This program and the accompanying materials\r
+ are licensed and made available under the terms and conditions of the BSD License\r
+ which accompanies this distribution. The full text of the license may be found at\r
+ http://opensource.org/licenses/bsd-license.php\r
+\r
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#ifndef __AARCH64_H__\r
+#define __AARCH64_H__\r
+\r
+#include <Chipset/AArch64Mmu.h>\r
+#include <Chipset/ArmArchTimer.h>\r
+\r
+// ARM Interrupt ID in Exception Table\r
+#define ARM_ARCH_EXCEPTION_IRQ EXCEPT_AARCH64_IRQ\r
+\r
+// CPACR - Coprocessor Access Control Register definitions\r
+#define CPACR_TTA_EN (1UL << 28)\r
+#define CPACR_FPEN_EL1 (1UL << 20)\r
+#define CPACR_FPEN_FULL (3UL << 20)\r
+#define CPACR_CP_FULL_ACCESS 0x300000\r
+\r
+// Coprocessor Trap Register (CPTR)\r
+#define AARCH64_CPTR_TFP (1 << 10)\r
+\r
+// ID_AA64PFR0 - AArch64 Processor Feature Register 0 definitions\r
+#define AARCH64_PFR0_FP (0xF << 16)\r
+\r
+// NSACR - Non-Secure Access Control Register definitions\r
+#define NSACR_CP(cp) ((1 << (cp)) & 0x3FFF)\r
+#define NSACR_NSD32DIS (1 << 14)\r
+#define NSACR_NSASEDIS (1 << 15)\r
+#define NSACR_PLE (1 << 16)\r
+#define NSACR_TL (1 << 17)\r
+#define NSACR_NS_SMP (1 << 18)\r
+#define NSACR_RFR (1 << 19)\r
+\r
+// SCR - Secure Configuration Register definitions\r
+#define SCR_NS (1 << 0)\r
+#define SCR_IRQ (1 << 1)\r
+#define SCR_FIQ (1 << 2)\r
+#define SCR_EA (1 << 3)\r
+#define SCR_FW (1 << 4)\r
+#define SCR_AW (1 << 5)\r
+\r
+// MIDR - Main ID Register definitions\r
+#define ARM_CPU_TYPE_MASK 0xFFF\r
+#define ARM_CPU_TYPE_AEMv8 0xD0F\r
+#define ARM_CPU_TYPE_A15 0xC0F\r
+#define ARM_CPU_TYPE_A9 0xC09\r
+#define ARM_CPU_TYPE_A5 0xC05\r
+\r
+// Hypervisor Configuration Register\r
+#define ARM_HCR_FMO BIT3\r
+#define ARM_HCR_IMO BIT4\r
+#define ARM_HCR_AMO BIT5\r
+#define ARM_HCR_TGE BIT27\r
+\r
+// AArch64 Exception Level\r
+#define AARCH64_EL3 0xC\r
+#define AARCH64_EL2 0x8\r
+#define AARCH64_EL1 0x4\r
+\r
+#define ARM_VECTOR_TABLE_ALIGNMENT ((1 << 11)-1)\r
+\r
+VOID\r
+EFIAPI\r
+ArmEnableSWPInstruction (\r
+ VOID\r
+ );\r
+\r
+UINTN\r
+EFIAPI\r
+ArmReadCbar (\r
+ VOID\r
+ );\r
+\r
+UINTN\r
+EFIAPI\r
+ArmReadTpidrurw (\r
+ VOID\r
+ );\r
+\r
+VOID\r
+EFIAPI\r
+ArmWriteTpidrurw (\r
+ UINTN Value\r
+ );\r
+\r
+UINTN\r
+EFIAPI\r
+ArmIsArchTimerImplemented (\r
+ VOID\r
+ );\r
+\r
+UINTN\r
+EFIAPI\r
+ArmReadIdPfr0 (\r
+ VOID\r
+ );\r
+\r
+UINTN\r
+EFIAPI\r
+ArmReadIdPfr1 (\r
+ VOID\r
+ );\r
+\r
+UINTN\r
+EFIAPI\r
+ArmGetTCR (\r
+ VOID\r
+ );\r
+\r
+VOID\r
+EFIAPI\r
+ArmSetTCR (\r
+ UINTN Value\r
+ );\r
+\r
+UINTN\r
+EFIAPI\r
+ArmGetMAIR (\r
+ VOID\r
+ );\r
+\r
+VOID\r
+EFIAPI\r
+ArmSetMAIR (\r
+ UINTN Value\r
+ );\r
+\r
+VOID\r
+EFIAPI\r
+ArmDisableAlignmentCheck (\r
+ VOID\r
+ );\r
+\r
+\r
+VOID\r
+EFIAPI\r
+ArmEnableAlignmentCheck (\r
+ VOID\r
+ );\r
+\r
+VOID\r
+EFIAPI\r
+ArmDisableAllExceptions (\r
+ VOID\r
+ );\r
+\r
+VOID\r
+ArmWriteHcr (\r
+ IN UINTN Hcr\r
+ );\r
+\r
+UINTN\r
+ArmReadCurrentEL (\r
+ VOID\r
+ );\r
+\r
+UINT64\r
+PageAttributeToGcdAttribute (\r
+ IN UINT64 PageAttributes\r
+ );\r
+\r
+UINT64\r
+GcdAttributeToPageAttribute (\r
+ IN UINT64 GcdAttributes\r
+ );\r
+\r
+#endif // __AARCH64_H__\r
--- /dev/null
+/** @file\r
+*\r
+* Copyright (c) 2011-2013, ARM Limited. All rights reserved.\r
+*\r
+* This program and the accompanying materials\r
+* are licensed and made available under the terms and conditions of the BSD License\r
+* which accompanies this distribution. The full text of the license may be found at\r
+* http://opensource.org/licenses/bsd-license.php\r
+*\r
+* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+*\r
+**/\r
+\r
+#ifndef __AARCH64_MMU_H_\r
+#define __AARCH64_MMU_H_\r
+\r
+//\r
+// Memory Attribute Indirection register Definitions\r
+//\r
+#define MAIR_ATTR_DEVICE_MEMORY 0x0ULL\r
+#define MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE 0x44ULL\r
+#define MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH 0xBBULL\r
+#define MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK 0xFFULL\r
+\r
+#define MAIR_ATTR(n,value) ((value) << (((n) >> 2)*8))\r
+\r
+//\r
+// Long-descriptor Translation Table format\r
+//\r
+\r
+// Return the smallest offset from the table level.\r
+// The first offset starts at 12bit. There are 4 levels of 9-bit address range from level 3 to level 0\r
+#define TT_ADDRESS_OFFSET_AT_LEVEL(TableLevel) (12 + ((3 - (TableLevel)) * 9))\r
+\r
+#define TT_BLOCK_ENTRY_SIZE_AT_LEVEL(Level) (1ULL << TT_ADDRESS_OFFSET_AT_LEVEL(Level))\r
+\r
+// Get the associated entry in the given Translation Table\r
+#define TT_GET_ENTRY_FOR_ADDRESS(TranslationTable, Level, Address) \\r
+ ((UINTN)(TranslationTable) + ((((Address) >> TT_ADDRESS_OFFSET_AT_LEVEL(Level)) & (BIT9-1)) * sizeof(UINT64)))\r
+\r
+// Return the smallest address granularity from the table level.\r
+// The first offset starts at 12bit. There are 4 levels of 9-bit address range from level 3 to level 0\r
+#define TT_ADDRESS_AT_LEVEL(TableLevel) (1ULL << TT_ADDRESS_OFFSET_AT_LEVEL(TableLevel))\r
+\r
+// There are 512 entries per table when 4K Granularity\r
+#define TT_ENTRY_COUNT 512\r
+#define TT_ALIGNMENT_BLOCK_ENTRY BIT12\r
+#define TT_ALIGNMENT_DESCRIPTION_TABLE BIT12\r
+\r
+#define TT_ADDRESS_MASK_BLOCK_ENTRY (0xFFFFFFFULL << 12)\r
+#define TT_ADDRESS_MASK_DESCRIPTION_TABLE (0xFFFFFFFULL << 12)\r
+\r
+#define TT_TYPE_MASK 0x3\r
+#define TT_TYPE_TABLE_ENTRY 0x3\r
+#define TT_TYPE_BLOCK_ENTRY 0x1\r
+#define TT_TYPE_BLOCK_ENTRY_LEVEL3 0x3\r
+\r
+#define TT_ATTR_INDX_MASK (0x7 << 2)\r
+#define TT_ATTR_INDX_DEVICE_MEMORY (0x0 << 2)\r
+#define TT_ATTR_INDX_MEMORY_NON_CACHEABLE (0x1 << 2)\r
+#define TT_ATTR_INDX_MEMORY_WRITE_THROUGH (0x2 << 2)\r
+#define TT_ATTR_INDX_MEMORY_WRITE_BACK (0x3 << 2)\r
+\r
+#define TT_AP_MASK (0x3UL << 6)\r
+#define TT_AP_NO_RW (0x0UL << 6)\r
+#define TT_AP_RW_RW (0x1UL << 6)\r
+#define TT_AP_NO_RO (0x2UL << 6)\r
+#define TT_AP_RO_RO (0x3UL << 6)\r
+\r
+#define TT_NS BIT5\r
+#define TT_AF BIT10\r
+\r
+#define TT_PXN_MASK BIT53\r
+#define TT_UXN_MASK BIT54\r
+\r
+#define TT_ATTRIBUTES_MASK ((0xFFFULL << 52) | (0x3FFULL << 2))\r
+\r
+#define TT_TABLE_PXN BIT59\r
+#define TT_TABLE_XN BIT60\r
+#define TT_TABLE_NS BIT63\r
+\r
+#define TT_TABLE_AP_MASK (BIT62 | BIT61)\r
+#define TT_TABLE_AP_NO_PERMISSION (0x0ULL << 61)\r
+#define TT_TABLE_AP_EL0_NO_ACCESS (0x1ULL << 61)\r
+#define TT_TABLE_AP_NO_WRITE_ACCESS (0x2ULL << 61)\r
+\r
+//\r
+// Translation Control Register\r
+//\r
+#define TCR_T0SZ_MASK 0x3F\r
+\r
+#define TCR_PS_4GB (0 << 16)\r
+#define TCR_PS_64GB (1 << 16)\r
+#define TCR_PS_1TB (2 << 16)\r
+#define TCR_PS_4TB (3 << 16)\r
+#define TCR_PS_16TB (4 << 16)\r
+#define TCR_PS_256TB (5 << 16)\r
+\r
+#define TCR_TG0_4KB (0 << 14)\r
+\r
+#define TCR_IPS_4GB (0UL << 32)\r
+#define TCR_IPS_64GB (1UL << 32)\r
+#define TCR_IPS_1TB (2UL << 32)\r
+#define TCR_IPS_4TB (3UL << 32)\r
+#define TCR_IPS_16TB (4UL << 32)\r
+#define TCR_IPS_256TB (5UL << 32)\r
+\r
+\r
+#define TTBR_ASID_FIELD (48)\r
+#define TTBR_ASID_MASK (0xFFULL << TTBR_ASID_FIELD)\r
+#define TTBR_BADDR_MASK (0xFFFFFFFFFFFF ) // The width of this field depends on the values in TxSZ. Addr occupies bottom 48bits\r
+\r
+#define TCR_EL1_T0SZ_FIELD (0)\r
+#define TCR_EL1_EPD0_FIELD (7)\r
+#define TCR_EL1_IRGN0_FIELD (8)\r
+#define TCR_EL1_ORGN0_FIELD (10)\r
+#define TCR_EL1_SH0_FIELD (12)\r
+#define TCR_EL1_TG0_FIELD (14)\r
+#define TCR_EL1_T1SZ_FIELD (16)\r
+#define TCR_EL1_A1_FIELD (22)\r
+#define TCR_EL1_EPD1_FIELD (23)\r
+#define TCR_EL1_IRGN1_FIELD (24)\r
+#define TCR_EL1_ORGN1_FIELD (26)\r
+#define TCR_EL1_SH1_FIELD (28)\r
+#define TCR_EL1_TG1_FIELD (30)\r
+#define TCR_EL1_IPS_FIELD (32)\r
+#define TCR_EL1_AS_FIELD (36)\r
+#define TCR_EL1_TBI0_FIELD (37)\r
+#define TCR_EL1_TBI1_FIELD (38)\r
+#define TCR_EL1_T0SZ_MASK (0x1F << TCR_EL1_T0SZ_FIELD)\r
+#define TCR_EL1_EPD0_MASK (0x1 << TCR_EL1_EPD0_FIELD)\r
+#define TCR_EL1_IRGN0_MASK (0x3 << TCR_EL1_IRGN0_FIELD)\r
+#define TCR_EL1_ORGN0_MASK (0x3 << TCR_EL1_ORGN0_FIELD)\r
+#define TCR_EL1_SH0_MASK (0x3 << TCR_EL1_SH0_FIELD)\r
+#define TCR_EL1_TG0_MASK (0x1 << TCR_EL1_TG0_FIELD)\r
+#define TCR_EL1_T1SZ_MASK (0x1F << TCR_EL1_T1SZ_FIELD)\r
+#define TCR_EL1_A1_MASK (0x1 << TCR_EL1_A1_FIELD)\r
+#define TCR_EL1_EPD1_MASK (0x1 << TCR_EL1_EPD1_FIELD)\r
+#define TCR_EL1_IRGN1_MASK (0x3 << TCR_EL1_IRGN1_FIELD)\r
+#define TCR_EL1_ORGN1_MASK (0x3 << TCR_EL1_ORGN1_FIELD)\r
+#define TCR_EL1_SH1_MASK (0x3 << TCR_EL1_SH1_FIELD)\r
+#define TCR_EL1_TG1_MASK (0x1 << TCR_EL1_TG1_FIELD)\r
+#define TCR_EL1_IPS_MASK (0x7ULL << TCR_EL1_IPS_FIELD)\r
+#define TCR_EL1_AS_MASK (0x1ULL << TCR_EL1_AS_FIELD)\r
+#define TCR_EL1_TBI0_MASK (0x1ULL << TCR_EL1_TBI0_FIELD)\r
+#define TCR_EL1_TBI1_MASK (0x1ULL << TCR_EL1_TBI1_FIELD)\r
+\r
+\r
+#define VTCR_EL23_T0SZ_FIELD (0)\r
+#define VTCR_EL23_IRGN0_FIELD (8)\r
+#define VTCR_EL23_ORGN0_FIELD (10)\r
+#define VTCR_EL23_SH0_FIELD (12)\r
+#define TCR_EL23_TG0_FIELD (14)\r
+#define VTCR_EL23_PS_FIELD (16)\r
+#define TCR_EL23_T0SZ_MASK (0x1F << VTCR_EL23_T0SZ_FIELD)\r
+#define TCR_EL23_IRGN0_MASK (0x3 << VTCR_EL23_IRGN0_FIELD)\r
+#define TCR_EL23_ORGN0_MASK (0x3 << VTCR_EL23_ORGN0_FIELD)\r
+#define TCR_EL23_SH0_MASK (0x3 << VTCR_EL23_SH0_FIELD)\r
+#define TCR_EL23_TG0_MASK (0x1 << TCR_EL23_TG0_FIELD)\r
+#define TCR_EL23_PS_MASK (0x7 << VTCR_EL23_PS_FIELD)\r
+\r
+\r
+#define VTCR_EL2_T0SZ_FIELD (0)\r
+#define VTCR_EL2_SL0_FIELD (6)\r
+#define VTCR_EL2_IRGN0_FIELD (8)\r
+#define VTCR_EL2_ORGN0_FIELD (10)\r
+#define VTCR_EL2_SH0_FIELD (12)\r
+#define VTCR_EL2_TG0_FIELD (14)\r
+#define VTCR_EL2_PS_FIELD (16)\r
+#define VTCR_EL2_T0SZ_MASK (0x1F << VTCR_EL2_T0SZ_FIELD)\r
+#define VTCR_EL2_SL0_MASK (0x1F << VTCR_EL2_SL0_FIELD)\r
+#define VTCR_EL2_IRGN0_MASK (0x3 << VTCR_EL2_IRGN0_FIELD)\r
+#define VTCR_EL2_ORGN0_MASK (0x3 << VTCR_EL2_ORGN0_FIELD)\r
+#define VTCR_EL2_SH0_MASK (0x3 << VTCR_EL2_SH0_FIELD)\r
+#define VTCR_EL2_TG0_MASK (0x1 << VTCR_EL2_TG0_FIELD)\r
+#define VTCR_EL2_PS_MASK (0x7 << VTCR_EL2_PS_FIELD)\r
+\r
+\r
+#define TCR_RGN_OUTER_NON_CACHEABLE (0x0 << 10)\r
+#define TCR_RGN_OUTER_WRITE_BACK_ALLOC (0x1 << 10)\r
+#define TCR_RGN_OUTER_WRITE_THROUGH (0x2 << 10)\r
+#define TCR_RGN_OUTER_WRITE_BACK_NO_ALLOC (0x3 << 10)\r
+\r
+#define TCR_RGN_INNER_NON_CACHEABLE (0x0 << 8)\r
+#define TCR_RGN_INNER_WRITE_BACK_ALLOC (0x1 << 8)\r
+#define TCR_RGN_INNER_WRITE_THROUGH (0x2 << 8)\r
+#define TCR_RGN_INNER_WRITE_BACK_NO_ALLOC (0x3 << 8)\r
+\r
+#define TCR_SH_NON_SHAREABLE (0x0 << 12)\r
+#define TCR_SH_OUTER_SHAREABLE (0x2 << 12)\r
+#define TCR_SH_INNER_SHAREABLE (0x3 << 12)\r
+\r
+#define TCR_PASZ_32BITS_4GB (0x0)\r
+#define TCR_PASZ_36BITS_64GB (0x1)\r
+#define TCR_PASZ_40BITS_1TB (0x2)\r
+#define TCR_PASZ_42BITS_4TB (0x3)\r
+#define TCR_PASZ_44BITS_16TB (0x4)\r
+#define TCR_PASZ_48BITS_256TB (0x5)\r
+\r
+// The value written to the T*SZ fields are defined as 2^(64-T*SZ). So a 39Bit\r
+// Virtual address range for 512GB of virtual space sets T*SZ to 25\r
+#define INPUT_ADDRESS_SIZE_TO_TxSZ(a) (64 - (a))\r
+\r
+// Uses LPAE Page Table format\r
+\r
+#endif // __AARCH64_MMU_H_\r
+\r
--- /dev/null
+/** @file\r
+\r
+ Copyright (c) 2011 - 2013, ARM Limited. All rights reserved.\r
+\r
+ This program and the accompanying materials\r
+ are licensed and made available under the terms and conditions of the BSD License\r
+ which accompanies this distribution. The full text of the license may be found at\r
+ http://opensource.org/licenses/bsd-license.php\r
+\r
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#ifndef __ARM_AEM_V8_H__\r
+#define __ARM_AEM_V8_H__\r
+\r
+#include <Chipset/AArch64.h>\r
+\r
+#endif //__ARM_AEM_V8_H__\r
+\r
--- /dev/null
+/** @file\r
+*\r
+* Copyright (c) 2011-2013, ARM Limited. All rights reserved.\r
+* \r
+* This program and the accompanying materials \r
+* are licensed and made available under the terms and conditions of the BSD License \r
+* which accompanies this distribution. The full text of the license may be found at \r
+* http://opensource.org/licenses/bsd-license.php \r
+*\r
+* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, \r
+* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. \r
+*\r
+**/\r
+\r
+#ifndef __ARM_ARCH_TIMER_H_\r
+#define __ARM_ARCH_TIMER_H_\r
+\r
+UINTN\r
+EFIAPI\r
+ArmReadCntFrq (\r
+ VOID\r
+ );\r
+\r
+VOID\r
+EFIAPI\r
+ArmWriteCntFrq (\r
+ UINTN FreqInHz\r
+ );\r
+\r
+UINT64\r
+EFIAPI\r
+ArmReadCntPct (\r
+ VOID\r
+ );\r
+\r
+UINTN\r
+EFIAPI\r
+ArmReadCntkCtl (\r
+ VOID\r
+ );\r
+\r
+VOID\r
+EFIAPI\r
+ArmWriteCntkCtl (\r
+ UINTN Val\r
+ );\r
+\r
+UINTN\r
+EFIAPI\r
+ArmReadCntpTval (\r
+ VOID\r
+ );\r
+\r
+VOID\r
+EFIAPI\r
+ArmWriteCntpTval (\r
+ UINTN Val\r
+ );\r
+\r
+UINTN\r
+EFIAPI\r
+ArmReadCntpCtl (\r
+ VOID\r
+ );\r
+\r
+VOID\r
+EFIAPI\r
+ArmWriteCntpCtl (\r
+ UINTN Val\r
+ );\r
+\r
+UINTN\r
+EFIAPI\r
+ArmReadCntvTval (\r
+ VOID\r
+ );\r
+\r
+VOID\r
+EFIAPI\r
+ArmWriteCntvTval (\r
+ UINTN Val\r
+ );\r
+\r
+UINTN\r
+EFIAPI\r
+ArmReadCntvCtl (\r
+ VOID\r
+ );\r
+\r
+VOID\r
+EFIAPI\r
+ArmWriteCntvCtl (\r
+ UINTN Val\r
+ );\r
+\r
+UINT64\r
+EFIAPI\r
+ArmReadCntvCt (\r
+ VOID\r
+ );\r
+\r
+UINT64\r
+EFIAPI\r
+ArmReadCntpCval (\r
+ VOID\r
+ );\r
+\r
+VOID\r
+EFIAPI\r
+ArmWriteCntpCval (\r
+ UINT64 Val\r
+ );\r
+\r
+UINT64\r
+EFIAPI\r
+ArmReadCntvCval (\r
+ VOID\r
+ );\r
+\r
+VOID\r
+EFIAPI\r
+ArmWriteCntvCval (\r
+ UINT64 Val\r
+ );\r
+\r
+UINT64\r
+EFIAPI\r
+ArmReadCntvOff (\r
+ VOID\r
+ );\r
+\r
+VOID\r
+EFIAPI\r
+ArmWriteCntvOff (\r
+ UINT64 Val\r
+ );\r
+\r
+#endif // __ARM_ARCH_TIMER_H_\r
+\r
--- /dev/null
+/** @file\r
+\r
+ Copyright (c) 2012-2013, ARM Limited. All rights reserved.\r
+\r
+ This program and the accompanying materials\r
+ are licensed and made available under the terms and conditions of the BSD License\r
+ which accompanies this distribution. The full text of the license may be found at\r
+ http://opensource.org/licenses/bsd-license.php\r
+\r
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#ifndef __ARM_CORTEX_A5x_H__\r
+#define __ARM_CORTEX_A5x_H__\r
+\r
+//\r
+// Cortex A5x feature bit definitions\r
+//\r
+#define A5X_FEATURE_SMP (1 << 6)\r
+\r
+#endif\r
#define __ARM_V7_H__\r
\r
#include <Chipset/ArmV7Mmu.h>\r
-#include <Chipset/ArmV7ArchTimer.h>\r
+#include <Chipset/ArmArchTimer.h>\r
\r
// ARM Interrupt ID in Exception Table\r
#define ARM_ARCH_EXCEPTION_IRQ EXCEPT_ARM_IRQ\r
+++ /dev/null
-/** @file\r
-*\r
-* Copyright (c) 2011, ARM Limited. All rights reserved.\r
-* \r
-* This program and the accompanying materials \r
-* are licensed and made available under the terms and conditions of the BSD License \r
-* which accompanies this distribution. The full text of the license may be found at \r
-* http://opensource.org/licenses/bsd-license.php \r
-*\r
-* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, \r
-* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. \r
-*\r
-**/\r
-\r
-#ifndef __ARMV7_ARCH_TIMER_H_\r
-#define __ARMV7_ARCH_TIMER_H_\r
-\r
-UINTN\r
-EFIAPI\r
-ArmReadCntFrq (\r
- VOID\r
- );\r
-\r
-VOID\r
-EFIAPI\r
-ArmWriteCntFrq (\r
- UINTN FreqInHz\r
- );\r
-\r
-UINT64\r
-EFIAPI\r
-ArmReadCntPct (\r
- VOID\r
- );\r
-\r
-UINTN\r
-EFIAPI\r
-ArmReadCntkCtl (\r
- VOID\r
- );\r
-\r
-VOID\r
-EFIAPI\r
-ArmWriteCntkCtl (\r
- UINTN Val\r
- );\r
-\r
-UINTN\r
-EFIAPI\r
-ArmReadCntpTval (\r
- VOID\r
- );\r
-\r
-VOID\r
-EFIAPI\r
-ArmWriteCntpTval (\r
- UINTN Val\r
- );\r
-\r
-UINTN\r
-EFIAPI\r
-ArmReadCntpCtl (\r
- VOID\r
- );\r
-\r
-VOID\r
-EFIAPI\r
-ArmWriteCntpCtl (\r
- UINTN Val\r
- );\r
-\r
-UINTN\r
-EFIAPI\r
-ArmReadCntvTval (\r
- VOID\r
- );\r
-\r
-VOID\r
-EFIAPI\r
-ArmWriteCntvTval (\r
- UINTN Val\r
- );\r
-\r
-UINTN\r
-EFIAPI\r
-ArmReadCntvCtl (\r
- VOID\r
- );\r
-\r
-VOID\r
-EFIAPI\r
-ArmWriteCntvCtl (\r
- UINTN Val\r
- );\r
-\r
-UINT64\r
-EFIAPI\r
-ArmReadCntvCt (\r
- VOID\r
- );\r
-\r
-UINT64\r
-EFIAPI\r
-ArmReadCntpCval (\r
- VOID\r
- );\r
-\r
-VOID\r
-EFIAPI\r
-ArmWriteCntpCval (\r
- UINT64 Val\r
- );\r
-\r
-UINT64\r
-EFIAPI\r
-ArmReadCntvCval (\r
- VOID\r
- );\r
-\r
-VOID\r
-EFIAPI\r
-ArmWriteCntvCval (\r
- UINT64 Val\r
- );\r
-\r
-UINT64\r
-EFIAPI\r
-ArmReadCntvOff (\r
- VOID\r
- );\r
-\r
-VOID\r
-EFIAPI\r
-ArmWriteCntvOff (\r
- UINT64 Val\r
- );\r
-\r
-#endif\r
--- /dev/null
+/** @file\r
+\r
+ Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>\r
+\r
+ This program and the accompanying materials\r
+ are licensed and made available under the terms and conditions of the BSD License\r
+ which accompanies this distribution. The full text of the license may be found at\r
+ http://opensource.org/licenses/bsd-license.php\r
+\r
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#ifndef __ARM_ARCH_TIMER_LIB_H__\r
+#define __ARM_ARCH_TIMER_LIB_H__\r
+\r
+#define ARM_ARCH_TIMER_ENABLE (1 << 0)\r
+#define ARM_ARCH_TIMER_IMASK (1 << 1)\r
+#define ARM_ARCH_TIMER_ISTATUS (1 << 2)\r
+\r
+typedef enum {\r
+ CntFrq = 0,\r
+ CntPct,\r
+ CntkCtl,\r
+ CntpTval,\r
+ CntpCtl,\r
+ CntvTval,\r
+ CntvCtl,\r
+ CntvCt,\r
+ CntpCval,\r
+ CntvCval,\r
+ CntvOff,\r
+ CnthCtl,\r
+ CnthpTval,\r
+ CnthpCtl,\r
+ CnthpCval,\r
+ RegMaximum\r
+} ARM_ARCH_TIMER_REGS;\r
+\r
+VOID\r
+EFIAPI\r
+ArmArchTimerReadReg (\r
+ IN ARM_ARCH_TIMER_REGS Reg,\r
+ OUT VOID *DstBuf\r
+ );\r
+\r
+VOID\r
+EFIAPI\r
+ArmArchTimerWriteReg (\r
+ IN ARM_ARCH_TIMER_REGS Reg,\r
+ IN VOID *SrcBuf\r
+ );\r
+\r
+VOID\r
+EFIAPI\r
+ArmArchTimerEnableTimer (\r
+ VOID\r
+ );\r
+\r
+VOID\r
+EFIAPI\r
+ArmArchTimerDisableTimer (\r
+ VOID\r
+ );\r
+\r
+VOID\r
+EFIAPI\r
+ArmArchTimerSetTimerFreq (\r
+ IN UINTN FreqInHz\r
+ );\r
+\r
+UINTN\r
+EFIAPI\r
+ArmArchTimerGetTimerFreq (\r
+ VOID\r
+ );\r
+\r
+VOID\r
+EFIAPI\r
+ArmArchTimerSetTimerVal (\r
+ IN UINTN Val\r
+ );\r
+\r
+UINTN\r
+EFIAPI\r
+ArmArchTimerGetTimerVal (\r
+ VOID\r
+ );\r
+\r
+UINT64\r
+EFIAPI\r
+ArmArchTimerGetSystemCount (\r
+ VOID\r
+ );\r
+\r
+UINTN\r
+EFIAPI\r
+ArmArchTimerGetTimerCtrlReg (\r
+ VOID\r
+ );\r
+\r
+VOID\r
+EFIAPI\r
+ArmArchTimerSetTimerCtrlReg (\r
+ UINTN Val\r
+ );\r
+\r
+VOID\r
+EFIAPI\r
+ArmArchTimerSetCompareVal (\r
+ IN UINT64 Val\r
+ );\r
+\r
+#endif // __ARM_ARCH_TIMER_LIB_H__\r
\r
#include <Uefi/UefiBaseType.h>\r
\r
-#ifdef ARM_CPU_ARMv6\r
-#include <Chipset/ARM1176JZ-S.h>\r
+#ifdef MDE_CPU_ARM\r
+ #ifdef ARM_CPU_ARMv6\r
+ #include <Chipset/ARM1176JZ-S.h>\r
+ #else\r
+ #include <Chipset/ArmV7.h>\r
+ #endif\r
+#elif defined(MDE_CPU_AARCH64)\r
+ #include <Chipset/AArch64.h>\r
#else\r
-#include <Chipset/ArmV7.h>\r
+ #error "Unknown chipset."\r
#endif\r
\r
typedef enum {\r
VOID\r
EFIAPI\r
ArmCallWFI (\r
  VOID\r
  );\r
\r
+++ /dev/null
-/** @file\r
-\r
- Copyright (c) 2011, ARM Ltd. All rights reserved.<BR>\r
-\r
- This program and the accompanying materials\r
- are licensed and made available under the terms and conditions of the BSD License\r
- which accompanies this distribution. The full text of the license may be found at\r
- http://opensource.org/licenses/bsd-license.php\r
-\r
- THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
- WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
-\r
-**/\r
-\r
-#ifndef __ARM_V7_ARCH_TIMER_LIB_H__\r
-#define __ARM_V7_ARCH_TIMER_LIB_H__\r
-\r
-#define ARM_ARCH_TIMER_ENABLE (1 << 0)\r
-#define ARM_ARCH_TIMER_IMASK (1 << 1)\r
-#define ARM_ARCH_TIMER_ISTATUS (1 << 2)\r
-\r
-typedef enum {\r
- CntFrq = 0,\r
- CntPct,\r
- CntkCtl,\r
- CntpTval,\r
- CntpCtl,\r
- CntvTval,\r
- CntvCtl,\r
- CntvCt,\r
- CntpCval,\r
- CntvCval,\r
- CntvOff,\r
- CnthCtl,\r
- CnthpTval,\r
- CnthpCtl,\r
- CnthpCval,\r
- RegMaximum\r
-}ARM_ARCH_TIMER_REGS;\r
-\r
-VOID\r
-EFIAPI\r
-ArmArchTimerReadReg (\r
- IN ARM_ARCH_TIMER_REGS Reg,\r
- OUT VOID *DstBuf\r
- );\r
-\r
-VOID\r
-EFIAPI\r
-ArmArchTimerWriteReg (\r
- IN ARM_ARCH_TIMER_REGS Reg,\r
- IN VOID *SrcBuf\r
- );\r
-\r
-VOID\r
-EFIAPI\r
-ArmArchTimerEnableTimer (\r
- VOID\r
- );\r
-\r
-VOID\r
-EFIAPI\r
-ArmArchTimerDisableTimer (\r
- VOID\r
- );\r
-\r
-VOID\r
-EFIAPI\r
-ArmArchTimerSetTimerFreq (\r
- IN UINTN FreqInHz\r
- );\r
-\r
-UINTN\r
-EFIAPI\r
-ArmArchTimerGetTimerFreq (\r
- VOID\r
- );\r
-\r
-VOID\r
-EFIAPI\r
-ArmArchTimerSetTimerVal (\r
- IN UINTN Val\r
- );\r
-\r
-UINTN\r
-EFIAPI\r
-ArmArchTimerGetTimerVal (\r
- VOID\r
- );\r
-\r
-UINT64\r
-EFIAPI\r
-ArmArchTimerGetSystemCount (\r
- VOID\r
- );\r
-\r
-UINTN\r
-EFIAPI\r
-ArmArchTimerGetTimerCtrlReg (\r
- VOID\r
- );\r
-\r
-VOID\r
-EFIAPI\r
-ArmArchTimerSetTimerCtrlReg (\r
- UINTN Val\r
- );\r
-\r
-VOID\r
-EFIAPI\r
-ArmArchTimerSetCompareVal (\r
- IN UINT64 Val\r
- );\r
-\r
-#endif // __ARM_V7_ARCH_TIMER_LIB_H__\r
#include <Library/TimerLib.h>\r
#include <Library/DebugLib.h>\r
#include <Library/PcdLib.h>\r
-#include <Library/ArmV7ArchTimerLib.h>\r
+#include <Library/ArmArchTimerLib.h>\r
#include <Chipset/ArmV7.h>\r
\r
#define TICKS_PER_MICRO_SEC (PcdGet32 (PcdArmArchTimerFreqInHz)/1000000U)\r
// manual lower bound of the frequency is in the range of 1-10MHz\r
ASSERT (TICKS_PER_MICRO_SEC);\r
\r
+#ifdef MDE_CPU_ARM\r
  // Only set the frequency for ARMv7. We expect the secure firmware to have already done it\r
// If the security extensions are not implemented set Timer Frequency\r
if ((ArmReadIdPfr1 () & 0xF0) == 0x0) {\r
ArmArchTimerSetTimerFreq (PcdGet32 (PcdArmArchTimerFreqInHz));\r
}\r
+#endif\r
\r
// Architectural Timer Frequency must be set in the Secure privileged(if secure extensions are supported) mode.\r
// If the reset value (0) is returned just ASSERT.\r
TimerFreq = ArmArchTimerGetTimerFreq ();\r
- ASSERT (TimerFreq);\r
+ ASSERT (TimerFreq != 0);\r
\r
} else {\r
DEBUG ((EFI_D_ERROR, "ARM Architectural Timer is not available in the CPU, hence this library can not be used.\n"));\r
--- /dev/null
+/** @file\r
+*\r
+* Copyright (c) 2011-2013, ARM Limited. All rights reserved.\r
+*\r
+* This program and the accompanying materials\r
+* are licensed and made available under the terms and conditions of the BSD License\r
+* which accompanies this distribution. The full text of the license may be found at\r
+* http://opensource.org/licenses/bsd-license.php\r
+*\r
+* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+*\r
+**/\r
+\r
+#include <Uefi.h>\r
+#include <Chipset/AArch64.h>\r
+#include <Library/BaseMemoryLib.h>\r
+#include <Library/MemoryAllocationLib.h>\r
+#include <Library/ArmLib.h>\r
+#include <Library/BaseLib.h>\r
+#include <Library/DebugLib.h>\r
+#include "AArch64Lib.h"\r
+#include "ArmLibPrivate.h"\r
+#include <Library/ArmArchTimerLib.h>\r
+\r
+VOID\r
+EFIAPI\r
+ArmArchTimerReadReg (\r
+ IN ARM_ARCH_TIMER_REGS Reg,\r
+ OUT VOID *DstBuf\r
+ )\r
+{\r
+ // Check if the Generic/Architecture timer is implemented\r
+ if (ArmIsArchTimerImplemented ()) {\r
+\r
+ switch (Reg) {\r
+\r
+ case CntFrq:\r
+ *((UINTN *)DstBuf) = ArmReadCntFrq ();\r
+ break;\r
+\r
+ case CntPct:\r
+ *((UINT64 *)DstBuf) = ArmReadCntPct ();\r
+ break;\r
+\r
+ case CntkCtl:\r
+ *((UINTN *)DstBuf) = ArmReadCntkCtl();\r
+ break;\r
+\r
+ case CntpTval:\r
+ *((UINTN *)DstBuf) = ArmReadCntpTval ();\r
+ break;\r
+\r
+ case CntpCtl:\r
+ *((UINTN *)DstBuf) = ArmReadCntpCtl ();\r
+ break;\r
+\r
+ case CntvTval:\r
+ *((UINTN *)DstBuf) = ArmReadCntvTval ();\r
+ break;\r
+\r
+ case CntvCtl:\r
+ *((UINTN *)DstBuf) = ArmReadCntvCtl ();\r
+ break;\r
+\r
+ case CntvCt:\r
+ *((UINT64 *)DstBuf) = ArmReadCntvCt ();\r
+ break;\r
+\r
+ case CntpCval:\r
+ *((UINT64 *)DstBuf) = ArmReadCntpCval ();\r
+ break;\r
+\r
+ case CntvCval:\r
+ *((UINT64 *)DstBuf) = ArmReadCntvCval ();\r
+ break;\r
+\r
+ case CntvOff:\r
+ *((UINT64 *)DstBuf) = ArmReadCntvOff ();\r
+ break;\r
+\r
+ case CnthCtl:\r
+ case CnthpTval:\r
+ case CnthpCtl:\r
+ case CnthpCval:\r
+ DEBUG ((EFI_D_ERROR, "The register is related to Hypervisor Mode. Can't perform requested operation\n "));\r
+ break;\r
+\r
+ default:\r
+ DEBUG ((EFI_D_ERROR, "Unknown ARM Generic Timer register %x. \n ", Reg));\r
+ }\r
+ } else {\r
+ DEBUG ((EFI_D_ERROR, "Attempt to read ARM Generic Timer registers. But ARM Generic Timer extension is not implemented \n "));\r
+ ASSERT (0);\r
+ }\r
+}\r
+\r
+VOID\r
+EFIAPI\r
+ArmArchTimerWriteReg (\r
+ IN ARM_ARCH_TIMER_REGS Reg,\r
+ IN VOID *SrcBuf\r
+ )\r
+{\r
+ // Check if the Generic/Architecture timer is implemented\r
+ if (ArmIsArchTimerImplemented ()) {\r
+\r
+ switch (Reg) {\r
+\r
+ case CntFrq:\r
+ ArmWriteCntFrq (*((UINTN *)SrcBuf));\r
+ break;\r
+\r
+ case CntPct:\r
+ DEBUG ((EFI_D_ERROR, "Can't write to Read Only Register: CNTPCT \n"));\r
+ break;\r
+\r
+ case CntkCtl:\r
+ ArmWriteCntkCtl (*((UINTN *)SrcBuf));\r
+ break;\r
+\r
+ case CntpTval:\r
+ ArmWriteCntpTval (*((UINTN *)SrcBuf));\r
+ break;\r
+\r
+ case CntpCtl:\r
+ ArmWriteCntpCtl (*((UINTN *)SrcBuf));\r
+ break;\r
+\r
+ case CntvTval:\r
+ ArmWriteCntvTval (*((UINTN *)SrcBuf));\r
+ break;\r
+\r
+ case CntvCtl:\r
+ ArmWriteCntvCtl (*((UINTN *)SrcBuf));\r
+ break;\r
+\r
+ case CntvCt:\r
+ DEBUG ((EFI_D_ERROR, "Can't write to Read Only Register: CNTVCT \n"));\r
+ break;\r
+\r
+ case CntpCval:\r
+ ArmWriteCntpCval (*((UINT64 *)SrcBuf) );\r
+ break;\r
+\r
+ case CntvCval:\r
+ ArmWriteCntvCval (*((UINT64 *)SrcBuf) );\r
+ break;\r
+\r
+ case CntvOff:\r
+ ArmWriteCntvOff (*((UINT64 *)SrcBuf));\r
+ break;\r
+\r
+ case CnthCtl:\r
+ case CnthpTval:\r
+ case CnthpCtl:\r
+ case CnthpCval:\r
+ DEBUG ((EFI_D_ERROR, "The register is related to Hypervisor Mode. Can't perform requested operation\n "));\r
+ break;\r
+\r
+ default:\r
+ DEBUG ((EFI_D_ERROR, "Unknown ARM Generic Timer register %x. \n ", Reg));\r
+ }\r
+ } else {\r
+ DEBUG ((EFI_D_ERROR, "Attempt to write to ARM Generic Timer registers. But ARM Generic Timer extension is not implemented \n "));\r
+ ASSERT (0);\r
+ }\r
+}\r
+\r
+VOID\r
+EFIAPI\r
+ArmArchTimerEnableTimer (\r
+ VOID\r
+ )\r
+{\r
+ UINTN TimerCtrlReg;\r
+\r
+ ArmArchTimerReadReg (CntpCtl, (VOID *)&TimerCtrlReg);\r
+ TimerCtrlReg |= ARM_ARCH_TIMER_ENABLE;\r
+ ArmArchTimerWriteReg (CntpCtl, (VOID *)&TimerCtrlReg);\r
+}\r
+\r
+VOID\r
+EFIAPI\r
+ArmArchTimerDisableTimer (\r
+ VOID\r
+ )\r
+{\r
+ UINTN TimerCtrlReg;\r
+\r
+ ArmArchTimerReadReg (CntpCtl, (VOID *)&TimerCtrlReg);\r
+ TimerCtrlReg &= ~ARM_ARCH_TIMER_ENABLE;\r
+ ArmArchTimerWriteReg (CntpCtl, (VOID *)&TimerCtrlReg);\r
+}\r
+\r
+VOID\r
+EFIAPI\r
+ArmArchTimerSetTimerFreq (\r
+ IN UINTN FreqInHz\r
+ )\r
+{\r
+ ArmArchTimerWriteReg (CntFrq, (VOID *)&FreqInHz);\r
+}\r
+\r
+UINTN\r
+EFIAPI\r
+ArmArchTimerGetTimerFreq (\r
+ VOID\r
+ )\r
+{\r
+ UINTN ArchTimerFreq = 0;\r
+ ArmArchTimerReadReg (CntFrq, (VOID *)&ArchTimerFreq);\r
+ return ArchTimerFreq;\r
+}\r
+\r
+UINTN\r
+EFIAPI\r
+ArmArchTimerGetTimerVal (\r
+ VOID\r
+ )\r
+{\r
+ UINTN ArchTimerVal;\r
+ ArmArchTimerReadReg (CntpTval, (VOID *)&ArchTimerVal);\r
+ return ArchTimerVal;\r
+}\r
+\r
+\r
+VOID\r
+EFIAPI\r
+ArmArchTimerSetTimerVal (\r
+ IN UINTN Val\r
+ )\r
+{\r
+ ArmArchTimerWriteReg (CntpTval, (VOID *)&Val);\r
+}\r
+\r
+UINT64\r
+EFIAPI\r
+ArmArchTimerGetSystemCount (\r
+ VOID\r
+ )\r
+{\r
+ UINT64 SystemCount;\r
+ ArmArchTimerReadReg (CntPct, (VOID *)&SystemCount);\r
+ return SystemCount;\r
+}\r
+\r
+UINTN\r
+EFIAPI\r
+ArmArchTimerGetTimerCtrlReg (\r
+ VOID\r
+ )\r
+{\r
+ UINTN Val;\r
+ ArmArchTimerReadReg (CntpCtl, (VOID *)&Val);\r
+ return Val;\r
+}\r
+\r
+VOID\r
+EFIAPI\r
+ArmArchTimerSetTimerCtrlReg (\r
+ UINTN Val\r
+ )\r
+{\r
+ ArmArchTimerWriteReg (CntpCtl, (VOID *)&Val);\r
+}\r
+\r
+VOID\r
+EFIAPI\r
+ArmArchTimerSetCompareVal (\r
+ IN UINT64 Val\r
+ )\r
+{\r
+ ArmArchTimerWriteReg (CntpCval, (VOID *)&Val);\r
+}\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2011 - 2013, ARM Limited. All rights reserved.\r
+#\r
+# This program and the accompanying materials\r
+# are licensed and made available under the terms and conditions of the BSD License\r
+# which accompanies this distribution. The full text of the license may be found at\r
+# http://opensource.org/licenses/bsd-license.php\r
+#\r
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+.text\r
+.align 2\r
+\r
+ASM_GLOBAL ASM_PFX(ArmReadCntFrq)\r
+ASM_GLOBAL ASM_PFX(ArmWriteCntFrq)\r
+ASM_GLOBAL ASM_PFX(ArmReadCntPct)\r
+ASM_GLOBAL ASM_PFX(ArmReadCntkCtl)\r
+ASM_GLOBAL ASM_PFX(ArmWriteCntkCtl)\r
+ASM_GLOBAL ASM_PFX(ArmReadCntpTval)\r
+ASM_GLOBAL ASM_PFX(ArmWriteCntpTval)\r
+ASM_GLOBAL ASM_PFX(ArmReadCntpCtl)\r
+ASM_GLOBAL ASM_PFX(ArmWriteCntpCtl)\r
+ASM_GLOBAL ASM_PFX(ArmReadCntvTval)\r
+ASM_GLOBAL ASM_PFX(ArmWriteCntvTval)\r
+ASM_GLOBAL ASM_PFX(ArmReadCntvCtl)\r
+ASM_GLOBAL ASM_PFX(ArmWriteCntvCtl)\r
+ASM_GLOBAL ASM_PFX(ArmReadCntvCt)\r
+ASM_GLOBAL ASM_PFX(ArmReadCntpCval)\r
+ASM_GLOBAL ASM_PFX(ArmWriteCntpCval)\r
+ASM_GLOBAL ASM_PFX(ArmReadCntvCval)\r
+ASM_GLOBAL ASM_PFX(ArmWriteCntvCval)\r
+ASM_GLOBAL ASM_PFX(ArmReadCntvOff)\r
+ASM_GLOBAL ASM_PFX(ArmWriteCntvOff)\r
+\r
+ASM_PFX(ArmReadCntFrq):\r
+ mrs x0, cntfrq_el0 // Read CNTFRQ\r
+ ret\r
+\r
+\r
+# NOTE - Can only write while at highest implemented EL level (EL3 on model). Else ReadOnly (EL2, EL1, EL0)\r
+ASM_PFX(ArmWriteCntFrq):\r
+ msr cntfrq_el0, x0 // Write to CNTFRQ\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmReadCntPct):\r
+ mrs x0, cntpct_el0 // Read CNTPCT (Physical counter register)\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmReadCntkCtl):\r
+ mrs x0, cntkctl_el1 // Read CNTK_CTL (Timer PL1 Control Register)\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmWriteCntkCtl):\r
+ msr cntkctl_el1, x0 // Write to CNTKCTL (Timer Kernel Control Register)\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmReadCntpTval):\r
+ mrs x0, cntp_tval_el0 // Read CNTP_TVAL (PL1 physical timer value register)\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmWriteCntpTval):\r
+ msr cntp_tval_el0, x0 // Write to CNTP_TVAL (PL1 physical timer value register)\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmReadCntpCtl):\r
+ mrs x0, cntp_ctl_el0 // Read CNTP_CTL (PL1 Physical Timer Control Register)\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmWriteCntpCtl):\r
+ msr cntp_ctl_el0, x0 // Write to CNTP_CTL (PL1 Physical Timer Control Register)\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmReadCntvTval):\r
+ mrs x0, cntv_tval_el0 // Read CNTV_TVAL (Virtual Timer Value register)\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmWriteCntvTval):\r
+ msr cntv_tval_el0, x0 // Write to CNTV_TVAL (Virtual Timer Value register)\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmReadCntvCtl):\r
+ mrs x0, cntv_ctl_el0 // Read CNTV_CTL (Virtual Timer Control Register)\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmWriteCntvCtl):\r
+ msr cntv_ctl_el0, x0 // Write to CNTV_CTL (Virtual Timer Control Register)\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmReadCntvCt):\r
+ mrs x0, cntvct_el0 // Read CNTVCT (Virtual Count Register)\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmReadCntpCval):\r
+ mrs x0, cntp_cval_el0 // Read CNTP_CVAL (Physical Timer Compare Value Register)\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmWriteCntpCval):\r
+ msr cntp_cval_el0, x0 // Write to CNTP_CVAL (Physical Timer Compare Value Register)\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmReadCntvCval):\r
+ mrs x0, cntv_cval_el0 // Read CNTV_CVAL (Virtual Timer Compare Value Register)\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmWriteCntvCval):\r
+ msr cntv_cval_el0, x0 // Write to CNTV_CVAL (Virtual Timer Compare Value Register)\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmReadCntvOff):\r
+ mrs x0, cntvoff_el2 // Read CNTVOFF (virtual Offset register)\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmWriteCntvOff):\r
+ msr cntvoff_el2, x0 // Write to CNTVOFF (Virtual Offset register)\r
+ ret\r
+\r
+\r
+ASM_FUNCTION_REMOVE_IF_UNREFERENCED\r
--- /dev/null
+/** @file\r
+\r
+ Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+ Portions copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>\r
+\r
+ This program and the accompanying materials\r
+ are licensed and made available under the terms and conditions of the BSD License\r
+ which accompanies this distribution. The full text of the license may be found at\r
+ http://opensource.org/licenses/bsd-license.php\r
+\r
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include <Uefi.h>\r
+#include <Chipset/AArch64.h>\r
+#include <Library/ArmLib.h>\r
+#include <Library/BaseLib.h>\r
+#include <Library/IoLib.h>\r
+#include "AArch64Lib.h"\r
+#include "ArmLibPrivate.h"\r
+\r
+ARM_CACHE_TYPE\r
+EFIAPI\r
+ArmCacheType (\r
+ VOID\r
+ )\r
+{\r
+ return ARM_CACHE_TYPE_WRITE_BACK;\r
+}\r
+\r
+ARM_CACHE_ARCHITECTURE\r
+EFIAPI\r
+ArmCacheArchitecture (\r
+ VOID\r
+ )\r
+{\r
+ UINT32 CLIDR = ReadCLIDR ();\r
+\r
+ return (ARM_CACHE_ARCHITECTURE)CLIDR; // BugBug Fix Me\r
+}\r
+\r
+BOOLEAN\r
+EFIAPI\r
+ArmDataCachePresent (\r
+ VOID\r
+ )\r
+{\r
+ UINT32 CLIDR = ReadCLIDR ();\r
+\r
+ if ((CLIDR & 0x2) == 0x2) {\r
+ // Data cache exists\r
+ return TRUE;\r
+ }\r
+ if ((CLIDR & 0x7) == 0x4) {\r
+ // Unified cache\r
+ return TRUE;\r
+ }\r
+\r
+ return FALSE;\r
+}\r
+\r
+UINTN\r
+EFIAPI\r
+ArmDataCacheSize (\r
+ VOID\r
+ )\r
+{\r
+ UINT32 NumSets;\r
+ UINT32 Associativity;\r
+ UINT32 LineSize;\r
+ UINT32 CCSIDR = ReadCCSIDR (0);\r
+\r
+ LineSize = (1 << ((CCSIDR & 0x7) + 2));\r
+ Associativity = ((CCSIDR >> 3) & 0x3ff) + 1;\r
+ NumSets = ((CCSIDR >> 13) & 0x7fff) + 1;\r
+\r
+ // LineSize is in words (4 byte chunks)\r
+ return NumSets * Associativity * LineSize * 4;\r
+}\r
+\r
+UINTN\r
+EFIAPI\r
+ArmDataCacheAssociativity (\r
+ VOID\r
+ )\r
+{\r
+ UINT32 CCSIDR = ReadCCSIDR (0);\r
+\r
+ return ((CCSIDR >> 3) & 0x3ff) + 1;\r
+}\r
+\r
+UINTN\r
+ArmDataCacheSets (\r
+ VOID\r
+ )\r
+{\r
+ UINT32 CCSIDR = ReadCCSIDR (0);\r
+\r
+ return ((CCSIDR >> 13) & 0x7fff) + 1;\r
+}\r
+\r
+UINTN\r
+EFIAPI\r
+ArmDataCacheLineLength (\r
+ VOID\r
+ )\r
+{\r
+ UINT32 CCSIDR = ReadCCSIDR (0) & 7;\r
+\r
+ // * 4 converts to bytes\r
+ return (1 << (CCSIDR + 2)) * 4;\r
+}\r
+\r
+BOOLEAN\r
+EFIAPI\r
+ArmInstructionCachePresent (\r
+ VOID\r
+ )\r
+{\r
+ UINT32 CLIDR = ReadCLIDR ();\r
+\r
+ if ((CLIDR & 1) == 1) {\r
+ // Instruction cache exists\r
+ return TRUE;\r
+ }\r
+ if ((CLIDR & 0x7) == 0x4) {\r
+ // Unified cache\r
+ return TRUE;\r
+ }\r
+\r
+ return FALSE;\r
+}\r
+\r
+UINTN\r
+EFIAPI\r
+ArmInstructionCacheSize (\r
+ VOID\r
+ )\r
+{\r
+ UINT32 NumSets;\r
+ UINT32 Associativity;\r
+ UINT32 LineSize;\r
+ UINT32 CCSIDR = ReadCCSIDR (1);\r
+\r
+ LineSize = (1 << ((CCSIDR & 0x7) + 2));\r
+ Associativity = ((CCSIDR >> 3) & 0x3ff) + 1;\r
+ NumSets = ((CCSIDR >> 13) & 0x7fff) + 1;\r
+\r
+ // LineSize is in words (4 byte chunks)\r
+ return NumSets * Associativity * LineSize * 4;\r
+}\r
+\r
+UINTN\r
+EFIAPI\r
+ArmInstructionCacheAssociativity (\r
+ VOID\r
+ )\r
+{\r
+ UINT32 CCSIDR = ReadCCSIDR (1);\r
+\r
+ return ((CCSIDR >> 3) & 0x3ff) + 1;\r
+}\r
+\r
+UINTN\r
+EFIAPI\r
+ArmInstructionCacheSets (\r
+ VOID\r
+ )\r
+{\r
+ UINT32 CCSIDR = ReadCCSIDR (1);\r
+\r
+ return ((CCSIDR >> 13) & 0x7fff) + 1;\r
+}\r
+\r
+UINTN\r
+EFIAPI\r
+ArmInstructionCacheLineLength (\r
+ VOID\r
+ )\r
+{\r
+ UINT32 CCSIDR = ReadCCSIDR (1) & 7;\r
+\r
+ // * 4 converts to bytes\r
+ return (1 << (CCSIDR + 2)) * 4;\r
+}\r
+\r
+\r
+VOID\r
+AArch64DataCacheOperation (\r
+ IN AARCH64_CACHE_OPERATION DataCacheOperation\r
+ )\r
+{\r
+ UINTN SavedInterruptState;\r
+\r
+ SavedInterruptState = ArmGetInterruptState ();\r
+ ArmDisableInterrupts();\r
+\r
+ AArch64AllDataCachesOperation (DataCacheOperation);\r
+\r
+ ArmDrainWriteBuffer ();\r
+\r
+ if (SavedInterruptState) {\r
+ ArmEnableInterrupts ();\r
+ }\r
+}\r
+\r
+\r
+VOID\r
+AArch64PoUDataCacheOperation (\r
+ IN AARCH64_CACHE_OPERATION DataCacheOperation\r
+ )\r
+{\r
+ UINTN SavedInterruptState;\r
+\r
+ SavedInterruptState = ArmGetInterruptState ();\r
+ ArmDisableInterrupts ();\r
+\r
+ AArch64PerformPoUDataCacheOperation (DataCacheOperation);\r
+\r
+ ArmDrainWriteBuffer ();\r
+\r
+ if (SavedInterruptState) {\r
+ ArmEnableInterrupts ();\r
+ }\r
+}\r
+\r
+VOID\r
+EFIAPI\r
+ArmInvalidateDataCache (\r
+ VOID\r
+ )\r
+{\r
+ AArch64DataCacheOperation (ArmInvalidateDataCacheEntryBySetWay);\r
+}\r
+\r
+VOID\r
+EFIAPI\r
+ArmCleanInvalidateDataCache (\r
+ VOID\r
+ )\r
+{\r
+ AArch64DataCacheOperation (ArmCleanInvalidateDataCacheEntryBySetWay);\r
+}\r
+\r
+VOID\r
+EFIAPI\r
+ArmCleanDataCache (\r
+ VOID\r
+ )\r
+{\r
+ AArch64DataCacheOperation (ArmCleanDataCacheEntryBySetWay);\r
+}\r
+\r
+VOID\r
+EFIAPI\r
+ArmCleanDataCacheToPoU (\r
+ VOID\r
+ )\r
+{\r
+ AArch64PoUDataCacheOperation (ArmCleanDataCacheEntryBySetWay);\r
+}\r
--- /dev/null
+/** @file\r
+\r
+ Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+ Portions Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>\r
+\r
+ This program and the accompanying materials\r
+ are licensed and made available under the terms and conditions of the BSD License\r
+ which accompanies this distribution. The full text of the license may be found at\r
+ http://opensource.org/licenses/bsd-license.php\r
+\r
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#ifndef __AARCH64_LIB_H__\r
+#define __AARCH64_LIB_H__\r
+\r
+typedef VOID (*AARCH64_CACHE_OPERATION)(UINTN);\r
+\r
+VOID\r
+EFIAPI\r
+ArmDrainWriteBuffer (\r
+ VOID\r
+ );\r
+\r
+VOID\r
+EFIAPI\r
+ArmInvalidateDataCacheEntryBySetWay (\r
+ IN UINTN SetWayFormat\r
+ );\r
+\r
+VOID\r
+EFIAPI\r
+ArmCleanDataCacheEntryBySetWay (\r
+ IN UINTN SetWayFormat\r
+ );\r
+\r
+VOID\r
+EFIAPI\r
+ArmCleanDataCacheToPoUEntryBySetWay (\r
+ IN UINTN SetWayFormat\r
+ );\r
+\r
+VOID\r
+EFIAPI\r
+ArmCleanInvalidateDataCacheEntryBySetWay (\r
+ IN UINTN SetWayFormat\r
+ );\r
+\r
+VOID\r
+EFIAPI\r
+ArmEnableAsynchronousAbort (\r
+ VOID\r
+ );\r
+\r
+UINTN\r
+EFIAPI\r
+ArmDisableAsynchronousAbort (\r
+ VOID\r
+ );\r
+\r
+VOID\r
+EFIAPI\r
+ArmEnableIrq (\r
+ VOID\r
+ );\r
+\r
+UINTN\r
+EFIAPI\r
+ArmDisableIrq (\r
+ VOID\r
+ );\r
+\r
+VOID\r
+EFIAPI\r
+ArmEnableFiq (\r
+ VOID\r
+ );\r
+\r
+UINTN\r
+EFIAPI\r
+ArmDisableFiq (\r
+ VOID\r
+ );\r
+\r
+VOID\r
+AArch64PerformPoUDataCacheOperation (\r
+ IN AARCH64_CACHE_OPERATION DataCacheOperation\r
+ );\r
+\r
+VOID\r
+AArch64AllDataCachesOperation (\r
+ IN AARCH64_CACHE_OPERATION DataCacheOperation\r
+ );\r
+\r
+#endif // __AARCH64_LIB_H__\r
+\r
--- /dev/null
+#/** @file\r
+#\r
+# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>\r
+# Portions copyright (c) 2011-2013, ARM Limited. All rights reserved.\r
+#\r
+# This program and the accompanying materials\r
+# are licensed and made available under the terms and conditions of the BSD License\r
+# which accompanies this distribution. The full text of the license may be found at\r
+# http://opensource.org/licenses/bsd-license.php\r
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+#\r
+#\r
+#**/\r
+\r
+[Defines]\r
+ INF_VERSION = 0x00010005\r
+ BASE_NAME = AArch64Lib\r
+ FILE_GUID = ef20ddf5-b334-47b3-94cf-52ff44c29138\r
+ MODULE_TYPE = DXE_DRIVER\r
+ VERSION_STRING = 1.0\r
+ LIBRARY_CLASS = ArmLib\r
+\r
+[Sources.AARCH64]\r
+ AArch64Lib.c\r
+ AArch64Mmu.c\r
+ AArch64ArchTimer.c\r
+ ArmLibSupportV8.S | GCC\r
+ ../Common/AArch64/ArmLibSupport.S | GCC\r
+ AArch64Support.S | GCC\r
+ AArch64ArchTimerSupport.S | GCC\r
+\r
+[Packages]\r
+ ArmPkg/ArmPkg.dec\r
+ MdePkg/MdePkg.dec\r
+\r
+[LibraryClasses]\r
+ MemoryAllocationLib\r
+\r
+[Protocols]\r
+ gEfiCpuArchProtocolGuid\r
+\r
+[FixedPcd]\r
+ gArmTokenSpaceGuid.PcdArmCacheOperationThreshold\r
--- /dev/null
+#/** @file\r
+#\r
+# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>\r
+# Portions copyright (c) 2011-2013, ARM Ltd. All rights reserved.<BR>\r
+#\r
+# This program and the accompanying materials\r
+# are licensed and made available under the terms and conditions of the BSD License\r
+# which accompanies this distribution. The full text of the license may be found at\r
+# http://opensource.org/licenses/bsd-license.php\r
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+#\r
+#\r
+#**/\r
+\r
+[Defines]\r
+ INF_VERSION = 0x00010005\r
+ BASE_NAME = AArch64LibPrePi\r
+ FILE_GUID = fd72688d-dbd8-4cf2-91a3-15171dea7816\r
+ MODULE_TYPE = BASE\r
+ VERSION_STRING = 1.0\r
+ LIBRARY_CLASS = ArmLib\r
+\r
+[Sources.common]\r
+ ArmLibSupportV8.S | GCC\r
+ AArch64Support.S | GCC\r
+\r
+ ../Common/AArch64/ArmLibSupport.S | GCC\r
+ ../Common/ArmLib.c\r
+\r
+ AArch64Lib.c\r
+ AArch64Mmu.c\r
+\r
+ AArch64ArchTimer.c\r
+ AArch64ArchTimerSupport.S | GCC\r
+\r
+[Packages]\r
+ ArmPkg/ArmPkg.dec\r
+ MdePkg/MdePkg.dec\r
+\r
+[LibraryClasses]\r
+ PrePiLib\r
+\r
+[Protocols]\r
+ gEfiCpuArchProtocolGuid\r
+\r
+[FixedPcd]\r
+ gArmTokenSpaceGuid.PcdArmCacheOperationThreshold\r
--- /dev/null
+#/* @file\r
+#\r
+# Copyright (c) 2011-2013, ARM Limited. All rights reserved.\r
+#\r
+# This program and the accompanying materials\r
+# are licensed and made available under the terms and conditions of the BSD License\r
+# which accompanies this distribution. The full text of the license may be found at\r
+# http://opensource.org/licenses/bsd-license.php\r
+#\r
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+#\r
+#*/\r
+\r
+[Defines]\r
+ INF_VERSION = 0x00010005\r
+ BASE_NAME = AArch64Lib\r
+ FILE_GUID = eb7441e4-3ddf-48b8-a009-14f428b19e49\r
+ MODULE_TYPE = BASE\r
+ VERSION_STRING = 1.0\r
+ LIBRARY_CLASS = ArmLib\r
+\r
+[Sources.common]\r
+ ArmLibSupportV8.S | GCC\r
+ AArch64Support.S | GCC\r
+ ArmLib.c\r
+\r
+ ../Common/AArch64/ArmLibSupport.S | GCC\r
+\r
+ AArch64Lib.c\r
+\r
+ AArch64ArchTimer.c\r
+ AArch64ArchTimerSupport.S | GCC\r
+\r
+[Packages]\r
+ ArmPkg/ArmPkg.dec\r
+ MdePkg/MdePkg.dec\r
+\r
+[Protocols]\r
+ gEfiCpuArchProtocolGuid\r
+\r
+[FixedPcd]\r
+ gArmTokenSpaceGuid.PcdArmCacheOperationThreshold\r
--- /dev/null
+/** @file\r
+* File managing the MMU for ARMv8 architecture\r
+*\r
+* Copyright (c) 2011-2013, ARM Limited. All rights reserved.\r
+*\r
+* This program and the accompanying materials\r
+* are licensed and made available under the terms and conditions of the BSD License\r
+* which accompanies this distribution. The full text of the license may be found at\r
+* http://opensource.org/licenses/bsd-license.php\r
+*\r
+* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+*\r
+**/\r
+\r
+#include <Uefi.h>\r
+#include <Chipset/AArch64.h>\r
+#include <Library/BaseMemoryLib.h>\r
+#include <Library/MemoryAllocationLib.h>\r
+#include <Library/ArmLib.h>\r
+#include <Library/BaseLib.h>\r
+#include <Library/DebugLib.h>\r
+#include "AArch64Lib.h"\r
+#include "ArmLibPrivate.h"\r
+\r
+// We use this index definition to define an invalid block entry\r
+#define TT_ATTR_INDX_INVALID ((UINT32)~0)\r
+\r
+STATIC\r
+UINT64\r
+ArmMemoryAttributeToPageAttribute (\r
+ IN ARM_MEMORY_REGION_ATTRIBUTES Attributes\r
+ )\r
+{\r
+ switch (Attributes) {\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:\r
+ return TT_ATTR_INDX_MEMORY_WRITE_BACK;\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:\r
+ return TT_ATTR_INDX_MEMORY_WRITE_THROUGH;\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:\r
+ return TT_ATTR_INDX_DEVICE_MEMORY;\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:\r
+ return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:\r
+ return TT_ATTR_INDX_MEMORY_WRITE_BACK;\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:\r
+ return TT_ATTR_INDX_MEMORY_WRITE_THROUGH;\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:\r
+ return TT_ATTR_INDX_DEVICE_MEMORY;\r
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:\r
+ return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;\r
+ default:\r
+ ASSERT(0);\r
+ return TT_ATTR_INDX_DEVICE_MEMORY;\r
+ }\r
+}\r
+\r
+UINT64\r
+PageAttributeToGcdAttribute (\r
+ IN UINT64 PageAttributes\r
+ )\r
+{\r
+ UINT64 GcdAttributes;\r
+\r
+ switch (PageAttributes & TT_ATTR_INDX_MASK) {\r
+ case TT_ATTR_INDX_DEVICE_MEMORY:\r
+ GcdAttributes = EFI_MEMORY_UC;\r
+ break;\r
+ case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:\r
+ GcdAttributes = EFI_MEMORY_WC;\r
+ break;\r
+ case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:\r
+ GcdAttributes = EFI_MEMORY_WT;\r
+ break;\r
+ case TT_ATTR_INDX_MEMORY_WRITE_BACK:\r
+ GcdAttributes = EFI_MEMORY_WB;\r
+ break;\r
+ default:\r
+ DEBUG ((EFI_D_ERROR, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes));\r
+ ASSERT (0);\r
+ // The Global Coherency Domain (GCD) value is defined as a bit set.\r
+ // Returning 0 means no attribute has been set.\r
+ GcdAttributes = 0;\r
+ }\r
+\r
+ // Determine protection attributes\r
+ if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) || ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) {\r
+ // Read only cases map to write-protect\r
+ GcdAttributes |= EFI_MEMORY_WP;\r
+ }\r
+\r
+ // Process eXecute Never attribute\r
+ if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0 ) {\r
+ GcdAttributes |= EFI_MEMORY_XP;\r
+ }\r
+\r
+ return GcdAttributes;\r
+}\r
+\r
+UINT64\r
+GcdAttributeToPageAttribute (\r
+ IN UINT64 GcdAttributes\r
+ )\r
+{\r
+ UINT64 PageAttributes;\r
+\r
+ switch (GcdAttributes & 0xFF) {\r
+ case EFI_MEMORY_UC:\r
+ PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;\r
+ break;\r
+ case EFI_MEMORY_WC:\r
+ PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;\r
+ break;\r
+ case EFI_MEMORY_WT:\r
+ PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH;\r
+ break;\r
+ case EFI_MEMORY_WB:\r
+ PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK;\r
+ break;\r
+ default:\r
+ DEBUG ((EFI_D_ERROR, "GcdAttributeToPageAttribute: 0x%X attributes is not supported.\n", GcdAttributes));\r
+ ASSERT (0);\r
+ // If no match has been found then we mark the memory as device memory.\r
+ // The only side effect of using device memory should be a slow down in the performance.\r
+ PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;\r
+ }\r
+\r
+ // Determine protection attributes\r
+ if (GcdAttributes & EFI_MEMORY_WP) {\r
+ // Read only cases map to write-protect\r
+ PageAttributes |= TT_AP_RO_RO;\r
+ }\r
+\r
+ // Process eXecute Never attribute\r
+ if (GcdAttributes & EFI_MEMORY_XP) {\r
+ PageAttributes |= (TT_PXN_MASK | TT_UXN_MASK);\r
+ }\r
+\r
+ return PageAttributes;\r
+}\r
+\r
+ARM_MEMORY_REGION_ATTRIBUTES\r
+GcdAttributeToArmAttribute (\r
+ IN UINT64 GcdAttributes\r
+ )\r
+{\r
+ switch (GcdAttributes & 0xFF) {\r
+ case EFI_MEMORY_UC:\r
+ return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;\r
+ case EFI_MEMORY_WC:\r
+ return ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED;\r
+ case EFI_MEMORY_WT:\r
+ return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH;\r
+ case EFI_MEMORY_WB:\r
+ return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK;\r
+ default:\r
+ DEBUG ((EFI_D_ERROR, "GcdAttributeToArmAttribute: 0x%lX attributes is not supported.\n", GcdAttributes));\r
+ ASSERT (0);\r
+ return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;\r
+ }\r
+}\r
+\r
+// Describe the T0SZ values for each translation table level\r
+typedef struct {\r
+ UINTN MinT0SZ;\r
+ UINTN MaxT0SZ;\r
+ UINTN LargestT0SZ; // Generally (MaxT0SZ == LargestT0SZ) but at the Level3 Table\r
+ // the MaxT0SZ is not at the boundary of the table\r
+} T0SZ_DESCRIPTION_PER_LEVEL;\r
+\r
+// Map table for the corresponding Level of Table\r
+STATIC CONST T0SZ_DESCRIPTION_PER_LEVEL T0SZPerTableLevel[] = {\r
+ { 16, 24, 24 }, // Table Level 0\r
+ { 25, 33, 33 }, // Table Level 1\r
+ { 34, 39, 42 } // Table Level 2\r
+};\r
+\r
+VOID\r
+GetRootTranslationTableInfo (\r
+ IN UINTN T0SZ,\r
+ OUT UINTN *TableLevel,\r
+ OUT UINTN *TableEntryCount\r
+ )\r
+{\r
+ UINTN Index;\r
+\r
+ // Identify the level of the root table from the given T0SZ\r
+ for (Index = 0; Index < sizeof (T0SZPerTableLevel) / sizeof (T0SZ_DESCRIPTION_PER_LEVEL); Index++) {\r
+ if (T0SZ <= T0SZPerTableLevel[Index].MaxT0SZ) {\r
+ break;\r
+ }\r
+ }\r
+\r
+ // If we have not found the corresponding maximum T0SZ then we use the last one\r
+ if (Index == sizeof (T0SZPerTableLevel) / sizeof (T0SZ_DESCRIPTION_PER_LEVEL)) {\r
+ Index--;\r
+ }\r
+\r
+ // Get the level of the root table\r
+ if (TableLevel) {\r
+ *TableLevel = Index;\r
+ }\r
+\r
+ // The number of entries in the root table is 2^(LargestT0SZ - T0SZ + 1)\r
+ if (TableEntryCount) {\r
+ *TableEntryCount = 1 << (T0SZPerTableLevel[Index].LargestT0SZ - T0SZ + 1);\r
+ }\r
+}\r
+\r
+STATIC\r
+VOID\r
+LookupAddresstoRootTable (\r
+ IN UINT64 MaxAddress,\r
+ OUT UINTN *T0SZ,\r
+ OUT UINTN *TableEntryCount\r
+ )\r
+{\r
+ UINTN TopBit;\r
+\r
+ // Check the parameters are not NULL\r
+ ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));\r
+\r
+ // Look for the highest bit set in MaxAddress\r
+ for (TopBit = 63; TopBit != 0; TopBit--) {\r
+ if ((1ULL << TopBit) & MaxAddress) {\r
+ // MaxAddress top bit is found\r
+ TopBit = TopBit + 1;\r
+ break;\r
+ }\r
+ }\r
+ ASSERT (TopBit != 0);\r
+\r
+ // Calculate T0SZ from the top bit of the MaxAddress\r
+ *T0SZ = 64 - TopBit;\r
+\r
+ // Get the Table info from T0SZ\r
+ GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);\r
+}\r
+\r
+STATIC\r
+UINT64*\r
+GetBlockEntryListFromAddress (\r
+ IN UINT64 *RootTable,\r
+ IN UINT64 RegionStart,\r
+ OUT UINTN *TableLevel,\r
+ IN OUT UINT64 *BlockEntrySize,\r
+ IN OUT UINT64 **LastBlockEntry\r
+ )\r
+{\r
+ UINTN RootTableLevel;\r
+ UINTN RootTableEntryCount;\r
+ UINT64 *TranslationTable;\r
+ UINT64 *BlockEntry;\r
+ UINT64 BlockEntryAddress;\r
+ UINTN BaseAddressAlignment;\r
+ UINTN PageLevel;\r
+ UINTN Index;\r
+ UINTN IndexLevel;\r
+ UINTN T0SZ;\r
+ UINT64 Attributes;\r
+ UINT64 TableAttributes;\r
+\r
+ // Initialize variable\r
+ BlockEntry = NULL;\r
+\r
+ // Ensure the parameters are valid\r
+ ASSERT (TableLevel && BlockEntrySize && LastBlockEntry);\r
+\r
+ // Ensure the Region is aligned on 4KB boundary\r
+ ASSERT ((RegionStart & (SIZE_4KB - 1)) == 0);\r
+\r
+ // Ensure the required size is aligned on 4KB boundary\r
+ ASSERT ((*BlockEntrySize & (SIZE_4KB - 1)) == 0);\r
+\r
+ //\r
+ // Calculate LastBlockEntry from T0SZ\r
+ //\r
+ T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;\r
+ // Get the Table info from T0SZ\r
+ GetRootTranslationTableInfo (T0SZ, &RootTableLevel, &RootTableEntryCount);\r
+ // The last block of the root table depends on the number of entry in this table\r
+ *LastBlockEntry = (UINT64*)((UINTN)RootTable + (RootTableEntryCount * sizeof(UINT64)));\r
+\r
+ // If the start address is 0x0 then we use the size of the region to identify the alignment\r
+ if (RegionStart == 0) {\r
+ // Identify the highest possible alignment for the Region Size\r
+ for (BaseAddressAlignment = 0; BaseAddressAlignment < 64; BaseAddressAlignment++) {\r
+ if ((1 << BaseAddressAlignment) & *BlockEntrySize) {\r
+ break;\r
+ }\r
+ }\r
+ } else {\r
+ // Identify the highest possible alignment for the Base Address\r
+ for (BaseAddressAlignment = 0; BaseAddressAlignment < 64; BaseAddressAlignment++) {\r
+ if ((1 << BaseAddressAlignment) & RegionStart) {\r
+ break;\r
+ }\r
+ }\r
+ }\r
+\r
+ // Identify the Page Level the RegionStart must belongs to\r
+ PageLevel = 3 - ((BaseAddressAlignment - 12) / 9);\r
+\r
+ // If the required size is smaller than the current block size then we need to go to the page level below.\r
+ if (*BlockEntrySize < TT_ADDRESS_AT_LEVEL(PageLevel)) {\r
+ // It does not fit so we need to go a page level above\r
+ PageLevel++;\r
+ }\r
+\r
+ // Expose the found PageLevel to the caller\r
+ *TableLevel = PageLevel;\r
+\r
+ // Now, we have the Table Level we can get the Block Size associated to this table\r
+ *BlockEntrySize = TT_ADDRESS_AT_LEVEL(PageLevel);\r
+\r
+ //\r
+ // Get the Table Descriptor for the corresponding PageLevel. We need to decompose RegionStart to get appropriate entries\r
+ //\r
+\r
+ TranslationTable = RootTable;\r
+ for (IndexLevel = RootTableLevel; IndexLevel <= PageLevel; IndexLevel++) {\r
+ BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel, RegionStart);\r
+\r
+ if ((IndexLevel != 3) && ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY)) {\r
+ // Go to the next table\r
+ TranslationTable = (UINT64*)(*BlockEntry & TT_ADDRESS_MASK_DESCRIPTION_TABLE);\r
+\r
+ // If we are at the last level then update the output\r
+ if (IndexLevel == PageLevel) {\r
+ // And get the appropriate BlockEntry at the next level\r
+ BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel + 1, RegionStart);\r
+\r
+ // Set the last block for this new table\r
+ *LastBlockEntry = (UINT64*)((UINTN)TranslationTable + (TT_ENTRY_COUNT * sizeof(UINT64)));\r
+ }\r
+ } else if ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {\r
+ // If we are not at the last level then we need to split this BlockEntry\r
+ if (IndexLevel != PageLevel) {\r
+ // Retrieve the attributes from the block entry\r
+ Attributes = *BlockEntry & TT_ATTRIBUTES_MASK;\r
+\r
+ // Convert the block entry attributes into Table descriptor attributes\r
+ TableAttributes = TT_TABLE_AP_NO_PERMISSION;\r
+ if (Attributes & TT_PXN_MASK) {\r
+ TableAttributes = TT_TABLE_PXN;\r
+ }\r
+ if (Attributes & TT_UXN_MASK) {\r
+ TableAttributes = TT_TABLE_XN;\r
+ }\r
+ if (Attributes & TT_NS) {\r
+ TableAttributes = TT_TABLE_NS;\r
+ }\r
+\r
+ // Get the address corresponding at this entry\r
+ BlockEntryAddress = RegionStart;\r
+ BlockEntryAddress = BlockEntryAddress >> TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);\r
+ // Shift back to right to set zero before the effective address\r
+ BlockEntryAddress = BlockEntryAddress << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);\r
+\r
+ // Set the correct entry type\r
+ if (IndexLevel + 1 == 3) {\r
+ Attributes |= TT_TYPE_BLOCK_ENTRY_LEVEL3;\r
+ } else {\r
+ Attributes |= TT_TYPE_BLOCK_ENTRY;\r
+ }\r
+\r
+ // Create a new translation table\r
+ TranslationTable = (UINT64*)AllocatePages (EFI_SIZE_TO_PAGES((TT_ENTRY_COUNT * sizeof(UINT64)) + TT_ALIGNMENT_DESCRIPTION_TABLE));\r
+ if (TranslationTable == NULL) {\r
+ return NULL;\r
+ }\r
+ TranslationTable = (UINT64*)((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE);\r
+\r
+ // Fill the new BlockEntry with the TranslationTable\r
+ *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TableAttributes | TT_TYPE_TABLE_ENTRY;\r
+\r
+ // Populate the newly created lower level table\r
+ BlockEntry = TranslationTable;\r
+ for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {\r
+ *BlockEntry = Attributes | (BlockEntryAddress + (Index << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel + 1)));\r
+ BlockEntry++;\r
+ }\r
+ // Block Entry points at the beginning of the Translation Table\r
+ BlockEntry = TranslationTable;\r
+ }\r
+ } else {\r
+ // Case of an invalid entry while we are at a page level above the one targeted.\r
+ if (IndexLevel != PageLevel) {\r
+ // Create a new translation table\r
+ TranslationTable = (UINT64*)AllocatePages (EFI_SIZE_TO_PAGES((TT_ENTRY_COUNT * sizeof(UINT64)) + TT_ALIGNMENT_DESCRIPTION_TABLE));\r
+ if (TranslationTable == NULL) {\r
+ return NULL;\r
+ }\r
+ TranslationTable = (UINT64*)((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE);\r
+\r
+ ZeroMem (TranslationTable, TT_ENTRY_COUNT * sizeof(UINT64));\r
+\r
+ // Fill the new BlockEntry with the TranslationTable\r
+ *BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TT_TYPE_TABLE_ENTRY;\r
+ }\r
+ }\r
+ }\r
+\r
+ return BlockEntry;\r
+}\r
+\r
+STATIC\r
+RETURN_STATUS\r
+FillTranslationTable (\r
+ IN UINT64 *RootTable,\r
+ IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryRegion\r
+ )\r
+{\r
+ UINT64 Attributes;\r
+ UINT32 Type;\r
+ UINT64 RegionStart;\r
+ UINT64 RemainingRegionLength;\r
+ UINT64 *BlockEntry;\r
+ UINT64 *LastBlockEntry;\r
+ UINT64 BlockEntrySize;\r
+ UINTN TableLevel;\r
+\r
+ // Ensure the Length is aligned on 4KB boundary\r
+ ASSERT ((MemoryRegion->Length > 0) && ((MemoryRegion->Length & (SIZE_4KB - 1)) == 0));\r
+\r
+ // Variable initialization\r
+ Attributes = ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF;\r
+ RemainingRegionLength = MemoryRegion->Length;\r
+ RegionStart = MemoryRegion->VirtualBase;\r
+\r
+ do {\r
+ // Get the first Block Entry that matches the Virtual Address and also the information on the Table Descriptor\r
+ // such as the size of the Block Entry and the address of the last BlockEntry of the Table Descriptor\r
+ BlockEntrySize = RemainingRegionLength;\r
+ BlockEntry = GetBlockEntryListFromAddress (RootTable, RegionStart, &TableLevel, &BlockEntrySize, &LastBlockEntry);\r
+ if (BlockEntry == NULL) {\r
+ // GetBlockEntryListFromAddress() return NULL when it fails to allocate new pages from the Translation Tables\r
+ return RETURN_OUT_OF_RESOURCES;\r
+ }\r
+\r
+ if (TableLevel != 3) {\r
+ Type = TT_TYPE_BLOCK_ENTRY;\r
+ } else {\r
+ Type = TT_TYPE_BLOCK_ENTRY_LEVEL3;\r
+ }\r
+\r
+ do {\r
+ // Fill the Block Entry with attribute and output block address\r
+ *BlockEntry = (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type;\r
+\r
+ // Go to the next BlockEntry\r
+ RegionStart += BlockEntrySize;\r
+ RemainingRegionLength -= BlockEntrySize;\r
+ BlockEntry++;\r
+ } while ((RemainingRegionLength >= BlockEntrySize) && (BlockEntry <= LastBlockEntry));\r // NOTE(review): '<=' permits writing *LastBlockEntry, which is one past the table; also LastBlockEntry is not set on every GetBlockEntryListFromAddress() path — verify bounds
+ } while (RemainingRegionLength != 0);\r
+\r
+ return RETURN_SUCCESS;\r
+}\r
+\r
+RETURN_STATUS\r
+SetMemoryAttributes (\r
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
+ IN UINT64 Length,\r
+ IN UINT64 Attributes,\r
+ IN EFI_PHYSICAL_ADDRESS VirtualMask\r
+ )\r
+{
+ RETURN_STATUS Status;\r
+ ARM_MEMORY_REGION_DESCRIPTOR MemoryRegion;\r
+ UINT64 *TranslationTable;\r
+\r
+ MemoryRegion.PhysicalBase = BaseAddress;\r
+ MemoryRegion.VirtualBase = BaseAddress;\r
+ MemoryRegion.Length = Length;\r
+ MemoryRegion.Attributes = GcdAttributeToArmAttribute (Attributes);\r
+\r
+ TranslationTable = ArmGetTTBR0BaseAddress ();\r
+\r
+ Status = FillTranslationTable (TranslationTable, &MemoryRegion);
+ if (RETURN_ERROR (Status)) {
+ return Status;
+ }\r
+\r
+ // Flush d-cache so descriptors make it back to uncached memory for subsequent table walks\r
+ // flush and invalidate pages\r
+ ArmCleanInvalidateDataCache ();\r
+\r
+ ArmInvalidateInstructionCache ();\r
+\r
+ // Invalidate all TLB entries so changes are synced\r
+ ArmInvalidateTlb ();\r
+\r
+ return RETURN_SUCCESS;\r
+}\r
+\r
+RETURN_STATUS\r
+EFIAPI\r
+ArmConfigureMmu (\r
+ IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable,\r
+ OUT VOID **TranslationTableBase OPTIONAL,\r
+ OUT UINTN *TranslationTableSize OPTIONAL\r
+ )\r
+{\r
+ VOID* TranslationTable;\r
+ UINTN TranslationTablePageCount;\r
+ UINT32 TranslationTableAttribute;\r
+ ARM_MEMORY_REGION_DESCRIPTOR *MemoryTableEntry;\r
+ UINT64 MaxAddress;\r
+ UINT64 TopAddress;\r
+ UINTN T0SZ;\r
+ UINTN RootTableEntryCount;\r
+ UINT64 TCR;\r
+ RETURN_STATUS Status;\r
+\r
+ ASSERT (MemoryTable != NULL);\r
+\r
+ // Identify the highest address of the memory table\r
+ MaxAddress = MemoryTable->PhysicalBase + MemoryTable->Length - 1;\r
+ MemoryTableEntry = MemoryTable;\r
+ while (MemoryTableEntry->Length != 0) {\r
+ TopAddress = MemoryTableEntry->PhysicalBase + MemoryTableEntry->Length - 1;\r
+ if (TopAddress > MaxAddress) {\r
+ MaxAddress = TopAddress;\r
+ }\r
+ MemoryTableEntry++;\r
+ }\r
+\r
+ // Lookup the Table Level to get the information\r
+ LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);\r
+\r
+ //\r
+ // Set TCR that allows us to retrieve T0SZ in the subsequent functions\r
+ //\r
+ if ((ArmReadCurrentEL () == AARCH64_EL2) || (ArmReadCurrentEL () == AARCH64_EL3)) {\r
+ // Note: Bits 23 and 31 are RES1 (reserved, set to one) in TCR_EL2 and TCR_EL3\r
+ TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;\r
+\r
+ // Set the Physical Address Size using MaxAddress\r
+ if (MaxAddress < SIZE_4GB) {\r
+ TCR |= TCR_PS_4GB;\r
+ } else if (MaxAddress < SIZE_64GB) {\r
+ TCR |= TCR_PS_64GB;\r
+ } else if (MaxAddress < SIZE_1TB) {\r
+ TCR |= TCR_PS_1TB;\r
+ } else if (MaxAddress < SIZE_4TB) {\r
+ TCR |= TCR_PS_4TB;\r
+ } else if (MaxAddress < SIZE_16TB) {\r
+ TCR |= TCR_PS_16TB;\r
+ } else if (MaxAddress < SIZE_256TB) {\r
+ TCR |= TCR_PS_256TB;\r
+ } else {\r
+ DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU support.\n", MaxAddress));\r
+ ASSERT (0); // Bigger than 48-bit memory space are not supported\r
+ return RETURN_UNSUPPORTED;\r
+ }\r
+ } else {\r
+ ASSERT (0); // TCR configuration when running at EL1 is not implemented here\r
+ return RETURN_UNSUPPORTED;\r
+ }\r
+\r
+ // Set TCR\r
+ ArmSetTCR (TCR);\r
+\r
+ // Allocate pages for translation table\r
+ TranslationTablePageCount = EFI_SIZE_TO_PAGES((RootTableEntryCount * sizeof(UINT64)) + TT_ALIGNMENT_DESCRIPTION_TABLE);\r
+ TranslationTable = AllocatePages (TranslationTablePageCount);\r
+ if (TranslationTable == NULL) {\r
+ return RETURN_OUT_OF_RESOURCES;\r
+ }\r
+ TranslationTable = (VOID*)((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE);\r
+ // We set TTBR0 just after allocating the table to retrieve its location from the subsequent\r
+ // functions without needing to pass this value across the functions. The MMU is only enabled\r
+ // after the translation tables are populated.\r
+ ArmSetTTBR0 (TranslationTable);\r
+\r
+ if (TranslationTableBase != NULL) {\r
+ *TranslationTableBase = TranslationTable;\r
+ }\r
+\r
+ if (TranslationTableSize != NULL) {\r
+ *TranslationTableSize = RootTableEntryCount * sizeof(UINT64);\r
+ }\r
+\r
+ ZeroMem (TranslationTable, RootTableEntryCount * sizeof(UINT64));\r
+\r
+ // Disable MMU and caches. ArmDisableMmu() also invalidates the TLBs\r
+ ArmDisableMmu ();\r
+ ArmDisableDataCache ();\r
+ ArmDisableInstructionCache ();\r
+\r
+ // Make sure nothing sneaked into the cache\r
+ ArmCleanInvalidateDataCache ();\r
+ ArmInvalidateInstructionCache ();\r
+\r
+ TranslationTableAttribute = TT_ATTR_INDX_INVALID;\r
+ while (MemoryTable->Length != 0) {\r
+ // Find the memory attribute for the Translation Table\r
+ if (((UINTN)TranslationTable >= MemoryTable->PhysicalBase) &&\r
+ ((UINTN)TranslationTable <= MemoryTable->PhysicalBase - 1 + MemoryTable->Length)) {\r
+ TranslationTableAttribute = MemoryTable->Attributes;\r
+ }\r
+\r
+ Status = FillTranslationTable (TranslationTable, MemoryTable);\r
+ if (RETURN_ERROR (Status)) {\r
+ goto FREE_TRANSLATION_TABLE;\r
+ }\r
+ MemoryTable++;\r
+ }\r
+\r
+ // Translate the Memory Attributes into Translation Table Register Attributes\r
+ if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED) ||\r
+ (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED)) {\r
+ TCR |= TCR_SH_NON_SHAREABLE | TCR_RGN_OUTER_NON_CACHEABLE | TCR_RGN_INNER_NON_CACHEABLE;\r
+ } else if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK) ||\r
+ (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK)) {\r
+ TCR |= TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WRITE_BACK_ALLOC | TCR_RGN_INNER_WRITE_BACK_ALLOC;\r
+ } else if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH) ||\r
+ (TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH)) {\r
+ TCR |= TCR_SH_NON_SHAREABLE | TCR_RGN_OUTER_WRITE_THROUGH | TCR_RGN_INNER_WRITE_THROUGH;\r
+ } else {\r
+ // If we failed to find a mapping that contains the root translation table then it probably means the translation table\r
+ // is not mapped in the given memory map.\r
+ ASSERT (0);\r
+ Status = RETURN_UNSUPPORTED;\r
+ goto FREE_TRANSLATION_TABLE;\r
+ }\r
+\r
+ ArmSetMAIR (MAIR_ATTR(TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) | // mapped to EFI_MEMORY_UC\r
+ MAIR_ATTR(TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) | // mapped to EFI_MEMORY_WC\r
+ MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) | // mapped to EFI_MEMORY_WT\r
+ MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)); // mapped to EFI_MEMORY_WB\r
+\r
+ ArmDisableAlignmentCheck ();\r
+ ArmEnableInstructionCache ();\r
+ ArmEnableDataCache ();\r
+\r
+ ArmEnableMmu ();\r
+ return RETURN_SUCCESS;\r
+\r
+FREE_TRANSLATION_TABLE:\r
+ FreePages (TranslationTable, TranslationTablePageCount);\r
+ return Status;\r
+}\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>\r
+# Copyright (c) 2011 - 2013, ARM Limited. All rights reserved.\r
+#\r
+# This program and the accompanying materials\r
+# are licensed and made available under the terms and conditions of the BSD License\r
+# which accompanies this distribution. The full text of the license may be found at\r
+# http://opensource.org/licenses/bsd-license.php\r
+#\r
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+#include <Chipset/AArch64.h>\r
+#include <AsmMacroIoLibV8.h>\r
+\r
+.text\r
+.align 3\r
+\r
+GCC_ASM_EXPORT (ArmInvalidateInstructionCache)\r
+GCC_ASM_EXPORT (ArmInvalidateDataCacheEntryByMVA)\r
+GCC_ASM_EXPORT (ArmCleanDataCacheEntryByMVA)\r
+GCC_ASM_EXPORT (ArmCleanInvalidateDataCacheEntryByMVA)\r
+GCC_ASM_EXPORT (ArmInvalidateDataCacheEntryBySetWay)\r
+GCC_ASM_EXPORT (ArmCleanDataCacheEntryBySetWay)\r
+GCC_ASM_EXPORT (ArmCleanInvalidateDataCacheEntryBySetWay)\r
+GCC_ASM_EXPORT (ArmDrainWriteBuffer)\r
+GCC_ASM_EXPORT (ArmEnableMmu)\r
+GCC_ASM_EXPORT (ArmDisableMmu)\r
+GCC_ASM_EXPORT (ArmDisableCachesAndMmu)\r
+GCC_ASM_EXPORT (ArmMmuEnabled)\r
+GCC_ASM_EXPORT (ArmEnableDataCache)\r
+GCC_ASM_EXPORT (ArmDisableDataCache)\r
+GCC_ASM_EXPORT (ArmEnableInstructionCache)\r
+GCC_ASM_EXPORT (ArmDisableInstructionCache)\r
+GCC_ASM_EXPORT (ArmDisableAlignmentCheck)\r
+GCC_ASM_EXPORT (ArmEnableAlignmentCheck)\r
+GCC_ASM_EXPORT (ArmEnableBranchPrediction)\r
+GCC_ASM_EXPORT (ArmDisableBranchPrediction)\r
+GCC_ASM_EXPORT (AArch64AllDataCachesOperation)\r
+GCC_ASM_EXPORT (AArch64PerformPoUDataCacheOperation)\r
+GCC_ASM_EXPORT (ArmDataMemoryBarrier)\r
+GCC_ASM_EXPORT (ArmDataSyncronizationBarrier)\r
+GCC_ASM_EXPORT (ArmInstructionSynchronizationBarrier)\r
+GCC_ASM_EXPORT (ArmWriteVBar)\r
+GCC_ASM_EXPORT (ArmVFPImplemented)\r
+GCC_ASM_EXPORT (ArmEnableVFP)\r
+GCC_ASM_EXPORT (ArmCallWFI)\r
+GCC_ASM_EXPORT (ArmInvalidateInstructionAndDataTlb)\r
+GCC_ASM_EXPORT (ArmReadMpidr)\r
+GCC_ASM_EXPORT (ArmReadTpidrurw)\r
+GCC_ASM_EXPORT (ArmWriteTpidrurw)\r
+GCC_ASM_EXPORT (ArmIsArchTimerImplemented)\r
+GCC_ASM_EXPORT (ArmReadIdPfr0)\r
+GCC_ASM_EXPORT (ArmReadIdPfr1)\r
+GCC_ASM_EXPORT (ArmWriteHcr)\r
+GCC_ASM_EXPORT (ArmReadCurrentEL)\r
+\r
+.set CTRL_M_BIT, (1 << 0)\r
+.set CTRL_A_BIT, (1 << 1)\r
+.set CTRL_C_BIT, (1 << 2)\r
+.set CTRL_I_BIT, (1 << 12)\r
+.set CTRL_V_BIT, (1 << 12) // NOTE(review): duplicates CTRL_I_BIT; AArch64 SCTLR has no V bit — appears unused, confirm before removal\r
+.set CPACR_VFP_BITS, (3 << 20)\r
+\r
+ASM_PFX(ArmInvalidateDataCacheEntryByMVA):\r
+ dc ivac, x0 // Invalidate single data cache line\r
+ dsb sy\r
+ isb\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmCleanDataCacheEntryByMVA):\r
+ dc cvac, x0 // Clean single data cache line\r
+ dsb sy\r
+ isb\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmCleanInvalidateDataCacheEntryByMVA):\r
+ dc civac, x0 // Clean and invalidate single data cache line\r
+ dsb sy\r
+ isb\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmInvalidateDataCacheEntryBySetWay):\r
+ dc isw, x0 // Invalidate this line\r
+ dsb sy\r
+ isb\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmCleanInvalidateDataCacheEntryBySetWay):\r
+ dc cisw, x0 // Clean and Invalidate this line\r
+ dsb sy\r
+ isb\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmCleanDataCacheEntryBySetWay):\r
+ dc csw, x0 // Clean this line\r
+ dsb sy\r
+ isb\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmInvalidateInstructionCache):\r
+ ic iallu // Invalidate entire instruction cache\r
+ dsb sy\r
+ isb\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmEnableMmu):\r
+ EL1_OR_EL2_OR_EL3(x1)\r
+1: mrs x0, sctlr_el1 // Read System control register EL1\r
+ b 4f\r
+2: mrs x0, sctlr_el2 // Read System control register EL2\r
+ b 4f\r
+3: mrs x0, sctlr_el3 // Read System control register EL3\r
+4: orr x0, x0, #CTRL_M_BIT // Set MMU enable bit\r
+ EL1_OR_EL2_OR_EL3(x1)\r
+1: tlbi alle1\r
+ isb\r
+ msr sctlr_el1, x0 // Write back\r
+ b 4f\r
+2: tlbi alle2\r
+ isb\r
+ msr sctlr_el2, x0 // Write back\r
+ b 4f\r
+3: tlbi alle3\r
+ isb\r
+ msr sctlr_el3, x0 // Write back\r
+4: dsb sy\r
+ isb\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmDisableMmu):\r
+ EL1_OR_EL2_OR_EL3(x1)\r
+1: mrs x0, sctlr_el1 // Read System Control Register EL1\r
+ b 4f\r
+2: mrs x0, sctlr_el2 // Read System Control Register EL2\r
+ b 4f\r
+3: mrs x0, sctlr_el3 // Read System Control Register EL3\r
+4: bic x0, x0, #CTRL_M_BIT // Clear MMU enable bit\r
+ EL1_OR_EL2_OR_EL3(x1)\r
+1: msr sctlr_el1, x0 // Write back\r
+ tlbi alle1\r
+ b 4f\r
+2: msr sctlr_el2, x0 // Write back\r
+ tlbi alle2\r
+ b 4f\r
+3: msr sctlr_el3, x0 // Write back\r
+ tlbi alle3\r
+4: dsb sy\r
+ isb\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmDisableCachesAndMmu):\r
+ EL1_OR_EL2_OR_EL3(x1)\r
+1: mrs x0, sctlr_el1 // Get control register EL1\r
+ b 4f\r
+2: mrs x0, sctlr_el2 // Get control register EL2\r
+ b 4f\r
+3: mrs x0, sctlr_el3 // Get control register EL3\r
+4: bic x0, x0, #CTRL_M_BIT // Disable MMU\r
+ bic x0, x0, #CTRL_C_BIT // Disable D Cache\r
+ bic x0, x0, #CTRL_I_BIT // Disable I Cache\r
+ EL1_OR_EL2_OR_EL3(x1)\r
+1: msr sctlr_el1, x0 // Write back control register\r
+ b 4f\r
+2: msr sctlr_el2, x0 // Write back control register\r
+ b 4f\r
+3: msr sctlr_el3, x0 // Write back control register\r
+4: dsb sy\r
+ isb\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmMmuEnabled):\r
+ EL1_OR_EL2_OR_EL3(x1)\r
+1: mrs x0, sctlr_el1 // Get control register EL1\r
+ b 4f\r
+2: mrs x0, sctlr_el2 // Get control register EL2\r
+ b 4f\r
+3: mrs x0, sctlr_el3 // Get control register EL3\r
+4: and x0, x0, #CTRL_M_BIT\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmEnableDataCache):\r
+ EL1_OR_EL2_OR_EL3(x1)\r
+1: mrs x0, sctlr_el1 // Get control register EL1\r
+ b 4f\r
+2: mrs x0, sctlr_el2 // Get control register EL2\r
+ b 4f\r
+3: mrs x0, sctlr_el3 // Get control register EL3\r
+4: orr x0, x0, #CTRL_C_BIT // Set C bit\r
+ EL1_OR_EL2_OR_EL3(x1)\r
+1: msr sctlr_el1, x0 // Write back control register\r
+ b 4f\r
+2: msr sctlr_el2, x0 // Write back control register\r
+ b 4f\r
+3: msr sctlr_el3, x0 // Write back control register\r
+4: dsb sy\r
+ isb\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmDisableDataCache):\r
+ EL1_OR_EL2_OR_EL3(x1)\r
+1: mrs x0, sctlr_el1 // Get control register EL1\r
+ b 4f\r
+2: mrs x0, sctlr_el2 // Get control register EL2\r
+ b 4f\r
+3: mrs x0, sctlr_el3 // Get control register EL3\r
+4: bic x0, x0, #CTRL_C_BIT // Clear C bit\r
+ EL1_OR_EL2_OR_EL3(x1)\r
+1: msr sctlr_el1, x0 // Write back control register\r
+ b 4f\r
+2: msr sctlr_el2, x0 // Write back control register\r
+ b 4f\r
+3: msr sctlr_el3, x0 // Write back control register\r
+4: dsb sy\r
+ isb\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmEnableInstructionCache):\r
+ EL1_OR_EL2_OR_EL3(x1)\r
+1: mrs x0, sctlr_el1 // Get control register EL1\r
+ b 4f\r
+2: mrs x0, sctlr_el2 // Get control register EL2\r
+ b 4f\r
+3: mrs x0, sctlr_el3 // Get control register EL3\r
+4: orr x0, x0, #CTRL_I_BIT // Set I bit\r
+ EL1_OR_EL2_OR_EL3(x1)\r
+1: msr sctlr_el1, x0 // Write back control register\r
+ b 4f\r
+2: msr sctlr_el2, x0 // Write back control register\r
+ b 4f\r
+3: msr sctlr_el3, x0 // Write back control register\r
+4: dsb sy\r
+ isb\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmDisableInstructionCache):\r
+ EL1_OR_EL2_OR_EL3(x1)\r
+1: mrs x0, sctlr_el1 // Get control register EL1\r
+ b 4f\r
+2: mrs x0, sctlr_el2 // Get control register EL2\r
+ b 4f\r
+3: mrs x0, sctlr_el3 // Get control register EL3\r
+4: bic x0, x0, #CTRL_I_BIT // Clear I bit\r
+ EL1_OR_EL2_OR_EL3(x1)\r
+1: msr sctlr_el1, x0 // Write back control register\r
+ b 4f\r
+2: msr sctlr_el2, x0 // Write back control register\r
+ b 4f\r
+3: msr sctlr_el3, x0 // Write back control register\r
+4: dsb sy\r
+ isb\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmEnableAlignmentCheck):\r
+ EL1_OR_EL2(x1) // NOTE(review): no EL3 case here, unlike ArmDisableAlignmentCheck — confirm whether EL3 support is intended\r
+1: mrs x0, sctlr_el1 // Get control register EL1\r
+ b 3f\r
+2: mrs x0, sctlr_el2 // Get control register EL2\r
+3: orr x0, x0, #CTRL_A_BIT // Set A (alignment check) bit\r
+ EL1_OR_EL2(x1)\r
+1: msr sctlr_el1, x0 // Write back control register\r
+ b 3f\r
+2: msr sctlr_el2, x0 // Write back control register\r
+3: dsb sy\r
+ isb\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmDisableAlignmentCheck):\r
+ EL1_OR_EL2_OR_EL3(x1)\r
+1: mrs x0, sctlr_el1 // Get control register EL1\r
+ b 4f\r
+2: mrs x0, sctlr_el2 // Get control register EL2\r
+ b 4f\r
+3: mrs x0, sctlr_el3 // Get control register EL3\r
+4: bic x0, x0, #CTRL_A_BIT // Clear A (alignment check) bit\r
+ EL1_OR_EL2_OR_EL3(x1)\r
+1: msr sctlr_el1, x0 // Write back control register\r
+ b 4f\r
+2: msr sctlr_el2, x0 // Write back control register\r
+ b 4f\r
+3: msr sctlr_el3, x0 // Write back control register\r
+4: dsb sy\r
+ isb\r
+ ret\r
+\r
+\r
+// Always turned on in AArch64. Else implementation specific. Leave in for C compatibility for now\r
+ASM_PFX(ArmEnableBranchPrediction):\r
+ ret\r
+\r
+\r
+// Always turned on in AArch64. Else implementation specific. Leave in for C compatibility for now.\r
+ASM_PFX(ArmDisableBranchPrediction):\r
+ ret\r
+\r
+\r
+ASM_PFX(AArch64AllDataCachesOperation):\r
+// We can use regs 0-7 and 9-15 without having to save/restore.\r
+// Save our link register on the stack.\r
+ str x30, [sp, #-0x10]!\r
+ mov x1, x0 // Save Function call in x1\r
+ mrs x6, clidr_el1 // Read EL1 CLIDR\r
+ and x3, x6, #0x7000000 // Mask out all but Level of Coherency (LoC)\r
+ lsr x3, x3, #23 // Right-align the LoC field (result = LoC * 2, matching the level counter in x10)\r
+ cbz x3, L_Finished // No need to clean if LoC is 0\r
+ mov x10, #0 // Start clean at cache level 0\r
+ b Loop1\r
+\r
+ASM_PFX(AArch64PerformPoUDataCacheOperation):\r
+// We can use regs 0-7 and 9-15 without having to save/restore.\r
+// Save our link register on the stack.\r
+ str x30, [sp, #-0x10]!\r
+ mov x1, x0 // Save Function call in x1\r
+ mrs x6, clidr_el1 // Read EL1 CLIDR\r
+ and x3, x6, #0x38000000 // Mask out all but Point of Unification (PoU)\r
+ lsr x3, x3, #26 // Right-align the PoU field (result = PoU * 2, matching the level counter in x10)\r
+ cbz x3, L_Finished // No need to clean if PoU is 0\r
+ mov x10, #0 // Start clean at cache level 0\r
+\r
+Loop1:\r
+ add x2, x10, x10, lsr #1 // Work out 3x cachelevel for cache info\r
+ lsr x12, x6, x2 // bottom 3 bits are the Cache type for this level\r
+ and x12, x12, #7 // get those 3 bits alone\r
+ cmp x12, #2 // what cache at this level?\r
+ b.lt L_Skip // no cache or only instruction cache at this level\r
+ msr csselr_el1, x10 // write the Cache Size selection register with current level (CSSELR)\r
+ isb // isb to sync the change to the CacheSizeID reg\r
+ mrs x12, ccsidr_el1 // reads current Cache Size ID register (CCSIDR)\r
+ and x2, x12, #0x7 // extract the line length field\r
+ add x2, x2, #4 // add 4 for the line length offset (log2 16 bytes)\r
+ mov x4, #0x400\r
+ sub x4, x4, #1\r
+ and x4, x4, x12, lsr #3 // x4 is the max number on the way size (right aligned)\r
+ clz w5, w4 // w5 is the bit position of the way size increment\r
+ mov x7, #0x00008000\r
+ sub x7, x7, #1\r
+ and x7, x7, x12, lsr #13 // x7 is the max number of the index size (right aligned)\r
+\r
+Loop2:\r
+ mov x9, x4 // x9 working copy of the max way size (right aligned)\r
+\r
+Loop3:\r
+ lsl x11, x9, x5\r
+ orr x0, x10, x11 // factor in the way number and cache number\r
+ lsl x11, x7, x2\r
+ orr x0, x0, x11 // factor in the index number\r
+\r
+ blr x1 // Goto requested cache operation\r
+\r
+ subs x9, x9, #1 // decrement the way number\r
+ b.ge Loop3\r
+ subs x7, x7, #1 // decrement the index\r
+ b.ge Loop2\r
+L_Skip:\r
+ add x10, x10, #2 // increment the cache number\r
+ cmp x3, x10\r
+ b.gt Loop1\r
+\r
+L_Finished:\r
+ dsb sy\r
+ isb\r
+ ldr x30, [sp], #0x10\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmDataMemoryBarrier):\r
+ dmb sy\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmDataSyncronizationBarrier):\r
+ASM_PFX(ArmDrainWriteBuffer):\r
+ dsb sy\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmInstructionSynchronizationBarrier):\r
+ isb\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmWriteVBar):\r
+ EL1_OR_EL2_OR_EL3(x1)\r
+1: msr vbar_el1, x0 // Set the Address of the EL1 Vector Table in the VBAR register\r
+ b 4f\r
+2: msr vbar_el2, x0 // Set the Address of the EL2 Vector Table in the VBAR register\r
+ b 4f\r
+3: msr vbar_el3, x0 // Set the Address of the EL3 Vector Table in the VBAR register\r
+4: isb\r
+ ret\r
+\r
+ASM_PFX(ArmEnableVFP):\r
+ // Check whether floating-point is implemented in the processor.\r
+ mov x1, x30 // Save LR\r
+ bl ArmReadIdPfr0 // Read EL1 Processor Feature Register (PFR0)\r
+ mov x30, x1 // Restore LR\r
+ ands x0, x0, #AARCH64_PFR0_FP // Extract bits indicating VFP implementation\r
+ cmp x0, #0 // VFP is implemented if '0'.\r
+ b.ne 4f // Exit if VFP not implemented.\r
+ // VFP is implemented.\r
+ // Make sure VFP exceptions are not trapped (to any exception level).\r
+ mrs x0, cpacr_el1 // Read EL1 Coprocessor Access Control Register (CPACR)\r
+ orr x0, x0, #CPACR_VFP_BITS // Disable VFP traps to EL1\r
+ msr cpacr_el1, x0 // Write back EL1 Coprocessor Access Control Register (CPACR)\r
+ mov x1, #AARCH64_CPTR_TFP // TFP Bit for trapping VFP Exceptions\r
+ EL1_OR_EL2_OR_EL3(x2)\r
+1:ret // Not configurable in EL1\r
+2:mrs x0, cptr_el2 // Disable VFP traps to EL2\r
+ bic x0, x0, x1\r
+ msr cptr_el2, x0\r
+ ret\r
+3:mrs x0, cptr_el3 // Disable VFP traps to EL3\r
+ bic x0, x0, x1\r
+ msr cptr_el3, x0\r
+4:ret\r
+\r
+\r
+ASM_PFX(ArmCallWFI):\r
+ wfi\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmInvalidateInstructionAndDataTlb):\r
+ EL1_OR_EL2_OR_EL3(x0)\r
+1: tlbi alle1\r
+ b 4f\r
+2: tlbi alle2\r
+ b 4f\r
+3: tlbi alle3\r
+4: dsb sy\r
+ isb\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmReadMpidr):\r
+ mrs x0, mpidr_el1 // read EL1 MPIDR\r
+ ret\r
+\r
+\r
+// Keep old function names for C compatibility for now. Change later?\r
+ASM_PFX(ArmReadTpidrurw):\r
+ mrs x0, tpidr_el0 // read tpidr_el0 (v7 TPIDRURW) -> (v8 TPIDR_EL0)\r
+ ret\r
+\r
+\r
+// Keep old function names for C compatibility for now. Change later?\r
+ASM_PFX(ArmWriteTpidrurw):\r
+ msr tpidr_el0, x0 // write tpidr_el0 (v7 TPIDRURW) -> (v8 TPIDR_EL0)\r
+ ret\r
+\r
+\r
+// Arch timers are mandatory on AArch64\r
+ASM_PFX(ArmIsArchTimerImplemented):\r
+ mov x0, #1\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmReadIdPfr0):\r
+ mrs x0, id_aa64pfr0_el1 // Read ID_AA64PFR0 Register\r
+ ret\r
+\r
+\r
+// Q: id_aa64pfr1_el1 not defined yet. What does this function want to access?\r
+// A: used to setup arch timer. Check if we have security extensions, permissions to set stuff.\r
+// See: ArmPkg/Library/ArmArchTimerLib/AArch64/ArmArchTimerLib.c\r
+// Not defined yet, but stick in here for now, should read all zeros.\r
+ASM_PFX(ArmReadIdPfr1):\r
+ mrs x0, id_aa64pfr1_el1 // Read ID_AA64PFR1 Register\r
+ ret\r
+\r
+// VOID ArmWriteHcr(UINTN Hcr)\r
+ASM_PFX(ArmWriteHcr):\r
+ msr hcr_el2, x0 // Write the passed HCR value\r
+ ret\r
+\r
+// UINTN ArmReadCurrentEL(VOID)\r
+ASM_PFX(ArmReadCurrentEL):\r
+ mrs x0, CurrentEL\r
+ ret\r
+\r
+dead: // NOTE(review): unreferenced infinite-loop debug stub — confirm and remove\r
+ b dead\r
+\r
+ASM_FUNCTION_REMOVE_IF_UNREFERENCED\r
--- /dev/null
+/** @file\r
+\r
+ Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+ portions copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>\r
+\r
+ This program and the accompanying materials\r
+ are licensed and made available under the terms and conditions of the BSD License\r
+ which accompanies this distribution. The full text of the license may be found at\r
+ http://opensource.org/licenses/bsd-license.php\r
+\r
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include <Base.h>\r
+\r
+#include <Library/ArmLib.h>\r
+#include <Library/DebugLib.h>\r
+#include <Library/PcdLib.h>\r
+\r
+#include "ArmLibPrivate.h"\r
+\r
+VOID\r
+EFIAPI\r
+ArmCacheInformation (\r
+ OUT ARM_CACHE_INFO *CacheInfo\r
+ )\r
+{\r
+ if (CacheInfo != NULL) {\r
+ CacheInfo->Type = ArmCacheType();\r
+ CacheInfo->Architecture = ArmCacheArchitecture();\r
+ CacheInfo->DataCachePresent = ArmDataCachePresent();\r
+ CacheInfo->DataCacheSize = ArmDataCacheSize();\r
+ CacheInfo->DataCacheAssociativity = ArmDataCacheAssociativity();\r
+ CacheInfo->DataCacheLineLength = ArmDataCacheLineLength();\r
+ CacheInfo->InstructionCachePresent = ArmInstructionCachePresent();\r
+ CacheInfo->InstructionCacheSize = ArmInstructionCacheSize();\r
+ CacheInfo->InstructionCacheAssociativity = ArmInstructionCacheAssociativity();\r
+ CacheInfo->InstructionCacheLineLength = ArmInstructionCacheLineLength();\r
+ }\r
+}\r
+\r
+VOID\r
+EFIAPI\r
+ArmSetAuxCrBit (\r
+ IN UINT32 Bits\r
+ )\r
+{\r
+ UINT32 val = ArmReadAuxCr();\r
+ val |= Bits;\r
+ ArmWriteAuxCr(val);\r
+}\r
--- /dev/null
+/** @file\r
+\r
+ Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+ Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>\r
+\r
+ This program and the accompanying materials\r
+ are licensed and made available under the terms and conditions of the BSD License\r
+ which accompanies this distribution. The full text of the license may be found at\r
+ http://opensource.org/licenses/bsd-license.php\r
+\r
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#ifndef __ARM_LIB_PRIVATE_H__\r
+#define __ARM_LIB_PRIVATE_H__\r
+\r
+#define CACHE_SIZE_4_KB (3UL)\r
+#define CACHE_SIZE_8_KB (4UL)\r
+#define CACHE_SIZE_16_KB (5UL)\r
+#define CACHE_SIZE_32_KB (6UL)\r
+#define CACHE_SIZE_64_KB (7UL)\r
+#define CACHE_SIZE_128_KB (8UL)\r
+\r
+#define CACHE_ASSOCIATIVITY_DIRECT (0UL)\r
+#define CACHE_ASSOCIATIVITY_4_WAY (2UL)\r
+#define CACHE_ASSOCIATIVITY_8_WAY (3UL)\r
+\r
+#define CACHE_PRESENT (0UL)\r
+#define CACHE_NOT_PRESENT (1UL)\r
+\r
+#define CACHE_LINE_LENGTH_32_BYTES (2UL)\r
+\r
+#define SIZE_FIELD_TO_CACHE_SIZE(x) (((x) >> 6) & 0x0F)\r
+#define SIZE_FIELD_TO_CACHE_ASSOCIATIVITY(x) (((x) >> 3) & 0x07)\r
+#define SIZE_FIELD_TO_CACHE_PRESENCE(x) (((x) >> 2) & 0x01)\r
+#define SIZE_FIELD_TO_CACHE_LINE_LENGTH(x) (((x) >> 0) & 0x03)\r
+\r
+#define DATA_CACHE_SIZE_FIELD(x) (((x) >> 12) & 0x0FFF)\r
+#define INSTRUCTION_CACHE_SIZE_FIELD(x) (((x) >> 0) & 0x0FFF)\r
+\r
+#define DATA_CACHE_SIZE(x) (SIZE_FIELD_TO_CACHE_SIZE(DATA_CACHE_SIZE_FIELD(x)))\r
+#define DATA_CACHE_ASSOCIATIVITY(x) (SIZE_FIELD_TO_CACHE_ASSOCIATIVITY(DATA_CACHE_SIZE_FIELD(x)))\r
+#define DATA_CACHE_PRESENT(x) (SIZE_FIELD_TO_CACHE_PRESENCE(DATA_CACHE_SIZE_FIELD(x)))\r
+#define DATA_CACHE_LINE_LENGTH(x) (SIZE_FIELD_TO_CACHE_LINE_LENGTH(DATA_CACHE_SIZE_FIELD(x)))\r
+\r
+#define INSTRUCTION_CACHE_SIZE(x) (SIZE_FIELD_TO_CACHE_SIZE(INSTRUCTION_CACHE_SIZE_FIELD(x)))\r
+#define INSTRUCTION_CACHE_ASSOCIATIVITY(x) (SIZE_FIELD_TO_CACHE_ASSOCIATIVITY(INSTRUCTION_CACHE_SIZE_FIELD(x)))\r
+#define INSTRUCTION_CACHE_PRESENT(x) (SIZE_FIELD_TO_CACHE_PRESENCE(INSTRUCTION_CACHE_SIZE_FIELD(x)))\r
+#define INSTRUCTION_CACHE_LINE_LENGTH(x) (SIZE_FIELD_TO_CACHE_LINE_LENGTH(INSTRUCTION_CACHE_SIZE_FIELD(x)))\r
+\r
+#define CACHE_TYPE(x) (((x) >> 25) & 0x0F)\r
+#define CACHE_TYPE_WRITE_BACK (0x0EUL)\r
+\r
+#define CACHE_ARCHITECTURE(x) (((x) >> 24) & 0x01)\r
+#define CACHE_ARCHITECTURE_UNIFIED (0UL)\r
+#define CACHE_ARCHITECTURE_SEPARATE (1UL)\r
+\r
+\r
+VOID\r
+CPSRMaskInsert (\r
+ IN UINT32 Mask,\r
+ IN UINT32 Value\r
+ );\r
+\r
+UINT32\r
+CPSRRead (\r
+ VOID\r
+ );\r
+\r
+UINT32\r
+ReadCCSIDR (\r
+ IN UINT32 CSSELR\r
+ );\r
+\r
+UINT32\r
+ReadCLIDR (\r
+ VOID\r
+ );\r
+\r
+#endif // __ARM_LIB_PRIVATE_H__\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+# Copyright (c) 2011 - 2013, ARM Limited. All rights reserved.\r
+#\r
+# This program and the accompanying materials\r
+# are licensed and made available under the terms and conditions of the BSD License\r
+# which accompanies this distribution. The full text of the license may be found at\r
+# http://opensource.org/licenses/bsd-license.php\r
+#\r
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+#include <AsmMacroIoLib.h>\r
+\r
+.text\r
+.align 3\r
+\r
+GCC_ASM_EXPORT (ArmIsMpCore)\r
+GCC_ASM_EXPORT (ArmEnableAsynchronousAbort)\r
+GCC_ASM_EXPORT (ArmDisableAsynchronousAbort)\r
+GCC_ASM_EXPORT (ArmEnableIrq)\r
+GCC_ASM_EXPORT (ArmDisableIrq)\r
+GCC_ASM_EXPORT (ArmEnableFiq)\r
+GCC_ASM_EXPORT (ArmDisableFiq)\r
+GCC_ASM_EXPORT (ArmEnableInterrupts)\r
+GCC_ASM_EXPORT (ArmDisableInterrupts)\r
+GCC_ASM_EXPORT (ArmDisableAllExceptions)\r
+GCC_ASM_EXPORT (ReadCCSIDR)\r
+GCC_ASM_EXPORT (ReadCLIDR)\r
+\r
+#------------------------------------------------------------------------------\r
+\r
+.set MPIDR_U_BIT, (30)\r
+.set MPIDR_U_MASK, (1 << MPIDR_U_BIT)\r
+.set DAIF_FIQ_BIT, (1 << 0)\r
+.set DAIF_IRQ_BIT, (1 << 1)\r
+.set DAIF_ABORT_BIT, (1 << 2)\r
+.set DAIF_DEBUG_BIT, (1 << 3)\r
+.set DAIF_INT_BITS, (DAIF_FIQ_BIT | DAIF_IRQ_BIT)\r
+.set DAIF_ALL, (DAIF_DEBUG_BIT | DAIF_ABORT_BIT | DAIF_INT_BITS)\r
+\r
+\r
+ASM_PFX(ArmIsMpCore):\r
+ mrs x0, mpidr_el1 // Read EL1 Multiprocessor Affinity Register (MPIDR)\r
+ and x0, x0, #MPIDR_U_MASK // U Bit clear, the processor is part of a multiprocessor system\r
+ lsr x0, x0, #MPIDR_U_BIT\r
+ eor x0, x0, #1\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmEnableAsynchronousAbort):\r
+ msr daifclr, #DAIF_ABORT_BIT\r
+ isb\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmDisableAsynchronousAbort):\r
+ msr daifset, #DAIF_ABORT_BIT\r
+ isb\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmEnableIrq):\r
+ msr daifclr, #DAIF_IRQ_BIT\r
+ isb\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmDisableIrq):\r
+ msr daifset, #DAIF_IRQ_BIT\r
+ isb\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmEnableFiq):\r
+ msr daifclr, #DAIF_FIQ_BIT\r
+ isb\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmDisableFiq):\r
+ msr daifset, #DAIF_FIQ_BIT\r
+ isb\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmEnableInterrupts):\r
+ msr daifclr, #DAIF_INT_BITS\r
+ isb\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmDisableInterrupts):\r
+ msr daifset, #DAIF_INT_BITS\r
+ isb\r
+ ret\r
+\r
+\r
+ASM_PFX(ArmDisableAllExceptions):\r
+ msr daifset, #DAIF_ALL\r
+ isb\r
+ ret\r
+\r
+\r
+// UINT32\r
+// ReadCCSIDR (\r
+// IN UINT32 CSSELR\r
+// )\r
+ASM_PFX(ReadCCSIDR):\r
+ msr csselr_el1, x0 // Write Cache Size Selection Register (CSSELR)\r
+ isb\r
+ mrs x0, ccsidr_el1 // Read current Cache Size ID Register (CCSIDR)\r
+ ret\r
+\r
+\r
+// UINT32\r
+// ReadCLIDR (\r
+// VOID\r
+// )\r
+ASM_PFX(ReadCLIDR):\r
+ mrs x0, clidr_el1 // Read Cache Level ID Register\r
+ ret\r
+\r
+ASM_FUNCTION_REMOVE_IF_UNREFERENCED\r
#include <Library/DebugLib.h>\r
#include "ArmV7Lib.h"\r
#include "ArmLibPrivate.h"\r
-#include <Library/ArmV7ArchTimerLib.h>\r
+#include <Library/ArmArchTimerLib.h>\r
\r
VOID\r
EFIAPI\r
../Common/Arm/ArmLibSupport.S | GCC\r
../Common/Arm/ArmLibSupport.asm | RVCT\r
\r
+[Sources.AARCH64]\r
+ ../Common/AArch64/ArmLibSupport.S | GCC\r
+\r
[Packages]\r
ArmPkg/ArmPkg.dec\r
MdePkg/MdePkg.dec\r
--- /dev/null
+//\r
+// Copyright (c) 2012-2013, ARM Limited. All rights reserved.\r
+//\r
+// This program and the accompanying materials\r
+// are licensed and made available under the terms and conditions of the BSD License\r
+// which accompanies this distribution. The full text of the license may be found at\r
+// http://opensource.org/licenses/bsd-license.php\r
+//\r
+// THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+// WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+//\r
+//\r
+\r
+.text\r
+.align 3\r
+\r
+GCC_ASM_EXPORT(ArmCallSmc)\r
+GCC_ASM_EXPORT(ArmCallSmcArg1)\r
+GCC_ASM_EXPORT(ArmCallSmcArg2)\r
+GCC_ASM_EXPORT(ArmCallSmcArg3)\r
+\r
+ASM_PFX(ArmCallSmc):\r
+ str x1, [sp, #-0x10]!\r
+ mov x1, x0\r
+ ldr x0,[x1]\r
+ smc #0\r
+ str x0,[x1]\r
+ ldr x1, [sp], #0x10\r
+ ret\r
+\r
+ASM_PFX(ArmCallSmcArg1):\r
+ stp x2, x3, [sp, #-0x10]!\r
+ mov x2, x0\r
+ mov x3, x1\r
+ ldr x0,[x2]\r
+ ldr x1,[x3]\r
+ smc #0\r
+ str x0,[x2]\r
+ str x1,[x3]\r
+ ldp x2, x3, [sp], #0x10\r
+ ret\r
+\r
+ASM_PFX(ArmCallSmcArg2):\r
+ stp x3, x4, [sp, #-0x10]!\r
+ str x5, [sp, #-0x10]! // SP must stay 16-byte aligned on AArch64 (a #-8 push faults when SCTLR_ELx.SA is set)\r
+ mov x3, x0\r
+ mov x4, x1\r
+ mov x5, x2\r
+ ldr x0,[x3]\r
+ ldr x1,[x4]\r
+ ldr x2,[x5]\r
+ smc #0\r
+ str x0,[x3]\r
+ str x1,[x4]\r
+ str x2,[x5]\r
+ ldr x5, [sp], #0x10 // pop matches the 16-byte aligned push above\r
+ ldp x3, x4, [sp], #0x10\r
+ ret\r
+\r
+ASM_PFX(ArmCallSmcArg3):\r
+ stp x4, x5, [sp, #-0x10]!\r
+ stp x6, x7, [sp, #-0x10]!\r
+ mov x4, x0\r
+ mov x5, x1\r
+ mov x6, x2\r
+ mov x7, x3\r
+ ldr x0,[x4]\r
+ ldr x1,[x5]\r
+ ldr x2,[x6]\r
+ ldr x3,[x7]\r
+ smc #0\r
+ str x0,[x4]\r
+ str x1,[x5]\r
+ str x2,[x6]\r
+ str x3,[x7]\r
+ ldp x4, x5, [sp], #0x10\r
+ ldp x6, x7, [sp], #0x10\r
+ ret\r
Arm/ArmSmc.asm | RVCT\r
Arm/ArmSmc.S | GCC\r
\r
+[Sources.AARCH64]\r
+ AArch64/ArmSmc.S | GCC\r
+\r
[Packages]\r
MdePkg/MdePkg.dec\r
ArmPkg/ArmPkg.dec\r
--- /dev/null
+//\r
+// Copyright (c) 2012-2013, ARM Limited. All rights reserved.\r
+//\r
+// This program and the accompanying materials\r
+// are licensed and made available under the terms and conditions of the BSD License\r
+// which accompanies this distribution. The full text of the license may be found at\r
+// http://opensource.org/licenses/bsd-license.php\r
+//\r
+// THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+// WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+//\r
+//\r
+\r
+.text\r
+.align 3\r
+\r
+GCC_ASM_EXPORT(ArmCallSmc)\r
+GCC_ASM_EXPORT(ArmCallSmcArg1)\r
+GCC_ASM_EXPORT(ArmCallSmcArg2)\r
+GCC_ASM_EXPORT(ArmCallSmcArg3)\r
+\r
+ASM_PFX(ArmCallSmc):\r
+ ret\r
+\r
+ASM_PFX(ArmCallSmcArg1):\r
+ ret\r
+\r
+ASM_PFX(ArmCallSmcArg2):\r
+ ret\r
+\r
+ASM_PFX(ArmCallSmcArg3):\r
+ ret\r
Arm/ArmSmcNull.asm | RVCT\r
Arm/ArmSmcNull.S | GCC\r
\r
+[Sources.AARCH64]\r
+ AArch64/ArmSmcNull.S | GCC\r
+\r
[Packages]\r
MdePkg/MdePkg.dec\r
ArmPkg/ArmPkg.dec\r
--- /dev/null
+/** @file\r
+\r
+ Copyright (c) 2012-2013, ARM Ltd. All rights reserved.<BR>\r
+\r
+ This program and the accompanying materials\r
+ are licensed and made available under the terms and conditions of the BSD License\r
+ which accompanies this distribution. The full text of the license may be found at\r
+ http://opensource.org/licenses/bsd-license.php\r
+\r
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include "MemLibInternals.h"\r
+\r
+/**\r
+ Copy Length bytes from Source to Destination.\r
+\r
+ @param DestinationBuffer Target of copy\r
+ @param SourceBuffer Place to copy from\r
+ @param Length Number of bytes to copy\r
+\r
+ @return Destination\r
+\r
+**/\r
+VOID *\r
+EFIAPI\r
+InternalMemCopyMem (\r
+ OUT VOID *DestinationBuffer,\r
+ IN CONST VOID *SourceBuffer,\r
+ IN UINTN Length\r
+ )\r
+{\r
+ //\r
+ // Declare the local variables that actually move the data elements as\r
+ // volatile to prevent the optimizer from replacing this function with\r
+ // the intrinsic memcpy()\r
+ //\r
+ volatile UINT8 *Destination8;\r
+ CONST UINT8 *Source8;\r
+ volatile UINT32 *Destination32;\r
+ CONST UINT32 *Source32;\r
+ volatile UINT64 *Destination64;\r
+ CONST UINT64 *Source64;\r
+ UINTN Alignment;\r
+\r
+ if ((((UINTN)DestinationBuffer & 0x7) == 0) && (((UINTN)SourceBuffer & 0x7) == 0) && (Length >= 8)) {\r
+ if (SourceBuffer > DestinationBuffer) {\r
+ Destination64 = (UINT64*)DestinationBuffer;\r
+ Source64 = (CONST UINT64*)SourceBuffer;\r
+ while (Length >= 8) {\r
+ *(Destination64++) = *(Source64++);\r
+ Length -= 8;\r
+ }\r
+\r
+ // Finish if there are still some bytes to copy\r
+ Destination8 = (UINT8*)Destination64;\r
+ Source8 = (CONST UINT8*)Source64;\r
+ while (Length-- != 0) {\r
+ *(Destination8++) = *(Source8++);\r
+ }\r
+ } else if (SourceBuffer < DestinationBuffer) {\r
+ Destination64 = (UINT64*)((UINTN)DestinationBuffer + Length);\r
+ Source64 = (CONST UINT64*)((UINTN)SourceBuffer + Length);\r
+\r
+ // Destination64 and Source64 were aligned on a 64-bit boundary\r
+ // but if length is not a multiple of 8 bytes then they won't be\r
+ // anymore.\r
+\r
+ Alignment = Length & 0x7;\r
+ if (Alignment != 0) {\r
+ Destination8 = (UINT8*)Destination64;\r
+ Source8 = (CONST UINT8*)Source64;\r
+\r
+ while (Alignment-- != 0) {\r
+ *(--Destination8) = *(--Source8);\r
+ --Length;\r
+ }\r
+ Destination64 = (UINT64*)Destination8;\r
+ Source64 = (CONST UINT64*)Source8;\r
+ }\r
+\r
+ while (Length > 0) {\r
+ *(--Destination64) = *(--Source64);\r
+ Length -= 8;\r
+ }\r
+ }\r
+ } else if ((((UINTN)DestinationBuffer & 0x3) == 0) && (((UINTN)SourceBuffer & 0x3) == 0) && (Length >= 4)) {\r
+ if (SourceBuffer > DestinationBuffer) {\r
+ Destination32 = (UINT32*)DestinationBuffer;\r
+ Source32 = (CONST UINT32*)SourceBuffer;\r
+ while (Length >= 4) {\r
+ *(Destination32++) = *(Source32++);\r
+ Length -= 4;\r
+ }\r
+\r
+ // Finish if there are still some bytes to copy\r
+ Destination8 = (UINT8*)Destination32;\r
+ Source8 = (CONST UINT8*)Source32;\r
+ while (Length-- != 0) {\r
+ *(Destination8++) = *(Source8++);\r
+ }\r
+ } else if (SourceBuffer < DestinationBuffer) {\r
+ Destination32 = (UINT32*)((UINTN)DestinationBuffer + Length);\r
+ Source32 = (CONST UINT32*)((UINTN)SourceBuffer + Length);\r
+\r
+ // Destination32 and Source32 were aligned on a 32-bit boundary\r
+ // but if length is not a multiple of 4 bytes then they won't be\r
+ // anymore.\r
+\r
+ Alignment = Length & 0x3;\r
+ if (Alignment != 0) {\r
+ Destination8 = (UINT8*)Destination32;\r
+ Source8 = (CONST UINT8*)Source32;\r
+\r
+ while (Alignment-- != 0) {\r
+ *(--Destination8) = *(--Source8);\r
+ --Length;\r
+ }\r
+ Destination32 = (UINT32*)Destination8;\r
+ Source32 = (CONST UINT32*)Source8;\r
+ }\r
+\r
+ while (Length > 0) {\r
+ *(--Destination32) = *(--Source32);\r
+ Length -= 4;\r
+ }\r
+ }\r
+ } else {\r
+ if (SourceBuffer > DestinationBuffer) {\r
+ Destination8 = (UINT8*)DestinationBuffer;\r
+ Source8 = (CONST UINT8*)SourceBuffer;\r
+ while (Length-- != 0) {\r
+ *(Destination8++) = *(Source8++);\r
+ }\r
+ } else if (SourceBuffer < DestinationBuffer) {\r
+ Destination8 = (UINT8*)DestinationBuffer + Length;\r
+ Source8 = (CONST UINT8*)SourceBuffer + Length;\r
+ while (Length-- != 0) {\r
+ *(--Destination8) = *(--Source8);\r
+ }\r
+ }\r
+ }\r
+ return DestinationBuffer;\r
+}\r
--- /dev/null
+/** @file\r
+\r
+ Copyright (c) 2012-2013, ARM Ltd. All rights reserved.<BR>\r
+\r
+ This program and the accompanying materials\r
+ are licensed and made available under the terms and conditions of the BSD License\r
+ which accompanies this distribution. The full text of the license may be found at\r
+ http://opensource.org/licenses/bsd-license.php\r
+\r
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include "MemLibInternals.h"\r
+\r
+/**\r
+ Set Buffer to Value for Size bytes.\r
+\r
+ @param Buffer Memory to set.\r
+ @param Length Number of bytes to set\r
+ @param Value Value of the set operation.\r
+\r
+ @return Buffer\r
+\r
+**/\r
+VOID *\r
+EFIAPI\r
+InternalMemSetMem (\r
+ OUT VOID *Buffer,\r
+ IN UINTN Length,\r
+ IN UINT8 Value\r
+ )\r
+{\r
+ //\r
+ // Declare the local variables that actually move the data elements as\r
+ // volatile to prevent the optimizer from replacing this function with\r
+ // the intrinsic memset()\r
+ //\r
+ volatile UINT8 *Pointer8;\r
+ volatile UINT32 *Pointer32;\r
+ volatile UINT64 *Pointer64;\r
+ UINT32 Value32;\r
+ UINT64 Value64;\r
+\r
+ if ((((UINTN)Buffer & 0x7) == 0) && (Length >= 8)) {\r
+ // Generate the 64bit value\r
+ Value32 = (Value << 24) | (Value << 16) | (Value << 8) | Value;\r
+ Value64 = (((UINT64)Value32) << 32) | Value32;\r
+\r
+ Pointer64 = (UINT64*)Buffer;\r
+ while (Length >= 8) {\r
+ *(Pointer64++) = Value64;\r
+ Length -= 8;\r
+ }\r
+\r
+ // Finish with bytes if needed\r
+ Pointer8 = (UINT8*)Pointer64;\r
+ while (Length-- > 0) {\r
+ *(Pointer8++) = Value;\r
+ }\r
+ } else if ((((UINTN)Buffer & 0x3) == 0) && (Length >= 4)) {\r
+ // Generate the 32bit value\r
+ Value32 = (Value << 24) | (Value << 16) | (Value << 8) | Value;\r
+\r
+ Pointer32 = (UINT32*)Buffer;\r
+ while (Length >= 4) {\r
+ *(Pointer32++) = Value32;\r
+ Length -= 4;\r
+ }\r
+\r
+ // Finish with bytes if needed\r
+ Pointer8 = (UINT8*)Pointer32;\r
+ while (Length-- > 0) {\r
+ *(Pointer8++) = Value;\r
+ }\r
+ } else {\r
+ Pointer8 = (UINT8*)Buffer;\r
+ while (Length-- > 0) {\r
+ *(Pointer8++) = Value;\r
+ }\r
+ }\r
+ return Buffer;\r
+}\r
#\r
# Copyright (c) 2007 - 2010, Intel Corporation. All rights reserved.<BR>\r
# Portions copyright (c) 2010, Apple Inc. All rights reserved.<BR>\r
+# Portions copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>\r
#\r
# This program and the accompanying materials\r
# are licensed and made available under the terms and conditions of the BSD License\r
\r
\r
#\r
-# VALID_ARCHITECTURES = ARM\r
+# VALID_ARCHITECTURES = ARM AARCH64\r
#\r
\r
\r
Arm/SetMem.asm\r
Arm/SetMem.S\r
\r
+[Sources.AARCH64]\r
+ AArch64/CopyMem.c\r
+ AArch64/SetMem.c\r
\r
[Packages]\r
MdePkg/MdePkg.dec\r
--- /dev/null
+/** @file\r
+ Specific relocation fixups for ARM architecture.\r
+\r
+ Copyright (c) 2006 - 2009, Intel Corporation. All rights reserved.<BR>\r
+ Portions copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>\r
+ Portions copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>\r
+\r
+ This program and the accompanying materials\r
+ are licensed and made available under the terms and conditions of the BSD License\r
+ which accompanies this distribution. The full text of the license may be found at\r
+ http://opensource.org/licenses/bsd-license.php.\r
+\r
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include "BasePeCoffLibInternals.h"\r
+#include <Library/BaseLib.h>\r
+\r
+//\r
+// Note: Currently only large memory model is supported by UEFI relocation code.\r
+//\r
+\r
+/**\r
+ Performs an AARCH64-based specific relocation fixup and is a no-op on other\r
+ instruction sets.\r
+\r
+ @param Reloc The pointer to the relocation record.\r
+ @param Fixup The pointer to the address to fix up.\r
+ @param FixupData The pointer to a buffer to log the fixups.\r
+ @param Adjust The offset to adjust the fixup.\r
+\r
+ @return Status code.\r
+\r
+**/\r
+RETURN_STATUS\r
+PeCoffLoaderRelocateImageEx (\r
+ IN UINT16 **Reloc,\r
+ IN OUT CHAR8 *Fixup,\r
+ IN OUT CHAR8 **FixupData,\r
+ IN UINT64 Adjust\r
+ )\r
+{\r
+ UINT64 *F64;\r
+\r
+ switch ((**Reloc) >> 12) {\r
+\r
+ case EFI_IMAGE_REL_BASED_DIR64:\r
+ F64 = (UINT64 *) Fixup;\r
+ *F64 = *F64 + (UINT64) Adjust;\r
+ if (*FixupData != NULL) {\r
+ *FixupData = ALIGN_POINTER(*FixupData, sizeof(UINT64));\r
+ *(UINT64 *)(*FixupData) = *F64;\r
+ *FixupData = *FixupData + sizeof(UINT64);\r
+ }\r
+ break;\r
+\r
+ default:\r
+ return RETURN_UNSUPPORTED;\r
+ }\r
+\r
+ return RETURN_SUCCESS;\r
+}\r
+\r
+/**\r
+ Returns TRUE if the machine type of PE/COFF image is supported. Supported\r
+ does not mean the image can be executed it means the PE/COFF loader supports\r
+ loading and relocating of the image type. It's up to the caller to support\r
+ the entry point.\r
+\r
+ @param Machine Machine type from the PE Header.\r
+\r
+ @return TRUE if this PE/COFF loader can load the image\r
+\r
+**/\r
+BOOLEAN\r
+PeCoffLoaderImageFormatSupported (\r
+ IN UINT16 Machine\r
+ )\r
+{\r
+ if ((Machine == IMAGE_FILE_MACHINE_AARCH64) || (Machine == IMAGE_FILE_MACHINE_EBC)) {\r
+ return TRUE;\r
+ }\r
+\r
+ return FALSE;\r
+}\r
+\r
+/**\r
+ Performs an AARCH64-based specific re-relocation fixup and is a no-op on other\r
+ instruction sets. This is used to re-relocate the image into the EFI virtual\r
+ space for runtime calls.\r
+\r
+ @param Reloc The pointer to the relocation record.\r
+ @param Fixup The pointer to the address to fix up.\r
+ @param FixupData The pointer to a buffer to log the fixups.\r
+ @param Adjust The offset to adjust the fixup.\r
+\r
+ @return Status code.\r
+\r
+**/\r
+RETURN_STATUS\r
+PeHotRelocateImageEx (\r
+ IN UINT16 **Reloc,\r
+ IN OUT CHAR8 *Fixup,\r
+ IN OUT CHAR8 **FixupData,\r
+ IN UINT64 Adjust\r
+ )\r
+{\r
+ UINT64 *Fixup64;\r
+\r
+ switch ((**Reloc) >> 12) {\r
+ case EFI_IMAGE_REL_BASED_DIR64:\r
+ Fixup64 = (UINT64 *) Fixup;\r
+ *FixupData = ALIGN_POINTER (*FixupData, sizeof (UINT64));\r
+ if (*(UINT64 *) (*FixupData) == *Fixup64) {\r
+ *Fixup64 = *Fixup64 + (UINT64) Adjust;\r
+ }\r
+\r
+ *FixupData = *FixupData + sizeof (UINT64);\r
+ break;\r
+\r
+ default:\r
+ DEBUG ((EFI_D_ERROR, "PeHotRelocateEx:unknown fixed type\n"));\r
+ return RETURN_UNSUPPORTED;\r
+ }\r
+\r
+ return RETURN_SUCCESS;\r
+}\r
#\r
# Copyright (c) 2006 - 2010, Intel Corporation. All rights reserved.<BR>\r
# Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>\r
+# Portions copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>\r
#\r
# This program and the accompanying materials\r
# are licensed and made available under the terms and conditions of the BSD License\r
\r
\r
#\r
-# VALID_ARCHITECTURES = IA32 X64 IPF EBC ARM\r
+# VALID_ARCHITECTURES = IA32 X64 IPF EBC ARM AARCH64\r
#\r
\r
[Sources]\r
[Sources.ARM]\r
Arm/PeCoffLoaderEx.c\r
\r
+[Sources.AARCH64]\r
+ AArch64/PeCoffLoaderEx.c\r
+\r
[Packages]\r
MdePkg/MdePkg.dec\r
\r
INTN err;\r
INTN node;\r
INTN cpu_node;\r
- INTN lenp;\r
+ INT32 lenp;\r
CONST VOID* BootArg;\r
CONST VOID* Method;\r
EFI_PHYSICAL_ADDRESS InitrdImageStart;\r
--- /dev/null
+/*\r
+ * Copyright (c) 2011 - 2013, ARM Ltd\r
+ * All rights reserved.\r
+ *\r
+ * Redistribution and use in source and binary forms, with or without\r
+ * modification, are permitted provided that the following conditions\r
+ * are met:\r
+ * 1. Redistributions of source code must retain the above copyright\r
+ * notice, this list of conditions and the following disclaimer.\r
+ * 2. Redistributions in binary form must reproduce the above copyright\r
+ * notice, this list of conditions and the following disclaimer in the\r
+ * documentation and/or other materials provided with the distribution.\r
+ * 3. The name of the company may not be used to endorse or promote\r
+ * products derived from this software without specific prior written\r
+ * permission.\r
+ *\r
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED\r
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF\r
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\r
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\r
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED\r
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\r
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\r
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\r
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\r
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\r
+ */\r
+\r
+\r
+.text\r
+.align 2\r
+\r
+\r
+ASM_GLOBAL ASM_PFX(memcpy)\r
+\r
+\r
+// Taken from Newlib BSD implementation.\r
+ASM_PFX(memcpy):\r
+ // Copy dst to x6, so we can preserve return value.\r
+ mov x6, x0\r
+\r
+ // NOTE: although size_t is unsigned, this code uses signed\r
+ // comparisons on x2 so relies on nb never having its top bit\r
+ // set. In practice this is not going to be a real problem.\r
+\r
+ // Require at least 64 bytes to be worth aligning.\r
+ cmp x2, #64\r
+ blt qwordcopy\r
+\r
+ // Compute offset to align destination to 16 bytes.\r
+ neg x3, x0\r
+ and x3, x3, 15\r
+\r
+ cbz x3, blockcopy // offset == 0 is likely\r
+\r
+ // We know there is at least 64 bytes to be done, so we\r
+ // do a 16 byte misaligned copy at first and then later do\r
+ // all 16-byte aligned copies. Some bytes will be copied\r
+ // twice, but there's no harm in that since memcpy does not\r
+ // guarantee correctness on overlap.\r
+\r
+ sub x2, x2, x3 // nb -= offset\r
+ ldp x4, x5, [x1]\r
+ add x1, x1, x3\r
+ stp x4, x5, [x6]\r
+ add x6, x6, x3\r
+\r
+ // The destination pointer is now qword (16 byte) aligned.\r
+ // (The src pointer might be.)\r
+\r
+blockcopy:\r
+ // Copy 64 bytes at a time.\r
+ subs x2, x2, #64\r
+ blt 3f\r
+2: subs x2, x2, #64\r
+ ldp x4, x5, [x1,#0]\r
+ ldp x8, x9, [x1,#16]\r
+ ldp x10,x11,[x1,#32]\r
+ ldp x12,x13,[x1,#48]\r
+ add x1, x1, #64\r
+ stp x4, x5, [x6,#0]\r
+ stp x8, x9, [x6,#16]\r
+ stp x10,x11,[x6,#32]\r
+ stp x12,x13,[x6,#48]\r
+ add x6, x6, #64\r
+ bge 2b\r
+\r
+ // Unwind pre-decrement\r
+3: add x2, x2, #64\r
+\r
+qwordcopy:\r
+ // Copy 0-48 bytes, 16 bytes at a time.\r
+ subs x2, x2, #16\r
+ blt tailcopy\r
+2: ldp x4, x5, [x1],#16\r
+ subs x2, x2, #16\r
+ stp x4, x5, [x6],#16\r
+ bge 2b\r
+\r
+ // No need to unwind the pre-decrement, it would not change\r
+ // the low 4 bits of the count. But how likely is it for the\r
+ // byte count to be multiple of 16? Is it worth the overhead\r
+ // of testing for x2 == -16?\r
+\r
+tailcopy:\r
+ // Copy trailing 0-15 bytes.\r
+ tbz x2, #3, 1f\r
+ ldr x4, [x1],#8 // copy 8 bytes\r
+ str x4, [x6],#8\r
+1:\r
+ tbz x2, #2, 1f\r
+ ldr w4, [x1],#4 // copy 4 bytes\r
+ str w4, [x6],#4\r
+1:\r
+ tbz x2, #1, 1f\r
+ ldrh w4, [x1],#2 // copy 2 bytes\r
+ strh w4, [x6],#2\r
+1:\r
+ tbz x2, #0, return\r
+ ldrb w4, [x1] // copy 1 byte\r
+ strb w4, [x6]\r
+\r
+return:\r
+ // This is the only return point of memcpy.\r
+ ret\r
VERSION_STRING = 1.0\r
LIBRARY_CLASS = CompilerIntrinsicsLib \r
\r
-\r
-[Sources.common]\r
-\r
+[Sources.AARCH64]\r
+ AArch64/memcpy.S | GCC\r
\r
[Sources.ARM]\r
Arm/mullu.asm | RVCT\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>\r
+#\r
+# This program and the accompanying materials\r
+# are licensed and made available under the terms and conditions of the BSD License\r
+# which accompanies this distribution. The full text of the license may be found at\r
+# http://opensource.org/licenses/bsd-license.php\r
+#\r
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+GCC_ASM_EXPORT(DebugAgentVectorTable)\r
+GCC_ASM_IMPORT(DefaultExceptionHandler)\r
+\r
+.text\r
+ASM_PFX(DebugAgentVectorTable):\r
+\r
+//\r
+// Current EL with SP0 : 0x0 - 0x180\r
+//\r
+.align 11\r
+ASM_PFX(SynchronousExceptionSP0):\r
+ b ASM_PFX(SynchronousExceptionSP0)\r
+\r
+.align 7\r
+ASM_PFX(IrqSP0):\r
+ b ASM_PFX(IrqSP0)\r
+\r
+.align 7\r
+ASM_PFX(FiqSP0):\r
+ b ASM_PFX(FiqSP0)\r
+\r
+.align 7\r
+ASM_PFX(SErrorSP0):\r
+ b ASM_PFX(SErrorSP0)\r
+\r
+//\r
+// Current EL with SPx: 0x200 - 0x380\r
+//\r
+.align 7\r
+ASM_PFX(SynchronousExceptionSPx):\r
+ b ASM_PFX(SynchronousExceptionSPx)\r
+\r
+.align 7\r
+ASM_PFX(IrqSPx):\r
+ b ASM_PFX(IrqSPx)\r
+\r
+.align 7\r
+ASM_PFX(FiqSPx):\r
+ b ASM_PFX(FiqSPx)\r
+\r
+.align 7\r
+ASM_PFX(SErrorSPx):\r
+ b ASM_PFX(SErrorSPx)\r
+\r
+/* Lower EL using AArch64 : 0x400 - 0x580 */\r
+.align 7\r
+ASM_PFX(SynchronousExceptionA64):\r
+ b ASM_PFX(SynchronousExceptionA64)\r
+\r
+.align 7\r
+ASM_PFX(IrqA64):\r
+ b ASM_PFX(IrqA64)\r
+\r
+.align 7\r
+ASM_PFX(FiqA64):\r
+ b ASM_PFX(FiqA64)\r
+\r
+.align 7\r
+ASM_PFX(SErrorA64):\r
+ b ASM_PFX(SErrorA64)\r
+\r
+//\r
+// Lower EL using AArch32 : 0x600 - 0x780\r
+//\r
+.align 7\r
+ASM_PFX(SynchronousExceptionA32):\r
+ b ASM_PFX(SynchronousExceptionA32)\r
+\r
+.align 7\r
+ASM_PFX(IrqA32):\r
+ b ASM_PFX(IrqA32)\r
+\r
+.align 7\r
+ASM_PFX(FiqA32):\r
+ b ASM_PFX(FiqA32)\r
+\r
+.align 7\r
+ASM_PFX(SErrorA32):\r
+ b ASM_PFX(SErrorA32)\r
\r
// Now we've got UART, make the check:\r
// - The Vector table must be 32-byte aligned\r
- ASSERT(((UINTN)DebugAgentVectorTable & ARM_VECTOR_TABLE_ALIGNMENT) == 0);\r
+ //Need to fix basetools ASSERT(((UINTN)DebugAgentVectorTable & ARM_VECTOR_TABLE_ALIGNMENT) == 0);\r
ArmWriteVBar ((UINTN)DebugAgentVectorTable);\r
\r
// We use InitFlag to know if DebugAgent has been intialized from\r
Arm/DebugAgentException.asm | RVCT\r
Arm/DebugAgentException.S | GCC\r
\r
+[Sources.AARCH64]\r
+ AArch64/DebugAgentException.S | GCC\r
+\r
[Packages]\r
MdePkg/MdePkg.dec\r
MdeModulePkg/MdeModulePkg.dec\r
--- /dev/null
+/** @file\r
+ Default exception handler\r
+\r
+ Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>\r
+ Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>\r
+\r
+ This program and the accompanying materials\r
+ are licensed and made available under the terms and conditions of the BSD License\r
+ which accompanies this distribution. The full text of the license may be found at\r
+ http://opensource.org/licenses/bsd-license.php\r
+\r
+ THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+ WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+\r
+**/\r
+\r
+#include <Uefi.h>\r
+#include <Library/UefiLib.h>\r
+#include <Library/BaseLib.h>\r
+#include <Library/DebugLib.h>\r
+#include <Library/PeCoffGetEntryPointLib.h>\r
+#include <Library/PrintLib.h>\r
+#include <Library/ArmDisassemblerLib.h>\r
+#include <Library/SerialPortLib.h>\r
+\r
+#include <Guid/DebugImageInfoTable.h>\r
+#include <Protocol/DebugSupport.h>\r
+#include <Protocol/LoadedImage.h>\r
+\r
+// Cached pointer to the EFI debug image info table. NOTE(review): not\r
+// assigned anywhere in this file's visible code — presumably populated by\r
+// the GetImageName support code; confirm.\r
+EFI_DEBUG_IMAGE_INFO_TABLE_HEADER *gDebugImageTableHeader = NULL;\r
+\r
+// Printable names for the four AArch64 exception classes, indexed by\r
+// EFI_EXCEPTION_TYPE; order matches the vector-table group layout\r
+// (Synchronous, IRQ, FIQ, SError).\r
+STATIC CHAR8 *gExceptionTypeString[] = {\r
+ "Synchronous",\r
+ "IRQ",\r
+ "FIQ",\r
+ "SError"\r
+};\r
+\r
+// Resolve FaultAddress to the debug (PDB) name of the loaded PE/COFF image\r
+// containing it, also returning the image base and size of its headers.\r
+// Defined elsewhere in this library; returns NULL when the image is unknown.\r
+CHAR8 *\r
+GetImageName (\r
+ IN UINTN FaultAddress,\r
+ OUT UINTN *ImageBase,\r
+ OUT UINTN *PeCoffSizeOfHeaders\r
+ );\r
+\r
+/**\r
+ This is the default action to take on an unexpected exception\r
+\r
+ Since this is exception context don't do anything crazy like try to allocate memory.\r
+\r
+ @param ExceptionType Type of the exception\r
+ @param SystemContext Register state at the time of the Exception\r
+\r
+**/\r
+VOID\r
+DefaultExceptionHandler (\r
+ IN EFI_EXCEPTION_TYPE ExceptionType,\r
+ IN OUT EFI_SYSTEM_CONTEXT SystemContext\r
+ )\r
+{\r
+ CHAR8 Buffer[100];\r
+ UINTN CharCount;\r
+\r
+ // Announce the exception class directly on the serial port (no allocation,\r
+ // safe in exception context).\r
+ // NOTE(review): gExceptionTypeString has exactly 4 entries; this indexes\r
+ // it without a bounds check — confirm callers only pass types 0..3.\r
+ CharCount = AsciiSPrint (Buffer,sizeof (Buffer),"\n\n%a Exception: \n", gExceptionTypeString[ExceptionType]);\r
+ SerialPortWrite ((UINT8 *) Buffer, CharCount);\r
+\r
+ // Best effort (DEBUG builds only): map the faulting address (ELR) back to\r
+ // the loaded image that contains it.\r
+ DEBUG_CODE_BEGIN ();\r
+ CHAR8 *Pdb;\r
+ UINTN ImageBase;\r
+ UINTN PeCoffSizeOfHeader;\r
+ Pdb = GetImageName (SystemContext.SystemContextAArch64->ELR, &ImageBase, &PeCoffSizeOfHeader);\r
+ if (Pdb != NULL) {\r
+ DEBUG ((EFI_D_ERROR, "%a loaded at 0x%016lx \n", Pdb, ImageBase));\r
+ }\r
+ DEBUG_CODE_END ();\r
+\r
+ // Dump the general-purpose register file from the saved context.\r
+ DEBUG ((EFI_D_ERROR, "\n X0 0x%016lx X1 0x%016lx X2 0x%016lx X3 0x%016lx\n", SystemContext.SystemContextAArch64->X0, SystemContext.SystemContextAArch64->X1, SystemContext.SystemContextAArch64->X2, SystemContext.SystemContextAArch64->X3));\r
+ DEBUG ((EFI_D_ERROR, " X4 0x%016lx X5 0x%016lx X6 0x%016lx X7 0x%016lx\n", SystemContext.SystemContextAArch64->X4, SystemContext.SystemContextAArch64->X5, SystemContext.SystemContextAArch64->X6, SystemContext.SystemContextAArch64->X7));\r
+ DEBUG ((EFI_D_ERROR, " X8 0x%016lx X9 0x%016lx X10 0x%016lx X11 0x%016lx\n", SystemContext.SystemContextAArch64->X8, SystemContext.SystemContextAArch64->X9, SystemContext.SystemContextAArch64->X10, SystemContext.SystemContextAArch64->X11));\r
+ DEBUG ((EFI_D_ERROR, " X12 0x%016lx X13 0x%016lx X14 0x%016lx X15 0x%016lx\n", SystemContext.SystemContextAArch64->X12, SystemContext.SystemContextAArch64->X13, SystemContext.SystemContextAArch64->X14, SystemContext.SystemContextAArch64->X15));\r
+ DEBUG ((EFI_D_ERROR, " X16 0x%016lx X17 0x%016lx X18 0x%016lx X19 0x%016lx\n", SystemContext.SystemContextAArch64->X16, SystemContext.SystemContextAArch64->X17, SystemContext.SystemContextAArch64->X18, SystemContext.SystemContextAArch64->X19));\r
+ DEBUG ((EFI_D_ERROR, " X20 0x%016lx X21 0x%016lx X22 0x%016lx X23 0x%016lx\n", SystemContext.SystemContextAArch64->X20, SystemContext.SystemContextAArch64->X21, SystemContext.SystemContextAArch64->X22, SystemContext.SystemContextAArch64->X23));\r
+ DEBUG ((EFI_D_ERROR, " X24 0x%016lx X25 0x%016lx X26 0x%016lx X27 0x%016lx\n", SystemContext.SystemContextAArch64->X24, SystemContext.SystemContextAArch64->X25, SystemContext.SystemContextAArch64->X26, SystemContext.SystemContextAArch64->X27));\r
+ DEBUG ((EFI_D_ERROR, " X28 0x%016lx FP 0x%016lx LR 0x%016lx \n", SystemContext.SystemContextAArch64->X28, SystemContext.SystemContextAArch64->FP, SystemContext.SystemContextAArch64->LR));\r
+\r
+ /* We save these as 128bit numbers, but have to print them as two 64bit numbers,\r
+ so swap the 64bit words to correctly represent a 128bit number. */\r
+ DEBUG ((EFI_D_ERROR, "\n V0 0x%016lx %016lx V1 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V0[1], SystemContext.SystemContextAArch64->V0[0], SystemContext.SystemContextAArch64->V1[1], SystemContext.SystemContextAArch64->V1[0]));\r
+ DEBUG ((EFI_D_ERROR, " V2 0x%016lx %016lx V3 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V2[1], SystemContext.SystemContextAArch64->V2[0], SystemContext.SystemContextAArch64->V3[1], SystemContext.SystemContextAArch64->V3[0]));\r
+ DEBUG ((EFI_D_ERROR, " V4 0x%016lx %016lx V5 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V4[1], SystemContext.SystemContextAArch64->V4[0], SystemContext.SystemContextAArch64->V5[1], SystemContext.SystemContextAArch64->V5[0]));\r
+ DEBUG ((EFI_D_ERROR, " V6 0x%016lx %016lx V7 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V6[1], SystemContext.SystemContextAArch64->V6[0], SystemContext.SystemContextAArch64->V7[1], SystemContext.SystemContextAArch64->V7[0]));\r
+ DEBUG ((EFI_D_ERROR, " V8 0x%016lx %016lx V9 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V8[1], SystemContext.SystemContextAArch64->V8[0], SystemContext.SystemContextAArch64->V9[1], SystemContext.SystemContextAArch64->V9[0]));\r
+ DEBUG ((EFI_D_ERROR, " V10 0x%016lx %016lx V11 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V10[1], SystemContext.SystemContextAArch64->V10[0], SystemContext.SystemContextAArch64->V11[1], SystemContext.SystemContextAArch64->V11[0]));\r
+ DEBUG ((EFI_D_ERROR, " V12 0x%016lx %016lx V13 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V12[1], SystemContext.SystemContextAArch64->V12[0], SystemContext.SystemContextAArch64->V13[1], SystemContext.SystemContextAArch64->V13[0]));\r
+ DEBUG ((EFI_D_ERROR, " V14 0x%016lx %016lx V15 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V14[1], SystemContext.SystemContextAArch64->V14[0], SystemContext.SystemContextAArch64->V15[1], SystemContext.SystemContextAArch64->V15[0]));\r
+ DEBUG ((EFI_D_ERROR, " V16 0x%016lx %016lx V17 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V16[1], SystemContext.SystemContextAArch64->V16[0], SystemContext.SystemContextAArch64->V17[1], SystemContext.SystemContextAArch64->V17[0]));\r
+ DEBUG ((EFI_D_ERROR, " V18 0x%016lx %016lx V19 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V18[1], SystemContext.SystemContextAArch64->V18[0], SystemContext.SystemContextAArch64->V19[1], SystemContext.SystemContextAArch64->V19[0]));\r
+ DEBUG ((EFI_D_ERROR, " V20 0x%016lx %016lx V21 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V20[1], SystemContext.SystemContextAArch64->V20[0], SystemContext.SystemContextAArch64->V21[1], SystemContext.SystemContextAArch64->V21[0]));\r
+ DEBUG ((EFI_D_ERROR, " V22 0x%016lx %016lx V23 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V22[1], SystemContext.SystemContextAArch64->V22[0], SystemContext.SystemContextAArch64->V23[1], SystemContext.SystemContextAArch64->V23[0]));\r
+ DEBUG ((EFI_D_ERROR, " V24 0x%016lx %016lx V25 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V24[1], SystemContext.SystemContextAArch64->V24[0], SystemContext.SystemContextAArch64->V25[1], SystemContext.SystemContextAArch64->V25[0]));\r
+ DEBUG ((EFI_D_ERROR, " V26 0x%016lx %016lx V27 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V26[1], SystemContext.SystemContextAArch64->V26[0], SystemContext.SystemContextAArch64->V27[1], SystemContext.SystemContextAArch64->V27[0]));\r
+ DEBUG ((EFI_D_ERROR, " V28 0x%016lx %016lx V29 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V28[1], SystemContext.SystemContextAArch64->V28[0], SystemContext.SystemContextAArch64->V29[1], SystemContext.SystemContextAArch64->V29[0]));\r
+ DEBUG ((EFI_D_ERROR, " V30 0x%016lx %016lx V31 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V30[1], SystemContext.SystemContextAArch64->V30[0], SystemContext.SystemContextAArch64->V31[1], SystemContext.SystemContextAArch64->V31[0]));\r
+\r
+ DEBUG ((EFI_D_ERROR, "\n SP 0x%016lx ELR 0x%016lx SPSR 0x%08lx FPSR 0x%08lx\n ESR 0x%08lx FAR 0x%016lx\n", SystemContext.SystemContextAArch64->SP, SystemContext.SystemContextAArch64->ELR, SystemContext.SystemContextAArch64->SPSR, SystemContext.SystemContextAArch64->FPSR, SystemContext.SystemContextAArch64->ESR, SystemContext.SystemContextAArch64->FAR));\r
+\r
+ // Decode the Exception Syndrome Register: EC (exception class) is bits\r
+ // [31:26], IL (instruction length) is bit [25], ISS (syndrome) is bits\r
+ // [24:0] — the masks/shifts below match that layout.\r
+ DEBUG ((EFI_D_ERROR, "\n ESR : EC 0x%02x IL 0x%x ISS 0x%08x\n", (SystemContext.SystemContextAArch64->ESR & 0xFC000000) >> 26, (SystemContext.SystemContextAArch64->ESR >> 25) & 0x1, SystemContext.SystemContextAArch64->ESR & 0x1FFFFFF ));\r
+\r
+ DEBUG ((EFI_D_ERROR, "\n"));\r
+ // Dead-end the system in DEBUG builds. NOTE(review): ASSERT is a no-op in\r
+ // RELEASE builds, so this handler would then return to the faulting\r
+ // context — confirm that is acceptable.\r
+ ASSERT (FALSE);\r
+}\r
+\r
#/** @file\r
#\r
-# Copyright (c) 2008, Apple Inc. All rights reserved.<BR>\r
+# Copyright (c) 2008, Apple Inc. All rights reserved.<BR>\r
+# Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>\r
#\r
# This program and the accompanying materials\r
# are licensed and made available under the terms and conditions of the BSD License\r
[Sources.ARM]\r
Arm/DefaultExceptionHandler.c\r
\r
+[Sources.AARCH64]\r
+ AArch64/DefaultExceptionHandler.c\r
+\r
[Packages]\r
MdePkg/MdePkg.dec\r
ArmPkg/ArmPkg.dec\r
[Sources.ARM]\r
Arm/DefaultExceptionHandler.c\r
\r
+[Sources.AARCH64]\r
+ AArch64/DefaultExceptionHandler.c\r
+\r
[Packages]\r
MdePkg/MdePkg.dec\r
ArmPkg/ArmPkg.dec\r
--- /dev/null
+#------------------------------------------------------------------------------\r
+#\r
+# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>\r
+# Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>\r
+#\r
+# This program and the accompanying materials\r
+# are licensed and made available under the terms and conditions of the BSD License\r
+# which accompanies this distribution. The full text of the license may be found at\r
+# http://opensource.org/licenses/bsd-license.php\r
+#\r
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+.text\r
+.align 2\r
+\r
+.globl ASM_PFX(GccSemihostCall)\r
+\r
+// Issue a semihosting call to an attached debugger.\r
+// Per the ARM semihosting specification, AArch64 uses HLT #0xF000 as the\r
+// architected semihosting trap (the AArch32 equivalents are SVC/BKPT\r
+// immediates). The debugger reads the operation number and parameter block\r
+// from the argument registers and writes the result back — NOTE(review):\r
+// the AAPCS64 argument registers (W0 = operation, X1 = parameter) are\r
+// presumably already in place from the C caller; confirm against the\r
+// SemihostLib C prototype, which is not visible here.\r
+ASM_PFX(GccSemihostCall):\r
+ hlt #0xf000\r
+ ret\r
# Semihosting JTAG lib\r
#\r
# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>\r
+# Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>\r
#\r
# This program and the accompanying materials\r
# are licensed and made available under the terms and conditions of the BSD License\r
#\r
# The following information is for reference only and not required by the build tools.\r
#\r
-# VALID_ARCHITECTURES = ARM\r
+# VALID_ARCHITECTURES = ARM AARCH64\r
#\r
[Sources.common]\r
SemihostLib.c\r
[Sources.ARM]\r
Arm/GccSemihost.S | GCC\r
\r
+[Sources.AARCH64]\r
+ AArch64/GccSemihost.S | GCC\r
+\r
[Packages]\r
MdePkg/MdePkg.dec\r
ArmPkg/ArmPkg.dec\r
\r
[LibraryClasses]\r
BaseLib\r
- \r\r
+\r