-#------------------------------------------------------------------------------
-#
-# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
-#
-# This program and the accompanying materials
-# are licensed and made available under the terms and conditions of the BSD License
-# which accompanies this distribution. The full text of the license may be found at
-# http://opensource.org/licenses/bsd-license.php
-#
-# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
-# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
-#
-#------------------------------------------------------------------------------
-
-.globl ASM_PFX(ArmInvalidateInstructionCache)
-INTERWORK_FUNC(ArmInvalidateInstructionCache)
-.globl ASM_PFX(ArmInvalidateDataCacheEntryByMVA)
-INTERWORK_FUNC(ArmInvalidateDataCacheEntryByMVA)
-.globl ASM_PFX(ArmCleanDataCacheEntryByMVA)
-INTERWORK_FUNC(ArmCleanDataCacheEntryByMVA)
-.globl ASM_PFX(ArmCleanInvalidateDataCacheEntryByMVA)
-INTERWORK_FUNC(ArmCleanInvalidateDataCacheEntryByMVA)
-.globl ASM_PFX(ArmInvalidateDataCacheEntryBySetWay)
-INTERWORK_FUNC(ArmInvalidateDataCacheEntryBySetWay)
-.globl ASM_PFX(ArmCleanDataCacheEntryBySetWay)
-INTERWORK_FUNC(ArmCleanDataCacheEntryBySetWay)
-.globl ASM_PFX(ArmCleanInvalidateDataCacheEntryBySetWay)
-INTERWORK_FUNC(ArmCleanInvalidateDataCacheEntryBySetWay)
-.globl ASM_PFX(ArmDrainWriteBuffer)
-INTERWORK_FUNC(ArmDrainWriteBuffer)
-.globl ASM_PFX(ArmEnableMmu)
-INTERWORK_FUNC(ArmEnableMmu)
-.globl ASM_PFX(ArmDisableMmu)
-INTERWORK_FUNC(ArmDisableMmu)
-.globl ASM_PFX(ArmMmuEnabled)
-INTERWORK_FUNC(ArmMmuEnabled)
-.globl ASM_PFX(ArmEnableDataCache)
-INTERWORK_FUNC(ArmEnableDataCache)
-.globl ASM_PFX(ArmDisableDataCache)
-INTERWORK_FUNC(ArmDisableDataCache)
-.globl ASM_PFX(ArmEnableInstructionCache)
-INTERWORK_FUNC(ArmEnableInstructionCache)
-.globl ASM_PFX(ArmDisableInstructionCache)
-INTERWORK_FUNC(ArmDisableInstructionCache)
-.globl ASM_PFX(ArmEnableBranchPrediction)
-INTERWORK_FUNC(ArmEnableBranchPrediction)
-.globl ASM_PFX(ArmDisableBranchPrediction)
-INTERWORK_FUNC(ArmDisableBranchPrediction)
-.globl ASM_PFX(ArmV7AllDataCachesOperation)
-INTERWORK_FUNC(ArmV7AllDataCachesOperation)
-.globl ASM_PFX(ArmDataMemoryBarrier)
-INTERWORK_FUNC(ArmDataMemoryBarrier)
-.globl ASM_PFX(ArmDataSyncronizationBarrier)
-INTERWORK_FUNC(ArmDataSyncronizationBarrier)
-.globl ASM_PFX(ArmInstructionSynchronizationBarrier)
-INTERWORK_FUNC(ArmInstructionSynchronizationBarrier)
-
-.text
-.align 2
-
-.set DC_ON, (0x1<<2)
-.set IC_ON, (0x1<<12)
-
-
-
-ASM_PFX(ArmInvalidateDataCacheEntryByMVA):
- mcr p15, 0, r0, c7, c6, 1 @invalidate single data cache line
- dsb
- isb
- bx lr
-
-
-ASM_PFX(ArmCleanDataCacheEntryByMVA):
- mcr p15, 0, r0, c7, c10, 1 @clean single data cache line
- dsb
- isb
- bx lr
-
-
-ASM_PFX(ArmCleanInvalidateDataCacheEntryByMVA):
- mcr p15, 0, r0, c7, c14, 1 @clean and invalidate single data cache line
- dsb
- isb
- bx lr
-
-
-ASM_PFX(ArmInvalidateDataCacheEntryBySetWay):
- mcr p15, 0, r0, c7, c6, 2 @ Invalidate this line
- dsb
- isb
- bx lr
-
-
-ASM_PFX(ArmCleanInvalidateDataCacheEntryBySetWay):
- mcr p15, 0, r0, c7, c14, 2 @ Clean and Invalidate this line
- dsb
- isb
- bx lr
-
-
-ASM_PFX(ArmCleanDataCacheEntryBySetWay):
- mcr p15, 0, r0, c7, c10, 2 @ Clean this line
- dsb
- isb
- bx lr
-
-
-ASM_PFX(ArmInvalidateInstructionCache):
- mcr p15,0,R0,c7,c5,0 @Invalidate entire instruction cache
- dsb
- isb
- bx LR
-
-ASM_PFX(ArmEnableMmu):
- mrc p15,0,R0,c1,c0,0
- orr R0,R0,#1
- mcr p15,0,R0,c1,c0,0
- dsb
- isb
- bx LR
-
-ASM_PFX(ArmMmuEnabled):
- mrc p15,0,R0,c1,c0,0
- and R0,R0,#1
- bx LR
-
-ASM_PFX(ArmDisableMmu):
- mrc p15,0,R0,c1,c0,0
- bic R0,R0,#1
- mcr p15,0,R0,c1,c0,0 @Disable MMU
-
- mcr p15,0,R0,c8,c7,0 @Invalidate TLB
- mcr p15,0,R0,c7,c5,6 @Invalidate Branch predictor array
- dsb
- isb
- bx LR
-
-ASM_PFX(ArmEnableDataCache):
- ldr R1,=DC_ON
- mrc p15,0,R0,c1,c0,0 @Read control register configuration data
- orr R0,R0,R1 @Set C bit
- mcr p15,0,r0,c1,c0,0 @Write control register configuration data
- dsb
- isb
- bx LR
-
-ASM_PFX(ArmDisableDataCache):
- ldr R1,=DC_ON
- mrc p15,0,R0,c1,c0,0 @Read control register configuration data
- bic R0,R0,R1 @Clear C bit
- mcr p15,0,r0,c1,c0,0 @Write control register configuration data
- dsb
- isb
- bx LR
-
-ASM_PFX(ArmEnableInstructionCache):
- ldr R1,=IC_ON
- mrc p15,0,R0,c1,c0,0 @Read control register configuration data
- orr R0,R0,R1 @Set I bit
- mcr p15,0,r0,c1,c0,0 @Write control register configuration data
- dsb
- isb
- bx LR
-
-ASM_PFX(ArmDisableInstructionCache):
- ldr R1,=IC_ON
- mrc p15,0,R0,c1,c0,0 @Read control register configuration data
- bic R0,R0,R1 @Clear I bit.
- mcr p15,0,r0,c1,c0,0 @Write control register configuration data
- dsb
- isb
- bx LR
-
-ASM_PFX(ArmEnableBranchPrediction):
- mrc p15, 0, r0, c1, c0, 0
- orr r0, r0, #0x00000800
- mcr p15, 0, r0, c1, c0, 0
- dsb
- isb
- bx LR
-
-ASM_PFX(ArmDisableBranchPrediction):
- mrc p15, 0, r0, c1, c0, 0
- bic r0, r0, #0x00000800
- mcr p15, 0, r0, c1, c0, 0
- dsb
- isb
- bx LR
-
-
-ASM_PFX(ArmV7AllDataCachesOperation):
- stmfd SP!,{r4-r12, LR}
- mov R1, R0 @ Save Function call in R1
- mrc p15, 1, R6, c0, c0, 1 @ Read CLIDR
- ands R3, R6, #0x7000000 @ Mask out all but Level of Coherency (LoC)
- mov R3, R3, LSR #23 @ Cache level value (naturally aligned)
- beq L_Finished
- mov R10, #0
-
-Loop1:
- add R2, R10, R10, LSR #1 @ Work out 3xcachelevel
- mov R12, R6, LSR R2 @ bottom 3 bits are the Cache type for this level
- and R12, R12, #7 @ get those 3 bits alone
- cmp R12, #2
- blt L_Skip @ no cache or only instruction cache at this level
- mcr p15, 2, R10, c0, c0, 0 @ write the Cache Size selection register (CSSELR) // OR in 1 for Instruction
- isb @ isb to sync the change to the CacheSizeID reg
- mrc p15, 1, R12, c0, c0, 0 @ reads current Cache Size ID register (CCSIDR)
- and R2, R12, #0x7 @ extract the line length field
- add R2, R2, #4 @ add 4 for the line length offset (log2 16 bytes)
-@ ldr R4, =0x3FF
- mov R4, #0x400
- sub R4, R4, #1
- ands R4, R4, R12, LSR #3 @ R4 is the max number on the way size (right aligned)
- clz R5, R4 @ R5 is the bit position of the way size increment
-@ ldr R7, =0x00007FFF
- mov R7, #0x00008000
- sub R7, R7, #1
- ands R7, R7, R12, LSR #13 @ R7 is the max number of the index size (right aligned)
-
-Loop2:
- mov R9, R4 @ R9 working copy of the max way size (right aligned)
-
-Loop3:
- orr R0, R10, R9, LSL R5 @ factor in the way number and cache number into R11
- orr R0, R0, R7, LSL R2 @ factor in the index number
-
- blx R1
-
- subs R9, R9, #1 @ decrement the way number
- bge Loop3
- subs R7, R7, #1 @ decrement the index
- bge Loop2
-L_Skip:
- add R10, R10, #2 @ increment the cache number
- cmp R3, R10
- bgt Loop1
-
-L_Finished:
- dsb
- ldmfd SP!, {r4-r12, lr}
- bx LR
-
-ASM_PFX(ArmDataMemoryBarrier):
- dmb
- bx LR
-
-ASM_PFX(ArmDataSyncronizationBarrier):
-ASM_PFX(ArmDrainWriteBuffer):
- dsb
- bx LR
-
-ASM_PFX(ArmInstructionSynchronizationBarrier):
- isb
- bx LR
-
-
-ASM_FUNCTION_REMOVE_IF_UNREFERENCED
+#------------------------------------------------------------------------------ \r
+#\r
+# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>\r
+# Copyright (c) 2011, ARM Limited. All rights reserved.\r
+#\r
+# This program and the accompanying materials\r
+# are licensed and made available under the terms and conditions of the BSD License\r
+# which accompanies this distribution. The full text of the license may be found at\r
+# http://opensource.org/licenses/bsd-license.php\r
+#\r
+# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
+# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
+#\r
+#------------------------------------------------------------------------------\r
+\r
+.text\r
+.align 2\r
+\r
+GCC_ASM_EXPORT (ArmInvalidateInstructionCache)\r
+GCC_ASM_EXPORT (ArmInvalidateDataCacheEntryByMVA)\r
+GCC_ASM_EXPORT (ArmCleanDataCacheEntryByMVA)\r
+GCC_ASM_EXPORT (ArmCleanInvalidateDataCacheEntryByMVA)\r
+GCC_ASM_EXPORT (ArmInvalidateDataCacheEntryBySetWay)\r
+GCC_ASM_EXPORT (ArmCleanDataCacheEntryBySetWay)\r
+GCC_ASM_EXPORT (ArmCleanInvalidateDataCacheEntryBySetWay)\r
+GCC_ASM_EXPORT (ArmDrainWriteBuffer)\r
+GCC_ASM_EXPORT (ArmEnableMmu)\r
+GCC_ASM_EXPORT (ArmDisableMmu)\r
+GCC_ASM_EXPORT (ArmDisableCachesAndMmu)\r
+GCC_ASM_EXPORT (ArmMmuEnabled)\r
+GCC_ASM_EXPORT (ArmEnableDataCache)\r
+GCC_ASM_EXPORT (ArmDisableDataCache)\r
+GCC_ASM_EXPORT (ArmEnableInstructionCache)\r
+GCC_ASM_EXPORT (ArmDisableInstructionCache)\r
+GCC_ASM_EXPORT (ArmEnableSWPInstruction)\r
+GCC_ASM_EXPORT (ArmEnableBranchPrediction)\r
+GCC_ASM_EXPORT (ArmDisableBranchPrediction)\r
+GCC_ASM_EXPORT (ArmSetLowVectors)\r
+GCC_ASM_EXPORT (ArmSetHighVectors)\r
+GCC_ASM_EXPORT (ArmV7AllDataCachesOperation)\r
+GCC_ASM_EXPORT (ArmV7PerformPoUDataCacheOperation)\r
+GCC_ASM_EXPORT (ArmDataMemoryBarrier)\r
+GCC_ASM_EXPORT (ArmDataSyncronizationBarrier)\r
+GCC_ASM_EXPORT (ArmInstructionSynchronizationBarrier)\r
+GCC_ASM_EXPORT (ArmReadVBar)\r
+GCC_ASM_EXPORT (ArmWriteVBar)\r
+GCC_ASM_EXPORT (ArmEnableVFP)\r
+GCC_ASM_EXPORT (ArmCallWFI)\r
+GCC_ASM_EXPORT (ArmReadCbar)\r
+GCC_ASM_EXPORT (ArmInvalidateInstructionAndDataTlb)\r
+GCC_ASM_EXPORT (ArmReadMpidr)\r
+GCC_ASM_EXPORT (ArmReadTpidrurw)\r
+GCC_ASM_EXPORT (ArmWriteTpidrurw)\r
+GCC_ASM_EXPORT (ArmIsArchTimerImplemented)\r
+GCC_ASM_EXPORT (ArmReadIdPfr1)\r
+\r
+.set DC_ON, (0x1<<2)\r
+.set IC_ON, (0x1<<12)\r
+.set CTRL_M_BIT, (1 << 0)\r
+.set CTRL_C_BIT, (1 << 2)\r
+.set CTRL_B_BIT, (1 << 7)\r
+.set CTRL_I_BIT, (1 << 12)\r
+\r
+\r
+ASM_PFX(ArmInvalidateDataCacheEntryByMVA):\r
+ mcr p15, 0, r0, c7, c6, 1 @invalidate single data cache line \r
+ dsb\r
+ isb\r
+ bx lr\r
+\r
+ASM_PFX(ArmCleanDataCacheEntryByMVA):\r
+ mcr p15, 0, r0, c7, c10, 1 @clean single data cache line \r
+ dsb\r
+ isb\r
+ bx lr\r
+\r
+\r
+ASM_PFX(ArmCleanInvalidateDataCacheEntryByMVA):\r
+ mcr p15, 0, r0, c7, c14, 1 @clean and invalidate single data cache line\r
+ dsb\r
+ isb\r
+ bx lr\r
+\r
+\r
+ASM_PFX(ArmInvalidateDataCacheEntryBySetWay):\r
+ mcr p15, 0, r0, c7, c6, 2 @ Invalidate this line \r
+ dsb\r
+ isb\r
+ bx lr\r
+\r
+\r
+ASM_PFX(ArmCleanInvalidateDataCacheEntryBySetWay):\r
+ mcr p15, 0, r0, c7, c14, 2 @ Clean and Invalidate this line \r
+ dsb\r
+ isb\r
+ bx lr\r
+\r
+\r
+ASM_PFX(ArmCleanDataCacheEntryBySetWay):\r
+ mcr p15, 0, r0, c7, c10, 2 @ Clean this line \r
+ dsb\r
+ isb\r
+ bx lr\r
+\r
+ASM_PFX(ArmInvalidateInstructionCache):\r
+ mcr p15,0,R0,c7,c5,0 @Invalidate entire instruction cache\r
+ dsb\r
+ isb\r
+ bx LR\r
+\r
+ASM_PFX(ArmEnableMmu):\r
+ mrc p15,0,R0,c1,c0,0\r
+ orr R0,R0,#1\r
+ mcr p15,0,R0,c1,c0,0\r
+ dsb\r
+ isb\r
+ bx LR\r
+\r
+\r
+ASM_PFX(ArmDisableMmu):\r
+ mrc p15,0,R0,c1,c0,0\r
+ bic R0,R0,#1\r
+ mcr p15,0,R0,c1,c0,0 @Disable MMU\r
+\r
+ mcr p15,0,R0,c8,c7,0 @Invalidate TLB\r
+ mcr p15,0,R0,c7,c5,6 @Invalidate Branch predictor array\r
+ dsb\r
+ isb\r
+ bx LR\r
+\r
+ASM_PFX(ArmDisableCachesAndMmu):\r
+ mrc p15, 0, r0, c1, c0, 0 @ Get control register\r
+ bic r0, r0, #CTRL_M_BIT @ Disable MMU\r
+ bic r0, r0, #CTRL_C_BIT @ Disable D Cache\r
+ bic r0, r0, #CTRL_I_BIT @ Disable I Cache\r
+ mcr p15, 0, r0, c1, c0, 0 @ Write control register\r
+ dsb\r
+ isb\r
+ bx LR\r
+\r
+ASM_PFX(ArmMmuEnabled):\r
+ mrc p15,0,R0,c1,c0,0\r
+ and R0,R0,#1\r
+ bx LR \r
+\r
+ASM_PFX(ArmEnableDataCache):\r
+ ldr R1,=DC_ON\r
+ mrc p15,0,R0,c1,c0,0 @Read control register configuration data\r
+ orr R0,R0,R1 @Set C bit\r
+ mcr p15,0,r0,c1,c0,0 @Write control register configuration data\r
+ dsb\r
+ isb\r
+ bx LR\r
+ \r
+ASM_PFX(ArmDisableDataCache):\r
+ ldr R1,=DC_ON\r
+ mrc p15,0,R0,c1,c0,0 @Read control register configuration data\r
+ bic R0,R0,R1 @Clear C bit\r
+ mcr p15,0,r0,c1,c0,0 @Write control register configuration data\r
+ dsb\r
+ isb\r
+ bx LR\r
+\r
+ASM_PFX(ArmEnableInstructionCache):\r
+ ldr R1,=IC_ON\r
+ mrc p15,0,R0,c1,c0,0 @Read control register configuration data\r
+ orr R0,R0,R1 @Set I bit\r
+ mcr p15,0,r0,c1,c0,0 @Write control register configuration data\r
+ dsb\r
+ isb\r
+ bx LR\r
+ \r
+ASM_PFX(ArmDisableInstructionCache):\r
+ ldr R1,=IC_ON\r
+ mrc p15,0,R0,c1,c0,0 @Read control register configuration data\r
+ bic R0,R0,R1 @Clear I bit.\r
+ mcr p15,0,r0,c1,c0,0 @Write control register configuration data\r
+ dsb\r
+ isb\r
+ bx LR\r
+\r
+ASM_PFX(ArmEnableSWPInstruction):\r
+ mrc p15, 0, r0, c1, c0, 0\r
+ orr r0, r0, #0x00000400\r
+ mcr p15, 0, r0, c1, c0, 0\r
+ isb\r
+ bx LR\r
+\r
+ASM_PFX(ArmEnableBranchPrediction):\r
+ mrc p15, 0, r0, c1, c0, 0\r
+ orr r0, r0, #0x00000800\r
+ mcr p15, 0, r0, c1, c0, 0\r
+ dsb\r
+ isb\r
+ bx LR\r
+\r
+ASM_PFX(ArmDisableBranchPrediction):\r
+ mrc p15, 0, r0, c1, c0, 0\r
+ bic r0, r0, #0x00000800\r
+ mcr p15, 0, r0, c1, c0, 0\r
+ dsb\r
+ isb\r
+ bx LR\r
+\r
+ASM_PFX(ArmSetLowVectors):\r
+ mrc p15, 0, r0, c1, c0, 0 @ Read SCTLR into R0 (Read control register configuration data)\r
+ bic r0, r0, #0x00002000 @ clear V bit\r
+ mcr p15, 0, r0, c1, c0, 0 @ Write R0 into SCTLR (Write control register configuration data)\r
+ isb\r
+ bx LR\r
+\r
+ASM_PFX(ArmSetHighVectors):\r
+ mrc p15, 0, r0, c1, c0, 0 @ Read SCTLR into R0 (Read control register configuration data)\r
+  orr     r0, r0, #0x00002000    @ Set V bit (exception vectors at 0xFFFF0000)\r
+ mcr p15, 0, r0, c1, c0, 0 @ Write R0 into SCTLR (Write control register configuration data)\r
+ isb\r
+ bx LR\r
+\r
+ASM_PFX(ArmV7AllDataCachesOperation):\r
+ stmfd SP!,{r4-r12, LR}\r
+ mov R1, R0 @ Save Function call in R1\r
+ mrc p15, 1, R6, c0, c0, 1 @ Read CLIDR\r
+ ands R3, R6, #0x7000000 @ Mask out all but Level of Coherency (LoC)\r
+ mov R3, R3, LSR #23 @ Cache level value (naturally aligned)\r
+ beq L_Finished\r
+ mov R10, #0\r
+\r
+Loop1: \r
+ add R2, R10, R10, LSR #1 @ Work out 3xcachelevel\r
+ mov R12, R6, LSR R2 @ bottom 3 bits are the Cache type for this level\r
+ and R12, R12, #7 @ get those 3 bits alone\r
+ cmp R12, #2\r
+ blt L_Skip @ no cache or only instruction cache at this level\r
+ mcr p15, 2, R10, c0, c0, 0 @ write the Cache Size selection register (CSSELR) // OR in 1 for Instruction\r
+ isb @ isb to sync the change to the CacheSizeID reg \r
+ mrc p15, 1, R12, c0, c0, 0 @ reads current Cache Size ID register (CCSIDR)\r
+ and R2, R12, #0x7 @ extract the line length field\r
+ add R2, R2, #4 @ add 4 for the line length offset (log2 16 bytes)\r
+@ ldr R4, =0x3FF\r
+ mov R4, #0x400\r
+ sub R4, R4, #1\r
+ ands R4, R4, R12, LSR #3 @ R4 is the max number on the way size (right aligned)\r
+ clz R5, R4 @ R5 is the bit position of the way size increment\r
+@ ldr R7, =0x00007FFF\r
+ mov R7, #0x00008000\r
+ sub R7, R7, #1\r
+ ands R7, R7, R12, LSR #13 @ R7 is the max number of the index size (right aligned)\r
+\r
+Loop2: \r
+ mov R9, R4 @ R9 working copy of the max way size (right aligned)\r
+\r
+Loop3: \r
+  orr  R0, R10, R9, LSL R5 @ factor in the way number and cache number into R0\r
+ orr R0, R0, R7, LSL R2 @ factor in the index number\r
+\r
+ blx R1\r
+\r
+ subs R9, R9, #1 @ decrement the way number\r
+ bge Loop3\r
+ subs R7, R7, #1 @ decrement the index\r
+ bge Loop2\r
+L_Skip: \r
+ add R10, R10, #2 @ increment the cache number\r
+ cmp R3, R10\r
+ bgt Loop1\r
+ \r
+L_Finished:\r
+ dsb\r
+ ldmfd SP!, {r4-r12, lr}\r
+ bx LR\r
+\r
+ASM_PFX(ArmV7PerformPoUDataCacheOperation):\r
+ stmfd SP!,{r4-r12, LR}\r
+ mov R1, R0 @ Save Function call in R1\r
+ mrc p15, 1, R6, c0, c0, 1 @ Read CLIDR\r
+ ands R3, R6, #0x38000000 @ Mask out all but Level of Unification (LoU)\r
+ mov R3, R3, LSR #26 @ Cache level value (naturally aligned)\r
+ beq Finished2\r
+ mov R10, #0\r
+\r
+Loop4:\r
+ add R2, R10, R10, LSR #1 @ Work out 3xcachelevel\r
+ mov R12, R6, LSR R2 @ bottom 3 bits are the Cache type for this level\r
+ and R12, R12, #7 @ get those 3 bits alone\r
+ cmp R12, #2\r
+ blt Skip2 @ no cache or only instruction cache at this level\r
+ mcr p15, 2, R10, c0, c0, 0 @ write the Cache Size selection register (CSSELR) // OR in 1 for Instruction\r
+ isb @ isb to sync the change to the CacheSizeID reg \r
+ mrc p15, 1, R12, c0, c0, 0 @ reads current Cache Size ID register (CCSIDR)\r
+ and R2, R12, #0x7 @ extract the line length field\r
+ add R2, R2, #4 @ add 4 for the line length offset (log2 16 bytes)\r
+ ldr R4, =0x3FF\r
+ ands R4, R4, R12, LSR #3 @ R4 is the max number on the way size (right aligned)\r
+ clz R5, R4 @ R5 is the bit position of the way size increment\r
+ ldr R7, =0x00007FFF\r
+ ands R7, R7, R12, LSR #13 @ R7 is the max number of the index size (right aligned)\r
+\r
+Loop5:\r
+ mov R9, R4 @ R9 working copy of the max way size (right aligned)\r
+\r
+Loop6:\r
+  orr  R0, R10, R9, LSL R5 @ factor in the way number and cache number into R0\r
+ orr R0, R0, R7, LSL R2 @ factor in the index number\r
+\r
+ blx R1\r
+\r
+ subs R9, R9, #1 @ decrement the way number\r
+ bge Loop6\r
+ subs R7, R7, #1 @ decrement the index\r
+ bge Loop5\r
+Skip2:\r
+ add R10, R10, #2 @ increment the cache number\r
+ cmp R3, R10\r
+ bgt Loop4\r
+ \r
+Finished2:\r
+ dsb\r
+ ldmfd SP!, {r4-r12, lr}\r
+ bx LR\r
+\r
+ASM_PFX(ArmDataMemoryBarrier):\r
+ dmb\r
+ bx LR\r
+ \r
+ASM_PFX(ArmDataSyncronizationBarrier):\r
+ASM_PFX(ArmDrainWriteBuffer):\r
+ dsb\r
+ bx LR\r
+ \r
+ASM_PFX(ArmInstructionSynchronizationBarrier):\r
+ isb\r
+ bx LR\r
+\r
+ASM_PFX(ArmReadVBar):\r
+  # Get the Address of the Vector Table from the VBAR register\r
+ mrc p15, 0, r0, c12, c0, 0\r
+ bx lr\r
+\r
+ASM_PFX(ArmWriteVBar):\r
+ # Set the Address of the Vector Table in the VBAR register\r
+ mcr p15, 0, r0, c12, c0, 0 \r
+ # Ensure the SCTLR.V bit is clear\r
+ mrc p15, 0, r0, c1, c0, 0 @ Read SCTLR into R0 (Read control register configuration data)\r
+ bic r0, r0, #0x00002000 @ clear V bit\r
+ mcr p15, 0, r0, c1, c0, 0 @ Write R0 into SCTLR (Write control register configuration data)\r
+ isb\r
+ bx lr\r
+\r
+ASM_PFX(ArmEnableVFP):\r
+ # Read CPACR (Coprocessor Access Control Register)\r
+ mrc p15, 0, r0, c1, c0, 2\r
+  # Enable VFP access (Full Access to CP10, CP11) (V* instructions)\r
+ orr r0, r0, #0x00f00000\r
+ # Write back CPACR (Coprocessor Access Control Register)\r
+ mcr p15, 0, r0, c1, c0, 2\r
+ isb\r
+ # Set EN bit in FPEXC. The Advanced SIMD and VFP extensions are enabled and operate normally.\r
+ mov r0, #0x40000000\r
+ mcr p10,#0x7,r0,c8,c0,#0\r
+ bx lr\r
+\r
+ASM_PFX(ArmCallWFI):\r
+ wfi\r
+ bx lr\r
+\r
+#Note: Return 0 in Uniprocessor implementation\r
+ASM_PFX(ArmReadCbar):\r
+ mrc p15, 4, r0, c15, c0, 0 @ Read Configuration Base Address Register\r
+ bx lr\r
+\r
+ASM_PFX(ArmInvalidateInstructionAndDataTlb):\r
+ mcr p15, 0, r0, c8, c7, 0 @ Invalidate Inst TLB and Data TLB\r
+ dsb\r
+ bx lr\r
+\r
+ASM_PFX(ArmReadMpidr):\r
+ mrc p15, 0, r0, c0, c0, 5 @ read MPIDR\r
+ bx lr\r
+ \r
+ASM_PFX(ArmReadTpidrurw):\r
+ mrc p15, 0, r0, c13, c0, 2 @ read TPIDRURW\r
+ bx lr\r
+\r
+ASM_PFX(ArmWriteTpidrurw):\r
+ mcr p15, 0, r0, c13, c0, 2 @ write TPIDRURW\r
+ bx lr\r
+\r
+ASM_PFX(ArmIsArchTimerImplemented):\r
+ mrc p15, 0, r0, c0, c1, 1 @ Read ID_PFR1\r
+ and r0, r0, #0x000F0000\r
+ bx lr\r
+\r
+ASM_PFX(ArmReadIdPfr1):\r
+ mrc p15, 0, r0, c0, c1, 1 @ Read ID_PFR1 Register\r
+ bx lr\r
+\r
+ASM_FUNCTION_REMOVE_IF_UNREFERENCED\r