git.proxmox.com Git - mirror_edk2.git/blame - ArmPkg/Library/ArmLib/ArmV7/ArmV7Support.S
ArmPkg/ArmLib: don't invalidate entire I-cache on range operation
[mirror_edk2.git] / ArmPkg / Library / ArmLib / ArmV7 / ArmV7Support.S
CommitLineData
3402aac7 1#------------------------------------------------------------------------------\r
bd6b9799 2#\r
3# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>\r
9401d6f4 4# Copyright (c) 2011 - 2014, ARM Limited. All rights reserved.\r
bd6b9799 5#\r
6# This program and the accompanying materials\r
7# are licensed and made available under the terms and conditions of the BSD License\r
8# which accompanies this distribution. The full text of the license may be found at\r
9# http://opensource.org/licenses/bsd-license.php\r
10#\r
11# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
12# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
13#\r
14#------------------------------------------------------------------------------\r
15\r
16.text\r
17.align 2\r
18\r
19GCC_ASM_EXPORT (ArmInvalidateInstructionCache)\r
20GCC_ASM_EXPORT (ArmInvalidateDataCacheEntryByMVA)\r
cf580da1 21GCC_ASM_EXPORT (ArmInvalidateInstructionCacheEntryToPoUByMVA)\r
bd6b9799 22GCC_ASM_EXPORT (ArmCleanDataCacheEntryByMVA)\r
b7de7e3c 23GCC_ASM_EXPORT (ArmCleanDataCacheEntryToPoUByMVA)\r
bd6b9799 24GCC_ASM_EXPORT (ArmCleanInvalidateDataCacheEntryByMVA)\r
25GCC_ASM_EXPORT (ArmInvalidateDataCacheEntryBySetWay)\r
26GCC_ASM_EXPORT (ArmCleanDataCacheEntryBySetWay)\r
27GCC_ASM_EXPORT (ArmCleanInvalidateDataCacheEntryBySetWay)\r
bd6b9799 28GCC_ASM_EXPORT (ArmEnableMmu)\r
29GCC_ASM_EXPORT (ArmDisableMmu)\r
30GCC_ASM_EXPORT (ArmDisableCachesAndMmu)\r
31GCC_ASM_EXPORT (ArmMmuEnabled)\r
32GCC_ASM_EXPORT (ArmEnableDataCache)\r
33GCC_ASM_EXPORT (ArmDisableDataCache)\r
34GCC_ASM_EXPORT (ArmEnableInstructionCache)\r
35GCC_ASM_EXPORT (ArmDisableInstructionCache)\r
36GCC_ASM_EXPORT (ArmEnableSWPInstruction)\r
37GCC_ASM_EXPORT (ArmEnableBranchPrediction)\r
38GCC_ASM_EXPORT (ArmDisableBranchPrediction)\r
39GCC_ASM_EXPORT (ArmSetLowVectors)\r
40GCC_ASM_EXPORT (ArmSetHighVectors)\r
41GCC_ASM_EXPORT (ArmV7AllDataCachesOperation)\r
42GCC_ASM_EXPORT (ArmDataMemoryBarrier)\r
cf93a378 43GCC_ASM_EXPORT (ArmDataSynchronizationBarrier)\r
bd6b9799 44GCC_ASM_EXPORT (ArmInstructionSynchronizationBarrier)\r
836c3500 45GCC_ASM_EXPORT (ArmReadVBar)\r
bd6b9799 46GCC_ASM_EXPORT (ArmWriteVBar)\r
47GCC_ASM_EXPORT (ArmEnableVFP)\r
48GCC_ASM_EXPORT (ArmCallWFI)\r
49GCC_ASM_EXPORT (ArmReadCbar)\r
bd6b9799 50GCC_ASM_EXPORT (ArmReadMpidr)\r
51GCC_ASM_EXPORT (ArmReadTpidrurw)\r
52GCC_ASM_EXPORT (ArmWriteTpidrurw)\r
53GCC_ASM_EXPORT (ArmIsArchTimerImplemented)\r
54GCC_ASM_EXPORT (ArmReadIdPfr1)\r
63dbd629 55GCC_ASM_EXPORT (ArmReadIdMmfr0)\r
bd6b9799 56\r
@ SCTLR (cp15 c1,c0,0) control-bit masks.
@ NOTE(review): DC_ON/IC_ON duplicate CTRL_C_BIT/CTRL_I_BIT (same values);
@ both spellings are used by the routines below.
.set DC_ON, (0x1<<2)\r
.set IC_ON, (0x1<<12)\r
.set CTRL_M_BIT, (1 << 0)\r
.set CTRL_C_BIT, (1 << 2)\r
.set CTRL_B_BIT, (1 << 7)\r
.set CTRL_I_BIT, (1 << 12)\r
\r
64\r
@ VOID ArmInvalidateDataCacheEntryByMVA (UINTN Address)
@ In: r0 = address of the line. DCIMVAC (c7,c6,1): invalidate one data cache
@ line by MVA. No dsb/isb here; the caller is expected to issue barriers
@ after the range loop completes.
ASM_PFX(ArmInvalidateDataCacheEntryByMVA):\r
3402aac7 66 mcr p15, 0, r0, c7, c6, 1 @invalidate single data cache line\r
bd6b9799 67 bx lr\r
68\r
@ VOID ArmCleanDataCacheEntryByMVA (UINTN Address)
@ In: r0 = address of the line. DCCMVAC (c7,c10,1): clean one data cache
@ line by MVA. Barriers are the caller's responsibility.
ASM_PFX(ArmCleanDataCacheEntryByMVA):\r
3402aac7 70 mcr p15, 0, r0, c7, c10, 1 @clean single data cache line\r
bd6b9799 71 bx lr\r
\r
\r
b7de7e3c
EC
@ VOID ArmCleanDataCacheEntryToPoUByMVA (UINTN Address)
@ In: r0 = address of the line. DCCMVAU (c7,c11,1): clean one data cache
@ line by MVA to the Point of Unification (for I/D coherency, e.g. after
@ writing code). Barriers are the caller's responsibility.
ASM_PFX(ArmCleanDataCacheEntryToPoUByMVA):\r
 mcr p15, 0, r0, c7, c11, 1 @clean single data cache line to PoU\r
 bx lr\r
\r
cf580da1
AB
@ VOID ArmInvalidateInstructionCacheEntryToPoUByMVA (UINTN Address)
@ In: r0 = address of the line. ICIMVAU (c7,c5,1) invalidates one I-cache
@ line to PoU, BPIMVA (c7,c5,7) drops the branch-predictor entry for the
@ same address — this lets range operations avoid a full I-cache flush.
ASM_PFX(ArmInvalidateInstructionCacheEntryToPoUByMVA):\r
 mcr p15, 0, r0, c7, c5, 1 @Invalidate single instruction cache line to PoU\r
 mcr p15, 0, r0, c7, c5, 7 @Invalidate branch predictor\r
 bx lr\r
b7de7e3c 82\r
bd6b9799 83ASM_PFX(ArmCleanInvalidateDataCacheEntryByMVA):\r
84 mcr p15, 0, r0, c7, c14, 1 @clean and invalidate single data cache line\r
bd6b9799 85 bx lr\r
86\r
87\r
@ VOID ArmInvalidateDataCacheEntryBySetWay (UINTN SetWayFormat)
@ In: r0 = set/way/level value in DCISW (c7,c6,2) encoding, as built by
@ ArmV7AllDataCachesOperation below. Invalidates one line by set/way.
ASM_PFX(ArmInvalidateDataCacheEntryBySetWay):\r
3402aac7 89 mcr p15, 0, r0, c7, c6, 2 @ Invalidate this line\r
bd6b9799 90 bx lr\r
\r
\r
@ VOID ArmCleanInvalidateDataCacheEntryBySetWay (UINTN SetWayFormat)
@ In: r0 = set/way/level value in DCCISW (c7,c14,2) encoding.
@ Cleans and invalidates one data cache line by set/way.
ASM_PFX(ArmCleanInvalidateDataCacheEntryBySetWay):\r
3402aac7 94 mcr p15, 0, r0, c7, c14, 2 @ Clean and Invalidate this line\r
bd6b9799 95 bx lr\r
\r
\r
@ VOID ArmCleanDataCacheEntryBySetWay (UINTN SetWayFormat)
@ In: r0 = set/way/level value in DCCSW (c7,c10,2) encoding.
@ Cleans one data cache line by set/way.
ASM_PFX(ArmCleanDataCacheEntryBySetWay):\r
3402aac7 99 mcr p15, 0, r0, c7, c10, 2 @ Clean this line\r
bd6b9799 100 bx lr\r
\r
@ VOID ArmInvalidateInstructionCache (VOID)
@ ICIALLU (c7,c5,0): invalidate the entire instruction cache. The register
@ value written (R0) is ignored by this operation. dsb+isb make the
@ invalidation complete and visible to subsequent instruction fetches.
ASM_PFX(ArmInvalidateInstructionCache):\r
 mcr p15,0,R0,c7,c5,0 @Invalidate entire instruction cache\r
 dsb\r
 isb\r
 bx LR\r
\r
@ VOID ArmEnableMmu (VOID)
@ Read-modify-write SCTLR to set the M bit (bit 0). dsb ensures prior
@ writes (e.g. translation tables) have completed; isb flushes the pipeline
@ so the next fetch runs under the new translation regime.
@ Assumes TTBR/DACR etc. were programmed by the caller — not done here.
ASM_PFX(ArmEnableMmu):\r
 mrc p15,0,R0,c1,c0,0\r
 orr R0,R0,#1\r
 mcr p15,0,R0,c1,c0,0\r
 dsb\r
 isb\r
 bx LR\r
\r
\r
@ VOID ArmDisableMmu (VOID)
@ Clear SCTLR.M, then drop all stale translation state: TLBIALL (c8,c7,0)
@ invalidates the unified TLB and BPIALL (c7,c5,6) the branch predictor.
@ The register value (R0) is ignored by the invalidate-all operations.
ASM_PFX(ArmDisableMmu):\r
 mrc p15,0,R0,c1,c0,0\r
 bic R0,R0,#1\r
 mcr p15,0,R0,c1,c0,0 @Disable MMU\r
\r
 mcr p15,0,R0,c8,c7,0 @Invalidate TLB\r
 mcr p15,0,R0,c7,c5,6 @Invalidate Branch predictor array\r
 dsb\r
 isb\r
 bx LR\r
\r
@ VOID ArmDisableCachesAndMmu (VOID)
@ Single SCTLR read-modify-write clearing M (MMU), C (D-cache) and I
@ (I-cache) together. NOTE(review): caches are not cleaned/invalidated
@ here — presumably the caller flushes them around this call; verify.
ASM_PFX(ArmDisableCachesAndMmu):\r
 mrc p15, 0, r0, c1, c0, 0 @ Get control register\r
 bic r0, r0, #CTRL_M_BIT @ Disable MMU\r
 bic r0, r0, #CTRL_C_BIT @ Disable D Cache\r
 bic r0, r0, #CTRL_I_BIT @ Disable I Cache\r
 mcr p15, 0, r0, c1, c0, 0 @ Write control register\r
 dsb\r
 isb\r
 bx LR\r
\r
@ BOOLEAN ArmMmuEnabled (VOID)
@ Returns SCTLR.M in r0: 1 if the MMU is enabled, 0 otherwise.
ASM_PFX(ArmMmuEnabled):\r
 mrc p15,0,R0,c1,c0,0\r
 and R0,R0,#1\r
3402aac7 141 bx LR\r
bd6b9799 142\r
@ VOID ArmEnableDataCache (VOID)
@ Set SCTLR.C (DC_ON, bit 2). Clobbers r1 (scratch mask).
ASM_PFX(ArmEnableDataCache):\r
 ldr R1,=DC_ON\r
 mrc p15,0,R0,c1,c0,0 @Read control register configuration data\r
 orr R0,R0,R1 @Set C bit\r
 mcr p15,0,r0,c1,c0,0 @Write control register configuration data\r
 dsb\r
 isb\r
 bx LR\r
3402aac7 151\r
bd6b9799 152ASM_PFX(ArmDisableDataCache):\r
153 ldr R1,=DC_ON\r
154 mrc p15,0,R0,c1,c0,0 @Read control register configuration data\r
155 bic R0,R0,R1 @Clear C bit\r
156 mcr p15,0,r0,c1,c0,0 @Write control register configuration data\r
157 dsb\r
158 isb\r
159 bx LR\r
160\r
@ VOID ArmEnableInstructionCache (VOID)
@ Set SCTLR.I (IC_ON, bit 12). Clobbers r1 (scratch mask).
ASM_PFX(ArmEnableInstructionCache):\r
 ldr R1,=IC_ON\r
 mrc p15,0,R0,c1,c0,0 @Read control register configuration data\r
 orr R0,R0,R1 @Set I bit\r
 mcr p15,0,r0,c1,c0,0 @Write control register configuration data\r
 dsb\r
 isb\r
 bx LR\r
3402aac7 169\r
bd6b9799 170ASM_PFX(ArmDisableInstructionCache):\r
171 ldr R1,=IC_ON\r
172 mrc p15,0,R0,c1,c0,0 @Read control register configuration data\r
173 bic R0,R0,R1 @Clear I bit.\r
174 mcr p15,0,r0,c1,c0,0 @Write control register configuration data\r
175 dsb\r
176 isb\r
177 bx LR\r
178\r
@ VOID ArmEnableSWPInstruction (VOID)
@ Set SCTLR bit 10 (SW) to enable the legacy SWP/SWPB instructions,
@ which are UNDEFINED on ARMv7 unless this bit is set.
ASM_PFX(ArmEnableSWPInstruction):\r
 mrc p15, 0, r0, c1, c0, 0\r
 orr r0, r0, #0x00000400\r
 mcr p15, 0, r0, c1, c0, 0\r
 isb\r
 bx LR\r
\r
@ VOID ArmEnableBranchPrediction (VOID)
@ Set SCTLR bit 11 (Z) to enable program-flow (branch) prediction.
ASM_PFX(ArmEnableBranchPrediction):\r
 mrc p15, 0, r0, c1, c0, 0\r
 orr r0, r0, #0x00000800\r
 mcr p15, 0, r0, c1, c0, 0\r
 dsb\r
 isb\r
 bx LR\r
\r
@ VOID ArmDisableBranchPrediction (VOID)
@ Clear SCTLR bit 11 (Z) to disable program-flow (branch) prediction.
ASM_PFX(ArmDisableBranchPrediction):\r
 mrc p15, 0, r0, c1, c0, 0\r
 bic r0, r0, #0x00000800\r
 mcr p15, 0, r0, c1, c0, 0\r
 dsb\r
 isb\r
 bx LR\r
\r
@ VOID ArmSetLowVectors (VOID)
@ Clear SCTLR.V (bit 13): exception vectors at 0x00000000 (or VBAR when
@ Security Extensions are implemented).
ASM_PFX(ArmSetLowVectors):\r
 mrc p15, 0, r0, c1, c0, 0 @ Read SCTLR into R0 (Read control register configuration data)\r
 bic r0, r0, #0x00002000 @ clear V bit\r
 mcr p15, 0, r0, c1, c0, 0 @ Write R0 into SCTLR (Write control register configuration data)\r
 isb\r
 bx LR\r
\r
@ VOID ArmSetHighVectors (VOID)
@ Set SCTLR.V (bit 13): exception vectors at the high address 0xFFFF0000.
ASM_PFX(ArmSetHighVectors):\r
 mrc p15, 0, r0, c1, c0, 0 @ Read SCTLR into R0 (Read control register configuration data)\r
c6ba1c12 211 orr r0, r0, #0x00002000 @ Set V bit\r
bd6b9799 212 mcr p15, 0, r0, c1, c0, 0 @ Write R0 into SCTLR (Write control register configuration data)\r
 isb\r
 bx LR\r
\r
@ VOID ArmV7AllDataCachesOperation (CacheOperation)
@ Walk every data/unified cache level below the Level of Coherency (from
@ CLIDR) and call the set/way operation passed in r0 once per line, with
@ r0 holding the DC*SW-encoded set/way/level value.
@ Register roles across the walk:
@   R1  = callback (original r0 argument)
@   R6  = CLIDR;  R3 = 2*LoC;  R10 = 2*current level (CSSELR format)
@   R12 = CCSIDR of the selected level
@   R2  = log2(line length in bytes); R4 = max way; R5 = way shift (clz)
@   R7  = current set index; R9 = current way index
@ The beq below tests the flags from 'ands' (the plain 'mov' in between
@ does not update flags): LoC == 0 means nothing to do.
ASM_PFX(ArmV7AllDataCachesOperation):\r
 stmfd SP!,{r4-r12, LR}\r
 mov R1, R0 @ Save Function call in R1\r
 mrc p15, 1, R6, c0, c0, 1 @ Read CLIDR\r
 ands R3, R6, #0x7000000 @ Mask out all but Level of Coherency (LoC)\r
 mov R3, R3, LSR #23 @ Cache level value (naturally aligned)\r
 beq L_Finished\r
 mov R10, #0\r
\r
3402aac7 225Loop1:\r
bd6b9799 226 add R2, R10, R10, LSR #1 @ Work out 3xcachelevel\r
 mov R12, R6, LSR R2 @ bottom 3 bits are the Cache type for this level\r
 and R12, R12, #7 @ get those 3 bits alone\r
 cmp R12, #2\r
 blt L_Skip @ no cache or only instruction cache at this level\r
 mcr p15, 2, R10, c0, c0, 0 @ write the Cache Size selection register (CSSELR) // OR in 1 for Instruction\r
3402aac7 232 isb @ isb to sync the change to the CacheSizeID reg\r
bd6b9799 233 mrc p15, 1, R12, c0, c0, 0 @ reads current Cache Size ID register (CCSIDR)\r
 and R2, R12, #0x7 @ extract the line length field\r
 add R2, R2, #4 @ add 4 for the line length offset (log2 16 bytes)\r
@ ldr R4, =0x3FF\r
 mov R4, #0x400\r
 sub R4, R4, #1\r
 ands R4, R4, R12, LSR #3 @ R4 is the max number on the way size (right aligned)\r
 clz R5, R4 @ R5 is the bit position of the way size increment\r
@ ldr R7, =0x00007FFF\r
 mov R7, #0x00008000\r
 sub R7, R7, #1\r
 ands R7, R7, R12, LSR #13 @ R7 is the max number of the index size (right aligned)\r
\r
3402aac7 246Loop2:\r
bd6b9799 247 mov R9, R4 @ R9 working copy of the max way size (right aligned)\r
\r
3402aac7 249Loop3:\r
bd6b9799 250 orr R0, R10, R9, LSL R5 @ factor in the way number and cache number into R11\r
 orr R0, R0, R7, LSL R2 @ factor in the index number\r
\r
 blx R1\r
\r
 subs R9, R9, #1 @ decrement the way number\r
 bge Loop3\r
 subs R7, R7, #1 @ decrement the index\r
 bge Loop2\r
3402aac7 259L_Skip:\r
bd6b9799 260 add R10, R10, #2 @ increment the cache number\r
 cmp R3, R10\r
 bgt Loop1\r
3402aac7 263\r
bd6b9799 264L_Finished:\r
 dsb\r
 ldmfd SP!, {r4-r12, lr}\r
 bx LR\r
\r
@ VOID ArmDataMemoryBarrier (VOID)
@ dmb: order memory accesses before/after the barrier.
ASM_PFX(ArmDataMemoryBarrier):\r
 dmb\r
 bx LR\r
3402aac7 272\r
cf93a378 273ASM_PFX(ArmDataSynchronizationBarrier):\r
bd6b9799 274 dsb\r
275 bx LR\r
3402aac7 276\r
bd6b9799 277ASM_PFX(ArmInstructionSynchronizationBarrier):\r
278 isb\r
279 bx LR\r
280\r
836c3500 281ASM_PFX(ArmReadVBar):\r
282 # Set the Address of the Vector Table in the VBAR register\r
283 mrc p15, 0, r0, c12, c0, 0\r
284 bx lr\r
285\r
bd6b9799 286ASM_PFX(ArmWriteVBar):\r
287 # Set the Address of the Vector Table in the VBAR register\r
3402aac7 288 mcr p15, 0, r0, c12, c0, 0\r
bd6b9799 289 # Ensure the SCTLR.V bit is clear\r
290 mrc p15, 0, r0, c1, c0, 0 @ Read SCTLR into R0 (Read control register configuration data)\r
291 bic r0, r0, #0x00002000 @ clear V bit\r
292 mcr p15, 0, r0, c1, c0, 0 @ Write R0 into SCTLR (Write control register configuration data)\r
293 isb\r
294 bx lr\r
295\r
@ VOID ArmEnableVFP (VOID)
@ Grant full access to CP10/CP11 in CPACR, isb so the grant takes effect,
@ then set FPEXC.EN (bit 30) to enable VFP/Advanced SIMD. The mcr-to-p10
@ spelling of the FPEXC write is used for GAS; clang only accepts the
@ UAL 'vmsr' mnemonic, hence the preprocessor split.
ASM_PFX(ArmEnableVFP):\r
 # Read CPACR (Coprocessor Access Control Register)\r
 mrc p15, 0, r0, c1, c0, 2\r
 # Enable VPF access (Full Access to CP10, CP11) (V* instructions)\r
 orr r0, r0, #0x00f00000\r
 # Write back CPACR (Coprocessor Access Control Register)\r
 mcr p15, 0, r0, c1, c0, 2\r
18029bb9 303 isb\r
bd6b9799 304 # Set EN bit in FPEXC. The Advanced SIMD and VFP extensions are enabled and operate normally.\r
 mov r0, #0x40000000\r
6d2449c1 306#ifndef __clang__\r
bd6b9799 307 mcr p10,#0x7,r0,c8,c0,#0\r
6d2449c1
AB
308#else\r
 vmsr fpexc, r0\r
310#endif\r
bd6b9799 311 bx lr\r
\r
@ VOID ArmCallWFI (VOID)
@ wfi: enter low-power state until an interrupt/wakeup event occurs.
ASM_PFX(ArmCallWFI):\r
 wfi\r
 bx lr\r
\r
@ UINTN ArmReadCbar (VOID)
@ Reads CBAR (cp15, op1=4, c15,c0,0): base address of the memory-mapped
@ SCU/interrupt-controller region on MPCore implementations.
#Note: Return 0 in Uniprocessor implementation\r
ASM_PFX(ArmReadCbar):\r
 mrc p15, 4, r0, c15, c0, 0 @ Read Configuration Base Address Register\r
 bx lr\r
\r
bd6b9799 322ASM_PFX(ArmReadMpidr):\r
323 mrc p15, 0, r0, c0, c0, 5 @ read MPIDR\r
324 bx lr\r
3402aac7 325\r
bd6b9799 326ASM_PFX(ArmReadTpidrurw):\r
327 mrc p15, 0, r0, c13, c0, 2 @ read TPIDRURW\r
328 bx lr\r
329\r
@ VOID ArmWriteTpidrurw (UINTN Value)
@ In: r0 = value to store in TPIDRURW (software thread-ID register).
ASM_PFX(ArmWriteTpidrurw):\r
 mcr p15, 0, r0, c13, c0, 2 @ write TPIDRURW\r
 bx lr\r
\r
@ UINTN ArmIsArchTimerImplemented (VOID)
@ Returns ID_PFR1[19:16] (Generic Timer field), masked but NOT shifted:
@ nonzero (0x10000) when the Generic Timer extension is implemented.
ASM_PFX(ArmIsArchTimerImplemented):\r
 mrc p15, 0, r0, c0, c1, 1 @ Read ID_PFR1\r
 and r0, r0, #0x000F0000\r
 bx lr\r
\r
@ UINTN ArmReadIdPfr1 (VOID)
@ Returns the raw ID_PFR1 (Processor Feature Register 1) value.
ASM_PFX(ArmReadIdPfr1):\r
 mrc p15, 0, r0, c0, c1, 1 @ Read ID_PFR1 Register\r
 bx lr\r
\r
63dbd629
AB
@ UINTN ArmReadIdMmfr0 (VOID)
@ Returns the raw ID_MMFR0 (Memory Model Feature Register 0) value.
ASM_PFX(ArmReadIdMmfr0):\r
 mrc p15, 0, r0, c0, c1, 4 @ Read ID_MMFR0 Register\r
 bx lr\r
\r
bd6b9799 347ASM_FUNCTION_REMOVE_IF_UNREFERENCED\r