]> git.proxmox.com Git - mirror_edk2.git/blame - ArmPkg/Library/ArmLib/ArmV7/ArmV7Support.S
ArmPkg BeagleBoardPkg Omap35xxPkg: fix typo 'ArmDataSyncronizationBarrier'
[mirror_edk2.git] / ArmPkg / Library / ArmLib / ArmV7 / ArmV7Support.S
CommitLineData
3402aac7 1#------------------------------------------------------------------------------\r
bd6b9799 2#\r
3# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>\r
9401d6f4 4# Copyright (c) 2011 - 2014, ARM Limited. All rights reserved.\r
bd6b9799 5#\r
6# This program and the accompanying materials\r
7# are licensed and made available under the terms and conditions of the BSD License\r
8# which accompanies this distribution. The full text of the license may be found at\r
9# http://opensource.org/licenses/bsd-license.php\r
10#\r
11# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r
12# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r
13#\r
14#------------------------------------------------------------------------------\r
15\r
# ARMv7 ArmLib support routines (GNU assembler, ARM state).
# All functions follow the AAPCS: first argument in r0, result in r0.
16.text\r
17.align 2\r
18\r
# Exported entry points (GCC_ASM_EXPORT emits .global + ASM_PFX naming).
19GCC_ASM_EXPORT (ArmInvalidateInstructionCache)\r
20GCC_ASM_EXPORT (ArmInvalidateDataCacheEntryByMVA)\r
21GCC_ASM_EXPORT (ArmCleanDataCacheEntryByMVA)\r
22GCC_ASM_EXPORT (ArmCleanInvalidateDataCacheEntryByMVA)\r
23GCC_ASM_EXPORT (ArmInvalidateDataCacheEntryBySetWay)\r
24GCC_ASM_EXPORT (ArmCleanDataCacheEntryBySetWay)\r
25GCC_ASM_EXPORT (ArmCleanInvalidateDataCacheEntryBySetWay)\r
26GCC_ASM_EXPORT (ArmDrainWriteBuffer)\r
27GCC_ASM_EXPORT (ArmEnableMmu)\r
28GCC_ASM_EXPORT (ArmDisableMmu)\r
29GCC_ASM_EXPORT (ArmDisableCachesAndMmu)\r
30GCC_ASM_EXPORT (ArmMmuEnabled)\r
31GCC_ASM_EXPORT (ArmEnableDataCache)\r
32GCC_ASM_EXPORT (ArmDisableDataCache)\r
33GCC_ASM_EXPORT (ArmEnableInstructionCache)\r
34GCC_ASM_EXPORT (ArmDisableInstructionCache)\r
35GCC_ASM_EXPORT (ArmEnableSWPInstruction)\r
36GCC_ASM_EXPORT (ArmEnableBranchPrediction)\r
37GCC_ASM_EXPORT (ArmDisableBranchPrediction)\r
38GCC_ASM_EXPORT (ArmSetLowVectors)\r
39GCC_ASM_EXPORT (ArmSetHighVectors)\r
40GCC_ASM_EXPORT (ArmV7AllDataCachesOperation)\r
d60f6af4 41GCC_ASM_EXPORT (ArmV7PerformPoUDataCacheOperation)\r
bd6b9799 42GCC_ASM_EXPORT (ArmDataMemoryBarrier)\r
cf93a378 43GCC_ASM_EXPORT (ArmDataSynchronizationBarrier)\r
bd6b9799 44GCC_ASM_EXPORT (ArmInstructionSynchronizationBarrier)\r
836c3500 45GCC_ASM_EXPORT (ArmReadVBar)\r
bd6b9799 46GCC_ASM_EXPORT (ArmWriteVBar)\r
47GCC_ASM_EXPORT (ArmEnableVFP)\r
48GCC_ASM_EXPORT (ArmCallWFI)\r
49GCC_ASM_EXPORT (ArmReadCbar)\r
bd6b9799 50GCC_ASM_EXPORT (ArmReadMpidr)\r
51GCC_ASM_EXPORT (ArmReadTpidrurw)\r
52GCC_ASM_EXPORT (ArmWriteTpidrurw)\r
53GCC_ASM_EXPORT (ArmIsArchTimerImplemented)\r
54GCC_ASM_EXPORT (ArmReadIdPfr1)\r
55\r
# SCTLR (CP15 c1) bit masks used by the routines below:
#   DC_ON/CTRL_C_BIT = bit 2  (C, data cache enable)
#   IC_ON/CTRL_I_BIT = bit 12 (I, instruction cache enable)
#   CTRL_M_BIT       = bit 0  (M, MMU enable)
#   CTRL_B_BIT       = bit 7  (defined here but unused in this file)
56.set DC_ON, (0x1<<2)\r
57.set IC_ON, (0x1<<12)\r
58.set CTRL_M_BIT, (1 << 0)\r
59.set CTRL_C_BIT, (1 << 2)\r
60.set CTRL_B_BIT, (1 << 7)\r
61.set CTRL_I_BIT, (1 << 12)\r
62\r
63\r
# Invalidate the single data cache line containing the virtual address in r0
# (DCIMVAC), then dsb/isb so the effect is visible before returning.
64ASM_PFX(ArmInvalidateDataCacheEntryByMVA):\r
3402aac7 65 mcr p15, 0, r0, c7, c6, 1 @invalidate single data cache line\r
bd6b9799 66 dsb\r
67 isb\r
68 bx lr\r
69\r
# Clean (write back) the single data cache line containing the virtual
# address in r0 (DCCMVAC), then dsb/isb before returning.
70ASM_PFX(ArmCleanDataCacheEntryByMVA):\r
3402aac7 71 mcr p15, 0, r0, c7, c10, 1 @clean single data cache line\r
bd6b9799 72 dsb\r
73 isb\r
74 bx lr\r
75\r
76\r
# Clean and invalidate the single data cache line containing the virtual
# address in r0 (DCCIMVAC), then dsb/isb before returning.
77ASM_PFX(ArmCleanInvalidateDataCacheEntryByMVA):\r
78 mcr p15, 0, r0, c7, c14, 1 @clean and invalidate single data cache line\r
79 dsb\r
80 isb\r
81 bx lr\r
82\r
83\r
# Invalidate one data cache line selected by the set/way value in r0
# (DCISW). Used as the callback for the set/way walkers below.
84ASM_PFX(ArmInvalidateDataCacheEntryBySetWay):\r
3402aac7 85 mcr p15, 0, r0, c7, c6, 2 @ Invalidate this line\r
bd6b9799 86 dsb\r
87 isb\r
88 bx lr\r
89\r
90\r
# Clean and invalidate one data cache line selected by the set/way value
# in r0 (DCCISW). Used as the callback for the set/way walkers below.
91ASM_PFX(ArmCleanInvalidateDataCacheEntryBySetWay):\r
3402aac7 92 mcr p15, 0, r0, c7, c14, 2 @ Clean and Invalidate this line\r
bd6b9799 93 dsb\r
94 isb\r
95 bx lr\r
96\r
97\r
# Clean (write back) one data cache line selected by the set/way value
# in r0 (DCCSW). Used as the callback for the set/way walkers below.
98ASM_PFX(ArmCleanDataCacheEntryBySetWay):\r
3402aac7 99 mcr p15, 0, r0, c7, c10, 2 @ Clean this line\r
bd6b9799 100 dsb\r
101 isb\r
102 bx lr\r
103\r
# Invalidate the entire instruction cache (ICIALLU), then dsb/isb so the
# new instruction stream is fetched cleanly. No arguments; r0 is scratch.
104ASM_PFX(ArmInvalidateInstructionCache):\r
105 mcr p15,0,R0,c7,c5,0 @Invalidate entire instruction cache\r
106 dsb\r
107 isb\r
108 bx LR\r
109\r
# Enable the MMU: read-modify-write SCTLR setting the M bit (bit 0).
# Caller is responsible for having valid translation tables in place.
110ASM_PFX(ArmEnableMmu):\r
111 mrc p15,0,R0,c1,c0,0\r
112 orr R0,R0,#1\r
113 mcr p15,0,R0,c1,c0,0\r
114 dsb\r
115 isb\r
116 bx LR\r
117\r
118\r
# Disable the MMU (clear SCTLR.M), then invalidate the TLB (TLBIALL) and
# the branch predictor (BPIALL) so no stale translations survive.
119ASM_PFX(ArmDisableMmu):\r
120 mrc p15,0,R0,c1,c0,0\r
121 bic R0,R0,#1\r
122 mcr p15,0,R0,c1,c0,0 @Disable MMU\r
123\r
124 mcr p15,0,R0,c8,c7,0 @Invalidate TLB\r
125 mcr p15,0,R0,c7,c5,6 @Invalidate Branch predictor array\r
126 dsb\r
127 isb\r
128 bx LR\r
129\r
# Disable MMU, data cache and instruction cache in one SCTLR update.
# NOTE(review): does not clean the data cache first - callers are expected
# to have done any required clean/invalidate before calling this.
130ASM_PFX(ArmDisableCachesAndMmu):\r
131 mrc p15, 0, r0, c1, c0, 0 @ Get control register\r
132 bic r0, r0, #CTRL_M_BIT @ Disable MMU\r
133 bic r0, r0, #CTRL_C_BIT @ Disable D Cache\r
134 bic r0, r0, #CTRL_I_BIT @ Disable I Cache\r
135 mcr p15, 0, r0, c1, c0, 0 @ Write control register\r
136 dsb\r
137 isb\r
138 bx LR\r
139\r
# Return the state of SCTLR.M in r0: 1 if the MMU is enabled, 0 if not.
140ASM_PFX(ArmMmuEnabled):\r
141 mrc p15,0,R0,c1,c0,0\r
142 and R0,R0,#1\r
3402aac7 143 bx LR\r
bd6b9799 144\r
# Enable the data cache: set SCTLR.C (DC_ON, bit 2) via read-modify-write.
145ASM_PFX(ArmEnableDataCache):\r
146 ldr R1,=DC_ON\r
147 mrc p15,0,R0,c1,c0,0 @Read control register configuration data\r
148 orr R0,R0,R1 @Set C bit\r
149 mcr p15,0,r0,c1,c0,0 @Write control register configuration data\r
150 dsb\r
151 isb\r
152 bx LR\r
3402aac7 153\r
bd6b9799 154ASM_PFX(ArmDisableDataCache):\r
155 ldr R1,=DC_ON\r
156 mrc p15,0,R0,c1,c0,0 @Read control register configuration data\r
157 bic R0,R0,R1 @Clear C bit\r
158 mcr p15,0,r0,c1,c0,0 @Write control register configuration data\r
159 dsb\r
160 isb\r
161 bx LR\r
162\r
# Enable the instruction cache: set SCTLR.I (IC_ON, bit 12).
163ASM_PFX(ArmEnableInstructionCache):\r
164 ldr R1,=IC_ON\r
165 mrc p15,0,R0,c1,c0,0 @Read control register configuration data\r
166 orr R0,R0,R1 @Set I bit\r
167 mcr p15,0,r0,c1,c0,0 @Write control register configuration data\r
168 dsb\r
169 isb\r
170 bx LR\r
3402aac7 171\r
bd6b9799 172ASM_PFX(ArmDisableInstructionCache):\r
173 ldr R1,=IC_ON\r
174 mrc p15,0,R0,c1,c0,0 @Read control register configuration data\r
175 bic R0,R0,R1 @Clear I bit.\r
176 mcr p15,0,r0,c1,c0,0 @Write control register configuration data\r
177 dsb\r
178 isb\r
179 bx LR\r
180\r
# Enable the legacy SWP/SWPB instructions: set SCTLR.SW (bit 10, 0x400).
181ASM_PFX(ArmEnableSWPInstruction):\r
182 mrc p15, 0, r0, c1, c0, 0\r
183 orr r0, r0, #0x00000400\r
184 mcr p15, 0, r0, c1, c0, 0\r
185 isb\r
186 bx LR\r
187\r
# Enable program-flow (branch) prediction: set SCTLR.Z (bit 11, 0x800).
188ASM_PFX(ArmEnableBranchPrediction):\r
189 mrc p15, 0, r0, c1, c0, 0\r
190 orr r0, r0, #0x00000800\r
191 mcr p15, 0, r0, c1, c0, 0\r
192 dsb\r
193 isb\r
194 bx LR\r
195\r
# Disable program-flow (branch) prediction: clear SCTLR.Z (bit 11, 0x800).
196ASM_PFX(ArmDisableBranchPrediction):\r
197 mrc p15, 0, r0, c1, c0, 0\r
198 bic r0, r0, #0x00000800\r
199 mcr p15, 0, r0, c1, c0, 0\r
200 dsb\r
201 isb\r
202 bx LR\r
203\r
# Select low exception vectors (base 0x00000000, or VBAR if implemented):
# clear SCTLR.V (bit 13).
204ASM_PFX(ArmSetLowVectors):\r
205 mrc p15, 0, r0, c1, c0, 0 @ Read SCTLR into R0 (Read control register configuration data)\r
206 bic r0, r0, #0x00002000 @ clear V bit\r
207 mcr p15, 0, r0, c1, c0, 0 @ Write R0 into SCTLR (Write control register configuration data)\r
208 isb\r
209 bx LR\r
210\r
# Select high exception vectors (base 0xFFFF0000): set SCTLR.V (bit 13).
211ASM_PFX(ArmSetHighVectors):\r
212 mrc p15, 0, r0, c1, c0, 0 @ Read SCTLR into R0 (Read control register configuration data)\r
c6ba1c12 213 orr r0, r0, #0x00002000 @ Set V bit\r
bd6b9799 214 mcr p15, 0, r0, c1, c0, 0 @ Write R0 into SCTLR (Write control register configuration data)\r
215 isb\r
216 bx LR\r
217\r
# Apply a line operation to every data/unified cache line, by set/way, for
# every cache level up to the Level of Coherency (LoC) read from CLIDR.
# In:  r0 = pointer to a callback taking a set/way/level word in r0
#      (e.g. ArmCleanDataCacheEntryBySetWay above).
# Register roles: R1 = callback, R6 = CLIDR, R3 = 2*LoC (loop bound),
# R10 = 2*level (CSSELR value), R4/R9 = way counter, R7 = set counter,
# R5 = way-field shift, R2 = line-length shift (log2 bytes per line).
# NOTE(review): levels advance by 2 because CSSELR holds the level in
# bits [3:1]; the LSR #23 / LSR #26-style shifts keep that *2 scaling.
218ASM_PFX(ArmV7AllDataCachesOperation):\r
219 stmfd SP!,{r4-r12, LR}\r
220 mov R1, R0 @ Save Function call in R1\r
221 mrc p15, 1, R6, c0, c0, 1 @ Read CLIDR\r
222 ands R3, R6, #0x7000000 @ Mask out all but Level of Coherency (LoC)\r
223 mov R3, R3, LSR #23 @ Cache level value (naturally aligned)\r
224 beq L_Finished\r
225 mov R10, #0\r
226\r
@ Per-level loop: decode the cache type for this level from CLIDR.
3402aac7 227Loop1:\r
bd6b9799 228 add R2, R10, R10, LSR #1 @ Work out 3xcachelevel\r
229 mov R12, R6, LSR R2 @ bottom 3 bits are the Cache type for this level\r
230 and R12, R12, #7 @ get those 3 bits alone\r
231 cmp R12, #2\r
232 blt L_Skip @ no cache or only instruction cache at this level\r
233 mcr p15, 2, R10, c0, c0, 0 @ write the Cache Size selection register (CSSELR) // OR in 1 for Instruction\r
3402aac7 234 isb @ isb to sync the change to the CacheSizeID reg\r
bd6b9799 235 mrc p15, 1, R12, c0, c0, 0 @ reads current Cache Size ID register (CCSIDR)\r
236 and R2, R12, #0x7 @ extract the line length field\r
237 add R2, R2, #4 @ add 4 for the line length offset (log2 16 bytes)\r
@ mov/sub pair builds the 0x3FF mask without a literal pool entry.
238@ ldr R4, =0x3FF\r
239 mov R4, #0x400\r
240 sub R4, R4, #1\r
241 ands R4, R4, R12, LSR #3 @ R4 is the max number on the way size (right aligned)\r
242 clz R5, R4 @ R5 is the bit position of the way size increment\r
@ mov/sub pair builds the 0x7FFF mask without a literal pool entry.
243@ ldr R7, =0x00007FFF\r
244 mov R7, #0x00008000\r
245 sub R7, R7, #1\r
246 ands R7, R7, R12, LSR #13 @ R7 is the max number of the index size (right aligned)\r
247\r
@ Inner loops: for every set (R7) and way (R9), compose the set/way word
@ and invoke the callback in R1 on it.
3402aac7 248Loop2:\r
bd6b9799 249 mov R9, R4 @ R9 working copy of the max way size (right aligned)\r
250\r
3402aac7 251Loop3:\r
bd6b9799 252 orr R0, R10, R9, LSL R5 @ factor in the way number and cache number into R11\r
253 orr R0, R0, R7, LSL R2 @ factor in the index number\r
254\r
255 blx R1\r
256\r
257 subs R9, R9, #1 @ decrement the way number\r
258 bge Loop3\r
259 subs R7, R7, #1 @ decrement the index\r
260 bge Loop2\r
3402aac7 261L_Skip:\r
bd6b9799 262 add R10, R10, #2 @ increment the cache number\r
263 cmp R3, R10\r
264 bgt Loop1\r
3402aac7 265\r
bd6b9799 266L_Finished:\r
267 dsb\r
268 ldmfd SP!, {r4-r12, lr}\r
d60f6af4 269 bx LR\r
270\r
# Same set/way walk as ArmV7AllDataCachesOperation, but only up to the
# Level of Unification (LoU field of CLIDR, bits [29:27]) - i.e. the levels
# that matter for instruction/data coherency at the Point of Unification.
# In:  r0 = pointer to a per-line callback taking a set/way/level word.
# Register roles mirror the function above (R1 = callback, R6 = CLIDR,
# R3 = 2*LoU, R10 = 2*level, R4/R9 = ways, R7 = sets).
271ASM_PFX(ArmV7PerformPoUDataCacheOperation):\r
272 stmfd SP!,{r4-r12, LR}\r
273 mov R1, R0 @ Save Function call in R1\r
274 mrc p15, 1, R6, c0, c0, 1 @ Read CLIDR\r
275 ands R3, R6, #0x38000000 @ Mask out all but Level of Unification (LoU)\r
276 mov R3, R3, LSR #26 @ Cache level value (naturally aligned)\r
277 beq Finished2\r
278 mov R10, #0\r
279\r
@ Per-level loop: decode the cache type for this level from CLIDR.
280Loop4:\r
281 add R2, R10, R10, LSR #1 @ Work out 3xcachelevel\r
282 mov R12, R6, LSR R2 @ bottom 3 bits are the Cache type for this level\r
283 and R12, R12, #7 @ get those 3 bits alone\r
284 cmp R12, #2\r
285 blt Skip2 @ no cache or only instruction cache at this level\r
286 mcr p15, 2, R10, c0, c0, 0 @ write the Cache Size selection register (CSSELR) // OR in 1 for Instruction\r
3402aac7 287 isb @ isb to sync the change to the CacheSizeID reg\r
d60f6af4 288 mrc p15, 1, R12, c0, c0, 0 @ reads current Cache Size ID register (CCSIDR)\r
289 and R2, R12, #0x7 @ extract the line length field\r
290 add R2, R2, #4 @ add 4 for the line length offset (log2 16 bytes)\r
291 ldr R4, =0x3FF\r
292 ands R4, R4, R12, LSR #3 @ R4 is the max number on the way size (right aligned)\r
293 clz R5, R4 @ R5 is the bit position of the way size increment\r
294 ldr R7, =0x00007FFF\r
295 ands R7, R7, R12, LSR #13 @ R7 is the max number of the index size (right aligned)\r
296\r
@ Inner loops: compose each set/way word and invoke the callback in R1.
297Loop5:\r
298 mov R9, R4 @ R9 working copy of the max way size (right aligned)\r
299\r
300Loop6:\r
301 orr R0, R10, R9, LSL R5 @ factor in the way number and cache number into R11\r
302 orr R0, R0, R7, LSL R2 @ factor in the index number\r
303\r
304 blx R1\r
305\r
306 subs R9, R9, #1 @ decrement the way number\r
307 bge Loop6\r
308 subs R7, R7, #1 @ decrement the index\r
309 bge Loop5\r
310Skip2:\r
311 add R10, R10, #2 @ increment the cache number\r
312 cmp R3, R10\r
313 bgt Loop4\r
3402aac7 314\r
d60f6af4 315Finished2:\r
316 dsb\r
317 ldmfd SP!, {r4-r12, lr}\r
bd6b9799 318 bx LR\r
319\r
# Data Memory Barrier: order memory accesses without stalling for completion.
320ASM_PFX(ArmDataMemoryBarrier):\r
321 dmb\r
322 bx LR\r
3402aac7 323\r
# Data Synchronization Barrier: wait for all outstanding memory accesses to
# complete. ArmDrainWriteBuffer is the legacy name for the same operation,
# so both symbols share this entry point.
cf93a378 324ASM_PFX(ArmDataSynchronizationBarrier):\r
bd6b9799 325ASM_PFX(ArmDrainWriteBuffer):\r
326 dsb\r
327 bx LR\r
3402aac7 328\r
bd6b9799 329ASM_PFX(ArmInstructionSynchronizationBarrier):\r
330 isb\r
331 bx LR\r
332\r
# Return the Vector Base Address Register (VBAR) in r0.
836c3500 333ASM_PFX(ArmReadVBar):\r
334 # Set the Address of the Vector Table in the VBAR register\r
335 mrc p15, 0, r0, c12, c0, 0\r
336 bx lr\r
337\r
# Program VBAR with the vector table address in r0, then clear SCTLR.V so
# the core actually uses VBAR rather than the fixed high vectors.
# Note: r0 is clobbered by the SCTLR read-modify-write that follows.
bd6b9799 338ASM_PFX(ArmWriteVBar):\r
339 # Set the Address of the Vector Table in the VBAR register\r
3402aac7 340 mcr p15, 0, r0, c12, c0, 0\r
bd6b9799 341 # Ensure the SCTLR.V bit is clear\r
342 mrc p15, 0, r0, c1, c0, 0 @ Read SCTLR into R0 (Read control register configuration data)\r
343 bic r0, r0, #0x00002000 @ clear V bit\r
344 mcr p15, 0, r0, c1, c0, 0 @ Write R0 into SCTLR (Write control register configuration data)\r
345 isb\r
346 bx lr\r
347\r
# Enable VFP/Advanced SIMD: grant full access to CP10/CP11 in CPACR
# (bits 20-23), isb so the access change is visible, then set FPEXC.EN
# (bit 30) via the VMSR-equivalent mcr to cp10.
348ASM_PFX(ArmEnableVFP):\r
349 # Read CPACR (Coprocessor Access Control Register)\r
350 mrc p15, 0, r0, c1, c0, 2\r
351 # Enable VPF access (Full Access to CP10, CP11) (V* instructions)\r
352 orr r0, r0, #0x00f00000\r
353 # Write back CPACR (Coprocessor Access Control Register)\r
354 mcr p15, 0, r0, c1, c0, 2\r
18029bb9 355 isb\r
bd6b9799 356 # Set EN bit in FPEXC. The Advanced SIMD and VFP extensions are enabled and operate normally.\r
357 mov r0, #0x40000000\r
358 mcr p10,#0x7,r0,c8,c0,#0\r
359 bx lr\r
360\r
# Enter low-power state until an interrupt/wakeup event (Wait For Interrupt).
361ASM_PFX(ArmCallWFI):\r
362 wfi\r
363 bx lr\r
364\r
# Return the Configuration Base Address Register (CBAR) in r0.
365#Note: Return 0 in Uniprocessor implementation\r
366ASM_PFX(ArmReadCbar):\r
367 mrc p15, 4, r0, c15, c0, 0 @ Read Configuration Base Address Register\r
368 bx lr\r
369\r
# Return the Multiprocessor Affinity Register (MPIDR) in r0.
bd6b9799 370ASM_PFX(ArmReadMpidr):\r
371 mrc p15, 0, r0, c0, c0, 5 @ read MPIDR\r
372 bx lr\r
3402aac7 373\r
# Return TPIDRURW (user read/write thread-ID register) in r0.
bd6b9799 374ASM_PFX(ArmReadTpidrurw):\r
375 mrc p15, 0, r0, c13, c0, 2 @ read TPIDRURW\r
376 bx lr\r
377\r
# Write r0 into TPIDRURW (user read/write thread-ID register).
378ASM_PFX(ArmWriteTpidrurw):\r
379 mcr p15, 0, r0, c13, c0, 2 @ write TPIDRURW\r
380 bx lr\r
381\r
# Return the Generic Timer field of ID_PFR1 (bits [19:16]) in r0:
# non-zero means the architected timer is implemented.
382ASM_PFX(ArmIsArchTimerImplemented):\r
383 mrc p15, 0, r0, c0, c1, 1 @ Read ID_PFR1\r
384 and r0, r0, #0x000F0000\r
385 bx lr\r
386\r
# Return the full Processor Feature Register 1 (ID_PFR1) in r0.
387ASM_PFX(ArmReadIdPfr1):\r
388 mrc p15, 0, r0, c0, c1, 1 @ Read ID_PFR1 Register\r
389 bx lr\r
390\r
# EDK2 macro: allows the linker to drop unreferenced functions from this file.
391ASM_FUNCTION_REMOVE_IF_UNREFERENCED\r