]>
Commit | Line | Data |
---|---|---|
bd6b9799 | 1 | #------------------------------------------------------------------------------ \r |
2 | #\r | |
3 | # Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>\r | |
4 | # Copyright (c) 2011, ARM Limited. All rights reserved.\r | |
5 | #\r | |
6 | # This program and the accompanying materials\r | |
7 | # are licensed and made available under the terms and conditions of the BSD License\r | |
8 | # which accompanies this distribution. The full text of the license may be found at\r | |
9 | # http://opensource.org/licenses/bsd-license.php\r | |
10 | #\r | |
11 | # THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,\r | |
12 | # WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.\r | |
13 | #\r | |
14 | #------------------------------------------------------------------------------\r | |
15 | \r | |
.text
.align 2

@ Export every entry point below as a global symbol (ASM_PFX applies the
@ platform's symbol-name prefix, if any).
GCC_ASM_EXPORT (ArmInvalidateInstructionCache)
GCC_ASM_EXPORT (ArmInvalidateDataCacheEntryByMVA)
GCC_ASM_EXPORT (ArmCleanDataCacheEntryByMVA)
GCC_ASM_EXPORT (ArmCleanInvalidateDataCacheEntryByMVA)
GCC_ASM_EXPORT (ArmInvalidateDataCacheEntryBySetWay)
GCC_ASM_EXPORT (ArmCleanDataCacheEntryBySetWay)
GCC_ASM_EXPORT (ArmCleanInvalidateDataCacheEntryBySetWay)
GCC_ASM_EXPORT (ArmDrainWriteBuffer)
GCC_ASM_EXPORT (ArmEnableMmu)
GCC_ASM_EXPORT (ArmDisableMmu)
GCC_ASM_EXPORT (ArmDisableCachesAndMmu)
GCC_ASM_EXPORT (ArmMmuEnabled)
GCC_ASM_EXPORT (ArmEnableDataCache)
GCC_ASM_EXPORT (ArmDisableDataCache)
GCC_ASM_EXPORT (ArmEnableInstructionCache)
GCC_ASM_EXPORT (ArmDisableInstructionCache)
GCC_ASM_EXPORT (ArmEnableSWPInstruction)
GCC_ASM_EXPORT (ArmEnableBranchPrediction)
GCC_ASM_EXPORT (ArmDisableBranchPrediction)
GCC_ASM_EXPORT (ArmSetLowVectors)
GCC_ASM_EXPORT (ArmSetHighVectors)
GCC_ASM_EXPORT (ArmV7AllDataCachesOperation)
GCC_ASM_EXPORT (ArmV7PerformPoUDataCacheOperation)
GCC_ASM_EXPORT (ArmDataMemoryBarrier)
GCC_ASM_EXPORT (ArmDataSyncronizationBarrier)
GCC_ASM_EXPORT (ArmInstructionSynchronizationBarrier)
GCC_ASM_EXPORT (ArmWriteVBar)
GCC_ASM_EXPORT (ArmEnableVFP)
GCC_ASM_EXPORT (ArmCallWFI)
GCC_ASM_EXPORT (ArmReadCbar)
GCC_ASM_EXPORT (ArmInvalidateInstructionAndDataTlb)
GCC_ASM_EXPORT (ArmReadMpidr)
GCC_ASM_EXPORT (ArmReadTpidrurw)
GCC_ASM_EXPORT (ArmWriteTpidrurw)
GCC_ASM_EXPORT (ArmIsArchTimerImplemented)
GCC_ASM_EXPORT (ArmReadIdPfr1)
55 | \r | |
@ SCTLR (CP15 c1, c0, 0) control-register bit masks.
@ NOTE(review): DC_ON/IC_ON duplicate CTRL_C_BIT/CTRL_I_BIT (same bit
@ positions); both naming schemes are used by the routines below.
.set DC_ON, (0x1<<2)        @ SCTLR.C  - data cache enable (== CTRL_C_BIT)
.set IC_ON, (0x1<<12)       @ SCTLR.I  - instruction cache enable (== CTRL_I_BIT)
.set CTRL_M_BIT,  (1 << 0)  @ SCTLR.M  - MMU enable
.set CTRL_C_BIT,  (1 << 2)  @ SCTLR.C  - data cache enable
.set CTRL_B_BIT,  (1 << 7)  @ SCTLR bit 7 - presumably legacy 'B' endianness bit; unused here
.set CTRL_I_BIT,  (1 << 12) @ SCTLR.I  - instruction cache enable
62 | \r | |
63 | \r | |
@ Invalidate one data cache line by virtual address (DCIMVAC).
@ In:  r0 = virtual address within the line.  Out: none.
ASM_PFX(ArmInvalidateDataCacheEntryByMVA):
  mcr     p15, 0, r0, c7, c6, 1   @ invalidate single data cache line
  dsb                             @ wait for the maintenance op to complete
  isb
  bx      lr
69 | \r | |
@ Clean (write back) one data cache line by virtual address (DCCMVAC).
@ In:  r0 = virtual address within the line.  Out: none.
ASM_PFX(ArmCleanDataCacheEntryByMVA):
  mcr     p15, 0, r0, c7, c10, 1  @ clean single data cache line
  dsb                             @ wait for the maintenance op to complete
  isb
  bx      lr
75 | \r | |
76 | \r | |
@ Clean and invalidate one data cache line by virtual address (DCCIMVAC).
@ In:  r0 = virtual address within the line.  Out: none.
ASM_PFX(ArmCleanInvalidateDataCacheEntryByMVA):
  mcr     p15, 0, r0, c7, c14, 1  @ clean and invalidate single data cache line
  dsb                             @ wait for the maintenance op to complete
  isb
  bx      lr
82 | \r | |
83 | \r | |
@ Invalidate one data cache line by set/way (DCISW).
@ In:  r0 = set/way/level value as encoded for CP15 c7,c6,2.  Out: none.
ASM_PFX(ArmInvalidateDataCacheEntryBySetWay):
  mcr     p15, 0, r0, c7, c6, 2   @ Invalidate this line
  dsb                             @ wait for the maintenance op to complete
  isb
  bx      lr
89 | \r | |
90 | \r | |
@ Clean and invalidate one data cache line by set/way (DCCISW).
@ In:  r0 = set/way/level value as encoded for CP15 c7,c14,2.  Out: none.
ASM_PFX(ArmCleanInvalidateDataCacheEntryBySetWay):
  mcr     p15, 0, r0, c7, c14, 2  @ Clean and Invalidate this line
  dsb                             @ wait for the maintenance op to complete
  isb
  bx      lr
96 | \r | |
97 | \r | |
@ Clean (write back) one data cache line by set/way (DCCSW).
@ In:  r0 = set/way/level value as encoded for CP15 c7,c10,2.  Out: none.
ASM_PFX(ArmCleanDataCacheEntryBySetWay):
  mcr     p15, 0, r0, c7, c10, 2  @ Clean this line
  dsb                             @ wait for the maintenance op to complete
  isb
  bx      lr
103 | \r | |
@ Invalidate the entire instruction cache (ICIALLU).  The value in R0 is
@ ignored by the operation.  Clobbers: none.
ASM_PFX(ArmInvalidateInstructionCache):
  mcr     p15,0,R0,c7,c5,0        @ Invalidate entire instruction cache
  dsb                             @ complete the invalidation
  isb                             @ flush the pipeline so new code is fetched
  bx      LR
109 | \r | |
@ Enable the MMU by setting SCTLR.M.
@ In: none.  Out: none.  Clobbers: r0.
ASM_PFX(ArmEnableMmu):
  mrc     p15,0,R0,c1,c0,0        @ read SCTLR
  orr     R0,R0,#CTRL_M_BIT       @ set M bit (named constant instead of magic #1)
  mcr     p15,0,R0,c1,c0,0        @ write SCTLR; MMU on
  dsb
  isb                             @ ensure subsequent fetches use translation
  bx      LR
117 | \r | |
118 | \r | |
@ Disable the MMU (clear SCTLR.M), then invalidate the unified TLB and the
@ branch predictor so no stale translations/predictions survive.
@ In: none.  Out: none.  Clobbers: r0.
ASM_PFX(ArmDisableMmu):
  mrc     p15,0,R0,c1,c0,0        @ read SCTLR
  bic     R0,R0,#CTRL_M_BIT       @ clear M bit (named constant instead of magic #1)
  mcr     p15,0,R0,c1,c0,0        @ Disable MMU

  mcr     p15,0,R0,c8,c7,0        @ Invalidate TLB (TLBIALL; R0 value is ignored)
  mcr     p15,0,R0,c7,c5,6        @ Invalidate Branch predictor array (BPIALL)
  dsb
  isb
  bx      LR
129 | \r | |
@ Disable the MMU, data cache and instruction cache in a single SCTLR update.
@ In: none.  Out: none.  Clobbers: r0.
ASM_PFX(ArmDisableCachesAndMmu):
  mrc     p15, 0, r0, c1, c0, 0   @ Get control register
  bic     r0, r0, #CTRL_M_BIT     @ Disable MMU
  bic     r0, r0, #CTRL_C_BIT     @ Disable D Cache
  bic     r0, r0, #CTRL_I_BIT     @ Disable I Cache
  mcr     p15, 0, r0, c1, c0, 0   @ Write control register
  dsb
  isb
  bx      LR
139 | \r | |
@ Return the state of SCTLR.M.
@ Out: r0 = 1 if the MMU is enabled, 0 if not.
ASM_PFX(ArmMmuEnabled):
  mrc     p15,0,R0,c1,c0,0        @ read SCTLR
  and     R0,R0,#CTRL_M_BIT       @ isolate M bit (named constant instead of magic #1)
  bx      LR
144 | \r | |
@ Enable the data cache by setting SCTLR.C.
@ In: none.  Out: none.  Clobbers: r0, r1.
ASM_PFX(ArmEnableDataCache):
  ldr     R1,=DC_ON               @ R1 = SCTLR.C mask
  mrc     p15,0,R0,c1,c0,0        @ Read control register configuration data
  orr     R0,R0,R1                @ Set C bit
  mcr     p15,0,r0,c1,c0,0        @ Write control register configuration data
  dsb
  isb
  bx      LR
153 | \r | |
@ Disable the data cache by clearing SCTLR.C.
@ In: none.  Out: none.  Clobbers: r0, r1.
ASM_PFX(ArmDisableDataCache):
  ldr     R1,=DC_ON               @ R1 = SCTLR.C mask
  mrc     p15,0,R0,c1,c0,0        @ Read control register configuration data
  bic     R0,R0,R1                @ Clear C bit
  mcr     p15,0,r0,c1,c0,0        @ Write control register configuration data
  dsb
  isb
  bx      LR
162 | \r | |
@ Enable the instruction cache by setting SCTLR.I.
@ In: none.  Out: none.  Clobbers: r0, r1.
ASM_PFX(ArmEnableInstructionCache):
  ldr     R1,=IC_ON               @ R1 = SCTLR.I mask
  mrc     p15,0,R0,c1,c0,0        @ Read control register configuration data
  orr     R0,R0,R1                @ Set I bit
  mcr     p15,0,r0,c1,c0,0        @ Write control register configuration data
  dsb
  isb
  bx      LR
171 | \r | |
@ Disable the instruction cache by clearing SCTLR.I.
@ In: none.  Out: none.  Clobbers: r0, r1.
ASM_PFX(ArmDisableInstructionCache):
  ldr     R1,=IC_ON               @ R1 = SCTLR.I mask
  mrc     p15,0,R0,c1,c0,0        @ Read control register configuration data
  bic     R0,R0,R1                @ Clear I bit
  mcr     p15,0,r0,c1,c0,0        @ Write control register configuration data
  dsb
  isb
  bx      LR
180 | \r | |
@ Enable the SWP/SWPB instructions by setting SCTLR bit 10 (0x400 —
@ presumably the SW bit; name of the routine supports this).  Clobbers: r0.
ASM_PFX(ArmEnableSWPInstruction):
  mrc     p15, 0, r0, c1, c0, 0   @ read SCTLR
  orr     r0, r0, #0x00000400     @ set bit 10
  mcr     p15, 0, r0, c1, c0, 0   @ write SCTLR
  isb
  bx      LR
187 | \r | |
@ Enable program-flow (branch) prediction: set SCTLR bit 11 (Z bit, 0x800).
@ Clobbers: r0.
ASM_PFX(ArmEnableBranchPrediction):
  mrc     p15, 0, r0, c1, c0, 0   @ read SCTLR
  orr     r0, r0, #0x00000800     @ set Z bit
  mcr     p15, 0, r0, c1, c0, 0   @ write SCTLR
  dsb
  isb
  bx      LR
195 | \r | |
@ Disable program-flow (branch) prediction: clear SCTLR bit 11 (Z bit, 0x800).
@ Clobbers: r0.
ASM_PFX(ArmDisableBranchPrediction):
  mrc     p15, 0, r0, c1, c0, 0   @ read SCTLR
  bic     r0, r0, #0x00000800     @ clear Z bit
  mcr     p15, 0, r0, c1, c0, 0   @ write SCTLR
  dsb
  isb
  bx      LR
203 | \r | |
@ Select the low exception vector base (0x00000000) by clearing SCTLR.V.
@ Clobbers: r0.
ASM_PFX(ArmSetLowVectors):
  mrc     p15, 0, r0, c1, c0, 0   @ Read SCTLR into R0 (Read control register configuration data)
  bic     r0, r0, #0x00002000     @ clear V bit (bit 13)
  mcr     p15, 0, r0, c1, c0, 0   @ Write R0 into SCTLR (Write control register configuration data)
  isb
  bx      LR
210 | \r | |
@ Select the high exception vector base (0xFFFF0000) by setting SCTLR.V.
@ Clobbers: r0.
ASM_PFX(ArmSetHighVectors):
  mrc     p15, 0, r0, c1, c0, 0   @ Read SCTLR into R0 (Read control register configuration data)
  orr     r0, r0, #0x00002000     @ set V bit (bit 13) — original comment wrongly said "clear"
  mcr     p15, 0, r0, c1, c0, 0   @ Write R0 into SCTLR (Write control register configuration data)
  isb
  bx      LR
217 | \r | |
@ Walk every data/unified cache level up to the Level of Coherency (LoC)
@ and invoke the set/way maintenance function passed in R0 once per
@ set/way/level combination.
@ In:  R0 = pointer to a function taking a set/way value in r0
@           (e.g. ArmCleanDataCacheEntryBySetWay).
@ Out: none.  r4-r12 are preserved; r0-r3 are clobbered.
ASM_PFX(ArmV7AllDataCachesOperation):
  stmfd SP!,{r4-r12, LR}
  mov   R1, R0                @ Save Function call in R1
  mrc   p15, 1, R6, c0, c0, 1 @ Read CLIDR
  ands  R3, R6, #0x7000000    @ Mask out all but Level of Coherency (LoC), CLIDR[26:24]
  mov   R3, R3, LSR #23       @ R3 = LoC * 2 (level counter below steps by 2)
  beq   L_Finished            @ LoC == 0: no cache maintenance required
  mov   R10, #0               @ R10 = current cache level * 2

Loop1:
  add   R2, R10, R10, LSR #1  @ Work out 3 x cachelevel (CLIDR holds 3 bits per level)
  mov   R12, R6, LSR R2       @ bottom 3 bits are the Cache type for this level
  and   R12, R12, #7          @ get those 3 bits alone
  cmp   R12, #2               @ type < 2: no data cache at this level
  blt   L_Skip                @ no cache or only instruction cache at this level
  mcr   p15, 2, R10, c0, c0, 0 @ write the Cache Size selection register (CSSELR)
  isb                         @ isb to sync the change to the CacheSizeID reg
  mrc   p15, 1, R12, c0, c0, 0 @ reads current Cache Size ID register (CCSIDR)
  and   R2, R12, #0x7         @ extract the line length field
  add   R2, R2, #4            @ add 4 for the line length offset (log2 16 bytes)
  @ Build 0x3FF without a literal pool: 10-bit associativity mask.
  mov   R4, #0x400
  sub   R4, R4, #1
  ands  R4, R4, R12, LSR #3   @ R4 is the max number on the way size (right aligned)
  clz   R5, R4                @ R5 is the bit position of the way size increment
  @ Build 0x7FFF without a literal pool: 15-bit number-of-sets mask.
  mov   R7, #0x00008000
  sub   R7, R7, #1
  ands  R7, R7, R12, LSR #13  @ R7 is the max number of the index size (right aligned)

Loop2:
  mov   R9, R4                @ R9 working copy of the max way size (right aligned)

Loop3:
  orr   R0, R10, R9, LSL R5   @ factor the way number and cache number into R0
  orr   R0, R0, R7, LSL R2    @ factor in the index number

  blx   R1                    @ call the per-line set/way operation with R0

  subs  R9, R9, #1            @ decrement the way number
  bge   Loop3
  subs  R7, R7, #1            @ decrement the index
  bge   Loop2
L_Skip:
  add   R10, R10, #2          @ increment the cache number (level * 2)
  cmp   R3, R10
  bgt   Loop1

L_Finished:
  dsb                         @ ensure all maintenance operations have completed
  ldmfd SP!, {r4-r12, lr}
  bx    LR
270 | \r | |
@ Same set/way cache walk as ArmV7AllDataCachesOperation, but bounded by
@ the Level of Unification (LoUU, CLIDR[29:27]) instead of the Level of
@ Coherency — i.e. maintenance to the Point of Unification only.
@ In:  R0 = pointer to a function taking a set/way value in r0.
@ Out: none.  r4-r12 are preserved; r0-r3 are clobbered.
ASM_PFX(ArmV7PerformPoUDataCacheOperation):
  stmfd SP!,{r4-r12, LR}
  mov   R1, R0                @ Save Function call in R1
  mrc   p15, 1, R6, c0, c0, 1 @ Read CLIDR
  ands  R3, R6, #0x38000000   @ Mask out all but Level of Unification (LoU), CLIDR[29:27]
  mov   R3, R3, LSR #26       @ R3 = LoU * 2 (level counter below steps by 2)
  beq   Finished2             @ LoU == 0: nothing to do
  mov   R10, #0               @ R10 = current cache level * 2

Loop4:
  add   R2, R10, R10, LSR #1  @ Work out 3 x cachelevel (CLIDR holds 3 bits per level)
  mov   R12, R6, LSR R2       @ bottom 3 bits are the Cache type for this level
  and   R12, R12, #7          @ get those 3 bits alone
  cmp   R12, #2               @ type < 2: no data cache at this level
  blt   Skip2                 @ no cache or only instruction cache at this level
  mcr   p15, 2, R10, c0, c0, 0 @ write the Cache Size selection register (CSSELR)
  isb                         @ isb to sync the change to the CacheSizeID reg
  mrc   p15, 1, R12, c0, c0, 0 @ reads current Cache Size ID register (CCSIDR)
  and   R2, R12, #0x7         @ extract the line length field
  add   R2, R2, #4            @ add 4 for the line length offset (log2 16 bytes)
  ldr   R4, =0x3FF            @ 10-bit associativity mask
  ands  R4, R4, R12, LSR #3   @ R4 is the max number on the way size (right aligned)
  clz   R5, R4                @ R5 is the bit position of the way size increment
  ldr   R7, =0x00007FFF       @ 15-bit number-of-sets mask
  ands  R7, R7, R12, LSR #13  @ R7 is the max number of the index size (right aligned)

Loop5:
  mov   R9, R4                @ R9 working copy of the max way size (right aligned)

Loop6:
  orr   R0, R10, R9, LSL R5   @ factor the way number and cache number into R0
  orr   R0, R0, R7, LSL R2    @ factor in the index number

  blx   R1                    @ call the per-line set/way operation with R0

  subs  R9, R9, #1            @ decrement the way number
  bge   Loop6
  subs  R7, R7, #1            @ decrement the index
  bge   Loop5
Skip2:
  add   R10, R10, #2          @ increment the cache number (level * 2)
  cmp   R3, R10
  bgt   Loop4

Finished2:
  dsb                         @ ensure all maintenance operations have completed
  ldmfd SP!, {r4-r12, lr}
  bx    LR
319 | \r | |
@ Data Memory Barrier: order memory accesses before/after this point.
ASM_PFX(ArmDataMemoryBarrier):
  dmb
  bx      LR
323 | \r | |
@ Data Synchronization Barrier.  ArmDrainWriteBuffer is the legacy alias
@ for the same operation — both labels share this single implementation.
ASM_PFX(ArmDataSyncronizationBarrier):
ASM_PFX(ArmDrainWriteBuffer):
  dsb
  bx      LR
328 | \r | |
@ Instruction Synchronization Barrier: flush the pipeline.
ASM_PFX(ArmInstructionSynchronizationBarrier):
  isb
  bx      LR
332 | \r | |
@ Write the vector table base address to VBAR, and clear SCTLR.V so the
@ core actually uses VBAR (rather than the fixed high-vectors address).
@ In: r0 = vector table address.  Clobbers: r0.
ASM_PFX(ArmWriteVBar):
  # Set the Address of the Vector Table in the VBAR register
  mcr     p15, 0, r0, c12, c0, 0
  # Ensure the SCTLR.V bit is clear
  mrc     p15, 0, r0, c1, c0, 0   @ Read SCTLR into R0 (Read control register configuration data)
  bic     r0, r0, #0x00002000     @ clear V bit
  mcr     p15, 0, r0, c1, c0, 0   @ Write R0 into SCTLR (Write control register configuration data)
  isb
  bx      lr
342 | \r | |
@ Enable VFP / Advanced SIMD: grant full CP10/CP11 access in CPACR, then
@ set FPEXC.EN.  Clobbers: r0.
ASM_PFX(ArmEnableVFP):
  # Read CPACR (Coprocessor Access Control Register)
  mrc     p15, 0, r0, c1, c0, 2
  # Enable VFP access (Full Access to CP10, CP11) (V* instructions)
  orr     r0, r0, #0x00f00000
  # Write back CPACR (Coprocessor Access Control Register)
  mcr     p15, 0, r0, c1, c0, 2
  isb                             @ make the CPACR change visible before touching FPEXC
  # Set EN bit in FPEXC. The Advanced SIMD and VFP extensions are enabled and operate normally.
  mov     r0, #0x40000000
  mcr     p10,#0x7,r0,c8,c0,#0    @ coprocessor encoding of VMSR FPEXC, r0
  bx      lr
355 | \r | |
@ Execute Wait For Interrupt; returns after the core is woken.
ASM_PFX(ArmCallWFI):
  wfi
  bx      lr
359 | \r | |
@ Read the Configuration Base Address Register (CBAR).
# Note: Return 0 in Uniprocessor implementation
ASM_PFX(ArmReadCbar):
  mrc     p15, 4, r0, c15, c0, 0  @ Read Configuration Base Address Register
  bx      lr
364 | \r | |
@ Invalidate the entire unified TLB (TLBIALL).  The value in r0 is ignored
@ by the operation.
ASM_PFX(ArmInvalidateInstructionAndDataTlb):
  mcr     p15, 0, r0, c8, c7, 0   @ Invalidate Inst TLB and Data TLB
  dsb                             @ complete the invalidation before returning
  bx      lr
369 | \r | |
@ Return the Multiprocessor Affinity Register (MPIDR) in r0.
ASM_PFX(ArmReadMpidr):
  mrc     p15, 0, r0, c0, c0, 5   @ read MPIDR
  bx      lr
373 | \r | |
@ Return TPIDRURW (user read/write thread-ID register) in r0.
ASM_PFX(ArmReadTpidrurw):
  mrc     p15, 0, r0, c13, c0, 2  @ read TPIDRURW
  bx      lr
377 | \r | |
@ Write r0 to TPIDRURW (user read/write thread-ID register).
ASM_PFX(ArmWriteTpidrurw):
  mcr     p15, 0, r0, c13, c0, 2  @ write TPIDRURW
  bx      lr
381 | \r | |
@ Check for the Generic (architected) Timer: returns the ID_PFR1[19:16]
@ field, left in place.  Nonzero in r0 means the timer is implemented.
ASM_PFX(ArmIsArchTimerImplemented):
  mrc     p15, 0, r0, c0, c1, 1   @ Read ID_PFR1
  and     r0, r0, #0x000F0000     @ isolate the Generic Timer field
  bx      lr
386 | \r | |
@ Return the full ID_PFR1 (Processor Feature Register 1) in r0.
ASM_PFX(ArmReadIdPfr1):
  mrc     p15, 0, r0, c0, c1, 1   @ Read ID_PFR1 Register
  bx      lr
390 | \r | |
@ Build-system macro: allow the linker to discard unreferenced functions.
ASM_FUNCTION_REMOVE_IF_UNREFERENCED