]>
Commit | Line | Data |
---|---|---|
#------------------------------------------------------------------------------
#
# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>
# Copyright (c) 2011, ARM Limited. All rights reserved.
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution.  The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
#------------------------------------------------------------------------------

16 | .text\r | |
17 | .align 2\r | |
18 | \r | |
19 | GCC_ASM_EXPORT (ArmInvalidateInstructionCache)\r | |
20 | GCC_ASM_EXPORT (ArmInvalidateDataCacheEntryByMVA)\r | |
21 | GCC_ASM_EXPORT (ArmCleanDataCacheEntryByMVA)\r | |
22 | GCC_ASM_EXPORT (ArmCleanInvalidateDataCacheEntryByMVA)\r | |
23 | GCC_ASM_EXPORT (ArmInvalidateDataCacheEntryBySetWay)\r | |
24 | GCC_ASM_EXPORT (ArmCleanDataCacheEntryBySetWay)\r | |
25 | GCC_ASM_EXPORT (ArmCleanInvalidateDataCacheEntryBySetWay)\r | |
26 | GCC_ASM_EXPORT (ArmDrainWriteBuffer)\r | |
27 | GCC_ASM_EXPORT (ArmEnableMmu)\r | |
28 | GCC_ASM_EXPORT (ArmDisableMmu)\r | |
29 | GCC_ASM_EXPORT (ArmDisableCachesAndMmu)\r | |
30 | GCC_ASM_EXPORT (ArmMmuEnabled)\r | |
31 | GCC_ASM_EXPORT (ArmEnableDataCache)\r | |
32 | GCC_ASM_EXPORT (ArmDisableDataCache)\r | |
33 | GCC_ASM_EXPORT (ArmEnableInstructionCache)\r | |
34 | GCC_ASM_EXPORT (ArmDisableInstructionCache)\r | |
35 | GCC_ASM_EXPORT (ArmEnableSWPInstruction)\r | |
36 | GCC_ASM_EXPORT (ArmEnableBranchPrediction)\r | |
37 | GCC_ASM_EXPORT (ArmDisableBranchPrediction)\r | |
38 | GCC_ASM_EXPORT (ArmSetLowVectors)\r | |
39 | GCC_ASM_EXPORT (ArmSetHighVectors)\r | |
40 | GCC_ASM_EXPORT (ArmV7AllDataCachesOperation)\r | |
d60f6af4 | 41 | GCC_ASM_EXPORT (ArmV7PerformPoUDataCacheOperation)\r |
bd6b9799 | 42 | GCC_ASM_EXPORT (ArmDataMemoryBarrier)\r |
43 | GCC_ASM_EXPORT (ArmDataSyncronizationBarrier)\r | |
44 | GCC_ASM_EXPORT (ArmInstructionSynchronizationBarrier)\r | |
836c3500 | 45 | GCC_ASM_EXPORT (ArmReadVBar)\r |
bd6b9799 | 46 | GCC_ASM_EXPORT (ArmWriteVBar)\r |
47 | GCC_ASM_EXPORT (ArmEnableVFP)\r | |
48 | GCC_ASM_EXPORT (ArmCallWFI)\r | |
49 | GCC_ASM_EXPORT (ArmReadCbar)\r | |
50 | GCC_ASM_EXPORT (ArmInvalidateInstructionAndDataTlb)\r | |
51 | GCC_ASM_EXPORT (ArmReadMpidr)\r | |
52 | GCC_ASM_EXPORT (ArmReadTpidrurw)\r | |
53 | GCC_ASM_EXPORT (ArmWriteTpidrurw)\r | |
54 | GCC_ASM_EXPORT (ArmIsArchTimerImplemented)\r | |
55 | GCC_ASM_EXPORT (ArmReadIdPfr1)\r | |
56 | \r | |
57 | .set DC_ON, (0x1<<2)\r | |
58 | .set IC_ON, (0x1<<12)\r | |
59 | .set CTRL_M_BIT, (1 << 0)\r | |
60 | .set CTRL_C_BIT, (1 << 2)\r | |
61 | .set CTRL_B_BIT, (1 << 7)\r | |
62 | .set CTRL_I_BIT, (1 << 12)\r | |
63 | \r | |
64 | \r | |
65 | ASM_PFX(ArmInvalidateDataCacheEntryByMVA):\r | |
66 | mcr p15, 0, r0, c7, c6, 1 @invalidate single data cache line \r | |
67 | dsb\r | |
68 | isb\r | |
69 | bx lr\r | |
70 | \r | |
71 | ASM_PFX(ArmCleanDataCacheEntryByMVA):\r | |
72 | mcr p15, 0, r0, c7, c10, 1 @clean single data cache line \r | |
73 | dsb\r | |
74 | isb\r | |
75 | bx lr\r | |
76 | \r | |
77 | \r | |
78 | ASM_PFX(ArmCleanInvalidateDataCacheEntryByMVA):\r | |
79 | mcr p15, 0, r0, c7, c14, 1 @clean and invalidate single data cache line\r | |
80 | dsb\r | |
81 | isb\r | |
82 | bx lr\r | |
83 | \r | |
84 | \r | |
85 | ASM_PFX(ArmInvalidateDataCacheEntryBySetWay):\r | |
86 | mcr p15, 0, r0, c7, c6, 2 @ Invalidate this line \r | |
87 | dsb\r | |
88 | isb\r | |
89 | bx lr\r | |
90 | \r | |
91 | \r | |
92 | ASM_PFX(ArmCleanInvalidateDataCacheEntryBySetWay):\r | |
93 | mcr p15, 0, r0, c7, c14, 2 @ Clean and Invalidate this line \r | |
94 | dsb\r | |
95 | isb\r | |
96 | bx lr\r | |
97 | \r | |
98 | \r | |
99 | ASM_PFX(ArmCleanDataCacheEntryBySetWay):\r | |
100 | mcr p15, 0, r0, c7, c10, 2 @ Clean this line \r | |
101 | dsb\r | |
102 | isb\r | |
103 | bx lr\r | |
104 | \r | |
105 | ASM_PFX(ArmInvalidateInstructionCache):\r | |
106 | mcr p15,0,R0,c7,c5,0 @Invalidate entire instruction cache\r | |
107 | dsb\r | |
108 | isb\r | |
109 | bx LR\r | |
110 | \r | |
111 | ASM_PFX(ArmEnableMmu):\r | |
112 | mrc p15,0,R0,c1,c0,0\r | |
113 | orr R0,R0,#1\r | |
114 | mcr p15,0,R0,c1,c0,0\r | |
115 | dsb\r | |
116 | isb\r | |
117 | bx LR\r | |
118 | \r | |
119 | \r | |
120 | ASM_PFX(ArmDisableMmu):\r | |
121 | mrc p15,0,R0,c1,c0,0\r | |
122 | bic R0,R0,#1\r | |
123 | mcr p15,0,R0,c1,c0,0 @Disable MMU\r | |
124 | \r | |
125 | mcr p15,0,R0,c8,c7,0 @Invalidate TLB\r | |
126 | mcr p15,0,R0,c7,c5,6 @Invalidate Branch predictor array\r | |
127 | dsb\r | |
128 | isb\r | |
129 | bx LR\r | |
130 | \r | |
131 | ASM_PFX(ArmDisableCachesAndMmu):\r | |
132 | mrc p15, 0, r0, c1, c0, 0 @ Get control register\r | |
133 | bic r0, r0, #CTRL_M_BIT @ Disable MMU\r | |
134 | bic r0, r0, #CTRL_C_BIT @ Disable D Cache\r | |
135 | bic r0, r0, #CTRL_I_BIT @ Disable I Cache\r | |
136 | mcr p15, 0, r0, c1, c0, 0 @ Write control register\r | |
137 | dsb\r | |
138 | isb\r | |
139 | bx LR\r | |
140 | \r | |
141 | ASM_PFX(ArmMmuEnabled):\r | |
142 | mrc p15,0,R0,c1,c0,0\r | |
143 | and R0,R0,#1\r | |
144 | bx LR \r | |
145 | \r | |
146 | ASM_PFX(ArmEnableDataCache):\r | |
147 | ldr R1,=DC_ON\r | |
148 | mrc p15,0,R0,c1,c0,0 @Read control register configuration data\r | |
149 | orr R0,R0,R1 @Set C bit\r | |
150 | mcr p15,0,r0,c1,c0,0 @Write control register configuration data\r | |
151 | dsb\r | |
152 | isb\r | |
153 | bx LR\r | |
154 | \r | |
155 | ASM_PFX(ArmDisableDataCache):\r | |
156 | ldr R1,=DC_ON\r | |
157 | mrc p15,0,R0,c1,c0,0 @Read control register configuration data\r | |
158 | bic R0,R0,R1 @Clear C bit\r | |
159 | mcr p15,0,r0,c1,c0,0 @Write control register configuration data\r | |
160 | dsb\r | |
161 | isb\r | |
162 | bx LR\r | |
163 | \r | |
164 | ASM_PFX(ArmEnableInstructionCache):\r | |
165 | ldr R1,=IC_ON\r | |
166 | mrc p15,0,R0,c1,c0,0 @Read control register configuration data\r | |
167 | orr R0,R0,R1 @Set I bit\r | |
168 | mcr p15,0,r0,c1,c0,0 @Write control register configuration data\r | |
169 | dsb\r | |
170 | isb\r | |
171 | bx LR\r | |
172 | \r | |
173 | ASM_PFX(ArmDisableInstructionCache):\r | |
174 | ldr R1,=IC_ON\r | |
175 | mrc p15,0,R0,c1,c0,0 @Read control register configuration data\r | |
176 | bic R0,R0,R1 @Clear I bit.\r | |
177 | mcr p15,0,r0,c1,c0,0 @Write control register configuration data\r | |
178 | dsb\r | |
179 | isb\r | |
180 | bx LR\r | |
181 | \r | |
182 | ASM_PFX(ArmEnableSWPInstruction):\r | |
183 | mrc p15, 0, r0, c1, c0, 0\r | |
184 | orr r0, r0, #0x00000400\r | |
185 | mcr p15, 0, r0, c1, c0, 0\r | |
186 | isb\r | |
187 | bx LR\r | |
188 | \r | |
189 | ASM_PFX(ArmEnableBranchPrediction):\r | |
190 | mrc p15, 0, r0, c1, c0, 0\r | |
191 | orr r0, r0, #0x00000800\r | |
192 | mcr p15, 0, r0, c1, c0, 0\r | |
193 | dsb\r | |
194 | isb\r | |
195 | bx LR\r | |
196 | \r | |
197 | ASM_PFX(ArmDisableBranchPrediction):\r | |
198 | mrc p15, 0, r0, c1, c0, 0\r | |
199 | bic r0, r0, #0x00000800\r | |
200 | mcr p15, 0, r0, c1, c0, 0\r | |
201 | dsb\r | |
202 | isb\r | |
203 | bx LR\r | |
204 | \r | |
205 | ASM_PFX(ArmSetLowVectors):\r | |
206 | mrc p15, 0, r0, c1, c0, 0 @ Read SCTLR into R0 (Read control register configuration data)\r | |
207 | bic r0, r0, #0x00002000 @ clear V bit\r | |
208 | mcr p15, 0, r0, c1, c0, 0 @ Write R0 into SCTLR (Write control register configuration data)\r | |
209 | isb\r | |
210 | bx LR\r | |
211 | \r | |
212 | ASM_PFX(ArmSetHighVectors):\r | |
213 | mrc p15, 0, r0, c1, c0, 0 @ Read SCTLR into R0 (Read control register configuration data)\r | |
214 | orr r0, r0, #0x00002000 @ clear V bit\r | |
215 | mcr p15, 0, r0, c1, c0, 0 @ Write R0 into SCTLR (Write control register configuration data)\r | |
216 | isb\r | |
217 | bx LR\r | |
218 | \r | |
219 | ASM_PFX(ArmV7AllDataCachesOperation):\r | |
220 | stmfd SP!,{r4-r12, LR}\r | |
221 | mov R1, R0 @ Save Function call in R1\r | |
222 | mrc p15, 1, R6, c0, c0, 1 @ Read CLIDR\r | |
223 | ands R3, R6, #0x7000000 @ Mask out all but Level of Coherency (LoC)\r | |
224 | mov R3, R3, LSR #23 @ Cache level value (naturally aligned)\r | |
225 | beq L_Finished\r | |
226 | mov R10, #0\r | |
227 | \r | |
228 | Loop1: \r | |
229 | add R2, R10, R10, LSR #1 @ Work out 3xcachelevel\r | |
230 | mov R12, R6, LSR R2 @ bottom 3 bits are the Cache type for this level\r | |
231 | and R12, R12, #7 @ get those 3 bits alone\r | |
232 | cmp R12, #2\r | |
233 | blt L_Skip @ no cache or only instruction cache at this level\r | |
234 | mcr p15, 2, R10, c0, c0, 0 @ write the Cache Size selection register (CSSELR) // OR in 1 for Instruction\r | |
235 | isb @ isb to sync the change to the CacheSizeID reg \r | |
236 | mrc p15, 1, R12, c0, c0, 0 @ reads current Cache Size ID register (CCSIDR)\r | |
237 | and R2, R12, #0x7 @ extract the line length field\r | |
238 | add R2, R2, #4 @ add 4 for the line length offset (log2 16 bytes)\r | |
239 | @ ldr R4, =0x3FF\r | |
240 | mov R4, #0x400\r | |
241 | sub R4, R4, #1\r | |
242 | ands R4, R4, R12, LSR #3 @ R4 is the max number on the way size (right aligned)\r | |
243 | clz R5, R4 @ R5 is the bit position of the way size increment\r | |
244 | @ ldr R7, =0x00007FFF\r | |
245 | mov R7, #0x00008000\r | |
246 | sub R7, R7, #1\r | |
247 | ands R7, R7, R12, LSR #13 @ R7 is the max number of the index size (right aligned)\r | |
248 | \r | |
249 | Loop2: \r | |
250 | mov R9, R4 @ R9 working copy of the max way size (right aligned)\r | |
251 | \r | |
252 | Loop3: \r | |
253 | orr R0, R10, R9, LSL R5 @ factor in the way number and cache number into R11\r | |
254 | orr R0, R0, R7, LSL R2 @ factor in the index number\r | |
255 | \r | |
256 | blx R1\r | |
257 | \r | |
258 | subs R9, R9, #1 @ decrement the way number\r | |
259 | bge Loop3\r | |
260 | subs R7, R7, #1 @ decrement the index\r | |
261 | bge Loop2\r | |
262 | L_Skip: \r | |
263 | add R10, R10, #2 @ increment the cache number\r | |
264 | cmp R3, R10\r | |
265 | bgt Loop1\r | |
266 | \r | |
267 | L_Finished:\r | |
268 | dsb\r | |
269 | ldmfd SP!, {r4-r12, lr}\r | |
d60f6af4 | 270 | bx LR\r |
271 | \r | |
272 | ASM_PFX(ArmV7PerformPoUDataCacheOperation):\r | |
273 | stmfd SP!,{r4-r12, LR}\r | |
274 | mov R1, R0 @ Save Function call in R1\r | |
275 | mrc p15, 1, R6, c0, c0, 1 @ Read CLIDR\r | |
276 | ands R3, R6, #0x38000000 @ Mask out all but Level of Unification (LoU)\r | |
277 | mov R3, R3, LSR #26 @ Cache level value (naturally aligned)\r | |
278 | beq Finished2\r | |
279 | mov R10, #0\r | |
280 | \r | |
281 | Loop4:\r | |
282 | add R2, R10, R10, LSR #1 @ Work out 3xcachelevel\r | |
283 | mov R12, R6, LSR R2 @ bottom 3 bits are the Cache type for this level\r | |
284 | and R12, R12, #7 @ get those 3 bits alone\r | |
285 | cmp R12, #2\r | |
286 | blt Skip2 @ no cache or only instruction cache at this level\r | |
287 | mcr p15, 2, R10, c0, c0, 0 @ write the Cache Size selection register (CSSELR) // OR in 1 for Instruction\r | |
288 | isb @ isb to sync the change to the CacheSizeID reg \r | |
289 | mrc p15, 1, R12, c0, c0, 0 @ reads current Cache Size ID register (CCSIDR)\r | |
290 | and R2, R12, #0x7 @ extract the line length field\r | |
291 | add R2, R2, #4 @ add 4 for the line length offset (log2 16 bytes)\r | |
292 | ldr R4, =0x3FF\r | |
293 | ands R4, R4, R12, LSR #3 @ R4 is the max number on the way size (right aligned)\r | |
294 | clz R5, R4 @ R5 is the bit position of the way size increment\r | |
295 | ldr R7, =0x00007FFF\r | |
296 | ands R7, R7, R12, LSR #13 @ R7 is the max number of the index size (right aligned)\r | |
297 | \r | |
298 | Loop5:\r | |
299 | mov R9, R4 @ R9 working copy of the max way size (right aligned)\r | |
300 | \r | |
301 | Loop6:\r | |
302 | orr R0, R10, R9, LSL R5 @ factor in the way number and cache number into R11\r | |
303 | orr R0, R0, R7, LSL R2 @ factor in the index number\r | |
304 | \r | |
305 | blx R1\r | |
306 | \r | |
307 | subs R9, R9, #1 @ decrement the way number\r | |
308 | bge Loop6\r | |
309 | subs R7, R7, #1 @ decrement the index\r | |
310 | bge Loop5\r | |
311 | Skip2:\r | |
312 | add R10, R10, #2 @ increment the cache number\r | |
313 | cmp R3, R10\r | |
314 | bgt Loop4\r | |
315 | \r | |
316 | Finished2:\r | |
317 | dsb\r | |
318 | ldmfd SP!, {r4-r12, lr}\r | |
bd6b9799 | 319 | bx LR\r |
320 | \r | |
321 | ASM_PFX(ArmDataMemoryBarrier):\r | |
322 | dmb\r | |
323 | bx LR\r | |
324 | \r | |
325 | ASM_PFX(ArmDataSyncronizationBarrier):\r | |
326 | ASM_PFX(ArmDrainWriteBuffer):\r | |
327 | dsb\r | |
328 | bx LR\r | |
329 | \r | |
330 | ASM_PFX(ArmInstructionSynchronizationBarrier):\r | |
331 | isb\r | |
332 | bx LR\r | |
333 | \r | |
836c3500 | 334 | ASM_PFX(ArmReadVBar):\r |
335 | # Set the Address of the Vector Table in the VBAR register\r | |
336 | mrc p15, 0, r0, c12, c0, 0\r | |
337 | bx lr\r | |
338 | \r | |
bd6b9799 | 339 | ASM_PFX(ArmWriteVBar):\r |
340 | # Set the Address of the Vector Table in the VBAR register\r | |
341 | mcr p15, 0, r0, c12, c0, 0 \r | |
342 | # Ensure the SCTLR.V bit is clear\r | |
343 | mrc p15, 0, r0, c1, c0, 0 @ Read SCTLR into R0 (Read control register configuration data)\r | |
344 | bic r0, r0, #0x00002000 @ clear V bit\r | |
345 | mcr p15, 0, r0, c1, c0, 0 @ Write R0 into SCTLR (Write control register configuration data)\r | |
346 | isb\r | |
347 | bx lr\r | |
348 | \r | |
349 | ASM_PFX(ArmEnableVFP):\r | |
350 | # Read CPACR (Coprocessor Access Control Register)\r | |
351 | mrc p15, 0, r0, c1, c0, 2\r | |
352 | # Enable VPF access (Full Access to CP10, CP11) (V* instructions)\r | |
353 | orr r0, r0, #0x00f00000\r | |
354 | # Write back CPACR (Coprocessor Access Control Register)\r | |
355 | mcr p15, 0, r0, c1, c0, 2\r | |
18029bb9 | 356 | isb\r |
bd6b9799 | 357 | # Set EN bit in FPEXC. The Advanced SIMD and VFP extensions are enabled and operate normally.\r |
358 | mov r0, #0x40000000\r | |
359 | mcr p10,#0x7,r0,c8,c0,#0\r | |
360 | bx lr\r | |
361 | \r | |
362 | ASM_PFX(ArmCallWFI):\r | |
363 | wfi\r | |
364 | bx lr\r | |
365 | \r | |
366 | #Note: Return 0 in Uniprocessor implementation\r | |
367 | ASM_PFX(ArmReadCbar):\r | |
368 | mrc p15, 4, r0, c15, c0, 0 @ Read Configuration Base Address Register\r | |
369 | bx lr\r | |
370 | \r | |
371 | ASM_PFX(ArmInvalidateInstructionAndDataTlb):\r | |
372 | mcr p15, 0, r0, c8, c7, 0 @ Invalidate Inst TLB and Data TLB\r | |
373 | dsb\r | |
374 | bx lr\r | |
375 | \r | |
376 | ASM_PFX(ArmReadMpidr):\r | |
377 | mrc p15, 0, r0, c0, c0, 5 @ read MPIDR\r | |
378 | bx lr\r | |
379 | \r | |
380 | ASM_PFX(ArmReadTpidrurw):\r | |
381 | mrc p15, 0, r0, c13, c0, 2 @ read TPIDRURW\r | |
382 | bx lr\r | |
383 | \r | |
384 | ASM_PFX(ArmWriteTpidrurw):\r | |
385 | mcr p15, 0, r0, c13, c0, 2 @ write TPIDRURW\r | |
386 | bx lr\r | |
387 | \r | |
388 | ASM_PFX(ArmIsArchTimerImplemented):\r | |
389 | mrc p15, 0, r0, c0, c1, 1 @ Read ID_PFR1\r | |
390 | and r0, r0, #0x000F0000\r | |
391 | bx lr\r | |
392 | \r | |
393 | ASM_PFX(ArmReadIdPfr1):\r | |
394 | mrc p15, 0, r0, c0, c1, 1 @ Read ID_PFR1 Register\r | |
395 | bx lr\r | |
396 | \r | |
397 | ASM_FUNCTION_REMOVE_IF_UNREFERENCED\r |