#------------------------------------------------------------------------------
#
# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution.  The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
#------------------------------------------------------------------------------

# Exported entry points.
# ASM_PFX applies the toolchain's global-symbol prefix; INTERWORK_FUNC marks
# the symbol for ARM/Thumb interworking (both macros are provided by the
# including build environment — not defined in this file).
.globl ASM_PFX(ArmInvalidateInstructionCache)
INTERWORK_FUNC(ArmInvalidateInstructionCache)
.globl ASM_PFX(ArmInvalidateDataCacheEntryByMVA)
INTERWORK_FUNC(ArmInvalidateDataCacheEntryByMVA)
.globl ASM_PFX(ArmCleanDataCacheEntryByMVA)
INTERWORK_FUNC(ArmCleanDataCacheEntryByMVA)
.globl ASM_PFX(ArmCleanInvalidateDataCacheEntryByMVA)
INTERWORK_FUNC(ArmCleanInvalidateDataCacheEntryByMVA)
.globl ASM_PFX(ArmInvalidateDataCacheEntryBySetWay)
INTERWORK_FUNC(ArmInvalidateDataCacheEntryBySetWay)
.globl ASM_PFX(ArmCleanDataCacheEntryBySetWay)
INTERWORK_FUNC(ArmCleanDataCacheEntryBySetWay)
.globl ASM_PFX(ArmCleanInvalidateDataCacheEntryBySetWay)
INTERWORK_FUNC(ArmCleanInvalidateDataCacheEntryBySetWay)
.globl ASM_PFX(ArmDrainWriteBuffer)
INTERWORK_FUNC(ArmDrainWriteBuffer)
.globl ASM_PFX(ArmEnableMmu)
INTERWORK_FUNC(ArmEnableMmu)
.globl ASM_PFX(ArmDisableMmu)
INTERWORK_FUNC(ArmDisableMmu)
.globl ASM_PFX(ArmMmuEnabled)
INTERWORK_FUNC(ArmMmuEnabled)
.globl ASM_PFX(ArmEnableDataCache)
INTERWORK_FUNC(ArmEnableDataCache)
.globl ASM_PFX(ArmDisableDataCache)
INTERWORK_FUNC(ArmDisableDataCache)
.globl ASM_PFX(ArmEnableInstructionCache)
INTERWORK_FUNC(ArmEnableInstructionCache)
.globl ASM_PFX(ArmDisableInstructionCache)
INTERWORK_FUNC(ArmDisableInstructionCache)
.globl ASM_PFX(ArmEnableBranchPrediction)
INTERWORK_FUNC(ArmEnableBranchPrediction)
.globl ASM_PFX(ArmDisableBranchPrediction)
INTERWORK_FUNC(ArmDisableBranchPrediction)
.globl ASM_PFX(ArmV7AllDataCachesOperation)
INTERWORK_FUNC(ArmV7AllDataCachesOperation)
.globl ASM_PFX(ArmDataMemoryBarrier)
INTERWORK_FUNC(ArmDataMemoryBarrier)
.globl ASM_PFX(ArmDataSyncronizationBarrier)
INTERWORK_FUNC(ArmDataSyncronizationBarrier)
.globl ASM_PFX(ArmInstructionSynchronizationBarrier)
INTERWORK_FUNC(ArmInstructionSynchronizationBarrier)

.text
.align 2

# SCTLR (CP15 c1, Control Register) bits used by the enable/disable
# cache routines below.
.set DC_ON, (0x1<<2)            @ C bit  (bit 2):  data cache enable
.set IC_ON, (0x1<<12)           @ I bit  (bit 12): instruction cache enable


# VOID ArmInvalidateDataCacheEntryByMVA (UINTN Address)
# In:  r0 = virtual address within the cache line to invalidate
ASM_PFX(ArmInvalidateDataCacheEntryByMVA):
  mcr   p15, 0, r0, c7, c6, 1   @ DCIMVAC: invalidate single data cache line by MVA
  dsb                           @ wait for the maintenance op to complete
  isb
  bx    lr


# VOID ArmCleanDataCacheEntryByMVA (UINTN Address)
# In:  r0 = virtual address within the cache line to clean
ASM_PFX(ArmCleanDataCacheEntryByMVA):
  mcr   p15, 0, r0, c7, c10, 1  @ DCCMVAC: clean single data cache line by MVA
  dsb                           @ wait for the maintenance op to complete
  isb
  bx    lr


# VOID ArmCleanInvalidateDataCacheEntryByMVA (UINTN Address)
# In:  r0 = virtual address within the cache line to clean and invalidate
ASM_PFX(ArmCleanInvalidateDataCacheEntryByMVA):
  mcr   p15, 0, r0, c7, c14, 1  @ DCCIMVAC: clean and invalidate single data cache line by MVA
  dsb                           @ wait for the maintenance op to complete
  isb
  bx    lr


# VOID ArmInvalidateDataCacheEntryBySetWay (UINTN SetWayFormat)
# In:  r0 = set/way/level value in the CP15 DC*SW encoding
ASM_PFX(ArmInvalidateDataCacheEntryBySetWay):
  mcr   p15, 0, r0, c7, c6, 2   @ DCISW: invalidate this line by set/way
  dsb                           @ wait for the maintenance op to complete
  isb
  bx    lr


# VOID ArmCleanInvalidateDataCacheEntryBySetWay (UINTN SetWayFormat)
# In:  r0 = set/way/level value in the CP15 DC*SW encoding
ASM_PFX(ArmCleanInvalidateDataCacheEntryBySetWay):
  mcr   p15, 0, r0, c7, c14, 2  @ DCCISW: clean and invalidate this line by set/way
  dsb                           @ wait for the maintenance op to complete
  isb
  bx    lr


# VOID ArmCleanDataCacheEntryBySetWay (UINTN SetWayFormat)
# In:  r0 = set/way/level value in the CP15 DC*SW encoding
ASM_PFX(ArmCleanDataCacheEntryBySetWay):
  mcr   p15, 0, r0, c7, c10, 2  @ DCCSW: clean this line by set/way
  dsb                           @ wait for the maintenance op to complete
  isb
  bx    lr


# VOID ArmInvalidateInstructionCache (VOID)
# Invalidates the entire instruction cache.
ASM_PFX(ArmInvalidateInstructionCache):
  mcr   p15, 0, r0, c7, c5, 0   @ ICIALLU: invalidate entire instruction cache (r0 value ignored)
  dsb
  isb                           @ flush the pipeline so stale prefetched instructions are discarded
  bx    lr

# VOID ArmEnableMmu (VOID)
# Sets the M bit in SCTLR. Assumes translation tables and TTBR/DACR have
# already been configured by the caller — TODO confirm at call sites.
ASM_PFX(ArmEnableMmu):
  mrc   p15, 0, r0, c1, c0, 0   @ read SCTLR
  orr   r0, r0, #1              @ set M bit (MMU enable)
  mcr   p15, 0, r0, c1, c0, 0   @ write SCTLR
  dsb
  isb                           @ ensure subsequent fetches use the new translation regime
  bx    lr

# BOOLEAN ArmMmuEnabled (VOID)
# Out: r0 = 1 if the MMU is enabled (SCTLR.M set), 0 otherwise.
ASM_PFX(ArmMmuEnabled):
  mrc   p15, 0, r0, c1, c0, 0   @ read SCTLR
  and   r0, r0, #1              @ isolate M bit
  bx    lr

# VOID ArmDisableMmu (VOID)
# Clears SCTLR.M, then invalidates the TLB and the branch predictor so no
# stale translations or predicted targets survive the switch.
ASM_PFX(ArmDisableMmu):
  mrc   p15, 0, r0, c1, c0, 0   @ read SCTLR
  bic   r0, r0, #1              @ clear M bit
  mcr   p15, 0, r0, c1, c0, 0   @ write SCTLR: disable MMU

  mcr   p15, 0, r0, c8, c7, 0   @ TLBIALL: invalidate entire unified TLB (r0 value ignored)
  mcr   p15, 0, r0, c7, c5, 6   @ BPIALL: invalidate branch predictor array (r0 value ignored)
  dsb
  isb
  bx    lr

# VOID ArmEnableDataCache (VOID)
# Sets SCTLR.C (DC_ON). Does not clean/invalidate the cache itself —
# callers are responsible for cache state before enabling.
ASM_PFX(ArmEnableDataCache):
  ldr   r1, =DC_ON              @ r1 = C bit mask
  mrc   p15, 0, r0, c1, c0, 0   @ read control register configuration data
  orr   r0, r0, r1              @ set C bit
  mcr   p15, 0, r0, c1, c0, 0   @ write control register configuration data
  dsb
  isb
  bx    lr

# VOID ArmDisableDataCache (VOID)
# Clears SCTLR.C (DC_ON). Does not clean dirty lines — callers must clean
# the cache first if data must reach memory.
ASM_PFX(ArmDisableDataCache):
  ldr   r1, =DC_ON              @ r1 = C bit mask
  mrc   p15, 0, r0, c1, c0, 0   @ read control register configuration data
  bic   r0, r0, r1              @ clear C bit
  mcr   p15, 0, r0, c1, c0, 0   @ write control register configuration data
  dsb
  isb
  bx    lr

# VOID ArmEnableInstructionCache (VOID)
# Sets SCTLR.I (IC_ON).
ASM_PFX(ArmEnableInstructionCache):
  ldr   r1, =IC_ON              @ r1 = I bit mask
  mrc   p15, 0, r0, c1, c0, 0   @ read control register configuration data
  orr   r0, r0, r1              @ set I bit
  mcr   p15, 0, r0, c1, c0, 0   @ write control register configuration data
  dsb
  isb
  bx    lr

# VOID ArmDisableInstructionCache (VOID)
# Clears SCTLR.I (IC_ON).
ASM_PFX(ArmDisableInstructionCache):
  ldr   r1, =IC_ON              @ r1 = I bit mask
  mrc   p15, 0, r0, c1, c0, 0   @ read control register configuration data
  bic   r0, r0, r1              @ clear I bit
  mcr   p15, 0, r0, c1, c0, 0   @ write control register configuration data
  dsb
  isb
  bx    lr

# VOID ArmEnableBranchPrediction (VOID)
# Sets SCTLR.Z (bit 11, branch prediction enable).
ASM_PFX(ArmEnableBranchPrediction):
  mrc   p15, 0, r0, c1, c0, 0   @ read SCTLR
  orr   r0, r0, #0x00000800     @ set Z bit
  mcr   p15, 0, r0, c1, c0, 0   @ write SCTLR
  dsb
  isb
  bx    lr

# VOID ArmDisableBranchPrediction (VOID)
# Clears SCTLR.Z (bit 11, branch prediction enable).
ASM_PFX(ArmDisableBranchPrediction):
  mrc   p15, 0, r0, c1, c0, 0   @ read SCTLR
  bic   r0, r0, #0x00000800     @ clear Z bit
  mcr   p15, 0, r0, c1, c0, 0   @ write SCTLR
  dsb
  isb
  bx    lr


# VOID ArmV7AllDataCachesOperation (ARM_V7_CACHE_OPERATION DataCacheOperation)
#
# Walks every data/unified cache level up to the Level of Coherency and calls
# the function in r0 once per set/way, passing the CP15 set/way-format operand
# in r0 (the standard ARMv7 CLIDR/CSSELR/CCSIDR iteration).
#
# In:     r0 = pointer to a by-set/way maintenance routine (e.g.
#              ArmCleanDataCacheEntryBySetWay); must only clobber r0
#              (the ones in this file satisfy that).
# Regs:   r1 = saved function pointer        r2 = line-length shift
#         r3 = 2 * LoC                       r4 = max way number
#         r5 = way field shift (clz)         r6 = CLIDR
#         r7 = current set (index)           r9 = current way
#         r10 = 2 * current cache level      r12 = scratch / CCSIDR
ASM_PFX(ArmV7AllDataCachesOperation):
  stmfd SP!, {r4-r12, LR}
  mov   R1, R0                  @ save function call in R1
  mrc   p15, 1, R6, c0, c0, 1   @ read CLIDR
  ands  R3, R6, #0x7000000      @ mask out all but Level of Coherency (LoC)
  mov   R3, R3, LSR #23         @ cache level value (naturally aligned, i.e. LoC * 2)
  beq   L_Finished              @ LoC == 0: nothing to do
  mov   R10, #0                 @ start at cache level 0 (R10 = level * 2)

Loop1:
  add   R2, R10, R10, LSR #1    @ work out 3 * cache level (CLIDR field offset)
  mov   R12, R6, LSR R2         @ bottom 3 bits are the cache type for this level
  and   R12, R12, #7            @ isolate those 3 bits
  cmp   R12, #2
  blt   L_Skip                  @ no cache, or instruction-only cache, at this level
  mcr   p15, 2, R10, c0, c0, 0  @ write CSSELR (cache size selection); OR in 1 for instruction side
  isb                           @ sync the CSSELR change before reading CCSIDR
  mrc   p15, 1, R12, c0, c0, 0  @ read current CCSIDR (cache size ID)
  and   R2, R12, #0x7           @ extract the line length field
  add   R2, R2, #4              @ add 4 for the line length offset (log2 16 bytes)
  mov   R4, #0x400
  sub   R4, R4, #1              @ R4 = 0x3FF mask
  ands  R4, R4, R12, LSR #3     @ R4 = max way number (right aligned)
  clz   R5, R4                  @ R5 = bit position of the way size increment
  mov   R7, #0x00008000
  sub   R7, R7, #1              @ R7 = 0x7FFF mask
  ands  R7, R7, R12, LSR #13    @ R7 = max set (index) number (right aligned)

Loop2:
  mov   R9, R4                  @ R9 = working copy of the max way number

Loop3:
  orr   R0, R10, R9, LSL R5     @ factor way number and cache level into R0
  orr   R0, R0, R7, LSL R2      @ factor set (index) number into R0

  blx   R1                      @ invoke the per-line operation with the set/way operand

  subs  R9, R9, #1              @ decrement the way number
  bge   Loop3
  subs  R7, R7, #1              @ decrement the index
  bge   Loop2

L_Skip:
  add   R10, R10, #2            @ advance to the next cache level
  cmp   R3, R10
  bgt   Loop1

L_Finished:
  dsb                           @ ensure all maintenance ops completed
  ldmfd SP!, {r4-r12, lr}
  bx    LR

# VOID ArmDataMemoryBarrier (VOID)
ASM_PFX(ArmDataMemoryBarrier):
  dmb                           @ data memory barrier
  bx    lr

# VOID ArmDataSyncronizationBarrier (VOID)
# NOTE(review): "Syncronization" misspelling is the exported ABI symbol name —
# do not rename without updating every caller and the library header.
# ArmDrainWriteBuffer is an alias for the same entry point (on ARMv7 the old
# drain-write-buffer operation is the DSB instruction).
ASM_PFX(ArmDataSyncronizationBarrier):
ASM_PFX(ArmDrainWriteBuffer):
  dsb                           @ data synchronization barrier
  bx    lr

# VOID ArmInstructionSynchronizationBarrier (VOID)
ASM_PFX(ArmInstructionSynchronizationBarrier):
  isb                           @ instruction synchronization barrier (pipeline flush)
  bx    lr


ASM_FUNCTION_REMOVE_IF_UNREFERENCED