]> git.proxmox.com Git - mirror_edk2.git/blob - ArmPkg/Library/ArmLib/ArmV7/ArmV7Support.S
Update remaining ARM .S files with INTERWORK_FUNC macro. This is the 2nd half of...
[mirror_edk2.git] / ArmPkg / Library / ArmLib / ArmV7 / ArmV7Support.S
1 #------------------------------------------------------------------------------
2 #
3 # Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
4 #
5 # This program and the accompanying materials
6 # are licensed and made available under the terms and conditions of the BSD License
7 # which accompanies this distribution. The full text of the license may be found at
8 # http://opensource.org/licenses/bsd-license.php
9 #
10 # THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
12 #
13 #------------------------------------------------------------------------------
14
#
# Export every cache/MMU/barrier primitive defined below and mark each one
# with INTERWORK_FUNC so it can be called from both ARM and Thumb state
# (the macro emits the assembler glue needed for BX-based interworking).
#
.globl ASM_PFX(ArmInvalidateInstructionCache)
INTERWORK_FUNC(ArmInvalidateInstructionCache)
.globl ASM_PFX(ArmInvalidateDataCacheEntryByMVA)
INTERWORK_FUNC(ArmInvalidateDataCacheEntryByMVA)
.globl ASM_PFX(ArmCleanDataCacheEntryByMVA)
INTERWORK_FUNC(ArmCleanDataCacheEntryByMVA)
.globl ASM_PFX(ArmCleanInvalidateDataCacheEntryByMVA)
INTERWORK_FUNC(ArmCleanInvalidateDataCacheEntryByMVA)
.globl ASM_PFX(ArmInvalidateDataCacheEntryBySetWay)
INTERWORK_FUNC(ArmInvalidateDataCacheEntryBySetWay)
.globl ASM_PFX(ArmCleanDataCacheEntryBySetWay)
INTERWORK_FUNC(ArmCleanDataCacheEntryBySetWay)
.globl ASM_PFX(ArmCleanInvalidateDataCacheEntryBySetWay)
INTERWORK_FUNC(ArmCleanInvalidateDataCacheEntryBySetWay)
.globl ASM_PFX(ArmDrainWriteBuffer)
INTERWORK_FUNC(ArmDrainWriteBuffer)
.globl ASM_PFX(ArmEnableMmu)
INTERWORK_FUNC(ArmEnableMmu)
.globl ASM_PFX(ArmDisableMmu)
INTERWORK_FUNC(ArmDisableMmu)
.globl ASM_PFX(ArmMmuEnabled)
INTERWORK_FUNC(ArmMmuEnabled)
.globl ASM_PFX(ArmEnableDataCache)
INTERWORK_FUNC(ArmEnableDataCache)
.globl ASM_PFX(ArmDisableDataCache)
INTERWORK_FUNC(ArmDisableDataCache)
.globl ASM_PFX(ArmEnableInstructionCache)
INTERWORK_FUNC(ArmEnableInstructionCache)
.globl ASM_PFX(ArmDisableInstructionCache)
INTERWORK_FUNC(ArmDisableInstructionCache)
.globl ASM_PFX(ArmEnableBranchPrediction)
INTERWORK_FUNC(ArmEnableBranchPrediction)
.globl ASM_PFX(ArmDisableBranchPrediction)
INTERWORK_FUNC(ArmDisableBranchPrediction)
.globl ASM_PFX(ArmV7AllDataCachesOperation)
INTERWORK_FUNC(ArmV7AllDataCachesOperation)
.globl ASM_PFX(ArmDataMemoryBarrier)
INTERWORK_FUNC(ArmDataMemoryBarrier)
.globl ASM_PFX(ArmDataSyncronizationBarrier)
INTERWORK_FUNC(ArmDataSyncronizationBarrier)
.globl ASM_PFX(ArmInstructionSynchronizationBarrier)
INTERWORK_FUNC(ArmInstructionSynchronizationBarrier)
57
.text
.align 2

# SCTLR (CP15 c1, c0, 0) bit masks used by the cache enable/disable routines:
#   DC_ON = bit 2  (C bit, data/unified cache enable)
#   IC_ON = bit 12 (I bit, instruction cache enable)
.set DC_ON, (0x1<<2)
.set IC_ON, (0x1<<12)
63
64
65
#
# VOID ArmInvalidateDataCacheEntryByMVA (UINTN Address)
# In:  r0 = virtual address within the data cache line to invalidate
# DCIMVAC: invalidate one data cache line by MVA, then synchronize.
#
ASM_PFX(ArmInvalidateDataCacheEntryByMVA):
  mcr     p15, 0, r0, c7, c6, 1   @ DCIMVAC: invalidate single data cache line by MVA
  dsb                             @ ensure the maintenance completes
  isb                             @ ensure subsequent fetches see the effect
  bx      lr
71
72
#
# VOID ArmCleanDataCacheEntryByMVA (UINTN Address)
# In:  r0 = virtual address within the data cache line to clean
# DCCMVAC: clean (write back) one data cache line by MVA, then synchronize.
#
ASM_PFX(ArmCleanDataCacheEntryByMVA):
  mcr     p15, 0, r0, c7, c10, 1  @ DCCMVAC: clean single data cache line by MVA
  dsb                             @ ensure the write-back completes
  isb
  bx      lr
78
79
#
# VOID ArmCleanInvalidateDataCacheEntryByMVA (UINTN Address)
# In:  r0 = virtual address within the data cache line
# DCCIMVAC: clean and invalidate one data cache line by MVA, then synchronize.
#
ASM_PFX(ArmCleanInvalidateDataCacheEntryByMVA):
  mcr     p15, 0, r0, c7, c14, 1  @ DCCIMVAC: clean and invalidate single data cache line by MVA
  dsb                             @ ensure the maintenance completes
  isb
  bx      lr
85
86
#
# VOID ArmInvalidateDataCacheEntryBySetWay (UINTN SetWayFormat)
# In:  r0 = set/way/level encoded per the DCISW register format
#      (as built by ArmV7AllDataCachesOperation below)
# DCISW: invalidate one data cache line by set/way, then synchronize.
#
ASM_PFX(ArmInvalidateDataCacheEntryBySetWay):
  mcr     p15, 0, r0, c7, c6, 2   @ DCISW: invalidate this line by set/way
  dsb
  isb
  bx      lr
92
93
#
# VOID ArmCleanInvalidateDataCacheEntryBySetWay (UINTN SetWayFormat)
# In:  r0 = set/way/level encoded per the DCCISW register format
# DCCISW: clean and invalidate one data cache line by set/way, then synchronize.
#
ASM_PFX(ArmCleanInvalidateDataCacheEntryBySetWay):
  mcr     p15, 0, r0, c7, c14, 2  @ DCCISW: clean and invalidate this line by set/way
  dsb
  isb
  bx      lr
99
100
#
# VOID ArmCleanDataCacheEntryBySetWay (UINTN SetWayFormat)
# In:  r0 = set/way/level encoded per the DCCSW register format
# DCCSW: clean (write back) one data cache line by set/way, then synchronize.
#
ASM_PFX(ArmCleanDataCacheEntryBySetWay):
  mcr     p15, 0, r0, c7, c10, 2  @ DCCSW: clean this line by set/way
  dsb
  isb
  bx      lr
106
107
#
# VOID ArmInvalidateInstructionCache (VOID)
# ICIALLU: invalidate the entire instruction cache, then synchronize so the
# new instruction stream is fetched fresh.
#
ASM_PFX(ArmInvalidateInstructionCache):
  mcr     p15,0,R0,c7,c5,0        @ ICIALLU: invalidate entire instruction cache (Rt value ignored)
  dsb
  isb                             @ flush the pipeline so stale prefetched instructions are discarded
  bx      LR
113
#
# VOID ArmEnableMmu (VOID)
# Set SCTLR.M (bit 0) via a read-modify-write of CP15 c1, enabling the MMU.
# Caller is responsible for having valid translation tables installed first.
#
ASM_PFX(ArmEnableMmu):
  mrc     p15,0,R0,c1,c0,0        @ read SCTLR
  orr     R0,R0,#1                @ set M bit (MMU enable)
  mcr     p15,0,R0,c1,c0,0        @ write SCTLR
  dsb
  isb                             @ synchronize context: translation is active after this
  bx      LR
121
#
# BOOLEAN ArmMmuEnabled (VOID)
# Out: r0 = 1 if SCTLR.M (MMU enable) is set, 0 otherwise.
#
ASM_PFX(ArmMmuEnabled):
  mrc     p15,0,R0,c1,c0,0        @ read SCTLR
  and     R0,R0,#1                @ isolate M bit
  bx      LR
126
#
# VOID ArmDisableMmu (VOID)
# Clear SCTLR.M to disable the MMU, then invalidate the TLBs and the branch
# predictor so no stale translations or predicted targets survive.
# (TLBIALL/BPIALL ignore the value in Rt; R0 merely provides a register.)
#
ASM_PFX(ArmDisableMmu):
  mrc     p15,0,R0,c1,c0,0        @ read SCTLR
  bic     R0,R0,#1                @ clear M bit
  mcr     p15,0,R0,c1,c0,0        @ write SCTLR: MMU now disabled

  mcr     p15,0,R0,c8,c7,0        @ TLBIALL: invalidate entire unified TLB
  mcr     p15,0,R0,c7,c5,6        @ BPIALL: invalidate branch predictor array
  dsb
  isb
  bx      LR
137
#
# VOID ArmEnableDataCache (VOID)
# Set SCTLR.C (bit 2) to enable the data/unified cache.
#
ASM_PFX(ArmEnableDataCache):
  ldr     R1,=DC_ON               @ R1 = C-bit mask (1 << 2)
  mrc     p15,0,R0,c1,c0,0        @ read SCTLR
  orr     R0,R0,R1                @ set C bit
  mcr     p15,0,r0,c1,c0,0        @ write SCTLR
  dsb
  isb
  bx      LR
146
#
# VOID ArmDisableDataCache (VOID)
# Clear SCTLR.C (bit 2) to disable the data/unified cache.
# Note: does not clean/invalidate the cache; callers needing coherency must
# perform the maintenance themselves (e.g. via ArmV7AllDataCachesOperation).
#
ASM_PFX(ArmDisableDataCache):
  ldr     R1,=DC_ON               @ R1 = C-bit mask (1 << 2)
  mrc     p15,0,R0,c1,c0,0        @ read SCTLR
  bic     R0,R0,R1                @ clear C bit
  mcr     p15,0,r0,c1,c0,0        @ write SCTLR
  dsb
  isb
  bx      LR
155
#
# VOID ArmEnableInstructionCache (VOID)
# Set SCTLR.I (bit 12) to enable the instruction cache.
#
ASM_PFX(ArmEnableInstructionCache):
  ldr     R1,=IC_ON               @ R1 = I-bit mask (1 << 12)
  mrc     p15,0,R0,c1,c0,0        @ read SCTLR
  orr     R0,R0,R1                @ set I bit
  mcr     p15,0,r0,c1,c0,0        @ write SCTLR
  dsb
  isb
  bx      LR
164
#
# VOID ArmDisableInstructionCache (VOID)
# Clear SCTLR.I (bit 12) to disable the instruction cache.
#
ASM_PFX(ArmDisableInstructionCache):
  ldr     R1,=IC_ON               @ R1 = I-bit mask (1 << 12)
  mrc     p15,0,R0,c1,c0,0        @ read SCTLR
  bic     R0,R0,R1                @ clear I bit
  mcr     p15,0,r0,c1,c0,0        @ write SCTLR
  dsb
  isb
  bx      LR
173
#
# VOID ArmEnableBranchPrediction (VOID)
# Set SCTLR bit 11 (Z bit, program-flow/branch prediction enable).
#
ASM_PFX(ArmEnableBranchPrediction):
  mrc     p15, 0, r0, c1, c0, 0   @ read SCTLR
  orr     r0, r0, #0x00000800     @ set Z bit (bit 11)
  mcr     p15, 0, r0, c1, c0, 0   @ write SCTLR
  dsb
  isb
  bx      LR
181
#
# VOID ArmDisableBranchPrediction (VOID)
# Clear SCTLR bit 11 (Z bit, program-flow/branch prediction enable).
#
ASM_PFX(ArmDisableBranchPrediction):
  mrc     p15, 0, r0, c1, c0, 0   @ read SCTLR
  bic     r0, r0, #0x00000800     @ clear Z bit (bit 11)
  mcr     p15, 0, r0, c1, c0, 0   @ write SCTLR
  dsb
  isb
  bx      LR
189
190
#
# VOID ArmV7AllDataCachesOperation (ARM_V7_CACHE_OPERATION DataCacheOperation)
# In:  r0 = function pointer taking a set/way/level word in r0 (one of the
#           *BySetWay primitives above); called once per cache line of every
#           data/unified cache level up to the Level of Coherency.
#
# Walks CLIDR to find each data-capable cache level, reads its geometry from
# CCSIDR (after selecting the level in CSSELR), and iterates every set/way
# combination, invoking the callback with the DC?SW-format operand.
#
# Register roles:
#   R1  = callback             R6  = CLIDR value
#   R3  = (LoC level) << 1     R10 = current level << 1 (CSSELR level field)
#   R2  = log2(line bytes)     R4  = max way index    R5 = way shift (32-clz)
#   R7  = current set index    R9  = current way index
#
ASM_PFX(ArmV7AllDataCachesOperation):
  stmfd   SP!,{r4-r12, LR}        @ preserve callee-saved regs across the walk
  mov     R1, R0                  @ R1 = callback; frees R0 for set/way operand
  mrc     p15, 1, R6, c0, c0, 1   @ read CLIDR
  ands    R3, R6, #0x7000000      @ mask out all but Level of Coherency (bits 26:24)
  mov     R3, R3, LSR #23         @ R3 = LoC << 1 (matches R10, which steps by 2)
  beq     L_Finished              @ LoC == 0: nothing to maintain
  mov     R10, #0                 @ start at cache level 0 (R10 = level << 1)

Loop1:
  add     R2, R10, R10, LSR #1    @ R2 = 3 * level: CLIDR holds 3 bits per level
  mov     R12, R6, LSR R2         @ bottom 3 bits are the cache type for this level
  and     R12, R12, #7            @ isolate the type field
  cmp     R12, #2                 @ type < 2: no cache or instruction-only cache
  blt     L_Skip                  @ skip level with no data cache
  mcr     p15, 2, R10, c0, c0, 0  @ write CSSELR: select this level's data cache
  isb                             @ sync the change before reading CCSIDR
  mrc     p15, 1, R12, c0, c0, 0  @ read CCSIDR for the selected cache
  and     R2, R12, #0x7           @ extract the line-size field (bits 2:0)
  add     R2, R2, #4              @ R2 = log2(line bytes): field is log2(words) - 2
  @ ldr R4, =0x3FF
  mov     R4, #0x400
  sub     R4, R4, #1              @ R4 = 0x3FF mask (mov/sub avoids a literal pool load)
  ands    R4, R4, R12, LSR #3     @ R4 = max way index (associativity - 1, bits 12:3)
  clz     R5, R4                  @ R5 = bit position where the way number goes (31 - log2 ways)
  @ ldr R7, =0x00007FFF
  mov     R7, #0x00008000
  sub     R7, R7, #1              @ R7 = 0x7FFF mask
  ands    R7, R7, R12, LSR #13    @ R7 = max set index (NumSets - 1, bits 27:13)

Loop2:                            @ per set: reset the way counter
  mov     R9, R4                  @ R9 = working copy of the max way index

Loop3:                            @ per way: build operand and invoke callback
  orr     R0, R10, R9, LSL R5     @ R0 = level | (way << R5)
  orr     R0, R0, R7, LSL R2      @ R0 |= set << log2(line bytes)

  blx     R1                      @ perform the requested op on this line
                                  @ (callback preserves R2-R10 per AAPCS)
  subs    R9, R9, #1              @ next way
  bge     Loop3
  subs    R7, R7, #1              @ next set
  bge     Loop2
L_Skip:
  add     R10, R10, #2            @ next level (level field is bits 3:1 of CSSELR)
  cmp     R3, R10                 @ more levels below the LoC?
  bgt     Loop1

L_Finished:
  dsb                             @ ensure all maintenance ops have completed
  ldmfd   SP!, {r4-r12, lr}
  bx      LR
243
#
# VOID ArmDataMemoryBarrier (VOID)
# DMB: order memory accesses before the barrier against those after it.
#
ASM_PFX(ArmDataMemoryBarrier):
  dmb
  bx      LR
247
#
# VOID ArmDataSyncronizationBarrier (VOID)   [sic - exported name keeps the
#   historical misspelling; do not rename, callers link against it]
# VOID ArmDrainWriteBuffer (VOID)
# Both entry points execute a DSB: complete all outstanding memory accesses
# before any subsequent instruction executes. "Drain write buffer" is the
# legacy ARM name for this operation.
#
ASM_PFX(ArmDataSyncronizationBarrier):
ASM_PFX(ArmDrainWriteBuffer):
  dsb
  bx      LR
252
#
# VOID ArmInstructionSynchronizationBarrier (VOID)
# ISB: flush the pipeline so subsequent instructions are re-fetched after any
# preceding context-changing operation takes effect.
#
ASM_PFX(ArmInstructionSynchronizationBarrier):
  isb
  bx      LR
256
257
258 ASM_FUNCTION_REMOVE_IF_UNREFERENCED