#------------------------------------------------------------------------------
#
# Copyright (c) 2008-2009 Apple Inc. All rights reserved.
#
# All rights reserved. This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
#------------------------------------------------------------------------------

.text
.align 2
.globl ASM_PFX(ArmInvalidateInstructionCache)
.globl ASM_PFX(ArmInvalidateDataCacheEntryByMVA)
.globl ASM_PFX(ArmCleanDataCacheEntryByMVA)
.globl ASM_PFX(ArmCleanInvalidateDataCacheEntryByMVA)
.globl ASM_PFX(ArmInvalidateDataCacheEntryBySetWay)
.globl ASM_PFX(ArmCleanDataCacheEntryBySetWay)
.globl ASM_PFX(ArmCleanInvalidateDataCacheEntryBySetWay)
.globl ASM_PFX(ArmDrainWriteBuffer)
.globl ASM_PFX(ArmEnableMmu)
.globl ASM_PFX(ArmDisableMmu)
.globl ASM_PFX(ArmMmuEnabled)
.globl ASM_PFX(ArmEnableDataCache)
.globl ASM_PFX(ArmDisableDataCache)
.globl ASM_PFX(ArmEnableInstructionCache)
.globl ASM_PFX(ArmDisableInstructionCache)
.globl ASM_PFX(ArmEnableExtendPTConfig)
.globl ASM_PFX(ArmDisableExtendPTConfig)
.globl ASM_PFX(ArmEnableBranchPrediction)
.globl ASM_PFX(ArmDisableBranchPrediction)
.globl ASM_PFX(ArmV7AllDataCachesOperation)
.globl ASM_PFX(ArmDataMemoryBarrier)
.globl ASM_PFX(ArmDataSyncronizationBarrier)
.globl ASM_PFX(ArmInstructionSynchronizationBarrier)


.set DC_ON, (0x1<<2)
.set IC_ON, (0x1<<12)

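#------------------------------------------------------------------------------
# Single-line data cache maintenance by MVA.
# Each routine takes the virtual address of the line in r0 and issues the
# corresponding CP15 c7 operation (DCIMVAC, DCCMVAC, DCCIMVAC), followed by
# DSB/ISB so the effect is visible before returning.
#------------------------------------------------------------------------------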
ASM_PFX(ArmInvalidateDataCacheEntryByMVA):
  mcr     p15, 0, r0, c7, c6, 1   @ invalidate single data cache line
  dsb
  isb
  bx      lr


ASM_PFX(ArmCleanDataCacheEntryByMVA):
  mcr     p15, 0, r0, c7, c10, 1  @ clean single data cache line
  dsb
  isb
  bx      lr


ASM_PFX(ArmCleanInvalidateDataCacheEntryByMVA):
  mcr     p15, 0, r0, c7, c14, 1  @ clean and invalidate single data cache line
  dsb
  isb
  bx      lr

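#------------------------------------------------------------------------------
# Single-line data cache maintenance by set/way.
# r0 holds the set/way/level operand expected by the CP15 c7 set/way
# operations (DCISW, DCCSW, DCCISW); ArmV7AllDataCachesOperation below builds
# this operand while walking the cache hierarchy.
#------------------------------------------------------------------------------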
ASM_PFX(ArmInvalidateDataCacheEntryBySetWay):
  mcr     p15, 0, r0, c7, c6, 2   @ Invalidate this line
  dsb
  isb
  bx      lr


ASM_PFX(ArmCleanInvalidateDataCacheEntryBySetWay):
  mcr     p15, 0, r0, c7, c14, 2  @ Clean and Invalidate this line
  dsb
  isb
  bx      lr


ASM_PFX(ArmCleanDataCacheEntryBySetWay):
  mcr     p15, 0, r0, c7, c10, 2  @ Clean this line
  dsb
  isb
  bx      lr

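#------------------------------------------------------------------------------
# ArmDrainWriteBuffer
# CP15 c7, c10, 4 is the legacy "drain write buffer" operation, which on ARMv7
# behaves as a data synchronization barrier; it is followed by explicit
# DSB/ISB instructions.
#------------------------------------------------------------------------------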
ASM_PFX(ArmDrainWriteBuffer):
  mcr     p15, 0, r0, c7, c10, 4  @ Drain write buffer for sync
  dsb
  isb
  bx      lr

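#------------------------------------------------------------------------------
# ArmInvalidateInstructionCache
# Invalidates the entire instruction cache (ICIALLU) and synchronizes with
# DSB/ISB so that subsequent instruction fetches see the update.
#------------------------------------------------------------------------------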
ASM_PFX(ArmInvalidateInstructionCache):
  mov     R0, #0
  mcr     p15, 0, R0, c7, c5, 0   @ Invalidate entire instruction cache
  mov     R0, #0
  dsb
  isb
  bx      LR

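#------------------------------------------------------------------------------
# MMU control.
# ArmEnableMmu/ArmDisableMmu toggle the M bit (bit 0) of the SCTLR;
# ArmMmuEnabled returns that bit in r0. ArmDisableMmu also clears the FCSE
# PID register, which must be zero before the MMU is turned off.
#------------------------------------------------------------------------------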
ASM_PFX(ArmEnableMmu):
  mrc     p15, 0, R0, c1, c0, 0
  orr     R0, R0, #1
  mcr     p15, 0, R0, c1, c0, 0
  bx      LR

ASM_PFX(ArmMmuEnabled):
  mrc     p15, 0, R0, c1, c0, 0
  and     R0, R0, #1
  bx      LR


ASM_PFX(ArmDisableMmu):
  mov     R0, #0
  mcr     p15, 0, R0, c13, c0, 0  @ FCSE PID register must be cleared before disabling MMU
  mrc     p15, 0, R0, c1, c0, 0
  bic     R0, R0, #1
  mcr     p15, 0, R0, c1, c0, 0   @ Disable MMU
  mov     R0, #0
  dsb
  isb
  bx      LR

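#------------------------------------------------------------------------------
# Cache enable/disable.
# DC_ON (bit 2) and IC_ON (bit 12), defined above, are the C and I bits of the
# SCTLR; the routines below read-modify-write that register to turn the data
# and instruction caches on or off.
#------------------------------------------------------------------------------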
ASM_PFX(ArmEnableDataCache):
  ldr     R1, =DC_ON
  mrc     p15, 0, R0, c1, c0, 0   @ Read control register configuration data
  orr     R0, R0, R1              @ Set C bit
  mcr     p15, 0, r0, c1, c0, 0   @ Write control register configuration data
  dsb
  isb
  bx      LR

ASM_PFX(ArmDisableDataCache):
  ldr     R1, =DC_ON
  mrc     p15, 0, R0, c1, c0, 0   @ Read control register configuration data
  bic     R0, R0, R1              @ Clear C bit
  mcr     p15, 0, r0, c1, c0, 0   @ Write control register configuration data
  dsb
  isb
  bx      LR

ASM_PFX(ArmEnableInstructionCache):
  ldr     R1, =IC_ON
  mrc     p15, 0, R0, c1, c0, 0   @ Read control register configuration data
  orr     R0, R0, R1              @ Set I bit
  mcr     p15, 0, r0, c1, c0, 0   @ Write control register configuration data
  dsb
  isb
  bx      LR

ASM_PFX(ArmDisableInstructionCache):
  ldr     R1, =IC_ON
  mrc     p15, 0, R0, c1, c0, 0   @ Read control register configuration data
  bic     R0, R0, R1              @ Clear I bit
  mcr     p15, 0, r0, c1, c0, 0   @ Write control register configuration data
  dsb
  isb
  bx      LR

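#------------------------------------------------------------------------------
# Branch prediction control.
# Bit 11 (0x800) of the SCTLR is the Z bit, which enables program flow
# prediction on implementations that allow it to be controlled.
#------------------------------------------------------------------------------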
ASM_PFX(ArmEnableBranchPrediction):
  mrc     p15, 0, r0, c1, c0, 0
  orr     r0, r0, #0x00000800
  mcr     p15, 0, r0, c1, c0, 0
  dsb
  isb
  bx      LR

ASM_PFX(ArmDisableBranchPrediction):
  mrc     p15, 0, r0, c1, c0, 0
  bic     r0, r0, #0x00000800
  mcr     p15, 0, r0, c1, c0, 0
  dsb
  isb
  bx      LR

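#------------------------------------------------------------------------------
# ArmV7AllDataCachesOperation
# Applies the set/way operation passed in r0 (a function pointer, typically
# one of the *BySetWay helpers above) to every line of every data or unified
# cache level up to the Level of Coherency reported by CLIDR. For each such
# level it selects the cache with CSSELR, reads its geometry from CCSIDR, and
# iterates over all sets and ways, building the set/way operand in r0 before
# calling the helper through r1.
#------------------------------------------------------------------------------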
ASM_PFX(ArmV7AllDataCachesOperation):
  stmfd   SP!, {r4-r12, LR}
  mov     R1, R0                  @ Save Function call in R1
  mrc     p15, 1, R6, c0, c0, 1   @ Read CLIDR
  ands    R3, R6, #0x7000000      @ Mask out all but Level of Coherency (LoC)
  mov     R3, R3, LSR #23         @ Cache level value (naturally aligned)
  beq     L_Finished
  mov     R10, #0

Loop1:
  add     R2, R10, R10, LSR #1    @ Work out 3 x cache level
  mov     R12, R6, LSR R2         @ bottom 3 bits are the Cache type for this level
  and     R12, R12, #7            @ get those 3 bits alone
  cmp     R12, #2
  blt     L_Skip                  @ no cache or only instruction cache at this level
  mcr     p15, 2, R10, c0, c0, 0  @ write the Cache Size selection register (CSSELR)
  isb                             @ ISB to sync the change to the CCSIDR
  mrc     p15, 1, R12, c0, c0, 0  @ read the current Cache Size ID register (CCSIDR)
  and     R2, R12, #0x7           @ extract the line length field
  add     R2, R2, #4              @ add 4 for the line length offset (log2 16 bytes)
  mov     R4, #0x400
  sub     R4, R4, #1
  ands    R4, R4, R12, LSR #3     @ R4 is the max number of the way size (right aligned)
  clz     R5, R4                  @ R5 is the bit position of the way size increment
  mov     R7, #0x00008000
  sub     R7, R7, #1
  ands    R7, R7, R12, LSR #13    @ R7 is the max number of the index size (right aligned)

Loop2:
  mov     R9, R4                  @ R9 working copy of the max way size (right aligned)

Loop3:
  orr     R0, R10, R9, LSL R5     @ factor in the way number and cache number into R0
  orr     R0, R0, R7, LSL R2      @ factor in the index number

  blx     R1

  subs    R9, R9, #1              @ decrement the way number
  bge     Loop3
  subs    R7, R7, #1              @ decrement the index
  bge     Loop2
L_Skip:
  add     R10, R10, #2            @ increment the cache number
  cmp     R3, R10
  bgt     Loop1

L_Finished:
  ldmfd   SP!, {r4-r12, lr}
  bx      LR

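#------------------------------------------------------------------------------
# Barrier wrappers: thin assembly entry points around the ARMv7 DMB, DSB and
# ISB instructions so that C code can issue explicit barriers.
#------------------------------------------------------------------------------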
ASM_PFX(ArmDataMemoryBarrier):
  dmb
  bx      LR

ASM_PFX(ArmDataSyncronizationBarrier):
  dsb
  bx      LR

ASM_PFX(ArmInstructionSynchronizationBarrier):
  isb
  bx      LR


ASM_FUNCTION_REMOVE_IF_UNREFERENCED