#------------------------------------------------------------------------------
#
# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>
# Copyright (c) 2011 - 2014, ARM Limited. All rights reserved.
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
#------------------------------------------------------------------------------

#include <Chipset/AArch64.h>
#include <AsmMacroIoLibV8.h>

.text
.align 3

GCC_ASM_EXPORT (ArmInvalidateInstructionCache)
GCC_ASM_EXPORT (ArmInvalidateDataCacheEntryByMVA)
GCC_ASM_EXPORT (ArmCleanDataCacheEntryByMVA)
GCC_ASM_EXPORT (ArmCleanInvalidateDataCacheEntryByMVA)
GCC_ASM_EXPORT (ArmInvalidateDataCacheEntryBySetWay)
GCC_ASM_EXPORT (ArmCleanDataCacheEntryBySetWay)
GCC_ASM_EXPORT (ArmCleanInvalidateDataCacheEntryBySetWay)
GCC_ASM_EXPORT (ArmDrainWriteBuffer)
GCC_ASM_EXPORT (ArmEnableMmu)
GCC_ASM_EXPORT (ArmDisableMmu)
GCC_ASM_EXPORT (ArmDisableCachesAndMmu)
GCC_ASM_EXPORT (ArmMmuEnabled)
GCC_ASM_EXPORT (ArmEnableDataCache)
GCC_ASM_EXPORT (ArmDisableDataCache)
GCC_ASM_EXPORT (ArmEnableInstructionCache)
GCC_ASM_EXPORT (ArmDisableInstructionCache)
GCC_ASM_EXPORT (ArmDisableAlignmentCheck)
GCC_ASM_EXPORT (ArmEnableAlignmentCheck)
GCC_ASM_EXPORT (ArmEnableBranchPrediction)
GCC_ASM_EXPORT (ArmDisableBranchPrediction)
GCC_ASM_EXPORT (AArch64AllDataCachesOperation)
GCC_ASM_EXPORT (AArch64PerformPoUDataCacheOperation)
GCC_ASM_EXPORT (ArmDataMemoryBarrier)
GCC_ASM_EXPORT (ArmDataSynchronizationBarrier)
GCC_ASM_EXPORT (ArmInstructionSynchronizationBarrier)
GCC_ASM_EXPORT (ArmWriteVBar)
GCC_ASM_EXPORT (ArmReadVBar)
GCC_ASM_EXPORT (ArmEnableVFP)
GCC_ASM_EXPORT (ArmCallWFI)
GCC_ASM_EXPORT (ArmReadMpidr)
GCC_ASM_EXPORT (ArmReadTpidrurw)
GCC_ASM_EXPORT (ArmWriteTpidrurw)
GCC_ASM_EXPORT (ArmIsArchTimerImplemented)
GCC_ASM_EXPORT (ArmReadIdPfr0)
GCC_ASM_EXPORT (ArmReadIdPfr1)
GCC_ASM_EXPORT (ArmWriteHcr)
GCC_ASM_EXPORT (ArmReadCurrentEL)

.set CTRL_M_BIT,      (1 << 0)
.set CTRL_A_BIT,      (1 << 1)
.set CTRL_C_BIT,      (1 << 2)
.set CTRL_I_BIT,      (1 << 12)
// NOTE: CTRL_V_BIT shares bit 12 with CTRL_I_BIT and is not referenced
// anywhere in this file; it is kept for source compatibility.
.set CTRL_V_BIT,      (1 << 12)
.set CPACR_VFP_BITS,  (3 << 20)

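// For reference: in SCTLR_ELx, M (bit 0) enables the MMU, A (bit 1) enables
// alignment checking, C (bit 2) enables the data cache, and I (bit 12)
// enables the instruction cache. CPACR_VFP_BITS covers the CPACR_EL1.FPEN
// field (bits [21:20]), which controls FP/SIMD trapping at EL0/EL1.
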
ASM_PFX(ArmInvalidateDataCacheEntryByMVA):
  dc      ivac, x0                 // Invalidate single data cache line
  dsb     sy
  isb
  ret


ASM_PFX(ArmCleanDataCacheEntryByMVA):
  dc      cvac, x0                 // Clean single data cache line
  dsb     sy
  isb
  ret


ASM_PFX(ArmCleanInvalidateDataCacheEntryByMVA):
  dc      civac, x0                // Clean and invalidate single data cache line
  dsb     sy
  isb
  ret


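// The Set/Way variants below expect x0 to already hold a DC Set/Way operand:
// the cache level in bits [3:1], the way number left-aligned towards bit 31
// and the set number shifted left by log2(line length). The loops in
// AArch64AllDataCachesOperation further down construct exactly this format.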
ASM_PFX(ArmInvalidateDataCacheEntryBySetWay):
  dc      isw, x0                  // Invalidate this line
  dsb     sy
  isb
  ret


ASM_PFX(ArmCleanInvalidateDataCacheEntryBySetWay):
  dc      cisw, x0                 // Clean and Invalidate this line
  dsb     sy
  isb
  ret


ASM_PFX(ArmCleanDataCacheEntryBySetWay):
  dc      csw, x0                  // Clean this line
  dsb     sy
  isb
  ret


ASM_PFX(ArmInvalidateInstructionCache):
  ic      iallu                    // Invalidate entire instruction cache
  dsb     sy
  isb
  ret


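// The EL1_OR_EL2_OR_EL3(x) macro (from AsmMacroIoLibV8.h) is assumed here to
// read CurrentEL into its scratch register and branch forward to local label
// 1f, 2f or 3f according to the running exception level; each routine below
// therefore lays out its EL1/EL2/EL3 paths behind those numbered labels.
// EL1_OR_EL2(x) is the two-level variant of the same dispatch.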
ASM_PFX(ArmEnableMmu):
  EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1           // Read System Control Register EL1
  b       4f
2: mrs     x0, sctlr_el2           // Read System Control Register EL2
  b       4f
3: mrs     x0, sctlr_el3           // Read System Control Register EL3
4: orr     x0, x0, #CTRL_M_BIT     // Set MMU enable bit
  EL1_OR_EL2_OR_EL3(x1)
1: tlbi    vmalle1
  dsb     nsh
  isb
  msr     sctlr_el1, x0            // Write back
  b       4f
2: tlbi    alle2
  dsb     nsh
  isb
  msr     sctlr_el2, x0            // Write back
  b       4f
3: tlbi    alle3
  dsb     nsh
  isb
  msr     sctlr_el3, x0            // Write back
4: isb
  ret


ASM_PFX(ArmDisableMmu):
  EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1           // Read System Control Register EL1
  b       4f
2: mrs     x0, sctlr_el2           // Read System Control Register EL2
  b       4f
3: mrs     x0, sctlr_el3           // Read System Control Register EL3
4: and     x0, x0, #~CTRL_M_BIT    // Clear MMU enable bit
  EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0           // Write back
  tlbi    vmalle1
  b       4f
2: msr     sctlr_el2, x0           // Write back
  tlbi    alle2
  b       4f
3: msr     sctlr_el3, x0           // Write back
  tlbi    alle3
4: dsb     sy
  isb
  ret


ASM_PFX(ArmDisableCachesAndMmu):
  EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1           // Get control register EL1
  b       4f
2: mrs     x0, sctlr_el2           // Get control register EL2
  b       4f
3: mrs     x0, sctlr_el3           // Get control register EL3
4: mov     x1, #~(CTRL_M_BIT | CTRL_C_BIT | CTRL_I_BIT)  // Disable MMU, D & I caches
  and     x0, x0, x1
  EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0           // Write back control register
  b       4f
2: msr     sctlr_el2, x0           // Write back control register
  b       4f
3: msr     sctlr_el3, x0           // Write back control register
4: dsb     sy
  isb
  ret


ASM_PFX(ArmMmuEnabled):
  EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1           // Get control register EL1
  b       4f
2: mrs     x0, sctlr_el2           // Get control register EL2
  b       4f
3: mrs     x0, sctlr_el3           // Get control register EL3
4: and     x0, x0, #CTRL_M_BIT
  ret


ASM_PFX(ArmEnableDataCache):
  EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1           // Get control register EL1
  b       4f
2: mrs     x0, sctlr_el2           // Get control register EL2
  b       4f
3: mrs     x0, sctlr_el3           // Get control register EL3
4: orr     x0, x0, #CTRL_C_BIT     // Set C bit
  EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0           // Write back control register
  b       4f
2: msr     sctlr_el2, x0           // Write back control register
  b       4f
3: msr     sctlr_el3, x0           // Write back control register
4: dsb     sy
  isb
  ret


ASM_PFX(ArmDisableDataCache):
  EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1           // Get control register EL1
  b       4f
2: mrs     x0, sctlr_el2           // Get control register EL2
  b       4f
3: mrs     x0, sctlr_el3           // Get control register EL3
4: and     x0, x0, #~CTRL_C_BIT    // Clear C bit
  EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0           // Write back control register
  b       4f
2: msr     sctlr_el2, x0           // Write back control register
  b       4f
3: msr     sctlr_el3, x0           // Write back control register
4: dsb     sy
  isb
  ret


ASM_PFX(ArmEnableInstructionCache):
  EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1           // Get control register EL1
  b       4f
2: mrs     x0, sctlr_el2           // Get control register EL2
  b       4f
3: mrs     x0, sctlr_el3           // Get control register EL3
4: orr     x0, x0, #CTRL_I_BIT     // Set I bit
  EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0           // Write back control register
  b       4f
2: msr     sctlr_el2, x0           // Write back control register
  b       4f
3: msr     sctlr_el3, x0           // Write back control register
4: dsb     sy
  isb
  ret


ASM_PFX(ArmDisableInstructionCache):
  EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1           // Get control register EL1
  b       4f
2: mrs     x0, sctlr_el2           // Get control register EL2
  b       4f
3: mrs     x0, sctlr_el3           // Get control register EL3
4: and     x0, x0, #~CTRL_I_BIT    // Clear I bit
  EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0           // Write back control register
  b       4f
2: msr     sctlr_el2, x0           // Write back control register
  b       4f
3: msr     sctlr_el3, x0           // Write back control register
4: dsb     sy
  isb
  ret


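// Note: the enable path below only dispatches on EL1/EL2, while the disable
// path also handles EL3 (SCTLR_EL3 carries an A bit as well); the asymmetry
// is present in the original code and is left untouched here.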
ASM_PFX(ArmEnableAlignmentCheck):
  EL1_OR_EL2(x1)
1: mrs     x0, sctlr_el1           // Get control register EL1
  b       3f
2: mrs     x0, sctlr_el2           // Get control register EL2
3: orr     x0, x0, #CTRL_A_BIT     // Set A (alignment check) bit
  EL1_OR_EL2(x1)
1: msr     sctlr_el1, x0           // Write back control register
  b       3f
2: msr     sctlr_el2, x0           // Write back control register
3: dsb     sy
  isb
  ret


ASM_PFX(ArmDisableAlignmentCheck):
  EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1           // Get control register EL1
  b       4f
2: mrs     x0, sctlr_el2           // Get control register EL2
  b       4f
3: mrs     x0, sctlr_el3           // Get control register EL3
4: and     x0, x0, #~CTRL_A_BIT    // Clear A (alignment check) bit
  EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0           // Write back control register
  b       4f
2: msr     sctlr_el2, x0           // Write back control register
  b       4f
3: msr     sctlr_el3, x0           // Write back control register
4: dsb     sy
  isb
  ret


// Branch prediction is always enabled in AArch64; anything beyond that is
// implementation specific. These stubs are kept for C compatibility for now.
ASM_PFX(ArmEnableBranchPrediction):
  ret


ASM_PFX(ArmDisableBranchPrediction):
  ret


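// The two routines below walk the data caches by set/way. They read
// CLIDR_EL1 to find how many levels to visit (the Level of Coherency for the
// all-caches variant, the Level of Unification for the PoU variant), select
// each level in CSSELR_EL1, read its geometry from CCSIDR_EL1, and call the
// operation passed in x0 once per set/way operand. A typical call from C
// (sketch; the by-set/way helpers above are the intended arguments):
//
//   AArch64AllDataCachesOperation (ArmCleanInvalidateDataCacheEntryBySetWay);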
ASM_PFX(AArch64AllDataCachesOperation):
  // We can use regs 0-7 and 9-15 without having to save/restore.
  // Save our link register on the stack - the stack must always be quad-word aligned.
  str     x30, [sp, #-16]!
  mov     x1, x0                   // Save function pointer in x1
  mrs     x6, clidr_el1            // Read EL1 CLIDR
  and     x3, x6, #0x7000000       // Mask out all but Level of Coherency (LoC)
  lsr     x3, x3, #23              // Left align cache level value - the level is shifted by 1 to the
                                   // right to ease the access to CSSELR and the Set/Way operation.
  cbz     x3, L_Finished           // No need to clean if LoC is 0
  mov     x10, #0                  // Start clean at cache level 0
  b       Loop1

ASM_PFX(AArch64PerformPoUDataCacheOperation):
  // We can use regs 0-7 and 9-15 without having to save/restore.
  // Save our link register on the stack - the stack must always be quad-word aligned.
  str     x30, [sp, #-16]!
  mov     x1, x0                   // Save function pointer in x1
  mrs     x6, clidr_el1            // Read EL1 CLIDR
  and     x3, x6, #0x38000000      // Mask out all but the Level of Unification (LoUU)
  lsr     x3, x3, #26              // Left align cache level value - the level is shifted by 1 to the
                                   // right to ease the access to CSSELR and the Set/Way operation.
  cbz     x3, L_Finished           // No need to clean if the Level of Unification is 0
  mov     x10, #0                  // Start clean at cache level 0

Loop1:
  add     x2, x10, x10, lsr #1     // Work out 3x cache level for cache info
  lsr     x12, x6, x2              // bottom 3 bits are the Cache type for this level
  and     x12, x12, #7             // get those 3 bits alone
  cmp     x12, #2                  // what cache at this level?
  b.lt    L_Skip                   // no cache or only instruction cache at this level
  msr     csselr_el1, x10          // write the Cache Size selection register with current level (CSSELR)
  isb                              // isb to sync the change to the CacheSizeID reg
  mrs     x12, ccsidr_el1          // reads current Cache Size ID register (CCSIDR)
  and     x2, x12, #0x7            // extract the line length field
  add     x2, x2, #4               // add 4 for the line length offset (log2 16 bytes)
  mov     x4, #0x400
  sub     x4, x4, #1
  and     x4, x4, x12, lsr #3      // x4 is the max number of the way size (right aligned)
  clz     w5, w4                   // w5 is the bit position of the way size increment
  mov     x7, #0x00008000
  sub     x7, x7, #1
  and     x7, x7, x12, lsr #13     // x7 is the max number of the index size (right aligned)

Loop2:
  mov     x9, x4                   // x9 working copy of the max way size (right aligned)

Loop3:
  lsl     x11, x9, x5
  orr     x0, x10, x11             // factor in the way number and cache level
  lsl     x11, x7, x2
  orr     x0, x0, x11              // factor in the set (index) number

  blr     x1                       // Goto requested cache operation

  subs    x9, x9, #1               // decrement the way number
  b.ge    Loop3
  subs    x7, x7, #1               // decrement the index
  b.ge    Loop2
L_Skip:
  add     x10, x10, #2             // increment the cache number
  cmp     x3, x10
  b.gt    Loop1

L_Finished:
  dsb     sy
  isb
  ldr     x30, [sp], #0x10
  ret


ASM_PFX(ArmDataMemoryBarrier):
  dmb     sy
  ret


ASM_PFX(ArmDataSynchronizationBarrier):
ASM_PFX(ArmDrainWriteBuffer):
  dsb     sy
  ret


ASM_PFX(ArmInstructionSynchronizationBarrier):
  isb
  ret


ASM_PFX(ArmWriteVBar):
  EL1_OR_EL2_OR_EL3(x1)
1: msr     vbar_el1, x0            // Set the Address of the EL1 Vector Table in the VBAR register
  b       4f
2: msr     vbar_el2, x0            // Set the Address of the EL2 Vector Table in the VBAR register
  b       4f
3: msr     vbar_el3, x0            // Set the Address of the EL3 Vector Table in the VBAR register
4: isb
  ret

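// Note: bits [10:0] of VBAR_ELx are treated as zero, so the vector table
// address passed in x0 must be 2 KB aligned.
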
ASM_PFX(ArmReadVBar):
  EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, vbar_el1            // Get the Address of the EL1 Vector Table from the VBAR register
  ret
2: mrs     x0, vbar_el2            // Get the Address of the EL2 Vector Table from the VBAR register
  ret
3: mrs     x0, vbar_el3            // Get the Address of the EL3 Vector Table from the VBAR register
  ret


ASM_PFX(ArmEnableVFP):
  // Check whether floating-point is implemented in the processor.
  mov     x1, x30                  // Save LR
  bl      ArmReadIdPfr0            // Read EL1 Processor Feature Register (PFR0)
  mov     x30, x1                  // Restore LR
  ands    x0, x0, #AARCH64_PFR0_FP // Extract bits indicating VFP implementation
  cmp     x0, #0                   // VFP is implemented if '0'.
  b.ne    4f                       // Exit if VFP not implemented.
  // VFP is implemented.
  // Make sure VFP exceptions are not trapped (to any exception level).
  mrs     x0, cpacr_el1            // Read EL1 Coprocessor Access Control Register (CPACR)
  orr     x0, x0, #CPACR_VFP_BITS  // Disable VFP traps to EL1
  msr     cpacr_el1, x0            // Write back EL1 Coprocessor Access Control Register (CPACR)
  mov     x1, #AARCH64_CPTR_TFP    // TFP Bit for trapping VFP Exceptions
  EL1_OR_EL2_OR_EL3(x2)
1: ret                             // Not configurable in EL1
2: mrs     x0, cptr_el2            // Disable VFP traps to EL2
  bic     x0, x0, x1
  msr     cptr_el2, x0
  ret
3: mrs     x0, cptr_el3            // Disable VFP traps to EL3
  bic     x0, x0, x1
  msr     cptr_el3, x0
4: ret
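
// Note: setting CPACR_EL1.FPEN to 0b11 disables EL0/EL1 trapping of FP/SIMD
// instructions, while a set CPTR_EL2.TFP / CPTR_EL3.TFP bit would trap them
// to EL2/EL3; hence the orr on CPACR and the bic on CPTR above.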

ASM_PFX(ArmCallWFI):
  wfi
  ret


ASM_PFX(ArmReadMpidr):
  mrs     x0, mpidr_el1            // read EL1 MPIDR
  ret


// Keep old function names for C compatibility for now. Change later?
ASM_PFX(ArmReadTpidrurw):
  mrs     x0, tpidr_el0            // read tpidr_el0 (v7 TPIDRURW) -> (v8 TPIDR_EL0)
  ret


// Keep old function names for C compatibility for now. Change later?
ASM_PFX(ArmWriteTpidrurw):
  msr     tpidr_el0, x0            // write tpidr_el0 (v7 TPIDRURW) -> (v8 TPIDR_EL0)
  ret


// Arch timers are mandatory on AArch64
ASM_PFX(ArmIsArchTimerImplemented):
  mov     x0, #1
  ret


ASM_PFX(ArmReadIdPfr0):
  mrs     x0, id_aa64pfr0_el1      // Read ID_AA64PFR0 Register
  ret


// Q: id_aa64pfr1_el1 not defined yet. What does this function want to access?
// A: used to set up the arch timer. Check if we have security extensions and
//    permission to set things up. See:
//    ArmPkg/Library/ArmArchTimerLib/AArch64/ArmArchTimerLib.c
//    Not defined yet, but keep it here for now; it should read as all zeros.
ASM_PFX(ArmReadIdPfr1):
  mrs     x0, id_aa64pfr1_el1      // Read ID_AA64PFR1 Register
  ret

// VOID ArmWriteHcr(UINTN Hcr)
ASM_PFX(ArmWriteHcr):
  msr     hcr_el2, x0              // Write the passed HCR value
  ret

// UINTN ArmReadCurrentEL(VOID)
ASM_PFX(ArmReadCurrentEL):
  mrs     x0, CurrentEL
  ret

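// Note: CurrentEL holds the exception level in bits [3:2]; the value read
// here is 0x4 at EL1, 0x8 at EL2 and 0xC at EL3, so callers mask or shift
// rather than comparing against 1/2/3.
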
ASM_FUNCTION_REMOVE_IF_UNREFERENCED