#------------------------------------------------------------------------------
#
# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>
# Copyright (c) 2011 - 2017, ARM Limited. All rights reserved.
# Copyright (c) 2016, Linaro Limited. All rights reserved.
# Copyright (c) 2020, NUVIA Inc. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#------------------------------------------------------------------------------

#include <Chipset/AArch64.h>
#include <AsmMacroIoLibV8.h>

.set CTRL_M_BIT,      (1 << 0)
.set CTRL_A_BIT,      (1 << 1)
.set CTRL_C_BIT,      (1 << 2)
.set CTRL_SA_BIT,     (1 << 3)
.set CTRL_I_BIT,      (1 << 12)
.set CTRL_V_BIT,      (1 << 12)
.set CPACR_VFP_BITS,  (3 << 20)

ASM_FUNC(ArmInvalidateDataCacheEntryByMVA)
  dc      ivac, x0    // Invalidate single data cache line
  ret


ASM_FUNC(ArmCleanDataCacheEntryByMVA)
  dc      cvac, x0    // Clean single data cache line
  ret


ASM_FUNC(ArmCleanDataCacheEntryToPoUByMVA)
  dc      cvau, x0    // Clean single data cache line to PoU
  ret

ASM_FUNC(ArmInvalidateInstructionCacheEntryToPoUByMVA)
  ic      ivau, x0    // Invalidate single instruction cache line to PoU
  ret


ASM_FUNC(ArmCleanInvalidateDataCacheEntryByMVA)
  dc      civac, x0   // Clean and invalidate single data cache line
  ret


ASM_FUNC(ArmInvalidateDataCacheEntryBySetWay)
  dc      isw, x0     // Invalidate this line
  ret


ASM_FUNC(ArmCleanInvalidateDataCacheEntryBySetWay)
  dc      cisw, x0    // Clean and Invalidate this line
  ret


ASM_FUNC(ArmCleanDataCacheEntryBySetWay)
  dc      csw, x0     // Clean this line
  ret


ASM_FUNC(ArmInvalidateInstructionCache)
  ic      iallu       // Invalidate entire instruction cache
  dsb     sy
  isb
  ret


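// The EL1_OR_EL2_OR_EL3(x) and EL1_OR_EL2(x) helper macros (AsmMacroIoLibV8.h)
// dispatch on CurrentEL: they clobber the given scratch register and branch to
// the numeric local label (1, 2 or 3) matching the current exception level.
// The routines below use the next local label (3 or 4) as the common exit path.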
ASM_FUNC(ArmEnableMmu)
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Read System control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Read System control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Read System control register EL3
4: orr     x0, x0, #CTRL_M_BIT  // Set MMU enable bit
   EL1_OR_EL2_OR_EL3(x1)
1: tlbi    vmalle1
   dsb     nsh
   isb
   msr     sctlr_el1, x0        // Write back
   b       4f
2: tlbi    alle2
   dsb     nsh
   isb
   msr     sctlr_el2, x0        // Write back
   b       4f
3: tlbi    alle3
   dsb     nsh
   isb
   msr     sctlr_el3, x0        // Write back
4: isb
   ret


ASM_FUNC(ArmDisableMmu)
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Read System Control Register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Read System Control Register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Read System Control Register EL3
4: and     x0, x0, #~CTRL_M_BIT // Clear MMU enable bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0        // Write back
   tlbi    vmalle1
   b       4f
2: msr     sctlr_el2, x0        // Write back
   tlbi    alle2
   b       4f
3: msr     sctlr_el3, x0        // Write back
   tlbi    alle3
4: dsb     sy
   isb
   ret


ASM_FUNC(ArmDisableCachesAndMmu)
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Get control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Get control register EL3
4: mov     x1, #~(CTRL_M_BIT | CTRL_C_BIT | CTRL_I_BIT)  // Disable MMU, D & I caches
   and     x0, x0, x1
   EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       4f
2: msr     sctlr_el2, x0        // Write back control register
   b       4f
3: msr     sctlr_el3, x0        // Write back control register
4: dsb     sy
   isb
   ret


ASM_FUNC(ArmMmuEnabled)
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Get control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Get control register EL3
4: and     x0, x0, #CTRL_M_BIT
   ret


ASM_FUNC(ArmEnableDataCache)
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Get control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Get control register EL3
4: orr     x0, x0, #CTRL_C_BIT  // Set C bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       4f
2: msr     sctlr_el2, x0        // Write back control register
   b       4f
3: msr     sctlr_el3, x0        // Write back control register
4: dsb     sy
   isb
   ret


ASM_FUNC(ArmDisableDataCache)
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Get control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Get control register EL3
4: and     x0, x0, #~CTRL_C_BIT // Clear C bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       4f
2: msr     sctlr_el2, x0        // Write back control register
   b       4f
3: msr     sctlr_el3, x0        // Write back control register
4: dsb     sy
   isb
   ret


ASM_FUNC(ArmEnableInstructionCache)
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Get control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Get control register EL3
4: orr     x0, x0, #CTRL_I_BIT  // Set I bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       4f
2: msr     sctlr_el2, x0        // Write back control register
   b       4f
3: msr     sctlr_el3, x0        // Write back control register
4: dsb     sy
   isb
   ret


ASM_FUNC(ArmDisableInstructionCache)
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Get control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Get control register EL3
4: and     x0, x0, #~CTRL_I_BIT // Clear I bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       4f
2: msr     sctlr_el2, x0        // Write back control register
   b       4f
3: msr     sctlr_el3, x0        // Write back control register
4: dsb     sy
   isb
   ret


ASM_FUNC(ArmEnableAlignmentCheck)
   EL1_OR_EL2(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       3f
2: mrs     x0, sctlr_el2        // Get control register EL2
3: orr     x0, x0, #CTRL_A_BIT  // Set A (alignment check) bit
   EL1_OR_EL2(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       3f
2: msr     sctlr_el2, x0        // Write back control register
3: dsb     sy
   isb
   ret


ASM_FUNC(ArmDisableAlignmentCheck)
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Get control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Get control register EL3
4: and     x0, x0, #~CTRL_A_BIT // Clear A (alignment check) bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       4f
2: msr     sctlr_el2, x0        // Write back control register
   b       4f
3: msr     sctlr_el3, x0        // Write back control register
4: dsb     sy
   isb
   ret

ASM_FUNC(ArmEnableStackAlignmentCheck)
   EL1_OR_EL2(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       3f
2: mrs     x0, sctlr_el2        // Get control register EL2
3: orr     x0, x0, #CTRL_SA_BIT // Set SA (stack alignment check) bit
   EL1_OR_EL2(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       3f
2: msr     sctlr_el2, x0        // Write back control register
3: dsb     sy
   isb
   ret


ASM_FUNC(ArmDisableStackAlignmentCheck)
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Get control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Get control register EL3
4: bic     x0, x0, #CTRL_SA_BIT // Clear SA (stack alignment check) bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       4f
2: msr     sctlr_el2, x0        // Write back control register
   b       4f
3: msr     sctlr_el3, x0        // Write back control register
4: dsb     sy
   isb
   ret


// Branch prediction is always on in AArch64; anything beyond that is implementation specific.
// Kept for C compatibility for now.
ASM_FUNC(ArmEnableBranchPrediction)
  ret


// Branch prediction is always on in AArch64; anything beyond that is implementation specific.
// Kept for C compatibility for now.
ASM_FUNC(ArmDisableBranchPrediction)
  ret


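// AArch64AllDataCachesOperation: x0 carries a pointer to a Set/Way cache
// maintenance routine (e.g. ArmCleanDataCacheEntryBySetWay). That routine is
// invoked once for every set/way of every data or unified cache level up to
// the Level of Coherency reported by CLIDR_EL1, with the encoded
// Set/Way/Level value passed back in x0.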
ASM_FUNC(AArch64AllDataCachesOperation)
// We can use regs 0-7 and 9-15 without having to save/restore.
// Save our link register on the stack (the stack must always be quad-word aligned).
  stp   x29, x30, [sp, #-16]!
  mov   x29, sp
  mov   x1, x0                  // Save Function call in x1
  mrs   x6, clidr_el1           // Read EL1 CLIDR
  and   x3, x6, #0x7000000      // Mask out all but Level of Coherency (LoC)
  lsr   x3, x3, #23             // Left align cache level value - the level is shifted by 1 to the
                                // right to ease the access to CSSELR and the Set/Way operation.
  cbz   x3, L_Finished          // No need to clean if LoC is 0
  mov   x10, #0                 // Start clean at cache level 0

Loop1:
  add   x2, x10, x10, lsr #1    // Work out 3x cachelevel for cache info
  lsr   x12, x6, x2             // bottom 3 bits are the Cache type for this level
  and   x12, x12, #7            // get those 3 bits alone
  cmp   x12, #2                 // what cache at this level?
  b.lt  L_Skip                  // no cache or only instruction cache at this level
  msr   csselr_el1, x10         // write the Cache Size selection register with current level (CSSELR)
  isb                           // isb to sync the change to the CacheSizeID reg
  mrs   x12, ccsidr_el1         // read the current Cache Size ID register (CCSIDR)
  and   x2, x12, #0x7           // extract the line length field
  add   x2, x2, #4              // add 4 for the line length offset (log2 16 bytes)
  mov   x4, #0x400
  sub   x4, x4, #1
  and   x4, x4, x12, lsr #3     // x4 is the max number of the way size (right aligned)
  clz   w5, w4                  // w5 is the bit position of the way size increment
  mov   x7, #0x00008000
  sub   x7, x7, #1
  and   x7, x7, x12, lsr #13    // x7 is the max number of the index size (right aligned)

Loop2:
  mov   x9, x4                  // x9 working copy of the max way size (right aligned)

Loop3:
  lsl   x11, x9, x5
  orr   x0, x10, x11            // factor in the way number and cache number
  lsl   x11, x7, x2
  orr   x0, x0, x11             // factor in the index number

  blr   x1                      // Go to the requested cache operation

  subs  x9, x9, #1              // decrement the way number
  b.ge  Loop3
  subs  x7, x7, #1              // decrement the index
  b.ge  Loop2
L_Skip:
  add   x10, x10, #2            // increment the cache number
  cmp   x3, x10
  b.gt  Loop1

L_Finished:
  dsb   sy
  isb
  ldp   x29, x30, [sp], #0x10
  ret


ASM_FUNC(ArmDataMemoryBarrier)
  dmb   sy
  ret


ASM_FUNC(ArmDataSynchronizationBarrier)
  dsb   sy
  ret


ASM_FUNC(ArmInstructionSynchronizationBarrier)
  isb
  ret


ASM_FUNC(ArmWriteVBar)
   EL1_OR_EL2_OR_EL3(x1)
1: msr     vbar_el1, x0         // Set the Address of the EL1 Vector Table in the VBAR register
   b       4f
2: msr     vbar_el2, x0         // Set the Address of the EL2 Vector Table in the VBAR register
   b       4f
3: msr     vbar_el3, x0         // Set the Address of the EL3 Vector Table in the VBAR register
4: isb
   ret

ASM_FUNC(ArmReadVBar)
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, vbar_el1         // Get the Address of the EL1 Vector Table from the VBAR register
   ret
2: mrs     x0, vbar_el2         // Get the Address of the EL2 Vector Table from the VBAR register
   ret
3: mrs     x0, vbar_el3         // Get the Address of the EL3 Vector Table from the VBAR register
   ret


ASM_FUNC(ArmEnableVFP)
  // Check whether floating-point is implemented in the processor.
  mov   x1, x30                 // Save LR
  bl    ArmReadIdAA64Pfr0       // Read EL1 Processor Feature Register (PFR0)
  mov   x30, x1                 // Restore LR
  ubfx  x0, x0, #16, #4         // Extract the FP bits 16:19
  cmp   x0, #0xF                // Check if FP bits are '1111b',
                                // i.e. Floating Point not implemented
  b.eq  4f                      // Exit when VFP is not implemented.

  // VFP is implemented.
  // Make sure VFP exceptions are not trapped (to any exception level).
  mrs   x0, cpacr_el1           // Read EL1 Coprocessor Access Control Register (CPACR)
  orr   x0, x0, #CPACR_VFP_BITS // Disable VFP traps to EL1
  msr   cpacr_el1, x0           // Write back EL1 Coprocessor Access Control Register (CPACR)
  mov   x1, #AARCH64_CPTR_TFP   // TFP Bit for trapping VFP Exceptions
  EL1_OR_EL2_OR_EL3(x2)
1: ret                          // Not configurable in EL1
2: mrs   x0, cptr_el2           // Disable VFP traps to EL2
   bic   x0, x0, x1
   msr   cptr_el2, x0
   ret
3: mrs   x0, cptr_el3           // Disable VFP traps to EL3
   bic   x0, x0, x1
   msr   cptr_el3, x0
4: ret


ASM_FUNC(ArmCallWFI)
  wfi
  ret


ASM_FUNC(ArmReadMpidr)
  mrs   x0, mpidr_el1           // read EL1 MPIDR
  ret


// Keep old function names for C compatibility for now. Change later?
ASM_FUNC(ArmReadTpidrurw)
  mrs   x0, tpidr_el0           // read tpidr_el0 (v7 TPIDRURW) -> (v8 TPIDR_EL0)
  ret


// Keep old function names for C compatibility for now. Change later?
ASM_FUNC(ArmWriteTpidrurw)
  msr   tpidr_el0, x0           // write tpidr_el0 (v7 TPIDRURW) -> (v8 TPIDR_EL0)
  ret


// Arch timers are mandatory on AArch64
ASM_FUNC(ArmIsArchTimerImplemented)
  mov   x0, #1
  ret


ASM_FUNC(ArmReadIdAA64Pfr0)
  mrs   x0, id_aa64pfr0_el1     // Read ID_AA64PFR0 Register
  ret


// VOID ArmWriteHcr(UINTN Hcr)
ASM_FUNC(ArmWriteHcr)
  msr   hcr_el2, x0             // Write the passed HCR value
  ret

// UINTN ArmReadHcr(VOID)
ASM_FUNC(ArmReadHcr)
  mrs   x0, hcr_el2
  ret

// UINTN ArmReadCurrentEL(VOID)
ASM_FUNC(ArmReadCurrentEL)
  mrs   x0, CurrentEL
  ret

// UINT32 ArmReadCntHctl(VOID)
ASM_FUNC(ArmReadCntHctl)
  mrs   x0, cnthctl_el2
  ret

// VOID ArmWriteCntHctl(UINT32 CntHctl)
ASM_FUNC(ArmWriteCntHctl)
  msr   cnthctl_el2, x0
  ret

ASM_FUNCTION_REMOVE_IF_UNREFERENCED