#------------------------------------------------------------------------------
#
# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>
# Copyright (c) 2011 - 2014, ARM Limited. All rights reserved.
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution.  The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
#------------------------------------------------------------------------------

#include <Chipset/AArch64.h>
#include <AsmMacroIoLibV8.h>

.text
.align 3

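// ASM_PFX() applies the toolchain symbol prefix and GCC_ASM_EXPORT() makes a
// symbol visible to the linker; both are standard EDK2 assembly macros.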
GCC_ASM_EXPORT (ArmInvalidateInstructionCache)
GCC_ASM_EXPORT (ArmInvalidateDataCacheEntryByMVA)
GCC_ASM_EXPORT (ArmCleanDataCacheEntryByMVA)
GCC_ASM_EXPORT (ArmCleanInvalidateDataCacheEntryByMVA)
GCC_ASM_EXPORT (ArmInvalidateDataCacheEntryBySetWay)
GCC_ASM_EXPORT (ArmCleanDataCacheEntryBySetWay)
GCC_ASM_EXPORT (ArmCleanInvalidateDataCacheEntryBySetWay)
GCC_ASM_EXPORT (ArmDrainWriteBuffer)
GCC_ASM_EXPORT (ArmEnableMmu)
GCC_ASM_EXPORT (ArmDisableMmu)
GCC_ASM_EXPORT (ArmDisableCachesAndMmu)
GCC_ASM_EXPORT (ArmMmuEnabled)
GCC_ASM_EXPORT (ArmEnableDataCache)
GCC_ASM_EXPORT (ArmDisableDataCache)
GCC_ASM_EXPORT (ArmEnableInstructionCache)
GCC_ASM_EXPORT (ArmDisableInstructionCache)
GCC_ASM_EXPORT (ArmDisableAlignmentCheck)
GCC_ASM_EXPORT (ArmEnableAlignmentCheck)
GCC_ASM_EXPORT (ArmEnableBranchPrediction)
GCC_ASM_EXPORT (ArmDisableBranchPrediction)
GCC_ASM_EXPORT (AArch64AllDataCachesOperation)
GCC_ASM_EXPORT (AArch64PerformPoUDataCacheOperation)
GCC_ASM_EXPORT (ArmDataMemoryBarrier)
GCC_ASM_EXPORT (ArmDataSyncronizationBarrier)
GCC_ASM_EXPORT (ArmInstructionSynchronizationBarrier)
GCC_ASM_EXPORT (ArmWriteVBar)
GCC_ASM_EXPORT (ArmReadVBar)
GCC_ASM_EXPORT (ArmEnableVFP)
GCC_ASM_EXPORT (ArmCallWFI)
GCC_ASM_EXPORT (ArmInvalidateInstructionAndDataTlb)
GCC_ASM_EXPORT (ArmReadMpidr)
GCC_ASM_EXPORT (ArmReadTpidrurw)
GCC_ASM_EXPORT (ArmWriteTpidrurw)
GCC_ASM_EXPORT (ArmIsArchTimerImplemented)
GCC_ASM_EXPORT (ArmReadIdPfr0)
GCC_ASM_EXPORT (ArmReadIdPfr1)
GCC_ASM_EXPORT (ArmWriteHcr)
GCC_ASM_EXPORT (ArmReadCurrentEL)

.set CTRL_M_BIT,      (1 << 0)
.set CTRL_A_BIT,      (1 << 1)
.set CTRL_C_BIT,      (1 << 2)
.set CTRL_I_BIT,      (1 << 12)
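// CPACR_EL1.FPEN occupies bits [21:20]; setting both bits (0b11) disables
// trapping of FP/SIMD instructions at EL0 and EL1.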
.set CPACR_VFP_BITS,  (3 << 20)

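// The EL1_OR_EL2_OR_EL3(reg) and EL1_OR_EL2(reg) macros used throughout this
// file come from AsmMacroIoLibV8.h: they read CurrentEL into the given scratch
// register and branch to the local label 1:, 2: or 3: that matches the current
// exception level, so each function provides one numbered code path per EL.
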
ASM_PFX(ArmInvalidateDataCacheEntryByMVA):
  dc    ivac, x0                // Invalidate single data cache line
  dsb   sy
  isb
  ret


ASM_PFX(ArmCleanDataCacheEntryByMVA):
  dc    cvac, x0                // Clean single data cache line
  dsb   sy
  isb
  ret


ASM_PFX(ArmCleanInvalidateDataCacheEntryByMVA):
  dc    civac, x0               // Clean and invalidate single data cache line
  dsb   sy
  isb
  ret


ASM_PFX(ArmInvalidateDataCacheEntryBySetWay):
  dc    isw, x0                 // Invalidate this line
  dsb   sy
  isb
  ret


ASM_PFX(ArmCleanInvalidateDataCacheEntryBySetWay):
  dc    cisw, x0                // Clean and Invalidate this line
  dsb   sy
  isb
  ret


ASM_PFX(ArmCleanDataCacheEntryBySetWay):
  dc    csw, x0                 // Clean this line
  dsb   sy
  isb
  ret


ASM_PFX(ArmInvalidateInstructionCache):
  ic    iallu                   // Invalidate entire instruction cache
  dsb   sy
  isb
  ret


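// VOID ArmEnableMmu (VOID): dispatches on the current EL, invalidates the
// stale TLB entries for that translation regime, then sets SCTLR.M.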
ASM_PFX(ArmEnableMmu):
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Read System control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Read System control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Read System control register EL3
4: orr     x0, x0, #CTRL_M_BIT  // Set MMU enable bit
   EL1_OR_EL2_OR_EL3(x1)
1: tlbi    vmalle1
   isb
   msr     sctlr_el1, x0        // Write back
   b       4f
2: tlbi    alle2
   isb
   msr     sctlr_el2, x0        // Write back
   b       4f
3: tlbi    alle3
   isb
   msr     sctlr_el3, x0        // Write back
4: dsb     sy
   isb
   ret


ASM_PFX(ArmDisableMmu):
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Read System Control Register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Read System Control Register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Read System Control Register EL3
4: bic     x0, x0, #CTRL_M_BIT  // Clear MMU enable bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0        // Write back
   tlbi    vmalle1
   b       4f
2: msr     sctlr_el2, x0        // Write back
   tlbi    alle2
   b       4f
3: msr     sctlr_el3, x0        // Write back
   tlbi    alle3
4: dsb     sy
   isb
   ret


ASM_PFX(ArmDisableCachesAndMmu):
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Get control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Get control register EL3
4: bic     x0, x0, #CTRL_M_BIT  // Disable MMU
   bic     x0, x0, #CTRL_C_BIT  // Disable D Cache
   bic     x0, x0, #CTRL_I_BIT  // Disable I Cache
   EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       4f
2: msr     sctlr_el2, x0        // Write back control register
   b       4f
3: msr     sctlr_el3, x0        // Write back control register
4: dsb     sy
   isb
   ret


ASM_PFX(ArmMmuEnabled):
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Get control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Get control register EL3
4: and     x0, x0, #CTRL_M_BIT
   ret


ASM_PFX(ArmEnableDataCache):
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Get control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Get control register EL3
4: orr     x0, x0, #CTRL_C_BIT  // Set C bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       4f
2: msr     sctlr_el2, x0        // Write back control register
   b       4f
3: msr     sctlr_el3, x0        // Write back control register
4: dsb     sy
   isb
   ret


ASM_PFX(ArmDisableDataCache):
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Get control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Get control register EL3
4: bic     x0, x0, #CTRL_C_BIT  // Clear C bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       4f
2: msr     sctlr_el2, x0        // Write back control register
   b       4f
3: msr     sctlr_el3, x0        // Write back control register
4: dsb     sy
   isb
   ret


ASM_PFX(ArmEnableInstructionCache):
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Get control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Get control register EL3
4: orr     x0, x0, #CTRL_I_BIT  // Set I bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       4f
2: msr     sctlr_el2, x0        // Write back control register
   b       4f
3: msr     sctlr_el3, x0        // Write back control register
4: dsb     sy
   isb
   ret


ASM_PFX(ArmDisableInstructionCache):
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Get control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Get control register EL3
4: bic     x0, x0, #CTRL_I_BIT  // Clear I bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       4f
2: msr     sctlr_el2, x0        // Write back control register
   b       4f
3: msr     sctlr_el3, x0        // Write back control register
4: dsb     sy
   isb
   ret


ASM_PFX(ArmEnableAlignmentCheck):
   EL1_OR_EL2(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       3f
2: mrs     x0, sctlr_el2        // Get control register EL2
3: orr     x0, x0, #CTRL_A_BIT  // Set A (alignment check) bit
   EL1_OR_EL2(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       3f
2: msr     sctlr_el2, x0        // Write back control register
3: dsb     sy
   isb
   ret


ASM_PFX(ArmDisableAlignmentCheck):
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, sctlr_el1        // Get control register EL1
   b       4f
2: mrs     x0, sctlr_el2        // Get control register EL2
   b       4f
3: mrs     x0, sctlr_el3        // Get control register EL3
4: bic     x0, x0, #CTRL_A_BIT  // Clear A (alignment check) bit
   EL1_OR_EL2_OR_EL3(x1)
1: msr     sctlr_el1, x0        // Write back control register
   b       4f
2: msr     sctlr_el2, x0        // Write back control register
   b       4f
3: msr     sctlr_el3, x0        // Write back control register
4: dsb     sy
   isb
   ret


// Branch prediction is always enabled on AArch64; any control of it is
// implementation specific. Kept as a no-op for C compatibility for now.
ASM_PFX(ArmEnableBranchPrediction):
  ret


// Branch prediction is always enabled on AArch64; any control of it is
// implementation specific. Kept as a no-op for C compatibility for now.
ASM_PFX(ArmDisableBranchPrediction):
  ret


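// VOID AArch64AllDataCachesOperation (Operation): x0 carries a pointer to a
// Set/Way cache maintenance routine (e.g. one of the *BySetWay helpers above),
// which is called once for every set/way/level value up to the Level of
// Coherency. (The C-level prototype is paraphrased here, not taken from the
// header.)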
ASM_PFX(AArch64AllDataCachesOperation):
// We can use regs 0-7 and 9-15 without having to save/restore.
// Save our link register on the stack.
  str   x30, [sp, #-0x10]!
  mov   x1, x0                  // Save Function call in x1
  mrs   x6, clidr_el1           // Read EL1 CLIDR
  and   x3, x6, #0x7000000      // Mask out all but Level of Coherency (LoC)
  lsr   x3, x3, #23             // Shift by 23 rather than 24, leaving the level
                                // value multiplied by 2 to match the level field
                                // position of CSSELR and the Set/Way operand.
  cbz   x3, L_Finished          // No need to clean if LoC is 0
  mov   x10, #0                 // Start clean at cache level 0
  b     Loop1

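// VOID AArch64PerformPoUDataCacheOperation (Operation): same walk as above,
// but only up to the Level of Unification rather than the Level of Coherency.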
ASM_PFX(AArch64PerformPoUDataCacheOperation):
// We can use regs 0-7 and 9-15 without having to save/restore.
// Save our link register on the stack.
  str   x30, [sp, #-0x10]!
  mov   x1, x0                  // Save Function call in x1
  mrs   x6, clidr_el1           // Read EL1 CLIDR
  and   x3, x6, #0x38000000     // Mask out all but the Level of Unification (LoUU)
  lsr   x3, x3, #26             // Shift by 26 rather than 27, leaving the level
                                // value multiplied by 2, as for the LoC above.
  cbz   x3, L_Finished          // No need to clean if LoUU is 0
  mov   x10, #0                 // Start clean at cache level 0

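// The loops below build the Set/Way operand expected by DC <op>, Xt:
//   (way << (32 - log2(ways))) | (set << log2(line length)) | (level << 1)
// using the cache geometry reported by CCSIDR_EL1 for the level selected in
// CSSELR_EL1.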
Loop1:
  add   x2, x10, x10, lsr #1    // Work out 3x cachelevel for cache info
  lsr   x12, x6, x2             // bottom 3 bits are the Cache type for this level
  and   x12, x12, #7            // get those 3 bits alone
  cmp   x12, #2                 // what cache is at this level?
  b.lt  L_Skip                  // no cache or only instruction cache at this level
  msr   csselr_el1, x10         // write the Cache Size selection register with current level (CSSELR)
  isb                           // isb to sync the change to the CacheSizeID reg
  mrs   x12, ccsidr_el1         // read the current Cache Size ID register (CCSIDR)
  and   x2, x12, #0x7           // extract the line length field
  add   x2, x2, #4              // add 4 for the line length offset (log2 of the 16-byte minimum)
  mov   x4, #0x400
  sub   x4, x4, #1
  and   x4, x4, x12, lsr #3     // x4 is the maximum way number (right aligned)
  clz   w5, w4                  // w5 is the bit position of the way size increment
  mov   x7, #0x00008000
  sub   x7, x7, #1
  and   x7, x7, x12, lsr #13    // x7 is the maximum set (index) number (right aligned)

Loop2:
  mov   x9, x4                  // x9 is a working copy of the maximum way number (right aligned)

Loop3:
  lsl   x11, x9, x5
  orr   x0, x10, x11            // factor in the way number and cache number
  lsl   x11, x7, x2
  orr   x0, x0, x11             // factor in the set (index) number

  blr   x1                      // call the requested cache operation

  subs  x9, x9, #1              // decrement the way number
  b.ge  Loop3
  subs  x7, x7, #1              // decrement the set (index) number
  b.ge  Loop2
L_Skip:
  add   x10, x10, #2            // increment the cache number
  cmp   x3, x10
  b.gt  Loop1

L_Finished:
  dsb   sy
  isb
  ldr   x30, [sp], #0x10
  ret


ASM_PFX(ArmDataMemoryBarrier):
  dmb   sy
  ret


ASM_PFX(ArmDataSyncronizationBarrier):
ASM_PFX(ArmDrainWriteBuffer):
  dsb   sy
  ret


ASM_PFX(ArmInstructionSynchronizationBarrier):
  isb
  ret


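// VOID ArmWriteVBar (UINTN VectorBase): installs x0 as the vector table base
// (VBAR) for the current exception level.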
ASM_PFX(ArmWriteVBar):
   EL1_OR_EL2_OR_EL3(x1)
1: msr     vbar_el1, x0         // Set the Address of the EL1 Vector Table in the VBAR register
   b       4f
2: msr     vbar_el2, x0         // Set the Address of the EL2 Vector Table in the VBAR register
   b       4f
3: msr     vbar_el3, x0         // Set the Address of the EL3 Vector Table in the VBAR register
4: isb
   ret

ASM_PFX(ArmReadVBar):
   EL1_OR_EL2_OR_EL3(x1)
1: mrs     x0, vbar_el1         // Get the Address of the EL1 Vector Table from the VBAR register
   ret
2: mrs     x0, vbar_el2         // Get the Address of the EL2 Vector Table from the VBAR register
   ret
3: mrs     x0, vbar_el3         // Get the Address of the EL3 Vector Table from the VBAR register
   ret


ASM_PFX(ArmEnableVFP):
  // Check whether floating-point is implemented in the processor.
  mov   x1, x30                 // Save LR
  bl    ArmReadIdPfr0           // Read EL1 Processor Feature Register (PFR0)
  mov   x30, x1                 // Restore LR
  ands  x0, x0, #AARCH64_PFR0_FP  // Extract bits indicating VFP implementation
  cmp   x0, #0                  // VFP is implemented if '0'.
  b.ne  4f                      // Exit if VFP not implemented.
  // VFP is implemented.
  // Make sure VFP exceptions are not trapped (to any exception level).
  mrs   x0, cpacr_el1           // Read EL1 Coprocessor Access Control Register (CPACR)
  orr   x0, x0, #CPACR_VFP_BITS // Disable VFP traps to EL1
  msr   cpacr_el1, x0           // Write back EL1 Coprocessor Access Control Register (CPACR)
  mov   x1, #AARCH64_CPTR_TFP   // TFP bit for trapping VFP exceptions
  EL1_OR_EL2_OR_EL3(x2)
1: ret                          // Not configurable in EL1
2: mrs   x0, cptr_el2           // Disable VFP traps to EL2
   bic   x0, x0, x1
   msr   cptr_el2, x0
   ret
3: mrs   x0, cptr_el3           // Disable VFP traps to EL3
   bic   x0, x0, x1
   msr   cptr_el3, x0
4: ret


ASM_PFX(ArmCallWFI):
  wfi
  ret


ASM_PFX(ArmInvalidateInstructionAndDataTlb):
   EL1_OR_EL2_OR_EL3(x0)
1: tlbi    vmalle1
   b       4f
2: tlbi    alle2
   b       4f
3: tlbi    alle3
4: dsb     sy
   isb
   ret


ASM_PFX(ArmReadMpidr):
  mrs   x0, mpidr_el1           // read EL1 MPIDR
  ret


// Keep old function names for C compatibility for now. Change later?
ASM_PFX(ArmReadTpidrurw):
  mrs   x0, tpidr_el0           // read tpidr_el0 (v7 TPIDRURW) -> (v8 TPIDR_EL0)
  ret


// Keep old function names for C compatibility for now. Change later?
ASM_PFX(ArmWriteTpidrurw):
  msr   tpidr_el0, x0           // write tpidr_el0 (v7 TPIDRURW) -> (v8 TPIDR_EL0)
  ret


// Arch timers are mandatory on AArch64
ASM_PFX(ArmIsArchTimerImplemented):
  mov   x0, #1
  ret


ASM_PFX(ArmReadIdPfr0):
  mrs   x0, id_aa64pfr0_el1     // Read ID_AA64PFR0 Register
  ret


// Q: id_aa64pfr1_el1 is not defined yet. What does this function want to access?
// A: It is used to set up the arch timer: check whether we have security
//    extensions and permission to configure things.
//    See: ArmPkg/Library/ArmArchTimerLib/AArch64/ArmArchTimerLib.c
//    Not defined yet, but leave it here for now; it should read as all zeros.
ASM_PFX(ArmReadIdPfr1):
  mrs   x0, id_aa64pfr1_el1     // Read ID_AA64PFR1 Register
  ret

// VOID ArmWriteHcr(UINTN Hcr)
ASM_PFX(ArmWriteHcr):
  msr   hcr_el2, x0             // Write the passed HCR value
  ret

// UINTN ArmReadCurrentEL(VOID)
ASM_PFX(ArmReadCurrentEL):
  mrs   x0, CurrentEL
  ret

ASM_FUNCTION_REMOVE_IF_UNREFERENCED