// Exported (global) symbol declarations for this translation unit.
// NOTE(review): stray unified-diff '-' markers removed from the
// ArmDrainWriteBuffer and AArch64PerformPoUDataCacheOperation lines;
// both symbols are defined later in this file, so they stay exported.
GCC_ASM_EXPORT (ArmInvalidateDataCacheEntryBySetWay)
GCC_ASM_EXPORT (ArmCleanDataCacheEntryBySetWay)
GCC_ASM_EXPORT (ArmCleanInvalidateDataCacheEntryBySetWay)
GCC_ASM_EXPORT (ArmDrainWriteBuffer)
GCC_ASM_EXPORT (ArmEnableMmu)
GCC_ASM_EXPORT (ArmDisableMmu)
GCC_ASM_EXPORT (ArmDisableCachesAndMmu)
GCC_ASM_EXPORT (ArmEnableBranchPrediction)
GCC_ASM_EXPORT (ArmDisableBranchPrediction)
GCC_ASM_EXPORT (AArch64AllDataCachesOperation)
GCC_ASM_EXPORT (AArch64PerformPoUDataCacheOperation)
GCC_ASM_EXPORT (ArmDataMemoryBarrier)
GCC_ASM_EXPORT (ArmDataSynchronizationBarrier)
GCC_ASM_EXPORT (ArmInstructionSynchronizationBarrier)
\r
// ArmInvalidateDataCacheEntryByMVA
// In:    x0 = virtual address of the data cache line to invalidate
// Note:  dsb/isb ensure the cache maintenance completes and is visible
//        before returning. (Diff '-' markers stripped from these lines.)
ASM_PFX(ArmInvalidateDataCacheEntryByMVA):
  dc    ivac, x0    // Invalidate single data cache line
  dsb   sy
  isb
  ret
\r
\r
// ArmCleanDataCacheEntryByMVA
// In:    x0 = virtual address of the data cache line to clean
// Note:  dsb/isb ensure the cache maintenance completes and is visible
//        before returning. (Diff '-' markers stripped from these lines.)
ASM_PFX(ArmCleanDataCacheEntryByMVA):
  dc    cvac, x0    // Clean single data cache line
  dsb   sy
  isb
  ret
\r
\r
// ArmCleanInvalidateDataCacheEntryByMVA
// In:    x0 = virtual address of the data cache line to clean+invalidate
// Note:  dsb/isb ensure the cache maintenance completes and is visible
//        before returning. (Diff '-' markers stripped from these lines.)
ASM_PFX(ArmCleanInvalidateDataCacheEntryByMVA):
  dc    civac, x0   // Clean and invalidate single data cache line
  dsb   sy
  isb
  ret
\r
\r
// ArmInvalidateDataCacheEntryBySetWay
// In:    x0 = Set/Way/Level value in DC ISW encoding
// Note:  dsb/isb ensure the cache maintenance completes and is visible
//        before returning. (Diff '-' markers stripped from these lines.)
ASM_PFX(ArmInvalidateDataCacheEntryBySetWay):
  dc    isw, x0     // Invalidate this line
  dsb   sy
  isb
  ret
\r
\r
// ArmCleanInvalidateDataCacheEntryBySetWay
// In:    x0 = Set/Way/Level value in DC CISW encoding
// Note:  dsb/isb ensure the cache maintenance completes and is visible
//        before returning. (Diff '-' markers stripped from these lines.)
ASM_PFX(ArmCleanInvalidateDataCacheEntryBySetWay):
  dc    cisw, x0    // Clean and Invalidate this line
  dsb   sy
  isb
  ret
\r
\r
// ArmCleanDataCacheEntryBySetWay
// In:    x0 = Set/Way/Level value in DC CSW encoding
// Note:  dsb/isb ensure the cache maintenance completes and is visible
//        before returning. (Diff '-' markers stripped from these lines.)
ASM_PFX(ArmCleanDataCacheEntryBySetWay):
  dc    csw, x0     // Clean this line
  dsb   sy
  isb
  ret
\r
\r
// right to ease the access to CSSELR and the Set/Way operation.
  cbz   x3, L_Finished          // No need to clean if LoC is 0
  mov   x10, #0                 // Start clean at cache level 0
  b     Loop1

// AArch64PerformPoUDataCacheOperation
// In:    x0 = operation callback — presumably invoked per set/way value
//        inside Loop1 (TODO confirm against the full file; the loop body
//        is outside this view)
// Walks data cache levels up to the Point of Unification (PoU) from CLIDR_EL1.
// NOTE(review): unified-diff '-' markers stripped from every line of this
// function; the code itself is unchanged.
ASM_PFX(AArch64PerformPoUDataCacheOperation):
// We can use regs 0-7 and 9-15 without having to save/restore.
// Save our link register on the stack. - The stack must always be quad-word aligned
  str   x30, [sp, #-16]!        // push lr; pre-decrement keeps sp 16-byte aligned
  mov   x1, x0                  // Save Function call in x1
  mrs   x6, clidr_el1           // Read EL1 CLIDR
  and   x3, x6, #0x38000000     // Mask out all but Point of Unification (PoU)
  lsr   x3, x3, #26             // Left align cache level value - the level is shifted by 1 to the
                                // right to ease the access to CSSELR and the Set/Way operation.
  cbz   x3, L_Finished          // No need to clean if LoC is 0
  mov   x10, #0                 // Start clean at cache level 0

Loop1:
  add   x2, x10, x10, lsr #1    // Work out 3x cachelevel for cache info
\r
\r
// ArmDataSynchronizationBarrier
// ArmDrainWriteBuffer — legacy alias for the same full-system DSB
// (diff '-' marker stripped from the alias label; it is exported above).
ASM_PFX(ArmDataSynchronizationBarrier):
ASM_PFX(ArmDrainWriteBuffer):
  dsb   sy
  ret
\r