Currently, we always invalidate the TLBs entirely after making
any modification to the page tables. Now that we have introduced
strict memory permissions in quite a number of places, such
modifications occur much more often, and it is better for performance
to flush only those TLB entries that are actually affected by
the changes.

At the same time, relax some system-wide data synchronization barriers
to non-shareable. When running in UEFI, we don't share virtual address
translations with other masters, unless we are running under
virtualization; in that case, the host will upgrade them as appropriate
(by setting an override at EL2).
Contributed-under: TianoCore Contribution Agreement 1.1
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Leif Lindholm <leif.lindholm@linaro.org>
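
To make the performance argument concrete, the sketch below shows a
single-entry invalidation by virtual address, which is the pattern this
patch adopts in place of a full TLB flush. It is an illustration only, not
code from the patch: the helper name is ours, and it assumes execution at
EL1, the usual edk2 base types, and the non-shareable barrier domain the
patch switches to.

  // Illustrative helper (not part of this patch): invalidate only the TLB
  // entries that translate Address at EL1, for all ASIDs, after a page
  // table write. The TLBI VAAE1 operand carries VA[55:12], hence the shift.
  STATIC
  VOID
  InvalidateTlbEntryByVa (
    IN  UINT64  Address
    )
  {
    __asm__ __volatile__ (
      "dsb  nshst      \n"   // complete the descriptor store first
      "tlbi vaae1, %0  \n"   // drop cached translations for this VA only
      "dsb  nsh        \n"   // wait for the invalidation to finish
      "isb"
      :
      : "r" (Address >> 12)
      : "memory"
      );
  }

Compared with a TLBI VMALLE1, which discards every cached stage 1
translation, this keeps unrelated hot translations in the TLB, which is
where the performance win described above comes from.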
 EFIAPI
 ArmReplaceLiveTranslationEntry (
   IN UINT64 *Entry,
+  IN UINT64 Value,
+  IN UINT64 RegionStart
 //  IN VOID  *MVA             // X1
 //  );
 ASM_FUNC(ArmUpdateTranslationTableEntry)
-  dc     civac, x0            // Clean and invalidate data line
-  dsb    sy
+  dsb    nshst
+  lsr    x1, x1, #12
   EL1_OR_EL2_OR_EL3(x0)
1: tlbi   vaae1, x1            // TLB Invalidate VA, EL1
   b      4f
2: tlbi   vae2, x1             // TLB Invalidate VA, EL2
   b      4f
3: tlbi   vae3, x1             // TLB Invalidate VA, EL3
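
The EL1_OR_EL2_OR_EL3 macro used above is an ArmLib assembly helper that
branches on the running exception level, which is why three different TLBI
operations appear. A rough, hedged C rendering of that dispatch (the
function name and structure are ours, assuming the usual edk2 base types):

  // Illustrative only: pick the TLBI operation matching the current
  // exception level, as the assembly does via EL1_OR_EL2_OR_EL3.
  STATIC
  VOID
  TlbiVaForCurrentEl (
    IN  UINT64  Va
    )
  {
    UINT64  CurrentEl;
    UINT64  Page;

    Page = Va >> 12;                                // TLBI operand = VA[55:12]
    __asm__ __volatile__ ("mrs %0, CurrentEL" : "=r" (CurrentEl));

    switch ((CurrentEl >> 2) & 0x3) {               // CurrentEL[3:2] holds the EL
      case 1:
        __asm__ __volatile__ ("tlbi vaae1, %0" : : "r" (Page));  // all ASIDs, EL1
        break;
      case 2:
        __asm__ __volatile__ ("tlbi vae2, %0" : : "r" (Page));
        break;
      case 3:
        __asm__ __volatile__ ("tlbi vae3, %0" : : "r" (Page));
        break;
    }
    __asm__ __volatile__ ("dsb nsh\n" "isb" ::: "memory");
  }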
 VOID
 ReplaceLiveEntry (
   IN UINT64 *Entry,
+  IN UINT64 Value,
+  IN UINT64 RegionStart
   )
 {
   if (!ArmMmuEnabled ()) {
     *Entry = Value;
   } else {
-    ArmReplaceLiveTranslationEntry (Entry, Value);
+    ArmReplaceLiveTranslationEntry (Entry, Value, RegionStart);

       // Fill the BlockEntry with the new TranslationTable
       ReplaceLiveEntry (BlockEntry,
-        ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TableAttributes | TT_TYPE_TABLE_ENTRY);
+        (UINTN)TranslationTable | TableAttributes | TT_TYPE_TABLE_ENTRY,
+        RegionStart);
     }
   } else {
     if (IndexLevel != PageLevel) {

       *BlockEntry &= BlockEntryMask;
       *BlockEntry |= (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type;

+      ArmUpdateTranslationTableEntry (BlockEntry, (VOID *)RegionStart);
+
       // Go to the next BlockEntry
       RegionStart += BlockEntrySize;
       RegionLength -= BlockEntrySize;
-  // Invalidate all TLB entries so changes are synced
-  ArmInvalidateTlb ();
-
   return EFI_SUCCESS;
 }

-  // Invalidate all TLB entries so changes are synced
-  ArmInvalidateTlb ();
-
   return EFI_SUCCESS;
 }
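
For contrast, the call that the two hunks above remove was a blanket flush.
A hedged sketch of roughly what it amounted to at EL1 (the real
ArmInvalidateTlb dispatches on the current exception level rather than
assuming EL1):

  // Illustrative EL1-only approximation of the removed full flush.
  STATIC
  VOID
  InvalidateAllTlbEntriesSketch (
    VOID
    )
  {
    __asm__ __volatile__ (
      "tlbi vmalle1  \n"   // discard every stage 1 EL0/EL1 translation
      "dsb  nsh      \n"
      "isb"
      ::: "memory"
      );
  }

Dropping this in favour of the per-entry ArmUpdateTranslationTableEntry
calls inside the mapping loop is what limits invalidation to the addresses
that actually changed.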
\r
   dmb    sy
   dc     ivac, x0

+  // flush translations for the target address from the TLBs
+  lsr    x2, x2, #12

   // re-enable the MMU
   msr    sctlr_el\el, x8
 //VOID
 //ArmReplaceLiveTranslationEntry (
 //  IN  UINT64  *Entry,
+//  IN  UINT64  Value,
+//  IN  UINT64  Address
 //  )
 ASM_FUNC(ArmReplaceLiveTranslationEntry)

   // disable interrupts
   msr    daifset, #0xf
   isb

   // clean and invalidate first so that we don't clobber
   // adjacent entries that are dirty in the caches
   dc     civac, x0
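
The clean-and-invalidate step above exists because the 64-bit descriptor
shares a cache line with neighbouring entries; cleaning first pushes any
dirty neighbours to memory before the line is later invalidated with the
MMU and caches off. A hedged C equivalent of that single maintenance
operation (illustrative only, not the patch's code):

  // Illustrative only: clean and invalidate the cache line holding the
  // descriptor so that dirty adjacent entries are not lost.
  STATIC
  VOID
  CleanInvalidateDescriptorLine (
    IN  UINT64  *Entry
    )
  {
    __asm__ __volatile__ ("dc civac, %0" : : "r" (Entry) : "memory");
    __asm__ __volatile__ ("dsb nsh" ::: "memory");
  }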
\r
   EL1_OR_EL2_OR_EL3(x3)
1: __replace_entry 1
   b      4f
3: __replace_entry 3

   ret

ASM_GLOBAL ASM_PFX(ArmReplaceLiveTranslationEntrySize)