\r
#include "CpuDxe.h"\r
\r
+\r
//\r
// Translation/page table definitions\r
//\r
break;\r
default:\r
return EFI_UNSUPPORTED;\r
- break;\r
}\r
\r
// determine protection attributes\r
switch(SectionAttributes & ARM_SECTION_RW_PERMISSIONS_MASK) {\r
case ARM_SECTION_NO_ACCESS: // no read, no write\r
- *GcdAttributes |= EFI_MEMORY_WP | EFI_MEMORY_RP;\r
+ //*GcdAttributes |= EFI_MEMORY_WP | EFI_MEMORY_RP;\r
break;\r
\r
case ARM_SECTION_PRIV_ACCESS_ONLY:\r
\r
default:\r
return EFI_UNSUPPORTED;\r
- break;\r
}\r
\r
// now process eXecute Never attribute
return EFI_SUCCESS;\r
}\r
\r
+/**\r
+ Searches memory descriptors covered by given memory range.\r
+\r
+ This function searches into the Gcd Memory Space for descriptors\r
+ (from StartIndex to EndIndex) that contains the memory range\r
+ specified by BaseAddress and Length.\r
+\r
+ @param MemorySpaceMap Gcd Memory Space Map as array.\r
+ @param NumberOfDescriptors Number of descriptors in map.\r
+ @param BaseAddress BaseAddress for the requested range.\r
+ @param Length Length for the requested range.\r
+ @param StartIndex Start index into the Gcd Memory Space Map.\r
+ @param EndIndex End index into the Gcd Memory Space Map.\r
+\r
+ @retval EFI_SUCCESS Search successfully.\r
+ @retval EFI_NOT_FOUND The requested descriptors does not exist.\r
+\r
+**/\r
+EFI_STATUS\r
+SearchGcdMemorySpaces (\r
+ IN EFI_GCD_MEMORY_SPACE_DESCRIPTOR *MemorySpaceMap,\r
+ IN UINTN NumberOfDescriptors,\r
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
+ IN UINT64 Length,\r
+ OUT UINTN *StartIndex,\r
+ OUT UINTN *EndIndex\r
+ )\r
+{\r
+ UINTN Index;\r
+\r
+ *StartIndex = 0;\r
+ *EndIndex = 0;\r
+ for (Index = 0; Index < NumberOfDescriptors; Index++) {\r
+ if (BaseAddress >= MemorySpaceMap[Index].BaseAddress &&\r
+ BaseAddress < MemorySpaceMap[Index].BaseAddress + MemorySpaceMap[Index].Length) {\r
+ *StartIndex = Index;\r
+ }\r
+ if (BaseAddress + Length - 1 >= MemorySpaceMap[Index].BaseAddress &&\r
+ BaseAddress + Length - 1 < MemorySpaceMap[Index].BaseAddress + MemorySpaceMap[Index].Length) {\r
+ *EndIndex = Index;\r
+ return EFI_SUCCESS;\r
+ }\r
+ }\r
+ return EFI_NOT_FOUND;\r
+}\r
+\r
+\r
+/**\r
+ Sets the attributes for a specified range in Gcd Memory Space Map.\r
+\r
+ This function sets the attributes for a specified range in\r
+ Gcd Memory Space Map.\r
+\r
+ @param MemorySpaceMap Gcd Memory Space Map as array\r
+ @param NumberOfDescriptors Number of descriptors in map\r
+ @param BaseAddress BaseAddress for the range\r
+ @param Length Length for the range\r
+ @param Attributes Attributes to set\r
+\r
+ @retval EFI_SUCCESS Memory attributes set successfully\r
+ @retval EFI_NOT_FOUND The specified range does not exist in Gcd Memory Space\r
+\r
+**/\r
+EFI_STATUS\r
+SetGcdMemorySpaceAttributes (\r
+ IN EFI_GCD_MEMORY_SPACE_DESCRIPTOR *MemorySpaceMap,\r
+ IN UINTN NumberOfDescriptors,\r
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,\r
+ IN UINT64 Length,\r
+ IN UINT64 Attributes\r
+ )\r
+{\r
+ EFI_STATUS Status;\r
+ UINTN Index;\r
+ UINTN StartIndex;\r
+ UINTN EndIndex;\r
+ EFI_PHYSICAL_ADDRESS RegionStart;\r
+ UINT64 RegionLength;\r
+\r
+ //\r
+ // Get all memory descriptors covered by the memory range\r
+ //\r
+ Status = SearchGcdMemorySpaces (\r
+ MemorySpaceMap,\r
+ NumberOfDescriptors,\r
+ BaseAddress,\r
+ Length,\r
+ &StartIndex,\r
+ &EndIndex\r
+ );\r
+ if (EFI_ERROR (Status)) {\r
+ return Status;\r
+ }\r
+\r
+ //\r
+ // Go through all related descriptors and set attributes accordingly\r
+ //\r
+ for (Index = StartIndex; Index <= EndIndex; Index++) {\r
+ if (MemorySpaceMap[Index].GcdMemoryType == EfiGcdMemoryTypeNonExistent) {\r
+ continue;\r
+ }\r
+ //\r
+ // Calculate the start and end address of the overlapping range\r
+ //\r
+ if (BaseAddress >= MemorySpaceMap[Index].BaseAddress) {\r
+ RegionStart = BaseAddress;\r
+ } else {\r
+ RegionStart = MemorySpaceMap[Index].BaseAddress;\r
+ }\r
+ if (BaseAddress + Length - 1 < MemorySpaceMap[Index].BaseAddress + MemorySpaceMap[Index].Length) {\r
+ RegionLength = BaseAddress + Length - RegionStart;\r
+ } else {\r
+ RegionLength = MemorySpaceMap[Index].BaseAddress + MemorySpaceMap[Index].Length - RegionStart;\r
+ }\r
+ //\r
+ // Set memory attributes according to MTRR attribute and the original attribute of descriptor\r
+ //\r
+ gDS->SetMemorySpaceAttributes (\r
+ RegionStart,\r
+ RegionLength,\r
+ (MemorySpaceMap[Index].Attributes & ~EFI_MEMORY_CACHETYPE_MASK) | (MemorySpaceMap[Index].Capabilities & Attributes)\r
+ );\r
+ }\r
+\r
+ return EFI_SUCCESS;\r
+}\r
\r
\r
EFI_STATUS\r
IN EFI_CPU_ARCH_PROTOCOL *CpuProtocol\r
)\r
{\r
- EFI_STATUS Status;\r
- UINT32 i;\r
- UINT32 Descriptor;\r
- UINT32 SectionAttributes;\r
- EFI_PHYSICAL_ADDRESS NextRegionBase;\r
- UINT64 NextRegionLength;\r
- UINT64 GcdAttributes;\r
- UINT32 NextRegionAttributes = 0;\r
+ EFI_STATUS Status;\r
+ UINT32 i;\r
+ UINT32 Descriptor;\r
+ UINT32 SectionAttributes;\r
+ EFI_PHYSICAL_ADDRESS NextRegionBase;\r
+ UINT64 NextRegionLength;\r
+ UINT64 GcdAttributes;\r
+ UINT32 NextRegionAttributes = 0;\r
volatile ARM_FIRST_LEVEL_DESCRIPTOR *FirstLevelTable;\r
+ UINTN NumberOfDescriptors;\r
+ EFI_GCD_MEMORY_SPACE_DESCRIPTOR *MemorySpaceMap;\r
+\r
\r
+ DEBUG ((EFI_D_PAGE, "SyncCacheConfig()\n"));\r
\r
// This code assumes MMU is enabled and filled with section translations
ASSERT (ArmMmuEnabled ());\r
\r
+ //\r
+ // Get the memory space map from GCD\r
+ //\r
+ MemorySpaceMap = NULL;\r
+ Status = gDS->GetMemorySpaceMap (&NumberOfDescriptors, &MemorySpaceMap);\r
+ ASSERT_EFI_ERROR (Status);\r
+\r
\r
// The GCD implementation maintains its own copy of the state of memory space attributes. GCD needs\r
// to know what the initial memory space attributes are. The CPU Arch. Protocol does not provide a\r
NextRegionBase = NextRegionLength = 0;\r
for (i=0; i< FIRST_LEVEL_ENTRY_COUNT; i++) {\r
\r
- // obtain existing descriptor\r
- Descriptor = FirstLevelTable[i];\r
+ // obtain existing descriptor and make sure it contains a valid Base Address even if it is a fault section\r
+ Descriptor = FirstLevelTable[i] | (ARM_SECTION_BASE_MASK & (i << ARM_SECTION_BASE_SHIFT));\r
\r
// extract attributes (cacheability and permissions)\r
SectionAttributes = Descriptor & 0xDEC;\r
\r
// convert section entry attributes to GCD bitmask\r
Status = SectionToGcdAttributes (NextRegionAttributes, &GcdAttributes);\r
- ASSERT_EFI_ERROR(Status);\r
+ ASSERT_EFI_ERROR (Status);\r
\r
// update GCD with these changes (this will recurse into our own CpuSetMemoryAttributes below which is OK)\r
- Status = gDS->SetMemorySpaceAttributes (NextRegionBase, NextRegionLength, GcdAttributes);\r
- ASSERT_EFI_ERROR(Status);\r
+ SetGcdMemorySpaceAttributes (MemorySpaceMap, NumberOfDescriptors, NextRegionBase, NextRegionLength, GcdAttributes);\r
+\r
\r
// start on a new region\r
NextRegionLength = 0;\r
volatile ARM_FIRST_LEVEL_DESCRIPTOR *FirstLevelTable;\r
volatile ARM_PAGE_TABLE_ENTRY *PageTable;\r
\r
+ Status = EFI_SUCCESS;\r
+\r
// EntryMask: bitmask of values to change (1 = change this value, 0 = leave alone)\r
// EntryValue: values at bit positions specified by EntryMask\r
-\r
+ EntryMask = ARM_PAGE_DESC_TYPE_MASK;\r
+ EntryValue = ARM_PAGE_TYPE_SMALL;\r
// Although the PI spec is unclear on this the GCD guarantees that only\r
// one Attribute bit is set at a time, so we can safely use a switch statement\r
switch (Attributes) {\r
case EFI_MEMORY_UC:\r
// modify cacheability attributes\r
- EntryMask = ARM_SMALL_PAGE_TEX_MASK | ARM_PAGE_C | ARM_PAGE_B;\r
+ EntryMask |= ARM_SMALL_PAGE_TEX_MASK | ARM_PAGE_C | ARM_PAGE_B;\r
// map to strongly ordered\r
- EntryValue = 0; // TEX[2:0] = 0, C=0, B=0\r
+ EntryValue |= 0; // TEX[2:0] = 0, C=0, B=0\r
break;\r
\r
case EFI_MEMORY_WC:\r
// modify cacheability attributes\r
- EntryMask = ARM_SMALL_PAGE_TEX_MASK | ARM_PAGE_C | ARM_PAGE_B;\r
+ EntryMask |= ARM_SMALL_PAGE_TEX_MASK | ARM_PAGE_C | ARM_PAGE_B;\r
// map to normal non-cachable\r
- EntryValue = (0x1 << ARM_SMALL_PAGE_TEX_SHIFT); // TEX [2:0]= 001 = 0x2, B=0, C=0\r
+ EntryValue |= (0x1 << ARM_SMALL_PAGE_TEX_SHIFT); // TEX [2:0]= 001 = 0x2, B=0, C=0\r
break;\r
\r
case EFI_MEMORY_WT:\r
// modify cacheability attributes\r
- EntryMask = ARM_SMALL_PAGE_TEX_MASK | ARM_PAGE_C | ARM_PAGE_B;\r
+ EntryMask |= ARM_SMALL_PAGE_TEX_MASK | ARM_PAGE_C | ARM_PAGE_B;\r
// write through with no-allocate\r
- EntryValue = ARM_PAGE_C; // TEX [2:0] = 0, C=1, B=0\r
+ EntryValue |= ARM_PAGE_C; // TEX [2:0] = 0, C=1, B=0\r
break;\r
\r
case EFI_MEMORY_WB:\r
// modify cacheability attributes\r
- EntryMask = ARM_SMALL_PAGE_TEX_MASK | ARM_PAGE_C | ARM_PAGE_B;\r
+ EntryMask |= ARM_SMALL_PAGE_TEX_MASK | ARM_PAGE_C | ARM_PAGE_B;\r
// write back (with allocate)\r
- EntryValue = (0x1 << ARM_SMALL_PAGE_TEX_SHIFT) | ARM_PAGE_C | ARM_PAGE_B; // TEX [2:0] = 001, C=1, B=1\r
+ EntryValue |= (0x1 << ARM_SMALL_PAGE_TEX_SHIFT) | ARM_PAGE_C | ARM_PAGE_B; // TEX [2:0] = 001, C=1, B=1\r
break;\r
\r
case EFI_MEMORY_WP:\r
case EFI_MEMORY_UCE:\r
// cannot be implemented UEFI definition unclear for ARM\r
// Cause a page fault if these ranges are accessed.\r
- EntryMask = 0x3;\r
- EntryValue = 0;\r
+ EntryValue = ARM_PAGE_TYPE_FAULT;\r
DEBUG ((EFI_D_PAGE, "SetMemoryAttributes(): setting page %lx with unsupported attribute %x will page fault on access\n", BaseAddress, Attributes));\r
break;\r
\r
default:\r
return EFI_UNSUPPORTED;\r
- break;\r
}\r
\r
// obtain page table base\r
Descriptor = FirstLevelTable[FirstLevelIdx];\r
\r
// does this descriptor need to be converted from section entry to 4K pages?\r
- if ((Descriptor & ARM_DESC_TYPE_MASK) == ARM_DESC_TYPE_SECTION ) {\r
+ if ((Descriptor & ARM_DESC_TYPE_MASK) != ARM_DESC_TYPE_PAGE_TABLE ) {\r
Status = ConvertSectionToPages (FirstLevelIdx << ARM_SECTION_BASE_SHIFT);\r
if (EFI_ERROR(Status)) {\r
// exit for loop\r
\r
// calculate index into the page table\r
PageTableIndex = ((BaseAddress + Offset) & ARM_SMALL_PAGE_INDEX_MASK) >> ARM_SMALL_PAGE_BASE_SHIFT;\r
- ASSERT(PageTableIndex < SMALL_PAGE_TABLE_ENTRY_COUNT);\r
+ ASSERT (PageTableIndex < SMALL_PAGE_TABLE_ENTRY_COUNT);\r
\r
// get the entry\r
PageTableEntry = PageTable[PageTableIndex];\r
// EntryMask: bitmask of values to change (1 = change this value, 0 = leave alone)\r
// EntryValue: values at bit positions specified by EntryMask\r
\r
+ // Make sure we handle a section range that is unmapped \r
+ EntryMask = ARM_DESC_TYPE_MASK;\r
+ EntryValue = ARM_DESC_TYPE_SECTION;\r
+\r
// Although the PI spec is unclear on this the GCD guarantees that only\r
// one Attribute bit is set at a time, so we can safely use a switch statement\r
switch(Attributes) {\r
case EFI_MEMORY_UC:\r
// modify cacheability attributes\r
- EntryMask = ARM_SECTION_TEX_MASK | ARM_SECTION_C | ARM_SECTION_B;\r
+ EntryMask |= ARM_SECTION_TEX_MASK | ARM_SECTION_C | ARM_SECTION_B;\r
// map to strongly ordered\r
- EntryValue = 0; // TEX[2:0] = 0, C=0, B=0\r
+ EntryValue |= 0; // TEX[2:0] = 0, C=0, B=0\r
break;\r
\r
case EFI_MEMORY_WC:\r
// modify cacheability attributes\r
- EntryMask = ARM_SECTION_TEX_MASK | ARM_SECTION_C | ARM_SECTION_B;\r
+ EntryMask |= ARM_SECTION_TEX_MASK | ARM_SECTION_C | ARM_SECTION_B;\r
// map to normal non-cachable\r
- EntryValue = (0x1 << ARM_SECTION_TEX_SHIFT); // TEX [2:0]= 001 = 0x2, B=0, C=0\r
+ EntryValue |= (0x1 << ARM_SECTION_TEX_SHIFT); // TEX [2:0]= 001 = 0x2, B=0, C=0\r
break;\r
\r
case EFI_MEMORY_WT:\r
// modify cacheability attributes\r
- EntryMask = ARM_SECTION_TEX_MASK | ARM_SECTION_C | ARM_SECTION_B;\r
+ EntryMask |= ARM_SECTION_TEX_MASK | ARM_SECTION_C | ARM_SECTION_B;\r
// write through with no-allocate\r
- EntryValue = ARM_SECTION_C; // TEX [2:0] = 0, C=1, B=0\r
+ EntryValue |= ARM_SECTION_C; // TEX [2:0] = 0, C=1, B=0\r
break;\r
\r
case EFI_MEMORY_WB:\r
// modify cacheability attributes\r
- EntryMask = ARM_SECTION_TEX_MASK | ARM_SECTION_C | ARM_SECTION_B;\r
+ EntryMask |= ARM_SECTION_TEX_MASK | ARM_SECTION_C | ARM_SECTION_B;\r
// write back (with allocate)\r
- EntryValue = (0x1 << ARM_SECTION_TEX_SHIFT) | ARM_SECTION_C | ARM_SECTION_B; // TEX [2:0] = 001, C=1, B=1\r
+ EntryValue |= (0x1 << ARM_SECTION_TEX_SHIFT) | ARM_SECTION_C | ARM_SECTION_B; // TEX [2:0] = 001, C=1, B=1\r
break;\r
\r
case EFI_MEMORY_WP:\r
case EFI_MEMORY_UCE:\r
// cannot be implemented UEFI definition unclear for ARM\r
// Cause a page fault if these ranges are accessed.\r
- EntryMask = 0x3;\r
- EntryValue = 0;\r
+ EntryValue = ARM_DESC_TYPE_FAULT;\r
DEBUG ((EFI_D_PAGE, "SetMemoryAttributes(): setting section %lx with unsupported attribute %x will page fault on access\n", BaseAddress, Attributes));\r
break;\r
\r
\r
default:\r
return EFI_UNSUPPORTED;\r
- break;\r
}\r
\r
// obtain page table base\r
Descriptor = FirstLevelTable[FirstLevelIdx + i];\r
\r
// has this descriptor already been converted to pages?
- if ((Descriptor & ARM_DESC_TYPE_MASK) == ARM_DESC_TYPE_PAGE_TABLE ) {\r
+ if ((Descriptor & ARM_DESC_TYPE_MASK) != ARM_DESC_TYPE_PAGE_TABLE ) {\r
// forward this 1MB range to page table function instead\r
Status = UpdatePageEntries ((FirstLevelIdx + i) << ARM_SECTION_BASE_SHIFT, ARM_PAGE_DESC_ENTRY_MVA_SIZE, Attributes, VirtualMask);\r
} else {\r
\r
// calculate index into first level translation table for start of modification\r
FirstLevelIdx = (BaseAddress & ARM_SECTION_BASE_MASK) >> ARM_SECTION_BASE_SHIFT;\r
- ASSERT(FirstLevelIdx < FIRST_LEVEL_ENTRY_COUNT);\r
+ ASSERT (FirstLevelIdx < FIRST_LEVEL_ENTRY_COUNT);\r
\r
// get section attributes and convert to page attributes\r
SectionDescriptor = FirstLevelTable[FirstLevelIdx];\r
}\r
\r
// flush d-cache so descriptors make it back to uncached memory for subsequent table walks\r
- // TODO: change to use only PageTable base and length\r
- // ArmInvalidateDataCache ();\r
- InvalidateDataCacheRange ((VOID *)&PageTableAddr, EFI_PAGE_SIZE);\r
+ InvalidateDataCacheRange ((VOID *)(UINTN)PageTableAddr, EFI_PAGE_SIZE);\r
\r
// formulate page table entry, Domain=0, NS=0\r
PageTableDescriptor = (((UINTN)PageTableAddr) & ARM_PAGE_DESC_BASE_MASK) | ARM_DESC_TYPE_PAGE_TABLE;\r
IN UINT64 Attributes\r
)\r
{\r
+ DEBUG ((EFI_D_PAGE, "SetMemoryAttributes(%lx, %lx, %lx)\n", BaseAddress, Length, Attributes));\r
if ( ((BaseAddress & (EFI_PAGE_SIZE-1)) != 0) || ((Length & (EFI_PAGE_SIZE-1)) != 0)){\r
// minimum granularity is EFI_PAGE_SIZE (4KB on ARM)\r
+ DEBUG ((EFI_D_PAGE, "SetMemoryAttributes(%lx, %lx, %lx): minimum ganularity is EFI_PAGE_SIZE\n", BaseAddress, Length, Attributes));\r
return EFI_UNSUPPORTED;\r
}\r
\r
*Attributes = GcdDescriptor.Attributes;\r
}\r
}\r
- \r
+\r
//\r
// Make this address range page fault if accessed. If it is a DMA buffer then this would 
// be the PCI address. Code should always use the CPU address, and we will or in VirtualMask\r
// to that address. \r
//\r
- Status = SetMemoryAttributes (Address, Length, EFI_MEMORY_XP, 0);\r
+ Status = SetMemoryAttributes (Address, Length, EFI_MEMORY_WP, 0);\r
if (!EFI_ERROR (Status)) {\r
Status = SetMemoryAttributes (Address | VirtualMask, Length, EFI_MEMORY_UC, VirtualMask);\r
}\r
\r
+ DEBUG ((DEBUG_INFO | DEBUG_LOAD, "ConvertPagesToUncachedVirtualAddress()\n Unmapped 0x%08lx Mapped 0x%08lx 0x%x bytes\n", Address, Address | VirtualMask, Length));\r
+\r
return Status;\r
}\r
\r
\r
EFI_STATUS\r
EFIAPI\r
-CpuFreeConvertedPages (\r
+CpuReconvertPages (\r
IN VIRTUAL_UNCACHED_PAGES_PROTOCOL *This,\r
IN EFI_PHYSICAL_ADDRESS Address,\r
IN UINTN Length,\r
)\r
{\r
EFI_STATUS Status;\r
+\r
+ DEBUG ((DEBUG_INFO | DEBUG_LOAD, "CpuReconvertPages(%lx, %x, %lx, %lx)\n", Address, Length, VirtualMask, Attributes));\r
\r
//\r
// Unmap the aliased Address
//\r
- Status = SetMemoryAttributes (Address | VirtualMask, Length, EFI_MEMORY_XP, 0);\r
+ Status = SetMemoryAttributes (Address | VirtualMask, Length, EFI_MEMORY_WP, 0);\r
if (!EFI_ERROR (Status)) {\r
//\r
// Restore attributes
\r
VIRTUAL_UNCACHED_PAGES_PROTOCOL gVirtualUncachedPages = {\r
CpuConvertPagesToUncachedVirtualAddress,\r
- CpuFreeConvertedPages\r
+ CpuReconvertPages\r
};\r
\r
\r