author    Angelos Mouzakitis <a.mouzakitis@virtualopensystems.com>  2023-10-10 14:33:42 +0000
committer Angelos Mouzakitis <a.mouzakitis@virtualopensystems.com>  2023-10-10 14:33:42 +0000
commit    af1a266670d040d2f4083ff309d732d648afba2a (HEAD, master)
tree      2fc46203448ddcc6f81546d379abfaeb323575e9
parent    e02cda008591317b1625707ff8e115a4841aa889

Add submodule dependency files

Change-Id: Iaf8d18082d3991dec7c0ebbea540f092188eb4ec
Diffstat (limited to 'roms/edk2/ArmPkg/Library/ArmMmuLib')
 -rw-r--r--  roms/edk2/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c           | 656
 -rw-r--r--  roms/edk2/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibReplaceEntry.S   |  74
 -rw-r--r--  roms/edk2/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuPeiLibConstructor.c |  55
 -rw-r--r--  roms/edk2/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibConvert.c            |  32
 -rw-r--r--  roms/edk2/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibCore.c               | 413
 -rw-r--r--  roms/edk2/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibUpdate.c             | 435
 -rw-r--r--  roms/edk2/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibV7Support.S          |  29
 -rw-r--r--  roms/edk2/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibV7Support.asm        |  26
 -rw-r--r--  roms/edk2/ArmPkg/Library/ArmMmuLib/ArmMmuBaseLib.inf                 |  43
 -rw-r--r--  roms/edk2/ArmPkg/Library/ArmMmuLib/ArmMmuPeiLib.inf                  |  32
10 files changed, 1795 insertions(+), 0 deletions(-)
diff --git a/roms/edk2/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c b/roms/edk2/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
new file mode 100644
index 000000000..513a763e6
--- /dev/null
+++ b/roms/edk2/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibCore.c
@@ -0,0 +1,656 @@
+/** @file
+* File managing the MMU for ARMv8 architecture
+*
+* Copyright (c) 2011-2020, ARM Limited. All rights reserved.
+* Copyright (c) 2016, Linaro Limited. All rights reserved.
+* Copyright (c) 2017, Intel Corporation. All rights reserved.<BR>
+*
+* SPDX-License-Identifier: BSD-2-Clause-Patent
+*
+**/
+
+#include <Uefi.h>
+#include <Chipset/AArch64.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/CacheMaintenanceLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Library/ArmLib.h>
+#include <Library/ArmMmuLib.h>
+#include <Library/BaseLib.h>
+#include <Library/DebugLib.h>
+
+STATIC
+UINT64
+ArmMemoryAttributeToPageAttribute (
+ IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
+ )
+{
+ switch (Attributes) {
+ case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
+ return TT_ATTR_INDX_MEMORY_WRITE_BACK;
+
+ case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
+ return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
+
+ case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
+ return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
+
+  // Uncached and device mappings are treated as outer shareable by default.
+ case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
+ return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
+
+ default:
+ ASSERT (0);
+ case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
+    if (ArmReadCurrentEL () == AARCH64_EL2) {
+      return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
+    } else {
+      return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
+    }
+ }
+}
+
+#define MIN_T0SZ 16
+#define BITS_PER_LEVEL 9
+#define MAX_VA_BITS 48
+
+STATIC
+UINTN
+GetRootTableEntryCount (
+ IN UINTN T0SZ
+ )
+{
+  return TT_ENTRY_COUNT >> ((T0SZ - MIN_T0SZ) % BITS_PER_LEVEL);
+}
+
+STATIC
+UINTN
+GetRootTableLevel (
+ IN UINTN T0SZ
+ )
+{
+ return (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;
+}
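+
+//
+// Worked example of the arithmetic above: with the 4 KB granule used here,
+// T0SZ = 24 (a 40-bit VA space) starts the walk at level 0
+// ((24 - 16) / 9 == 0) with a root table of only
+// 512 >> ((24 - 16) % 9) == 2 entries, while T0SZ = 25 (39-bit VA)
+// starts at level 1 with a full 512-entry root table.
+//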
+
+STATIC
+VOID
+ReplaceTableEntry (
+ IN UINT64 *Entry,
+ IN UINT64 Value,
+ IN UINT64 RegionStart,
+ IN BOOLEAN IsLiveBlockMapping
+ )
+{
+ if (!ArmMmuEnabled () || !IsLiveBlockMapping) {
+ *Entry = Value;
+ ArmUpdateTranslationTableEntry (Entry, (VOID *)(UINTN)RegionStart);
+ } else {
+ ArmReplaceLiveTranslationEntry (Entry, Value, RegionStart);
+ }
+}
+
+STATIC
+VOID
+FreePageTablesRecursive (
+ IN UINT64 *TranslationTable,
+ IN UINTN Level
+ )
+{
+ UINTN Index;
+
+ ASSERT (Level <= 3);
+
+ if (Level < 3) {
+ for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
+ if ((TranslationTable[Index] & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
+ FreePageTablesRecursive ((VOID *)(UINTN)(TranslationTable[Index] &
+ TT_ADDRESS_MASK_BLOCK_ENTRY),
+ Level + 1);
+ }
+ }
+ }
+ FreePages (TranslationTable, 1);
+}
+
+STATIC
+BOOLEAN
+IsBlockEntry (
+ IN UINT64 Entry,
+ IN UINTN Level
+ )
+{
+ if (Level == 3) {
+ return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY_LEVEL3;
+ }
+ return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY;
+}
+
+STATIC
+BOOLEAN
+IsTableEntry (
+ IN UINT64 Entry,
+ IN UINTN Level
+ )
+{
+ if (Level == 3) {
+ //
+ // TT_TYPE_TABLE_ENTRY aliases TT_TYPE_BLOCK_ENTRY_LEVEL3
+ // so we need to take the level into account as well.
+ //
+ return FALSE;
+ }
+ return (Entry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY;
+}
+
+STATIC
+EFI_STATUS
+UpdateRegionMappingRecursive (
+ IN UINT64 RegionStart,
+ IN UINT64 RegionEnd,
+ IN UINT64 AttributeSetMask,
+ IN UINT64 AttributeClearMask,
+ IN UINT64 *PageTable,
+ IN UINTN Level
+ )
+{
+ UINTN BlockShift;
+ UINT64 BlockMask;
+ UINT64 BlockEnd;
+ UINT64 *Entry;
+ UINT64 EntryValue;
+ VOID *TranslationTable;
+ EFI_STATUS Status;
+
+ ASSERT (((RegionStart | RegionEnd) & EFI_PAGE_MASK) == 0);
+
+ BlockShift = (Level + 1) * BITS_PER_LEVEL + MIN_T0SZ;
+ BlockMask = MAX_UINT64 >> BlockShift;
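+
+  //
+  // At level 2, for instance, BlockShift = (2 + 1) * 9 + 16 = 43, so
+  // BlockMask = MAX_UINT64 >> 43 = 0x1FFFFF and each entry maps a 2 MB
+  // block; the table index computed below is then bits [29:21] of
+  // RegionStart.
+  //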
+
+ DEBUG ((DEBUG_VERBOSE, "%a(%d): %llx - %llx set %lx clr %lx\n", __FUNCTION__,
+ Level, RegionStart, RegionEnd, AttributeSetMask, AttributeClearMask));
+
+ for (; RegionStart < RegionEnd; RegionStart = BlockEnd) {
+ BlockEnd = MIN (RegionEnd, (RegionStart | BlockMask) + 1);
+ Entry = &PageTable[(RegionStart >> (64 - BlockShift)) & (TT_ENTRY_COUNT - 1)];
+
+ //
+ // If RegionStart or BlockEnd is not aligned to the block size at this
+ // level, we will have to create a table mapping in order to map less
+ // than a block, and recurse to create the block or page entries at
+ // the next level. No block mappings are allowed at all at level 0,
+ // so in that case, we have to recurse unconditionally.
+ // If we are changing a table entry and the AttributeClearMask is non-zero,
+ // we cannot replace it with a block entry without potentially losing
+ // attribute information, so keep the table entry in that case.
+ //
+ if (Level == 0 || ((RegionStart | BlockEnd) & BlockMask) != 0 ||
+ (IsTableEntry (*Entry, Level) && AttributeClearMask != 0)) {
+ ASSERT (Level < 3);
+
+ if (!IsTableEntry (*Entry, Level)) {
+ //
+ // No table entry exists yet, so we need to allocate a page table
+ // for the next level.
+ //
+ TranslationTable = AllocatePages (1);
+ if (TranslationTable == NULL) {
+ return EFI_OUT_OF_RESOURCES;
+ }
+
+ if (!ArmMmuEnabled ()) {
+ //
+ // Make sure we are not inadvertently hitting in the caches
+ // when populating the page tables.
+ //
+ InvalidateDataCacheRange (TranslationTable, EFI_PAGE_SIZE);
+ }
+
+ ZeroMem (TranslationTable, EFI_PAGE_SIZE);
+
+ if (IsBlockEntry (*Entry, Level)) {
+ //
+ // We are splitting an existing block entry, so we have to populate
+ // the new table with the attributes of the block entry it replaces.
+ //
+ Status = UpdateRegionMappingRecursive (RegionStart & ~BlockMask,
+ (RegionStart | BlockMask) + 1, *Entry & TT_ATTRIBUTES_MASK,
+ 0, TranslationTable, Level + 1);
+ if (EFI_ERROR (Status)) {
+ //
+ // The range we passed to UpdateRegionMappingRecursive () is block
+ // aligned, so it is guaranteed that no further pages were allocated
+ // by it, and so we only have to free the page we allocated here.
+ //
+ FreePages (TranslationTable, 1);
+ return Status;
+ }
+ }
+ } else {
+ TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
+ }
+
+ //
+ // Recurse to the next level
+ //
+ Status = UpdateRegionMappingRecursive (RegionStart, BlockEnd,
+ AttributeSetMask, AttributeClearMask, TranslationTable,
+ Level + 1);
+ if (EFI_ERROR (Status)) {
+ if (!IsTableEntry (*Entry, Level)) {
+ //
+ // We are creating a new table entry, so on failure, we can free all
+ // allocations we made recursively, given that the whole subhierarchy
+ // has not been wired into the live page tables yet. (This is not
+ // possible for existing table entries, since we cannot revert the
+ // modifications we made to the subhierarchy it represents.)
+ //
+ FreePageTablesRecursive (TranslationTable, Level + 1);
+ }
+ return Status;
+ }
+
+ if (!IsTableEntry (*Entry, Level)) {
+ EntryValue = (UINTN)TranslationTable | TT_TYPE_TABLE_ENTRY;
+ ReplaceTableEntry (Entry, EntryValue, RegionStart,
+ IsBlockEntry (*Entry, Level));
+ }
+ } else {
+ EntryValue = (*Entry & AttributeClearMask) | AttributeSetMask;
+ EntryValue |= RegionStart;
+ EntryValue |= (Level == 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3
+ : TT_TYPE_BLOCK_ENTRY;
+
+ if (IsTableEntry (*Entry, Level)) {
+ //
+ // We are replacing a table entry with a block entry. This is only
+ // possible if we are keeping none of the original attributes.
+ // We can free the table entry's page table, and all the ones below
+ // it, since we are dropping the only possible reference to it.
+ //
+ ASSERT (AttributeClearMask == 0);
+ TranslationTable = (VOID *)(UINTN)(*Entry & TT_ADDRESS_MASK_BLOCK_ENTRY);
+ ReplaceTableEntry (Entry, EntryValue, RegionStart, TRUE);
+ FreePageTablesRecursive (TranslationTable, Level + 1);
+ } else {
+ ReplaceTableEntry (Entry, EntryValue, RegionStart, FALSE);
+ }
+ }
+ }
+ return EFI_SUCCESS;
+}
+
+STATIC
+EFI_STATUS
+UpdateRegionMapping (
+ IN UINT64 RegionStart,
+ IN UINT64 RegionLength,
+ IN UINT64 AttributeSetMask,
+ IN UINT64 AttributeClearMask
+ )
+{
+ UINTN T0SZ;
+
+  if (((RegionStart | RegionLength) & EFI_PAGE_MASK) != 0) {
+ return EFI_INVALID_PARAMETER;
+ }
+
+ T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
+
+ return UpdateRegionMappingRecursive (RegionStart, RegionStart + RegionLength,
+ AttributeSetMask, AttributeClearMask, ArmGetTTBR0BaseAddress (),
+ GetRootTableLevel (T0SZ));
+}
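+
+//
+// Sketch of the recursion above (assuming a level 0 root table): remapping
+// 0x40000000 - 0x40201000 descends through table entries until, at level 2,
+// 0x40000000 - 0x40200000 becomes a single 2 MB block entry, while the
+// trailing 4 KB at 0x40200000 is mapped via one level 3 page entry,
+// splitting any pre-existing block mappings along the way.
+//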
+
+STATIC
+EFI_STATUS
+FillTranslationTable (
+ IN UINT64 *RootTable,
+ IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryRegion
+ )
+{
+ return UpdateRegionMapping (
+ MemoryRegion->VirtualBase,
+ MemoryRegion->Length,
+ ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF,
+ 0
+ );
+}
+
+STATIC
+UINT64
+GcdAttributeToPageAttribute (
+ IN UINT64 GcdAttributes
+ )
+{
+ UINT64 PageAttributes;
+
+ switch (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) {
+ case EFI_MEMORY_UC:
+ PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
+ break;
+ case EFI_MEMORY_WC:
+ PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
+ break;
+ case EFI_MEMORY_WT:
+ PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
+ break;
+ case EFI_MEMORY_WB:
+ PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
+ break;
+ default:
+ PageAttributes = TT_ATTR_INDX_MASK;
+ break;
+ }
+
+ if ((GcdAttributes & EFI_MEMORY_XP) != 0 ||
+ (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC) {
+ if (ArmReadCurrentEL () == AARCH64_EL2) {
+ PageAttributes |= TT_XN_MASK;
+ } else {
+ PageAttributes |= TT_UXN_MASK | TT_PXN_MASK;
+ }
+ }
+
+ if ((GcdAttributes & EFI_MEMORY_RO) != 0) {
+ PageAttributes |= TT_AP_RO_RO;
+ }
+
+ return PageAttributes | TT_AF;
+}
+
+EFI_STATUS
+ArmSetMemoryAttributes (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 Attributes
+ )
+{
+ UINT64 PageAttributes;
+ UINT64 PageAttributeMask;
+
+ PageAttributes = GcdAttributeToPageAttribute (Attributes);
+ PageAttributeMask = 0;
+
+ if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) {
+ //
+ // No memory type was set in Attributes, so we are going to update the
+ // permissions only.
+ //
+ PageAttributes &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;
+ PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK |
+ TT_PXN_MASK | TT_XN_MASK);
+ }
+
+ return UpdateRegionMapping (BaseAddress, Length, PageAttributes,
+ PageAttributeMask);
+}
+
+STATIC
+EFI_STATUS
+SetMemoryRegionAttribute (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 Attributes,
+ IN UINT64 BlockEntryMask
+ )
+{
+ return UpdateRegionMapping (BaseAddress, Length, Attributes, BlockEntryMask);
+}
+
+EFI_STATUS
+ArmSetMemoryRegionNoExec (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length
+ )
+{
+ UINT64 Val;
+
+ if (ArmReadCurrentEL () == AARCH64_EL1) {
+ Val = TT_PXN_MASK | TT_UXN_MASK;
+ } else {
+ Val = TT_XN_MASK;
+ }
+
+ return SetMemoryRegionAttribute (
+ BaseAddress,
+ Length,
+ Val,
+ ~TT_ADDRESS_MASK_BLOCK_ENTRY);
+}
+
+EFI_STATUS
+ArmClearMemoryRegionNoExec (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length
+ )
+{
+ UINT64 Mask;
+
+ // XN maps to UXN in the EL1&0 translation regime
+ Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);
+
+ return SetMemoryRegionAttribute (
+ BaseAddress,
+ Length,
+ 0,
+ Mask);
+}
+
+EFI_STATUS
+ArmSetMemoryRegionReadOnly (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length
+ )
+{
+ return SetMemoryRegionAttribute (
+ BaseAddress,
+ Length,
+ TT_AP_RO_RO,
+ ~TT_ADDRESS_MASK_BLOCK_ENTRY);
+}
+
+EFI_STATUS
+ArmClearMemoryRegionReadOnly (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length
+ )
+{
+ return SetMemoryRegionAttribute (
+ BaseAddress,
+ Length,
+ TT_AP_RW_RW,
+ ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK));
+}
+
+EFI_STATUS
+EFIAPI
+ArmConfigureMmu (
+ IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable,
+ OUT VOID **TranslationTableBase OPTIONAL,
+ OUT UINTN *TranslationTableSize OPTIONAL
+ )
+{
+ VOID* TranslationTable;
+ UINTN MaxAddressBits;
+ UINT64 MaxAddress;
+ UINTN T0SZ;
+ UINTN RootTableEntryCount;
+ UINT64 TCR;
+ EFI_STATUS Status;
+
+ if (MemoryTable == NULL) {
+ ASSERT (MemoryTable != NULL);
+ return EFI_INVALID_PARAMETER;
+ }
+
+ //
+ // Limit the virtual address space to what we can actually use: UEFI
+ // mandates a 1:1 mapping, so no point in making the virtual address
+ // space larger than the physical address space. We also have to take
+ // into account the architectural limitations that result from UEFI's
+ // use of 4 KB pages.
+ //
+ MaxAddressBits = MIN (ArmGetPhysicalAddressBits (), MAX_VA_BITS);
+ MaxAddress = LShiftU64 (1ULL, MaxAddressBits) - 1;
+
+ T0SZ = 64 - MaxAddressBits;
+ RootTableEntryCount = GetRootTableEntryCount (T0SZ);
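+
+  //
+  // For example, a CPU reporting 40 physical address bits gives
+  // MaxAddressBits = 40, T0SZ = 24 and a 2-entry level 0 root table; the
+  // PS/IPS field assigned below then selects the 1 TB (40-bit) physical
+  // address size.
+  //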
+
+ //
+ // Set TCR that allows us to retrieve T0SZ in the subsequent functions
+ //
+ // Ideally we will be running at EL2, but should support EL1 as well.
+ // UEFI should not run at EL3.
+ if (ArmReadCurrentEL () == AARCH64_EL2) {
+    // Note: bits 23 and 31 are reserved (RES1) bits in TCR_EL2
+ TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;
+
+ // Set the Physical Address Size using MaxAddress
+ if (MaxAddress < SIZE_4GB) {
+ TCR |= TCR_PS_4GB;
+ } else if (MaxAddress < SIZE_64GB) {
+ TCR |= TCR_PS_64GB;
+ } else if (MaxAddress < SIZE_1TB) {
+ TCR |= TCR_PS_1TB;
+ } else if (MaxAddress < SIZE_4TB) {
+ TCR |= TCR_PS_4TB;
+ } else if (MaxAddress < SIZE_16TB) {
+ TCR |= TCR_PS_16TB;
+ } else if (MaxAddress < SIZE_256TB) {
+ TCR |= TCR_PS_256TB;
+ } else {
+ DEBUG ((DEBUG_ERROR,
+ "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
+ MaxAddress));
+      ASSERT (0); // Address spaces wider than 48 bits are not supported
+ return EFI_UNSUPPORTED;
+ }
+ } else if (ArmReadCurrentEL () == AARCH64_EL1) {
+ // Due to Cortex-A57 erratum #822227 we must set TG1[1] == 1, regardless of EPD1.
+ TCR = T0SZ | TCR_TG0_4KB | TCR_TG1_4KB | TCR_EPD1;
+
+ // Set the Physical Address Size using MaxAddress
+ if (MaxAddress < SIZE_4GB) {
+ TCR |= TCR_IPS_4GB;
+ } else if (MaxAddress < SIZE_64GB) {
+ TCR |= TCR_IPS_64GB;
+ } else if (MaxAddress < SIZE_1TB) {
+ TCR |= TCR_IPS_1TB;
+ } else if (MaxAddress < SIZE_4TB) {
+ TCR |= TCR_IPS_4TB;
+ } else if (MaxAddress < SIZE_16TB) {
+ TCR |= TCR_IPS_16TB;
+ } else if (MaxAddress < SIZE_256TB) {
+ TCR |= TCR_IPS_256TB;
+ } else {
+ DEBUG ((DEBUG_ERROR,
+ "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
+ MaxAddress));
+      ASSERT (0); // Address spaces wider than 48 bits are not supported
+ return EFI_UNSUPPORTED;
+ }
+ } else {
+ ASSERT (0); // UEFI is only expected to run at EL2 and EL1, not EL3.
+ return EFI_UNSUPPORTED;
+ }
+
+ //
+ // Translation table walks are always cache coherent on ARMv8-A, so cache
+ // maintenance on page tables is never needed. Since there is a risk of
+ // loss of coherency when using mismatched attributes, and given that memory
+ // is mapped cacheable except for extraordinary cases (such as non-coherent
+  // DMA), have the page table walker perform cached accesses as well, and
+  // set the walker attributes below to match the attributes we use for CPU
+  // accesses to the region.
+ //
+ TCR |= TCR_SH_INNER_SHAREABLE |
+ TCR_RGN_OUTER_WRITE_BACK_ALLOC |
+ TCR_RGN_INNER_WRITE_BACK_ALLOC;
+
+ // Set TCR
+ ArmSetTCR (TCR);
+
+ // Allocate pages for translation table
+ TranslationTable = AllocatePages (1);
+ if (TranslationTable == NULL) {
+ return EFI_OUT_OF_RESOURCES;
+ }
+ //
+ // We set TTBR0 just after allocating the table to retrieve its location from
+ // the subsequent functions without needing to pass this value across the
+ // functions. The MMU is only enabled after the translation tables are
+ // populated.
+ //
+ ArmSetTTBR0 (TranslationTable);
+
+ if (TranslationTableBase != NULL) {
+ *TranslationTableBase = TranslationTable;
+ }
+
+ if (TranslationTableSize != NULL) {
+ *TranslationTableSize = RootTableEntryCount * sizeof (UINT64);
+ }
+
+ //
+ // Make sure we are not inadvertently hitting in the caches
+ // when populating the page tables.
+ //
+ InvalidateDataCacheRange (TranslationTable,
+ RootTableEntryCount * sizeof (UINT64));
+ ZeroMem (TranslationTable, RootTableEntryCount * sizeof (UINT64));
+
+ while (MemoryTable->Length != 0) {
+ Status = FillTranslationTable (TranslationTable, MemoryTable);
+ if (EFI_ERROR (Status)) {
+ goto FreeTranslationTable;
+ }
+ MemoryTable++;
+ }
+
+ //
+ // EFI_MEMORY_UC ==> MAIR_ATTR_DEVICE_MEMORY
+ // EFI_MEMORY_WC ==> MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE
+ // EFI_MEMORY_WT ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH
+ // EFI_MEMORY_WB ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK
+ //
+ ArmSetMAIR (
+ MAIR_ATTR (TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |
+ MAIR_ATTR (TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) |
+ MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) |
+ MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)
+ );
+
+ ArmDisableAlignmentCheck ();
+ ArmEnableStackAlignmentCheck ();
+ ArmEnableInstructionCache ();
+ ArmEnableDataCache ();
+
+ ArmEnableMmu ();
+ return EFI_SUCCESS;
+
+FreeTranslationTable:
+ FreePages (TranslationTable, 1);
+ return Status;
+}
+
+RETURN_STATUS
+EFIAPI
+ArmMmuBaseLibConstructor (
+ VOID
+ )
+{
+ extern UINT32 ArmReplaceLiveTranslationEntrySize;
+
+ //
+ // The ArmReplaceLiveTranslationEntry () helper function may be invoked
+ // with the MMU off so we have to ensure that it gets cleaned to the PoC
+ //
+ WriteBackDataCacheRange ((VOID *)(UINTN)ArmReplaceLiveTranslationEntry,
+ ArmReplaceLiveTranslationEntrySize);
+
+ return RETURN_SUCCESS;
+}
diff --git a/roms/edk2/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibReplaceEntry.S b/roms/edk2/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibReplaceEntry.S
new file mode 100644
index 000000000..66ebca571
--- /dev/null
+++ b/roms/edk2/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuLibReplaceEntry.S
@@ -0,0 +1,74 @@
+#------------------------------------------------------------------------------
+#
+# Copyright (c) 2016, Linaro Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+#------------------------------------------------------------------------------
+
+#include <AsmMacroIoLibV8.h>
+
+ .set CTRL_M_BIT, (1 << 0)
+
+ .macro __replace_entry, el
+
+ // disable the MMU
+ mrs x8, sctlr_el\el
+ bic x9, x8, #CTRL_M_BIT
+ msr sctlr_el\el, x9
+ isb
+
+ // write updated entry
+ str x1, [x0]
+
+ // invalidate again to get rid of stale clean cachelines that may
+ // have been filled speculatively since the last invalidate
+ dmb sy
+ dc ivac, x0
+
+ // flush translations for the target address from the TLBs
+ lsr x2, x2, #12
+ .if \el == 1
+ tlbi vaae1, x2
+ .else
+ tlbi vae\el, x2
+ .endif
+ dsb nsh
+
+ // re-enable the MMU
+ msr sctlr_el\el, x8
+ isb
+ .endm
+
+//VOID
+//ArmReplaceLiveTranslationEntry (
+// IN UINT64 *Entry,
+// IN UINT64 Value,
+// IN UINT64 Address
+// )
+ASM_FUNC(ArmReplaceLiveTranslationEntry)
+
+ // disable interrupts
+ mrs x4, daif
+ msr daifset, #0xf
+ isb
+
+ // clean and invalidate first so that we don't clobber
+ // adjacent entries that are dirty in the caches
+ dc civac, x0
+ dsb nsh
+
+ EL1_OR_EL2_OR_EL3(x3)
+1:__replace_entry 1
+ b 4f
+2:__replace_entry 2
+ b 4f
+3:__replace_entry 3
+
+4:msr daif, x4
+ ret
+
+ASM_GLOBAL ASM_PFX(ArmReplaceLiveTranslationEntrySize)
+
+ASM_PFX(ArmReplaceLiveTranslationEntrySize):
+ .long . - ArmReplaceLiveTranslationEntry
diff --git a/roms/edk2/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuPeiLibConstructor.c b/roms/edk2/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuPeiLibConstructor.c
new file mode 100644
index 000000000..80317923c
--- /dev/null
+++ b/roms/edk2/ArmPkg/Library/ArmMmuLib/AArch64/ArmMmuPeiLibConstructor.c
@@ -0,0 +1,55 @@
+/** @file
+*
+*  Copyright (c) 2016, Linaro Limited. All rights reserved.
+*
+*  SPDX-License-Identifier: BSD-2-Clause-Patent
+*
+**/
+
+#include <Base.h>
+
+#include <Library/ArmLib.h>
+#include <Library/ArmMmuLib.h>
+#include <Library/CacheMaintenanceLib.h>
+#include <Library/DebugLib.h>
+
+EFI_STATUS
+EFIAPI
+ArmMmuPeiLibConstructor (
+ IN EFI_PEI_FILE_HANDLE FileHandle,
+ IN CONST EFI_PEI_SERVICES **PeiServices
+ )
+{
+ extern UINT32 ArmReplaceLiveTranslationEntrySize;
+
+ EFI_FV_FILE_INFO FileInfo;
+ EFI_STATUS Status;
+
+ ASSERT (FileHandle != NULL);
+
+ Status = (*PeiServices)->FfsGetFileInfo (FileHandle, &FileInfo);
+ ASSERT_EFI_ERROR (Status);
+
+ //
+ // Some platforms do not cope very well with cache maintenance being
+ // performed on regions backed by NOR flash. Since the firmware image
+ // can be assumed to be clean to the PoC when running XIP, even when PEI
+ // is executing from DRAM, we only need to perform the cache maintenance
+ // when not executing in place.
+ //
+ if ((UINTN)FileInfo.Buffer <= (UINTN)ArmReplaceLiveTranslationEntry &&
+ ((UINTN)FileInfo.Buffer + FileInfo.BufferSize >=
+ (UINTN)ArmReplaceLiveTranslationEntry + ArmReplaceLiveTranslationEntrySize)) {
+ DEBUG ((EFI_D_INFO, "ArmMmuLib: skipping cache maintenance on XIP PEIM\n"));
+ } else {
+ DEBUG ((EFI_D_INFO, "ArmMmuLib: performing cache maintenance on shadowed PEIM\n"));
+ //
+ // The ArmReplaceLiveTranslationEntry () helper function may be invoked
+ // with the MMU off so we have to ensure that it gets cleaned to the PoC
+ //
+    WriteBackDataCacheRange ((VOID *)(UINTN)ArmReplaceLiveTranslationEntry,
+      ArmReplaceLiveTranslationEntrySize);
+ }
+
+ return RETURN_SUCCESS;
+}
diff --git a/roms/edk2/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibConvert.c b/roms/edk2/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibConvert.c
new file mode 100644
index 000000000..e3b02a9fb
--- /dev/null
+++ b/roms/edk2/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibConvert.c
@@ -0,0 +1,32 @@
+/** @file
+* File managing the MMU for ARMv7 architecture
+*
+* Copyright (c) 2011-2016, ARM Limited. All rights reserved.
+*
+* SPDX-License-Identifier: BSD-2-Clause-Patent
+*
+**/
+
+#include <Uefi.h>
+
+#include <Library/ArmLib.h>
+
+#include <Chipset/ArmV7.h>
+
+UINT32
+ConvertSectionAttributesToPageAttributes (
+ IN UINT32 SectionAttributes,
+ IN BOOLEAN IsLargePage
+ )
+{
+ UINT32 PageAttributes;
+
+ PageAttributes = 0;
+ PageAttributes |= TT_DESCRIPTOR_CONVERT_TO_PAGE_CACHE_POLICY (SectionAttributes, IsLargePage);
+ PageAttributes |= TT_DESCRIPTOR_CONVERT_TO_PAGE_AP (SectionAttributes);
+ PageAttributes |= TT_DESCRIPTOR_CONVERT_TO_PAGE_XN (SectionAttributes, IsLargePage);
+ PageAttributes |= TT_DESCRIPTOR_CONVERT_TO_PAGE_NG (SectionAttributes);
+ PageAttributes |= TT_DESCRIPTOR_CONVERT_TO_PAGE_S (SectionAttributes);
+
+ return PageAttributes;
+}
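+
+//
+// Put differently, each TT_DESCRIPTOR_CONVERT_TO_PAGE_* macro above extracts
+// one attribute field (cache policy, AP, XN, nG, S) from its position in a
+// 1 MB section descriptor and re-encodes it at the corresponding position in
+// a page descriptor, so a section that gets split into pages keeps exactly
+// the same memory attributes.
+//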
diff --git a/roms/edk2/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibCore.c b/roms/edk2/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibCore.c
new file mode 100644
index 000000000..15e836e75
--- /dev/null
+++ b/roms/edk2/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibCore.c
@@ -0,0 +1,413 @@
+/** @file
+* File managing the MMU for ARMv7 architecture
+*
+* Copyright (c) 2011-2016, ARM Limited. All rights reserved.
+*
+* SPDX-License-Identifier: BSD-2-Clause-Patent
+*
+**/
+
+#include <Uefi.h>
+#include <Chipset/ArmV7.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/CacheMaintenanceLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Library/ArmLib.h>
+#include <Library/BaseLib.h>
+#include <Library/DebugLib.h>
+#include <Library/PcdLib.h>
+
+#define ID_MMFR0_SHARELVL_SHIFT 12
+#define ID_MMFR0_SHARELVL_MASK 0xf
+#define ID_MMFR0_SHARELVL_ONE 0
+#define ID_MMFR0_SHARELVL_TWO 1
+
+#define ID_MMFR0_INNERSHR_SHIFT 28
+#define ID_MMFR0_INNERSHR_MASK 0xf
+#define ID_MMFR0_OUTERSHR_SHIFT 8
+#define ID_MMFR0_OUTERSHR_MASK 0xf
+
+#define ID_MMFR0_SHR_IMP_UNCACHED 0
+#define ID_MMFR0_SHR_IMP_HW_COHERENT 1
+#define ID_MMFR0_SHR_IGNORED 0xf
+
+UINTN
+EFIAPI
+ArmReadIdMmfr0 (
+ VOID
+ );
+
+BOOLEAN
+EFIAPI
+ArmHasMpExtensions (
+ VOID
+ );
+
+STATIC
+BOOLEAN
+PreferNonshareableMemory (
+ VOID
+ )
+{
+ UINTN Mmfr;
+ UINTN Val;
+
+ if (FeaturePcdGet (PcdNormalMemoryNonshareableOverride)) {
+ return TRUE;
+ }
+
+ //
+ // Check whether the innermost level of shareability (the level we will use
+ // by default to map normal memory) is implemented with hardware coherency
+ // support. Otherwise, revert to mapping as non-shareable.
+ //
+ Mmfr = ArmReadIdMmfr0 ();
+ switch ((Mmfr >> ID_MMFR0_SHARELVL_SHIFT) & ID_MMFR0_SHARELVL_MASK) {
+ case ID_MMFR0_SHARELVL_ONE:
+ // one level of shareability
+ Val = (Mmfr >> ID_MMFR0_OUTERSHR_SHIFT) & ID_MMFR0_OUTERSHR_MASK;
+ break;
+ case ID_MMFR0_SHARELVL_TWO:
+ // two levels of shareability
+ Val = (Mmfr >> ID_MMFR0_INNERSHR_SHIFT) & ID_MMFR0_INNERSHR_MASK;
+ break;
+ default:
+ // unexpected value -> shareable is the safe option
+ ASSERT (FALSE);
+ return FALSE;
+ }
+ return Val != ID_MMFR0_SHR_IMP_HW_COHERENT;
+}
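+
+//
+// For example, an ID_MMFR0 reporting SHARELVL == 1 (two shareability levels
+// implemented) and INNERSHR == 1 (hardware coherent) makes this function
+// return FALSE, so normal memory keeps its shareable mapping.
+//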
+
+STATIC
+VOID
+PopulateLevel2PageTable (
+ IN UINT32 *SectionEntry,
+ IN UINT32 PhysicalBase,
+ IN UINT32 RemainLength,
+ IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
+ )
+{
+ UINT32* PageEntry;
+ UINT32 Pages;
+ UINT32 Index;
+ UINT32 PageAttributes;
+ UINT32 SectionDescriptor;
+ UINT32 TranslationTable;
+ UINT32 BaseSectionAddress;
+ UINT32 FirstPageOffset;
+
+ switch (Attributes) {
+ case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
+ PageAttributes = TT_DESCRIPTOR_PAGE_WRITE_BACK;
+ break;
+ case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
+ PageAttributes = TT_DESCRIPTOR_PAGE_WRITE_BACK;
+ PageAttributes &= ~TT_DESCRIPTOR_PAGE_S_SHARED;
+ break;
+ case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
+ PageAttributes = TT_DESCRIPTOR_PAGE_WRITE_THROUGH;
+ break;
+ case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
+ PageAttributes = TT_DESCRIPTOR_PAGE_DEVICE;
+ break;
+ case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
+ PageAttributes = TT_DESCRIPTOR_PAGE_UNCACHED;
+ break;
+ default:
+ PageAttributes = TT_DESCRIPTOR_PAGE_UNCACHED;
+ break;
+ }
+
+ if (PreferNonshareableMemory ()) {
+ PageAttributes &= ~TT_DESCRIPTOR_PAGE_S_SHARED;
+ }
+
+ // Check if the Section Entry has already been populated. Otherwise attach a
+ // Level 2 Translation Table to it
+ if (*SectionEntry != 0) {
+    // The entry must be a page table. Otherwise the memory map contains
+    // overlapping regions.
+ if (TT_DESCRIPTOR_SECTION_TYPE_IS_PAGE_TABLE(*SectionEntry)) {
+ TranslationTable = *SectionEntry & TT_DESCRIPTOR_SECTION_PAGETABLE_ADDRESS_MASK;
+ } else if ((*SectionEntry & TT_DESCRIPTOR_SECTION_TYPE_MASK) == TT_DESCRIPTOR_SECTION_TYPE_SECTION) {
+ // Case where a virtual memory map descriptor overlapped a section entry
+
+      // Allocate a Level2 Page Table for this Section
+      TranslationTable = (UINTN)AllocateAlignedPages (
+                                  EFI_SIZE_TO_PAGES (TRANSLATION_TABLE_PAGE_SIZE),
+                                  TRANSLATION_TABLE_PAGE_ALIGNMENT);
+      if (TranslationTable == 0) {
+        // Allocation failed; leave the existing section mapping in place
+        ASSERT (0);
+        return;
+      }
+
+      // Translate the Section Descriptor into Page Descriptor
+      SectionDescriptor = TT_DESCRIPTOR_PAGE_TYPE_PAGE | ConvertSectionAttributesToPageAttributes (*SectionEntry, FALSE);
+
+ BaseSectionAddress = TT_DESCRIPTOR_SECTION_BASE_ADDRESS(*SectionEntry);
+
+ //
+ // Make sure we are not inadvertently hitting in the caches
+ // when populating the page tables
+ //
+ InvalidateDataCacheRange ((VOID *)TranslationTable,
+ TRANSLATION_TABLE_PAGE_SIZE);
+
+ // Populate the new Level2 Page Table for the section
+ PageEntry = (UINT32*)TranslationTable;
+ for (Index = 0; Index < TRANSLATION_TABLE_PAGE_COUNT; Index++) {
+ PageEntry[Index] = TT_DESCRIPTOR_PAGE_BASE_ADDRESS(BaseSectionAddress + (Index << 12)) | SectionDescriptor;
+ }
+
+ // Overwrite the section entry to point to the new Level2 Translation Table
+ *SectionEntry = (TranslationTable & TT_DESCRIPTOR_SECTION_PAGETABLE_ADDRESS_MASK) |
+ (IS_ARM_MEMORY_REGION_ATTRIBUTES_SECURE(Attributes) ? (1 << 3) : 0) |
+ TT_DESCRIPTOR_SECTION_TYPE_PAGE_TABLE;
+ } else {
+ // We do not support the other section type (16MB Section)
+ ASSERT(0);
+ return;
+ }
+  } else {
+    TranslationTable = (UINTN)AllocateAlignedPages (
+                                EFI_SIZE_TO_PAGES (TRANSLATION_TABLE_PAGE_SIZE),
+                                TRANSLATION_TABLE_PAGE_ALIGNMENT);
+    if (TranslationTable == 0) {
+      // Allocation failed; the region is left unmapped
+      ASSERT (0);
+      return;
+    }
+    //
+ // Make sure we are not inadvertently hitting in the caches
+ // when populating the page tables
+ //
+ InvalidateDataCacheRange ((VOID *)TranslationTable,
+ TRANSLATION_TABLE_PAGE_SIZE);
+ ZeroMem ((VOID *)TranslationTable, TRANSLATION_TABLE_PAGE_SIZE);
+
+ *SectionEntry = (TranslationTable & TT_DESCRIPTOR_SECTION_PAGETABLE_ADDRESS_MASK) |
+ (IS_ARM_MEMORY_REGION_ATTRIBUTES_SECURE(Attributes) ? (1 << 3) : 0) |
+ TT_DESCRIPTOR_SECTION_TYPE_PAGE_TABLE;
+ }
+
+ FirstPageOffset = (PhysicalBase & TT_DESCRIPTOR_PAGE_INDEX_MASK) >> TT_DESCRIPTOR_PAGE_BASE_SHIFT;
+ PageEntry = (UINT32 *)TranslationTable + FirstPageOffset;
+ Pages = RemainLength / TT_DESCRIPTOR_PAGE_SIZE;
+
+ ASSERT (FirstPageOffset + Pages <= TRANSLATION_TABLE_PAGE_COUNT);
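+
+  //
+  // For example, PhysicalBase = 0x123000 with RemainLength = 0x5000 yields
+  // FirstPageOffset = 0x23 and Pages = 5, so entries 0x23 through 0x27 of
+  // this Level 2 table are written below.
+  //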
+
+ for (Index = 0; Index < Pages; Index++) {
+ *PageEntry++ = TT_DESCRIPTOR_PAGE_BASE_ADDRESS(PhysicalBase) | PageAttributes;
+ PhysicalBase += TT_DESCRIPTOR_PAGE_SIZE;
+ }
+
+ //
+ // Invalidate again to ensure that any line fetches that may have occurred
+ // [speculatively] since the previous invalidate are evicted again.
+ //
+ ArmDataMemoryBarrier ();
+ InvalidateDataCacheRange ((UINT32 *)TranslationTable + FirstPageOffset,
+ RemainLength / TT_DESCRIPTOR_PAGE_SIZE * sizeof (*PageEntry));
+}
+
+STATIC
+VOID
+FillTranslationTable (
+ IN UINT32 *TranslationTable,
+ IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryRegion
+ )
+{
+ UINT32 *SectionEntry;
+ UINT32 Attributes;
+ UINT32 PhysicalBase;
+ UINT64 RemainLength;
+ UINT32 PageMapLength;
+
+ ASSERT(MemoryRegion->Length > 0);
+
+ if (MemoryRegion->PhysicalBase >= SIZE_4GB) {
+ return;
+ }
+
+ PhysicalBase = MemoryRegion->PhysicalBase;
+ RemainLength = MIN(MemoryRegion->Length, SIZE_4GB - PhysicalBase);
+
+ switch (MemoryRegion->Attributes) {
+ case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
+ Attributes = TT_DESCRIPTOR_SECTION_WRITE_BACK(0);
+ break;
+ case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
+ Attributes = TT_DESCRIPTOR_SECTION_WRITE_BACK(0);
+ Attributes &= ~TT_DESCRIPTOR_SECTION_S_SHARED;
+ break;
+ case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
+ Attributes = TT_DESCRIPTOR_SECTION_WRITE_THROUGH(0);
+ break;
+ case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
+ Attributes = TT_DESCRIPTOR_SECTION_DEVICE(0);
+ break;
+ case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
+ Attributes = TT_DESCRIPTOR_SECTION_UNCACHED(0);
+ break;
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
+ Attributes = TT_DESCRIPTOR_SECTION_WRITE_BACK(1);
+ break;
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
+ Attributes = TT_DESCRIPTOR_SECTION_WRITE_BACK(1);
+ Attributes &= ~TT_DESCRIPTOR_SECTION_S_SHARED;
+ break;
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
+ Attributes = TT_DESCRIPTOR_SECTION_WRITE_THROUGH(1);
+ break;
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
+ Attributes = TT_DESCRIPTOR_SECTION_DEVICE(1);
+ break;
+ case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
+ Attributes = TT_DESCRIPTOR_SECTION_UNCACHED(1);
+ break;
+ default:
+ Attributes = TT_DESCRIPTOR_SECTION_UNCACHED(0);
+ break;
+ }
+
+ if (PreferNonshareableMemory ()) {
+ Attributes &= ~TT_DESCRIPTOR_SECTION_S_SHARED;
+ }
+
+ // Get the first section entry for this mapping
+ SectionEntry = TRANSLATION_TABLE_ENTRY_FOR_VIRTUAL_ADDRESS(TranslationTable, MemoryRegion->VirtualBase);
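+
+  // For example, VirtualBase = 0x48300000 selects first level entry 0x483;
+  // each of the 4096 first level entries covers one 1 MB section.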
+
+ while (RemainLength != 0) {
+ if (PhysicalBase % TT_DESCRIPTOR_SECTION_SIZE == 0 &&
+ RemainLength >= TT_DESCRIPTOR_SECTION_SIZE) {
+      // Case: Physical address aligned on the Section Size (1MB) && the length
+      // is at least the Section Size
+ *SectionEntry = TT_DESCRIPTOR_SECTION_BASE_ADDRESS(PhysicalBase) | Attributes;
+
+ //
+ // Issue a DMB to ensure that the page table entry update made it to
+ // memory before we issue the invalidate, otherwise, a subsequent
+ // speculative fetch could observe the old value.
+ //
+ ArmDataMemoryBarrier ();
+ ArmInvalidateDataCacheEntryByMVA ((UINTN)SectionEntry++);
+
+ PhysicalBase += TT_DESCRIPTOR_SECTION_SIZE;
+ RemainLength -= TT_DESCRIPTOR_SECTION_SIZE;
+ } else {
+ PageMapLength = MIN (RemainLength, TT_DESCRIPTOR_SECTION_SIZE -
+ (PhysicalBase % TT_DESCRIPTOR_SECTION_SIZE));
+
+ // Case: Physical address aligned on the Section Size (1MB) && the length
+ // does not fill a section
+ // Case: Physical address NOT aligned on the Section Size (1MB)
+ PopulateLevel2PageTable (SectionEntry, PhysicalBase, PageMapLength,
+ MemoryRegion->Attributes);
+
+ //
+ // Issue a DMB to ensure that the page table entry update made it to
+ // memory before we issue the invalidate, otherwise, a subsequent
+ // speculative fetch could observe the old value.
+ //
+ ArmDataMemoryBarrier ();
+ ArmInvalidateDataCacheEntryByMVA ((UINTN)SectionEntry++);
+
+ // If it is the last entry
+ if (RemainLength < TT_DESCRIPTOR_SECTION_SIZE) {
+ break;
+ }
+
+ PhysicalBase += PageMapLength;
+ RemainLength -= PageMapLength;
+ }
+ }
+}
+
+RETURN_STATUS
+EFIAPI
+ArmConfigureMmu (
+ IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable,
+ OUT VOID **TranslationTableBase OPTIONAL,
+ OUT UINTN *TranslationTableSize OPTIONAL
+ )
+{
+ VOID *TranslationTable;
+ UINT32 TTBRAttributes;
+
+ TranslationTable = AllocateAlignedPages (
+ EFI_SIZE_TO_PAGES (TRANSLATION_TABLE_SECTION_SIZE),
+ TRANSLATION_TABLE_SECTION_ALIGNMENT);
+ if (TranslationTable == NULL) {
+ return RETURN_OUT_OF_RESOURCES;
+ }
+
+ if (TranslationTableBase != NULL) {
+ *TranslationTableBase = TranslationTable;
+ }
+
+ if (TranslationTableSize != NULL) {
+ *TranslationTableSize = TRANSLATION_TABLE_SECTION_SIZE;
+ }
+
+ //
+ // Make sure we are not inadvertently hitting in the caches
+ // when populating the page tables
+ //
+ InvalidateDataCacheRange (TranslationTable, TRANSLATION_TABLE_SECTION_SIZE);
+ ZeroMem (TranslationTable, TRANSLATION_TABLE_SECTION_SIZE);
+
+ while (MemoryTable->Length != 0) {
+ FillTranslationTable (TranslationTable, MemoryTable);
+ MemoryTable++;
+ }
+
+ TTBRAttributes = ArmHasMpExtensions () ? TTBR_MP_WRITE_BACK_ALLOC
+ : TTBR_WRITE_BACK_ALLOC;
+ if (TTBRAttributes & TTBR_SHAREABLE) {
+ if (PreferNonshareableMemory ()) {
+ TTBRAttributes ^= TTBR_SHAREABLE;
+ } else {
+ //
+ // Unlike the S bit in the short descriptors, which implies inner shareable
+ // on an implementation that supports two levels, the meaning of the S bit
+ // in the TTBR depends on the NOS bit, which defaults to Outer Shareable.
+ // However, we should only set this bit after we have confirmed that the
+ // implementation supports multiple levels, or else the NOS bit is UNK/SBZP
+ //
+ if (((ArmReadIdMmfr0 () >> 12) & 0xf) != 0) {
+ TTBRAttributes |= TTBR_NOT_OUTER_SHAREABLE;
+ }
+ }
+ }
+
+ ArmSetTTBR0 ((VOID *)((UINTN)TranslationTable | TTBRAttributes));
+
+ //
+ // The TTBCR register value is undefined at reset in the Non-Secure world.
+ // Writing 0 has the effect of:
+ // Clearing EAE: Use short descriptors, as mandated by specification.
+ // Clearing PD0 and PD1: Translation Table Walk Disable is off.
+ // Clearing N: Perform all translation table walks through TTBR0.
+ // (0 is the default reset value in systems not implementing
+ // the Security Extensions.)
+ //
+ ArmSetTTBCR (0);
+
+ ArmSetDomainAccessControl (DOMAIN_ACCESS_CONTROL_NONE(15) |
+ DOMAIN_ACCESS_CONTROL_NONE(14) |
+ DOMAIN_ACCESS_CONTROL_NONE(13) |
+ DOMAIN_ACCESS_CONTROL_NONE(12) |
+ DOMAIN_ACCESS_CONTROL_NONE(11) |
+ DOMAIN_ACCESS_CONTROL_NONE(10) |
+ DOMAIN_ACCESS_CONTROL_NONE( 9) |
+ DOMAIN_ACCESS_CONTROL_NONE( 8) |
+ DOMAIN_ACCESS_CONTROL_NONE( 7) |
+ DOMAIN_ACCESS_CONTROL_NONE( 6) |
+ DOMAIN_ACCESS_CONTROL_NONE( 5) |
+ DOMAIN_ACCESS_CONTROL_NONE( 4) |
+ DOMAIN_ACCESS_CONTROL_NONE( 3) |
+ DOMAIN_ACCESS_CONTROL_NONE( 2) |
+ DOMAIN_ACCESS_CONTROL_NONE( 1) |
+ DOMAIN_ACCESS_CONTROL_CLIENT(0));
+
+  ArmEnableInstructionCache ();
+  ArmEnableDataCache ();
+  ArmEnableMmu ();
+ return RETURN_SUCCESS;
+}
diff --git a/roms/edk2/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibUpdate.c b/roms/edk2/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibUpdate.c
new file mode 100644
index 000000000..1ec734dea
--- /dev/null
+++ b/roms/edk2/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibUpdate.c
@@ -0,0 +1,435 @@
+/** @file
+* File managing the MMU for ARMv7 architecture
+*
+* Copyright (c) 2011-2016, ARM Limited. All rights reserved.
+*
+* SPDX-License-Identifier: BSD-2-Clause-Patent
+*
+**/
+
+#include <Uefi.h>
+
+#include <Library/ArmLib.h>
+#include <Library/BaseLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/DebugLib.h>
+#include <Library/CacheMaintenanceLib.h>
+#include <Library/MemoryAllocationLib.h>
+
+#include <Chipset/ArmV7.h>
+
+#define __EFI_MEMORY_RWX 0 // no restrictions
+
+#define CACHE_ATTRIBUTE_MASK (EFI_MEMORY_UC | \
+ EFI_MEMORY_WC | \
+ EFI_MEMORY_WT | \
+ EFI_MEMORY_WB | \
+ EFI_MEMORY_UCE | \
+ EFI_MEMORY_WP)
+
+STATIC
+EFI_STATUS
+ConvertSectionToPages (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress
+ )
+{
+ UINT32 FirstLevelIdx;
+ UINT32 SectionDescriptor;
+ UINT32 PageTableDescriptor;
+ UINT32 PageDescriptor;
+ UINT32 Index;
+
+ volatile ARM_FIRST_LEVEL_DESCRIPTOR *FirstLevelTable;
+ volatile ARM_PAGE_TABLE_ENTRY *PageTable;
+
+ DEBUG ((DEBUG_PAGE, "Converting section at 0x%x to pages\n", (UINTN)BaseAddress));
+
+ // Obtain page table base
+ FirstLevelTable = (ARM_FIRST_LEVEL_DESCRIPTOR *)ArmGetTTBR0BaseAddress ();
+
+ // Calculate index into first level translation table for start of modification
+ FirstLevelIdx = TT_DESCRIPTOR_SECTION_BASE_ADDRESS(BaseAddress) >> TT_DESCRIPTOR_SECTION_BASE_SHIFT;
+ ASSERT (FirstLevelIdx < TRANSLATION_TABLE_SECTION_COUNT);
+
+ // Get section attributes and convert to page attributes
+ SectionDescriptor = FirstLevelTable[FirstLevelIdx];
+ PageDescriptor = TT_DESCRIPTOR_PAGE_TYPE_PAGE | ConvertSectionAttributesToPageAttributes (SectionDescriptor, FALSE);
+
+ // Allocate a page table for the 4KB entries (we use up a full page even though we only need 1KB)
+ PageTable = (volatile ARM_PAGE_TABLE_ENTRY *)AllocatePages (1);
+ if (PageTable == NULL) {
+ return EFI_OUT_OF_RESOURCES;
+ }
+
+ // Write the page table entries out
+ for (Index = 0; Index < TRANSLATION_TABLE_PAGE_COUNT; Index++) {
+ PageTable[Index] = TT_DESCRIPTOR_PAGE_BASE_ADDRESS(BaseAddress + (Index << 12)) | PageDescriptor;
+ }
+
+ // Formulate page table entry, Domain=0, NS=0
+ PageTableDescriptor = (((UINTN)PageTable) & TT_DESCRIPTOR_SECTION_PAGETABLE_ADDRESS_MASK) | TT_DESCRIPTOR_SECTION_TYPE_PAGE_TABLE;
+
+ // Write the page table entry out, replacing section entry
+ FirstLevelTable[FirstLevelIdx] = PageTableDescriptor;
+
+ return EFI_SUCCESS;
+}
+
+STATIC
+EFI_STATUS
+UpdatePageEntries (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 Attributes,
+ OUT BOOLEAN *FlushTlbs OPTIONAL
+ )
+{
+ EFI_STATUS Status;
+ UINT32 EntryValue;
+ UINT32 EntryMask;
+ UINT32 FirstLevelIdx;
+ UINT32 Offset;
+ UINT32 NumPageEntries;
+ UINT32 Descriptor;
+ UINT32 p;
+ UINT32 PageTableIndex;
+ UINT32 PageTableEntry;
+ UINT32 CurrentPageTableEntry;
+ VOID *Mva;
+
+ volatile ARM_FIRST_LEVEL_DESCRIPTOR *FirstLevelTable;
+ volatile ARM_PAGE_TABLE_ENTRY *PageTable;
+
+ Status = EFI_SUCCESS;
+
+ // EntryMask: bitmask of values to change (1 = change this value, 0 = leave alone)
+ // EntryValue: values at bit positions specified by EntryMask
+ EntryMask = TT_DESCRIPTOR_PAGE_TYPE_MASK | TT_DESCRIPTOR_PAGE_AP_MASK;
+ if (Attributes & EFI_MEMORY_XP) {
+ EntryValue = TT_DESCRIPTOR_PAGE_TYPE_PAGE_XN;
+ } else {
+ EntryValue = TT_DESCRIPTOR_PAGE_TYPE_PAGE;
+ }
+
+ // Although the PI spec is unclear on this, the GCD guarantees that only
+ // one Attribute bit is set at a time, so the order of the conditionals below
+ // is irrelevant. If no memory attribute is specified, we preserve whatever
+ // memory type is set in the page tables, and update the permission attributes
+ // only.
+ if (Attributes & EFI_MEMORY_UC) {
+ // modify cacheability attributes
+ EntryMask |= TT_DESCRIPTOR_PAGE_CACHE_POLICY_MASK;
+ // map to strongly ordered
+ EntryValue |= TT_DESCRIPTOR_PAGE_CACHE_POLICY_STRONGLY_ORDERED; // TEX[2:0] = 0, C=0, B=0
+ } else if (Attributes & EFI_MEMORY_WC) {
+ // modify cacheability attributes
+ EntryMask |= TT_DESCRIPTOR_PAGE_CACHE_POLICY_MASK;
+    // map to normal non-cacheable
+    EntryValue |= TT_DESCRIPTOR_PAGE_CACHE_POLICY_NON_CACHEABLE; // TEX[2:0] = 001, C=0, B=0
+ } else if (Attributes & EFI_MEMORY_WT) {
+ // modify cacheability attributes
+ EntryMask |= TT_DESCRIPTOR_PAGE_CACHE_POLICY_MASK;
+ // write through with no-allocate
+ EntryValue |= TT_DESCRIPTOR_PAGE_CACHE_POLICY_WRITE_THROUGH_NO_ALLOC; // TEX [2:0] = 0, C=1, B=0
+ } else if (Attributes & EFI_MEMORY_WB) {
+ // modify cacheability attributes
+ EntryMask |= TT_DESCRIPTOR_PAGE_CACHE_POLICY_MASK;
+ // write back (with allocate)
+ EntryValue |= TT_DESCRIPTOR_PAGE_CACHE_POLICY_WRITE_BACK_ALLOC; // TEX [2:0] = 001, C=1, B=1
+ } else if (Attributes & CACHE_ATTRIBUTE_MASK) {
+ // catch unsupported memory type attributes
+ ASSERT (FALSE);
+ return EFI_UNSUPPORTED;
+ }
+
+ if (Attributes & EFI_MEMORY_RO) {
+ EntryValue |= TT_DESCRIPTOR_PAGE_AP_RO_RO;
+ } else {
+ EntryValue |= TT_DESCRIPTOR_PAGE_AP_RW_RW;
+ }
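+
+  //
+  // For example, Attributes == (EFI_MEMORY_RO | EFI_MEMORY_XP) leaves the
+  // cache policy bits out of EntryMask (the existing memory type is kept)
+  // and sets EntryValue to an XN page type with read-only permissions.
+  //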
+
+ // Obtain page table base
+ FirstLevelTable = (ARM_FIRST_LEVEL_DESCRIPTOR *)ArmGetTTBR0BaseAddress ();
+
+ // Calculate number of 4KB page table entries to change
+ NumPageEntries = Length / TT_DESCRIPTOR_PAGE_SIZE;
+
+ // Iterate for the number of 4KB pages to change
+ Offset = 0;
+  for (p = 0; p < NumPageEntries; p++) {
+ // Calculate index into first level translation table for page table value
+
+ FirstLevelIdx = TT_DESCRIPTOR_SECTION_BASE_ADDRESS(BaseAddress + Offset) >> TT_DESCRIPTOR_SECTION_BASE_SHIFT;
+ ASSERT (FirstLevelIdx < TRANSLATION_TABLE_SECTION_COUNT);
+
+ // Read the descriptor from the first level page table
+ Descriptor = FirstLevelTable[FirstLevelIdx];
+
+ // Does this descriptor need to be converted from section entry to 4K pages?
+ if (!TT_DESCRIPTOR_SECTION_TYPE_IS_PAGE_TABLE(Descriptor)) {
+ Status = ConvertSectionToPages (FirstLevelIdx << TT_DESCRIPTOR_SECTION_BASE_SHIFT);
+ if (EFI_ERROR(Status)) {
+ // Exit for loop
+ break;
+ }
+
+ // Re-read descriptor
+ Descriptor = FirstLevelTable[FirstLevelIdx];
+ if (FlushTlbs != NULL) {
+ *FlushTlbs = TRUE;
+ }
+ }
+
+ // Obtain page table base address
+ PageTable = (ARM_PAGE_TABLE_ENTRY *)TT_DESCRIPTOR_PAGE_BASE_ADDRESS(Descriptor);
+
+ // Calculate index into the page table
+ PageTableIndex = ((BaseAddress + Offset) & TT_DESCRIPTOR_PAGE_INDEX_MASK) >> TT_DESCRIPTOR_PAGE_BASE_SHIFT;
+ ASSERT (PageTableIndex < TRANSLATION_TABLE_PAGE_COUNT);
+
+ // Get the entry
+ CurrentPageTableEntry = PageTable[PageTableIndex];
+
+ // Mask off appropriate fields
+ PageTableEntry = CurrentPageTableEntry & ~EntryMask;
+
+ // Mask in new attributes and/or permissions
+ PageTableEntry |= EntryValue;
+
+ if (CurrentPageTableEntry != PageTableEntry) {
+ Mva = (VOID *)(UINTN)((((UINTN)FirstLevelIdx) << TT_DESCRIPTOR_SECTION_BASE_SHIFT) + (PageTableIndex << TT_DESCRIPTOR_PAGE_BASE_SHIFT));
+
+ // Only need to update if we are changing the entry
+ PageTable[PageTableIndex] = PageTableEntry;
+ ArmUpdateTranslationTableEntry ((VOID *)&PageTable[PageTableIndex], Mva);
+ }
+
+ Status = EFI_SUCCESS;
+ Offset += TT_DESCRIPTOR_PAGE_SIZE;
+
+ } // End first level translation table loop
+
+ return Status;
+}
+
+STATIC
+EFI_STATUS
+UpdateSectionEntries (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 Attributes
+ )
+{
+ EFI_STATUS Status = EFI_SUCCESS;
+ UINT32 EntryMask;
+ UINT32 EntryValue;
+ UINT32 FirstLevelIdx;
+ UINT32 NumSections;
+ UINT32 i;
+ UINT32 CurrentDescriptor;
+ UINT32 Descriptor;
+ VOID *Mva;
+ volatile ARM_FIRST_LEVEL_DESCRIPTOR *FirstLevelTable;
+
+ // EntryMask: bitmask of values to change (1 = change this value, 0 = leave alone)
+ // EntryValue: values at bit positions specified by EntryMask
+
+ // Make sure we handle a section range that is unmapped
+ EntryMask = TT_DESCRIPTOR_SECTION_TYPE_MASK | TT_DESCRIPTOR_SECTION_XN_MASK |
+ TT_DESCRIPTOR_SECTION_AP_MASK;
+ EntryValue = TT_DESCRIPTOR_SECTION_TYPE_SECTION;
+
+ // Although the PI spec is unclear on this, the GCD guarantees that only
+ // one Attribute bit is set at a time, so the order of the conditionals below
+ // is irrelevant. If no memory attribute is specified, we preserve whatever
+ // memory type is set in the page tables, and update the permission attributes
+ // only.
+ if (Attributes & EFI_MEMORY_UC) {
+ // modify cacheability attributes
+ EntryMask |= TT_DESCRIPTOR_SECTION_CACHE_POLICY_MASK;
+ // map to strongly ordered
+ EntryValue |= TT_DESCRIPTOR_SECTION_CACHE_POLICY_STRONGLY_ORDERED; // TEX[2:0] = 0, C=0, B=0
+ } else if (Attributes & EFI_MEMORY_WC) {
+ // modify cacheability attributes
+ EntryMask |= TT_DESCRIPTOR_SECTION_CACHE_POLICY_MASK;
+    // map to normal non-cacheable
+    EntryValue |= TT_DESCRIPTOR_SECTION_CACHE_POLICY_NON_CACHEABLE; // TEX[2:0] = 001, C=0, B=0
+ } else if (Attributes & EFI_MEMORY_WT) {
+ // modify cacheability attributes
+ EntryMask |= TT_DESCRIPTOR_SECTION_CACHE_POLICY_MASK;
+ // write through with no-allocate
+ EntryValue |= TT_DESCRIPTOR_SECTION_CACHE_POLICY_WRITE_THROUGH_NO_ALLOC; // TEX [2:0] = 0, C=1, B=0
+ } else if (Attributes & EFI_MEMORY_WB) {
+ // modify cacheability attributes
+ EntryMask |= TT_DESCRIPTOR_SECTION_CACHE_POLICY_MASK;
+ // write back (with allocate)
+ EntryValue |= TT_DESCRIPTOR_SECTION_CACHE_POLICY_WRITE_BACK_ALLOC; // TEX [2:0] = 001, C=1, B=1
+ } else if (Attributes & CACHE_ATTRIBUTE_MASK) {
+ // catch unsupported memory type attributes
+ ASSERT (FALSE);
+ return EFI_UNSUPPORTED;
+ }
+
+ if (Attributes & EFI_MEMORY_RO) {
+ EntryValue |= TT_DESCRIPTOR_SECTION_AP_RO_RO;
+ } else {
+ EntryValue |= TT_DESCRIPTOR_SECTION_AP_RW_RW;
+ }
+
+ if (Attributes & EFI_MEMORY_XP) {
+ EntryValue |= TT_DESCRIPTOR_SECTION_XN_MASK;
+ }
+
+ // obtain page table base
+ FirstLevelTable = (ARM_FIRST_LEVEL_DESCRIPTOR *)ArmGetTTBR0BaseAddress ();
+
+ // calculate index into first level translation table for start of modification
+ FirstLevelIdx = TT_DESCRIPTOR_SECTION_BASE_ADDRESS(BaseAddress) >> TT_DESCRIPTOR_SECTION_BASE_SHIFT;
+ ASSERT (FirstLevelIdx < TRANSLATION_TABLE_SECTION_COUNT);
+
+ // calculate number of 1MB first level entries this applies to
+ NumSections = Length / TT_DESCRIPTOR_SECTION_SIZE;
+
+ // iterate through each descriptor
+  for (i = 0; i < NumSections; i++) {
+ CurrentDescriptor = FirstLevelTable[FirstLevelIdx + i];
+
+ // has this descriptor already been converted to pages?
+ if (TT_DESCRIPTOR_SECTION_TYPE_IS_PAGE_TABLE(CurrentDescriptor)) {
+ // forward this 1MB range to page table function instead
+ Status = UpdatePageEntries (
+ (FirstLevelIdx + i) << TT_DESCRIPTOR_SECTION_BASE_SHIFT,
+ TT_DESCRIPTOR_SECTION_SIZE,
+ Attributes,
+ NULL);
+ } else {
+ // still a section entry
+
+ if (CurrentDescriptor != 0) {
+ // mask off appropriate fields
+ Descriptor = CurrentDescriptor & ~EntryMask;
+ } else {
+ Descriptor = ((UINTN)FirstLevelIdx + i) << TT_DESCRIPTOR_SECTION_BASE_SHIFT;
+ }
+
+ // mask in new attributes and/or permissions
+ Descriptor |= EntryValue;
+
+ if (CurrentDescriptor != Descriptor) {
+ Mva = (VOID *)(UINTN)(((UINTN)FirstLevelIdx + i) << TT_DESCRIPTOR_SECTION_BASE_SHIFT);
+
+ // Only need to update if we are changing the descriptor
+ FirstLevelTable[FirstLevelIdx + i] = Descriptor;
+ ArmUpdateTranslationTableEntry ((VOID *)&FirstLevelTable[FirstLevelIdx + i], Mva);
+ }
+
+ Status = EFI_SUCCESS;
+ }
+ }
+
+ return Status;
+}
+
+EFI_STATUS
+ArmSetMemoryAttributes (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length,
+ IN UINT64 Attributes
+ )
+{
+ EFI_STATUS Status;
+ UINT64 ChunkLength;
+ BOOLEAN FlushTlbs;
+
+ if (BaseAddress > (UINT64)MAX_ADDRESS) {
+ return EFI_UNSUPPORTED;
+ }
+
+ Length = MIN (Length, (UINT64)MAX_ADDRESS - BaseAddress + 1);
+ if (Length == 0) {
+ return EFI_SUCCESS;
+ }
+
+ FlushTlbs = FALSE;
+ while (Length > 0) {
+ if ((BaseAddress % TT_DESCRIPTOR_SECTION_SIZE == 0) &&
+ Length >= TT_DESCRIPTOR_SECTION_SIZE) {
+
+ ChunkLength = Length - Length % TT_DESCRIPTOR_SECTION_SIZE;
+
+ DEBUG ((DEBUG_PAGE,
+ "SetMemoryAttributes(): MMU section 0x%lx length 0x%lx to %lx\n",
+ BaseAddress, ChunkLength, Attributes));
+
+ Status = UpdateSectionEntries (BaseAddress, ChunkLength, Attributes);
+
+ FlushTlbs = TRUE;
+ } else {
+
+ //
+ // Process page by page until the next section boundary, but only if
+ // we have more than a section's worth of area to deal with after that.
+ //
+ ChunkLength = TT_DESCRIPTOR_SECTION_SIZE -
+ (BaseAddress % TT_DESCRIPTOR_SECTION_SIZE);
+ if (ChunkLength + TT_DESCRIPTOR_SECTION_SIZE > Length) {
+ ChunkLength = Length;
+ }
+
+ DEBUG ((DEBUG_PAGE,
+ "SetMemoryAttributes(): MMU page 0x%lx length 0x%lx to %lx\n",
+ BaseAddress, ChunkLength, Attributes));
+
+ Status = UpdatePageEntries (BaseAddress, ChunkLength, Attributes,
+ &FlushTlbs);
+ }
+
+ if (EFI_ERROR (Status)) {
+ break;
+ }
+
+ BaseAddress += ChunkLength;
+ Length -= ChunkLength;
+ }
+
+ if (FlushTlbs) {
+ ArmInvalidateTlb ();
+ }
+ return Status;
+}
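+
+//
+// Sketch of the chunking above: BaseAddress = 0x100800 with Length = 0x300000
+// is handled as a 0xFF800 page-mapped chunk up to the 1 MB boundary at
+// 0x200000, then a single 0x200000 section-mapped chunk, and finally the
+// remaining 0x800 as pages again.
+//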
+
+EFI_STATUS
+ArmSetMemoryRegionNoExec (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length
+ )
+{
+ return ArmSetMemoryAttributes (BaseAddress, Length, EFI_MEMORY_XP);
+}
+
+EFI_STATUS
+ArmClearMemoryRegionNoExec (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length
+ )
+{
+ return ArmSetMemoryAttributes (BaseAddress, Length, __EFI_MEMORY_RWX);
+}
+
+EFI_STATUS
+ArmSetMemoryRegionReadOnly (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length
+ )
+{
+ return ArmSetMemoryAttributes (BaseAddress, Length, EFI_MEMORY_RO);
+}
+
+EFI_STATUS
+ArmClearMemoryRegionReadOnly (
+ IN EFI_PHYSICAL_ADDRESS BaseAddress,
+ IN UINT64 Length
+ )
+{
+ return ArmSetMemoryAttributes (BaseAddress, Length, __EFI_MEMORY_RWX);
+}
diff --git a/roms/edk2/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibV7Support.S b/roms/edk2/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibV7Support.S
new file mode 100644
index 000000000..a97e3fabb
--- /dev/null
+++ b/roms/edk2/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibV7Support.S
@@ -0,0 +1,29 @@
+#------------------------------------------------------------------------------
+#
+# Copyright (c) 2016, Linaro Limited. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+#------------------------------------------------------------------------------
+
+#include <AsmMacroIoLib.h>
+
+.text
+.align 2
+
+GCC_ASM_EXPORT (ArmReadIdMmfr0)
+GCC_ASM_EXPORT (ArmHasMpExtensions)
+
+#------------------------------------------------------------------------------
+
+ASM_PFX (ArmHasMpExtensions):
+ mrc p15,0,R0,c0,c0,5
+ // Get Multiprocessing extension (bit31)
+ lsr R0, R0, #31
+ bx LR
+
+ASM_PFX(ArmReadIdMmfr0):
+ mrc p15, 0, r0, c0, c1, 4 @ Read ID_MMFR0 Register
+ bx lr
+
+ASM_FUNCTION_REMOVE_IF_UNREFERENCED
diff --git a/roms/edk2/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibV7Support.asm b/roms/edk2/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibV7Support.asm
new file mode 100644
index 000000000..a65e95db5
--- /dev/null
+++ b/roms/edk2/ArmPkg/Library/ArmMmuLib/Arm/ArmMmuLibV7Support.asm
@@ -0,0 +1,26 @@
+//------------------------------------------------------------------------------
+//
+// Copyright (c) 2016, Linaro Limited. All rights reserved.
+//
+// SPDX-License-Identifier: BSD-2-Clause-Patent
+//
+//------------------------------------------------------------------------------
+
+
+
+ INCLUDE AsmMacroExport.inc
+
+
+//------------------------------------------------------------------------------
+
+ RVCT_ASM_EXPORT ArmHasMpExtensions
+ mrc p15,0,R0,c0,c0,5
+ // Get Multiprocessing extension (bit31)
+ lsr R0, R0, #31
+ bx LR
+
+ RVCT_ASM_EXPORT ArmReadIdMmfr0
+ mrc p15, 0, r0, c0, c1, 4 ; Read ID_MMFR0 Register
+ bx lr
+
+ END
diff --git a/roms/edk2/ArmPkg/Library/ArmMmuLib/ArmMmuBaseLib.inf b/roms/edk2/ArmPkg/Library/ArmMmuLib/ArmMmuBaseLib.inf
new file mode 100644
index 000000000..2a7e71479
--- /dev/null
+++ b/roms/edk2/ArmPkg/Library/ArmMmuLib/ArmMmuBaseLib.inf
@@ -0,0 +1,43 @@
+#/** @file
+#
+# Copyright (c) 2016 Linaro Ltd. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+#
+#**/
+
+[Defines]
+ INF_VERSION = 0x00010005
+ BASE_NAME = ArmMmuBaseLib
+ FILE_GUID = da8f0232-fb14-42f0-922c-63104d2c70bd
+ MODULE_TYPE = BASE
+ VERSION_STRING = 1.0
+ LIBRARY_CLASS = ArmMmuLib
+
+[Defines.AARCH64]
+ CONSTRUCTOR = ArmMmuBaseLibConstructor
+
+[Sources.AARCH64]
+ AArch64/ArmMmuLibCore.c
+ AArch64/ArmMmuLibReplaceEntry.S
+
+[Sources.ARM]
+ Arm/ArmMmuLibConvert.c
+ Arm/ArmMmuLibCore.c
+ Arm/ArmMmuLibUpdate.c
+ Arm/ArmMmuLibV7Support.S |GCC
+ Arm/ArmMmuLibV7Support.asm |RVCT
+
+[Packages]
+ ArmPkg/ArmPkg.dec
+ EmbeddedPkg/EmbeddedPkg.dec
+ MdePkg/MdePkg.dec
+
+[LibraryClasses]
+ ArmLib
+ CacheMaintenanceLib
+ MemoryAllocationLib
+
+[Pcd.ARM]
+ gArmTokenSpaceGuid.PcdNormalMemoryNonshareableOverride
diff --git a/roms/edk2/ArmPkg/Library/ArmMmuLib/ArmMmuPeiLib.inf b/roms/edk2/ArmPkg/Library/ArmMmuLib/ArmMmuPeiLib.inf
new file mode 100644
index 000000000..ce9674ea9
--- /dev/null
+++ b/roms/edk2/ArmPkg/Library/ArmMmuLib/ArmMmuPeiLib.inf
@@ -0,0 +1,32 @@
+#/** @file
+#
+# Copyright (c) 2016 Linaro Ltd. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+#
+#**/
+
+[Defines]
+ INF_VERSION = 0x00010005
+ BASE_NAME = ArmMmuPeiLib
+ FILE_GUID = b50d8d53-1ad1-44ea-9e69-8c89d4a6d08b
+ MODULE_TYPE = PEIM
+ VERSION_STRING = 1.0
+ LIBRARY_CLASS = ArmMmuLib|PEIM
+ CONSTRUCTOR = ArmMmuPeiLibConstructor
+
+[Sources.AARCH64]
+ AArch64/ArmMmuLibCore.c
+ AArch64/ArmMmuPeiLibConstructor.c
+ AArch64/ArmMmuLibReplaceEntry.S
+
+[Packages]
+ ArmPkg/ArmPkg.dec
+ EmbeddedPkg/EmbeddedPkg.dec
+ MdePkg/MdePkg.dec
+
+[LibraryClasses]
+ ArmLib
+ CacheMaintenanceLib
+ MemoryAllocationLib