path: root/roms/edk2/OvmfPkg/Library/BaseMemEncryptSevLib
author    Angelos Mouzakitis <a.mouzakitis@virtualopensystems.com>  2023-10-10 14:33:42 +0000
committer Angelos Mouzakitis <a.mouzakitis@virtualopensystems.com>  2023-10-10 14:33:42 +0000
commit    af1a266670d040d2f4083ff309d732d648afba2a (patch)
tree      2fc46203448ddcc6f81546d379abfaeb323575e9 /roms/edk2/OvmfPkg/Library/BaseMemEncryptSevLib
parent    e02cda008591317b1625707ff8e115a4841aa889 (diff)

Add submodule dependency files (HEAD, master)
Change-Id: Iaf8d18082d3991dec7c0ebbea540f092188eb4ec
Diffstat (limited to 'roms/edk2/OvmfPkg/Library/BaseMemEncryptSevLib')
-rw-r--r--  roms/edk2/OvmfPkg/Library/BaseMemEncryptSevLib/BaseMemEncryptSevLib.inf    |  51
-rw-r--r--  roms/edk2/OvmfPkg/Library/BaseMemEncryptSevLib/Ia32/MemEncryptSevLib.c     |  84
-rw-r--r--  roms/edk2/OvmfPkg/Library/BaseMemEncryptSevLib/MemEncryptSevLibInternal.c  | 155
-rw-r--r--  roms/edk2/OvmfPkg/Library/BaseMemEncryptSevLib/X64/MemEncryptSevLib.c      |  90
-rw-r--r--  roms/edk2/OvmfPkg/Library/BaseMemEncryptSevLib/X64/VirtualMemory.c         | 897
-rw-r--r--  roms/edk2/OvmfPkg/Library/BaseMemEncryptSevLib/X64/VirtualMemory.h         | 237
6 files changed, 1514 insertions, 0 deletions
diff --git a/roms/edk2/OvmfPkg/Library/BaseMemEncryptSevLib/BaseMemEncryptSevLib.inf b/roms/edk2/OvmfPkg/Library/BaseMemEncryptSevLib/BaseMemEncryptSevLib.inf
new file mode 100644
index 000000000..7c44d0952
--- /dev/null
+++ b/roms/edk2/OvmfPkg/Library/BaseMemEncryptSevLib/BaseMemEncryptSevLib.inf
@@ -0,0 +1,51 @@
+## @file
+# Library provides the helper functions for SEV guests
+#
+# Copyright (c) 2017 Advanced Micro Devices. All rights reserved.<BR>
+#
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+#
+##
+
+[Defines]
+ INF_VERSION = 1.25
+ BASE_NAME = MemEncryptSevLib
+ FILE_GUID = c1594631-3888-4be4-949f-9c630dbc842b
+ MODULE_TYPE = BASE
+ VERSION_STRING = 1.0
+ LIBRARY_CLASS = MemEncryptSevLib|PEIM DXE_DRIVER DXE_RUNTIME_DRIVER DXE_SMM_DRIVER UEFI_DRIVER
+
+#
+# The following information is for reference only and not required by the build
+# tools.
+#
+# VALID_ARCHITECTURES = IA32 X64
+#
+
+[Packages]
+ MdeModulePkg/MdeModulePkg.dec
+ MdePkg/MdePkg.dec
+ OvmfPkg/OvmfPkg.dec
+ UefiCpuPkg/UefiCpuPkg.dec
+
+[Sources.X64]
+ MemEncryptSevLibInternal.c
+ X64/MemEncryptSevLib.c
+ X64/VirtualMemory.c
+ X64/VirtualMemory.h
+
+[Sources.IA32]
+ Ia32/MemEncryptSevLib.c
+ MemEncryptSevLibInternal.c
+
+[LibraryClasses]
+ BaseLib
+ CacheMaintenanceLib
+ CpuLib
+ DebugLib
+ MemoryAllocationLib
+ PcdLib
+
+[FeaturePcd]
+ gUefiOvmfPkgTokenSpaceGuid.PcdSmmSmramRequire
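
Note: the [Defines] block above publishes this instance under the MemEncryptSevLib library class for PEIM and the listed DXE/SMM/UEFI driver module types. As a minimal sketch of how a platform would consume it (the DSC fragment below is illustrative only and is not part of this patch):

  [LibraryClasses]
    # Hypothetical platform DSC resolution of the library class to this instance
    MemEncryptSevLib|OvmfPkg/Library/BaseMemEncryptSevLib/BaseMemEncryptSevLib.inf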
diff --git a/roms/edk2/OvmfPkg/Library/BaseMemEncryptSevLib/Ia32/MemEncryptSevLib.c b/roms/edk2/OvmfPkg/Library/BaseMemEncryptSevLib/Ia32/MemEncryptSevLib.c
new file mode 100644
index 000000000..b4f6e5738
--- /dev/null
+++ b/roms/edk2/OvmfPkg/Library/BaseMemEncryptSevLib/Ia32/MemEncryptSevLib.c
@@ -0,0 +1,84 @@
+/** @file
+
+ Secure Encrypted Virtualization (SEV) library helper function
+
+ Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
+
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <Library/BaseLib.h>
+#include <Library/DebugLib.h>
+#include <Library/MemEncryptSevLib.h>
+#include <Register/Amd/Cpuid.h>
+#include <Register/Amd/Msr.h>
+#include <Register/Cpuid.h>
+
+/**
+ This function clears memory encryption bit for the memory region specified by
+ BaseAddress and NumPages from the current page table context.
+
+ @param[in] Cr3BaseAddress Cr3 Base Address (if zero then use
+ current CR3)
+ @param[in] BaseAddress The physical address that is the start
+ address of a memory region.
+ @param[in] NumPages The number of pages from start memory
+ region.
+ @param[in] Flush Flush the caches before clearing the bit
+ (mostly TRUE except MMIO addresses)
+
+ @retval RETURN_SUCCESS The attributes were cleared for the
+ memory region.
+ @retval RETURN_INVALID_PARAMETER Number of pages is zero.
+ @retval RETURN_UNSUPPORTED Clearing the memory encryption attribute
+ is not supported
+**/
+RETURN_STATUS
+EFIAPI
+MemEncryptSevClearPageEncMask (
+ IN PHYSICAL_ADDRESS Cr3BaseAddress,
+ IN PHYSICAL_ADDRESS BaseAddress,
+ IN UINTN NumPages,
+ IN BOOLEAN Flush
+ )
+{
+ //
+ // Memory encryption bit is not accessible in 32-bit mode
+ //
+ return RETURN_UNSUPPORTED;
+}
+
+/**
+ This function sets memory encryption bit for the memory region specified by
+ BaseAddress and NumPages from the current page table context.
+
+ @param[in] Cr3BaseAddress Cr3 Base Address (if zero then use
+ current CR3)
+ @param[in] BaseAddress The physical address that is the start
+ address of a memory region.
+ @param[in] NumPages The number of pages from start memory
+ region.
+ @param[in] Flush Flush the caches before setting the bit
+ (mostly TRUE except MMIO addresses)
+
+ @retval RETURN_SUCCESS The attributes were set for the memory
+ region.
+ @retval RETURN_INVALID_PARAMETER Number of pages is zero.
+ @retval RETURN_UNSUPPORTED Setting the memory encryption attribute
+ is not supported
+**/
+RETURN_STATUS
+EFIAPI
+MemEncryptSevSetPageEncMask (
+ IN PHYSICAL_ADDRESS Cr3BaseAddress,
+ IN PHYSICAL_ADDRESS BaseAddress,
+ IN UINTN NumPages,
+ IN BOOLEAN Flush
+ )
+{
+ //
+ // Memory encryption bit is not accessible in 32-bit mode
+ //
+ return RETURN_UNSUPPORTED;
+}
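
Note: both IA32 stubs above return RETURN_UNSUPPORTED unconditionally because, as the in-code comments say, the memory encryption bit is not accessible in 32-bit mode. A minimal caller sketch in C (BufferBase and NumPages are hypothetical, not part of this patch), showing the status a 32-bit consumer has to tolerate:

  PHYSICAL_ADDRESS  BufferBase;   // hypothetical, filled in by the caller
  UINTN             NumPages;     // hypothetical, filled in by the caller
  RETURN_STATUS     Status;

  //
  // On IA32 this always fails with RETURN_UNSUPPORTED; the caller must not
  // assume the C-bit was actually cleared.
  //
  Status = MemEncryptSevClearPageEncMask (0, BufferBase, NumPages, TRUE);
  if (Status == RETURN_UNSUPPORTED) {
    DEBUG ((DEBUG_WARN, "C-bit management is not available in 32-bit mode\n"));
  }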
diff --git a/roms/edk2/OvmfPkg/Library/BaseMemEncryptSevLib/MemEncryptSevLibInternal.c b/roms/edk2/OvmfPkg/Library/BaseMemEncryptSevLib/MemEncryptSevLibInternal.c
new file mode 100644
index 000000000..02b8eb225
--- /dev/null
+++ b/roms/edk2/OvmfPkg/Library/BaseMemEncryptSevLib/MemEncryptSevLibInternal.c
@@ -0,0 +1,155 @@
+/** @file
+
+ Secure Encrypted Virtualization (SEV) library helper function
+
+ Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
+
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <Library/BaseLib.h>
+#include <Library/DebugLib.h>
+#include <Library/MemEncryptSevLib.h>
+#include <Library/PcdLib.h>
+#include <Register/Amd/Cpuid.h>
+#include <Register/Amd/Msr.h>
+#include <Register/Cpuid.h>
+#include <Register/QemuSmramSaveStateMap.h>
+#include <Register/SmramSaveStateMap.h>
+#include <Uefi/UefiBaseType.h>
+
+STATIC BOOLEAN mSevStatus = FALSE;
+STATIC BOOLEAN mSevEsStatus = FALSE;
+STATIC BOOLEAN mSevStatusChecked = FALSE;
+
+/**
+ Reads and sets the status of SEV features.
+
+ **/
+STATIC
+VOID
+EFIAPI
+InternalMemEncryptSevStatus (
+ VOID
+ )
+{
+ UINT32 RegEax;
+ MSR_SEV_STATUS_REGISTER Msr;
+ CPUID_MEMORY_ENCRYPTION_INFO_EAX Eax;
+
+ //
+  // Check if the memory encryption leaf exists
+ //
+ AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
+ if (RegEax >= CPUID_MEMORY_ENCRYPTION_INFO) {
+ //
+ // CPUID Fn8000_001F[EAX] Bit 1 (Sev supported)
+ //
+ AsmCpuid (CPUID_MEMORY_ENCRYPTION_INFO, &Eax.Uint32, NULL, NULL, NULL);
+
+ if (Eax.Bits.SevBit) {
+ //
+ // Check MSR_0xC0010131 Bit 0 (Sev Enabled)
+ //
+ Msr.Uint32 = AsmReadMsr32 (MSR_SEV_STATUS);
+ if (Msr.Bits.SevBit) {
+ mSevStatus = TRUE;
+ }
+
+ //
+ // Check MSR_0xC0010131 Bit 1 (Sev-Es Enabled)
+ //
+ if (Msr.Bits.SevEsBit) {
+ mSevEsStatus = TRUE;
+ }
+ }
+ }
+
+ mSevStatusChecked = TRUE;
+}
+
+/**
+ Returns a boolean to indicate whether SEV-ES is enabled.
+
+ @retval TRUE SEV-ES is enabled
+ @retval FALSE SEV-ES is not enabled
+**/
+BOOLEAN
+EFIAPI
+MemEncryptSevEsIsEnabled (
+ VOID
+ )
+{
+ if (!mSevStatusChecked) {
+ InternalMemEncryptSevStatus ();
+ }
+
+ return mSevEsStatus;
+}
+
+/**
+ Returns a boolean to indicate whether SEV is enabled.
+
+ @retval TRUE SEV is enabled
+ @retval FALSE SEV is not enabled
+**/
+BOOLEAN
+EFIAPI
+MemEncryptSevIsEnabled (
+ VOID
+ )
+{
+ if (!mSevStatusChecked) {
+ InternalMemEncryptSevStatus ();
+ }
+
+ return mSevStatus;
+}
+
+
+/**
+ Locate the page range that covers the initial (pre-SMBASE-relocation) SMRAM
+ Save State Map.
+
+ @param[out] BaseAddress The base address of the lowest-address page that
+ covers the initial SMRAM Save State Map.
+
+ @param[out] NumberOfPages The number of pages in the page range that covers
+ the initial SMRAM Save State Map.
+
+ @retval RETURN_SUCCESS BaseAddress and NumberOfPages have been set on
+ output.
+
+ @retval RETURN_UNSUPPORTED SMM is unavailable.
+**/
+RETURN_STATUS
+EFIAPI
+MemEncryptSevLocateInitialSmramSaveStateMapPages (
+ OUT UINTN *BaseAddress,
+ OUT UINTN *NumberOfPages
+ )
+{
+ UINTN MapStart;
+ UINTN MapEnd;
+ UINTN MapPagesStart; // MapStart rounded down to page boundary
+ UINTN MapPagesEnd; // MapEnd rounded up to page boundary
+ UINTN MapPagesSize; // difference between MapPagesStart and MapPagesEnd
+
+ if (!FeaturePcdGet (PcdSmmSmramRequire)) {
+ return RETURN_UNSUPPORTED;
+ }
+
+ MapStart = SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET;
+ MapEnd = MapStart + sizeof (QEMU_SMRAM_SAVE_STATE_MAP);
+ MapPagesStart = MapStart & ~(UINTN)EFI_PAGE_MASK;
+ MapPagesEnd = ALIGN_VALUE (MapEnd, EFI_PAGE_SIZE);
+ MapPagesSize = MapPagesEnd - MapPagesStart;
+
+ ASSERT ((MapPagesSize & EFI_PAGE_MASK) == 0);
+
+ *BaseAddress = MapPagesStart;
+ *NumberOfPages = MapPagesSize >> EFI_PAGE_SHIFT;
+
+ return RETURN_SUCCESS;
+}
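
Note: InternalMemEncryptSevStatus() caches the CPUID/MSR probe results in the module-global mSevStatus/mSevEsStatus flags, so MemEncryptSevIsEnabled() and MemEncryptSevEsIsEnabled() are cheap after the first call. A minimal C sketch of the intended call pattern (local variable names are hypothetical, not part of this patch):

  UINTN          MapBase;
  UINTN          MapPages;
  RETURN_STATUS  Status;

  if (MemEncryptSevIsEnabled ()) {
    //
    // Only meaningful when PcdSmmSmramRequire is set; otherwise the helper
    // returns RETURN_UNSUPPORTED.
    //
    Status = MemEncryptSevLocateInitialSmramSaveStateMapPages (&MapBase, &MapPages);
    if (!RETURN_ERROR (Status)) {
      DEBUG ((DEBUG_INFO, "Initial save state map: Base=0x%Lx Pages=0x%Lx\n",
        (UINT64)MapBase, (UINT64)MapPages));
    }
  }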
diff --git a/roms/edk2/OvmfPkg/Library/BaseMemEncryptSevLib/X64/MemEncryptSevLib.c b/roms/edk2/OvmfPkg/Library/BaseMemEncryptSevLib/X64/MemEncryptSevLib.c
new file mode 100644
index 000000000..cf0921e21
--- /dev/null
+++ b/roms/edk2/OvmfPkg/Library/BaseMemEncryptSevLib/X64/MemEncryptSevLib.c
@@ -0,0 +1,90 @@
+/** @file
+
+ Secure Encrypted Virtualization (SEV) library helper function
+
+ Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
+
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include <Library/BaseLib.h>
+#include <Library/DebugLib.h>
+#include <Library/MemEncryptSevLib.h>
+#include <Register/Amd/Cpuid.h>
+#include <Register/Amd/Msr.h>
+#include <Register/Cpuid.h>
+
+#include "VirtualMemory.h"
+
+/**
+ This function clears memory encryption bit for the memory region specified by
+ BaseAddress and NumPages from the current page table context.
+
+ @param[in] Cr3BaseAddress Cr3 Base Address (if zero then use
+ current CR3)
+ @param[in] BaseAddress The physical address that is the start
+ address of a memory region.
+ @param[in] NumPages The number of pages from start memory
+ region.
+ @param[in] Flush Flush the caches before clearing the bit
+ (mostly TRUE except MMIO addresses)
+
+ @retval RETURN_SUCCESS The attributes were cleared for the
+ memory region.
+ @retval RETURN_INVALID_PARAMETER Number of pages is zero.
+ @retval RETURN_UNSUPPORTED Clearing the memory encryption attribute
+ is not supported
+**/
+RETURN_STATUS
+EFIAPI
+MemEncryptSevClearPageEncMask (
+ IN PHYSICAL_ADDRESS Cr3BaseAddress,
+ IN PHYSICAL_ADDRESS BaseAddress,
+ IN UINTN NumPages,
+ IN BOOLEAN Flush
+ )
+{
+ return InternalMemEncryptSevSetMemoryDecrypted (
+ Cr3BaseAddress,
+ BaseAddress,
+ EFI_PAGES_TO_SIZE (NumPages),
+ Flush
+ );
+}
+
+/**
+ This function sets memory encryption bit for the memory region specified by
+ BaseAddress and NumPages from the current page table context.
+
+ @param[in] Cr3BaseAddress Cr3 Base Address (if zero then use
+ current CR3)
+ @param[in] BaseAddress The physical address that is the start
+ address of a memory region.
+ @param[in] NumPages The number of pages from start memory
+ region.
+ @param[in] Flush Flush the caches before setting the bit
+ (mostly TRUE except MMIO addresses)
+
+ @retval RETURN_SUCCESS The attributes were set for the memory
+ region.
+ @retval RETURN_INVALID_PARAMETER Number of pages is zero.
+ @retval RETURN_UNSUPPORTED Setting the memory encryption attribute
+ is not supported
+**/
+RETURN_STATUS
+EFIAPI
+MemEncryptSevSetPageEncMask (
+ IN PHYSICAL_ADDRESS Cr3BaseAddress,
+ IN PHYSICAL_ADDRESS BaseAddress,
+ IN UINTN NumPages,
+ IN BOOLEAN Flush
+ )
+{
+ return InternalMemEncryptSevSetMemoryEncrypted (
+ Cr3BaseAddress,
+ BaseAddress,
+ EFI_PAGES_TO_SIZE (NumPages),
+ Flush
+ );
+}
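
Note: the two X64 wrappers above only convert the page count into a byte length via EFI_PAGES_TO_SIZE() (NumPages << 12 for 4 KB pages) and delegate to the page-table walker in VirtualMemory.c. A hedged caller sketch, decrypting a two-page, identity-mapped buffer in the currently active page tables (DmaBuffer is hypothetical, not part of this patch):

  VOID           *DmaBuffer;   // hypothetical, page-aligned, allocated elsewhere
  RETURN_STATUS  Status;

  Status = MemEncryptSevClearPageEncMask (
             0,                                    // Cr3BaseAddress == 0: use current CR3
             (PHYSICAL_ADDRESS)(UINTN)DmaBuffer,   // start of the region
             2,                                    // 2 pages -> EFI_PAGES_TO_SIZE (2) == 8 KB
             TRUE                                  // flush caches before flipping the C-bit
             );
  ASSERT_RETURN_ERROR (Status);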
diff --git a/roms/edk2/OvmfPkg/Library/BaseMemEncryptSevLib/X64/VirtualMemory.c b/roms/edk2/OvmfPkg/Library/BaseMemEncryptSevLib/X64/VirtualMemory.c
new file mode 100644
index 000000000..5e110c84f
--- /dev/null
+++ b/roms/edk2/OvmfPkg/Library/BaseMemEncryptSevLib/X64/VirtualMemory.c
@@ -0,0 +1,897 @@
+/** @file
+
+ Virtual Memory Management Services to set or clear the memory encryption bit
+
+ Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
+ Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
+
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+ Code is derived from MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.c
+
+**/
+
+#include <Library/CpuLib.h>
+#include <Register/Amd/Cpuid.h>
+#include <Register/Cpuid.h>
+
+#include "VirtualMemory.h"
+
+STATIC BOOLEAN mAddressEncMaskChecked = FALSE;
+STATIC UINT64 mAddressEncMask;
+STATIC PAGE_TABLE_POOL *mPageTablePool = NULL;
+
+typedef enum {
+ SetCBit,
+ ClearCBit
+} MAP_RANGE_MODE;
+
+/**
+ Get the memory encryption mask
+
+  @return The memory encryption mask (C-bit) to apply to page table entries.
+
+**/
+STATIC
+UINT64
+GetMemEncryptionAddressMask (
+ VOID
+ )
+{
+ UINT64 EncryptionMask;
+ CPUID_MEMORY_ENCRYPTION_INFO_EBX Ebx;
+
+ if (mAddressEncMaskChecked) {
+ return mAddressEncMask;
+ }
+
+ //
+ // CPUID Fn8000_001F[EBX] Bit 0:5 (memory encryption bit position)
+ //
+ AsmCpuid (CPUID_MEMORY_ENCRYPTION_INFO, NULL, &Ebx.Uint32, NULL, NULL);
+ EncryptionMask = LShiftU64 (1, Ebx.Bits.PtePosBits);
+
+ mAddressEncMask = EncryptionMask & PAGING_1G_ADDRESS_MASK_64;
+ mAddressEncMaskChecked = TRUE;
+
+ return mAddressEncMask;
+}
+
+/**
+ Initialize a buffer pool for page table use only.
+
+  To reduce potential split operations on the page table, the pages reserved
+  for page tables should be allocated in multiples of PAGE_TABLE_POOL_UNIT_PAGES
+  and at the boundary of PAGE_TABLE_POOL_ALIGNMENT. So the page pool is always
+  initialized with a number of pages greater than or equal to the given
+  PoolPages.
+
+ Once the pages in the pool are used up, this method should be called again to
+ reserve at least another PAGE_TABLE_POOL_UNIT_PAGES. Usually this won't
+ happen often in practice.
+
+ @param[in] PoolPages The least page number of the pool to be created.
+
+ @retval TRUE The pool is initialized successfully.
+ @retval FALSE The memory is out of resource.
+**/
+STATIC
+BOOLEAN
+InitializePageTablePool (
+ IN UINTN PoolPages
+ )
+{
+ VOID *Buffer;
+
+ //
+ // Always reserve at least PAGE_TABLE_POOL_UNIT_PAGES, including one page for
+ // header.
+ //
+ PoolPages += 1; // Add one page for header.
+ PoolPages = ((PoolPages - 1) / PAGE_TABLE_POOL_UNIT_PAGES + 1) *
+ PAGE_TABLE_POOL_UNIT_PAGES;
+ Buffer = AllocateAlignedPages (PoolPages, PAGE_TABLE_POOL_ALIGNMENT);
+ if (Buffer == NULL) {
+ DEBUG ((DEBUG_ERROR, "ERROR: Out of aligned pages\r\n"));
+ return FALSE;
+ }
+
+ //
+  // Link all pools into a list for easier tracking later.
+ //
+ if (mPageTablePool == NULL) {
+ mPageTablePool = Buffer;
+ mPageTablePool->NextPool = mPageTablePool;
+ } else {
+ ((PAGE_TABLE_POOL *)Buffer)->NextPool = mPageTablePool->NextPool;
+ mPageTablePool->NextPool = Buffer;
+ mPageTablePool = Buffer;
+ }
+
+ //
+ // Reserve one page for pool header.
+ //
+ mPageTablePool->FreePages = PoolPages - 1;
+ mPageTablePool->Offset = EFI_PAGES_TO_SIZE (1);
+
+ return TRUE;
+}
+
+/**
+ This API provides a way to allocate memory for page table.
+
+ This API can be called more than once to allocate memory for page tables.
+
+ Allocates the number of 4KB pages and returns a pointer to the allocated
+ buffer. The buffer returned is aligned on a 4KB boundary.
+
+ If Pages is 0, then NULL is returned.
+ If there is not enough memory remaining to satisfy the request, then NULL is
+ returned.
+
+ @param Pages The number of 4 KB pages to allocate.
+
+ @return A pointer to the allocated buffer or NULL if allocation fails.
+
+**/
+STATIC
+VOID *
+EFIAPI
+AllocatePageTableMemory (
+ IN UINTN Pages
+ )
+{
+ VOID *Buffer;
+
+ if (Pages == 0) {
+ return NULL;
+ }
+
+ //
+ // Renew the pool if necessary.
+ //
+ if (mPageTablePool == NULL ||
+ Pages > mPageTablePool->FreePages) {
+ if (!InitializePageTablePool (Pages)) {
+ return NULL;
+ }
+ }
+
+ Buffer = (UINT8 *)mPageTablePool + mPageTablePool->Offset;
+
+ mPageTablePool->Offset += EFI_PAGES_TO_SIZE (Pages);
+ mPageTablePool->FreePages -= Pages;
+
+ DEBUG ((
+ DEBUG_VERBOSE,
+ "%a:%a: Buffer=0x%Lx Pages=%ld\n",
+ gEfiCallerBaseName,
+ __FUNCTION__,
+ Buffer,
+ Pages
+ ));
+
+ return Buffer;
+}
+
+
+/**
+ Split 2M page to 4K.
+
+ @param[in] PhysicalAddress Start physical address the 2M page
+ covered.
+ @param[in, out] PageEntry2M Pointer to 2M page entry.
+ @param[in] StackBase Stack base address.
+ @param[in] StackSize Stack size.
+
+**/
+STATIC
+VOID
+Split2MPageTo4K (
+ IN PHYSICAL_ADDRESS PhysicalAddress,
+ IN OUT UINT64 *PageEntry2M,
+ IN PHYSICAL_ADDRESS StackBase,
+ IN UINTN StackSize
+ )
+{
+ PHYSICAL_ADDRESS PhysicalAddress4K;
+ UINTN IndexOfPageTableEntries;
+ PAGE_TABLE_4K_ENTRY *PageTableEntry, *PageTableEntry1;
+ UINT64 AddressEncMask;
+
+ PageTableEntry = AllocatePageTableMemory(1);
+
+ PageTableEntry1 = PageTableEntry;
+
+ AddressEncMask = GetMemEncryptionAddressMask ();
+
+ ASSERT (PageTableEntry != NULL);
+ ASSERT (*PageEntry2M & AddressEncMask);
+
+ PhysicalAddress4K = PhysicalAddress;
+ for (IndexOfPageTableEntries = 0;
+ IndexOfPageTableEntries < 512;
+ (IndexOfPageTableEntries++,
+ PageTableEntry++,
+ PhysicalAddress4K += SIZE_4KB)) {
+ //
+ // Fill in the Page Table entries
+ //
+ PageTableEntry->Uint64 = (UINT64) PhysicalAddress4K | AddressEncMask;
+ PageTableEntry->Bits.ReadWrite = 1;
+ PageTableEntry->Bits.Present = 1;
+ if ((PhysicalAddress4K >= StackBase) &&
+ (PhysicalAddress4K < StackBase + StackSize)) {
+ //
+ // Set Nx bit for stack.
+ //
+ PageTableEntry->Bits.Nx = 1;
+ }
+ }
+
+ //
+ // Fill in 2M page entry.
+ //
+ *PageEntry2M = ((UINT64)(UINTN)PageTableEntry1 |
+ IA32_PG_P | IA32_PG_RW | AddressEncMask);
+}
+
+/**
+ Set one page of page table pool memory to be read-only.
+
+ @param[in] PageTableBase Base address of page table (CR3).
+ @param[in] Address Start address of a page to be set as read-only.
+ @param[in] Level4Paging Level 4 paging flag.
+
+**/
+STATIC
+VOID
+SetPageTablePoolReadOnly (
+ IN UINTN PageTableBase,
+ IN EFI_PHYSICAL_ADDRESS Address,
+ IN BOOLEAN Level4Paging
+ )
+{
+ UINTN Index;
+ UINTN EntryIndex;
+ UINT64 AddressEncMask;
+ EFI_PHYSICAL_ADDRESS PhysicalAddress;
+ UINT64 *PageTable;
+ UINT64 *NewPageTable;
+ UINT64 PageAttr;
+ UINT64 LevelSize[5];
+ UINT64 LevelMask[5];
+ UINTN LevelShift[5];
+ UINTN Level;
+ UINT64 PoolUnitSize;
+
+ ASSERT (PageTableBase != 0);
+
+ //
+ // Since the page table is always from page table pool, which is always
+ // located at the boundary of PcdPageTablePoolAlignment, we just need to
+ // set the whole pool unit to be read-only.
+ //
+ Address = Address & PAGE_TABLE_POOL_ALIGN_MASK;
+
+ LevelShift[1] = PAGING_L1_ADDRESS_SHIFT;
+ LevelShift[2] = PAGING_L2_ADDRESS_SHIFT;
+ LevelShift[3] = PAGING_L3_ADDRESS_SHIFT;
+ LevelShift[4] = PAGING_L4_ADDRESS_SHIFT;
+
+ LevelMask[1] = PAGING_4K_ADDRESS_MASK_64;
+ LevelMask[2] = PAGING_2M_ADDRESS_MASK_64;
+ LevelMask[3] = PAGING_1G_ADDRESS_MASK_64;
+ LevelMask[4] = PAGING_1G_ADDRESS_MASK_64;
+
+ LevelSize[1] = SIZE_4KB;
+ LevelSize[2] = SIZE_2MB;
+ LevelSize[3] = SIZE_1GB;
+ LevelSize[4] = SIZE_512GB;
+
+ AddressEncMask = GetMemEncryptionAddressMask() &
+ PAGING_1G_ADDRESS_MASK_64;
+ PageTable = (UINT64 *)(UINTN)PageTableBase;
+ PoolUnitSize = PAGE_TABLE_POOL_UNIT_SIZE;
+
+ for (Level = (Level4Paging) ? 4 : 3; Level > 0; --Level) {
+ Index = ((UINTN)RShiftU64 (Address, LevelShift[Level]));
+ Index &= PAGING_PAE_INDEX_MASK;
+
+ PageAttr = PageTable[Index];
+ if ((PageAttr & IA32_PG_PS) == 0) {
+ //
+ // Go to next level of table.
+ //
+ PageTable = (UINT64 *)(UINTN)(PageAttr & ~AddressEncMask &
+ PAGING_4K_ADDRESS_MASK_64);
+ continue;
+ }
+
+ if (PoolUnitSize >= LevelSize[Level]) {
+ //
+ // Clear R/W bit if current page granularity is not larger than pool unit
+ // size.
+ //
+ if ((PageAttr & IA32_PG_RW) != 0) {
+ while (PoolUnitSize > 0) {
+ //
+        // PAGE_TABLE_POOL_UNIT_SIZE and PAGE_TABLE_POOL_ALIGNMENT fit within
+        // one page (2MB), so we don't need to update attributes for pages
+        // crossing a page directory. The ASSERT below is for that purpose.
+ //
+ ASSERT (Index < EFI_PAGE_SIZE/sizeof (UINT64));
+
+ PageTable[Index] &= ~(UINT64)IA32_PG_RW;
+ PoolUnitSize -= LevelSize[Level];
+
+ ++Index;
+ }
+ }
+
+ break;
+
+ } else {
+ //
+      // A smaller page granularity is needed.
+ //
+ ASSERT (Level > 1);
+
+ NewPageTable = AllocatePageTableMemory (1);
+ ASSERT (NewPageTable != NULL);
+
+ PhysicalAddress = PageAttr & LevelMask[Level];
+ for (EntryIndex = 0;
+ EntryIndex < EFI_PAGE_SIZE/sizeof (UINT64);
+ ++EntryIndex) {
+ NewPageTable[EntryIndex] = PhysicalAddress | AddressEncMask |
+ IA32_PG_P | IA32_PG_RW;
+ if (Level > 2) {
+ NewPageTable[EntryIndex] |= IA32_PG_PS;
+ }
+ PhysicalAddress += LevelSize[Level - 1];
+ }
+
+ PageTable[Index] = (UINT64)(UINTN)NewPageTable | AddressEncMask |
+ IA32_PG_P | IA32_PG_RW;
+ PageTable = NewPageTable;
+ }
+ }
+}
+
+/**
+  Prevent the memory pages used for the page table from being overwritten.
+
+ @param[in] PageTableBase Base address of page table (CR3).
+ @param[in] Level4Paging Level 4 paging flag.
+
+**/
+STATIC
+VOID
+EnablePageTableProtection (
+ IN UINTN PageTableBase,
+ IN BOOLEAN Level4Paging
+ )
+{
+ PAGE_TABLE_POOL *HeadPool;
+ PAGE_TABLE_POOL *Pool;
+ UINT64 PoolSize;
+ EFI_PHYSICAL_ADDRESS Address;
+
+ if (mPageTablePool == NULL) {
+ return;
+ }
+
+ //
+ // SetPageTablePoolReadOnly might update mPageTablePool. It's safer to
+ // remember original one in advance.
+ //
+ HeadPool = mPageTablePool;
+ Pool = HeadPool;
+ do {
+ Address = (EFI_PHYSICAL_ADDRESS)(UINTN)Pool;
+ PoolSize = Pool->Offset + EFI_PAGES_TO_SIZE (Pool->FreePages);
+
+ //
+    // The size of one pool must be a multiple of PAGE_TABLE_POOL_UNIT_SIZE,
+    // which is one of the page sizes supported by the processor (2MB by
+    // default). Let's apply the protection to the pool units one by one.
+ //
+ while (PoolSize > 0) {
+ SetPageTablePoolReadOnly(PageTableBase, Address, Level4Paging);
+ Address += PAGE_TABLE_POOL_UNIT_SIZE;
+ PoolSize -= PAGE_TABLE_POOL_UNIT_SIZE;
+ }
+
+ Pool = Pool->NextPool;
+ } while (Pool != HeadPool);
+
+}
+
+
+/**
+ Split 1G page to 2M.
+
+ @param[in] PhysicalAddress Start physical address the 1G page
+ covered.
+ @param[in, out] PageEntry1G Pointer to 1G page entry.
+ @param[in] StackBase Stack base address.
+ @param[in] StackSize Stack size.
+
+**/
+STATIC
+VOID
+Split1GPageTo2M (
+ IN PHYSICAL_ADDRESS PhysicalAddress,
+ IN OUT UINT64 *PageEntry1G,
+ IN PHYSICAL_ADDRESS StackBase,
+ IN UINTN StackSize
+ )
+{
+ PHYSICAL_ADDRESS PhysicalAddress2M;
+ UINTN IndexOfPageDirectoryEntries;
+ PAGE_TABLE_ENTRY *PageDirectoryEntry;
+ UINT64 AddressEncMask;
+
+ PageDirectoryEntry = AllocatePageTableMemory(1);
+
+ AddressEncMask = GetMemEncryptionAddressMask ();
+ ASSERT (PageDirectoryEntry != NULL);
+ ASSERT (*PageEntry1G & GetMemEncryptionAddressMask ());
+ //
+ // Fill in 1G page entry.
+ //
+ *PageEntry1G = ((UINT64)(UINTN)PageDirectoryEntry |
+ IA32_PG_P | IA32_PG_RW | AddressEncMask);
+
+ PhysicalAddress2M = PhysicalAddress;
+ for (IndexOfPageDirectoryEntries = 0;
+ IndexOfPageDirectoryEntries < 512;
+ (IndexOfPageDirectoryEntries++,
+ PageDirectoryEntry++,
+ PhysicalAddress2M += SIZE_2MB)) {
+ if ((PhysicalAddress2M < StackBase + StackSize) &&
+ ((PhysicalAddress2M + SIZE_2MB) > StackBase)) {
+ //
+ // Need to split this 2M page that covers stack range.
+ //
+ Split2MPageTo4K (
+ PhysicalAddress2M,
+ (UINT64 *)PageDirectoryEntry,
+ StackBase,
+ StackSize
+ );
+ } else {
+ //
+ // Fill in the Page Directory entries
+ //
+ PageDirectoryEntry->Uint64 = (UINT64) PhysicalAddress2M | AddressEncMask;
+ PageDirectoryEntry->Bits.ReadWrite = 1;
+ PageDirectoryEntry->Bits.Present = 1;
+ PageDirectoryEntry->Bits.MustBe1 = 1;
+ }
+ }
+}
+
+
+/**
+ Set or Clear the memory encryption bit
+
+  @param[in] PageTablePointer Page table entry pointer (PTE).
+ @param[in] Mode Set or Clear encryption bit
+
+**/
+STATIC VOID
+SetOrClearCBit(
+ IN OUT UINT64* PageTablePointer,
+ IN MAP_RANGE_MODE Mode
+ )
+{
+ UINT64 AddressEncMask;
+
+ AddressEncMask = GetMemEncryptionAddressMask ();
+
+ if (Mode == SetCBit) {
+ *PageTablePointer |= AddressEncMask;
+ } else {
+ *PageTablePointer &= ~AddressEncMask;
+ }
+
+}
+
+/**
+ Check the WP status in CR0 register. This bit is used to lock or unlock write
+ access to pages marked as read-only.
+
+ @retval TRUE Write protection is enabled.
+ @retval FALSE Write protection is disabled.
+**/
+STATIC
+BOOLEAN
+IsReadOnlyPageWriteProtected (
+ VOID
+ )
+{
+ return ((AsmReadCr0 () & BIT16) != 0);
+}
+
+
+/**
+ Disable Write Protect on pages marked as read-only.
+**/
+STATIC
+VOID
+DisableReadOnlyPageWriteProtect (
+ VOID
+ )
+{
+ AsmWriteCr0 (AsmReadCr0() & ~BIT16);
+}
+
+/**
+ Enable Write Protect on pages marked as read-only.
+**/
+VOID
+EnableReadOnlyPageWriteProtect (
+ VOID
+ )
+{
+ AsmWriteCr0 (AsmReadCr0() | BIT16);
+}
+
+
+/**
+ This function either sets or clears memory encryption bit for the memory
+ region specified by PhysicalAddress and Length from the current page table
+ context.
+
+  The function iterates through the physical address range one page at a time
+  and sets or clears the memory encryption mask in the page table. If a given
+  physical address range is covered by a large page, it attempts to change the
+  attribute in one go (based on the size); otherwise it splits the large page
+  into smaller pages (e.g. a 2M page into 4K pages) and then sets or clears
+  the encryption bit at the smallest page size.
+
+ @param[in] Cr3BaseAddress Cr3 Base Address (if zero then use
+ current CR3)
+ @param[in] PhysicalAddress The physical address that is the start
+ address of a memory region.
+ @param[in] Length The length of memory region
+ @param[in] Mode Set or Clear mode
+ @param[in] CacheFlush Flush the caches before applying the
+ encryption mask
+
+ @retval RETURN_SUCCESS The attributes were cleared for the
+ memory region.
+ @retval RETURN_INVALID_PARAMETER Number of pages is zero.
+  @retval RETURN_UNSUPPORTED Setting the memory encryption attribute
+ is not supported
+**/
+
+STATIC
+RETURN_STATUS
+EFIAPI
+SetMemoryEncDec (
+ IN PHYSICAL_ADDRESS Cr3BaseAddress,
+ IN PHYSICAL_ADDRESS PhysicalAddress,
+ IN UINTN Length,
+ IN MAP_RANGE_MODE Mode,
+ IN BOOLEAN CacheFlush
+ )
+{
+ PAGE_MAP_AND_DIRECTORY_POINTER *PageMapLevel4Entry;
+ PAGE_MAP_AND_DIRECTORY_POINTER *PageUpperDirectoryPointerEntry;
+ PAGE_MAP_AND_DIRECTORY_POINTER *PageDirectoryPointerEntry;
+ PAGE_TABLE_1G_ENTRY *PageDirectory1GEntry;
+ PAGE_TABLE_ENTRY *PageDirectory2MEntry;
+ PAGE_TABLE_4K_ENTRY *PageTableEntry;
+ UINT64 PgTableMask;
+ UINT64 AddressEncMask;
+ BOOLEAN IsWpEnabled;
+ RETURN_STATUS Status;
+
+ //
+ // Set PageMapLevel4Entry to suppress incorrect compiler/analyzer warnings.
+ //
+ PageMapLevel4Entry = NULL;
+
+ DEBUG ((
+ DEBUG_VERBOSE,
+ "%a:%a: Cr3Base=0x%Lx Physical=0x%Lx Length=0x%Lx Mode=%a CacheFlush=%u\n",
+ gEfiCallerBaseName,
+ __FUNCTION__,
+ Cr3BaseAddress,
+ PhysicalAddress,
+ (UINT64)Length,
+ (Mode == SetCBit) ? "Encrypt" : "Decrypt",
+ (UINT32)CacheFlush
+ ));
+
+ //
+ // Check if we have a valid memory encryption mask
+ //
+ AddressEncMask = GetMemEncryptionAddressMask ();
+ if (!AddressEncMask) {
+ return RETURN_ACCESS_DENIED;
+ }
+
+ PgTableMask = AddressEncMask | EFI_PAGE_MASK;
+
+ if (Length == 0) {
+ return RETURN_INVALID_PARAMETER;
+ }
+
+ //
+ // We are going to change the memory encryption attribute from C=0 -> C=1 or
+  // vice versa. Flush the caches to ensure that data is written into memory
+  // with the correct C-bit.
+ //
+ if (CacheFlush) {
+ WriteBackInvalidateDataCacheRange((VOID*) (UINTN)PhysicalAddress, Length);
+ }
+
+ //
+ // Make sure that the page table is changeable.
+ //
+ IsWpEnabled = IsReadOnlyPageWriteProtected ();
+ if (IsWpEnabled) {
+ DisableReadOnlyPageWriteProtect ();
+ }
+
+ Status = EFI_SUCCESS;
+
+ while (Length)
+ {
+ //
+ // If Cr3BaseAddress is not specified then read the current CR3
+ //
+ if (Cr3BaseAddress == 0) {
+ Cr3BaseAddress = AsmReadCr3();
+ }
+
+ PageMapLevel4Entry = (VOID*) (Cr3BaseAddress & ~PgTableMask);
+ PageMapLevel4Entry += PML4_OFFSET(PhysicalAddress);
+ if (!PageMapLevel4Entry->Bits.Present) {
+ DEBUG ((
+ DEBUG_ERROR,
+ "%a:%a: bad PML4 for Physical=0x%Lx\n",
+ gEfiCallerBaseName,
+ __FUNCTION__,
+ PhysicalAddress
+ ));
+ Status = RETURN_NO_MAPPING;
+ goto Done;
+ }
+
+ PageDirectory1GEntry = (VOID *)(
+ (PageMapLevel4Entry->Bits.PageTableBaseAddress <<
+ 12) & ~PgTableMask
+ );
+ PageDirectory1GEntry += PDP_OFFSET(PhysicalAddress);
+ if (!PageDirectory1GEntry->Bits.Present) {
+ DEBUG ((
+ DEBUG_ERROR,
+ "%a:%a: bad PDPE for Physical=0x%Lx\n",
+ gEfiCallerBaseName,
+ __FUNCTION__,
+ PhysicalAddress
+ ));
+ Status = RETURN_NO_MAPPING;
+ goto Done;
+ }
+
+ //
+ // If the MustBe1 bit is not 1, it's not actually a 1GB entry
+ //
+ if (PageDirectory1GEntry->Bits.MustBe1) {
+ //
+ // Valid 1GB page
+ // If we have at least 1GB to go, we can just update this entry
+ //
+ if (!(PhysicalAddress & (BIT30 - 1)) && Length >= BIT30) {
+ SetOrClearCBit(&PageDirectory1GEntry->Uint64, Mode);
+ DEBUG ((
+ DEBUG_VERBOSE,
+ "%a:%a: updated 1GB entry for Physical=0x%Lx\n",
+ gEfiCallerBaseName,
+ __FUNCTION__,
+ PhysicalAddress
+ ));
+ PhysicalAddress += BIT30;
+ Length -= BIT30;
+ } else {
+ //
+ // We must split the page
+ //
+ DEBUG ((
+ DEBUG_VERBOSE,
+ "%a:%a: splitting 1GB page for Physical=0x%Lx\n",
+ gEfiCallerBaseName,
+ __FUNCTION__,
+ PhysicalAddress
+ ));
+ Split1GPageTo2M (
+ (UINT64)PageDirectory1GEntry->Bits.PageTableBaseAddress << 30,
+ (UINT64 *)PageDirectory1GEntry,
+ 0,
+ 0
+ );
+ continue;
+ }
+ } else {
+ //
+ // Actually a PDP
+ //
+ PageUpperDirectoryPointerEntry =
+ (PAGE_MAP_AND_DIRECTORY_POINTER *)PageDirectory1GEntry;
+ PageDirectory2MEntry =
+ (VOID *)(
+ (PageUpperDirectoryPointerEntry->Bits.PageTableBaseAddress <<
+ 12) & ~PgTableMask
+ );
+ PageDirectory2MEntry += PDE_OFFSET(PhysicalAddress);
+ if (!PageDirectory2MEntry->Bits.Present) {
+ DEBUG ((
+ DEBUG_ERROR,
+ "%a:%a: bad PDE for Physical=0x%Lx\n",
+ gEfiCallerBaseName,
+ __FUNCTION__,
+ PhysicalAddress
+ ));
+ Status = RETURN_NO_MAPPING;
+ goto Done;
+ }
+ //
+ // If the MustBe1 bit is not a 1, it's not a 2MB entry
+ //
+ if (PageDirectory2MEntry->Bits.MustBe1) {
+ //
+ // Valid 2MB page
+ // If we have at least 2MB left to go, we can just update this entry
+ //
+ if (!(PhysicalAddress & (BIT21-1)) && Length >= BIT21) {
+ SetOrClearCBit (&PageDirectory2MEntry->Uint64, Mode);
+ PhysicalAddress += BIT21;
+ Length -= BIT21;
+ } else {
+ //
+ // We must split up this page into 4K pages
+ //
+ DEBUG ((
+ DEBUG_VERBOSE,
+ "%a:%a: splitting 2MB page for Physical=0x%Lx\n",
+ gEfiCallerBaseName,
+ __FUNCTION__,
+ PhysicalAddress
+ ));
+ Split2MPageTo4K (
+ (UINT64)PageDirectory2MEntry->Bits.PageTableBaseAddress << 21,
+ (UINT64 *)PageDirectory2MEntry,
+ 0,
+ 0
+ );
+ continue;
+ }
+ } else {
+ PageDirectoryPointerEntry =
+ (PAGE_MAP_AND_DIRECTORY_POINTER *)PageDirectory2MEntry;
+ PageTableEntry =
+ (VOID *)(
+ (PageDirectoryPointerEntry->Bits.PageTableBaseAddress <<
+ 12) & ~PgTableMask
+ );
+ PageTableEntry += PTE_OFFSET(PhysicalAddress);
+ if (!PageTableEntry->Bits.Present) {
+ DEBUG ((
+ DEBUG_ERROR,
+ "%a:%a: bad PTE for Physical=0x%Lx\n",
+ gEfiCallerBaseName,
+ __FUNCTION__,
+ PhysicalAddress
+ ));
+ Status = RETURN_NO_MAPPING;
+ goto Done;
+ }
+ SetOrClearCBit (&PageTableEntry->Uint64, Mode);
+ PhysicalAddress += EFI_PAGE_SIZE;
+ Length -= EFI_PAGE_SIZE;
+ }
+ }
+ }
+
+ //
+ // Protect the page table by marking the memory used for page table to be
+ // read-only.
+ //
+ if (IsWpEnabled) {
+ EnablePageTableProtection ((UINTN)PageMapLevel4Entry, TRUE);
+ }
+
+ //
+ // Flush TLB
+ //
+ CpuFlushTlb();
+
+Done:
+ //
+ // Restore page table write protection, if any.
+ //
+ if (IsWpEnabled) {
+ EnableReadOnlyPageWriteProtect ();
+ }
+
+ return Status;
+}
+
+/**
+ This function clears memory encryption bit for the memory region specified by
+ PhysicalAddress and Length from the current page table context.
+
+ @param[in] Cr3BaseAddress Cr3 Base Address (if zero then use
+ current CR3)
+ @param[in] PhysicalAddress The physical address that is the start
+ address of a memory region.
+ @param[in] Length The length of memory region
+ @param[in] Flush Flush the caches before applying the
+ encryption mask
+
+ @retval RETURN_SUCCESS The attributes were cleared for the
+ memory region.
+ @retval RETURN_INVALID_PARAMETER Number of pages is zero.
+  @retval RETURN_UNSUPPORTED Clearing the memory encryption attribute
+ is not supported
+**/
+RETURN_STATUS
+EFIAPI
+InternalMemEncryptSevSetMemoryDecrypted (
+ IN PHYSICAL_ADDRESS Cr3BaseAddress,
+ IN PHYSICAL_ADDRESS PhysicalAddress,
+ IN UINTN Length,
+ IN BOOLEAN Flush
+ )
+{
+
+ return SetMemoryEncDec (
+ Cr3BaseAddress,
+ PhysicalAddress,
+ Length,
+ ClearCBit,
+ Flush
+ );
+}
+
+/**
+ This function sets memory encryption bit for the memory region specified by
+ PhysicalAddress and Length from the current page table context.
+
+ @param[in] Cr3BaseAddress Cr3 Base Address (if zero then use
+ current CR3)
+ @param[in] PhysicalAddress The physical address that is the start
+ address of a memory region.
+ @param[in] Length The length of memory region
+ @param[in] Flush Flush the caches before applying the
+ encryption mask
+
+ @retval RETURN_SUCCESS The attributes were set for the memory
+ region.
+ @retval RETURN_INVALID_PARAMETER Number of pages is zero.
+  @retval RETURN_UNSUPPORTED Setting the memory encryption attribute
+ is not supported
+**/
+RETURN_STATUS
+EFIAPI
+InternalMemEncryptSevSetMemoryEncrypted (
+ IN PHYSICAL_ADDRESS Cr3BaseAddress,
+ IN PHYSICAL_ADDRESS PhysicalAddress,
+ IN UINTN Length,
+ IN BOOLEAN Flush
+ )
+{
+ return SetMemoryEncDec (
+ Cr3BaseAddress,
+ PhysicalAddress,
+ Length,
+ SetCBit,
+ Flush
+ );
+}
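
Note: the whole walker above keys off the single-bit mask returned by GetMemEncryptionAddressMask(), which is read once from CPUID Fn8000_001F[EBX] bits 5:0 and cached. A worked sketch in C, assuming a C-bit position of 47 (a value commonly reported on EPYC parts; real code must always take it from CPUID):

  UINT64  EncMask;

  //
  // With Ebx.Bits.PtePosBits == 47, the mask is bit 47 of the page-table entry.
  //
  EncMask = LShiftU64 (1, 47);
  ASSERT (EncMask == 0x0000800000000000ULL);

  //
  // SetOrClearCBit() then ORs this mask into a leaf entry (SetCBit) or clears
  // it with the complement (ClearCBit), and SetMemoryEncDec() flushes the TLB
  // afterwards.
  //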
diff --git a/roms/edk2/OvmfPkg/Library/BaseMemEncryptSevLib/X64/VirtualMemory.h b/roms/edk2/OvmfPkg/Library/BaseMemEncryptSevLib/X64/VirtualMemory.h
new file mode 100644
index 000000000..26d26cd92
--- /dev/null
+++ b/roms/edk2/OvmfPkg/Library/BaseMemEncryptSevLib/X64/VirtualMemory.h
@@ -0,0 +1,237 @@
+/** @file
+
+ Virtual Memory Management Services to set or clear the memory encryption bit
+
+ Copyright (c) 2006 - 2016, Intel Corporation. All rights reserved.<BR>
+ Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
+
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+ Code is derived from MdeModulePkg/Core/DxeIplPeim/X64/VirtualMemory.h
+
+**/
+
+#ifndef __VIRTUAL_MEMORY__
+#define __VIRTUAL_MEMORY__
+
+#include <Library/BaseLib.h>
+#include <Library/BaseMemoryLib.h>
+#include <Library/CacheMaintenanceLib.h>
+#include <Library/DebugLib.h>
+#include <Library/MemoryAllocationLib.h>
+#include <Uefi.h>
+
+#define SYS_CODE64_SEL 0x38
+
+#pragma pack(1)
+
+//
+// Page-Map Level-4 Offset (PML4) and
+// Page-Directory-Pointer Offset (PDPE) entries 4K & 2MB
+//
+
+typedef union {
+ struct {
+ UINT64 Present:1; // 0 = Not present in memory,
+ // 1 = Present in memory
+ UINT64 ReadWrite:1; // 0 = Read-Only, 1= Read/Write
+ UINT64 UserSupervisor:1; // 0 = Supervisor, 1=User
+ UINT64 WriteThrough:1; // 0 = Write-Back caching,
+ // 1 = Write-Through caching
+ UINT64 CacheDisabled:1; // 0 = Cached, 1=Non-Cached
+ UINT64 Accessed:1; // 0 = Not accessed,
+ // 1 = Accessed (set by CPU)
+ UINT64 Reserved:1; // Reserved
+ UINT64 MustBeZero:2; // Must Be Zero
+ UINT64 Available:3; // Available for use by system software
+ UINT64 PageTableBaseAddress:40; // Page Table Base Address
+ UINT64 AvabilableHigh:11; // Available for use by system software
+ UINT64 Nx:1; // No Execute bit
+ } Bits;
+ UINT64 Uint64;
+} PAGE_MAP_AND_DIRECTORY_POINTER;
+
+//
+// Page Table Entry 4KB
+//
+typedef union {
+ struct {
+ UINT64 Present:1; // 0 = Not present in memory,
+ // 1 = Present in memory
+ UINT64 ReadWrite:1; // 0 = Read-Only, 1= Read/Write
+ UINT64 UserSupervisor:1; // 0 = Supervisor, 1=User
+ UINT64 WriteThrough:1; // 0 = Write-Back caching,
+ // 1 = Write-Through caching
+ UINT64 CacheDisabled:1; // 0 = Cached, 1=Non-Cached
+ UINT64 Accessed:1; // 0 = Not accessed,
+ // 1 = Accessed (set by CPU)
+ UINT64 Dirty:1; // 0 = Not Dirty, 1 = written by
+ // processor on access to page
+ UINT64 PAT:1; //
+ UINT64 Global:1; // 0 = Not global page, 1 = global page
+ // TLB not cleared on CR3 write
+ UINT64 Available:3; // Available for use by system software
+ UINT64 PageTableBaseAddress:40; // Page Table Base Address
+ UINT64 AvabilableHigh:11; // Available for use by system software
+ UINT64 Nx:1; // 0 = Execute Code,
+ // 1 = No Code Execution
+ } Bits;
+ UINT64 Uint64;
+} PAGE_TABLE_4K_ENTRY;
+
+//
+// Page Table Entry 2MB
+//
+typedef union {
+ struct {
+ UINT64 Present:1; // 0 = Not present in memory,
+ // 1 = Present in memory
+ UINT64 ReadWrite:1; // 0 = Read-Only, 1= Read/Write
+ UINT64 UserSupervisor:1; // 0 = Supervisor, 1=User
+ UINT64 WriteThrough:1; // 0 = Write-Back caching,
+ // 1=Write-Through caching
+ UINT64 CacheDisabled:1; // 0 = Cached, 1=Non-Cached
+ UINT64 Accessed:1; // 0 = Not accessed,
+ // 1 = Accessed (set by CPU)
+ UINT64 Dirty:1; // 0 = Not Dirty, 1 = written by
+ // processor on access to page
+ UINT64 MustBe1:1; // Must be 1
+ UINT64 Global:1; // 0 = Not global page, 1 = global page
+ // TLB not cleared on CR3 write
+ UINT64 Available:3; // Available for use by system software
+ UINT64 PAT:1; //
+ UINT64 MustBeZero:8; // Must be zero;
+ UINT64 PageTableBaseAddress:31; // Page Table Base Address
+ UINT64 AvabilableHigh:11; // Available for use by system software
+ UINT64 Nx:1; // 0 = Execute Code,
+ // 1 = No Code Execution
+ } Bits;
+ UINT64 Uint64;
+} PAGE_TABLE_ENTRY;
+
+//
+// Page Table Entry 1GB
+//
+typedef union {
+ struct {
+ UINT64 Present:1; // 0 = Not present in memory,
+ // 1 = Present in memory
+ UINT64 ReadWrite:1; // 0 = Read-Only, 1= Read/Write
+ UINT64 UserSupervisor:1; // 0 = Supervisor, 1=User
+ UINT64 WriteThrough:1; // 0 = Write-Back caching,
+ // 1 = Write-Through caching
+ UINT64 CacheDisabled:1; // 0 = Cached, 1=Non-Cached
+ UINT64 Accessed:1; // 0 = Not accessed,
+ // 1 = Accessed (set by CPU)
+ UINT64 Dirty:1; // 0 = Not Dirty, 1 = written by
+ // processor on access to page
+ UINT64 MustBe1:1; // Must be 1
+ UINT64 Global:1; // 0 = Not global page, 1 = global page
+ // TLB not cleared on CR3 write
+ UINT64 Available:3; // Available for use by system software
+ UINT64 PAT:1; //
+ UINT64 MustBeZero:17; // Must be zero;
+ UINT64 PageTableBaseAddress:22; // Page Table Base Address
+ UINT64 AvabilableHigh:11; // Available for use by system software
+ UINT64 Nx:1; // 0 = Execute Code,
+ // 1 = No Code Execution
+ } Bits;
+ UINT64 Uint64;
+} PAGE_TABLE_1G_ENTRY;
+
+#pragma pack()
+
+#define IA32_PG_P BIT0
+#define IA32_PG_RW BIT1
+#define IA32_PG_PS BIT7
+
+#define PAGING_PAE_INDEX_MASK 0x1FF
+
+#define PAGING_4K_ADDRESS_MASK_64 0x000FFFFFFFFFF000ull
+#define PAGING_2M_ADDRESS_MASK_64 0x000FFFFFFFE00000ull
+#define PAGING_1G_ADDRESS_MASK_64 0x000FFFFFC0000000ull
+
+#define PAGING_L1_ADDRESS_SHIFT 12
+#define PAGING_L2_ADDRESS_SHIFT 21
+#define PAGING_L3_ADDRESS_SHIFT 30
+#define PAGING_L4_ADDRESS_SHIFT 39
+
+#define PAGING_PML4E_NUMBER 4
+
+#define PAGETABLE_ENTRY_MASK ((1UL << 9) - 1)
+#define PML4_OFFSET(x) ( (x >> 39) & PAGETABLE_ENTRY_MASK)
+#define PDP_OFFSET(x) ( (x >> 30) & PAGETABLE_ENTRY_MASK)
+#define PDE_OFFSET(x) ( (x >> 21) & PAGETABLE_ENTRY_MASK)
+#define PTE_OFFSET(x) ( (x >> 12) & PAGETABLE_ENTRY_MASK)
+#define PAGING_1G_ADDRESS_MASK_64 0x000FFFFFC0000000ull
+
+#define PAGE_TABLE_POOL_ALIGNMENT BASE_2MB
+#define PAGE_TABLE_POOL_UNIT_SIZE SIZE_2MB
+#define PAGE_TABLE_POOL_UNIT_PAGES \
+ EFI_SIZE_TO_PAGES (PAGE_TABLE_POOL_UNIT_SIZE)
+#define PAGE_TABLE_POOL_ALIGN_MASK \
+ (~(EFI_PHYSICAL_ADDRESS)(PAGE_TABLE_POOL_ALIGNMENT - 1))
+
+typedef struct {
+ VOID *NextPool;
+ UINTN Offset;
+ UINTN FreePages;
+} PAGE_TABLE_POOL;
+
+
+
+/**
+ This function clears memory encryption bit for the memory region specified by
+ PhysicalAddress and Length from the current page table context.
+
+ @param[in] Cr3BaseAddress Cr3 Base Address (if zero then use
+ current CR3)
+ @param[in] PhysicalAddress The physical address that is the start
+ address of a memory region.
+ @param[in] Length The length of memory region
+ @param[in] Flush Flush the caches before applying the
+ encryption mask
+
+ @retval RETURN_SUCCESS The attributes were cleared for the
+ memory region.
+ @retval RETURN_INVALID_PARAMETER Number of pages is zero.
+  @retval RETURN_UNSUPPORTED Clearing the memory encryption attribute
+ is not supported
+**/
+RETURN_STATUS
+EFIAPI
+InternalMemEncryptSevSetMemoryDecrypted (
+ IN PHYSICAL_ADDRESS Cr3BaseAddress,
+ IN PHYSICAL_ADDRESS PhysicalAddress,
+ IN UINTN Length,
+ IN BOOLEAN Flush
+ );
+
+/**
+ This function sets memory encryption bit for the memory region specified by
+ PhysicalAddress and Length from the current page table context.
+
+ @param[in] Cr3BaseAddress Cr3 Base Address (if zero then use
+ current CR3)
+ @param[in] PhysicalAddress The physical address that is the start
+ address of a memory region.
+ @param[in] Length The length of memory region
+ @param[in] Flush Flush the caches before applying the
+ encryption mask
+
+ @retval RETURN_SUCCESS The attributes were set for the memory
+ region.
+ @retval RETURN_INVALID_PARAMETER Number of pages is zero.
+  @retval RETURN_UNSUPPORTED Setting the memory encryption attribute
+ is not supported
+**/
+RETURN_STATUS
+EFIAPI
+InternalMemEncryptSevSetMemoryEncrypted (
+ IN PHYSICAL_ADDRESS Cr3BaseAddress,
+ IN PHYSICAL_ADDRESS PhysicalAddress,
+ IN UINTN Length,
+ IN BOOLEAN Flush
+ );
+
+#endif
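
Note: the *_OFFSET() macros defined above each extract a 9-bit page-table index from the address being walked by SetMemoryEncDec(). A worked example for a hypothetical identity-mapped address of 0x7FED4000 (the address is illustrative, not taken from this patch):

  //
  //   PML4_OFFSET (0x7FED4000) == 0x000   (bits 47:39)
  //   PDP_OFFSET  (0x7FED4000) == 0x001   (bits 38:30)
  //   PDE_OFFSET  (0x7FED4000) == 0x1FF   (bits 29:21)
  //   PTE_OFFSET  (0x7FED4000) == 0x0D4   (bits 20:12)
  //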