Diffstat (limited to 'roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32')
-rw-r--r--  roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/Cet.nasm             33
-rw-r--r--  roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/MpFuncs.nasm        153
-rw-r--r--  roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c           352
-rw-r--r--  roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/Semaphore.c          42
-rw-r--r--  roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.nasm       314
-rw-r--r--  roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.nasm   705
-rw-r--r--  roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmFuncsArch.c      204
-rw-r--r--  roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmInit.nasm         96
-rw-r--r--  roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c     74
-rw-r--r--  roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.h     91
10 files changed, 2064 insertions(+), 0 deletions(-)
diff --git a/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/Cet.nasm b/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/Cet.nasm
new file mode 100644
index 000000000..0919d6d05
--- /dev/null
+++ b/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/Cet.nasm
@@ -0,0 +1,33 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2019, Intel Corporation. All rights reserved.<BR>
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+;-------------------------------------------------------------------------------
+
+%include "Nasm.inc"
+
+SECTION .text
+
+global ASM_PFX(DisableCet)
+ASM_PFX(DisableCet):
+
+ ; Skip the pushed data for call
+ mov eax, 1
+ INCSSP_EAX
+
+ mov eax, cr4
+ btr eax, 23 ; clear CET
+ mov cr4, eax
+ ret
+
+global ASM_PFX(EnableCet)
+ASM_PFX(EnableCet):
+
+ mov eax, cr4
+ bts eax, 23 ; set CET
+ mov cr4, eax
+
+ ; use jmp to skip the check for ret
+ pop eax
+ jmp eax
+
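[Note] The two exits above are deliberately asymmetric: DisableCet is entered with CET still
on, so the CALL that reached it also pushed a return address on the shadow stack; the
"mov eax, 1" / INCSSP_EAX pops that shadow-stack entry before CR4 bit 23 is cleared.
EnableCet is entered with CET off, so its CALL pushed no shadow-stack entry, and a plain
RET after re-enabling CET would fail the return-address check; hence the "pop eax" /
"jmp eax" exit. A minimal C sketch of the intended call pattern, mirroring
SetPageTableAttributes() in PageTbl.c later in this diff (CR4_CET_ENABLE and CR0_WP are
that module's masks for CR4 bit 23 and CR0 bit 16; AsmReadCr4/AsmReadCr0/AsmWriteCr0 are
edk2 BaseLib calls):

    BOOLEAN  CetEnabled;

    CetEnabled = ((AsmReadCr4 () & CR4_CET_ENABLE) != 0);
    if (CetEnabled) {
      DisableCet ();                         // CET must be off while CR0.WP is clear
    }
    AsmWriteCr0 (AsmReadCr0 () & ~CR0_WP);   // open page tables for writing

    // ... modify page tables here ...

    AsmWriteCr0 (AsmReadCr0 () | CR0_WP);
    if (CetEnabled) {
      EnableCet ();
    }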
diff --git a/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/MpFuncs.nasm b/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/MpFuncs.nasm
new file mode 100644
index 000000000..dbd1418c0
--- /dev/null
+++ b/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/MpFuncs.nasm
@@ -0,0 +1,153 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2016, Intel Corporation. All rights reserved.<BR>
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+; Module Name:
+;
+; MpFuncs.nasm
+;
+; Abstract:
+;
+; This is the assembly code for Multi-processor S3 support
+;
+;-------------------------------------------------------------------------------
+
+SECTION .text
+
+extern ASM_PFX(InitializeFloatingPointUnits)
+
+%define VacantFlag 0x0
+%define NotVacantFlag 0xff
+
+%define LockLocation RendezvousFunnelProcEnd - RendezvousFunnelProcStart
+%define StackStart LockLocation + 0x4
+%define StackSize LockLocation + 0x8
+%define RendezvousProc LockLocation + 0xC
+%define GdtrProfile LockLocation + 0x10
+%define IdtrProfile LockLocation + 0x16
+%define BufferStart LockLocation + 0x1C
+
+;-------------------------------------------------------------------------------------
+;RendezvousFunnelProc procedure follows. All APs execute this procedure. It
+;serializes the AP processors through an init sequence. It must be
+;noted that APs arrive here very raw, i.e. in real mode with no stack.
+;ALSO, THIS PROCEDURE IS EXECUTED BY APs ONLY IN 16-BIT MODE. HENCE THIS PROC
+;IS IN MACHINE CODE.
+;-------------------------------------------------------------------------------------
+;RendezvousFunnelProc (&WakeUpBuffer,MemAddress);
+
+BITS 16
+global ASM_PFX(RendezvousFunnelProc)
+ASM_PFX(RendezvousFunnelProc):
+RendezvousFunnelProcStart:
+
+; At this point CS = 0x(vv00) and IP = 0x0.
+
+ mov ax, cs
+ mov ds, ax
+ mov es, ax
+ mov ss, ax
+ xor ax, ax
+ mov fs, ax
+ mov gs, ax
+
+flat32Start:
+
+ mov si, BufferStart
+    mov edx,dword [si]          ; EDX holds the start address of the wakeup buffer
+
+ mov si, GdtrProfile
+o32 lgdt [cs:si]
+
+ mov si, IdtrProfile
+o32 lidt [cs:si]
+
+ xor ax, ax
+ mov ds, ax
+
+ mov eax, cr0 ; Get control register 0
+ or eax, 0x000000001 ; Set PE bit (bit #0)
+ mov cr0, eax
+
+FLAT32_JUMP:
+
+a32 jmp dword 0x20:0x0
+
+BITS 32
+PMODE_ENTRY: ; protected mode entry point
+
+ mov ax, 0x8
+o16 mov ds, ax
+o16 mov es, ax
+o16 mov fs, ax
+o16 mov gs, ax
+o16 mov ss, ax ; Flat mode setup.
+
+ mov esi, edx
+
+ mov edi, esi
+ add edi, LockLocation
+ mov al, NotVacantFlag
+TestLock:
+ xchg byte [edi], al
+ cmp al, NotVacantFlag
+ jz TestLock
+
+ProgramStack:
+
+ mov edi, esi
+ add edi, StackSize
+ mov eax, dword [edi]
+ mov edi, esi
+ add edi, StackStart
+ add eax, dword [edi]
+ mov esp, eax
+ mov dword [edi], eax
+
+Releaselock:
+
+ mov al, VacantFlag
+ mov edi, esi
+ add edi, LockLocation
+ xchg byte [edi], al
+
+ ;
+ ; Call assembly function to initialize FPU.
+ ;
+ mov ebx, ASM_PFX(InitializeFloatingPointUnits)
+ call ebx
+ ;
+ ; Call C Function
+ ;
+ mov edi, esi
+ add edi, RendezvousProc
+ mov eax, dword [edi]
+
+ test eax, eax
+ jz GoToSleep
+ call eax ; Call C function
+
+GoToSleep:
+ cli
+ hlt
+ jmp $-2
+
+RendezvousFunnelProcEnd:
+;-------------------------------------------------------------------------------------
+; AsmGetAddressMap (&AddressMap);
+;-------------------------------------------------------------------------------------
+global ASM_PFX(AsmGetAddressMap)
+ASM_PFX(AsmGetAddressMap):
+
+ pushad
+ mov ebp,esp
+
+ mov ebx, dword [ebp+0x24]
+ mov dword [ebx], RendezvousFunnelProcStart
+ mov dword [ebx+0x4], PMODE_ENTRY - RendezvousFunnelProcStart
+ mov dword [ebx+0x8], FLAT32_JUMP - RendezvousFunnelProcStart
+ mov dword [ebx+0xc], RendezvousFunnelProcEnd - RendezvousFunnelProcStart
+
+ popad
+ ret
+
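[Note] AsmGetAddressMap() above stores four 32-bit values at offsets 0x0/0x4/0x8/0xC of the
caller's buffer; [ebp+0x24] is the first argument because pushad saves eight registers
(0x20 bytes), leaving the return address at [ebp+0x20]. A hedged C view of that buffer --
the typedef and field names below are illustrative, not taken from the driver, but the
offsets match the stores above:

    typedef struct {
      UINT32  RendezvousFunnelAddress;  // +0x0: start of the funnel code
      UINT32  PModeEntryOffset;         // +0x4: PMODE_ENTRY - start
      UINT32  FlatJumpOffset;           // +0x8: FLAT32_JUMP - start
      UINT32  Size;                     // +0xC: end - start
    } AP_ADDRESS_MAP;                   // hypothetical name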
diff --git a/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c b/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
new file mode 100644
index 000000000..2483f2ea8
--- /dev/null
+++ b/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/PageTbl.c
@@ -0,0 +1,352 @@
+/** @file
+Page table manipulation functions for IA-32 processors
+
+Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
+Copyright (c) 2017, AMD Incorporated. All rights reserved.<BR>
+
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+/**
+ Disable CET.
+**/
+VOID
+EFIAPI
+DisableCet (
+ VOID
+ );
+
+/**
+ Enable CET.
+**/
+VOID
+EFIAPI
+EnableCet (
+ VOID
+ );
+
+/**
+ Create PageTable for SMM use.
+
+ @return PageTable Address
+
+**/
+UINT32
+SmmInitPageTable (
+ VOID
+ )
+{
+ UINTN PageFaultHandlerHookAddress;
+ IA32_IDT_GATE_DESCRIPTOR *IdtEntry;
+ EFI_STATUS Status;
+
+ //
+ // Initialize spin lock
+ //
+ InitializeSpinLock (mPFLock);
+
+ mPhysicalAddressBits = 32;
+
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable) ||
+ HEAP_GUARD_NONSTOP_MODE ||
+ NULL_DETECTION_NONSTOP_MODE) {
+ //
+ // Set own Page Fault entry instead of the default one, because SMM Profile
+ // feature depends on IRET instruction to do Single Step
+ //
+ PageFaultHandlerHookAddress = (UINTN)PageFaultIdtHandlerSmmProfile;
+ IdtEntry = (IA32_IDT_GATE_DESCRIPTOR *) gcSmiIdtr.Base;
+ IdtEntry += EXCEPT_IA32_PAGE_FAULT;
+ IdtEntry->Bits.OffsetLow = (UINT16)PageFaultHandlerHookAddress;
+ IdtEntry->Bits.Reserved_0 = 0;
+ IdtEntry->Bits.GateType = IA32_IDT_GATE_TYPE_INTERRUPT_32;
+ IdtEntry->Bits.OffsetHigh = (UINT16)(PageFaultHandlerHookAddress >> 16);
+ } else {
+ //
+ // Register SMM Page Fault Handler
+ //
+ Status = SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_PAGE_FAULT, SmiPFHandler);
+ ASSERT_EFI_ERROR (Status);
+ }
+
+ //
+ // Additional SMM IDT initialization for SMM stack guard
+ //
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ InitializeIDTSmmStackGuard ();
+ }
+ return Gen4GPageTable (TRUE);
+}
+
+/**
+ Page Fault handler for SMM use.
+
+**/
+VOID
+SmiDefaultPFHandler (
+ VOID
+ )
+{
+ CpuDeadLoop ();
+}
+
+/**
+  The Page Fault handler wrapper for SMM use.
+
+  @param InterruptType    Defines the type of interrupt or exception that
+                          occurred on the processor. This parameter is processor architecture specific.
+ @param SystemContext A pointer to the processor context when
+ the interrupt occurred on the processor.
+**/
+VOID
+EFIAPI
+SmiPFHandler (
+ IN EFI_EXCEPTION_TYPE InterruptType,
+ IN EFI_SYSTEM_CONTEXT SystemContext
+ )
+{
+ UINTN PFAddress;
+ UINTN GuardPageAddress;
+ UINTN CpuIndex;
+
+ ASSERT (InterruptType == EXCEPT_IA32_PAGE_FAULT);
+
+ AcquireSpinLock (mPFLock);
+
+ PFAddress = AsmReadCr2 ();
+
+ //
+  // If a page fault occurs in the SMRAM range, it might be an SMM stack guard
+  // page hit or an SMM page protection violation.
+ //
+ if ((PFAddress >= mCpuHotPlugData.SmrrBase) &&
+ (PFAddress < (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize))) {
+ DumpCpuContext (InterruptType, SystemContext);
+ CpuIndex = GetCpuIndex ();
+ GuardPageAddress = (mSmmStackArrayBase + EFI_PAGE_SIZE + CpuIndex * mSmmStackSize);
+ if ((FeaturePcdGet (PcdCpuSmmStackGuard)) &&
+ (PFAddress >= GuardPageAddress) &&
+ (PFAddress < (GuardPageAddress + EFI_PAGE_SIZE))) {
+ DEBUG ((DEBUG_ERROR, "SMM stack overflow!\n"));
+ } else {
+ if ((SystemContext.SystemContextIa32->ExceptionData & IA32_PF_EC_ID) != 0) {
+ DEBUG ((DEBUG_ERROR, "SMM exception at execution (0x%x)\n", PFAddress));
+ DEBUG_CODE (
+ DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextIa32->Esp);
+ );
+ } else {
+ DEBUG ((DEBUG_ERROR, "SMM exception at access (0x%x)\n", PFAddress));
+ DEBUG_CODE (
+ DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextIa32->Eip);
+ );
+ }
+
+ if (HEAP_GUARD_NONSTOP_MODE) {
+ GuardPagePFHandler (SystemContext.SystemContextIa32->ExceptionData);
+ goto Exit;
+ }
+ }
+ CpuDeadLoop ();
+ goto Exit;
+ }
+
+ //
+ // If a page fault occurs in non-SMRAM range.
+ //
+ if ((PFAddress < mCpuHotPlugData.SmrrBase) ||
+ (PFAddress >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) {
+ if ((SystemContext.SystemContextIa32->ExceptionData & IA32_PF_EC_ID) != 0) {
+ DumpCpuContext (InterruptType, SystemContext);
+ DEBUG ((DEBUG_ERROR, "Code executed on IP(0x%x) out of SMM range after SMM is locked!\n", PFAddress));
+ DEBUG_CODE (
+ DumpModuleInfoByIp (*(UINTN *)(UINTN)SystemContext.SystemContextIa32->Esp);
+ );
+ CpuDeadLoop ();
+ goto Exit;
+ }
+
+ //
+ // If NULL pointer was just accessed
+ //
+ if ((PcdGet8 (PcdNullPointerDetectionPropertyMask) & BIT1) != 0 &&
+ (PFAddress < EFI_PAGE_SIZE)) {
+ DumpCpuContext (InterruptType, SystemContext);
+ DEBUG ((DEBUG_ERROR, "!!! NULL pointer access !!!\n"));
+ DEBUG_CODE (
+ DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextIa32->Eip);
+ );
+
+ if (NULL_DETECTION_NONSTOP_MODE) {
+ GuardPagePFHandler (SystemContext.SystemContextIa32->ExceptionData);
+ goto Exit;
+ }
+
+ CpuDeadLoop ();
+ goto Exit;
+ }
+
+ if (IsSmmCommBufferForbiddenAddress (PFAddress)) {
+ DumpCpuContext (InterruptType, SystemContext);
+ DEBUG ((DEBUG_ERROR, "Access SMM communication forbidden address (0x%x)!\n", PFAddress));
+ DEBUG_CODE (
+ DumpModuleInfoByIp ((UINTN)SystemContext.SystemContextIa32->Eip);
+ );
+ CpuDeadLoop ();
+ goto Exit;
+ }
+ }
+
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+ SmmProfilePFHandler (
+ SystemContext.SystemContextIa32->Eip,
+ SystemContext.SystemContextIa32->ExceptionData
+ );
+ } else {
+ DumpCpuContext (InterruptType, SystemContext);
+ SmiDefaultPFHandler ();
+ }
+
+Exit:
+ ReleaseSpinLock (mPFLock);
+}
+
+/**
+ This function sets memory attribute for page table.
+**/
+VOID
+SetPageTableAttributes (
+ VOID
+ )
+{
+ UINTN Index2;
+ UINTN Index3;
+ UINT64 *L1PageTable;
+ UINT64 *L2PageTable;
+ UINT64 *L3PageTable;
+ BOOLEAN IsSplitted;
+ BOOLEAN PageTableSplitted;
+ BOOLEAN CetEnabled;
+
+ //
+ // Don't mark page table to read-only if heap guard is enabled.
+ //
+ // BIT2: SMM page guard enabled
+ // BIT3: SMM pool guard enabled
+ //
+ if ((PcdGet8 (PcdHeapGuardPropertyMask) & (BIT3 | BIT2)) != 0) {
+ DEBUG ((DEBUG_INFO, "Don't mark page table to read-only as heap guard is enabled\n"));
+ return ;
+ }
+
+ //
+ // Don't mark page table to read-only if SMM profile is enabled.
+ //
+ if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+ DEBUG ((DEBUG_INFO, "Don't mark page table to read-only as SMM profile is enabled\n"));
+ return ;
+ }
+
+ DEBUG ((DEBUG_INFO, "SetPageTableAttributes\n"));
+
+ //
+  // Disable write protection, because we are about to mark the page table itself
+  // read-only: we must be able to *write* page table memory to mark it *read-only*.
+ //
+ CetEnabled = ((AsmReadCr4() & CR4_CET_ENABLE) != 0) ? TRUE : FALSE;
+ if (CetEnabled) {
+ //
+ // CET must be disabled if WP is disabled.
+ //
+ DisableCet();
+ }
+ AsmWriteCr0 (AsmReadCr0() & ~CR0_WP);
+
+ do {
+ DEBUG ((DEBUG_INFO, "Start...\n"));
+ PageTableSplitted = FALSE;
+
+ L3PageTable = (UINT64 *)GetPageTableBase ();
+
+ SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L3PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+ PageTableSplitted = (PageTableSplitted || IsSplitted);
+
+ for (Index3 = 0; Index3 < 4; Index3++) {
+ L2PageTable = (UINT64 *)(UINTN)(L3PageTable[Index3] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
+ if (L2PageTable == NULL) {
+ continue;
+ }
+
+ SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L2PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+ PageTableSplitted = (PageTableSplitted || IsSplitted);
+
+ for (Index2 = 0; Index2 < SIZE_4KB/sizeof(UINT64); Index2++) {
+ if ((L2PageTable[Index2] & IA32_PG_PS) != 0) {
+ // 2M
+ continue;
+ }
+ L1PageTable = (UINT64 *)(UINTN)(L2PageTable[Index2] & ~mAddressEncMask & PAGING_4K_ADDRESS_MASK_64);
+ if (L1PageTable == NULL) {
+ continue;
+ }
+ SmmSetMemoryAttributesEx ((EFI_PHYSICAL_ADDRESS)(UINTN)L1PageTable, SIZE_4KB, EFI_MEMORY_RO, &IsSplitted);
+ PageTableSplitted = (PageTableSplitted || IsSplitted);
+ }
+ }
+ } while (PageTableSplitted);
+
+ //
+ // Enable write protection, after page table updated.
+ //
+ AsmWriteCr0 (AsmReadCr0() | CR0_WP);
+ if (CetEnabled) {
+ //
+ // re-enable CET.
+ //
+ EnableCet();
+ }
+
+ return ;
+}
+
+/**
+  This function returns with no action for 32-bit firmware.
+
+  @param[out] Cr2  Pointer to variable to hold CR2 register value.
+**/
+VOID
+SaveCr2 (
+ OUT UINTN *Cr2
+ )
+{
+ return ;
+}
+
+/**
+  This function returns with no action for 32-bit firmware.
+
+  @param[in] Cr2  Value to write into CR2 register.
+**/
+VOID
+RestoreCr2 (
+ IN UINTN Cr2
+ )
+{
+ return ;
+}
+
+/**
+ Return whether access to non-SMRAM is restricted.
+
+ @retval TRUE Access to non-SMRAM is restricted.
+ @retval FALSE Access to non-SMRAM is not restricted.
+**/
+BOOLEAN
+IsRestrictedMemoryAccess (
+ VOID
+ )
+{
+ return TRUE;
+}
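[Note] The SetPageTableAttributes() loop above walks the standard 32-bit PAE hierarchy:
4 PDPTEs at the top level (hence Index3 < 4), 512 eight-byte entries per 4 KB directory or
table (SIZE_4KB/sizeof(UINT64)), and IA32_PG_PS marking 2 MB leaf pages that have no L1
table under them. The index math the hierarchy implies, as a sketch (architectural PAE
facts, not macros from this file):

    #define L3_INDEX(Addr)  (((Addr) >> 30) & 0x3)    // 4 PDPTEs cover 4 GiB
    #define L2_INDEX(Addr)  (((Addr) >> 21) & 0x1FF)  // 512 PDEs, 2 MiB each
    #define L1_INDEX(Addr)  (((Addr) >> 12) & 0x1FF)  // 512 PTEs, 4 KiB each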
diff --git a/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/Semaphore.c b/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/Semaphore.c
new file mode 100644
index 000000000..31ee067ad
--- /dev/null
+++ b/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/Semaphore.c
@@ -0,0 +1,42 @@
+/** @file
+Semaphore mechanism to indicate to the BSP that an AP has exited SMM
+after SMBASE relocation.
+
+Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+UINTN mSmmRelocationOriginalAddress;
+volatile BOOLEAN *mRebasedFlag;
+
+/**
+ Hook return address of SMM Save State so that semaphore code
+ can be executed immediately after AP exits SMM to indicate to
+ the BSP that an AP has exited SMM after SMBASE relocation.
+
+ @param[in] CpuIndex The processor index.
+ @param[in] RebasedFlag A pointer to a flag that is set to TRUE
+ immediately after AP exits SMM.
+
+**/
+VOID
+SemaphoreHook (
+ IN UINTN CpuIndex,
+ IN volatile BOOLEAN *RebasedFlag
+ )
+{
+ SMRAM_SAVE_STATE_MAP *CpuState;
+
+ mRebasedFlag = RebasedFlag;
+
+ CpuState = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);
+ mSmmRelocationOriginalAddress = (UINTN)HookReturnFromSmm (
+ CpuIndex,
+ CpuState,
+ (UINT64)(UINTN)&SmmRelocationSemaphoreComplete,
+ (UINT64)(UINTN)&SmmRelocationSemaphoreComplete
+ );
+}
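[Note] SemaphoreHook() reroutes the AP's return from SMM through
SmmRelocationSemaphoreComplete (defined in SmmInit.nasm later in this diff), which sets
*mRebasedFlag and then jumps to the saved original address. A hedged sketch of how the
BSP side presumably consumes the flag -- the loop shape is an assumption, not code from
this diff; CpuPause() is the edk2 BaseLib spin hint:

    volatile BOOLEAN  Rebased = FALSE;

    SemaphoreHook (CpuIndex, &Rebased);   // arm the hook for this AP
    // ... trigger the AP's SMI; it relocates SMBASE and executes RSM ...
    while (!Rebased) {
      CpuPause ();                        // spin until the AP has left SMM
    }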
diff --git a/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.nasm b/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.nasm
new file mode 100644
index 000000000..167f5e14d
--- /dev/null
+++ b/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiEntry.nasm
@@ -0,0 +1,314 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2016 - 2019, Intel Corporation. All rights reserved.<BR>
+; Copyright (c) 2020, AMD Incorporated. All rights reserved.<BR>
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+; Module Name:
+;
+; SmiEntry.nasm
+;
+; Abstract:
+;
+; Code template of the SMI handler for a particular processor
+;
+;-------------------------------------------------------------------------------
+
+%include "StuffRsbNasm.inc"
+%include "Nasm.inc"
+
+%define MSR_IA32_S_CET 0x6A2
+%define MSR_IA32_CET_SH_STK_EN 0x1
+%define MSR_IA32_CET_WR_SHSTK_EN 0x2
+%define MSR_IA32_CET_ENDBR_EN 0x4
+%define MSR_IA32_CET_LEG_IW_EN 0x8
+%define MSR_IA32_CET_NO_TRACK_EN 0x10
+%define MSR_IA32_CET_SUPPRESS_DIS 0x20
+%define MSR_IA32_CET_SUPPRESS 0x400
+%define MSR_IA32_CET_TRACKER 0x800
+%define MSR_IA32_PL0_SSP 0x6A4
+
+%define CR4_CET 0x800000
+
+%define MSR_IA32_MISC_ENABLE 0x1A0
+%define MSR_EFER 0xc0000080
+%define MSR_EFER_XD 0x800
+
+;
+; Constants relating to PROCESSOR_SMM_DESCRIPTOR
+;
+%define DSC_OFFSET 0xfb00
+%define DSC_GDTPTR 0x30
+%define DSC_GDTSIZ 0x38
+%define DSC_CS 14
+%define DSC_DS 16
+%define DSC_SS 18
+%define DSC_OTHERSEG 20
+
+%define PROTECT_MODE_CS 0x8
+%define PROTECT_MODE_DS 0x20
+%define TSS_SEGMENT 0x40
+
+extern ASM_PFX(SmiRendezvous)
+extern ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))
+extern ASM_PFX(CpuSmmDebugEntry)
+extern ASM_PFX(CpuSmmDebugExit)
+
+global ASM_PFX(gcSmiHandlerTemplate)
+global ASM_PFX(gcSmiHandlerSize)
+global ASM_PFX(gPatchSmiCr3)
+global ASM_PFX(gPatchSmiStack)
+global ASM_PFX(gPatchSmbase)
+extern ASM_PFX(mXdSupported)
+global ASM_PFX(gPatchXdSupported)
+global ASM_PFX(gPatchMsrIa32MiscEnableSupported)
+extern ASM_PFX(gSmiHandlerIdtr)
+
+extern ASM_PFX(mCetSupported)
+global ASM_PFX(mPatchCetSupported)
+global ASM_PFX(mPatchCetPl0Ssp)
+global ASM_PFX(mPatchCetInterruptSsp)
+
+ SECTION .text
+
+BITS 16
+ASM_PFX(gcSmiHandlerTemplate):
+_SmiEntryPoint:
+ mov bx, _GdtDesc - _SmiEntryPoint + 0x8000
+ mov ax,[cs:DSC_OFFSET + DSC_GDTSIZ]
+ dec ax
+ mov [cs:bx], ax
+ mov eax, [cs:DSC_OFFSET + DSC_GDTPTR]
+ mov [cs:bx + 2], eax
+ mov ebp, eax ; ebp = GDT base
+o32 lgdt [cs:bx] ; lgdt fword ptr cs:[bx]
+ mov ax, PROTECT_MODE_CS
+ mov [cs:bx-0x2],ax
+ mov edi, strict dword 0 ; source operand will be patched
+ASM_PFX(gPatchSmbase):
+ lea eax, [edi + (@32bit - _SmiEntryPoint) + 0x8000]
+ mov [cs:bx-0x6],eax
+ mov ebx, cr0
+ and ebx, 0x9ffafff3
+ or ebx, 0x23
+ mov cr0, ebx
+ jmp dword 0x0:0x0
+_GdtDesc:
+ DW 0
+ DD 0
+
+BITS 32
+@32bit:
+ mov ax, PROTECT_MODE_DS
+o16 mov ds, ax
+o16 mov es, ax
+o16 mov fs, ax
+o16 mov gs, ax
+o16 mov ss, ax
+ mov esp, strict dword 0 ; source operand will be patched
+ASM_PFX(gPatchSmiStack):
+ mov eax, ASM_PFX(gSmiHandlerIdtr)
+ lidt [eax]
+ jmp ProtFlatMode
+
+ProtFlatMode:
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(gPatchSmiCr3):
+ mov cr3, eax
+;
+; Need to test for CR4 specific bit support
+;
+ mov eax, 1
+ cpuid ; use CPUID to determine if specific CR4 bits are supported
+ xor eax, eax ; Clear EAX
+ test edx, BIT2 ; Check for DE capabilities
+ jz .0
+ or eax, BIT3
+.0:
+ test edx, BIT6 ; Check for PAE capabilities
+ jz .1
+ or eax, BIT5
+.1:
+ test edx, BIT7 ; Check for MCE capabilities
+ jz .2
+ or eax, BIT6
+.2:
+ test edx, BIT24 ; Check for FXSR capabilities
+ jz .3
+ or eax, BIT9
+.3:
+ test edx, BIT25 ; Check for SSE capabilities
+ jz .4
+ or eax, BIT10
+.4: ; as cr4.PGE is not set here, refresh cr3
+ mov cr4, eax ; in PreModifyMtrrs() to flush TLB.
+
+ cmp byte [dword ASM_PFX(FeaturePcdGet (PcdCpuSmmStackGuard))], 0
+ jz .6
+; Load TSS
+ mov byte [ebp + TSS_SEGMENT + 5], 0x89 ; clear busy flag
+ mov eax, TSS_SEGMENT
+ ltr ax
+.6:
+
+; enable NXE if supported
+ mov al, strict byte 1 ; source operand may be patched
+ASM_PFX(gPatchXdSupported):
+ cmp al, 0
+ jz @SkipXd
+
+; If MSR_IA32_MISC_ENABLE is supported, clear XD Disable bit
+ mov al, strict byte 1 ; source operand may be patched
+ASM_PFX(gPatchMsrIa32MiscEnableSupported):
+ cmp al, 1
+ jz MsrIa32MiscEnableSupported
+
+; MSR_IA32_MISC_ENABLE not supported
+ xor edx, edx
+ push edx ; don't try to restore the XD Disable bit just before RSM
+ jmp EnableNxe
+
+;
+; Check XD disable bit
+;
+MsrIa32MiscEnableSupported:
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ push edx ; save MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
+ jz EnableNxe
+ and dx, 0xFFFB ; clear XD Disable bit if it is set
+ wrmsr
+EnableNxe:
+ mov ecx, MSR_EFER
+ rdmsr
+ or ax, MSR_EFER_XD ; enable NXE
+ wrmsr
+ jmp @XdDone
+@SkipXd:
+ sub esp, 4
+@XdDone:
+
+ mov ebx, cr0
+ or ebx, 0x80010023 ; enable paging + WP + NE + MP + PE
+ mov cr0, ebx
+ lea ebx, [edi + DSC_OFFSET]
+ mov ax, [ebx + DSC_DS]
+ mov ds, eax
+ mov ax, [ebx + DSC_OTHERSEG]
+ mov es, eax
+ mov fs, eax
+ mov gs, eax
+ mov ax, [ebx + DSC_SS]
+ mov ss, eax
+
+ mov ebx, [esp + 4] ; ebx <- CpuIndex
+
+; enable CET if supported
+ mov al, strict byte 1 ; source operand may be patched
+ASM_PFX(mPatchCetSupported):
+ cmp al, 0
+ jz CetDone
+
+ mov ecx, MSR_IA32_S_CET
+ rdmsr
+ push edx
+ push eax
+
+ mov ecx, MSR_IA32_PL0_SSP
+ rdmsr
+ push edx
+ push eax
+
+ mov ecx, MSR_IA32_S_CET
+ mov eax, MSR_IA32_CET_SH_STK_EN
+ xor edx, edx
+ wrmsr
+
+ mov ecx, MSR_IA32_PL0_SSP
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(mPatchCetPl0Ssp):
+ xor edx, edx
+ wrmsr
+ mov ecx, cr0
+ btr ecx, 16 ; clear WP
+ mov cr0, ecx
+    mov     [eax], eax                  ; reload SSP, and clear busy flag.
+ xor ecx, ecx
+ mov [eax + 4], ecx
+
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(mPatchCetInterruptSsp):
+ cmp eax, 0
+ jz CetInterruptDone
+    mov     [eax], eax                  ; reload SSP, and clear busy flag.
+ xor ecx, ecx
+ mov [eax + 4], ecx
+CetInterruptDone:
+
+ mov ecx, cr0
+ bts ecx, 16 ; set WP
+ mov cr0, ecx
+
+ mov eax, 0x668 | CR4_CET
+ mov cr4, eax
+
+ SETSSBSY
+
+CetDone:
+
+ push ebx
+ mov eax, ASM_PFX(CpuSmmDebugEntry)
+ call eax
+ add esp, 4
+
+ push ebx
+ mov eax, ASM_PFX(SmiRendezvous)
+ call eax
+ add esp, 4
+
+ push ebx
+ mov eax, ASM_PFX(CpuSmmDebugExit)
+ call eax
+ add esp, 4
+
+ mov eax, ASM_PFX(mCetSupported)
+ mov al, [eax]
+ cmp al, 0
+ jz CetDone2
+
+ mov eax, 0x668
+ mov cr4, eax ; disable CET
+
+ mov ecx, MSR_IA32_PL0_SSP
+ pop eax
+ pop edx
+ wrmsr
+
+ mov ecx, MSR_IA32_S_CET
+ pop eax
+ pop edx
+ wrmsr
+CetDone2:
+
+ mov eax, ASM_PFX(mXdSupported)
+ mov al, [eax]
+ cmp al, 0
+ jz .7
+ pop edx ; get saved MSR_IA32_MISC_ENABLE[63-32]
+ test edx, BIT2
+ jz .7
+ mov ecx, MSR_IA32_MISC_ENABLE
+ rdmsr
+ or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
+ wrmsr
+
+.7:
+
+ StuffRsb32
+ rsm
+
+ASM_PFX(gcSmiHandlerSize): DW $ - _SmiEntryPoint
+
+global ASM_PFX(PiSmmCpuSmiEntryFixupAddress)
+ASM_PFX(PiSmmCpuSmiEntryFixupAddress):
+ ret
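[Note] The XD handling above is stack-balanced by construction: each of the three paths
pushes exactly one dword (the saved MSR_IA32_MISC_ENABLE[63:32], a zero, or the
"sub esp, 4" slot at @SkipXd), and the exit path after CetDone2 pops it to decide whether
to re-set the XD Disable bit (bit 34) before RSM. A hedged C restatement of the entry
side; the function and variable names are illustrative, and the real code keeps the saved
value on the SMI stack rather than in a global:

    #include <Library/BaseLib.h>    // AsmReadMsr64 / AsmWriteMsr64 / RShiftU64

    #define SKETCH_MSR_IA32_MISC_ENABLE  0x1A0
    #define SKETCH_MSR_EFER              0xC0000080
    #define SKETCH_MSR_EFER_XD           0x800

    STATIC UINT32  mSavedMiscEnableHigh;  // stands in for the "push edx" slot

    VOID
    EnableNxeOnSmiEntrySketch (
      IN BOOLEAN  XdSupported,
      IN BOOLEAN  MiscEnableSupported
      )
    {
      UINT64  MiscEnable;

      if (!XdSupported) {
        return;                                        // mirrors "jz @SkipXd"
      }
      if (MiscEnableSupported) {
        MiscEnable           = AsmReadMsr64 (SKETCH_MSR_IA32_MISC_ENABLE);
        mSavedMiscEnableHigh = (UINT32)RShiftU64 (MiscEnable, 32);
        if ((MiscEnable & BIT34) != 0) {               // XD Disable currently set?
          AsmWriteMsr64 (SKETCH_MSR_IA32_MISC_ENABLE, MiscEnable & ~BIT34);
        }
      } else {
        mSavedMiscEnableHigh = 0;                      // nothing to restore at exit
      }
      AsmWriteMsr64 (SKETCH_MSR_EFER,
                     AsmReadMsr64 (SKETCH_MSR_EFER) | SKETCH_MSR_EFER_XD);
    }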
diff --git a/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.nasm b/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.nasm
new file mode 100644
index 000000000..e7b85a994
--- /dev/null
+++ b/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmiException.nasm
@@ -0,0 +1,705 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2009 - 2019, Intel Corporation. All rights reserved.<BR>
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+; Module Name:
+;
+; SmiException.nasm
+;
+; Abstract:
+;
+; Exception handlers used in SM mode
+;
+;-------------------------------------------------------------------------------
+
+extern ASM_PFX(FeaturePcdGet (PcdCpuSmmProfileEnable))
+extern ASM_PFX(SmiPFHandler)
+extern ASM_PFX(mSetupDebugTrap)
+
+global ASM_PFX(gcSmiIdtr)
+global ASM_PFX(gcSmiGdtr)
+global ASM_PFX(gTaskGateDescriptor)
+global ASM_PFX(gcPsd)
+
+ SECTION .data
+
+NullSeg: DQ 0 ; reserved by architecture
+CodeSeg32:
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 0x9b
+ DB 0xcf ; LimitHigh
+ DB 0 ; BaseHigh
+ProtModeCodeSeg32:
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 0x9b
+ DB 0xcf ; LimitHigh
+ DB 0 ; BaseHigh
+ProtModeSsSeg32:
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 0x93
+ DB 0xcf ; LimitHigh
+ DB 0 ; BaseHigh
+DataSeg32:
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 0x93
+ DB 0xcf ; LimitHigh
+ DB 0 ; BaseHigh
+CodeSeg16:
+ DW -1
+ DW 0
+ DB 0
+ DB 0x9b
+ DB 0x8f
+ DB 0
+DataSeg16:
+ DW -1
+ DW 0
+ DB 0
+ DB 0x93
+ DB 0x8f
+ DB 0
+CodeSeg64:
+ DW -1 ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 0x9b
+ DB 0xaf ; LimitHigh
+ DB 0 ; BaseHigh
+GDT_SIZE equ $ - NullSeg
+
+TssSeg:
+ DW TSS_DESC_SIZE ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 0x89
+ DB 0x80 ; LimitHigh
+ DB 0 ; BaseHigh
+ExceptionTssSeg:
+ DW EXCEPTION_TSS_DESC_SIZE ; LimitLow
+ DW 0 ; BaseLow
+ DB 0 ; BaseMid
+ DB 0x89
+ DB 0x80 ; LimitHigh
+ DB 0 ; BaseHigh
+
+CODE_SEL equ CodeSeg32 - NullSeg
+DATA_SEL equ DataSeg32 - NullSeg
+TSS_SEL equ TssSeg - NullSeg
+EXCEPTION_TSS_SEL equ ExceptionTssSeg - NullSeg
+
+struc IA32_TSS
+ resw 1
+ resw 1
+ .ESP0: resd 1
+ .SS0: resw 1
+ resw 1
+ .ESP1: resd 1
+ .SS1: resw 1
+ resw 1
+ .ESP2: resd 1
+ .SS2: resw 1
+ resw 1
+ ._CR3: resd 1
+ .EIP: resd 1
+ .EFLAGS: resd 1
+ ._EAX: resd 1
+ ._ECX: resd 1
+ ._EDX: resd 1
+ ._EBX: resd 1
+ ._ESP: resd 1
+ ._EBP: resd 1
+ ._ESI: resd 1
+ ._EDI: resd 1
+ ._ES: resw 1
+ resw 1
+ ._CS: resw 1
+ resw 1
+ ._SS: resw 1
+ resw 1
+ ._DS: resw 1
+ resw 1
+ ._FS: resw 1
+ resw 1
+ ._GS: resw 1
+ resw 1
+ .LDT: resw 1
+ resw 1
+ resw 1
+ resw 1
+endstruc
+
+; Create 2 TSS segments just after GDT
+TssDescriptor:
+ DW 0 ; PreviousTaskLink
+ DW 0 ; Reserved
+ DD 0 ; ESP0
+ DW 0 ; SS0
+ DW 0 ; Reserved
+ DD 0 ; ESP1
+ DW 0 ; SS1
+ DW 0 ; Reserved
+ DD 0 ; ESP2
+ DW 0 ; SS2
+ DW 0 ; Reserved
+ DD 0 ; CR3
+ DD 0 ; EIP
+ DD 0 ; EFLAGS
+ DD 0 ; EAX
+ DD 0 ; ECX
+ DD 0 ; EDX
+ DD 0 ; EBX
+ DD 0 ; ESP
+ DD 0 ; EBP
+ DD 0 ; ESI
+ DD 0 ; EDI
+ DW 0 ; ES
+ DW 0 ; Reserved
+ DW 0 ; CS
+ DW 0 ; Reserved
+ DW 0 ; SS
+ DW 0 ; Reserved
+ DW 0 ; DS
+ DW 0 ; Reserved
+ DW 0 ; FS
+ DW 0 ; Reserved
+ DW 0 ; GS
+ DW 0 ; Reserved
+ DW 0 ; LDT Selector
+ DW 0 ; Reserved
+ DW 0 ; T
+ DW 0 ; I/O Map Base
+TSS_DESC_SIZE equ $ - TssDescriptor
+
+ExceptionTssDescriptor:
+ DW 0 ; PreviousTaskLink
+ DW 0 ; Reserved
+ DD 0 ; ESP0
+ DW 0 ; SS0
+ DW 0 ; Reserved
+ DD 0 ; ESP1
+ DW 0 ; SS1
+ DW 0 ; Reserved
+ DD 0 ; ESP2
+ DW 0 ; SS2
+ DW 0 ; Reserved
+ DD 0 ; CR3
+ DD PFHandlerEntry ; EIP
+ DD 00000002 ; EFLAGS
+ DD 0 ; EAX
+ DD 0 ; ECX
+ DD 0 ; EDX
+ DD 0 ; EBX
+ DD 0 ; ESP
+ DD 0 ; EBP
+ DD 0 ; ESI
+ DD 0 ; EDI
+ DW DATA_SEL ; ES
+ DW 0 ; Reserved
+ DW CODE_SEL ; CS
+ DW 0 ; Reserved
+ DW DATA_SEL ; SS
+ DW 0 ; Reserved
+ DW DATA_SEL ; DS
+ DW 0 ; Reserved
+ DW DATA_SEL ; FS
+ DW 0 ; Reserved
+ DW DATA_SEL ; GS
+ DW 0 ; Reserved
+ DW 0 ; LDT Selector
+ DW 0 ; Reserved
+ DW 0 ; T
+ DW 0 ; I/O Map Base
+ DD 0 ; SSP
+EXCEPTION_TSS_DESC_SIZE equ $ - ExceptionTssDescriptor
+
+ASM_PFX(gcPsd):
+ DB 'PSDSIG '
+ DW PSD_SIZE
+ DW 2
+ DW 1 << 2
+ DW CODE_SEL
+ DW DATA_SEL
+ DW DATA_SEL
+ DW DATA_SEL
+ DW 0
+ DQ 0
+ DQ 0
+ DQ 0
+ DD 0
+ DD NullSeg
+ DD GDT_SIZE
+ DD 0
+ times 24 DB 0
+ DD 0
+ DD 0
+PSD_SIZE equ $ - ASM_PFX(gcPsd)
+
+ASM_PFX(gcSmiGdtr):
+ DW GDT_SIZE - 1
+ DD NullSeg
+
+ASM_PFX(gcSmiIdtr):
+ DW 0
+ DD 0
+
+ASM_PFX(gTaskGateDescriptor):
+ DW 0 ; Reserved
+ DW EXCEPTION_TSS_SEL ; TSS Segment selector
+ DB 0 ; Reserved
+ DB 0x85 ; Task Gate, present, DPL = 0
+ DW 0 ; Reserved
+
+ SECTION .text
+;------------------------------------------------------------------------------
+; PageFaultIdtHandlerSmmProfile is the entry point for page faults only
+;
+;
+; Stack:
+; +---------------------+
+; + EFlags +
+; +---------------------+
+; + CS +
+; +---------------------+
+; + EIP +
+; +---------------------+
+; + Error Code +
+; +---------------------+
+; + Vector Number +
+; +---------------------+
+; + EBP +
+; +---------------------+ <-- EBP
+;
+;
+;------------------------------------------------------------------------------
+global ASM_PFX(PageFaultIdtHandlerSmmProfile)
+ASM_PFX(PageFaultIdtHandlerSmmProfile):
+ push 0xe ; Page Fault
+
+ push ebp
+ mov ebp, esp
+
+ ;
+ ; Align stack to make sure that EFI_FX_SAVE_STATE_IA32 of EFI_SYSTEM_CONTEXT_IA32
+ ; is 16-byte aligned
+ ;
+ and esp, 0xfffffff0
+ sub esp, 12
+
+;; UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
+ push eax
+ push ecx
+ push edx
+ push ebx
+ lea ecx, [ebp + 6 * 4]
+ push ecx ; ESP
+ push dword [ebp] ; EBP
+ push esi
+ push edi
+
+;; UINT32 Gs, Fs, Es, Ds, Cs, Ss;
+ mov eax, ss
+ push eax
+ movzx eax, word [ebp + 4 * 4]
+ push eax
+ mov eax, ds
+ push eax
+ mov eax, es
+ push eax
+ mov eax, fs
+ push eax
+ mov eax, gs
+ push eax
+
+;; UINT32 Eip;
+ mov eax, [ebp + 3 * 4]
+ push eax
+
+;; UINT32 Gdtr[2], Idtr[2];
+ sub esp, 8
+ sidt [esp]
+ mov eax, [esp + 2]
+ xchg eax, [esp]
+ and eax, 0xFFFF
+ mov [esp+4], eax
+
+ sub esp, 8
+ sgdt [esp]
+ mov eax, [esp + 2]
+ xchg eax, [esp]
+ and eax, 0xFFFF
+ mov [esp+4], eax
+
+;; UINT32 Ldtr, Tr;
+ xor eax, eax
+ str ax
+ push eax
+ sldt ax
+ push eax
+
+;; UINT32 EFlags;
+ mov eax, [ebp + 5 * 4]
+ push eax
+
+;; UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
+ mov eax, cr4
+ or eax, 0x208
+ mov cr4, eax
+ push eax
+ mov eax, cr3
+ push eax
+ mov eax, cr2
+ push eax
+ xor eax, eax
+ push eax
+ mov eax, cr0
+ push eax
+
+;; UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+ mov eax, dr7
+ push eax
+ mov eax, dr6
+ push eax
+ mov eax, dr3
+ push eax
+ mov eax, dr2
+ push eax
+ mov eax, dr1
+ push eax
+ mov eax, dr0
+ push eax
+
+;; FX_SAVE_STATE_IA32 FxSaveState;
+ sub esp, 512
+ mov edi, esp
+ fxsave [edi]
+
+; UEFI calling convention for IA32 requires that Direction flag in EFLAGS is clear
+ cld
+
+;; UINT32 ExceptionData;
+ push dword [ebp + 2 * 4]
+
+;; call into exception handler
+
+;; Prepare parameter and call
+ mov edx, esp
+ push edx
+ mov edx, dword [ebp + 1 * 4]
+ push edx
+
+ ;
+ ; Call External Exception Handler
+ ;
+ mov eax, ASM_PFX(SmiPFHandler)
+ call eax
+ add esp, 8
+
+;; UINT32 ExceptionData;
+ add esp, 4
+
+;; FX_SAVE_STATE_IA32 FxSaveState;
+ mov esi, esp
+ fxrstor [esi]
+ add esp, 512
+
+;; UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+;; Skip restoration of DRx registers to support debuggers
+;; that set breakpoint in interrupt/exception context
+ add esp, 4 * 6
+
+;; UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
+ pop eax
+ mov cr0, eax
+ add esp, 4 ; not for Cr1
+ pop eax
+ mov cr2, eax
+ pop eax
+ mov cr3, eax
+ pop eax
+ mov cr4, eax
+
+;; UINT32 EFlags;
+ pop dword [ebp + 5 * 4]
+
+;; UINT32 Ldtr, Tr;
+;; UINT32 Gdtr[2], Idtr[2];
+;; Best not let anyone mess with these particular registers...
+ add esp, 24
+
+;; UINT32 Eip;
+ pop dword [ebp + 3 * 4]
+
+;; UINT32 Gs, Fs, Es, Ds, Cs, Ss;
+;; NOTE - modified segment registers could hang the debugger... We
+;; could attempt to insulate ourselves against this possibility,
+;; but that poses risks as well.
+;;
+ pop gs
+ pop fs
+ pop es
+ pop ds
+ pop dword [ebp + 4 * 4]
+ pop ss
+
+;; UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
+ pop edi
+ pop esi
+ add esp, 4 ; not for ebp
+ add esp, 4 ; not for esp
+ pop ebx
+ pop edx
+ pop ecx
+ pop eax
+
+ mov esp, ebp
+ pop ebp
+
+; Enable TF bit after page fault handler runs
+ bts dword [esp + 16], 8 ; EFLAGS
+
+ add esp, 8 ; skip INT# & ErrCode
+Return:
+ iretd
+;
+; Page Fault Exception Handler entry when SMM Stack Guard is enabled
+; Execution starts here after a task switch
+;
+PFHandlerEntry:
+;
+; Get this processor's TSS
+;
+ sub esp, 8
+ sgdt [esp + 2]
+ mov eax, [esp + 4] ; GDT base
+ add esp, 8
+ mov ecx, [eax + TSS_SEL + 2]
+ shl ecx, 8
+ mov cl, [eax + TSS_SEL + 7]
+ ror ecx, 8 ; ecx = TSS base
+
+ mov ebp, esp
+
+ ;
+ ; Align stack to make sure that EFI_FX_SAVE_STATE_IA32 of EFI_SYSTEM_CONTEXT_IA32
+ ; is 16-byte aligned
+ ;
+ and esp, 0xfffffff0
+ sub esp, 12
+
+;; UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
+ push dword [ecx + IA32_TSS._EAX]
+ push dword [ecx + IA32_TSS._ECX]
+ push dword [ecx + IA32_TSS._EDX]
+ push dword [ecx + IA32_TSS._EBX]
+ push dword [ecx + IA32_TSS._ESP]
+ push dword [ecx + IA32_TSS._EBP]
+ push dword [ecx + IA32_TSS._ESI]
+ push dword [ecx + IA32_TSS._EDI]
+
+;; UINT32 Gs, Fs, Es, Ds, Cs, Ss;
+ movzx eax, word [ecx + IA32_TSS._SS]
+ push eax
+ movzx eax, word [ecx + IA32_TSS._CS]
+ push eax
+ movzx eax, word [ecx + IA32_TSS._DS]
+ push eax
+ movzx eax, word [ecx + IA32_TSS._ES]
+ push eax
+ movzx eax, word [ecx + IA32_TSS._FS]
+ push eax
+ movzx eax, word [ecx + IA32_TSS._GS]
+ push eax
+
+;; UINT32 Eip;
+ push dword [ecx + IA32_TSS.EIP]
+
+;; UINT32 Gdtr[2], Idtr[2];
+ sub esp, 8
+ sidt [esp]
+ mov eax, [esp + 2]
+ xchg eax, [esp]
+ and eax, 0xFFFF
+ mov [esp+4], eax
+
+ sub esp, 8
+ sgdt [esp]
+ mov eax, [esp + 2]
+ xchg eax, [esp]
+ and eax, 0xFFFF
+ mov [esp+4], eax
+
+;; UINT32 Ldtr, Tr;
+ mov eax, TSS_SEL
+ push eax
+ movzx eax, word [ecx + IA32_TSS.LDT]
+ push eax
+
+;; UINT32 EFlags;
+ push dword [ecx + IA32_TSS.EFLAGS]
+
+;; UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
+ mov eax, cr4
+ or eax, 0x208
+ mov cr4, eax
+ push eax
+ mov eax, cr3
+ push eax
+ mov eax, cr2
+ push eax
+ xor eax, eax
+ push eax
+ mov eax, cr0
+ push eax
+
+;; UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+ mov eax, dr7
+ push eax
+ mov eax, dr6
+ push eax
+ mov eax, dr3
+ push eax
+ mov eax, dr2
+ push eax
+ mov eax, dr1
+ push eax
+ mov eax, dr0
+ push eax
+
+;; FX_SAVE_STATE_IA32 FxSaveState;
+;; Clear TS bit in CR0 to avoid Device Not Available Exception (#NM)
+;; when executing fxsave/fxrstor instruction
+ clts
+ sub esp, 512
+ mov edi, esp
+ fxsave [edi]
+
+; UEFI calling convention for IA32 requires that Direction flag in EFLAGS is clear
+ cld
+
+;; UINT32 ExceptionData;
+ push dword [ebp]
+
+;; call into exception handler
+ mov ebx, ecx
+ mov eax, ASM_PFX(SmiPFHandler)
+
+;; Prepare parameter and call
+ mov edx, esp
+ push edx
+ mov edx, 14
+ push edx
+
+ ;
+ ; Call External Exception Handler
+ ;
+ call eax
+ add esp, 8
+
+ mov ecx, ebx
+;; UINT32 ExceptionData;
+ add esp, 4
+
+;; FX_SAVE_STATE_IA32 FxSaveState;
+ mov esi, esp
+ fxrstor [esi]
+ add esp, 512
+
+;; UINT32 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
+;; Skip restoration of DRx registers to support debuggers
+;; that set breakpoints in interrupt/exception context
+ add esp, 4 * 6
+
+;; UINT32 Cr0, Cr1, Cr2, Cr3, Cr4;
+ pop eax
+ mov cr0, eax
+ add esp, 4 ; not for Cr1
+ pop eax
+ mov cr2, eax
+ pop eax
+ mov dword [ecx + IA32_TSS._CR3], eax
+ pop eax
+ mov cr4, eax
+
+;; UINT32 EFlags;
+ pop dword [ecx + IA32_TSS.EFLAGS]
+
+;; UINT32 Ldtr, Tr;
+;; UINT32 Gdtr[2], Idtr[2];
+;; Best not let anyone mess with these particular registers...
+ add esp, 24
+
+;; UINT32 Eip;
+ pop dword [ecx + IA32_TSS.EIP]
+
+;; UINT32 Gs, Fs, Es, Ds, Cs, Ss;
+;; NOTE - modified segment registers could hang the debugger... We
+;; could attempt to insulate ourselves against this possibility,
+;; but that poses risks as well.
+;;
+ pop eax
+o16 mov [ecx + IA32_TSS._GS], ax
+ pop eax
+o16 mov [ecx + IA32_TSS._FS], ax
+ pop eax
+o16 mov [ecx + IA32_TSS._ES], ax
+ pop eax
+o16 mov [ecx + IA32_TSS._DS], ax
+ pop eax
+o16 mov [ecx + IA32_TSS._CS], ax
+ pop eax
+o16 mov [ecx + IA32_TSS._SS], ax
+
+;; UINT32 Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;
+ pop dword [ecx + IA32_TSS._EDI]
+ pop dword [ecx + IA32_TSS._ESI]
+ add esp, 4 ; not for ebp
+ add esp, 4 ; not for esp
+ pop dword [ecx + IA32_TSS._EBX]
+ pop dword [ecx + IA32_TSS._EDX]
+ pop dword [ecx + IA32_TSS._ECX]
+ pop dword [ecx + IA32_TSS._EAX]
+
+ mov esp, ebp
+
+; Set single step DB# if SMM profile is enabled and page fault exception happens
+ cmp byte [dword ASM_PFX(mSetupDebugTrap)], 0
+ jz @Done2
+
+; Create return context for iretd in stub function
+ mov eax, dword [ecx + IA32_TSS._ESP] ; Get old stack pointer
+ mov ebx, dword [ecx + IA32_TSS.EIP]
+ mov [eax - 0xc], ebx ; create EIP in old stack
+ movzx ebx, word [ecx + IA32_TSS._CS]
+ mov [eax - 0x8], ebx ; create CS in old stack
+ mov ebx, dword [ecx + IA32_TSS.EFLAGS]
+ bts ebx, 8
+ mov [eax - 0x4], ebx ; create eflags in old stack
+ mov eax, dword [ecx + IA32_TSS._ESP] ; Get old stack pointer
+ sub eax, 0xc ; minus 12 byte
+ mov dword [ecx + IA32_TSS._ESP], eax ; Set new stack pointer
+; Replace the EIP of interrupted task with stub function
+ mov eax, ASM_PFX(PageFaultStubFunction)
+ mov dword [ecx + IA32_TSS.EIP], eax
+; Jump to the iretd so the next page fault will start the handler task afresh after iretd.
+@Done2:
+ add esp, 4 ; skip ErrCode
+
+ jmp Return
+
+global ASM_PFX(PageFaultStubFunction)
+ASM_PFX(PageFaultStubFunction):
+;
+; we need clean TS bit in CR0 to execute
+; x87 FPU/MMX/SSE/SSE2/SSE3/SSSE3/SSE4 instructions.
+;
+ clts
+ iretd
+
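[Note] Both entry paths above (the IDT handler and the task-gate handler) assemble the
processor context by pushing fields in reverse declaration order, so the last value
pushed (ExceptionData) ends up at the lowest address and ESP at the "call" points at a
complete IA32 context. A hedged sketch of the resulting in-memory layout, reconstructed
directly from the ";; UINT32 ..." comments above; it is intended to mirror
EFI_SYSTEM_CONTEXT_IA32 from MdePkg's DebugSupport.h:

    typedef struct {
      UINT32  ExceptionData;                            // pushed last (lowest address)
      UINT8   FxSaveState[512];                         // fxsave image
      UINT32  Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
      UINT32  Cr0, Cr1, Cr2, Cr3, Cr4;                  // the Cr1 slot is pushed as 0
      UINT32  Eflags;
      UINT32  Ldtr, Tr;
      UINT32  Gdtr[2], Idtr[2];
      UINT32  Eip;
      UINT32  Gs, Fs, Es, Ds, Cs, Ss;
      UINT32  Edi, Esi, Ebp, Esp, Ebx, Edx, Ecx, Eax;   // pushed first (highest address)
    } SMI_CONTEXT_IA32_SKETCH;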
diff --git a/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmFuncsArch.c b/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmFuncsArch.c
new file mode 100644
index 000000000..ef277349d
--- /dev/null
+++ b/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmFuncsArch.c
@@ -0,0 +1,204 @@
+/** @file
+ SMM CPU misc functions for Ia32 arch specific.
+
+Copyright (c) 2015 - 2019, Intel Corporation. All rights reserved.<BR>
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+extern UINT64 gTaskGateDescriptor;
+
+EFI_PHYSICAL_ADDRESS mGdtBuffer;
+UINTN mGdtBufferSize;
+
+extern BOOLEAN mCetSupported;
+extern UINTN mSmmShadowStackSize;
+
+X86_ASSEMBLY_PATCH_LABEL mPatchCetPl0Ssp;
+X86_ASSEMBLY_PATCH_LABEL mPatchCetInterruptSsp;
+UINT32 mCetPl0Ssp;
+UINT32 mCetInterruptSsp;
+
+/**
+ Initialize IDT for SMM Stack Guard.
+
+**/
+VOID
+EFIAPI
+InitializeIDTSmmStackGuard (
+ VOID
+ )
+{
+ IA32_IDT_GATE_DESCRIPTOR *IdtGate;
+
+ //
+ // If SMM Stack Guard feature is enabled, the Page Fault Exception entry in IDT
+ // is a Task Gate Descriptor so that when a Page Fault Exception occurs,
+  // the processor can switch to a known-good stack in case the stack has run out.
+ //
+ IdtGate = (IA32_IDT_GATE_DESCRIPTOR *)gcSmiIdtr.Base;
+ IdtGate += EXCEPT_IA32_PAGE_FAULT;
+ IdtGate->Uint64 = gTaskGateDescriptor;
+}
+
+/**
+ Initialize Gdt for all processors.
+
+ @param[in] Cr3 CR3 value.
+ @param[out] GdtStepSize The step size for GDT table.
+
+ @return GdtBase for processor 0.
+ GdtBase for processor X is: GdtBase + (GdtStepSize * X)
+**/
+VOID *
+InitGdt (
+ IN UINTN Cr3,
+ OUT UINTN *GdtStepSize
+ )
+{
+ UINTN Index;
+ IA32_SEGMENT_DESCRIPTOR *GdtDescriptor;
+ UINTN TssBase;
+ UINTN GdtTssTableSize;
+ UINT8 *GdtTssTables;
+ UINTN GdtTableStepSize;
+ UINTN InterruptShadowStack;
+
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ //
+    // For IA32 SMM, if the SMM Stack Guard feature is enabled, we use 2 TSSs.
+    // In this case, we allocate a separate GDT/TSS for each CPU to avoid TSS
+    // load contention on each SMI entry.
+ //
+
+ //
+ // Enlarge GDT to contain 2 TSS descriptors
+ //
+ gcSmiGdtr.Limit += (UINT16)(2 * sizeof (IA32_SEGMENT_DESCRIPTOR));
+
+ GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE + EXCEPTION_TSS_SIZE + 7) & ~7; // 8 bytes aligned
+ mGdtBufferSize = GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
+ //
+ // IA32 Stack Guard need use task switch to switch stack that need
+    // IA32 Stack Guard uses a task switch to change stacks, which requires
+    // writing the GDT and TSS, so AllocateCodePages() cannot be used here
+ //
+ GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (mGdtBufferSize));
+ ASSERT (GdtTssTables != NULL);
+ mGdtBuffer = (UINTN)GdtTssTables;
+ GdtTableStepSize = GdtTssTableSize;
+
+ for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
+ CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE + EXCEPTION_TSS_SIZE);
+ //
+ // Fixup TSS descriptors
+ //
+ TssBase = (UINTN)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1);
+ GdtDescriptor = (IA32_SEGMENT_DESCRIPTOR *)(TssBase) - 2;
+ GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;
+ GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);
+ GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);
+
+ TssBase += TSS_SIZE;
+ GdtDescriptor++;
+ GdtDescriptor->Bits.BaseLow = (UINT16)TssBase;
+ GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16);
+ GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24);
+ //
+ // Fixup TSS segments
+ //
+ // ESP as known good stack
+ //
+ *(UINTN *)(TssBase + TSS_IA32_ESP_OFFSET) = mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize;
+ *(UINT32 *)(TssBase + TSS_IA32_CR3_OFFSET) = Cr3;
+
+ //
+ // Setup ShadowStack for stack switch
+ //
+ if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
+ InterruptShadowStack = (UINTN)(mSmmStackArrayBase + mSmmStackSize + EFI_PAGES_TO_SIZE (1) - sizeof(UINT64) + (mSmmStackSize + mSmmShadowStackSize) * Index);
+ *(UINT32 *)(TssBase + TSS_IA32_SSP_OFFSET) = (UINT32)InterruptShadowStack;
+ }
+ }
+ } else {
+ //
+    // Just use the original table: allocate pages and copy it here to make sure the GDTs are covered by page memory.
+ //
+ GdtTssTableSize = gcSmiGdtr.Limit + 1;
+ mGdtBufferSize = GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
+ GdtTssTables = (UINT8*)AllocateCodePages (EFI_SIZE_TO_PAGES (mGdtBufferSize));
+ ASSERT (GdtTssTables != NULL);
+ mGdtBuffer = (UINTN)GdtTssTables;
+ GdtTableStepSize = GdtTssTableSize;
+
+ for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
+ CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1);
+ }
+ }
+
+ *GdtStepSize = GdtTableStepSize;
+ return GdtTssTables;
+}
+
+/**
+  Transfer the AP to a safe hlt-loop after it finishes restoring CPU features on the S3 path.
+
+ @param[in] ApHltLoopCode The address of the safe hlt-loop function.
+ @param[in] TopOfStack A pointer to the new stack to use for the ApHltLoopCode.
+ @param[in] NumberToFinishAddress Address of Semaphore of APs finish count.
+
+**/
+VOID
+TransferApToSafeState (
+ IN UINTN ApHltLoopCode,
+ IN UINTN TopOfStack,
+ IN UINTN NumberToFinishAddress
+ )
+{
+ SwitchStack (
+ (SWITCH_STACK_ENTRY_POINT)ApHltLoopCode,
+ (VOID *)NumberToFinishAddress,
+ NULL,
+ (VOID *)TopOfStack
+ );
+ //
+ // It should never reach here
+ //
+ ASSERT (FALSE);
+}
+
+/**
+ Initialize the shadow stack related data structure.
+
+ @param CpuIndex The index of CPU.
+ @param ShadowStack The bottom of the shadow stack for this CPU.
+**/
+VOID
+InitShadowStack (
+ IN UINTN CpuIndex,
+ IN VOID *ShadowStack
+ )
+{
+ UINTN SmmShadowStackSize;
+
+ if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
+ SmmShadowStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize)));
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ SmmShadowStackSize += EFI_PAGES_TO_SIZE (2);
+ }
+ mCetPl0Ssp = (UINT32)((UINTN)ShadowStack + SmmShadowStackSize - sizeof(UINT64));
+ PatchInstructionX86 (mPatchCetPl0Ssp, mCetPl0Ssp, 4);
+ DEBUG ((DEBUG_INFO, "mCetPl0Ssp - 0x%x\n", mCetPl0Ssp));
+ DEBUG ((DEBUG_INFO, "ShadowStack - 0x%x\n", ShadowStack));
+ DEBUG ((DEBUG_INFO, " SmmShadowStackSize - 0x%x\n", SmmShadowStackSize));
+
+ if (FeaturePcdGet (PcdCpuSmmStackGuard)) {
+ mCetInterruptSsp = (UINT32)((UINTN)ShadowStack + EFI_PAGES_TO_SIZE(1) - sizeof(UINT64));
+ PatchInstructionX86 (mPatchCetInterruptSsp, mCetInterruptSsp, 4);
+ DEBUG ((DEBUG_INFO, "mCetInterruptSsp - 0x%x\n", mCetInterruptSsp));
+ }
+ }
+}
+
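[Note] A short usage sketch implied by InitGdt()'s doc comment above; Cr3 and CpuIndex
are assumed inputs, and IA32_DESCRIPTOR is MdePkg's GDTR/IDTR image type:

    UINTN            GdtStepSize;
    VOID             *GdtBase;
    IA32_DESCRIPTOR  Gdtr;

    GdtBase    = InitGdt (Cr3, &GdtStepSize);
    Gdtr.Base  = (UINTN)GdtBase + GdtStepSize * CpuIndex;  // this CPU's copy
    Gdtr.Limit = gcSmiGdtr.Limit;  // already enlarged when stack guard is on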
diff --git a/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmInit.nasm b/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmInit.nasm
new file mode 100644
index 000000000..b5e77a1a5
--- /dev/null
+++ b/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmInit.nasm
@@ -0,0 +1,96 @@
+;------------------------------------------------------------------------------ ;
+; Copyright (c) 2016 - 2018, Intel Corporation. All rights reserved.<BR>
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+; Module Name:
+;
+; SmmInit.nasm
+;
+; Abstract:
+;
+; Functions for relocating SMBASE's for all processors
+;
+;-------------------------------------------------------------------------------
+
+%include "StuffRsbNasm.inc"
+
+extern ASM_PFX(SmmInitHandler)
+extern ASM_PFX(mRebasedFlag)
+extern ASM_PFX(mSmmRelocationOriginalAddress)
+
+global ASM_PFX(gPatchSmmCr3)
+global ASM_PFX(gPatchSmmCr4)
+global ASM_PFX(gPatchSmmCr0)
+global ASM_PFX(gPatchSmmInitStack)
+global ASM_PFX(gcSmiInitGdtr)
+global ASM_PFX(gcSmmInitSize)
+global ASM_PFX(gcSmmInitTemplate)
+
+%define PROTECT_MODE_CS 0x8
+%define PROTECT_MODE_DS 0x20
+
+ SECTION .text
+
+ASM_PFX(gcSmiInitGdtr):
+ DW 0
+ DQ 0
+
+global ASM_PFX(SmmStartup)
+
+BITS 16
+ASM_PFX(SmmStartup):
+ mov eax, 0x80000001 ; read capability
+ cpuid
+ mov ebx, edx ; rdmsr will change edx. keep it in ebx.
+ and ebx, BIT20 ; extract NX capability bit
+ shr ebx, 9 ; shift bit to IA32_EFER.NXE[BIT11] position
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(gPatchSmmCr3):
+ mov cr3, eax
+o32 lgdt [cs:ebp + (ASM_PFX(gcSmiInitGdtr) - ASM_PFX(SmmStartup))]
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(gPatchSmmCr4):
+ mov cr4, eax
+ mov ecx, 0xc0000080 ; IA32_EFER MSR
+ rdmsr
+ or eax, ebx ; set NXE bit if NX is available
+ wrmsr
+ mov eax, strict dword 0 ; source operand will be patched
+ASM_PFX(gPatchSmmCr0):
+ mov di, PROTECT_MODE_DS
+ mov cr0, eax
+ jmp PROTECT_MODE_CS : dword @32bit
+
+BITS 32
+@32bit:
+ mov ds, edi
+ mov es, edi
+ mov fs, edi
+ mov gs, edi
+ mov ss, edi
+ mov esp, strict dword 0 ; source operand will be patched
+ASM_PFX(gPatchSmmInitStack):
+ call ASM_PFX(SmmInitHandler)
+ StuffRsb32
+ rsm
+
+BITS 16
+ASM_PFX(gcSmmInitTemplate):
+ mov ebp, ASM_PFX(SmmStartup)
+ sub ebp, 0x30000
+ jmp ebp
+
+ASM_PFX(gcSmmInitSize): DW $ - ASM_PFX(gcSmmInitTemplate)
+
+BITS 32
+global ASM_PFX(SmmRelocationSemaphoreComplete)
+ASM_PFX(SmmRelocationSemaphoreComplete):
+ push eax
+ mov eax, [ASM_PFX(mRebasedFlag)]
+ mov byte [eax], 1
+ pop eax
+ jmp [ASM_PFX(mSmmRelocationOriginalAddress)]
+
+global ASM_PFX(PiSmmCpuSmmInitFixupAddress)
+ASM_PFX(PiSmmCpuSmmInitFixupAddress):
+ ret
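[Note] The "and ebx, BIT20" / "shr ebx, 9" pair in SmmStartup above moves
CPUID.80000001h:EDX[20] (the NX feature bit) onto IA32_EFER bit 11 (NXE), since
20 - 9 = 11; the later "or eax, ebx" therefore sets NXE only when NX is actually
supported. In C terms (CpuidEdx and Efer are illustrative locals, not names from the
file):

    UINT32  CpuidEdx;   // EDX from CPUID leaf 0x80000001
    UINT64  Efer;       // IA32_EFER, MSR 0xC0000080

    Efer |= (CpuidEdx & BIT20) >> 9;   // BIT20 >> 9 == BIT11 == IA32_EFER.NXE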
diff --git a/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c b/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c
new file mode 100644
index 000000000..e83031fc0
--- /dev/null
+++ b/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.c
@@ -0,0 +1,74 @@
+/** @file
+IA-32 processor specific functions to enable SMM profile.
+
+Copyright (c) 2012 - 2016, Intel Corporation. All rights reserved.<BR>
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+#include "SmmProfileInternal.h"
+
+/**
+ Create SMM page table for S3 path.
+
+**/
+VOID
+InitSmmS3Cr3 (
+ VOID
+ )
+{
+ mSmmS3ResumeState->SmmS3Cr3 = Gen4GPageTable (TRUE);
+
+ return ;
+}
+
+/**
+  Allocate pages for creating 4KB pages out of 2MB pages when a page fault happens.
+ 32-bit firmware does not need it.
+
+**/
+VOID
+InitPagesForPFHandler (
+ VOID
+ )
+{
+}
+
+/**
+  Update the page table to map the memory correctly so that the instruction
+  which caused the page fault can execute successfully. It also saves the original
+  page table to be restored in the single-step exception. 32-bit firmware does not need it.
+
+ @param PageTable PageTable Address.
+ @param PFAddress The memory address which caused page fault exception.
+ @param CpuIndex The index of the processor.
+ @param ErrorCode The Error code of exception.
+  @param IsValidPFAddress The flag indicates if SMM profile data needs to be added.
+
+**/
+VOID
+RestorePageTableAbove4G (
+ UINT64 *PageTable,
+ UINT64 PFAddress,
+ UINTN CpuIndex,
+ UINTN ErrorCode,
+ BOOLEAN *IsValidPFAddress
+ )
+{
+}
+
+/**
+ Clear TF in FLAGS.
+
+ @param SystemContext A pointer to the processor context when
+ the interrupt occurred on the processor.
+
+**/
+VOID
+ClearTrapFlag (
+ IN OUT EFI_SYSTEM_CONTEXT SystemContext
+ )
+{
+ SystemContext.SystemContextIa32->Eflags &= (UINTN) ~BIT8;
+}
diff --git a/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.h b/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.h
new file mode 100644
index 000000000..533e8561b
--- /dev/null
+++ b/roms/edk2/UefiCpuPkg/PiSmmCpuDxeSmm/Ia32/SmmProfileArch.h
@@ -0,0 +1,91 @@
+/** @file
+IA-32 processor specific header file to enable SMM profile.
+
+Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.<BR>
+SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#ifndef _SMM_PROFILE_ARCH_H_
+#define _SMM_PROFILE_ARCH_H_
+
+#pragma pack (1)
+
+typedef struct _MSR_DS_AREA_STRUCT {
+ UINT32 BTSBufferBase;
+ UINT32 BTSIndex;
+ UINT32 BTSAbsoluteMaximum;
+ UINT32 BTSInterruptThreshold;
+ UINT32 PEBSBufferBase;
+ UINT32 PEBSIndex;
+ UINT32 PEBSAbsoluteMaximum;
+ UINT32 PEBSInterruptThreshold;
+ UINT32 PEBSCounterReset[4];
+ UINT32 Reserved;
+} MSR_DS_AREA_STRUCT;
+
+typedef struct _BRANCH_TRACE_RECORD {
+ UINT32 LastBranchFrom;
+ UINT32 LastBranchTo;
+ UINT32 Rsvd0 : 4;
+ UINT32 BranchPredicted : 1;
+ UINT32 Rsvd1 : 27;
+} BRANCH_TRACE_RECORD;
+
+typedef struct _PEBS_RECORD {
+ UINT32 Eflags;
+ UINT32 LinearIP;
+ UINT32 Eax;
+ UINT32 Ebx;
+ UINT32 Ecx;
+ UINT32 Edx;
+ UINT32 Esi;
+ UINT32 Edi;
+ UINT32 Ebp;
+ UINT32 Esp;
+} PEBS_RECORD;
+
+#pragma pack ()
+
+#define PHYSICAL_ADDRESS_MASK ((1ull << 32) - SIZE_4KB)
+
+/**
+  Update the page table to map the memory correctly so that the instruction
+  which caused the page fault can execute successfully. It also saves the original
+  page table to be restored in the single-step exception. 32-bit firmware does not need it.
+
+ @param PageTable PageTable Address.
+ @param PFAddress The memory address which caused page fault exception.
+ @param CpuIndex The index of the processor.
+ @param ErrorCode The Error code of exception.
+  @param IsValidPFAddress The flag indicates if SMM profile data needs to be added.
+
+**/
+VOID
+RestorePageTableAbove4G (
+ UINT64 *PageTable,
+ UINT64 PFAddress,
+ UINTN CpuIndex,
+ UINTN ErrorCode,
+ BOOLEAN *IsValidPFAddress
+ );
+
+/**
+ Create SMM page table for S3 path.
+
+**/
+VOID
+InitSmmS3Cr3 (
+ VOID
+ );
+
+/**
+  Allocate pages for creating 4KB pages out of 2MB pages when a page fault happens.
+
+**/
+VOID
+InitPagesForPFHandler (
+ VOID
+ );
+
+#endif // _SMM_PROFILE_ARCH_H_
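
[Note] PHYSICAL_ADDRESS_MASK above evaluates to 0xFFFFF000: (1ull << 32) - SIZE_4KB =
0x100000000 - 0x1000, i.e. bits 31:12 of a 4 KiB-aligned page frame, which is what a
32-bit PTE's address field holds. An illustrative compile-time check, not part of the
original header (STATIC_ASSERT assumes a Base.h recent enough to define it):

    STATIC_ASSERT (
      ((1ull << 32) - SIZE_4KB) == 0xFFFFF000ULL,
      "PHYSICAL_ADDRESS_MASK selects page-frame bits 31:12"
      );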