author:    Angelos Mouzakitis <a.mouzakitis@virtualopensystems.com>  2023-10-10 14:33:42 +0000
committer: Angelos Mouzakitis <a.mouzakitis@virtualopensystems.com>  2023-10-10 14:33:42 +0000
commit:    af1a266670d040d2f4083ff309d732d648afba2a (patch)
tree:      2fc46203448ddcc6f81546d379abfaeb323575e9 /roms/edk2/MdePkg/Library/BaseLib/Arm
parent:    e02cda008591317b1625707ff8e115a4841aa889 (diff)
Change-Id: Iaf8d18082d3991dec7c0ebbea540f092188eb4ec
Diffstat (limited to 'roms/edk2/MdePkg/Library/BaseLib/Arm')
20 files changed, 1240 insertions, 0 deletions
diff --git a/roms/edk2/MdePkg/Library/BaseLib/Arm/CpuBreakpoint.S b/roms/edk2/MdePkg/Library/BaseLib/Arm/CpuBreakpoint.S
new file mode 100644
index 000000000..3bc2246a8
--- /dev/null
+++ b/roms/edk2/MdePkg/Library/BaseLib/Arm/CpuBreakpoint.S
@@ -0,0 +1,30 @@
+#------------------------------------------------------------------------------
+#
+# CpuBreakpoint() for ARM
+#
+# Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
+# Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+#------------------------------------------------------------------------------
+
+.text
+.p2align 2
+GCC_ASM_EXPORT(CpuBreakpoint)
+
+#/**
+# Generates a breakpoint on the CPU.
+#
+# Generates a breakpoint on the CPU. The breakpoint must be implemented such
+# that code can resume normal execution after the breakpoint.
+#
+#**/
+#VOID
+#EFIAPI
+#CpuBreakpoint (
+# VOID
+# );
+#
+ASM_PFX(CpuBreakpoint):
+ swi 0xdbdbdb
+ bx lr
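
For context, a minimal usage sketch (not part of this commit): `MyDebugTrap` is a hypothetical helper that traps into an attached debugger when a check fails. Whether execution actually resumes after the `swi` depends on the debug agent handling the 0xdbdbdb immediate.

```c
#include <Base.h>
#include <Library/BaseLib.h>

//
// Hypothetical debug helper (illustration only): trap into an attached
// debugger when a check fails. CpuBreakpoint() executes "swi 0xdbdbdb"
// on ARM and is expected to be resumable by the debug agent.
//
VOID
MyDebugTrap (
  IN BOOLEAN  Condition
  )
{
  if (!Condition) {
    CpuBreakpoint ();
  }
}
```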
diff --git a/roms/edk2/MdePkg/Library/BaseLib/Arm/CpuBreakpoint.asm b/roms/edk2/MdePkg/Library/BaseLib/Arm/CpuBreakpoint.asm
new file mode 100644
index 000000000..baf6d2783
--- /dev/null
+++ b/roms/edk2/MdePkg/Library/BaseLib/Arm/CpuBreakpoint.asm
@@ -0,0 +1,35 @@
+;------------------------------------------------------------------------------
+;
+; CpuBreakpoint() for ARM
+;
+; Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
+; Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+;------------------------------------------------------------------------------
+
+ EXPORT CpuBreakpoint
+
+; Force ARM mode for this section, as MSFT assembler defaults to THUMB
+ AREA Cpu_Breakpoint, CODE, READONLY, ARM
+
+ ARM
+
+;/**
+; Generates a breakpoint on the CPU.
+;
+; Generates a breakpoint on the CPU. The breakpoint must be implemented such
+; that code can resume normal execution after the breakpoint.
+;
+;**/
+;VOID
+;EFIAPI
+;CpuBreakpoint (
+; VOID
+; );
+;
+CpuBreakpoint
+ swi 0xdbdbdb
+ bx lr
+
+ END
diff --git a/roms/edk2/MdePkg/Library/BaseLib/Arm/CpuPause.asm b/roms/edk2/MdePkg/Library/BaseLib/Arm/CpuPause.asm
new file mode 100644
index 000000000..a16223c7f
--- /dev/null
+++ b/roms/edk2/MdePkg/Library/BaseLib/Arm/CpuPause.asm
@@ -0,0 +1,35 @@
+;------------------------------------------------------------------------------
+;
+; CpuPause() for ARM
+;
+; Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
+; Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+;------------------------------------------------------------------------------
+
+ EXPORT CpuPause
+ AREA cpu_pause, CODE, READONLY
+
+;/**
+; Requests CPU to pause for a short period of time.
+;
+; Requests CPU to pause for a short period of time. Typically used in MP
+; systems to prevent memory starvation while waiting for a spin lock.
+;
+;**/
+;VOID
+;EFIAPI
+;CpuPause (
+; VOID
+; );
+;
+CpuPause
+ NOP
+ NOP
+ NOP
+ NOP
+ NOP
+ BX LR
+
+ END
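
A minimal sketch of the spin-wait pattern the comment above describes, assuming only the public BaseLib API; `WaitForFlag` is a hypothetical helper:

```c
#include <Base.h>
#include <Library/BaseLib.h>

//
// Hypothetical spin-wait on a flag published by another processor.
// On ARM, CpuPause() is just the NOP sled above; it only hints that
// the core is busy-waiting.
//
VOID
WaitForFlag (
  IN volatile UINT32  *Flag
  )
{
  while (*Flag == 0) {
    CpuPause ();
  }
}
```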
diff --git a/roms/edk2/MdePkg/Library/BaseLib/Arm/DisableInterrupts.S b/roms/edk2/MdePkg/Library/BaseLib/Arm/DisableInterrupts.S
new file mode 100644
index 000000000..1f3fe7cc9
--- /dev/null
+++ b/roms/edk2/MdePkg/Library/BaseLib/Arm/DisableInterrupts.S
@@ -0,0 +1,29 @@
+#------------------------------------------------------------------------------
+#
+# DisableInterrupts() for ARM
+#
+# Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
+# Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+#------------------------------------------------------------------------------
+
+.text
+.p2align 2
+GCC_ASM_EXPORT(DisableInterrupts)
+
+#/**
+# Disables CPU interrupts.
+#
+#**/
+#VOID
+#EFIAPI
+#DisableInterrupts (
+# VOID
+# );
+#
+ASM_PFX(DisableInterrupts):
+ mrs R0,CPSR
+ orr R0,R0,#0x80 @Disable IRQ interrupts
+ msr CPSR_c,R0
+ bx LR
diff --git a/roms/edk2/MdePkg/Library/BaseLib/Arm/DisableInterrupts.asm b/roms/edk2/MdePkg/Library/BaseLib/Arm/DisableInterrupts.asm
new file mode 100644
index 000000000..e4e90b5f7
--- /dev/null
+++ b/roms/edk2/MdePkg/Library/BaseLib/Arm/DisableInterrupts.asm
@@ -0,0 +1,31 @@
+;------------------------------------------------------------------------------
+;
+; DisableInterrupts() for ARM
+;
+; Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
+; Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+;------------------------------------------------------------------------------
+
+ EXPORT DisableInterrupts
+
+ AREA Interrupt_disable, CODE, READONLY
+
+;/**
+; Disables CPU interrupts.
+;
+;**/
+;VOID
+;EFIAPI
+;DisableInterrupts (
+; VOID
+; );
+;
+DisableInterrupts
+ MRS R0,CPSR
+ ORR R0,R0,#0x80 ;Disable IRQ interrupts
+ MSR CPSR_c,R0
+ BX LR
+
+ END
diff --git a/roms/edk2/MdePkg/Library/BaseLib/Arm/EnableInterrupts.S b/roms/edk2/MdePkg/Library/BaseLib/Arm/EnableInterrupts.S
new file mode 100644
index 000000000..5de3b0945
--- /dev/null
+++ b/roms/edk2/MdePkg/Library/BaseLib/Arm/EnableInterrupts.S
@@ -0,0 +1,30 @@
+#------------------------------------------------------------------------------
+#
+# EnableInterrupts() for ARM
+#
+# Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
+# Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+#------------------------------------------------------------------------------
+
+.text
+.p2align 2
+GCC_ASM_EXPORT(EnableInterrupts)
+
+
+#/**
+# Enables CPU interrupts.
+#
+#**/
+#VOID
+#EFIAPI
+#EnableInterrupts (
+# VOID
+# );
+#
+ASM_PFX(EnableInterrupts):
+ mrs R0,CPSR
+ bic R0,R0,#0x80 @Enable IRQ interrupts
+ msr CPSR_c,R0
+ bx LR
diff --git a/roms/edk2/MdePkg/Library/BaseLib/Arm/EnableInterrupts.asm b/roms/edk2/MdePkg/Library/BaseLib/Arm/EnableInterrupts.asm
new file mode 100644
index 000000000..0fcf66f7d
--- /dev/null
+++ b/roms/edk2/MdePkg/Library/BaseLib/Arm/EnableInterrupts.asm
@@ -0,0 +1,31 @@
+;------------------------------------------------------------------------------
+;
+; EnableInterrupts() for ARM
+;
+; Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
+; Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+;------------------------------------------------------------------------------
+
+ EXPORT EnableInterrupts
+
+ AREA Interrupt_enable, CODE, READONLY
+
+;/**
+; Enables CPU interrupts.
+;
+;**/
+;VOID
+;EFIAPI
+;EnableInterrupts (
+; VOID
+; );
+;
+EnableInterrupts
+ MRS R0,CPSR
+ BIC R0,R0,#0x80 ;Enable IRQ interrupts
+ MSR CPSR_c,R0
+ BX LR
+
+ END
diff --git a/roms/edk2/MdePkg/Library/BaseLib/Arm/GetInterruptsState.S b/roms/edk2/MdePkg/Library/BaseLib/Arm/GetInterruptsState.S
new file mode 100644
index 000000000..d5ee7d0e2
--- /dev/null
+++ b/roms/edk2/MdePkg/Library/BaseLib/Arm/GetInterruptsState.S
@@ -0,0 +1,37 @@
+#------------------------------------------------------------------------------
+#
+# GetInterruptState() function for ARM
+#
+# Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
+# Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+#------------------------------------------------------------------------------
+
+.text
+.p2align 2
+GCC_ASM_EXPORT (GetInterruptState)
+
+#/**
+# Retrieves the current CPU interrupt state.
+#
+# Returns TRUE if interrupts are currently enabled. Otherwise
+# returns FALSE.
+#
+# @retval TRUE CPU interrupts are enabled.
+# @retval FALSE CPU interrupts are disabled.
+#
+#**/
+#
+#BOOLEAN
+#EFIAPI
+#GetInterruptState (
+# VOID
+# );
+#
+ASM_PFX(GetInterruptState):
+ mrs R0, CPSR
+ tst R0, #0x80 @Check if IRQ is enabled.
+ moveq R0, #1
+ movne R0, #0
+ bx LR
diff --git a/roms/edk2/MdePkg/Library/BaseLib/Arm/GetInterruptsState.asm b/roms/edk2/MdePkg/Library/BaseLib/Arm/GetInterruptsState.asm
new file mode 100644
index 000000000..af7b9d6c6
--- /dev/null
+++ b/roms/edk2/MdePkg/Library/BaseLib/Arm/GetInterruptsState.asm
@@ -0,0 +1,39 @@
+;------------------------------------------------------------------------------
+;
+; GetInterruptState() function for ARM
+;
+; Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
+; Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+;------------------------------------------------------------------------------
+
+ EXPORT GetInterruptState
+
+ AREA Interrupt_enable, CODE, READONLY
+
+;/**
+; Retrieves the current CPU interrupt state.
+;
+; Returns TRUE if interrupts are currently enabled. Otherwise
+; returns FALSE.
+;
+; @retval TRUE CPU interrupts are enabled.
+; @retval FALSE CPU interrupts are disabled.
+;
+;**/
+;
+;BOOLEAN
+;EFIAPI
+;GetInterruptState (
+; VOID
+; );
+;
+GetInterruptState
+ MRS R0, CPSR
+ TST R0, #0x80 ;Check if IRQ is enabled.
+ MOVEQ R0, #1
+ MOVNE R0, #0
+ BX LR
+
+ END
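
Taken together with DisableInterrupts() and EnableInterrupts() above, this primitive supports the usual save/mask/restore pattern. A minimal sketch, assuming only the public BaseLib API; `DoWorkWithIrqsMasked` is a hypothetical helper:

```c
#include <Base.h>
#include <Library/BaseLib.h>

//
// Hypothetical critical section: capture the current interrupt state,
// mask IRQs, do the work, then re-enable only if interrupts were
// enabled on entry.
//
VOID
DoWorkWithIrqsMasked (
  VOID
  )
{
  BOOLEAN  InterruptState;

  InterruptState = GetInterruptState ();
  DisableInterrupts ();

  // ... touch state shared with an interrupt handler ...

  if (InterruptState) {
    EnableInterrupts ();
  }
}
```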
diff --git a/roms/edk2/MdePkg/Library/BaseLib/Arm/InternalSwitchStack.c b/roms/edk2/MdePkg/Library/BaseLib/Arm/InternalSwitchStack.c
new file mode 100644
index 000000000..3fe1125b6
--- /dev/null
+++ b/roms/edk2/MdePkg/Library/BaseLib/Arm/InternalSwitchStack.c
@@ -0,0 +1,73 @@
+/** @file
+ SwitchStack() function for ARM.
+
+ Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
+ Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "BaseLibInternals.h"
+
+/**
+ Transfers control to a function starting with a new stack.
+
+ This internal worker function transfers control to the function
+ specified by EntryPoint using the new stack specified by NewStack,
+ and passes in the parameters specified by Context1 and Context2.
+ Context1 and Context2 are optional and may be NULL.
+ The function EntryPoint must never return.
+
+ @param EntryPoint The pointer to the function to enter.
+ @param Context1 The first parameter to pass in.
+  @param Context2    The second parameter to pass in.
+  @param NewStack    The new location of the stack.
+
+**/
+VOID
+EFIAPI
+InternalSwitchStackAsm (
+ IN SWITCH_STACK_ENTRY_POINT EntryPoint,
+ IN VOID *Context1, OPTIONAL
+ IN VOID *Context2, OPTIONAL
+ IN VOID *NewStack
+ );
+
+
+/**
+ Transfers control to a function starting with a new stack.
+
+ Transfers control to the function specified by EntryPoint using the
+ new stack specified by NewStack, and passes in the parameters specified
+ by Context1 and Context2. Context1 and Context2 are optional and may
+ be NULL. The function EntryPoint must never return.
+ Marker will be ignored on IA-32, x64, and EBC.
+ IPF CPUs expect one additional parameter of type VOID * that specifies
+ the new backing store pointer.
+
+ If EntryPoint is NULL, then ASSERT().
+ If NewStack is NULL, then ASSERT().
+
+ @param EntryPoint A pointer to function to call with the new stack.
+ @param Context1 A pointer to the context to pass into the EntryPoint
+ function.
+ @param Context2 A pointer to the context to pass into the EntryPoint
+ function.
+ @param NewStack A pointer to the new stack to use for the EntryPoint
+ function.
+ @param Marker A VA_LIST marker for the variable argument list.
+
+**/
+VOID
+EFIAPI
+InternalSwitchStack (
+ IN SWITCH_STACK_ENTRY_POINT EntryPoint,
+ IN VOID *Context1, OPTIONAL
+ IN VOID *Context2, OPTIONAL
+ IN VOID *NewStack,
+ IN VA_LIST Marker
+ )
+
+{
+ InternalSwitchStackAsm (EntryPoint, Context1, Context2, NewStack);
+}
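
A sketch of how the public SwitchStack() API backed by this worker might be called; `NewStackEntry` and `RunOnNewStack` are hypothetical, and the stack-top arithmetic assumes a full-descending ARM stack:

```c
#include <Base.h>
#include <Library/BaseLib.h>

//
// Hypothetical entry point. Per the contract above it receives
// Context1/Context2 and must never return.
//
VOID
EFIAPI
NewStackEntry (
  IN VOID  *Context1,
  IN VOID  *Context2
  )
{
  for (;;) {
    CpuDeadLoop ();
  }
}

//
// Hypothetical caller: ARM stacks grow down, so pass the highest
// address of the allocation as NewStack.
//
VOID
RunOnNewStack (
  IN VOID   *StackBase,
  IN UINTN  StackSize
  )
{
  SwitchStack (
    NewStackEntry,
    NULL,
    NULL,
    (VOID *)((UINTN)StackBase + StackSize)
    );
}
```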
diff --git a/roms/edk2/MdePkg/Library/BaseLib/Arm/Math64.S b/roms/edk2/MdePkg/Library/BaseLib/Arm/Math64.S
new file mode 100755
index 000000000..ec12b1015
--- /dev/null
+++ b/roms/edk2/MdePkg/Library/BaseLib/Arm/Math64.S
@@ -0,0 +1,263 @@
+#------------------------------------------------------------------------------
+#
+# Replacement for Math64.c that is coded to use older GCC intrinsics.
+# Doing this reduces the number of intrinsics that are required when
+# you port to a new version of gcc.
+#
+# Need to split this into multiple files to size-optimize the image.
+#
+# Copyright (c) 2009 - 2010, Apple Inc. All rights reserved.<BR>
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+#------------------------------------------------------------------------------
+
+ .text
+ .align 2
+ GCC_ASM_EXPORT(InternalMathLShiftU64)
+
+ASM_PFX(InternalMathLShiftU64):
+ stmfd sp!, {r4, r5, r6}
+ mov r6, r1
+ rsb ip, r2, #32
+ mov r4, r6, asl r2
+ subs r1, r2, #32
+ orr r4, r4, r0, lsr ip
+ mov r3, r0, asl r2
+ movpl r4, r0, asl r1
+ mov r5, r0
+ mov r0, r3
+ mov r1, r4
+ ldmfd sp!, {r4, r5, r6}
+ bx lr
+
+ .align 2
+ GCC_ASM_EXPORT(InternalMathRShiftU64)
+
+ASM_PFX(InternalMathRShiftU64):
+ stmfd sp!, {r4, r5, r6}
+ mov r5, r0
+ rsb ip, r2, #32
+ mov r3, r5, lsr r2
+ subs r0, r2, #32
+ orr r3, r3, r1, asl ip
+ mov r4, r1, lsr r2
+ movpl r3, r1, lsr r0
+ mov r6, r1
+ mov r0, r3
+ mov r1, r4
+ ldmfd sp!, {r4, r5, r6}
+ bx lr
+
+ .align 2
+ GCC_ASM_EXPORT(InternalMathARShiftU64)
+
+ASM_PFX(InternalMathARShiftU64):
+ stmfd sp!, {r4, r5, r6}
+ mov r5, r0
+ rsb ip, r2, #32
+ mov r3, r5, lsr r2
+ subs r0, r2, #32
+ orr r3, r3, r1, asl ip
+ mov r4, r1, asr r2
+ movpl r3, r1, asr r0
+ mov r6, r1
+ mov r0, r3
+ mov r1, r4
+ ldmfd sp!, {r4, r5, r6}
+ bx lr
+
+ .align 2
+ GCC_ASM_EXPORT(InternalMathLRotU64)
+
+ASM_PFX(InternalMathLRotU64):
+ stmfd sp!, {r4, r5, r6, r7, lr}
+ add r7, sp, #12
+ mov r6, r1
+ rsb ip, r2, #32
+ mov r4, r6, asl r2
+ rsb lr, r2, #64
+ subs r1, r2, #32
+ orr r4, r4, r0, lsr ip
+ mov r3, r0, asl r2
+ movpl r4, r0, asl r1
+ sub ip, r2, #32
+ mov r5, r0
+ mov r0, r0, lsr lr
+ rsbs r2, r2, #32
+ orr r0, r0, r6, asl ip
+ mov r1, r6, lsr lr
+ movpl r0, r6, lsr r2
+ orr r1, r1, r4
+ orr r0, r0, r3
+ ldmfd sp!, {r4, r5, r6, r7, pc}
+
+
+ .align 2
+ GCC_ASM_EXPORT(InternalMathRRotU64)
+
+ASM_PFX(InternalMathRRotU64):
+ stmfd sp!, {r4, r5, r6, r7, lr}
+ add r7, sp, #12
+ mov r5, r0
+ rsb ip, r2, #32
+ mov r3, r5, lsr r2
+ rsb lr, r2, #64
+ subs r0, r2, #32
+ orr r3, r3, r1, asl ip
+ mov r4, r1, lsr r2
+ movpl r3, r1, lsr r0
+ sub ip, r2, #32
+ mov r6, r1
+ mov r1, r1, asl lr
+ rsbs r2, r2, #32
+ orr r1, r1, r5, lsr ip
+ mov r0, r5, asl lr
+ movpl r1, r5, asl r2
+ orr r0, r0, r3
+ orr r1, r1, r4
+ ldmfd sp!, {r4, r5, r6, r7, pc}
+
+ .align 2
+ GCC_ASM_EXPORT(InternalMathMultU64x32)
+
+ASM_PFX(InternalMathMultU64x32):
+ stmfd sp!, {r7, lr}
+ add r7, sp, #0
+ mov r3, #0
+ mov ip, r0
+ mov lr, r1
+ umull r0, r1, ip, r2
+ mla r1, lr, r2, r1
+ mla r1, ip, r3, r1
+ ldmfd sp!, {r7, pc}
+
+ .align 2
+ GCC_ASM_EXPORT(InternalMathMultU64x64)
+
+ASM_PFX(InternalMathMultU64x64):
+ stmfd sp!, {r7, lr}
+ add r7, sp, #0
+ mov ip, r0
+ mov lr, r1
+ umull r0, r1, ip, r2
+ mla r1, lr, r2, r1
+ mla r1, ip, r3, r1
+ ldmfd sp!, {r7, pc}
+
+ .align 2
+ GCC_ASM_EXPORT(InternalMathDivU64x32)
+
+ASM_PFX(InternalMathDivU64x32):
+ stmfd sp!, {r7, lr}
+ add r7, sp, #0
+ mov r3, #0
+ bl ASM_PFX(__udivdi3)
+ ldmfd sp!, {r7, pc}
+
+
+ .align 2
+ GCC_ASM_EXPORT(InternalMathModU64x32)
+
+ASM_PFX(InternalMathModU64x32):
+ stmfd sp!, {r7, lr}
+ add r7, sp, #0
+ mov r3, #0
+ bl ASM_PFX(__umoddi3)
+ ldmfd sp!, {r7, pc}
+
+
+ .align 2
+ GCC_ASM_EXPORT(InternalMathDivRemU64x32)
+
+ASM_PFX(InternalMathDivRemU64x32):
+ stmfd sp!, {r4, r5, r6, r7, lr}
+ add r7, sp, #12
+ stmfd sp!, {r10, r11}
+ subs r6, r3, #0
+ mov r10, r0
+ mov r11, r1
+ moveq r4, r2
+ moveq r5, #0
+ beq L22
+ mov r4, r2
+ mov r5, #0
+ mov r3, #0
+ bl ASM_PFX(__umoddi3)
+ str r0, [r6, #0]
+L22:
+ mov r0, r10
+ mov r1, r11
+ mov r2, r4
+ mov r3, r5
+ bl ASM_PFX(__udivdi3)
+ ldmfd sp!, {r10, r11}
+ ldmfd sp!, {r4, r5, r6, r7, pc}
+
+
+ .align 2
+ GCC_ASM_EXPORT(InternalMathDivRemU64x64)
+
+ASM_PFX(InternalMathDivRemU64x64):
+ stmfd sp!, {r4, r5, r6, r7, lr}
+ add r7, sp, #12
+ stmfd sp!, {r10, r11}
+ ldr r6, [sp, #28]
+ mov r4, r0
+ cmp r6, #0
+ mov r5, r1
+ mov r10, r2
+ mov r11, r3
+ beq L26
+ bl ASM_PFX(__umoddi3)
+ stmia r6, {r0-r1}
+L26:
+ mov r0, r4
+ mov r1, r5
+ mov r2, r10
+ mov r3, r11
+ bl ASM_PFX(__udivdi3)
+ ldmfd sp!, {r10, r11}
+ ldmfd sp!, {r4, r5, r6, r7, pc}
+
+
+ .align 2
+ GCC_ASM_EXPORT(InternalMathDivRemS64x64)
+
+ASM_PFX(InternalMathDivRemS64x64):
+ stmfd sp!, {r4, r5, r6, r7, lr}
+ add r7, sp, #12
+ stmfd sp!, {r10, r11}
+ ldr r6, [sp, #28]
+ mov r4, r0
+ cmp r6, #0
+ mov r5, r1
+ mov r10, r2
+ mov r11, r3
+ beq L30
+ bl ASM_PFX(__moddi3)
+ stmia r6, {r0-r1}
+L30:
+ mov r0, r4
+ mov r1, r5
+ mov r2, r10
+ mov r3, r11
+ bl ASM_PFX(__divdi3)
+ ldmfd sp!, {r10, r11}
+ ldmfd sp!, {r4, r5, r6, r7, pc}
+
+
+ .align 2
+ GCC_ASM_EXPORT(InternalMathSwapBytes64)
+
+ASM_PFX(InternalMathSwapBytes64):
+ stmfd sp!, {r4, r5, r7, lr}
+ mov r5, r1
+ bl ASM_PFX(SwapBytes32)
+ mov r4, r0
+ mov r0, r5
+ bl ASM_PFX(SwapBytes32)
+ mov r1, r4
+ ldmfd sp!, {r4, r5, r7, pc}
+
+
+ASM_FUNCTION_REMOVE_IF_UNREFERENCED
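
As a reading aid, the divide-with-remainder worker above has roughly the following C shape (a sketch of its semantics, not the Math64.c it replaces); note how the remainder store is skipped when the pointer is NULL, mirroring the `beq L22` path:

```c
#include <Base.h>

//
// Reference semantics for InternalMathDivRemU64x32 above: return the
// quotient (__udivdi3) and store the remainder (__umoddi3) only when
// the caller passed a non-NULL pointer.
//
UINT64
SketchDivRemU64x32 (
  IN  UINT64  Dividend,
  IN  UINT32  Divisor,
  OUT UINT32  *Remainder  OPTIONAL
  )
{
  if (Remainder != NULL) {
    *Remainder = (UINT32)(Dividend % Divisor);
  }
  return Dividend / Divisor;
}
```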
diff --git a/roms/edk2/MdePkg/Library/BaseLib/Arm/MemoryFence.S b/roms/edk2/MdePkg/Library/BaseLib/Arm/MemoryFence.S
new file mode 100644
index 000000000..8bf1a7505
--- /dev/null
+++ b/roms/edk2/MdePkg/Library/BaseLib/Arm/MemoryFence.S
@@ -0,0 +1,33 @@
+##------------------------------------------------------------------------------
+#
+# MemoryFence() for ARM
+#
+# Copyright (c) 2013, ARM Ltd. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+##------------------------------------------------------------------------------
+
+.text
+.p2align 2
+
+GCC_ASM_EXPORT(MemoryFence)
+
+
+#/**
+# Used to serialize load and store operations.
+#
+# All loads and stores that precede calls to this function are guaranteed to be
+# globally visible when this function returns.
+#
+#**/
+#VOID
+#EFIAPI
+#MemoryFence (
+# VOID
+# );
+#
+ASM_PFX(MemoryFence):
+ // System wide Data Memory Barrier.
+ dmb
+ bx lr
diff --git a/roms/edk2/MdePkg/Library/BaseLib/Arm/MemoryFence.asm b/roms/edk2/MdePkg/Library/BaseLib/Arm/MemoryFence.asm
new file mode 100644
index 000000000..e751584aa
--- /dev/null
+++ b/roms/edk2/MdePkg/Library/BaseLib/Arm/MemoryFence.asm
@@ -0,0 +1,33 @@
+;------------------------------------------------------------------------------
+;
+; MemoryFence() for ARM
+;
+; Copyright (c) 2013, ARM Ltd. All rights reserved.
+;
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+;------------------------------------------------------------------------------
+
+ EXPORT MemoryFence
+
+ AREA MemoryBarriers, CODE, READONLY
+
+;/**
+; Used to serialize load and store operations.
+;
+; All loads and stores that precede calls to this function are guaranteed to be
+; globally visible when this function returns.
+;
+;**/
+;VOID
+;EFIAPI
+;MemoryFence (
+; VOID
+; );
+;
+MemoryFence FUNCTION
+ dmb
+ bx lr
+ ENDFUNC
+
+ END
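
A minimal sketch of the ordering guarantee the DMB provides, assuming only the public BaseLib API; `PublishData` is a hypothetical producer:

```c
#include <Base.h>
#include <Library/BaseLib.h>

//
// Hypothetical producer for a payload/flag pair shared with another
// observer: the data memory barrier orders the store to Data before
// the store to Ready, so a consumer that sees Ready == 1 also sees
// the payload.
//
VOID
PublishData (
  IN OUT volatile UINT32  *Data,
  IN OUT volatile UINT32  *Ready
  )
{
  *Data = 0x1234;
  MemoryFence ();
  *Ready = 1;
}
```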
diff --git a/roms/edk2/MdePkg/Library/BaseLib/Arm/SetJumpLongJump.S b/roms/edk2/MdePkg/Library/BaseLib/Arm/SetJumpLongJump.S
new file mode 100644
index 000000000..82d94faf6
--- /dev/null
+++ b/roms/edk2/MdePkg/Library/BaseLib/Arm/SetJumpLongJump.S
@@ -0,0 +1,64 @@
+#------------------------------------------------------------------------------
+#
+# Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
+# Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+#------------------------------------------------------------------------------
+.text
+.p2align 2
+
+GCC_ASM_EXPORT(SetJump)
+GCC_ASM_EXPORT(InternalLongJump)
+
+#/**
+# Saves the current CPU context that can be restored with a call to LongJump() and returns 0.
+#
+# Saves the current CPU context in the buffer specified by JumpBuffer and returns 0. The initial
+# call to SetJump() must always return 0. Subsequent calls to LongJump() cause a non-zero
+# value to be returned by SetJump().
+#
+# If JumpBuffer is NULL, then ASSERT().
+# For IPF CPUs, if JumpBuffer is not aligned on a 16-byte boundary, then ASSERT().
+#
+# @param JumpBuffer A pointer to CPU context buffer.
+#
+#**/
+#
+#UINTN
+#EFIAPI
+#SetJump (
+# IN BASE_LIBRARY_JUMP_BUFFER *JumpBuffer // R0
+# );
+#
+ASM_PFX(SetJump):
+ mov r3, r13
+ stmia r0, {r3-r12,r14}
+ eor r0, r0, r0
+ bx lr
+
+#/**
+# Restores the CPU context that was saved with SetJump().
+#
+# Restores the CPU context from the buffer specified by JumpBuffer.
+# This function never returns to the caller.
+# Instead it resumes execution based on the state of JumpBuffer.
+#
+# @param JumpBuffer A pointer to CPU context buffer.
+# @param Value The value to return when the SetJump() context is restored.
+#
+#**/
+#VOID
+#EFIAPI
+#InternalLongJump (
+# IN BASE_LIBRARY_JUMP_BUFFER *JumpBuffer, // R0
+# IN UINTN Value // R1
+# );
+#
+ASM_PFX(InternalLongJump):
+ ldmia r0, {r3-r12,r14}
+ mov r13, r3
+ mov r0, r1
+ bx lr
+
+ASM_FUNCTION_REMOVE_IF_UNREFERENCED
diff --git a/roms/edk2/MdePkg/Library/BaseLib/Arm/SetJumpLongJump.asm b/roms/edk2/MdePkg/Library/BaseLib/Arm/SetJumpLongJump.asm
new file mode 100644
index 000000000..936f722be
--- /dev/null
+++ b/roms/edk2/MdePkg/Library/BaseLib/Arm/SetJumpLongJump.asm
@@ -0,0 +1,64 @@
+;------------------------------------------------------------------------------
+;
+; Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
+; Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+;------------------------------------------------------------------------------
+
+ EXPORT SetJump
+ EXPORT InternalLongJump
+
+ AREA BaseLib, CODE, READONLY
+
+;/**
+; Saves the current CPU context that can be restored with a call to LongJump() and returns 0.
+;
+; Saves the current CPU context in the buffer specified by JumpBuffer and returns 0. The initial
+; call to SetJump() must always return 0. Subsequent calls to LongJump() cause a non-zero
+; value to be returned by SetJump().
+;
+; If JumpBuffer is NULL, then ASSERT().
+; For IPF CPUs, if JumpBuffer is not aligned on a 16-byte boundary, then ASSERT().
+;
+; @param JumpBuffer A pointer to CPU context buffer.
+;
+;**/
+;
+;UINTN
+;EFIAPI
+;SetJump (
+; IN BASE_LIBRARY_JUMP_BUFFER *JumpBuffer // R0
+; )
+;
+SetJump
+ MOV R3, R13
+ STM R0, {R3-R12,R14}
+ EOR R0, R0
+ BX LR
+
+;/**
+; Restores the CPU context that was saved with SetJump().
+;
+; Restores the CPU context from the buffer specified by JumpBuffer.
+; This function never returns to the caller.
+; Instead it resumes execution based on the state of JumpBuffer.
+;
+; @param JumpBuffer A pointer to CPU context buffer.
+; @param Value The value to return when the SetJump() context is restored.
+;
+;**/
+;VOID
+;EFIAPI
+;InternalLongJump (
+; IN BASE_LIBRARY_JUMP_BUFFER *JumpBuffer, // R0
+; IN UINTN Value // R1
+; );
+;
+InternalLongJump
+ LDM R0, {R3-R12,R14}
+ MOV R13, R3
+ MOV R0, R1
+ BX LR
+
+ END
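
A minimal sketch of the save/restore contract, using the public SetJump()/LongJump() wrappers over these workers; `RunParser` and `FailDeepInsideParser` are hypothetical:

```c
#include <Base.h>
#include <Library/BaseLib.h>

BASE_LIBRARY_JUMP_BUFFER  mJumpBuffer;

//
// Hypothetical non-local exit: never returns; resumes at SetJump()
// with the (non-zero) value passed here.
//
VOID
FailDeepInsideParser (
  VOID
  )
{
  LongJump (&mJumpBuffer, 1);
}

UINTN
RunParser (
  VOID
  )
{
  if (SetJump (&mJumpBuffer) != 0) {
    return 0;                 // Arrived here via LongJump(); bail out.
  }
  FailDeepInsideParser ();    // May LongJump() back from any depth.
  return 1;                   // Unreachable in this sketch.
}
```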
diff --git a/roms/edk2/MdePkg/Library/BaseLib/Arm/SpeculationBarrier.S b/roms/edk2/MdePkg/Library/BaseLib/Arm/SpeculationBarrier.S
new file mode 100644
index 000000000..73f8105e3
--- /dev/null
+++ b/roms/edk2/MdePkg/Library/BaseLib/Arm/SpeculationBarrier.S
@@ -0,0 +1,33 @@
+##------------------------------------------------------------------------------
+#
+# SpeculationBarrier() for ARM
+#
+# Copyright (c) 2019, Linaro Ltd. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-2-Clause-Patent
+#
+##------------------------------------------------------------------------------
+
+.text
+.p2align 2
+
+GCC_ASM_EXPORT(SpeculationBarrier)
+
+
+#/**
+# Used as a barrier to stop speculative execution.
+#
+# Ensures that no later instruction will execute speculatively, until all prior
+# instructions have completed.
+#
+#**/
+#VOID
+#EFIAPI
+#SpeculationBarrier (
+# VOID
+# );
+#
+ASM_PFX(SpeculationBarrier):
+ dsb
+ isb
+ bx lr
diff --git a/roms/edk2/MdePkg/Library/BaseLib/Arm/SpeculationBarrier.asm b/roms/edk2/MdePkg/Library/BaseLib/Arm/SpeculationBarrier.asm
new file mode 100644
index 000000000..01a64346f
--- /dev/null
+++ b/roms/edk2/MdePkg/Library/BaseLib/Arm/SpeculationBarrier.asm
@@ -0,0 +1,33 @@
+;------------------------------------------------------------------------------
+;
+; SpeculationBarrier() for ARM
+;
+; Copyright (c) 2019, Linaro Ltd. All rights reserved.
+;
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+;------------------------------------------------------------------------------
+
+ EXPORT SpeculationBarrier
+
+ AREA MemoryBarriers, CODE, READONLY
+
+;/**
+; Used as a barrier to stop speculative execution.
+;
+; Ensures that no later instruction will execute speculatively, until all prior
+; instructions have completed.
+;
+;**/
+;VOID
+;EFIAPI
+;SpeculationBarrier (
+; VOID
+; );
+;
+SpeculationBarrier
+ dsb
+ isb
+ bx lr
+
+ END
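
A minimal sketch of the classic use for this barrier, assuming only the public BaseLib API; `SafeLookup` is a hypothetical bounds-checked table read (the Spectre-v1 pattern):

```c
#include <Base.h>
#include <Library/BaseLib.h>

//
// Hypothetical bounds-checked lookup: the DSB/ISB pair above prevents
// the load from executing speculatively with an out-of-bounds index.
//
UINT8
SafeLookup (
  IN CONST UINT8  *Table,
  IN UINTN        TableSize,
  IN UINTN        UntrustedIndex
  )
{
  if (UntrustedIndex >= TableSize) {
    return 0;
  }
  SpeculationBarrier ();
  return Table[UntrustedIndex];
}
```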
diff --git a/roms/edk2/MdePkg/Library/BaseLib/Arm/SwitchStack.S b/roms/edk2/MdePkg/Library/BaseLib/Arm/SwitchStack.S
new file mode 100644
index 000000000..844c9a27f
--- /dev/null
+++ b/roms/edk2/MdePkg/Library/BaseLib/Arm/SwitchStack.S
@@ -0,0 +1,62 @@
+//------------------------------------------------------------------------------
+//
+// Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
+// Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+// Portions copyright (c) 2011, ARM Limited. All rights reserved.<BR>
+// SPDX-License-Identifier: BSD-2-Clause-Patent
+//
+//------------------------------------------------------------------------------
+
+.text
+.align 5
+
+GCC_ASM_EXPORT(InternalSwitchStackAsm)
+GCC_ASM_EXPORT(CpuPause)
+
+/**
+//
+// This allows the caller to switch the stack and go to the new entry point
+//
+// @param EntryPoint The pointer to the location to enter
+// @param Context Parameter to pass in
+// @param Context2 Parameter2 to pass in
+// @param NewStack New Location of the stack
+//
+// @return Nothing. Goes to the Entry Point passing in the new parameters
+//
+VOID
+EFIAPI
+InternalSwitchStackAsm (
+ SWITCH_STACK_ENTRY_POINT EntryPoint,
+ VOID *Context,
+ VOID *Context2,
+ VOID *NewStack
+ );
+**/
+ASM_PFX(InternalSwitchStackAsm):
+ MOV LR, R0
+ MOV SP, R3
+ MOV R0, R1
+ MOV R1, R2
+ BX LR
+
+/**
+//
+// Requests CPU to pause for a short period of time.
+//
+// Requests CPU to pause for a short period of time. Typically used in MP
+// systems to prevent memory starvation while waiting for a spin lock.
+//
+VOID
+EFIAPI
+CpuPause (
+ VOID
+ )
+**/
+ASM_PFX(CpuPause):
+ nop
+ nop
+ nop
+ nop
+ nop
+ BX LR
diff --git a/roms/edk2/MdePkg/Library/BaseLib/Arm/SwitchStack.asm b/roms/edk2/MdePkg/Library/BaseLib/Arm/SwitchStack.asm
new file mode 100644
index 000000000..690f632e6
--- /dev/null
+++ b/roms/edk2/MdePkg/Library/BaseLib/Arm/SwitchStack.asm
@@ -0,0 +1,39 @@
+;------------------------------------------------------------------------------
+;
+; Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
+; Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+; SPDX-License-Identifier: BSD-2-Clause-Patent
+;
+;------------------------------------------------------------------------------
+
+ EXPORT InternalSwitchStackAsm
+
+ AREA Switch_Stack, CODE, READONLY
+
+;/**
+; This allows the caller to switch the stack and go to the new entry point
+;
+; @param EntryPoint The pointer to the location to enter
+; @param Context Parameter to pass in
+; @param Context2 Parameter2 to pass in
+; @param NewStack New Location of the stack
+;
+; @return Nothing. Goes to the Entry Point passing in the new parameters
+;
+;**/
+;VOID
+;EFIAPI
+;InternalSwitchStackAsm (
+; SWITCH_STACK_ENTRY_POINT EntryPoint,
+; VOID *Context,
+; VOID *Context2,
+; VOID *NewStack
+; );
+;
+InternalSwitchStackAsm
+ MOV LR, R0
+ MOV SP, R3
+ MOV R0, R1
+ MOV R1, R2
+ BX LR
+ END
diff --git a/roms/edk2/MdePkg/Library/BaseLib/Arm/Unaligned.c b/roms/edk2/MdePkg/Library/BaseLib/Arm/Unaligned.c
new file mode 100644
index 000000000..e9934e700
--- /dev/null
+++ b/roms/edk2/MdePkg/Library/BaseLib/Arm/Unaligned.c
@@ -0,0 +1,246 @@
+/** @file
+ Unaligned access functions of BaseLib for ARM.
+
+ volatile was added to work around optimization issues.
+
+ Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
+ Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
+ SPDX-License-Identifier: BSD-2-Clause-Patent
+
+**/
+
+#include "BaseLibInternals.h"
+
+/**
+ Reads a 16-bit value from memory that may be unaligned.
+
+ This function returns the 16-bit value pointed to by Buffer. The function
+ guarantees that the read operation does not produce an alignment fault.
+
+ If the Buffer is NULL, then ASSERT().
+
+ @param Buffer The pointer to a 16-bit value that may be unaligned.
+
+ @return The 16-bit value read from Buffer.
+
+**/
+UINT16
+EFIAPI
+ReadUnaligned16 (
+ IN CONST UINT16 *Buffer
+ )
+{
+ volatile UINT8 LowerByte;
+ volatile UINT8 HigherByte;
+
+ ASSERT (Buffer != NULL);
+
+ LowerByte = ((UINT8*)Buffer)[0];
+ HigherByte = ((UINT8*)Buffer)[1];
+
+ return (UINT16)(LowerByte | (HigherByte << 8));
+}
+
+/**
+ Writes a 16-bit value to memory that may be unaligned.
+
+ This function writes the 16-bit value specified by Value to Buffer. Value is
+ returned. The function guarantees that the write operation does not produce
+ an alignment fault.
+
+ If the Buffer is NULL, then ASSERT().
+
+ @param Buffer The pointer to a 16-bit value that may be unaligned.
+ @param Value 16-bit value to write to Buffer.
+
+ @return The 16-bit value to write to Buffer.
+
+**/
+UINT16
+EFIAPI
+WriteUnaligned16 (
+ OUT UINT16 *Buffer,
+ IN UINT16 Value
+ )
+{
+ ASSERT (Buffer != NULL);
+
+ ((volatile UINT8*)Buffer)[0] = (UINT8)Value;
+ ((volatile UINT8*)Buffer)[1] = (UINT8)(Value >> 8);
+
+ return Value;
+}
+
+/**
+ Reads a 24-bit value from memory that may be unaligned.
+
+ This function returns the 24-bit value pointed to by Buffer. The function
+ guarantees that the read operation does not produce an alignment fault.
+
+ If the Buffer is NULL, then ASSERT().
+
+ @param Buffer The pointer to a 24-bit value that may be unaligned.
+
+ @return The 24-bit value read from Buffer.
+
+**/
+UINT32
+EFIAPI
+ReadUnaligned24 (
+ IN CONST UINT32 *Buffer
+ )
+{
+ ASSERT (Buffer != NULL);
+
+ return (UINT32)(
+ ReadUnaligned16 ((UINT16*)Buffer) |
+ (((UINT8*)Buffer)[2] << 16)
+ );
+}
+
+/**
+ Writes a 24-bit value to memory that may be unaligned.
+
+ This function writes the 24-bit value specified by Value to Buffer. Value is
+ returned. The function guarantees that the write operation does not produce
+ an alignment fault.
+
+ If the Buffer is NULL, then ASSERT().
+
+ @param Buffer The pointer to a 24-bit value that may be unaligned.
+ @param Value 24-bit value to write to Buffer.
+
+ @return The 24-bit value to write to Buffer.
+
+**/
+UINT32
+EFIAPI
+WriteUnaligned24 (
+ OUT UINT32 *Buffer,
+ IN UINT32 Value
+ )
+{
+ ASSERT (Buffer != NULL);
+
+ WriteUnaligned16 ((UINT16*)Buffer, (UINT16)Value);
+ *(UINT8*)((UINT16*)Buffer + 1) = (UINT8)(Value >> 16);
+ return Value;
+}
+
+/**
+ Reads a 32-bit value from memory that may be unaligned.
+
+ This function returns the 32-bit value pointed to by Buffer. The function
+ guarantees that the read operation does not produce an alignment fault.
+
+ If the Buffer is NULL, then ASSERT().
+
+ @param Buffer The pointer to a 32-bit value that may be unaligned.
+
+ @return The 32-bit value read from Buffer.
+
+**/
+UINT32
+EFIAPI
+ReadUnaligned32 (
+ IN CONST UINT32 *Buffer
+ )
+{
+ UINT16 LowerBytes;
+ UINT16 HigherBytes;
+
+ ASSERT (Buffer != NULL);
+
+ LowerBytes = ReadUnaligned16 ((UINT16*) Buffer);
+ HigherBytes = ReadUnaligned16 ((UINT16*) Buffer + 1);
+
+ return (UINT32) (LowerBytes | (HigherBytes << 16));
+}
+
+/**
+ Writes a 32-bit value to memory that may be unaligned.
+
+ This function writes the 32-bit value specified by Value to Buffer. Value is
+ returned. The function guarantees that the write operation does not produce
+ an alignment fault.
+
+ If the Buffer is NULL, then ASSERT().
+
+ @param Buffer The pointer to a 32-bit value that may be unaligned.
+ @param Value 32-bit value to write to Buffer.
+
+ @return The 32-bit value to write to Buffer.
+
+**/
+UINT32
+EFIAPI
+WriteUnaligned32 (
+ OUT UINT32 *Buffer,
+ IN UINT32 Value
+ )
+{
+ ASSERT (Buffer != NULL);
+
+ WriteUnaligned16 ((UINT16*)Buffer, (UINT16)Value);
+ WriteUnaligned16 ((UINT16*)Buffer + 1, (UINT16)(Value >> 16));
+ return Value;
+}
+
+/**
+ Reads a 64-bit value from memory that may be unaligned.
+
+ This function returns the 64-bit value pointed to by Buffer. The function
+ guarantees that the read operation does not produce an alignment fault.
+
+ If the Buffer is NULL, then ASSERT().
+
+ @param Buffer The pointer to a 64-bit value that may be unaligned.
+
+ @return The 64-bit value read from Buffer.
+
+**/
+UINT64
+EFIAPI
+ReadUnaligned64 (
+ IN CONST UINT64 *Buffer
+ )
+{
+ UINT32 LowerBytes;
+ UINT32 HigherBytes;
+
+ ASSERT (Buffer != NULL);
+
+ LowerBytes = ReadUnaligned32 ((UINT32*) Buffer);
+ HigherBytes = ReadUnaligned32 ((UINT32*) Buffer + 1);
+
+ return (UINT64) (LowerBytes | LShiftU64 (HigherBytes, 32));
+}
+
+/**
+ Writes a 64-bit value to memory that may be unaligned.
+
+ This function writes the 64-bit value specified by Value to Buffer. Value is
+ returned. The function guarantees that the write operation does not produce
+ an alignment fault.
+
+ If the Buffer is NULL, then ASSERT().
+
+ @param Buffer The pointer to a 64-bit value that may be unaligned.
+ @param Value 64-bit value to write to Buffer.
+
+ @return The 64-bit value to write to Buffer.
+
+**/
+UINT64
+EFIAPI
+WriteUnaligned64 (
+ OUT UINT64 *Buffer,
+ IN UINT64 Value
+ )
+{
+ ASSERT (Buffer != NULL);
+
+ WriteUnaligned32 ((UINT32*)Buffer, (UINT32)Value);
+ WriteUnaligned32 ((UINT32*)Buffer + 1, (UINT32)RShiftU64 (Value, 32));
+ return Value;
+}
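
A minimal sketch of why these helpers exist, assuming a hypothetical packed wire format (a 16-bit type field followed by a 32-bit length at offset 2): a plain 32-bit load from that offset could fault on ARM, so the byte-wise helpers above are used instead.

```c
#include <Base.h>
#include <Library/BaseLib.h>

//
// Hypothetical packed-header parser: Payload + 2 is not 4-byte
// aligned, so read it with the alignment-safe helper above.
//
UINT32
ParsePacketLength (
  IN CONST UINT8  *Payload
  )
{
  return ReadUnaligned32 ((CONST UINT32 *)(Payload + 2));
}
```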