| author | Ard Biesheuvel <ard.biesheuvel@linaro.org> | 2016-09-02 08:26:23 +0100 |
|---|---|---|
| committer | Ard Biesheuvel <ard.biesheuvel@linaro.org> | 2016-09-13 16:28:15 +0100 |
| commit | 01f688be90f59cd1ea92195bd238e8d97295fbf1 (patch) | |
| tree | cb1975178c707423e817a6b1005b4f0a6ff79ba9 /MdePkg | |
| parent | 94a3845be698ddeed9d126363c755bd3bb13dd17 (diff) | |
MdePkg/BaseMemoryLib: widen aligned accesses to 32 or 64 bits
Since the default BaseMemoryLib should be callable from any context,
including ones where unaligned accesses are not allowed, it implements
InternalCopyMem() and InternalSetMem() using byte accesses only.
However, especially in a context where the MMU is off, such narrow
accesses may be disproportionately costly, and so if the size and
alignment of the access allow it, use 32-bit or even 64-bit loads and
stores (the latter may be beneficial even on a 32-bit architecture like
ARM, which has load pair/store pair instructions).
Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Liming Gao <liming.gao@intel.com>
Diffstat (limited to 'MdePkg')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | MdePkg/Library/BaseMemoryLib/BaseMemoryLib.inf | 2 |
| -rw-r--r-- | MdePkg/Library/BaseMemoryLib/CopyMem.c | 112 |
| -rw-r--r-- | MdePkg/Library/BaseMemoryLib/SetMem.c | 40 |
3 files changed, 140 insertions, 14 deletions
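To make the change easier to follow before reading the per-file diffs, here is a rough standalone sketch of the widening idea. It is not the patch itself: it assumes standard C types (uint64_t/uintptr_t rather than the library's UINT64/UINTN) and a hypothetical WidenedCopyForward helper, and it only shows the forward-copy path. The actual patch additionally adds a 32-bit path, a backward copy for overlapping buffers, and keeps the destination pointers volatile so the optimizer cannot substitute the intrinsic memcpy().

```c
#include <stdint.h>
#include <stddef.h>

/* Illustration only: copy with 64-bit accesses when both pointers share
   8-byte alignment and at least 8 bytes remain, then finish the tail (or
   handle entirely unaligned buffers) byte by byte. */
static void *
WidenedCopyForward (void *Dst, const void *Src, size_t Length)
{
  uint8_t        *Dst8 = Dst;
  const uint8_t  *Src8 = Src;

  if ((((uintptr_t)Dst8 | (uintptr_t)Src8) & 0x7) == 0) {
    /* Both buffers are 8-byte aligned: move whole 64-bit words. */
    uint64_t        *Dst64 = (uint64_t *)Dst8;
    const uint64_t  *Src64 = (const uint64_t *)Src8;

    while (Length >= 8) {
      *Dst64++ = *Src64++;
      Length  -= 8;
    }

    Dst8 = (uint8_t *)Dst64;
    Src8 = (const uint8_t *)Src64;
  }

  /* Remaining bytes, if any. */
  while (Length-- != 0) {
    *Dst8++ = *Src8++;
  }

  return Dst;
}
```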
diff --git a/MdePkg/Library/BaseMemoryLib/BaseMemoryLib.inf b/MdePkg/Library/BaseMemoryLib/BaseMemoryLib.inf
index 6d906e9..358eeed 100644
--- a/MdePkg/Library/BaseMemoryLib/BaseMemoryLib.inf
+++ b/MdePkg/Library/BaseMemoryLib/BaseMemoryLib.inf
@@ -26,7 +26,7 @@
#
-# VALID_ARCHITECTURES = IA32 X64 IPF EBC
+# VALID_ARCHITECTURES = IA32 X64 IPF EBC ARM AARCH64
#
[Sources]
diff --git a/MdePkg/Library/BaseMemoryLib/CopyMem.c b/MdePkg/Library/BaseMemoryLib/CopyMem.c
index 37f0366..6f4fd90 100644
--- a/MdePkg/Library/BaseMemoryLib/CopyMem.c
+++ b/MdePkg/Library/BaseMemoryLib/CopyMem.c
@@ -4,6 +4,9 @@
particular platform easily if an optimized version is desired.
Copyright (c) 2006 - 2010, Intel Corporation. All rights reserved.<BR>
+ Copyright (c) 2012 - 2013, ARM Ltd. All rights reserved.<BR>
+ Copyright (c) 2016, Linaro Ltd. All rights reserved.<BR>
+
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
@@ -44,18 +47,107 @@ InternalMemCopyMem (
//
volatile UINT8 *Destination8;
CONST UINT8 *Source8;
+ volatile UINT32 *Destination32;
+ CONST UINT32 *Source32;
+ volatile UINT64 *Destination64;
+ CONST UINT64 *Source64;
+ UINTN Alignment;
+
+ if ((((UINTN)DestinationBuffer & 0x7) == 0) && (((UINTN)SourceBuffer & 0x7) == 0) && (Length >= 8)) {
+ if (SourceBuffer > DestinationBuffer) {
+ Destination64 = (UINT64*)DestinationBuffer;
+ Source64 = (CONST UINT64*)SourceBuffer;
+ while (Length >= 8) {
+ *(Destination64++) = *(Source64++);
+ Length -= 8;
+ }
+
+ // Finish if there are still some bytes to copy
+ Destination8 = (UINT8*)Destination64;
+ Source8 = (CONST UINT8*)Source64;
+ while (Length-- != 0) {
+ *(Destination8++) = *(Source8++);
+ }
+ } else if (SourceBuffer < DestinationBuffer) {
+ Destination64 = (UINT64*)((UINTN)DestinationBuffer + Length);
+ Source64 = (CONST UINT64*)((UINTN)SourceBuffer + Length);
+
+ // Destination64 and Source64 were aligned on a 64-bit boundary
+ // but if length is not a multiple of 8 bytes then they won't be
+ // anymore.
+
+ Alignment = Length & 0x7;
+ if (Alignment != 0) {
+ Destination8 = (UINT8*)Destination64;
+ Source8 = (CONST UINT8*)Source64;
+
+ while (Alignment-- != 0) {
+ *(--Destination8) = *(--Source8);
+ --Length;
+ }
+ Destination64 = (UINT64*)Destination8;
+ Source64 = (CONST UINT64*)Source8;
+ }
+
+ while (Length > 0) {
+ *(--Destination64) = *(--Source64);
+ Length -= 8;
+ }
+ }
+ } else if ((((UINTN)DestinationBuffer & 0x3) == 0) && (((UINTN)SourceBuffer & 0x3) == 0) && (Length >= 4)) {
+ if (SourceBuffer > DestinationBuffer) {
+ Destination32 = (UINT32*)DestinationBuffer;
+ Source32 = (CONST UINT32*)SourceBuffer;
+ while (Length >= 4) {
+ *(Destination32++) = *(Source32++);
+ Length -= 4;
+ }
+
+ // Finish if there are still some bytes to copy
+ Destination8 = (UINT8*)Destination32;
+ Source8 = (CONST UINT8*)Source32;
+ while (Length-- != 0) {
+ *(Destination8++) = *(Source8++);
+ }
+ } else if (SourceBuffer < DestinationBuffer) {
+ Destination32 = (UINT32*)((UINTN)DestinationBuffer + Length);
+ Source32 = (CONST UINT32*)((UINTN)SourceBuffer + Length);
+
+ // Destination32 and Source32 were aligned on a 32-bit boundary
+ // but if length is not a multiple of 4 bytes then they won't be
+ // anymore.
+
+ Alignment = Length & 0x3;
+ if (Alignment != 0) {
+ Destination8 = (UINT8*)Destination32;
+ Source8 = (CONST UINT8*)Source32;
+
+ while (Alignment-- != 0) {
+ *(--Destination8) = *(--Source8);
+ --Length;
+ }
+ Destination32 = (UINT32*)Destination8;
+ Source32 = (CONST UINT32*)Source8;
+ }
- if (SourceBuffer > DestinationBuffer) {
- Destination8 = (UINT8*)DestinationBuffer;
- Source8 = (CONST UINT8*)SourceBuffer;
- while (Length-- != 0) {
- *(Destination8++) = *(Source8++);
+ while (Length > 0) {
+ *(--Destination32) = *(--Source32);
+ Length -= 4;
+ }
}
- } else if (SourceBuffer < DestinationBuffer) {
- Destination8 = (UINT8*)DestinationBuffer + Length;
- Source8 = (CONST UINT8*)SourceBuffer + Length;
- while (Length-- != 0) {
- *(--Destination8) = *(--Source8);
+ } else {
+ if (SourceBuffer > DestinationBuffer) {
+ Destination8 = (UINT8*)DestinationBuffer;
+ Source8 = (CONST UINT8*)SourceBuffer;
+ while (Length-- != 0) {
+ *(Destination8++) = *(Source8++);
+ }
+ } else if (SourceBuffer < DestinationBuffer) {
+ Destination8 = (UINT8*)DestinationBuffer + Length;
+ Source8 = (CONST UINT8*)SourceBuffer + Length;
+ while (Length-- != 0) {
+ *(--Destination8) = *(--Source8);
+ }
}
}
return DestinationBuffer;
diff --git a/MdePkg/Library/BaseMemoryLib/SetMem.c b/MdePkg/Library/BaseMemoryLib/SetMem.c
index 5e74085..b6fb811 100644
--- a/MdePkg/Library/BaseMemoryLib/SetMem.c
+++ b/MdePkg/Library/BaseMemoryLib/SetMem.c
@@ -5,6 +5,9 @@
is desired.
Copyright (c) 2006 - 2010, Intel Corporation. All rights reserved.<BR>
+ Copyright (c) 2012 - 2013, ARM Ltd. All rights reserved.<BR>
+ Copyright (c) 2016, Linaro Ltd. All rights reserved.<BR>
+
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
@@ -43,11 +46,42 @@ InternalMemSetMem (
// volatile to prevent the optimizer from replacing this function with
// the intrinsic memset()
//
- volatile UINT8 *Pointer;
+ volatile UINT8 *Pointer8;
+ volatile UINT32 *Pointer32;
+ volatile UINT64 *Pointer64;
+ UINT32 Value32;
+ UINT64 Value64;
+
+ if ((((UINTN)Buffer & 0x7) == 0) && (Length >= 8)) {
+ // Generate the 64bit value
+ Value32 = (Value << 24) | (Value << 16) | (Value << 8) | Value;
+ Value64 = LShiftU64 (Value32, 32) | Value32;
+
+ Pointer64 = (UINT64*)Buffer;
+ while (Length >= 8) {
+ *(Pointer64++) = Value64;
+ Length -= 8;
+ }
- Pointer = (UINT8*)Buffer;
+ // Finish with bytes if needed
+ Pointer8 = (UINT8*)Pointer64;
+ } else if ((((UINTN)Buffer & 0x3) == 0) && (Length >= 4)) {
+ // Generate the 32bit value
+ Value32 = (Value << 24) | (Value << 16) | (Value << 8) | Value;
+
+ Pointer32 = (UINT32*)Buffer;
+ while (Length >= 4) {
+ *(Pointer32++) = Value32;
+ Length -= 4;
+ }
+
+ // Finish with bytes if needed
+ Pointer8 = (UINT8*)Pointer32;
+ } else {
+ Pointer8 = (UINT8*)Buffer;
+ }
while (Length-- > 0) {
- *(Pointer++) = Value;
+ *(Pointer8++) = Value;
}
return Buffer;
}
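The InternalMemSetMem() change relies on replicating the fill byte into wider patterns first (the Value32/Value64 construction above, which uses LShiftU64 from BaseLib so the upper half is built without a native 64-bit shift). A minimal standalone sketch of that splat, assuming standard C types and a hypothetical SplatByte64 name:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: replicate a fill byte into a 64-bit pattern,
   mirroring the Value32/Value64 construction in the patch. */
static uint64_t
SplatByte64 (uint8_t Value)
{
  uint32_t Value32;

  Value32 = ((uint32_t)Value << 24) | ((uint32_t)Value << 16) |
            ((uint32_t)Value << 8)  | (uint32_t)Value;
  return ((uint64_t)Value32 << 32) | Value32;
}

int
main (void)
{
  /* 0xAB expands to 0xABABABABABABABAB; the patched loop then stores
     this value eight bytes at a time while Length >= 8. */
  printf ("%016llx\n", (unsigned long long)SplatByte64 (0xAB));
  return 0;
}
```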