author     Heyi Guo <heyi.guo@linaro.org>    2015-09-09 13:37:33 +0000
committer  abiesheuvel <abiesheuvel@Edk2>    2015-09-09 13:37:33 +0000
commit     41f890164bc201f69841309c6b55e24c64121960 (patch)
tree       be2b788da21c55d3b3c73d1f75ddea688a61cfdb /ArmPkg
parent     946067bfb074962667bd61fafffd0e777242e12b (diff)
ArmPkg/Mmu: Fix literal number left shift bug
There is a hidden bug in the code below:

  (1 << BaseAddressAlignment) & *BlockEntrySize

From the disassembly we can see that the literal number 1 is treated as
INT32 by the compiler by default, so at BaseAddressAlignment == 31 the
32-bit result 0x80000000 is negative and sign-extends to
0xFFFFFFFF80000000 when widened for the 64-bit AND. The loop therefore
always stops at 31 even when the real alignment is larger than 31.

  if ((1 << BaseAddressAlignment) & *BlockEntrySize) {
    5224:       f9404be0        ldr     x0, [sp,#144]
    5228:       2a0003e1        mov     w1, w0
    522c:       52800020        mov     w0, #0x1                // #1
    5230:       1ac12000        lsl     w0, w0, w1
    5234:       93407c01        sxtw    x1, w0

The bug can be replayed on QEMU AARCH64; by adding some debug prints we
can see lots of level 1 tables being created (for 1GB blocks) even when
the region is large enough to use a 512GB block size.

Use LowBitSet64() from BaseLib instead to fix the bug.

Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Heyi Guo <heyi.guo@linaro.org>
Cc: Leif Lindholm <leif.lindholm@linaro.org>
Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>

git-svn-id: https://svn.code.sf.net/p/edk2/code/trunk/edk2@18423 6f19259b-4bc3-4df7-8a09-765794883524
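To make the failure mode concrete, here is a small host-side reproduction in plain C (an illustrative sketch written for this note, not part of the patch; the helper LowBitSet64Equivalent() is made up and merely stands in for BaseLib's LowBitSet64()):

  /* Host-side reproduction of the shift-width bug; build with e.g.
     "cc repro.c && ./a.out" on a 64-bit machine. */
  #include <stdio.h>
  #include <stdint.h>

  /* Local stand-in for BaseLib's LowBitSet64(): returns the zero-based
     position of the lowest set bit, or -1 if Operand is zero. */
  static int LowBitSet64Equivalent (uint64_t Operand)
  {
    int Bit;
    if (Operand == 0) {
      return -1;
    }
    for (Bit = 0; (Operand & 1) == 0; Bit++) {
      Operand >>= 1;
    }
    return Bit;
  }

  int main (void)
  {
    uint64_t BlockEntrySize = 0x8000000000ULL;   /* bit 39 set: 512GB block */
    int      BaseAddressAlignment;

    /* Original loop: the literal 1 is a 32-bit int, so the shift is done
       in 32 bits.  At a shift count of 31 the value becomes 0x80000000
       (negative; strictly undefined in ISO C, but this matches the AArch64
       code generation shown above), which sign-extends to
       0xFFFFFFFF80000000 for the 64-bit AND, so the test succeeds too early. */
    for (BaseAddressAlignment = 0; BaseAddressAlignment < 64; BaseAddressAlignment++) {
      if ((1 << BaseAddressAlignment) & BlockEntrySize) {
        break;
      }
    }

    printf ("buggy loop : %d\n", BaseAddressAlignment);                    /* 31 */
    printf ("LowBitSet64: %d\n", LowBitSet64Equivalent (BlockEntrySize));  /* 39 */
    return 0;
  }

On a 64-bit host this prints 31 for the buggy loop and 39 for the stand-in, matching the behaviour described above.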
Diffstat (limited to 'ArmPkg')
-rw-r--r--  ArmPkg/Library/ArmLib/AArch64/AArch64Mmu.c | 16
1 file changed, 4 insertions, 12 deletions
diff --git a/ArmPkg/Library/ArmLib/AArch64/AArch64Mmu.c b/ArmPkg/Library/ArmLib/AArch64/AArch64Mmu.c
index 5b6c453..dc50e0e 100644
--- a/ArmPkg/Library/ArmLib/AArch64/AArch64Mmu.c
+++ b/ArmPkg/Library/ArmLib/AArch64/AArch64Mmu.c
@@ -276,8 +276,8 @@ GetBlockEntryListFromAddress (
     return NULL;
   }
 
-  // Ensure the required size is aligned on 4KB boundary
-  if ((*BlockEntrySize & (SIZE_4KB - 1)) != 0) {
+  // Ensure the required size is aligned on 4KB boundary and not 0
+  if ((*BlockEntrySize & (SIZE_4KB - 1)) != 0 || *BlockEntrySize == 0) {
     ASSERT_EFI_ERROR (EFI_INVALID_PARAMETER);
     return NULL;
   }
@@ -294,18 +294,10 @@ GetBlockEntryListFromAddress (
   // If the start address is 0x0 then we use the size of the region to identify the alignment
   if (RegionStart == 0) {
     // Identify the highest possible alignment for the Region Size
-    for (BaseAddressAlignment = 0; BaseAddressAlignment < 64; BaseAddressAlignment++) {
-      if ((1 << BaseAddressAlignment) & *BlockEntrySize) {
-        break;
-      }
-    }
+    BaseAddressAlignment = LowBitSet64 (*BlockEntrySize);
   } else {
     // Identify the highest possible alignment for the Base Address
-    for (BaseAddressAlignment = 0; BaseAddressAlignment < 64; BaseAddressAlignment++) {
-      if ((1 << BaseAddressAlignment) & RegionStart) {
-        break;
-      }
-    }
+    BaseAddressAlignment = LowBitSet64 (RegionStart);
   }
 
   // Identify the Page Level the RegionStart must belongs to
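
For background on the replacement, LowBitSet64() is declared in MdePkg/Include/Library/BaseLib.h (paraphrased below) and returns -1 when its operand is zero, which is presumably why the first hunk above also starts rejecting a zero *BlockEntrySize before the value can reach the new call:

  //
  // Paraphrased from MdePkg/Include/Library/BaseLib.h: returns the bit
  // position of the lowest bit set in Operand, or -1 if Operand is zero.
  //
  INTN
  EFIAPI
  LowBitSet64 (
    IN UINT64  Operand
    );

  // Per that contract:
  //   LowBitSet64 (0x8000000000ULL)  -> 39  (a 512GB-aligned value)
  //   LowBitSet64 (0)                -> -1  (hence the new zero-size check)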