Diffstat (limited to 'compiler-rt/lib'):
 compiler-rt/lib/builtins/assembly.h                       |   2
 compiler-rt/lib/builtins/cpu_model/x86.c                  | 246
 compiler-rt/lib/scudo/standalone/secondary.h              |  69
 compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp |  85
 4 files changed, 258 insertions(+), 144 deletions(-)
diff --git a/compiler-rt/lib/builtins/assembly.h b/compiler-rt/lib/builtins/assembly.h
index ac119af..368cbaf 100644
--- a/compiler-rt/lib/builtins/assembly.h
+++ b/compiler-rt/lib/builtins/assembly.h
@@ -14,7 +14,7 @@
#ifndef COMPILERRT_ASSEMBLY_H
#define COMPILERRT_ASSEMBLY_H
-#if defined(__linux__) && defined(__CET__)
+#ifdef __CET__
#if __has_include(<cet.h>)
#include <cet.h>
#endif
diff --git a/compiler-rt/lib/builtins/cpu_model/x86.c b/compiler-rt/lib/builtins/cpu_model/x86.c
index d9ff116..c21b2ba 100644
--- a/compiler-rt/lib/builtins/cpu_model/x86.c
+++ b/compiler-rt/lib/builtins/cpu_model/x86.c
@@ -36,14 +36,14 @@ enum VendorSignatures {
SIG_AMD = 0x68747541, // Auth
};
-enum ProcessorVendors : unsigned int {
+enum ProcessorVendors {
VENDOR_INTEL = 1,
VENDOR_AMD,
VENDOR_OTHER,
VENDOR_MAX
};
-enum ProcessorTypes : unsigned int {
+enum ProcessorTypes {
INTEL_BONNELL = 1,
INTEL_CORE2,
INTEL_COREI7,
@@ -235,6 +235,19 @@ enum ProcessorFeatures {
CPU_FEATURE_MAX
};
+#ifndef _WIN32
+__attribute__((visibility("hidden")))
+#endif
+struct __processor_model {
+ unsigned int __cpu_vendor;
+ unsigned int __cpu_type;
+ unsigned int __cpu_subtype;
+ unsigned int __cpu_features[1];
+} __cpu_model = {0, 0, 0, {0}};
+
+static_assert(sizeof(__cpu_model) == 16,
+ "Wrong size of __cpu_model will result in ABI break");
+
// This code is copied from lib/Support/Host.cpp.
// Changes to either file should be mirrored in the other.
@@ -319,13 +332,17 @@ static void detectX86FamilyModel(unsigned EAX, unsigned *Family,
#define testFeature(F) (Features[F / 32] & (1 << (F % 32))) != 0
-static const char *getIntelProcessorTypeAndSubtype(
- unsigned Family, unsigned Model, const unsigned *Features,
- enum ProcessorTypes *Type, enum ProcessorSubtypes *Subtype) {
+static const char *
+getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model,
+ const unsigned *Features,
+ struct __processor_model *CpuModel) {
// We select CPU strings to match the code in Host.cpp, but we don't use them
// in compiler-rt.
const char *CPU = 0;
+ enum ProcessorTypes Type = CPU_TYPE_MAX;
+ enum ProcessorSubtypes Subtype = CPU_SUBTYPE_MAX;
+
switch (Family) {
case 0x6:
switch (Model) {
@@ -337,7 +354,7 @@ static const char *getIntelProcessorTypeAndSubtype(
case 0x16: // Intel Celeron processor model 16h. All processors are
// manufactured using the 65 nm process
CPU = "core2";
- *Type = INTEL_CORE2;
+ Type = INTEL_CORE2;
break;
case 0x17: // Intel Core 2 Extreme processor, Intel Xeon processor, model
// 17h. All processors are manufactured using the 45 nm process.
@@ -346,7 +363,7 @@ static const char *getIntelProcessorTypeAndSubtype(
case 0x1d: // Intel Xeon processor MP. All processors are manufactured using
// the 45 nm process.
CPU = "penryn";
- *Type = INTEL_CORE2;
+ Type = INTEL_CORE2;
break;
case 0x1a: // Intel Core i7 processor and Intel Xeon processor. All
// processors are manufactured using the 45 nm process.
@@ -355,29 +372,29 @@ static const char *getIntelProcessorTypeAndSubtype(
case 0x1f:
case 0x2e: // Nehalem EX
CPU = "nehalem";
- *Type = INTEL_COREI7;
- *Subtype = INTEL_COREI7_NEHALEM;
+ Type = INTEL_COREI7;
+ Subtype = INTEL_COREI7_NEHALEM;
break;
case 0x25: // Intel Core i7, laptop version.
case 0x2c: // Intel Core i7 processor and Intel Xeon processor. All
// processors are manufactured using the 32 nm process.
case 0x2f: // Westmere EX
CPU = "westmere";
- *Type = INTEL_COREI7;
- *Subtype = INTEL_COREI7_WESTMERE;
+ Type = INTEL_COREI7;
+ Subtype = INTEL_COREI7_WESTMERE;
break;
case 0x2a: // Intel Core i7 processor. All processors are manufactured
// using the 32 nm process.
case 0x2d:
CPU = "sandybridge";
- *Type = INTEL_COREI7;
- *Subtype = INTEL_COREI7_SANDYBRIDGE;
+ Type = INTEL_COREI7;
+ Subtype = INTEL_COREI7_SANDYBRIDGE;
break;
case 0x3a:
case 0x3e: // Ivy Bridge EP
CPU = "ivybridge";
- *Type = INTEL_COREI7;
- *Subtype = INTEL_COREI7_IVYBRIDGE;
+ Type = INTEL_COREI7;
+ Subtype = INTEL_COREI7_IVYBRIDGE;
break;
// Haswell:
@@ -386,8 +403,8 @@ static const char *getIntelProcessorTypeAndSubtype(
case 0x45:
case 0x46:
CPU = "haswell";
- *Type = INTEL_COREI7;
- *Subtype = INTEL_COREI7_HASWELL;
+ Type = INTEL_COREI7;
+ Subtype = INTEL_COREI7_HASWELL;
break;
// Broadwell:
@@ -396,8 +413,8 @@ static const char *getIntelProcessorTypeAndSubtype(
case 0x4f:
case 0x56:
CPU = "broadwell";
- *Type = INTEL_COREI7;
- *Subtype = INTEL_COREI7_BROADWELL;
+ Type = INTEL_COREI7;
+ Subtype = INTEL_COREI7_BROADWELL;
break;
// Skylake:
@@ -408,61 +425,61 @@ static const char *getIntelProcessorTypeAndSubtype(
case 0xa5: // Comet Lake-H/S
case 0xa6: // Comet Lake-U
CPU = "skylake";
- *Type = INTEL_COREI7;
- *Subtype = INTEL_COREI7_SKYLAKE;
+ Type = INTEL_COREI7;
+ Subtype = INTEL_COREI7_SKYLAKE;
break;
// Rocketlake:
case 0xa7:
CPU = "rocketlake";
- *Type = INTEL_COREI7;
- *Subtype = INTEL_COREI7_ROCKETLAKE;
+ Type = INTEL_COREI7;
+ Subtype = INTEL_COREI7_ROCKETLAKE;
break;
// Skylake Xeon:
case 0x55:
- *Type = INTEL_COREI7;
+ Type = INTEL_COREI7;
if (testFeature(FEATURE_AVX512BF16)) {
CPU = "cooperlake";
- *Subtype = INTEL_COREI7_COOPERLAKE;
+ Subtype = INTEL_COREI7_COOPERLAKE;
} else if (testFeature(FEATURE_AVX512VNNI)) {
CPU = "cascadelake";
- *Subtype = INTEL_COREI7_CASCADELAKE;
+ Subtype = INTEL_COREI7_CASCADELAKE;
} else {
CPU = "skylake-avx512";
- *Subtype = INTEL_COREI7_SKYLAKE_AVX512;
+ Subtype = INTEL_COREI7_SKYLAKE_AVX512;
}
break;
// Cannonlake:
case 0x66:
CPU = "cannonlake";
- *Type = INTEL_COREI7;
- *Subtype = INTEL_COREI7_CANNONLAKE;
+ Type = INTEL_COREI7;
+ Subtype = INTEL_COREI7_CANNONLAKE;
break;
// Icelake:
case 0x7d:
case 0x7e:
CPU = "icelake-client";
- *Type = INTEL_COREI7;
- *Subtype = INTEL_COREI7_ICELAKE_CLIENT;
+ Type = INTEL_COREI7;
+ Subtype = INTEL_COREI7_ICELAKE_CLIENT;
break;
// Tigerlake:
case 0x8c:
case 0x8d:
CPU = "tigerlake";
- *Type = INTEL_COREI7;
- *Subtype = INTEL_COREI7_TIGERLAKE;
+ Type = INTEL_COREI7;
+ Subtype = INTEL_COREI7_TIGERLAKE;
break;
// Alderlake:
case 0x97:
case 0x9a:
CPU = "alderlake";
- *Type = INTEL_COREI7;
- *Subtype = INTEL_COREI7_ALDERLAKE;
+ Type = INTEL_COREI7;
+ Subtype = INTEL_COREI7_ALDERLAKE;
break;
// Raptorlake:
@@ -470,23 +487,23 @@ static const char *getIntelProcessorTypeAndSubtype(
case 0xba:
case 0xbf:
CPU = "raptorlake";
- *Type = INTEL_COREI7;
- *Subtype = INTEL_COREI7_ALDERLAKE;
+ Type = INTEL_COREI7;
+ Subtype = INTEL_COREI7_ALDERLAKE;
break;
// Meteorlake:
case 0xaa:
case 0xac:
CPU = "meteorlake";
- *Type = INTEL_COREI7;
- *Subtype = INTEL_COREI7_ALDERLAKE;
+ Type = INTEL_COREI7;
+ Subtype = INTEL_COREI7_ALDERLAKE;
break;
// Gracemont:
case 0xbe:
CPU = "gracemont";
- *Type = INTEL_COREI7;
- *Subtype = INTEL_COREI7_ALDERLAKE;
+ Type = INTEL_COREI7;
+ Subtype = INTEL_COREI7_ALDERLAKE;
break;
// Arrowlake:
@@ -494,72 +511,72 @@ static const char *getIntelProcessorTypeAndSubtype(
// Arrowlake U:
case 0xb5:
CPU = "arrowlake";
- *Type = INTEL_COREI7;
- *Subtype = INTEL_COREI7_ARROWLAKE;
+ Type = INTEL_COREI7;
+ Subtype = INTEL_COREI7_ARROWLAKE;
break;
// Arrowlake S:
case 0xc6:
CPU = "arrowlake-s";
- *Type = INTEL_COREI7;
- *Subtype = INTEL_COREI7_ARROWLAKE_S;
+ Type = INTEL_COREI7;
+ Subtype = INTEL_COREI7_ARROWLAKE_S;
break;
// Lunarlake:
case 0xbd:
CPU = "lunarlake";
- *Type = INTEL_COREI7;
- *Subtype = INTEL_COREI7_ARROWLAKE_S;
+ Type = INTEL_COREI7;
+ Subtype = INTEL_COREI7_ARROWLAKE_S;
break;
// Pantherlake:
case 0xcc:
CPU = "pantherlake";
- *Type = INTEL_COREI7;
- *Subtype = INTEL_COREI7_PANTHERLAKE;
+ Type = INTEL_COREI7;
+ Subtype = INTEL_COREI7_PANTHERLAKE;
break;
// Wildcatlake:
case 0xd5:
CPU = "wildcatlake";
- *Type = INTEL_COREI7;
- *Subtype = INTEL_COREI7_PANTHERLAKE;
+ Type = INTEL_COREI7;
+ Subtype = INTEL_COREI7_PANTHERLAKE;
break;
// Icelake Xeon:
case 0x6a:
case 0x6c:
CPU = "icelake-server";
- *Type = INTEL_COREI7;
- *Subtype = INTEL_COREI7_ICELAKE_SERVER;
+ Type = INTEL_COREI7;
+ Subtype = INTEL_COREI7_ICELAKE_SERVER;
break;
// Emerald Rapids:
case 0xcf:
CPU = "emeraldrapids";
- *Type = INTEL_COREI7;
- *Subtype = INTEL_COREI7_SAPPHIRERAPIDS;
+ Type = INTEL_COREI7;
+ Subtype = INTEL_COREI7_SAPPHIRERAPIDS;
break;
// Sapphire Rapids:
case 0x8f:
CPU = "sapphirerapids";
- *Type = INTEL_COREI7;
- *Subtype = INTEL_COREI7_SAPPHIRERAPIDS;
+ Type = INTEL_COREI7;
+ Subtype = INTEL_COREI7_SAPPHIRERAPIDS;
break;
// Granite Rapids:
case 0xad:
CPU = "graniterapids";
- *Type = INTEL_COREI7;
- *Subtype = INTEL_COREI7_GRANITERAPIDS;
+ Type = INTEL_COREI7;
+ Subtype = INTEL_COREI7_GRANITERAPIDS;
break;
// Granite Rapids D:
case 0xae:
CPU = "graniterapids-d";
- *Type = INTEL_COREI7;
- *Subtype = INTEL_COREI7_GRANITERAPIDS_D;
+ Type = INTEL_COREI7;
+ Subtype = INTEL_COREI7_GRANITERAPIDS_D;
break;
case 0x1c: // Most 45 nm Intel Atom processors
@@ -568,7 +585,7 @@ static const char *getIntelProcessorTypeAndSubtype(
case 0x35: // 32 nm Atom Midview
case 0x36: // 32 nm Atom Midview
CPU = "bonnell";
- *Type = INTEL_BONNELL;
+ Type = INTEL_BONNELL;
break;
// Atom Silvermont codes from the Intel software optimization guide.
@@ -579,52 +596,52 @@ static const char *getIntelProcessorTypeAndSubtype(
case 0x5d:
case 0x4c: // really airmont
CPU = "silvermont";
- *Type = INTEL_SILVERMONT;
+ Type = INTEL_SILVERMONT;
break;
// Goldmont:
case 0x5c: // Apollo Lake
case 0x5f: // Denverton
CPU = "goldmont";
- *Type = INTEL_GOLDMONT;
+ Type = INTEL_GOLDMONT;
break; // "goldmont"
case 0x7a:
CPU = "goldmont-plus";
- *Type = INTEL_GOLDMONT_PLUS;
+ Type = INTEL_GOLDMONT_PLUS;
break;
case 0x86:
case 0x8a: // Lakefield
case 0x96: // Elkhart Lake
case 0x9c: // Jasper Lake
CPU = "tremont";
- *Type = INTEL_TREMONT;
+ Type = INTEL_TREMONT;
break;
// Sierraforest:
case 0xaf:
CPU = "sierraforest";
- *Type = INTEL_SIERRAFOREST;
+ Type = INTEL_SIERRAFOREST;
break;
// Grandridge:
case 0xb6:
CPU = "grandridge";
- *Type = INTEL_GRANDRIDGE;
+ Type = INTEL_GRANDRIDGE;
break;
// Clearwaterforest:
case 0xdd:
CPU = "clearwaterforest";
- *Type = INTEL_CLEARWATERFOREST;
+ Type = INTEL_CLEARWATERFOREST;
break;
case 0x57:
CPU = "knl";
- *Type = INTEL_KNL;
+ Type = INTEL_KNL;
break;
case 0x85:
CPU = "knm";
- *Type = INTEL_KNM;
+ Type = INTEL_KNM;
break;
default: // Unknown family 6 CPU.
@@ -636,8 +653,8 @@ static const char *getIntelProcessorTypeAndSubtype(
// Diamond Rapids:
case 0x01:
CPU = "diamondrapids";
- *Type = INTEL_COREI7;
- *Subtype = INTEL_COREI7_DIAMONDRAPIDS;
+ Type = INTEL_COREI7;
+ Subtype = INTEL_COREI7_DIAMONDRAPIDS;
break;
default: // Unknown family 19 CPU.
@@ -649,8 +666,8 @@ static const char *getIntelProcessorTypeAndSubtype(
case 0x1:
case 0x3:
CPU = "novalake";
- *Type = INTEL_COREI7;
- *Subtype = INTEL_COREI7_NOVALAKE;
+ Type = INTEL_COREI7;
+ Subtype = INTEL_COREI7_NOVALAKE;
break;
default: // Unknown family 0x12 CPU.
break;
@@ -661,14 +678,23 @@ static const char *getIntelProcessorTypeAndSubtype(
break; // Unknown.
}
+ if (Type != CPU_TYPE_MAX)
+ CpuModel->__cpu_type = Type;
+ if (Subtype != CPU_SUBTYPE_MAX)
+ CpuModel->__cpu_subtype = Subtype;
+
return CPU;
}
-static const char *getAMDProcessorTypeAndSubtype(
- unsigned Family, unsigned Model, const unsigned *Features,
- enum ProcessorTypes *Type, enum ProcessorSubtypes *Subtype) {
+static const char *
+getAMDProcessorTypeAndSubtype(unsigned Family, unsigned Model,
+ const unsigned *Features,
+ struct __processor_model *CpuModel) {
const char *CPU = 0;
+ enum ProcessorTypes Type = CPU_TYPE_MAX;
+ enum ProcessorSubtypes Subtype = CPU_SUBTYPE_MAX;
+
switch (Family) {
case 4:
CPU = "i486";
@@ -709,53 +735,53 @@ static const char *getAMDProcessorTypeAndSubtype(
case 16:
case 18:
CPU = "amdfam10";
- *Type = AMDFAM10H; // "amdfam10"
+ Type = AMDFAM10H; // "amdfam10"
switch (Model) {
case 2:
- *Subtype = AMDFAM10H_BARCELONA;
+ Subtype = AMDFAM10H_BARCELONA;
break;
case 4:
- *Subtype = AMDFAM10H_SHANGHAI;
+ Subtype = AMDFAM10H_SHANGHAI;
break;
case 8:
- *Subtype = AMDFAM10H_ISTANBUL;
+ Subtype = AMDFAM10H_ISTANBUL;
break;
}
break;
case 20:
CPU = "btver1";
- *Type = AMD_BTVER1;
+ Type = AMD_BTVER1;
break;
case 21:
CPU = "bdver1";
- *Type = AMDFAM15H;
+ Type = AMDFAM15H;
if (Model >= 0x60 && Model <= 0x7f) {
CPU = "bdver4";
- *Subtype = AMDFAM15H_BDVER4;
+ Subtype = AMDFAM15H_BDVER4;
break; // 60h-7Fh: Excavator
}
if (Model >= 0x30 && Model <= 0x3f) {
CPU = "bdver3";
- *Subtype = AMDFAM15H_BDVER3;
+ Subtype = AMDFAM15H_BDVER3;
break; // 30h-3Fh: Steamroller
}
if ((Model >= 0x10 && Model <= 0x1f) || Model == 0x02) {
CPU = "bdver2";
- *Subtype = AMDFAM15H_BDVER2;
+ Subtype = AMDFAM15H_BDVER2;
break; // 02h, 10h-1Fh: Piledriver
}
if (Model <= 0x0f) {
- *Subtype = AMDFAM15H_BDVER1;
+ Subtype = AMDFAM15H_BDVER1;
break; // 00h-0Fh: Bulldozer
}
break;
case 22:
CPU = "btver2";
- *Type = AMD_BTVER2;
+ Type = AMD_BTVER2;
break;
case 23:
CPU = "znver1";
- *Type = AMDFAM17H;
+ Type = AMDFAM17H;
if ((Model >= 0x30 && Model <= 0x3f) || (Model == 0x47) ||
(Model >= 0x60 && Model <= 0x67) || (Model >= 0x68 && Model <= 0x6f) ||
(Model >= 0x70 && Model <= 0x7f) || (Model >= 0x84 && Model <= 0x87) ||
@@ -771,20 +797,20 @@ static const char *getAMDProcessorTypeAndSubtype(
// Family 17h Models 98h-9Fh (Mero) Zen 2
// Family 17h Models A0h-AFh (Mendocino) Zen 2
CPU = "znver2";
- *Subtype = AMDFAM17H_ZNVER2;
+ Subtype = AMDFAM17H_ZNVER2;
break;
}
if ((Model >= 0x10 && Model <= 0x1f) || (Model >= 0x20 && Model <= 0x2f)) {
// Family 17h Models 10h-1Fh (Raven1) Zen
// Family 17h Models 10h-1Fh (Picasso) Zen+
// Family 17h Models 20h-2Fh (Raven2 x86) Zen
- *Subtype = AMDFAM17H_ZNVER1;
+ Subtype = AMDFAM17H_ZNVER1;
break;
}
break;
case 25:
CPU = "znver3";
- *Type = AMDFAM19H;
+ Type = AMDFAM19H;
if (Model <= 0x0f || (Model >= 0x20 && Model <= 0x2f) ||
(Model >= 0x30 && Model <= 0x3f) || (Model >= 0x40 && Model <= 0x4f) ||
(Model >= 0x50 && Model <= 0x5f)) {
@@ -793,7 +819,7 @@ static const char *getAMDProcessorTypeAndSubtype(
// Family 19h Models 30h-3Fh (Badami) Zen 3
// Family 19h Models 40h-4Fh (Rembrandt) Zen 3+
// Family 19h Models 50h-5Fh (Cezanne) Zen 3
- *Subtype = AMDFAM19H_ZNVER3;
+ Subtype = AMDFAM19H_ZNVER3;
break;
}
if ((Model >= 0x10 && Model <= 0x1f) || (Model >= 0x60 && Model <= 0x6f) ||
@@ -805,13 +831,13 @@ static const char *getAMDProcessorTypeAndSubtype(
// Family 19h Models 78h-7Fh (Phoenix 2, Hawkpoint2) Zen 4
// Family 19h Models A0h-AFh (Stones-Dense) Zen 4
CPU = "znver4";
- *Subtype = AMDFAM19H_ZNVER4;
+ Subtype = AMDFAM19H_ZNVER4;
break; // "znver4"
}
break; // family 19h
case 26:
CPU = "znver5";
- *Type = AMDFAM1AH;
+ Type = AMDFAM1AH;
if (Model <= 0x77) {
// Models 00h-0Fh (Breithorn).
// Models 10h-1Fh (Breithorn-Dense).
@@ -823,7 +849,7 @@ static const char *getAMDProcessorTypeAndSubtype(
// Models 60h-6Fh (Krackan1).
// Models 70h-77h (Sarlak).
CPU = "znver5";
- *Subtype = AMDFAM1AH_ZNVER5;
+ Subtype = AMDFAM1AH_ZNVER5;
break; // "znver5"
}
break;
@@ -831,6 +857,11 @@ static const char *getAMDProcessorTypeAndSubtype(
break; // Unknown AMD CPU.
}
+ if (Type != CPU_TYPE_MAX)
+ CpuModel->__cpu_type = Type;
+ if (Subtype != CPU_SUBTYPE_MAX)
+ CpuModel->__cpu_subtype = Subtype;
+
return CPU;
}
@@ -1155,19 +1186,6 @@ int __cpu_indicator_init(void) CONSTRUCTOR_ATTRIBUTE;
#ifndef _WIN32
__attribute__((visibility("hidden")))
#endif
-struct __processor_model {
- unsigned int __cpu_vendor;
- enum ProcessorTypes __cpu_type;
- enum ProcessorSubtypes __cpu_subtype;
- unsigned int __cpu_features[1];
-} __cpu_model = {0, 0, 0, {0}};
-
-static_assert(sizeof(__cpu_model) == 16,
- "Wrong size of __cpu_model will result in ABI break");
-
-#ifndef _WIN32
-__attribute__((visibility("hidden")))
-#endif
unsigned __cpu_features2[(CPU_FEATURE_MAX - 1) / 32];
// A constructor function that sets __cpu_model and __cpu_features2 with
@@ -1207,15 +1225,11 @@ int CONSTRUCTOR_ATTRIBUTE __cpu_indicator_init(void) {
if (Vendor == SIG_INTEL) {
// Get CPU type.
- getIntelProcessorTypeAndSubtype(Family, Model, &Features[0],
- &(__cpu_model.__cpu_type),
- &(__cpu_model.__cpu_subtype));
+ getIntelProcessorTypeAndSubtype(Family, Model, &Features[0], &__cpu_model);
__cpu_model.__cpu_vendor = VENDOR_INTEL;
} else if (Vendor == SIG_AMD) {
// Get CPU type.
- getAMDProcessorTypeAndSubtype(Family, Model, &Features[0],
- &(__cpu_model.__cpu_type),
- &(__cpu_model.__cpu_subtype));
+ getAMDProcessorTypeAndSubtype(Family, Model, &Features[0], &__cpu_model);
__cpu_model.__cpu_vendor = VENDOR_AMD;
} else
__cpu_model.__cpu_vendor = VENDOR_OTHER;
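
(For reference, a standalone sketch with made-up names, not the compiler-rt definitions: it illustrates the two ideas the x86.c hunks above combine, namely a model struct built from plain unsigned int fields whose 16-byte size is pinned by a static_assert, and detection helpers that gather results into local Type/Subtype variables initialized to *_MAX sentinels and write them back only when a CPU was actually recognized.)

    #include <assert.h>

    /* Stand-in for __processor_model: four unsigned int fields, so the struct
     * is 16 bytes wherever unsigned int is 4 bytes (same assumption the real
     * static_assert encodes). */
    struct model_sketch {
      unsigned int vendor;
      unsigned int type;
      unsigned int subtype;
      unsigned int features[1];
    };

    static_assert(sizeof(struct model_sketch) == 16,
                  "unexpected layout would be an ABI break");

    enum type_sketch { TYPE_FOO = 1, TYPE_MAX_SKETCH };
    enum subtype_sketch { SUBTYPE_FOO = 1, SUBTYPE_MAX_SKETCH };

    /* Mirrors the new write-back rule: the struct is only updated when
     * detection produced something, so an unknown model leaves it untouched. */
    static void detect(unsigned model, struct model_sketch *out) {
      enum type_sketch type = TYPE_MAX_SKETCH;
      enum subtype_sketch subtype = SUBTYPE_MAX_SKETCH;

      if (model == 0x42) { /* made-up model number for illustration */
        type = TYPE_FOO;
        subtype = SUBTYPE_FOO;
      }

      if (type != TYPE_MAX_SKETCH)
        out->type = (unsigned int)type;
      if (subtype != SUBTYPE_MAX_SKETCH)
        out->subtype = (unsigned int)subtype;
    }

    int main(void) {
      struct model_sketch m = {0, 0, 0, {0}};
      detect(0x42, &m); /* recognized: type/subtype are filled in */
      detect(0x99, &m); /* unknown: m is left untouched */
      return m.type == TYPE_FOO ? 0 : 1;
    }
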
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index f0b7bce..2509db2 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -249,6 +249,7 @@ public:
LRUEntries.clear();
LRUEntries.init(Entries, sizeof(Entries));
+ OldestPresentEntry = nullptr;
AvailEntries.clear();
AvailEntries.init(Entries, sizeof(Entries));
@@ -322,8 +323,6 @@ public:
}
CachedBlock PrevEntry = Quarantine[QuarantinePos];
Quarantine[QuarantinePos] = Entry;
- if (OldestTime == 0)
- OldestTime = Entry.Time;
Entry = PrevEntry;
}
@@ -339,9 +338,6 @@ public:
}
insert(Entry);
-
- if (OldestTime == 0)
- OldestTime = Entry.Time;
} while (0);
for (MemMapT &EvictMemMap : EvictionMemMaps)
@@ -355,7 +351,6 @@ public:
SCUDO_SCOPED_TRACE(
GetSecondaryReleaseToOSTraceName(ReleaseToOS::Normal));
- // TODO: Add ReleaseToOS logic to LRU algorithm
releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
Mutex.unlock();
} else
@@ -535,6 +530,11 @@ public:
void unmapTestOnly() { empty(); }
+ void releaseOlderThanTestOnly(u64 ReleaseTime) {
+ ScopedLock L(Mutex);
+ releaseOlderThan(ReleaseTime);
+ }
+
private:
void insert(const CachedBlock &Entry) REQUIRES(Mutex) {
CachedBlock *AvailEntry = AvailEntries.front();
@@ -542,10 +542,16 @@ private:
*AvailEntry = Entry;
LRUEntries.push_front(AvailEntry);
+ if (OldestPresentEntry == nullptr && AvailEntry->Time != 0)
+ OldestPresentEntry = AvailEntry;
}
void remove(CachedBlock *Entry) REQUIRES(Mutex) {
DCHECK(Entry->isValid());
+ if (OldestPresentEntry == Entry) {
+ OldestPresentEntry = LRUEntries.getPrev(Entry);
+ DCHECK(OldestPresentEntry == nullptr || OldestPresentEntry->Time != 0);
+ }
LRUEntries.remove(Entry);
Entry->invalidate();
AvailEntries.push_front(Entry);
@@ -560,6 +566,7 @@ private:
for (CachedBlock &Entry : LRUEntries)
MapInfo[N++] = Entry.MemMap;
LRUEntries.clear();
+ OldestPresentEntry = nullptr;
}
for (uptr I = 0; I < N; I++) {
MemMapT &MemMap = MapInfo[I];
@@ -567,36 +574,42 @@ private:
}
}
- void releaseIfOlderThan(CachedBlock &Entry, u64 Time) REQUIRES(Mutex) {
- if (!Entry.isValid() || !Entry.Time)
- return;
- if (Entry.Time > Time) {
- if (OldestTime == 0 || Entry.Time < OldestTime)
- OldestTime = Entry.Time;
- return;
+ void releaseOlderThan(u64 ReleaseTime) REQUIRES(Mutex) {
+ SCUDO_SCOPED_TRACE(GetSecondaryReleaseOlderThanTraceName());
+
+ if (!Config::getQuarantineDisabled()) {
+ for (uptr I = 0; I < Config::getQuarantineSize(); I++) {
+ auto &Entry = Quarantine[I];
+ if (!Entry.isValid() || Entry.Time == 0 || Entry.Time > ReleaseTime)
+ continue;
+ Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase,
+ Entry.CommitSize);
+ Entry.Time = 0;
+ }
}
- Entry.MemMap.releaseAndZeroPagesToOS(Entry.CommitBase, Entry.CommitSize);
- Entry.Time = 0;
- }
- void releaseOlderThan(u64 Time) REQUIRES(Mutex) {
- SCUDO_SCOPED_TRACE(GetSecondaryReleaseOlderThanTraceName());
+ for (CachedBlock *Entry = OldestPresentEntry; Entry != nullptr;
+ Entry = LRUEntries.getPrev(Entry)) {
+ DCHECK(Entry->isValid());
+ DCHECK(Entry->Time != 0);
+
+ if (Entry->Time > ReleaseTime) {
+ // All entries are newer than this, so no need to keep scanning.
+ OldestPresentEntry = Entry;
+ return;
+ }
- if (!LRUEntries.size() || OldestTime == 0 || OldestTime > Time)
- return;
- OldestTime = 0;
- if (!Config::getQuarantineDisabled())
- for (uptr I = 0; I < Config::getQuarantineSize(); I++)
- releaseIfOlderThan(Quarantine[I], Time);
- for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
- releaseIfOlderThan(Entries[I], Time);
+ Entry->MemMap.releaseAndZeroPagesToOS(Entry->CommitBase,
+ Entry->CommitSize);
+ Entry->Time = 0;
+ }
+ OldestPresentEntry = nullptr;
}
HybridMutex Mutex;
u32 QuarantinePos GUARDED_BY(Mutex) = 0;
atomic_u32 MaxEntriesCount = {};
atomic_uptr MaxEntrySize = {};
- u64 OldestTime GUARDED_BY(Mutex) = 0;
atomic_s32 ReleaseToOsIntervalMs = {};
u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;
@@ -606,6 +619,8 @@ private:
NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
Quarantine GUARDED_BY(Mutex) = {};
+ // The oldest entry in LRUEntries that still has a non-zero Time.
+ CachedBlock *OldestPresentEntry GUARDED_BY(Mutex) = nullptr;
// Cached blocks stored in LRU order
DoublyLinkedList<CachedBlock> LRUEntries GUARDED_BY(Mutex);
// The unused Entries
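
(A simplified, self-contained sketch with hypothetical names, not the scudo classes: it mimics the bookkeeping the secondary.h hunks above introduce, where cached blocks sit in an LRU list with the newest block at the front, OldestPresent tracks the oldest block whose pages have not been released yet, and releaseOlderThan() walks from that block toward newer ones, stopping early at the first block whose timestamp is newer than the cutoff.)

    #include <cassert>
    #include <cstdint>
    #include <iterator>
    #include <list>

    struct Block {
      uint64_t Time; // 0 means the block's pages were already released
    };

    class LruCacheSketch {
    public:
      void insert(uint64_t Time) {
        Entries.push_front(Block{Time}); // newest blocks live at the front
        if (OldestPresent == Entries.end() && Time != 0)
          OldestPresent = Entries.begin(); // only unreleased block so far
      }

      // Release every block whose timestamp is <= Cutoff, mirroring the early
      // stop in the patched releaseOlderThan().
      void releaseOlderThan(uint64_t Cutoff) {
        auto It = OldestPresent;
        while (It != Entries.end()) {
          if (It->Time > Cutoff) {
            OldestPresent = It; // everything from here on is newer
            return;
          }
          It->Time = 0; // stand-in for releaseAndZeroPagesToOS()
          // std::prev moves toward the front of the list, i.e. toward newer blocks.
          It = (It == Entries.begin()) ? Entries.end() : std::prev(It);
        }
        OldestPresent = Entries.end(); // no unreleased blocks remain
      }

      bool released(int Index) const { // Index 0 is the newest block
        auto It = Entries.begin();
        std::advance(It, Index);
        return It->Time == 0;
      }

    private:
      std::list<Block> Entries;
      std::list<Block>::iterator OldestPresent = Entries.end();
    };

    int main() {
      LruCacheSketch Cache;
      Cache.insert(10);
      Cache.insert(20);
      Cache.insert(30);
      Cache.releaseOlderThan(20); // releases the blocks stamped 10 and 20
      assert(Cache.released(2) && Cache.released(1) && !Cache.released(0));
      return 0;
    }
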
diff --git a/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp b/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
index d8a7f6b..855a3e6 100644
--- a/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
@@ -403,6 +403,11 @@ template <class Config> struct CacheInfoType {
MemMap.getBase(), MemMap);
}
}
+
+ void storeMemMap(scudo::MemMapT &MemMap) {
+ Cache->store(Options, MemMap.getBase(), MemMap.getCapacity(),
+ MemMap.getBase(), MemMap);
+ }
};
TEST(ScudoSecondaryTest, AllocatorCacheEntryOrder) {
@@ -503,3 +508,83 @@ TEST(ScudoSecondaryTest, AllocatorCacheOptions) {
Info.Cache->setOption(scudo::Option::MaxCacheEntrySize, 1UL << 20));
EXPECT_TRUE(Info.Cache->canCache(1UL << 16));
}
+
+TEST(ScudoSecondaryTest, ReleaseOlderThanAllEntries) {
+ CacheInfoType<TestCacheConfig> Info;
+ using CacheConfig = CacheInfoType<TestCacheConfig>::CacheConfig;
+
+ Info.Cache->releaseOlderThanTestOnly(UINT64_MAX);
+
+ Info.fillCacheWithSameSizeBlocks(CacheConfig::getDefaultMaxEntriesCount(),
+ 1024);
+ for (size_t I = 0; I < Info.MemMaps.size(); I++) {
+ // Set the first u32 value to a non-zero value.
+ *reinterpret_cast<scudo::u32 *>(Info.MemMaps[I].getBase()) = 10;
+ }
+
+ Info.Cache->releaseOlderThanTestOnly(UINT64_MAX);
+
+ EXPECT_EQ(Info.MemMaps.size(), CacheConfig::getDefaultMaxEntriesCount());
+ for (size_t I = 0; I < Info.MemMaps.size(); I++) {
+ // All released maps will now be zero.
+ EXPECT_EQ(*reinterpret_cast<scudo::u32 *>(Info.MemMaps[I].getBase()), 0U);
+ }
+}
+
+// This test assumes that the timestamp comes from getMonotonicTimeFast().
+TEST(ScudoSecondaryTest, ReleaseOlderThanGroups) {
+ CacheInfoType<TestCacheConfig> Info;
+
+ // Disable the release interval so we can directly test the releaseOlderThan
+ // function.
+ Info.Cache->setOption(scudo::Option::ReleaseInterval, -1);
+
+ // Create all of the maps we are going to use.
+ for (size_t I = 0; I < 6; I++) {
+ Info.MemMaps.emplace_back(Info.allocate(1024));
+ // Set the first u32 value to a non-zero value.
+ *reinterpret_cast<scudo::u32 *>(Info.MemMaps[I].getBase()) = 10;
+ }
+
+ // Create three groups of entries at three different intervals.
+ Info.storeMemMap(Info.MemMaps[0]);
+ Info.storeMemMap(Info.MemMaps[1]);
+ scudo::u64 FirstTime = scudo::getMonotonicTimeFast();
+
+ // Need to make sure the next set of entries are stamped with a newer time.
+ while (scudo::getMonotonicTimeFast() <= FirstTime)
+ ;
+
+ Info.storeMemMap(Info.MemMaps[2]);
+ Info.storeMemMap(Info.MemMaps[3]);
+ scudo::u64 SecondTime = scudo::getMonotonicTimeFast();
+
+ // Need to make sure the next set of entries are stamped with a newer time.
+ while (scudo::getMonotonicTimeFast() <= SecondTime)
+ ;
+
+ Info.storeMemMap(Info.MemMaps[4]);
+ Info.storeMemMap(Info.MemMaps[5]);
+ scudo::u64 ThirdTime = scudo::getMonotonicTimeFast();
+
+ Info.Cache->releaseOlderThanTestOnly(FirstTime);
+ for (size_t I = 0; I < 2; I++) {
+ EXPECT_EQ(*reinterpret_cast<scudo::u32 *>(Info.MemMaps[I].getBase()), 0U);
+ }
+ for (size_t I = 2; I < 6; I++) {
+ EXPECT_EQ(*reinterpret_cast<scudo::u32 *>(Info.MemMaps[I].getBase()), 10U);
+ }
+
+ Info.Cache->releaseOlderThanTestOnly(SecondTime);
+ for (size_t I = 0; I < 4; I++) {
+ EXPECT_EQ(*reinterpret_cast<scudo::u32 *>(Info.MemMaps[I].getBase()), 0U);
+ }
+ for (size_t I = 4; I < 6; I++) {
+ EXPECT_EQ(*reinterpret_cast<scudo::u32 *>(Info.MemMaps[I].getBase()), 10U);
+ }
+
+ Info.Cache->releaseOlderThanTestOnly(ThirdTime);
+ for (size_t I = 0; I < 6; I++) {
+ EXPECT_EQ(*reinterpret_cast<scudo::u32 *>(Info.MemMaps[I].getBase()), 0U);
+ }
+}