author     Simon Marchi <simon.marchi@efficios.com>  2022-03-16 09:01:15 -0400
committer  Simon Marchi <simon.marchi@polymtl.ca>    2022-03-16 09:01:15 -0400
commit     c077c5802c396e4548516f15c8f03d7684b236ef (patch)
tree       74438621c9ae1313a00db836b9546019fc188943 /binutils
parent     37870be8740a4f903a61d43e6c1adede415473a9 (diff)
binutils/readelf: decode AMDGPU-specific e_flags
Decode and print the AMDGPU-specific fields of e_flags, as documented here:

  https://llvm.org/docs/AMDGPUUsage.html#header

That is:

 - The specific GPU model
 - Whether the xnack and sramecc features are enabled

The result looks like:

  -  Flags:                            0x52f
  +  Flags:                            0x52f, gfx906, xnack any, sramecc any

The flags for the "HSA" OS ABI are properly versioned and documented on that
page.  But the NONE, PAL and MESA3D OS ABIs are not well documented nor
versioned.  Taking a peek at the LLVM source code, we see that they encode
their flags the same way as HSA v3.  For example, for PAL:

  https://github.com/llvm/llvm-project/blob/c8b614cd74a92d85936aed5ac7c642af75ffdc29/llvm/lib/Target/AMDGPU/MCTargetDesc/AMDGPUTargetStreamer.cpp#L601

So for those other OS ABIs, we read them the same as HSA v3.

binutils/ChangeLog:

        * readelf.c: Include elf/amdgpu.h.
        (decode_AMDGPU_machine_flags): New.
        (get_machine_flags): Handle flags for EM_AMDGPU machine type.

include/ChangeLog:

        * elf/amdgpu.h: Add EF_AMDGPU_MACH_AMDGCN_* and
        EF_AMDGPU_FEATURE_* defines.

Change-Id: Ib5b94df7cae0719a22cf4e4fd0629330e9485c12
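To make the encoding concrete, here is a minimal standalone sketch (not part of the patch) that decodes the example value 0x52f by hand using the HSA v4 layout described above.  The EF_AMDGPU_* constant values are assumptions taken from the LLVM AMDGPUUsage table and include/elf/amdgpu.h; double-check them against your headers.

/* decode-flags-example.c: standalone illustration, not part of the patch.
   The EF_AMDGPU_* values below are assumed from the LLVM AMDGPUUsage
   documentation and include/elf/amdgpu.h.  */

#include <stdio.h>

#define EF_AMDGPU_MACH                    0x0ff
#define EF_AMDGPU_MACH_AMDGCN_GFX906      0x02f
#define EF_AMDGPU_FEATURE_XNACK_V4        0x300
#define EF_AMDGPU_FEATURE_XNACK_ANY_V4    0x100
#define EF_AMDGPU_FEATURE_SRAMECC_V4      0xc00
#define EF_AMDGPU_FEATURE_SRAMECC_ANY_V4  0x400

int
main (void)
{
  unsigned int e_flags = 0x52f;   /* The example value from above.  */

  /* 0x52f & 0x0ff == 0x02f, the gfx906 machine code.  */
  if ((e_flags & EF_AMDGPU_MACH) == EF_AMDGPU_MACH_AMDGCN_GFX906)
    printf ("gfx906");

  /* 0x52f & 0x300 == 0x100, i.e. "xnack any".  */
  if ((e_flags & EF_AMDGPU_FEATURE_XNACK_V4) == EF_AMDGPU_FEATURE_XNACK_ANY_V4)
    printf (", xnack any");

  /* 0x52f & 0xc00 == 0x400, i.e. "sramecc any".  */
  if ((e_flags & EF_AMDGPU_FEATURE_SRAMECC_V4) == EF_AMDGPU_FEATURE_SRAMECC_ANY_V4)
    printf (", sramecc any");

  printf ("\n");
  return 0;
}

Compiled and run, this prints "gfx906, xnack any, sramecc any", matching the Flags line shown above; readelf performs the same masking, only with the full GPU-model table and the HSA v3 fallback.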
Diffstat (limited to 'binutils')
-rw-r--r--  binutils/ChangeLog    6
-rw-r--r--  binutils/readelf.c  152
2 files changed, 158 insertions, 0 deletions
diff --git a/binutils/ChangeLog b/binutils/ChangeLog
index 694ad49..3ed0b0a 100644
--- a/binutils/ChangeLog
+++ b/binutils/ChangeLog
@@ -1,5 +1,11 @@
2022-03-16  Simon Marchi  <simon.marchi@efficios.com>

+        * readelf.c: Include elf/amdgpu.h.
+        (decode_AMDGPU_machine_flags): New.
+        (get_machine_flags): Handle flags for EM_AMDGPU machine type.
+
+2022-03-16  Simon Marchi  <simon.marchi@efficios.com>
+
        * readelf.c (get_osabi_name): Handle EM_AMDGPU OS ABIs.

2022-03-16  Nick Clifton  <nickc@redhat.com>
diff --git a/binutils/readelf.c b/binutils/readelf.c
index e8974aa..00b5e54 100644
--- a/binutils/readelf.c
+++ b/binutils/readelf.c
@@ -92,6 +92,7 @@
#include "elf/aarch64.h"
#include "elf/alpha.h"
+#include "elf/amdgpu.h"
#include "elf/arc.h"
#include "elf/arm.h"
#include "elf/avr.h"
@@ -3565,6 +3566,153 @@ decode_NDS32_machine_flags (unsigned e_flags, char buf[], size_t size)
    r += snprintf (buf + r, size -r, ", L2C");
}

+static void
+decode_AMDGPU_machine_flags (Filedata *filedata, unsigned int e_flags,
+                             char *buf)
+{
+  unsigned char *e_ident = filedata->file_header.e_ident;
+  unsigned char osabi = e_ident[EI_OSABI];
+  unsigned char abiversion = e_ident[EI_ABIVERSION];
+  unsigned int mach;
+
+  /* HSA OS ABI v2 used a different encoding, but we don't need to support it,
+     it has been deprecated for a while.
+
+     The PAL, MESA3D and NONE OS ABIs are not properly versioned, at the time
+     of writing, they use the same flags as HSA v3, so the code below uses that
+     assumption. */
+  if (osabi == ELFOSABI_AMDGPU_HSA && abiversion < ELFABIVERSION_AMDGPU_HSA_V3)
+    return;
+
+  mach = e_flags & EF_AMDGPU_MACH;
+  switch (mach)
+    {
+#define AMDGPU_CASE(code, string) \
+  case code: strcat (buf, ", " string); break;
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX600, "gfx600")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX601, "gfx601")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX700, "gfx700")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX701, "gfx701")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX702, "gfx702")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX703, "gfx703")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX704, "gfx704")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX801, "gfx801")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX802, "gfx802")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX803, "gfx803")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX810, "gfx810")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX900, "gfx900")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX902, "gfx902")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX904, "gfx904")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX906, "gfx906")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX908, "gfx908")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX909, "gfx909")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX90C, "gfx90c")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX1010, "gfx1010")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX1011, "gfx1011")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX1012, "gfx1012")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX1030, "gfx1030")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX1031, "gfx1031")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX1032, "gfx1032")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX1033, "gfx1033")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX602, "gfx602")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX705, "gfx705")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX805, "gfx805")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX1035, "gfx1035")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX1034, "gfx1034")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX90A, "gfx90a")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX940, "gfx940")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX1013, "gfx1013")
+    AMDGPU_CASE (EF_AMDGPU_MACH_AMDGCN_GFX1036, "gfx1036")
+    default:
+      sprintf (buf, _(", <unknown AMDGPU GPU type: %#x>"), mach);
+      break;
+#undef AMDGPU_CASE
+    }
+
+  buf += strlen (buf);
+  e_flags &= ~EF_AMDGPU_MACH;
+
+  if ((osabi == ELFOSABI_AMDGPU_HSA
+       && abiversion == ELFABIVERSION_AMDGPU_HSA_V3)
+      || osabi != ELFOSABI_AMDGPU_HSA)
+    {
+      /* For HSA v3 and other OS ABIs. */
+      if (e_flags & EF_AMDGPU_FEATURE_XNACK_V3)
+        {
+          strcat (buf, ", xnack on");
+          buf += strlen (buf);
+          e_flags &= ~EF_AMDGPU_FEATURE_XNACK_V3;
+        }
+
+      if (e_flags & EF_AMDGPU_FEATURE_SRAMECC_V3)
+        {
+          strcat (buf, ", sramecc on");
+          buf += strlen (buf);
+          e_flags &= ~EF_AMDGPU_FEATURE_SRAMECC_V3;
+        }
+    }
+  else
+    {
+      /* For HSA v4+. */
+      int xnack, sramecc;
+
+      xnack = e_flags & EF_AMDGPU_FEATURE_XNACK_V4;
+      switch (xnack)
+        {
+        case EF_AMDGPU_FEATURE_XNACK_UNSUPPORTED_V4:
+          break;
+
+        case EF_AMDGPU_FEATURE_XNACK_ANY_V4:
+          strcat (buf, ", xnack any");
+          break;
+
+        case EF_AMDGPU_FEATURE_XNACK_OFF_V4:
+          strcat (buf, ", xnack off");
+          break;
+
+        case EF_AMDGPU_FEATURE_XNACK_ON_V4:
+          strcat (buf, ", xnack on");
+          break;
+
+        default:
+          sprintf (buf, _(", <unknown xnack value: %#x>"), xnack);
+          break;
+        }
+
+      buf += strlen (buf);
+      e_flags &= ~EF_AMDGPU_FEATURE_XNACK_V4;
+
+      sramecc = e_flags & EF_AMDGPU_FEATURE_SRAMECC_V4;
+      switch (sramecc)
+        {
+        case EF_AMDGPU_FEATURE_SRAMECC_UNSUPPORTED_V4:
+          break;
+
+        case EF_AMDGPU_FEATURE_SRAMECC_ANY_V4:
+          strcat (buf, ", sramecc any");
+          break;
+
+        case EF_AMDGPU_FEATURE_SRAMECC_OFF_V4:
+          strcat (buf, ", sramecc off");
+          break;
+
+        case EF_AMDGPU_FEATURE_SRAMECC_ON_V4:
+          strcat (buf, ", sramecc on");
+          break;
+
+        default:
+          sprintf (buf, _(", <unknown sramecc value: %#x>"), sramecc);
+          break;
+        }
+
+      buf += strlen (buf);
+      e_flags &= ~EF_AMDGPU_FEATURE_SRAMECC_V4;
+    }
+
+  if (e_flags != 0)
+    sprintf (buf, _(", unknown flags bits: %#x"), e_flags);
+}
+
static char *
get_machine_flags (Filedata * filedata, unsigned e_flags, unsigned e_machine)
{
@@ -3717,6 +3865,10 @@ get_machine_flags (Filedata * filedata, unsigned e_flags, unsigned e_machine)
            }
          break;

+        case EM_AMDGPU:
+          decode_AMDGPU_machine_flags (filedata, e_flags, buf);
+          break;
+
        case EM_CYGNUS_MEP:
          switch (e_flags & EF_MEP_CPU_MASK)
            {