author    Richard Sandiford <richard.sandiford@arm.com>  2023-12-05 10:11:27 +0000
committer Richard Sandiford <richard.sandiford@arm.com>  2023-12-05 10:11:27 +0000
commit    80fc055cf00fee4b1f9f19f77c8880b12226e086 (patch)
tree      2bea0e571c02966011cf019454625d14c030cd04
parent    084122adb5792a9c8e7f7876e2c1d59ba80c228b (diff)
aarch64: Add a VNx1TI mode
Although TI isn't really a native SVE element mode, it's convenient for SME
if we define VNx1TI anyway, so that it can be used to distinguish .Q ZA
operations from others.  It's purely an RTL convenience and isn't (yet) a
valid storage mode.

gcc/
	* config/aarch64/aarch64-modes.def: Add VNx1TI.
 gcc/config/aarch64/aarch64-modes.def | 21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)
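As a rough illustration (not part of the patch), the standalone C sketch below
shows how the unit counts in the modified macro scale with the SVE vector
length.  aarch64_sve_vg is the number of 64-bit granules per vector, so a
DI-based mode such as VNx2DI has vg units per single vector while the new
VNx1TI mode has half that, which is why the patch uses
exact_div (aarch64_sve_vg * NVECS, 2).  The program and its variable names are
illustrative only and do not appear in GCC.

/* Standalone sketch, not GCC code: shows how per-mode unit counts scale
   with the SVE vector length.  "vg" plays the role of aarch64_sve_vg,
   the number of 64-bit granules in one SVE vector.  */
#include <stdio.h>

int main (void)
{
  /* SVE vector lengths are multiples of 128 bits, so vg is always even
     and exact_div (vg * NVECS, 2) in the patch never truncates.  */
  for (int vector_bits = 128; vector_bits <= 2048; vector_bits *= 2)
    {
      int vg = vector_bits / 64;
      printf ("VL %4d bits: VNx16QI %3d units, VNx2DI %2d units, "
              "VNx1TI %2d units\n",
              vector_bits, vg * 8, vg, vg / 2);
    }
  return 0;
}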
diff --git a/gcc/config/aarch64/aarch64-modes.def b/gcc/config/aarch64/aarch64-modes.def
index 6b4f4e1..a3efc5b 100644
--- a/gcc/config/aarch64/aarch64-modes.def
+++ b/gcc/config/aarch64/aarch64-modes.def
@@ -156,7 +156,7 @@ ADV_SIMD_Q_REG_STRUCT_MODES (4, V4x16, V4x8, V4x4, V4x2)
for 8-bit, 16-bit, 32-bit and 64-bit elements respectively. It isn't
strictly necessary to set the alignment here, since the default would
be clamped to BIGGEST_ALIGNMENT anyhow, but it seems clearer. */
-#define SVE_MODES(NVECS, VB, VH, VS, VD) \
+#define SVE_MODES(NVECS, VB, VH, VS, VD, VT) \
VECTOR_MODES_WITH_PREFIX (VNx, INT, 16 * NVECS, NVECS == 1 ? 1 : 4); \
VECTOR_MODES_WITH_PREFIX (VNx, FLOAT, 16 * NVECS, NVECS == 1 ? 1 : 4); \
\
@@ -164,6 +164,7 @@ ADV_SIMD_Q_REG_STRUCT_MODES (4, V4x16, V4x8, V4x4, V4x2)
ADJUST_NUNITS (VH##HI, aarch64_sve_vg * NVECS * 4); \
ADJUST_NUNITS (VS##SI, aarch64_sve_vg * NVECS * 2); \
ADJUST_NUNITS (VD##DI, aarch64_sve_vg * NVECS); \
+ ADJUST_NUNITS (VT##TI, exact_div (aarch64_sve_vg * NVECS, 2)); \
ADJUST_NUNITS (VH##BF, aarch64_sve_vg * NVECS * 4); \
ADJUST_NUNITS (VH##HF, aarch64_sve_vg * NVECS * 4); \
ADJUST_NUNITS (VS##SF, aarch64_sve_vg * NVECS * 2); \
@@ -173,17 +174,23 @@ ADV_SIMD_Q_REG_STRUCT_MODES (4, V4x16, V4x8, V4x4, V4x2)
ADJUST_ALIGNMENT (VH##HI, 16); \
ADJUST_ALIGNMENT (VS##SI, 16); \
ADJUST_ALIGNMENT (VD##DI, 16); \
+ ADJUST_ALIGNMENT (VT##TI, 16); \
ADJUST_ALIGNMENT (VH##BF, 16); \
ADJUST_ALIGNMENT (VH##HF, 16); \
ADJUST_ALIGNMENT (VS##SF, 16); \
ADJUST_ALIGNMENT (VD##DF, 16);
-/* Give SVE vectors the names normally used for 256-bit vectors.
- The actual number depends on command-line flags. */
-SVE_MODES (1, VNx16, VNx8, VNx4, VNx2)
-SVE_MODES (2, VNx32, VNx16, VNx8, VNx4)
-SVE_MODES (3, VNx48, VNx24, VNx12, VNx6)
-SVE_MODES (4, VNx64, VNx32, VNx16, VNx8)
+/* Give SVE vectors names of the form VNxX, where X describes what is
+ stored in each 128-bit unit. The actual size of the mode depends
+ on command-line flags.
+
+ VNx1TI isn't really a native SVE mode, but it can be useful in some
+ limited situations. */
+VECTOR_MODE_WITH_PREFIX (VNx, INT, TI, 1, 1);
+SVE_MODES (1, VNx16, VNx8, VNx4, VNx2, VNx1)
+SVE_MODES (2, VNx32, VNx16, VNx8, VNx4, VNx2)
+SVE_MODES (3, VNx48, VNx24, VNx12, VNx6, VNx3)
+SVE_MODES (4, VNx64, VNx32, VNx16, VNx8, VNx4)
/* Partial SVE vectors: