author      Henry Linjamäki <henry.linjamaki@parmance.com>   2017-11-16 17:20:45 +0000
committer   Pekka Jääskeläinen <visit0r@gcc.gnu.org>         2017-11-16 17:20:45 +0000
commit      dc03239c2d8607a0f5cdb13a220bcf0c7f8ed7cd (patch)
tree        f2316102f532f913f50ba2931a318058c4bc88e0 /gcc/testsuite/brig.dg/test
parent      35a282e0bc285f2b62dd29220a6ef3a3ed87a0b3 (diff)
[BRIGFE] Reduce the number of type conversions due to the untyped HSAIL regs.

Instead of always representing HSAIL's untyped registers as unsigned int, gccbrig now pre-analyzes the BRIG code and declares each register variable with the type it is most often used as when storing data to or reading data from that register. This reduces the total number of conversions, which cannot always be optimized away.

From-SVN: r254837
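For illustration, a rough sketch of the effect the new internal-casts.hsail test below checks for: when a register such as $s0 is used mostly as f32, an instruction like

    add_f32 $s10, $s0, $s0;

is now expanded against a float-typed register variable,

    <float:32> s0;
    s10 = s0 + s0;

instead of an unsigned int s0 wrapped in a conversion on every use; a VIEW_CONVERT_EXPR<unsigned int>(s0) is emitted only where an instruction uses the register with a different type, e.g. add_u32 $s1, $s0, 123.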
Diffstat (limited to 'gcc/testsuite/brig.dg/test')
-rw-r--r--   gcc/testsuite/brig.dg/test/gimple/internal-casts.hsail   146
-rw-r--r--   gcc/testsuite/brig.dg/test/gimple/packed.hsail             18
-rw-r--r--   gcc/testsuite/brig.dg/test/gimple/vector.hsail             10
3 files changed, 159 insertions, 15 deletions
diff --git a/gcc/testsuite/brig.dg/test/gimple/internal-casts.hsail b/gcc/testsuite/brig.dg/test/gimple/internal-casts.hsail
new file mode 100644
index 0000000..52673c9
--- /dev/null
+++ b/gcc/testsuite/brig.dg/test/gimple/internal-casts.hsail
@@ -0,0 +1,146 @@
+module &module:1:0:$full:$large:$default;
+
+/* Test for casting from/to representation of HSA registers. */
+
+/* HSA registers are untyped, but in gccbrig they are presented as */
+/* variables with a type selected by analysis. Currently, each */
+/* register variable, per function, gets the type it is most often */
+/* used as. Therefore, a register variable can be of nearly any */
+/* type. This test makes sure the generic/tree expressions have the */
+/* right casts from/to the register variables. */
+
+
+/* { dg-do compile } */
+/* { dg-options "-fdump-tree-original" } */
+
+prog kernel &Kernel(kernarg_u64 %input_ptr, kernarg_u64 %output_ptr)
+{
+ private_u64 %foo;
+ private_u64 %bar;
+ private_b128 %baz;
+
+ ld_kernarg_u64 $d0, [%input_ptr];
+ ld_global_u32 $s0, [$d0];
+
+ /* Trick gccbrig into selecting the wanted type for the registers. */
+
+/* $s0 is selected as float... */
+/* { dg-final { scan-tree-dump "<float:32> s0;" "original"} } */
+/* ..., therefore, there should not be any casts. */
+/* { dg-final { scan-tree-dump "s10 = s0 \\\+ s0;" "original"} } */
+
+ add_f32 $s10, $s0, $s0;
+ add_f32 $s10, $s0, $s0;
+ add_f32 $s10, $s0, $s0;
+ add_f32 $s10, $s0, $s0;
+ add_f32 $s10, $s0, $s0;
+
+/* For an expression with another type, a cast is needed. */
+/* { dg-final { scan-tree-dump "s1 = VIEW_CONVERT_EXPR<unsigned int>.s0. \\\+ 123;" "original"} } */
+
+ add_u32 $s1, $s0, 123;
+
+/* { dg-final { scan-tree-dump "unsigned int s1;" "original"} } */
+
+ add_u32 $s10, $s1, 0;
+ add_u32 $s10, $s1, 0;
+ add_u32 $s10, $s1, 0;
+ add_u32 $s10, $s1, 0;
+ add_u32 $s10, $s1, 0;
+
+/* { dg-final { scan-tree-dump "s0 = VIEW_CONVERT_EXPR<<float:32>>.s1.;" "original"} } */
+
+ mov_b32 $s0, $s1;
+
+/* Rig the election for $d0 to be double. */
+/* { dg-final { scan-tree-dump "<float:64> d0;" "original"} } */
+/* { dg-final { scan-tree-dump "d10 = d0 \\\+ d0;" "original"} } */
+
+ add_f64 $d10, $d0, $d0;
+ add_f64 $d10, $d0, $d0;
+ add_f64 $d10, $d0, $d0;
+ add_f64 $d10, $d0, $d0;
+ add_f64 $d10, $d0, $d0;
+
+/* Make $s2 a vector type. */
+/* { dg-final { scan-tree-dump "vector.4. unsigned char s2;" "original"} } */
+/* { dg-final { scan-tree-dump "s2 = VIEW_CONVERT_EXPR<vector.4. unsigned char>\\\(s1\\\) \\\+ VIEW_CONVERT_EXPR<vector.4. unsigned char>\\\(s1\\\);" "original"} } */
+
+ add_pp_u8x4 $s2, $s1, $s1;
+
+/* { dg-final { scan-tree-dump "s20 = s2 \\\+ s2;" "original"} } */
+
+ add_pp_u8x4 $s20, $s2, $s2;
+ add_pp_u8x4 $s20, $s2, $s2;
+ add_pp_u8x4 $s20, $s2, $s2;
+ add_pp_u8x4 $s20, $s2, $s2;
+
+/* { dg-final { scan-tree-dump "d0 = VIEW_CONVERT_EXPR<<float:64>>.{VIEW_CONVERT_EXPR<unsigned int>.s0., VIEW_CONVERT_EXPR<unsigned int>.s2.}.;" "original"} } */
+
+ combine_v2_b64_b32 $d0, ($s0, $s2);
+
+/* { dg-final { scan-tree-dump "s2 = VIEW_CONVERT_EXPR<vector.4. unsigned char>.BIT_FIELD_REF <d0, 32, 0>.;" "original"} } */
+/* { dg-final { scan-tree-dump "s1 = BIT_FIELD_REF <d0, 32, 32>;" "original"} } */
+
+ expand_v2_b32_b64 ($s2, $s1), $d0;
+
+/* { dg-final { scan-tree-dump "s0 = VIEW_CONVERT_EXPR<<float:32>>\\\(.*VIEW_CONVERT_EXPR<unsigned int>.s0\[\)\]*;" "original"} } */
+
+ cvt_s16_s8 $s0, $s0;
+
+/* { dg-final { scan-tree-dump "c0 = .*VIEW_CONVERT_EXPR<<float:32>>.s2..* != 0;" "original"} } */
+
+ cvt_b1_f32 $c0, $s2;
+
+/* { dg-final { scan-tree-dump ".*__private_base_addr.* = .*\\\(unsigned char\\\) VIEW_CONVERT_EXPR<unsigned int>\\\(s0\\\)\[\)\]*;" "original"} } */
+
+ st_private_u8 $s0, [%foo];
+
+/* { dg-final { scan-tree-dump ".*__private_base_addr.* = .*\\\(unsigned short\\\) VIEW_CONVERT_EXPR<unsigned int>\\\(s2\\\)\[\)\]*;" "original"} } */
+
+ st_private_u16 $s2, [%bar];
+
+/* { dg-final { scan-tree-dump "mem_read.\[0-9\]* = \\\*\\\(signed char \\\*\\\) \\\(__private_base_addr .*\\\);\[ \n\]*s2 = VIEW_CONVERT_EXPR<vector.4. unsigned char>\\\(\\\(signed int\\\) mem_read.\[0-9\]*\\\);" "original"} } */
+
+ ld_private_s8 $s2, [%foo];
+
+/* { dg-final { scan-tree-dump "mem_read.\[0-9\]* = \\\*\\\(signed short \\\*\\\) \\\(__private_base_addr .*\\\);\[ \n\]*s0 = VIEW_CONVERT_EXPR<<float:32>>\\\(\\\(signed int\\\) mem_read.\[0-9\]*\\\);" "original"} } */
+
+ ld_private_s16 $s0, [%bar];
+
+/* { dg-final { scan-tree-dump "\\\*\\\(<float:32> \\\*\\\) \\\(__private_base_addr.*\\\) \\\+ 0 = s0;" "original"} } */
+/* { dg-final { scan-tree-dump "\\\*\\\(<float:32> \\\*\\\) \\\(__private_base_addr.*\\\) \\\+ 4 = VIEW_CONVERT_EXPR<<float:32>>\\\(s1\\\);" "original"} } */
+/* { dg-final { scan-tree-dump "\\\*\\\(<float:32> \\\*\\\) \\\(__private_base_addr.*\\\) \\\+ 8 = VIEW_CONVERT_EXPR<<float:32>>\\\(s2\\\);" "original"} } */
+
+ st_v3_private_f32 ($s0, $s1, $s2), [%baz];
+
+/* { dg-final { scan-tree-dump "mem_read.\[0-9\]* = \\\*\\\(signed short \\\*\\\) \\\(__private_base_addr.*\\\) \\\+ 0;\[ \n\]*s0 = VIEW_CONVERT_EXPR<<float:32>>\\\(\\\(signed int\\\) mem_read.\[0-9\]*\\\);" "original"} } */
+/* { dg-final { scan-tree-dump "mem_read.\[0-9\]* = \\\*\\\(signed short \\\*\\\) \\\(__private_base_addr.*\\\) \\\+ 2;\[ \n\]*s1 = VIEW_CONVERT_EXPR<unsigned int>\\\(\\\(signed int\\\) mem_read.\[0-9\]*\\\);" "original"} } */
+/* { dg-final { scan-tree-dump "mem_read.\[0-9\]* = \\\*\\\(signed short \\\*\\\) \\\(__private_base_addr.*\\\) \\\+ 4;\[ \n\]*s2 = VIEW_CONVERT_EXPR<vector.4. unsigned char>\\\(\\\(signed int\\\) mem_read.\[0-9\]*\\\);" "original"} } */
+
+ ld_v3_private_s16 ($s0, $s1, $s2), [%baz];
+
+/* { dg-final { scan-tree-dump "s5 = .*VIEW_CONVERT_EXPR<unsigned int>\\\(s0\\\) == VIEW_CONVERT_EXPR<unsigned int>\\\(s2\\\)\\\) .*;" "original"} } */
+
+ cmp_eq_s32_u32 $s5, $s0, $s2;
+
+/* { dg-final { scan-tree-dump "s6 = VIEW_CONVERT_EXPR<<float:32>>\\\(.*VIEW_CONVERT_EXPR<vector\\\(2\\\) unsigned short>\\\(s0\\\).*VIEW_CONVERT_EXPR<vector\\\(2\\\) unsigned short>\\\(s2\\\).*;" "original"} } */
+
+ cmp_eq_pp_u16x2_u16x2 $s6, $s0, $s2;
+
+/* { dg-final { scan-tree-dump "<float:32> s60;" "original"} } */
+
+ add_f32 $s60, $s6, $s6;
+ add_f32 $s60, $s6, $s6;
+ add_f32 $s60, $s6, $s6;
+ add_f32 $s60, $s6, $s6;
+
+ ld_kernarg_u64 $d0, [%output_ptr];
+ st_global_u32 $s0, [$d0];
+
+ ret;
+};
+
+
+
+
diff --git a/gcc/testsuite/brig.dg/test/gimple/packed.hsail b/gcc/testsuite/brig.dg/test/gimple/packed.hsail
index 9219ffd4..4cba5fc 100644
--- a/gcc/testsuite/brig.dg/test/gimple/packed.hsail
+++ b/gcc/testsuite/brig.dg/test/gimple/packed.hsail
@@ -42,7 +42,7 @@ prog kernel &Kernel(kernarg_u64 %input_ptr, kernarg_u64 %output_ptr)
ret;
};
-/* The b128 load is done using uint128_t*.
+/* The b128 load is done using uint128_t*. */
/* { dg-final { scan-tree-dump "q0 = VIEW_CONVERT_EXPR<uint128_t>\\\(mem_read.\[0-9\]+\\\);" "original"} } */
/* Before arithmetics, the uint128_t is casted to a vector datatype. */
@@ -52,27 +52,25 @@ prog kernel &Kernel(kernarg_u64 %input_ptr, kernarg_u64 %output_ptr)
/* in comparison to the HSAIL syntax. */
/* { dg-final { scan-tree-dump "\\\+ { 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0 }" "original"} } */
-/* After arithmetics, the vector DT is casted back to a uint128_t. */
-/* { dg-final { scan-tree-dump "q1 = VIEW_CONVERT_EXPR<uint128_t>" "original"} } */
-
/* Broadcasted the constant vector's lowest element and summed it up in the next line. */
-/* { dg-final { scan-tree-dump "= { 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15 };\[\n \]+_\[0-9\]+ = _\[0-9\]+ \\\+ _\[0-9\]+;" "gimple"} } */
+/* { dg-final { scan-tree-dump "= { 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15 };\[\n \]+\[a-z0-9_\]+ = \[a-z0-9_\]+ \\\+ \[a-z0-9_\]+;" "gimple"} } */
/* Broadcasted the registers lowest element via a VEC_PERM_EXPR that has an all-zeros mask. */
-/* { dg-final { scan-tree-dump "VEC_PERM_EXPR <_\[0-9\]+, _\[0-9\]+, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }>;" "gimple" } } */
+/* { dg-final { scan-tree-dump "VEC_PERM_EXPR <\[a-z0-9_\]+, \[a-z0-9_\]+, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }>;" "gimple" } } */
/* For the add_ss we assume performing the computation over the whole vector is cheaper than */
/* extracting the scalar and performing a scalar operation. This aims to stay in the vector
/* datapath as long as possible. */
-/* { dg-final { scan-tree-dump "_\[0-9\]+ = VIEW_CONVERT_EXPR<vector\\\(16\\\) unsigned char>\\\(q2\\\);\[\n \]+_\[0-9\]+ = VIEW_CONVERT_EXPR<vector\\\(16\\\) unsigned char>\\\(q3\\\);\[\n \]+_\[0-9\]+ = _\[0-9\]+ \\\+ _\[0-9\]+;" "gimple" } } */
+/* { dg-final { scan-tree-dump "_\[0-9\]+ = q2 \\\+ q3;" "gimple" } } */
/* Insert the lowest element of the result to the lowest element of the result register. */
-/* { dg-final { scan-tree-dump "= VEC_PERM_EXPR <_\[0-9\]+, new_output.\[0-9\]+_\[0-9\]+, { 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }>;" "gimple" } } */
+/* { dg-final { scan-tree-dump "= VEC_PERM_EXPR <\[a-z0-9_\]+, new_output.\[0-9\]+_\[0-9\]+, { 16, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }>;" "gimple" } } */
-/* { dg-final { scan-tree-dump "q4 = VIEW_CONVERT_EXPR<uint128_t>\\\(s_output.\[0-9\]+_\[0-9\]+\\\);" "gimple" } } */
+/* FIXME */
+/* { dg-final { scan-tree-dump "q4 = \(VIEW_CONVERT_EXPR<uint128_t>\\\()?s_output.\[0-9\]+\(_\[0-9\]+\)*\\\)?;" "gimple" } } */
/* The saturating arithmetics are (currently) implemented using scalar builtin calls. */
/* { dg-final { scan-tree-dump-times "= __builtin___hsail_sat_add_u8" 64 "gimple" } } */
/* A single operand vector instr (neg.) */
-/* { dg-final { scan-tree-dump " = VIEW_CONVERT_EXPR<vector\\\(8\\\) signed short>\\\(q8\\\);\[\n \]+_\[0-9\]+ = -_\[0-9\]+;\[\n \]+" "gimple" } } */
+/* { dg-final { scan-tree-dump "= VIEW_CONVERT_EXPR<vector\\\(8\\\) signed short>\\\(\(s_output.\[0-9\]+_\[0-9\]+|q8\)\\\);\[\n \]+q9 = -_\[0-9\]+;\[\n \]+" "gimple" } } */
diff --git a/gcc/testsuite/brig.dg/test/gimple/vector.hsail b/gcc/testsuite/brig.dg/test/gimple/vector.hsail
index 2071840..7529333 100644
--- a/gcc/testsuite/brig.dg/test/gimple/vector.hsail
+++ b/gcc/testsuite/brig.dg/test/gimple/vector.hsail
@@ -32,18 +32,18 @@ prog kernel &Kernel(kernarg_u64 %input_ptr, kernarg_u64 %output_ptr)
/* { dg-final { scan-tree-dump " = MEM\\\[\\\(vector\\\(2\\\) <float:32> \\\*\\\)" "original"} } */
/* The v3 load is scalarized (at the moment) due to gcc requiring 2's exponent wide vectors. */
-/* { dg-final { scan-tree-dump "s0 = VIEW_CONVERT_EXPR<unsigned int>\\\(BIT_FIELD_REF <mem_read.\[0-9\]+, 32, 0>\\\);\[\n ]+s1 = VIEW_CONVERT_EXPR<unsigned int>\\\(BIT_FIELD_REF <mem_read.\[0-9\]+, 32, 32>\\\);" "original"} } */
+/* { dg-final { scan-tree-dump "s0 = .*BIT_FIELD_REF <mem_read.\[0-9\]+, 32, 0>\\\)?;\[\n ]+s1 = .*BIT_FIELD_REF <mem_read.\[0-9\]+, 32, 32>\\\)?;" "original"} } */
/* The v4 load is done via casting to a vector datatype ptr. */
/* { dg-final { scan-tree-dump " = MEM\\\[\\\(vector\\\(4\\\) <float:32> \\\*\\\)" "original"} } */
/* The combines are generated to vector constructors. */
-/* { dg-final { scan-tree-dump "{s1, s0}" "original"} } */
-/* { dg-final { scan-tree-dump "{s2, s3}" "original"} } */
+/* { dg-final { scan-tree-dump "{.*s1\\\)?, .*s0\\\)?}" "original"} } */
+/* { dg-final { scan-tree-dump "{.*s2\\\)?, .*s3\\\)?}" "original"} } */
/* Expands to BIT_FIELD_REFs. */
-/* { dg-final { scan-tree-dump "s0 = BIT_FIELD_REF <d4, 32, 0>;" "original"} } */
-/* { dg-final { scan-tree-dump "s3 = BIT_FIELD_REF <d4, 32, 32>;" "original"} } */
+/* { dg-final { scan-tree-dump "s0 = \(VIEW_CONVERT_EXPR.*\\\(\)?BIT_FIELD_REF <d4, 32, 0>\\\)?;" "original"} } */
+/* { dg-final { scan-tree-dump "s3 = \(VIEW_CONVERT_EXPR.*\\\(\)?BIT_FIELD_REF <d4, 32, 32>\\\)?;" "original"} } */
/* The v1 store is done via casting to a vector datatype ptr and constructing a vector from the inputs. */
/* { dg-final { scan-tree-dump "MEM\\\[\\\(vector\\\(2\\\) <float:32> \\\*\\\)\\\(<float:32> \\\*\\\) d1\\\] = " "original"} } */