about summary refs log tree commit diff
path: root/gcc
diff options
context:
space:
mode:
authorChristophe Lyon <christophe.lyon@arm.com>2023-01-16 17:48:25 +0000
committerChristophe Lyon <christophe.lyon@arm.com>2023-01-19 14:18:14 +0000
commita38c079248411ea9dc0610873b3366192315bfee (patch)
treeb3305f8615a6718963d45b366e17738a328408d9 /gcc
parent1cfaaa42296298ea2d2b22a93d3c8c17104e6cd3 (diff)
downloadgcc-a38c079248411ea9dc0610873b3366192315bfee.zip
gcc-a38c079248411ea9dc0610873b3366192315bfee.tar.gz
gcc-a38c079248411ea9dc0610873b3366192315bfee.tar.bz2
aarch64: fix ICE in aarch64_layout_arg [PR108411]
The previous patch added an assert which should not be applied to PST types
(Pure Scalable Types) because alignment does not matter in this case.  This
patch moves the assert after the PST case is handled to avoid the ICE.

	PR target/108411

gcc/
	* config/aarch64/aarch64.cc (aarch64_layout_arg): Improve
	comment.  Move assert about alignment a bit later.
Diffstat (limited to 'gcc')
-rw-r--r--gcc/config/aarch64/aarch64.cc27
1 file changed, 20 insertions(+), 7 deletions(-)
diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
index d36b573..d55c250 100644
--- a/gcc/config/aarch64/aarch64.cc
+++ b/gcc/config/aarch64/aarch64.cc
@@ -7659,7 +7659,18 @@ aarch64_layout_arg (cumulative_args_t pcum_v, const function_arg_info &arg)
&& (currently_expanding_function_start
|| currently_expanding_gimple_stmt));
- /* There are several things to note here:
+ /* HFAs and HVAs can have an alignment greater than 16 bytes. For example:
+
+ typedef struct foo {
+ __Int8x16_t foo[2] __attribute__((aligned(32)));
+ } foo;
+
+ is still a HVA despite its larger-than-normal alignment.
+ However, such over-aligned HFAs and HVAs are guaranteed to have
+ no padding.
+
+ If we exclude HFAs and HVAs from the discussion below, then there
+ are several things to note:
- Both the C and AAPCS64 interpretations of a type's alignment should
give a value that is no greater than the type's size.
@@ -7704,12 +7715,6 @@ aarch64_layout_arg (cumulative_args_t pcum_v, const function_arg_info &arg)
would treat the alignment as though it was *equal to* 16 bytes.
Both behaviors were wrong, but in different cases. */
- unsigned int alignment
- = aarch64_function_arg_alignment (mode, type, &abi_break,
- &abi_break_packed);
- gcc_assert (alignment <= 16 * BITS_PER_UNIT
- && (!alignment || abi_break < alignment)
- && (!abi_break_packed || alignment < abi_break_packed));
pcum->aapcs_arg_processed = true;
@@ -7780,6 +7785,14 @@ aarch64_layout_arg (cumulative_args_t pcum_v, const function_arg_info &arg)
&nregs);
gcc_assert (!sve_p || !allocate_nvrn);
+ unsigned int alignment
+ = aarch64_function_arg_alignment (mode, type, &abi_break,
+ &abi_break_packed);
+
+ gcc_assert ((allocate_nvrn || alignment <= 16 * BITS_PER_UNIT)
+ && (!alignment || abi_break < alignment)
+ && (!abi_break_packed || alignment < abi_break_packed));
+
/* allocate_ncrn may be false-positive, but allocate_nvrn is quite reliable.
The following code thus handles passing by SIMD/FP registers first. */