aboutsummaryrefslogtreecommitdiff
path: root/gcc
diff options
context:
space:
mode:
Diffstat (limited to 'gcc')
-rw-r--r--gcc/ChangeLog25
-rw-r--r--gcc/DATESTAMP2
-rw-r--r--gcc/ada/ChangeLog126
-rw-r--r--gcc/config/loongarch/genopts/loongarch.opt.in4
-rw-r--r--gcc/config/loongarch/lasx.md53
-rw-r--r--gcc/config/loongarch/loongarch.cc78
-rw-r--r--gcc/config/loongarch/loongarch.md14
-rw-r--r--gcc/config/loongarch/loongarch.opt4
-rw-r--r--gcc/config/loongarch/loongarch.opt.urls3
-rw-r--r--gcc/config/loongarch/lsx.md53
-rw-r--r--gcc/config/loongarch/simd.md71
-rw-r--r--gcc/cp/ChangeLog6
-rw-r--r--gcc/doc/invoke.texi13
-rw-r--r--gcc/fortran/ChangeLog12
-rw-r--r--gcc/fortran/decl.cc14
-rw-r--r--gcc/fortran/primary.cc8
-rw-r--r--gcc/fortran/resolve.cc3
-rw-r--r--gcc/gimple-range-op.cc4
-rw-r--r--gcc/range-op-mixed.h41
-rw-r--r--gcc/range-op.cc76
-rw-r--r--gcc/testsuite/ChangeLog26
-rw-r--r--gcc/testsuite/gcc.dg/pr91191.c20
-rw-r--r--gcc/testsuite/gcc.target/loongarch/pr122097.c271
-rw-r--r--gcc/testsuite/gcc.target/loongarch/trap-1.c9
-rw-r--r--gcc/testsuite/gcc.target/loongarch/trap-default.c9
-rw-r--r--gcc/testsuite/gfortran.dg/pdt_62.f0378
-rw-r--r--gcc/testsuite/gfortran.dg/pdt_63.f0326
-rw-r--r--gcc/testsuite/gfortran.dg/pdt_64.f0317
-rw-r--r--gcc/testsuite/gfortran.dg/vect/pr70102.f21
-rw-r--r--gcc/tree-vect-slp.cc11
30 files changed, 976 insertions, 122 deletions
diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 1ae78b4..f05b25b 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,28 @@
+2025-10-28 Richard Biener <rguenther@suse.de>
+
+ * tree-vect-loop-manip.cc (vect_update_ivs_after_vectorizer):
+ Avoid explicit LOOP_VINFO_IV_EXIT reference.
+
+2025-10-28 Artemiy Volkov <artemiy.volkov@arm.com>
+
+ * match.pd: Add pattern to simplify view_convert (BIT_FIELD_REF).
+
+2025-10-28 Kito Cheng <kito.cheng@sifive.com>
+
+ * config/riscv/riscv.cc (riscv_get_vls_cc_attr): Fix error message
+ parameter order and add check_only condition. Improve diagnostic
+ message formatting with proper quoting.
+ (riscv_handle_rvv_vls_cc_attribute): Anonymize unused node parameter.
+
+2025-10-28 Avinash Jayakar <avinashd@linux.ibm.com>
+
+ PR tree-optimization/122065
+ * tree-vect-generic.cc (add_rshift): Update name and add code parameter.
+ (add_shift): Update name.
+ (expand_vector_mult): New lowering for MULT_EXPR.
+ (expand_vector_divmod): Use updated function name.
+ (expand_vector_operation): Use updated function name.
+
2025-10-27 Andrew Pinski <andrew.pinski@oss.qualcomm.com>
* expr.cc (expr_has_boolean_range): New function.
diff --git a/gcc/DATESTAMP b/gcc/DATESTAMP
index 0bfc1e9..aa425f4 100644
--- a/gcc/DATESTAMP
+++ b/gcc/DATESTAMP
@@ -1 +1 @@
-20251028
+20251029
diff --git a/gcc/ada/ChangeLog b/gcc/ada/ChangeLog
index bd1e2ae..fc58e04 100644
--- a/gcc/ada/ChangeLog
+++ b/gcc/ada/ChangeLog
@@ -1,3 +1,129 @@
+2025-10-28 Eric Botcazou <ebotcazou@adacore.com>
+
+ PR ada/48039
+ * sem_ch12.adb (Analyze_Subprogram_Instantiation): Call
+ Remove_Homonym to remove the enclosing package from visibility.
+
+2025-10-28 Eric Botcazou <ebotcazou@adacore.com>
+
+ * exp_util.adb (Remove_Side_Effects): Use separately the Etype of
+ the expression to build new nodes and its Underlying_Type to drive
+ part of the processing.
+
+2025-10-28 Johannes Kliemann <kliemann@adacore.com>
+
+ * adaint.c: Remove __gnat_enable_signals, __gnat_disable_signals
+ and related code for QNX.
+ * libgnarl/s-taprop__qnx.adb: Disable and enable
+ signals in Ada.
+
+2025-10-28 Alexandre Oliva <oliva@adacore.com>
+
+ * sem_ch13.adb (Analyze_Aspect_Export_Import): Skip
+ Set_Is_Imported on E_Exception.
+ * sem_prag.adb (Process_Import_Or_Interface): Explain
+ why not Set_Is_Imported.
+
+2025-10-28 Denis Mazzucato <mazzucato@adacore.com>
+
+ * sem_util.adb (Collect_Primitive_Operations): Avoid setting
+ Is_Primitive for noninherited and nonoverriding subprograms not
+ declared immediately within a package specification.
+ * sem_ch13.adb (Check_Nonoverridable_Aspect_Subprograms): Better
+ error posting to allow multiple errors on same type but different
+ aggregate subprogram.
+
+2025-10-28 Ronan Desplanques <desplanques@adacore.com>
+
+ * table.ads (Clear, Is_Empty): New subprograms.
+ * table.adb (Clear, Is_Empty): Likewise.
+ (Init): Use new subprogram.
+ * atree.adb (Traverse_Func_With_Parent): Use new subprograms.
+ * fmap.adb (Empty_Tables): Use new subprogram.
+ * par_sco.adb (Process_Pending_Decisions): Likewise.
+ * sem_elab.adb (Check_Elab_Call): Likewise.
+ * sem_ch12.adb (Build_Local_Package, Analyze_Package_Instantiation,
+ Analyze_Subprogram_Instantiation): Likewise.
+ (Save_And_Reset): Use Table.Table.First.
+
+2025-10-28 Eric Botcazou <ebotcazou@adacore.com>
+
+ PR ada/122063
+ * exp_fixd.adb (Build_Double_Divide_Code): Convert the result of the
+ multiply.
+ (Build_Multiply): Use base types of operands to size the operation.
+ (Build_Rem): Likewise.
+ (Build_Scaled_Divide_Code): Convert the result of the multiply.
+
+2025-10-28 Tonu Naks <naks@adacore.com>
+
+ * doc/gnat_rm/obsolescent_features.rst: typo
+ * gnat_rm.texi: Regenerate.
+
+2025-10-28 Javier Miranda <miranda@adacore.com>
+
+ * aspects.adb (Get_Aspect_Id): Enable aspect Unsigned_Base_Range
+ using -gnatd.u
+ * debug.adb (Debug_Flag_Dot_U): Document this switch.
+ * einfo-utils.adb (Is_Modular_Integer_Type): Return True if
+ the entity is a modular integer type and its base type does
+ not have the attribute has_unsigned_base_range_aspect.
+ (Is_Signed_Integer_Type): Return True if the entity is a signed
+ integer type, or it is a modular integer type and its base type
+ has the attribute has_unsigned_base_range_aspect.
+ * einfo.ads (E_Modular_Integer_Type): Add documentation of
+ Has_Unsigned_Base_Range_Aspect.
+ * par-ch4.adb (Scan_Apostrophe): Enable attribute Unsigned_Base_Range
+ using -gnatd.u
+ * sem_ch13.adb (Analyze_One_Aspect): Check general language
+ restrictions on aspect Unsigned_Base_Range. For Unsigned_Base_Range
+ aspect, do not delay the generation of the pragma because we need
+ to process it before any type or subtype derivation is analyzed.
+ * sem_ch3.adb (Build_Scalar_Bound): Disable code analyzing the
+ bound with the base type of the parent type because, for unsigned
+ base range types, their base type is a modular type but their
+ type is a signed integer type.
+ * sem_prag.adb (Analyze_Pragma): Enable pragma Unsigned_Base_Range
+ using -gnatd.u. Check more errors on Unsigned_Base_Range pragma,
+ and create the new base type only when required.
+
+2025-10-28 Ronan Desplanques <desplanques@adacore.com>
+
+ * sem_ch12.adb (Build_Local_Package)
+ (Analyze_Package_Instantiation, Analyze_Subprogram_Instantiation):
+ Fix Set_Last calls.
+ (Set_Instance_Of): Use Table.Table.Append.
+ (Save_And_Reset): Remove useless call. Remove defensive code.
+ (Restore): Remove incorrect Set_Last call and adapt to
+ Set_Instance_Of change.
+
+2025-10-28 Denis Mazzucato <mazzucato@adacore.com>
+
+ * sem_prag.adb (Analyze_Pragma): Add enclosing quotation when the
+ invalid switch ends with a space.
+
+2025-10-28 Eric Botcazou <ebotcazou@adacore.com>
+
+ PR ada/59234
+ * sem_ch12.adb (Analyze_Formal_Package_Declaration): Mark the
+ special name built for the formal in the parent of a child unit
+ as internal.
+
+2025-10-28 Eric Botcazou <ebotcazou@adacore.com>
+
+ PR ada/34511
+ * sem_ch12.adb (Analyze_Associations): Add Parent_Installed formal
+ parameter and pass it in call to Analyze_One_Association.
+ (Analyze_One_Association): Add Parent_Installed formal parameter
+ and pass it in call to Instantiate_Formal_Subprogram.
+ (Analyze_Formal_Package_Declaration): Pass Parent_Installed in call
+ to Analyze_Associations.
+ (Analyze_Package_Instantiation): Likewise.
+ (Analyze_Subprogram_Instantiation): Likewise.
+ (Instantiate_Formal_Subprogram): Add Parent_Installed formal
+ parameter and prune references to the parent unit(s) only if
+ it is true.
+
2025-10-27 Eric Botcazou <ebotcazou@adacore.com>
PR ada/29958
diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in
index 39c1545..f0c089a 100644
--- a/gcc/config/loongarch/genopts/loongarch.opt.in
+++ b/gcc/config/loongarch/genopts/loongarch.opt.in
@@ -205,6 +205,10 @@ mmax-inline-memcpy-size=
Target Joined RejectNegative UInteger Var(la_max_inline_memcpy_size) Init(1024) Save
-mmax-inline-memcpy-size=SIZE Set the max size of memcpy to inline, default is 1024.
+mbreak-code=
+Target Joined UInteger Var(la_break_code) Init(-1) Save
+-mbreak-code=CODE Use 'break CODE' for traps supposed to be unrecoverable, or an 'amswap.w' instruction leading to INE if CODE is out of range.
+
Enum
Name(explicit_relocs) Type(int)
The code model option names for -mexplicit-relocs:
diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
index eed4d2b..3048c48 100644
--- a/gcc/config/loongarch/lasx.md
+++ b/gcc/config/loongarch/lasx.md
@@ -834,59 +834,6 @@
[(set_attr "type" "simd_div")
(set_attr "mode" "<MODE>")])
-(define_insn "xor<mode>3"
- [(set (match_operand:LASX 0 "register_operand" "=f,f,f")
- (xor:LASX
- (match_operand:LASX 1 "register_operand" "f,f,f")
- (match_operand:LASX 2 "reg_or_vector_same_val_operand" "f,YC,Urv8")))]
- "ISA_HAS_LASX"
- "@
- xvxor.v\t%u0,%u1,%u2
- xvbitrevi.%v0\t%u0,%u1,%V2
- xvxori.b\t%u0,%u1,%B2"
- [(set_attr "type" "simd_logic,simd_bit,simd_logic")
- (set_attr "mode" "<MODE>")])
-
-(define_insn "ior<mode>3"
- [(set (match_operand:LASX 0 "register_operand" "=f,f,f")
- (ior:LASX
- (match_operand:LASX 1 "register_operand" "f,f,f")
- (match_operand:LASX 2 "reg_or_vector_same_val_operand" "f,YC,Urv8")))]
- "ISA_HAS_LASX"
- "@
- xvor.v\t%u0,%u1,%u2
- xvbitseti.%v0\t%u0,%u1,%V2
- xvori.b\t%u0,%u1,%B2"
- [(set_attr "type" "simd_logic,simd_bit,simd_logic")
- (set_attr "mode" "<MODE>")])
-
-(define_insn "and<mode>3"
- [(set (match_operand:LASX 0 "register_operand" "=f,f,f")
- (and:LASX
- (match_operand:LASX 1 "register_operand" "f,f,f")
- (match_operand:LASX 2 "reg_or_vector_same_val_operand" "f,YZ,Urv8")))]
- "ISA_HAS_LASX"
-{
- switch (which_alternative)
- {
- case 0:
- return "xvand.v\t%u0,%u1,%u2";
- case 1:
- {
- rtx elt0 = CONST_VECTOR_ELT (operands[2], 0);
- unsigned HOST_WIDE_INT val = ~UINTVAL (elt0);
- operands[2] = loongarch_gen_const_int_vector (<MODE>mode, val & (-val));
- return "xvbitclri.%v0\t%u0,%u1,%V2";
- }
- case 2:
- return "xvandi.b\t%u0,%u1,%B2";
- default:
- gcc_unreachable ();
- }
-}
- [(set_attr "type" "simd_logic,simd_bit,simd_logic")
- (set_attr "mode" "<MODE>")])
-
(define_insn "one_cmpl<mode>2"
[(set (match_operand:ILASX 0 "register_operand" "=f")
(not:ILASX (match_operand:ILASX 1 "register_operand" "f")))]
diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
index 5c2a9eb..bdf2906 100644
--- a/gcc/config/loongarch/loongarch.cc
+++ b/gcc/config/loongarch/loongarch.cc
@@ -1718,14 +1718,36 @@ loongarch_symbol_binds_local_p (const_rtx x)
bool
loongarch_const_vector_bitimm_set_p (rtx op, machine_mode mode)
{
- if (GET_CODE (op) == CONST_VECTOR && op != CONST0_RTX (mode))
+ if (GET_CODE (op) == CONST_VECTOR
+ && (GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
+ || GET_MODE_CLASS (mode) == MODE_VECTOR_INT))
{
- unsigned HOST_WIDE_INT val = UINTVAL (CONST_VECTOR_ELT (op, 0));
+ unsigned HOST_WIDE_INT val;
+
+ if (GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
+ {
+ rtx val_s = CONST_VECTOR_ELT (op, 0);
+ const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (val_s);
+ if (GET_MODE (val_s) == DFmode)
+ {
+ long tmp[2];
+ REAL_VALUE_TO_TARGET_DOUBLE (*x, tmp);
+ val = (unsigned HOST_WIDE_INT) tmp[1] << 32 | tmp[0];
+ }
+ else
+ {
+ long tmp;
+ REAL_VALUE_TO_TARGET_SINGLE (*x, tmp);
+ val = (unsigned HOST_WIDE_INT) tmp;
+ }
+ }
+ else
+ val = UINTVAL (CONST_VECTOR_ELT (op, 0));
+
int vlog2 = exact_log2 (val & GET_MODE_MASK (GET_MODE_INNER (mode)));
if (vlog2 != -1)
{
- gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
gcc_assert (vlog2 >= 0 && vlog2 <= GET_MODE_UNIT_BITSIZE (mode) - 1);
return loongarch_const_vector_same_val_p (op, mode);
}
@@ -1740,14 +1762,35 @@ loongarch_const_vector_bitimm_set_p (rtx op, machine_mode mode)
bool
loongarch_const_vector_bitimm_clr_p (rtx op, machine_mode mode)
{
- if (GET_CODE (op) == CONST_VECTOR && op != CONSTM1_RTX (mode))
+ if (GET_CODE (op) == CONST_VECTOR
+ && (GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
+ || GET_MODE_CLASS (mode) == MODE_VECTOR_INT))
{
- unsigned HOST_WIDE_INT val = ~UINTVAL (CONST_VECTOR_ELT (op, 0));
+ unsigned HOST_WIDE_INT val;
+ if (GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
+ {
+ rtx val_s = CONST_VECTOR_ELT (op, 0);
+ const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (val_s);
+ if (GET_MODE (val_s) == DFmode)
+ {
+ long tmp[2];
+ REAL_VALUE_TO_TARGET_DOUBLE (*x, tmp);
+ val = ~((unsigned HOST_WIDE_INT) tmp[1] << 32 | tmp[0]);
+ }
+ else
+ {
+ long tmp;
+ REAL_VALUE_TO_TARGET_SINGLE (*x, tmp);
+ val = ~((unsigned HOST_WIDE_INT) tmp);
+ }
+ }
+ else
+ val = ~UINTVAL (CONST_VECTOR_ELT (op, 0));
+
int vlog2 = exact_log2 (val & GET_MODE_MASK (GET_MODE_INNER (mode)));
if (vlog2 != -1)
{
- gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
gcc_assert (vlog2 >= 0 && vlog2 <= GET_MODE_UNIT_BITSIZE (mode) - 1);
return loongarch_const_vector_same_val_p (op, mode);
}
@@ -6450,7 +6493,28 @@ loongarch_print_operand (FILE *file, rtx op, int letter)
if (CONST_VECTOR_P (op))
{
machine_mode mode = GET_MODE_INNER (GET_MODE (op));
- unsigned HOST_WIDE_INT val = UINTVAL (CONST_VECTOR_ELT (op, 0));
+ rtx val_s = CONST_VECTOR_ELT (op, 0);
+ unsigned HOST_WIDE_INT val;
+
+ if (GET_MODE_CLASS (mode) == MODE_FLOAT)
+ {
+ const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (val_s);
+ if (GET_MODE (val_s) == DFmode)
+ {
+ long tmp[2];
+ REAL_VALUE_TO_TARGET_DOUBLE (*x, tmp);
+ val = (unsigned HOST_WIDE_INT) tmp[1] << 32 | tmp[0];
+ }
+ else
+ {
+ long tmp;
+ REAL_VALUE_TO_TARGET_SINGLE (*x, tmp);
+ val = (unsigned HOST_WIDE_INT) tmp;
+ }
+ }
+ else
+ val = UINTVAL (val_s);
+
int vlog2 = exact_log2 (val & GET_MODE_MASK (mode));
if (vlog2 != -1)
fprintf (file, "%d", vlog2);
diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
index be9a235..625f30c 100644
--- a/gcc/config/loongarch/loongarch.md
+++ b/gcc/config/loongarch/loongarch.md
@@ -679,14 +679,22 @@
;; ....................
;;
-(define_insn "trap"
- [(trap_if (const_int 1) (const_int 0))]
+(define_insn "*trap"
+ [(trap_if (const_int 1) (match_operand 0 "const_int_operand"))]
""
{
- return "break\t0";
+ return (const_uimm15_operand (operands[0], VOIDmode)
+ ? "break\t%0"
+ : "amswap.w\t$r0,$r1,$r0");
}
[(set_attr "type" "trap")])
+(define_expand "trap"
+ [(trap_if (const_int 1) (match_dup 0))]
+ ""
+{
+ operands[0] = GEN_INT (la_break_code);
+})
;;
diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt
index fbe61c0..628eabe 100644
--- a/gcc/config/loongarch/loongarch.opt
+++ b/gcc/config/loongarch/loongarch.opt
@@ -213,6 +213,10 @@ mmax-inline-memcpy-size=
Target Joined RejectNegative UInteger Var(la_max_inline_memcpy_size) Init(1024) Save
-mmax-inline-memcpy-size=SIZE Set the max size of memcpy to inline, default is 1024.
+mbreak-code=
+Target Joined UInteger Var(la_break_code) Init(-1) Save
+-mbreak-code=CODE Use 'break CODE' for traps supposed to be unrecoverable, or an 'amswap.w' instruction leading to INE if CODE is out of range.
+
Enum
Name(explicit_relocs) Type(int)
The code model option names for -mexplicit-relocs:
diff --git a/gcc/config/loongarch/loongarch.opt.urls b/gcc/config/loongarch/loongarch.opt.urls
index 606a211..c93f046 100644
--- a/gcc/config/loongarch/loongarch.opt.urls
+++ b/gcc/config/loongarch/loongarch.opt.urls
@@ -48,6 +48,9 @@ UrlSuffix(gcc/LoongArch-Options.html#index-mstrict-align-1)
mmax-inline-memcpy-size=
UrlSuffix(gcc/LoongArch-Options.html#index-mmax-inline-memcpy-size)
+mbreak-code=
+UrlSuffix(gcc/LoongArch-Options.html#index-mbreak-code)
+
mexplicit-relocs=
UrlSuffix(gcc/LoongArch-Options.html#index-mexplicit-relocs-1)
diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md
index fb0236b..7131a53 100644
--- a/gcc/config/loongarch/lsx.md
+++ b/gcc/config/loongarch/lsx.md
@@ -654,59 +654,6 @@
[(set_attr "type" "simd_div")
(set_attr "mode" "<MODE>")])
-(define_insn "xor<mode>3"
- [(set (match_operand:LSX 0 "register_operand" "=f,f,f")
- (xor:LSX
- (match_operand:LSX 1 "register_operand" "f,f,f")
- (match_operand:LSX 2 "reg_or_vector_same_val_operand" "f,YC,Urv8")))]
- "ISA_HAS_LSX"
- "@
- vxor.v\t%w0,%w1,%w2
- vbitrevi.%v0\t%w0,%w1,%V2
- vxori.b\t%w0,%w1,%B2"
- [(set_attr "type" "simd_logic,simd_bit,simd_logic")
- (set_attr "mode" "<MODE>")])
-
-(define_insn "ior<mode>3"
- [(set (match_operand:LSX 0 "register_operand" "=f,f,f")
- (ior:LSX
- (match_operand:LSX 1 "register_operand" "f,f,f")
- (match_operand:LSX 2 "reg_or_vector_same_val_operand" "f,YC,Urv8")))]
- "ISA_HAS_LSX"
- "@
- vor.v\t%w0,%w1,%w2
- vbitseti.%v0\t%w0,%w1,%V2
- vori.b\t%w0,%w1,%B2"
- [(set_attr "type" "simd_logic,simd_bit,simd_logic")
- (set_attr "mode" "<MODE>")])
-
-(define_insn "and<mode>3"
- [(set (match_operand:LSX 0 "register_operand" "=f,f,f")
- (and:LSX
- (match_operand:LSX 1 "register_operand" "f,f,f")
- (match_operand:LSX 2 "reg_or_vector_same_val_operand" "f,YZ,Urv8")))]
- "ISA_HAS_LSX"
-{
- switch (which_alternative)
- {
- case 0:
- return "vand.v\t%w0,%w1,%w2";
- case 1:
- {
- rtx elt0 = CONST_VECTOR_ELT (operands[2], 0);
- unsigned HOST_WIDE_INT val = ~UINTVAL (elt0);
- operands[2] = loongarch_gen_const_int_vector (<MODE>mode, val & (-val));
- return "vbitclri.%v0\t%w0,%w1,%V2";
- }
- case 2:
- return "vandi.b\t%w0,%w1,%B2";
- default:
- gcc_unreachable ();
- }
-}
- [(set_attr "type" "simd_logic,simd_bit,simd_logic")
- (set_attr "mode" "<MODE>")])
-
(define_insn "one_cmpl<mode>2"
[(set (match_operand:ILSX 0 "register_operand" "=f")
(not:ILSX (match_operand:ILSX 1 "register_operand" "f")))]
diff --git a/gcc/config/loongarch/simd.md b/gcc/config/loongarch/simd.md
index 4156b26..9f4525a 100644
--- a/gcc/config/loongarch/simd.md
+++ b/gcc/config/loongarch/simd.md
@@ -972,6 +972,77 @@
DONE;
})
+(define_insn "xor<mode>3"
+ [(set (match_operand:ALLVEC 0 "register_operand" "=f,f,f")
+ (xor:ALLVEC
+ (match_operand:ALLVEC 1 "register_operand" "f,f,f")
+ (match_operand:ALLVEC 2 "reg_or_vector_same_val_operand" "f,YC,Urv8")))]
+ ""
+ "@
+ <x>vxor.v\t%<wu>0,%<wu>1,%<wu>2
+ <x>vbitrevi.%v0\t%<wu>0,%<wu>1,%V2
+ <x>vxori.b\t%<wu>0,%<wu>1,%B2"
+ [(set_attr "type" "simd_logic,simd_bit,simd_logic")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "ior<mode>3"
+ [(set (match_operand:ALLVEC 0 "register_operand" "=f,f,f")
+ (ior:ALLVEC
+ (match_operand:ALLVEC 1 "register_operand" "f,f,f")
+ (match_operand:ALLVEC 2 "reg_or_vector_same_val_operand" "f,YC,Urv8")))]
+ ""
+ "@
+ <x>vor.v\t%<wu>0,%<wu>1,%<wu>2
+ <x>vbitseti.%v0\t%<wu>0,%<wu>1,%V2
+ <x>vori.b\t%<wu>0,%<wu>1,%B2"
+ [(set_attr "type" "simd_logic,simd_bit,simd_logic")
+ (set_attr "mode" "<MODE>")])
+
+(define_insn "and<mode>3"
+ [(set (match_operand:ALLVEC 0 "register_operand" "=f,f,f")
+ (and:ALLVEC
+ (match_operand:ALLVEC 1 "register_operand" "f,f,f")
+ (match_operand:ALLVEC 2 "reg_or_vector_same_val_operand" "f,YZ,Urv8")))]
+ ""
+{
+ switch (which_alternative)
+ {
+ case 0:
+ return "<x>vand.v\t%<wu>0,%<wu>1,%<wu>2";
+ case 1:
+ {
+ rtx elt0 = CONST_VECTOR_ELT (operands[2], 0);
+ unsigned HOST_WIDE_INT val;
+ if (GET_MODE_CLASS (<MODE>mode) == MODE_VECTOR_FLOAT)
+ {
+ const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (elt0);
+ if (GET_MODE (elt0) == DFmode)
+ {
+ long tmp[2];
+ REAL_VALUE_TO_TARGET_DOUBLE (*x, tmp);
+ val = ~((unsigned HOST_WIDE_INT) tmp[1] << 32 | tmp[0]);
+ }
+ else
+ {
+ long tmp;
+ REAL_VALUE_TO_TARGET_SINGLE (*x, tmp);
+ val = ~((unsigned HOST_WIDE_INT) tmp);
+ }
+ }
+ else
+ val = ~UINTVAL (elt0);
+ operands[2] = loongarch_gen_const_int_vector (<VIMODE>mode, val & (-val));
+ return "<x>vbitclri.%v0\t%<wu>0,%<wu>1,%V2";
+ }
+ case 2:
+ return "<x>vandi.b\t%<wu>0,%<wu>1,%B2";
+ default:
+ gcc_unreachable ();
+ }
+}
+ [(set_attr "type" "simd_logic,simd_bit,simd_logic")
+ (set_attr "mode" "<MODE>")])
+
; The LoongArch SX Instructions.
(include "lsx.md")
diff --git a/gcc/cp/ChangeLog b/gcc/cp/ChangeLog
index e03b69c..ae4581a 100644
--- a/gcc/cp/ChangeLog
+++ b/gcc/cp/ChangeLog
@@ -1,3 +1,9 @@
+2025-10-28 Marek Polacek <polacek@redhat.com>
+ Jakub Jelinek <jakub@redhat.com>
+
+ * decl.cc (finish_enum_value_list): Use fold_convert instead of
+ copy_node.
+
2025-10-27 Nathaniel Shead <nathanieloshead@gmail.com>
PR c++/122422
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index b40fc89..32b9c48 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -1097,7 +1097,7 @@ Objective-C and Objective-C++ Dialects}.
-mfpu=@var{fpu-type} -msimd=@var{simd-type}
-msoft-float -msingle-float -mdouble-float -mlsx -mno-lsx -mlasx -mno-lasx
-mbranch-cost=@var{n} -maddr-reg-reg-cost=@var{n} -mcheck-zero-division
--mno-check-zero-division
+-mno-check-zero-division -mbreak-code=@var{code}
-mcond-move-int -mno-cond-move-int
-mcond-move-float -mno-cond-move-float
-memcpy -mno-memcpy -mstrict-align -mno-strict-align -G @var{num}
@@ -28457,6 +28457,17 @@ Trap (do not trap) on integer division by zero. The default is
@option{-mcheck-zero-division} for @option{-O0} or @option{-Og}, and
@option{-mno-check-zero-division} for other optimization levels.
+@opindex mbreak-code
+@item -mbreak-code=@var{code}
+Emit a @code{break} @var{code} instruction for irrecoverable traps
+from @code{__builtin_trap} or inserted by the compiler (for example
+an erroneous path isolated with
+@option{-fisolate-erroneous-paths-dereference}), or an
+@code{amswap.w $r0, $r1, $r0} instruction which will cause the hardware
+to trigger an Instruction Not-defined Exception if @var{code} is negative
+or greater than 32767. The default is -1, meaning to use the
+@code{amswap.w} instruction.
+
@opindex mcond-move-int
@item -mcond-move-int
@itemx -mno-cond-move-int
diff --git a/gcc/fortran/ChangeLog b/gcc/fortran/ChangeLog
index c0a5710..cee5ef4 100644
--- a/gcc/fortran/ChangeLog
+++ b/gcc/fortran/ChangeLog
@@ -1,3 +1,15 @@
+2025-10-28 Yuao Ma <c8ef@outlook.com>
+
+ PR fortran/122342
+ * trans-const.cc (gfc_conv_constant): Create a variable for the
+ non-char pointer.
+
+2025-10-28 Paul-Antoine Arras <parras@baylibre.com>
+
+ PR fortran/122439
+ * openmp.cc (gfc_resolve_omp_context_selector): Skip selectors that have
+ OMP_TRAIT_INVALID.
+
2025-10-27 Paul Thomas <pault@gcc.gnu.org>
PR fortran/922290
diff --git a/gcc/fortran/decl.cc b/gcc/fortran/decl.cc
index 569786a..5b222cd 100644
--- a/gcc/fortran/decl.cc
+++ b/gcc/fortran/decl.cc
@@ -3938,6 +3938,20 @@ gfc_get_pdt_instance (gfc_actual_arglist *param_list, gfc_symbol **sym,
actual_param = param_list;
sprintf (name, "Pdt%s", pdt->name);
+ /* Prevent a PDT component of the same type as the template from being
+ converted into an instance. Doing this results in the component being
+ lost. */
+ if (gfc_current_state () == COMP_DERIVED
+ && !(gfc_state_stack->previous
+ && gfc_state_stack->previous->state == COMP_DERIVED)
+ && gfc_current_block ()->attr.pdt_template
+ && !strcmp (gfc_current_block ()->name, (*sym)->name))
+ {
+ if (ext_param_list)
+ *ext_param_list = gfc_copy_actual_arglist (param_list);
+ return MATCH_YES;
+ }
+
/* Run through the parameter name list and pick up the actual
parameter values or use the default values in the PDT declaration. */
for (; type_param_name_list;
diff --git a/gcc/fortran/primary.cc b/gcc/fortran/primary.cc
index 2d2c664..0722c76d 100644
--- a/gcc/fortran/primary.cc
+++ b/gcc/fortran/primary.cc
@@ -2690,6 +2690,14 @@ gfc_match_varspec (gfc_expr *primary, int equiv_flag, bool sub_flag,
else
component = NULL;
+ if (previous && inquiry
+ && (previous->attr.pdt_kind || previous->attr.pdt_len))
+ {
+ gfc_error_now ("R901: A type parameter ref is not a designtor and "
+ "cannot be followed by the type inquiry ref at %C");
+ return MATCH_ERROR;
+ }
+
if (intrinsic && !inquiry)
{
if (previous)
diff --git a/gcc/fortran/resolve.cc b/gcc/fortran/resolve.cc
index 117a51c..ecd2ada 100644
--- a/gcc/fortran/resolve.cc
+++ b/gcc/fortran/resolve.cc
@@ -18956,7 +18956,8 @@ gfc_impure_variable (gfc_symbol *sym)
{
if (ns == sym->ns)
break;
- if (ns->proc_name->attr.flavor == FL_PROCEDURE && !sym->attr.function)
+ if (ns->proc_name->attr.flavor == FL_PROCEDURE
+ && !(sym->attr.function || sym->attr.result))
return 1;
}
diff --git a/gcc/gimple-range-op.cc b/gcc/gimple-range-op.cc
index c9bc5c0..3a22606 100644
--- a/gcc/gimple-range-op.cc
+++ b/gcc/gimple-range-op.cc
@@ -150,6 +150,10 @@ gimple_range_op_handler::gimple_range_op_handler (gimple *s)
if (TREE_CODE (ssa) == SSA_NAME)
m_op1 = ssa;
}
+ // VIEW_CONVERT_EXPR needs to descend one level deeper to pick
+ // up the symbolic operand.
+ if (TREE_CODE (m_op1) == VIEW_CONVERT_EXPR)
+ m_op1 = TREE_OPERAND (m_op1, 0);
if (gimple_num_ops (m_stmt) >= 3)
m_op2 = gimple_assign_rhs2 (m_stmt);
// Check that operands are supported types. One check is enough.
diff --git a/gcc/range-op-mixed.h b/gcc/range-op-mixed.h
index 567b0cd..db31c2b 100644
--- a/gcc/range-op-mixed.h
+++ b/gcc/range-op-mixed.h
@@ -527,6 +527,47 @@ private:
const irange &outer) const;
};
+
+class operator_view : public range_operator
+{
+public:
+ using range_operator::fold_range;
+ using range_operator::op1_range;
+ using range_operator::update_bitmask;
+ bool fold_range (irange &r, tree type,
+ const irange &op1, const irange &op2,
+ relation_trio rel = TRIO_VARYING) const override;
+ bool fold_range (prange &r, tree type,
+ const prange &op1, const prange &op2,
+ relation_trio rel = TRIO_VARYING) const final override;
+ bool fold_range (irange &r, tree type,
+ const prange &op1, const irange &op2,
+ relation_trio rel = TRIO_VARYING) const final override;
+ bool fold_range (prange &r, tree type,
+ const irange &op1, const prange &op2,
+ relation_trio rel = TRIO_VARYING) const final override;
+
+ bool op1_range (irange &r, tree type,
+ const irange &lhs, const irange &op2,
+ relation_trio rel = TRIO_VARYING) const override;
+ bool op1_range (prange &r, tree type,
+ const prange &lhs, const prange &op2,
+ relation_trio rel = TRIO_VARYING) const final override;
+ bool op1_range (irange &r, tree type,
+ const prange &lhs, const irange &op2,
+ relation_trio rel = TRIO_VARYING) const final override;
+ bool op1_range (prange &r, tree type,
+ const irange &lhs, const prange &op2,
+ relation_trio rel = TRIO_VARYING) const final override;
+
+ void update_bitmask (irange &r, const irange &lh,
+ const irange &) const final override;
+private:
+// VIEW_CONVERT_EXPR works much like a cast between integral values, so use
+// the cast operator. Non-integrals are not handled as yet.
+ operator_cast m_cast;
+};
+
class operator_plus : public range_operator
{
public:
diff --git a/gcc/range-op.cc b/gcc/range-op.cc
index 6b6bf78..cf5b8fe 100644
--- a/gcc/range-op.cc
+++ b/gcc/range-op.cc
@@ -60,6 +60,7 @@ operator_ge op_ge;
operator_identity op_ident;
operator_cst op_cst;
operator_cast op_cast;
+operator_view op_view;
operator_plus op_plus;
operator_abs op_abs;
operator_minus op_minus;
@@ -97,6 +98,7 @@ range_op_table::range_op_table ()
set (INTEGER_CST, op_cst);
set (NOP_EXPR, op_cast);
set (CONVERT_EXPR, op_cast);
+ set (VIEW_CONVERT_EXPR, op_view);
set (FLOAT_EXPR, op_cast);
set (FIX_TRUNC_EXPR, op_cast);
set (PLUS_EXPR, op_plus);
@@ -3247,6 +3249,80 @@ operator_cast::op1_range (irange &r, tree type,
return true;
}
+// VIEW_CONVERT_EXPR works like a cast between integral values.
+// If the number of bits are not the same, behaviour is undefined,
+// so cast behaviour still works.
+
+bool
+operator_view::fold_range (irange &r, tree type,
+ const irange &op1, const irange &op2,
+ relation_trio rel) const
+{
+ return m_cast.fold_range (r, type, op1, op2, rel);
+}
+
+bool
+operator_view::fold_range (prange &r, tree type,
+ const prange &op1, const prange &op2,
+ relation_trio rel) const
+{
+ return m_cast.fold_range (r, type, op1, op2, rel);
+}
+bool
+operator_view::fold_range (irange &r, tree type,
+ const prange &op1, const irange &op2,
+ relation_trio rel) const
+{
+ return m_cast.fold_range (r, type, op1, op2, rel);
+}
+
+bool
+operator_view::fold_range (prange &r, tree type,
+ const irange &op1, const prange &op2,
+ relation_trio rel) const
+{
+ return m_cast.fold_range (r, type, op1, op2, rel);
+}
+
+bool
+operator_view::op1_range (irange &r, tree type,
+ const irange &lhs, const irange &op2,
+ relation_trio rel) const
+{
+ return m_cast.op1_range (r, type, lhs, op2, rel);
+}
+
+bool
+operator_view::op1_range (prange &r, tree type,
+ const prange &lhs, const prange &op2,
+ relation_trio rel) const
+{
+ return m_cast.op1_range (r, type, lhs, op2, rel);
+}
+
+bool
+operator_view::op1_range (irange &r, tree type,
+ const prange &lhs, const irange &op2,
+ relation_trio rel) const
+{
+ return m_cast.op1_range (r, type, lhs, op2, rel);
+}
+
+bool
+operator_view::op1_range (prange &r, tree type,
+ const irange &lhs, const prange &op2,
+ relation_trio rel) const
+{
+ return m_cast.op1_range (r, type, lhs, op2, rel);
+}
+
+void
+operator_view::update_bitmask (irange &r, const irange &lh,
+ const irange &rh) const
+{
+ m_cast.update_bitmask (r, lh, rh);
+}
+
class operator_logical_and : public range_operator
{
diff --git a/gcc/testsuite/ChangeLog b/gcc/testsuite/ChangeLog
index 40f1582..e13b07d 100644
--- a/gcc/testsuite/ChangeLog
+++ b/gcc/testsuite/ChangeLog
@@ -1,3 +1,29 @@
+2025-10-28 Yuao Ma <c8ef@outlook.com>
+
+ PR fortran/122342
+ * gfortran.dg/coarray_atomic_5.f90: Update testcase.
+ * gfortran.dg/team_form_3.f90: Likewise.
+
+2025-10-28 Artemiy Volkov <artemiy.volkov@arm.com>
+
+ * gcc.dg/tree-ssa/forwprop-42.c: New test.
+
+2025-10-28 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gnat.dg/specs/generic_inst5.ads: New test.
+ * gnat.dg/specs/generic_inst5_pkg1.ads: New helper.
+ * gnat.dg/specs/generic_inst5_pkg2.ads: Likewise.
+
+2025-10-28 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gnat.dg/specs/generic_inst4-child2.ads: New test.
+ * gnat.dg/specs/generic_inst4.ads: New helper.
+ * gnat.dg/specs/generic_inst4-child1.ads: Likewise.
+
+2025-10-28 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gnat.dg/specs/generic_inst3.ads: Add dg-do directive.
+
2025-10-27 Nathaniel Shead <nathanieloshead@gmail.com>
PR c++/122422
diff --git a/gcc/testsuite/gcc.dg/pr91191.c b/gcc/testsuite/gcc.dg/pr91191.c
new file mode 100644
index 0000000..7bf727e
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/pr91191.c
@@ -0,0 +1,20 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -fdump-tree-evrp" } */
+
+unsigned char reg(_Bool b) {
+ union U {
+ unsigned char f0;
+ _Bool f1;
+ };
+ union U u;
+ u.f1 = b;
+ if (u.f0 > 1) {
+ // This cannot happen
+ // if b is only allowed
+ // to be 0 or 1:
+ return 42;
+ }
+ return 13;
+}
+
+/* { dg-final { scan-tree-dump "return 13" "evrp" } } */
diff --git a/gcc/testsuite/gcc.target/loongarch/pr122097.c b/gcc/testsuite/gcc.target/loongarch/pr122097.c
new file mode 100644
index 0000000..5d32b19
--- /dev/null
+++ b/gcc/testsuite/gcc.target/loongarch/pr122097.c
@@ -0,0 +1,271 @@
+/* { dg-do compile } */
+/* { dg-options "-O3 -mabi=lp64d -mlsx" } */
+/* { dg-final { scan-assembler "vbitseti\.d\t\\\$vr\[0-9\]+,\\\$vr\[0-9\]+,63" } } */
+
+typedef long unsigned int size_t;
+typedef unsigned char simde__mmask8;
+typedef long simde__m128i __attribute__ ((__aligned__ ((16))))
+__attribute__ ((__vector_size__ (16))) __attribute__ ((__may_alias__));
+typedef union
+{
+
+ __attribute__ ((__aligned__ ((16)))) long i64
+ __attribute__ ((__vector_size__ (16))) __attribute__ ((__may_alias__));
+} simde__m128i_private;
+typedef double simde_float64;
+typedef simde_float64 simde__m128d __attribute__ ((__aligned__ ((16))))
+__attribute__ ((__vector_size__ (16))) __attribute__ ((__may_alias__));
+typedef long int int_fast32_t;
+typedef union
+{
+
+ __attribute__ ((__aligned__ ((16)))) int_fast32_t i32f
+ __attribute__ ((__vector_size__ (16))) __attribute__ ((__may_alias__));
+ __attribute__ ((__aligned__ ((16)))) long i64
+ __attribute__ ((__vector_size__ (16))) __attribute__ ((__may_alias__));
+ __attribute__ ((__aligned__ ((16)))) simde_float64 f64
+ __attribute__ ((__vector_size__ (16))) __attribute__ ((__may_alias__));
+} simde__m128d_private;
+__attribute__ ((__always_inline__)) inline static simde__m128d
+simde__m128d_from_private (simde__m128d_private v)
+{
+ simde__m128d r;
+ __builtin_memcpy (&r, &v, sizeof (r));
+ return r;
+}
+
+__attribute__ ((__always_inline__)) inline static simde__m128d
+simde_mm_set_pd (simde_float64 e1, simde_float64 e0)
+{
+
+ simde__m128d_private r_;
+ r_.f64[0] = e0;
+ r_.f64[1] = e1;
+
+ return simde__m128d_from_private (r_);
+}
+__attribute__ ((__always_inline__)) inline static simde__m128i
+simde_mm_castpd_si128 (simde__m128d a)
+{
+ simde__m128i r;
+ __builtin_memcpy (&r, &a, sizeof (a));
+ return r;
+}
+
+__attribute__ ((__always_inline__)) inline static simde__m128i
+simde__m128i_from_private (simde__m128i_private v)
+{
+ simde__m128i r;
+ __builtin_memcpy (&r, &v, sizeof (r));
+ return r;
+}
+
+__attribute__ ((__always_inline__)) inline static simde__m128i_private
+simde__m128i_to_private (simde__m128i v)
+{
+ simde__m128i_private r;
+ __builtin_memcpy (&r, &v, sizeof (r));
+ return r;
+}
+__attribute__ ((__always_inline__)) inline static simde__m128d
+simde_mm_castsi128_pd (simde__m128i a)
+{
+ simde__m128d r;
+ __builtin_memcpy (&r, &a, sizeof (a));
+ return r;
+}
+
+__attribute__ ((__always_inline__)) inline static simde__m128i
+simde_mm_mask_mov_epi64 (simde__m128i src, simde__mmask8 k, simde__m128i a)
+{
+
+ simde__m128i_private src_ = simde__m128i_to_private (src),
+ a_ = simde__m128i_to_private (a), r_;
+
+ for (size_t i = 0; i < (sizeof (r_.i64) / sizeof (r_.i64[0])); i++)
+ {
+ r_.i64[i] = ((k >> i) & 1) ? a_.i64[i] : src_.i64[i];
+ }
+
+ return simde__m128i_from_private (r_);
+}
+
+__attribute__ ((__always_inline__)) inline static simde__m128d
+simde_mm_mask_mov_pd (simde__m128d src, simde__mmask8 k, simde__m128d a)
+{
+ return simde_mm_castsi128_pd (simde_mm_mask_mov_epi64 (
+ simde_mm_castpd_si128 (src), k, simde_mm_castpd_si128 (a)));
+}
+
+static double
+simde_test_f64_precision_to_slop (int precision)
+{
+ return __builtin_expect (!!(precision == 0x7fffffff), 0)
+ ? 0.0
+ : __builtin_pow (10.0, -((double)(precision)));
+}
+__attribute__ ((__always_inline__)) inline static void
+simde_mm_storeu_pd (simde_float64 *mem_addr, simde__m128d a)
+{
+
+ __builtin_memcpy (mem_addr, &a, sizeof (a));
+}
+int simde_test_equal_f64 (simde_float64 a, simde_float64 b,
+ simde_float64 slop);
+void simde_test_debug_printf_ (const char *format, ...);
+static int
+simde_assert_equal_vf64_ (size_t vec_len, simde_float64 const a[(vec_len)],
+ simde_float64 const b[(vec_len)], simde_float64 slop,
+ const char *filename, int line, const char *astr,
+ const char *bstr)
+{
+ for (size_t i = 0; i < vec_len; i++)
+ {
+ if (__builtin_expect (!!(!simde_test_equal_f64 (a[i], b[i], slop)), 0))
+ {
+ simde_test_debug_printf_ (
+ "%s:%d: assertion failed: %s[%zu] ~= %s[%zu] (%f ~= %f)\n",
+ filename, line, astr, i, bstr, i, ((double)(a[i])),
+ ((double)(b[i])));
+ return 1;
+ }
+ }
+ return 0;
+}
+static int
+simde_test_x86_assert_equal_f64x2_ (simde__m128d a, simde__m128d b,
+ simde_float64 slop, const char *filename,
+ int line, const char *astr,
+ const char *bstr)
+{
+ simde_float64 a_[sizeof (a) / sizeof (simde_float64)],
+ b_[sizeof (a) / sizeof (simde_float64)];
+ simde_mm_storeu_pd (a_, a);
+ simde_mm_storeu_pd (b_, b);
+ return simde_assert_equal_vf64_ (sizeof (a_) / sizeof (a_[0]), a_, b_, slop,
+ filename, line, astr, bstr);
+}
+__attribute__ ((__always_inline__)) inline static simde__m128d_private
+simde__m128d_to_private (simde__m128d v)
+{
+ simde__m128d_private r;
+ __builtin_memcpy (&r, &v, sizeof (r));
+ return r;
+}
+__attribute__ ((__always_inline__)) inline static simde__m128d
+simde_mm_min_pd (simde__m128d a, simde__m128d b)
+{
+
+ simde__m128d_private r_, a_ = simde__m128d_to_private (a),
+ b_ = simde__m128d_to_private (b);
+
+ for (size_t i = 0; i < (sizeof (r_.f64) / sizeof (r_.f64[0])); i++)
+ {
+ r_.f64[i] = (a_.f64[i] < b_.f64[i]) ? a_.f64[i] : b_.f64[i];
+ }
+
+ return simde__m128d_from_private (r_);
+}
+
+__attribute__ ((__always_inline__)) inline static simde__m128d
+simde_mm_max_pd (simde__m128d a, simde__m128d b)
+{
+
+ simde__m128d_private r_, a_ = simde__m128d_to_private (a),
+ b_ = simde__m128d_to_private (b);
+
+ for (size_t i = 0; i < (sizeof (r_.f64) / sizeof (r_.f64[0])); i++)
+ {
+ r_.f64[i] = (a_.f64[i] > b_.f64[i]) ? a_.f64[i] : b_.f64[i];
+ }
+
+ return simde__m128d_from_private (r_);
+}
+
+__attribute__ ((__always_inline__)) inline static simde__m128d
+simde_x_mm_abs_pd (simde__m128d a)
+{
+
+ simde__m128d_private r_, a_ = simde__m128d_to_private (a);
+ for (size_t i = 0; i < (sizeof (r_.f64) / sizeof (r_.f64[0])); i++)
+ {
+ r_.f64[i] = __builtin_fabs (a_.f64[i]);
+ }
+
+ return simde__m128d_from_private (r_);
+}
+__attribute__ ((__always_inline__)) inline static simde__m128d
+simde_mm_cmple_pd (simde__m128d a, simde__m128d b)
+{
+
+ simde__m128d_private r_, a_ = simde__m128d_to_private (a),
+ b_ = simde__m128d_to_private (b);
+
+ r_.i64 = ((__typeof__ (r_.i64))((a_.f64 <= b_.f64)));
+ return simde__m128d_from_private (r_);
+}
+
+__attribute__ ((__always_inline__)) inline static simde__m128d
+simde_x_mm_select_pd (simde__m128d a, simde__m128d b, simde__m128d mask)
+{
+ simde__m128d_private r_, a_ = simde__m128d_to_private (a),
+ b_ = simde__m128d_to_private (b),
+ mask_ = simde__m128d_to_private (mask);
+
+ r_.i64 = a_.i64 ^ ((a_.i64 ^ b_.i64) & mask_.i64);
+ return simde__m128d_from_private (r_);
+}
+simde__m128d simde_mm_cmpge_pd (simde__m128d a, simde__m128d b);
+
+simde__m128d
+simde_x_mm_copysign_pd (simde__m128d dest, simde__m128d src)
+{
+ simde__m128d_private r_, dest_ = simde__m128d_to_private (dest),
+ src_ = simde__m128d_to_private (src);
+ for (size_t i = 0; i < (sizeof (r_.f64) / sizeof (r_.f64[0])); i++)
+ {
+ r_.f64[i] = __builtin_copysign (dest_.f64[i], src_.f64[i]);
+ }
+
+ return simde__m128d_from_private (r_);
+}
+simde__m128d simde_mm_or_pd (simde__m128d a, simde__m128d b);
+
+simde__m128d simde_mm_set1_pd (simde_float64 a);
+
+__attribute__ ((__always_inline__)) inline static simde__m128d
+simde_mm_range_pd (simde__m128d a, simde__m128d b, int imm8)
+{
+ simde__m128d r;
+
+ r = simde_x_mm_select_pd (
+ b, a, simde_mm_cmple_pd (simde_x_mm_abs_pd (a), simde_x_mm_abs_pd (b)));
+
+ r = simde_x_mm_copysign_pd (r, a);
+
+ return r;
+}
+int
+test_simde_mm_mask_range_pd (void)
+{
+
+ simde__m128d src, a, b, e, r;
+
+ src = simde_mm_set_pd (-2.92, -85.39);
+ a = simde_mm_set_pd (-47.59, -122.31);
+ b = simde_mm_set_pd (877.42, 69.15);
+ e = simde_mm_set_pd (-47.59, -69.15);
+ r = simde_mm_mask_mov_pd (src, 143, simde_mm_range_pd (a, b, 2));
+ do
+ {
+ if (simde_test_x86_assert_equal_f64x2_ (
+ r, e, simde_test_f64_precision_to_slop (1),
+ "../test/x86/avx512/range.c", 1454, "r", "e"))
+ {
+ return 1;
+ }
+ }
+ while (0);
+
+ return 0;
+}
diff --git a/gcc/testsuite/gcc.target/loongarch/trap-1.c b/gcc/testsuite/gcc.target/loongarch/trap-1.c
new file mode 100644
index 0000000..8936f60
--- /dev/null
+++ b/gcc/testsuite/gcc.target/loongarch/trap-1.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -w -fisolate-erroneous-paths-dereference -mbreak-code=1" } */
+/* { dg-final { scan-assembler "break\\t1" } } */
+
+int
+bug (void)
+{
+ return *(int *)0;
+}
diff --git a/gcc/testsuite/gcc.target/loongarch/trap-default.c b/gcc/testsuite/gcc.target/loongarch/trap-default.c
new file mode 100644
index 0000000..32948d4
--- /dev/null
+++ b/gcc/testsuite/gcc.target/loongarch/trap-default.c
@@ -0,0 +1,9 @@
+/* { dg-do compile } */
+/* { dg-options "-O2 -w -fisolate-erroneous-paths-dereference" } */
+/* { dg-final { scan-assembler "amswap\\.w\\t\\\$r0,\\\$r1,\\\$r0" } } */
+
+int
+bug (void)
+{
+ return *(int *)0;
+}
diff --git a/gcc/testsuite/gfortran.dg/pdt_62.f03 b/gcc/testsuite/gfortran.dg/pdt_62.f03
new file mode 100644
index 0000000..efbcdad
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/pdt_62.f03
@@ -0,0 +1,78 @@
+! { dg-do run }
+!
+! Test fix for PR122433
+!
+! Contributed by Damian Rouson <damian@archaeologic.codes>
+!
+module neuron_m
+ implicit none
+
+ type string_t
+ character(len=:), allocatable :: string_
+ end type
+
+ type neuron_t(k)
+ integer, kind :: k = kind(1.)
+ real(k) bias_
+ type(neuron_t(k)), allocatable :: next
+ end type
+
+contains
+ recursive function from_json(neuron_lines, start) result(neuron)
+ type(string_t) neuron_lines(:)
+ integer start
+ type(neuron_t) neuron
+ character(len=:), allocatable :: line
+ line = neuron_lines(start+1)%string_
+ read(line(index(line, ":")+1:), fmt=*) neuron%bias_
+ line = adjustr(neuron_lines(start+3)%string_)
+! Used to give "Error: Syntax error in IF-clause" for next line.
+ if (line(len(line):) == ",") neuron%next = from_json(neuron_lines, start+4)
+ end function
+ recursive function from_json_8(neuron_lines, start) result(neuron)
+ type(string_t) neuron_lines(:)
+ integer start
+ type(neuron_t(kind(1d0))) neuron
+ character(len=:), allocatable :: line
+ line = neuron_lines(start+1)%string_
+ read(line(index(line, ":")+1:), fmt=*) neuron%bias_
+ line = adjustr(neuron_lines(start+3)%string_)
+ if (line(len(line):) == ",") neuron%next = from_json_8(neuron_lines, start+4)
+ end function
+end module
+
+ use neuron_m
+ call foo
+ call bar
+contains
+ subroutine foo
+ type(neuron_t) neuron
+ type(string_t) :: neuron_lines(8)
+ neuron_lines(2)%string_ = "real : 4.0 "
+ neuron_lines(4)%string_ = " ,"
+ neuron_lines(6)%string_ = "real : 8.0 "
+ neuron_lines(8)%string_ = " "
+ neuron = from_json(neuron_lines, 1)
+ if (int (neuron%bias_) /= 4) stop 1
+ if (allocated (neuron%next)) then
+ if (int (neuron%next%bias_) /= 8) stop 2
+ else
+ stop 3
+ endif
+ end subroutine
+ subroutine bar
+ type(neuron_t(kind(1d0))) neuron
+ type(string_t) :: neuron_lines(8)
+ neuron_lines(2)%string_ = "real : 4.0d0 "
+ neuron_lines(4)%string_ = " ,"
+ neuron_lines(6)%string_ = "real : 8.0d0 "
+ neuron_lines(8)%string_ = " "
+ neuron = from_json_8(neuron_lines, 1)
+ if (int (neuron%bias_) /= 4) stop 1
+ if (allocated (neuron%next)) then
+ if (int (neuron%next%bias_) /= 8) stop 2
+ else
+ stop 3
+ endif
+ end subroutine
+end
diff --git a/gcc/testsuite/gfortran.dg/pdt_63.f03 b/gcc/testsuite/gfortran.dg/pdt_63.f03
new file mode 100644
index 0000000..127e5fe
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/pdt_63.f03
@@ -0,0 +1,26 @@
+! { dg-do compile }
+!
+! Test fix for PR122434
+!
+! Contributed by Damian Rouson <damian@archaeologic.codes>
+!
+module neuron_m
+ implicit none
+
+ type neuron_t
+ real, allocatable :: weight_
+ end type
+
+ interface
+ type(neuron_t) pure module function from_json() result(neuron)
+ end function
+ end interface
+
+contains
+ module procedure from_json
+ associate(num_inputs => 1)
+! Gave "Error: Bad allocate-object at (1) for a PURE procedure" in next line.
+ allocate(neuron%weight_, source=0.)
+ end associate
+ end procedure
+end module
diff --git a/gcc/testsuite/gfortran.dg/pdt_64.f03 b/gcc/testsuite/gfortran.dg/pdt_64.f03
new file mode 100644
index 0000000..dfa4e3a
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/pdt_64.f03
@@ -0,0 +1,17 @@
+! { dg-do compile }
+!
+! Test the fix for PR122165.
+!
+! Contributed by Steve Kargl <kargls@comcast.net>
+!
+program foo
+ implicit none
+ type dt(k,l)
+ integer(8), len :: k = 1
+ integer(8), KIND :: l = 1
+ character(k) :: arr
+ end type
+ type(dt(:)), allocatable :: d1
+ if (d1%k%kind /= 8) stop 1 ! { dg-error "cannot be followed by the type inquiry ref" }
+ if (d1%l%kind /= 8) stop 2 ! { dg-error "cannot be followed by the type inquiry ref" }
+end
diff --git a/gcc/testsuite/gfortran.dg/vect/pr70102.f b/gcc/testsuite/gfortran.dg/vect/pr70102.f
new file mode 100644
index 0000000..b6a2878
--- /dev/null
+++ b/gcc/testsuite/gfortran.dg/vect/pr70102.f
@@ -0,0 +1,21 @@
+! { dg-do compile }
+! { dg-additional-options "-Ofast" }
+ subroutine test (x,y,z)
+ integer x,y,z
+ real*8 a(5,x,y,z),b(5,x,y,z)
+ real*8 c
+
+ c = 0.0d0
+ do k=1,z
+ do j=1,y
+ do i=1,x
+ do l=1,5
+ c = c + a(l,i,j,k)*b(l,i,j,k)
+ enddo
+ enddo
+ enddo
+ enddo
+ write(30,*)'c ==',c
+ return
+ end
+! { dg-final { scan-tree-dump "vectorizing a reduction chain" "vect" { target vect_double } } }
diff --git a/gcc/tree-vect-slp.cc b/gcc/tree-vect-slp.cc
index e02b337..66c4518 100644
--- a/gcc/tree-vect-slp.cc
+++ b/gcc/tree-vect-slp.cc
@@ -4045,7 +4045,9 @@ vect_build_slp_store_interleaving (vec<slp_tree> &rhs_nodes,
}
/* Analyze an SLP instance starting from SCALAR_STMTS which are a group
- of KIND. Return true if successful. */
+ of KIND. Return true if successful. SCALAR_STMTS is owned by this
+ function; ownership of REMAIN and ROOT_STMT_INFOS is transferred back
+ to the caller upon failure. */
static bool
vect_build_slp_instance (vec_info *vinfo,
@@ -4059,7 +4061,10 @@ vect_build_slp_instance (vec_info *vinfo,
{
/* If there's no budget left bail out early. */
if (*limit == 0)
- return false;
+ {
+ scalar_stmts.release ();
+ return false;
+ }
if (kind == slp_inst_kind_ctor)
{
@@ -5564,10 +5569,10 @@ vect_analyze_slp (vec_info *vinfo, unsigned max_tree_size,
bb_vinfo->roots[i].remain,
max_tree_size, &limit, bst_map, false))
{
- bb_vinfo->roots[i].stmts = vNULL;
bb_vinfo->roots[i].roots = vNULL;
bb_vinfo->roots[i].remain = vNULL;
}
+ bb_vinfo->roots[i].stmts = vNULL;
}
}