author    Ian Lance Taylor <iant@golang.org>  2023-03-29 09:01:23 -0700
committer Ian Lance Taylor <iant@golang.org>  2023-03-29 09:01:23 -0700
commit    6612f4f8cb9b0d5af18ec69ad04e56debc3e6ced (patch)
tree      1deecdcfbf185c7044bc861d0ace51285c96cb62 /gcc/rust/backend
parent    795cffe109e28b248a54b8ee583cbae48368c2a7 (diff)
parent    aa8f4242efc99f24de73c59d53996f28db28c13f (diff)
Merge from trunk revision aa8f4242efc99f24de73c59d53996f28db28c13f.
Diffstat (limited to 'gcc/rust/backend')
-rw-r--r--  gcc/rust/backend/rust-builtins.cc                  |  305
-rw-r--r--  gcc/rust/backend/rust-builtins.h                   |  114
-rw-r--r--  gcc/rust/backend/rust-compile-base.cc              |  745
-rw-r--r--  gcc/rust/backend/rust-compile-base.h               |  146
-rw-r--r--  gcc/rust/backend/rust-compile-block.cc             |  158
-rw-r--r--  gcc/rust/backend/rust-compile-block.h              |  209
-rw-r--r--  gcc/rust/backend/rust-compile-context.cc           |  193
-rw-r--r--  gcc/rust/backend/rust-compile-context.h            |  402
-rw-r--r--  gcc/rust/backend/rust-compile-expr.cc              | 3139
-rw-r--r--  gcc/rust/backend/rust-compile-expr.h               |  167
-rw-r--r--  gcc/rust/backend/rust-compile-extern.h             |  172
-rw-r--r--  gcc/rust/backend/rust-compile-fnparam.cc           |  121
-rw-r--r--  gcc/rust/backend/rust-compile-fnparam.h            |   70
-rw-r--r--  gcc/rust/backend/rust-compile-implitem.cc          |  101
-rw-r--r--  gcc/rust/backend/rust-compile-implitem.h           |   91
-rw-r--r--  gcc/rust/backend/rust-compile-intrinsic.cc         |  886
-rw-r--r--  gcc/rust/backend/rust-compile-intrinsic.h          |   40
-rw-r--r--  gcc/rust/backend/rust-compile-item.cc              |  219
-rw-r--r--  gcc/rust/backend/rust-compile-item.h               |   88
-rw-r--r--  gcc/rust/backend/rust-compile-pattern.cc           |  333
-rw-r--r--  gcc/rust/backend/rust-compile-pattern.h            |   95
-rw-r--r--  gcc/rust/backend/rust-compile-resolve-path.cc      |  309
-rw-r--r--  gcc/rust/backend/rust-compile-resolve-path.h       |   73
-rw-r--r--  gcc/rust/backend/rust-compile-stmt.cc              |  115
-rw-r--r--  gcc/rust/backend/rust-compile-stmt.h               |   69
-rw-r--r--  gcc/rust/backend/rust-compile-struct-field-expr.cc |   81
-rw-r--r--  gcc/rust/backend/rust-compile-struct-field-expr.h  |   46
-rw-r--r--  gcc/rust/backend/rust-compile-type.cc              |  752
-rw-r--r--  gcc/rust/backend/rust-compile-type.h               |   79
-rw-r--r--  gcc/rust/backend/rust-compile-var-decl.h           |   95
-rw-r--r--  gcc/rust/backend/rust-compile.cc                   |  416
-rw-r--r--  gcc/rust/backend/rust-compile.h                    |   47
-rw-r--r--  gcc/rust/backend/rust-constexpr.cc                 | 6480
-rw-r--r--  gcc/rust/backend/rust-constexpr.h                  |   33
-rw-r--r--  gcc/rust/backend/rust-mangle.cc                    |  313
-rw-r--r--  gcc/rust/backend/rust-mangle.h                     |   52
-rw-r--r--  gcc/rust/backend/rust-tree.cc                      | 6157
-rw-r--r--  gcc/rust/backend/rust-tree.h                       | 3391
38 files changed, 26302 insertions, 0 deletions
diff --git a/gcc/rust/backend/rust-builtins.cc b/gcc/rust/backend/rust-builtins.cc
new file mode 100644
index 0000000..0517a9a
--- /dev/null
+++ b/gcc/rust/backend/rust-builtins.cc
@@ -0,0 +1,305 @@
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-builtins.h"
+
+namespace Rust {
+namespace Compile {
+
+static const int builtin_const = 1 << 0;
+static const int builtin_noreturn = 1 << 1;
+static const int builtin_novops = 1 << 2;
+
+BuiltinsContext &
+BuiltinsContext::get ()
+{
+ static BuiltinsContext instance;
+ return instance;
+}
+
+bool
+BuiltinsContext::lookup_simple_builtin (const std::string &name, tree *builtin)
+{
+ auto it = rust_intrinsic_to_gcc_builtin.find (name);
+ if (it == rust_intrinsic_to_gcc_builtin.end ())
+ return false;
+
+ return lookup_gcc_builtin (it->second, builtin);
+}
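+
+// A minimal usage sketch (hypothetical caller, not part of this patch):
+// intrinsic lowering can ask for the GCC builtin behind a Rust intrinsic
+// name and fall back gracefully when there is no mapping.
+//
+//   tree builtin = NULL_TREE;
+//   if (BuiltinsContext::get ().lookup_simple_builtin ("sqrtf32", &builtin))
+//     { /* emit a CALL_EXPR to the builtin decl */ }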
+
+BuiltinsContext::BuiltinsContext () { setup (); }
+
+void
+BuiltinsContext::setup_overflow_fns ()
+{
+ tree overflow_type
+ = build_varargs_function_type_list (boolean_type_node, NULL_TREE);
+
+ define_builtin ("add_overflow", BUILT_IN_ADD_OVERFLOW,
+ "__builtin_add_overflow", "add_overflow", overflow_type, 0);
+ define_builtin ("sub_overflow", BUILT_IN_SUB_OVERFLOW,
+ "__builtin_sub_overflow", "sub_overflow", overflow_type, 0);
+ define_builtin ("mul_overflow", BUILT_IN_MUL_OVERFLOW,
+ "__builtin_mul_overflow", "mul_overflow", overflow_type, 0);
+}
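+
+// The decls registered above share the semantics of GCC's type-generic
+// overflow builtins, e.g. (illustrative C-level usage):
+//
+//   int res;
+//   bool wrapped = __builtin_add_overflow (a, b, &res);
+//
+// which is why a single varargs boolean signature is adequate for all three.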
+
+void
+BuiltinsContext::setup_math_fns ()
+{
+ tree fn_type_f32_to_f32
+ = build_function_type_list (float_type_node, float_type_node, NULL_TREE);
+ tree fn_type_f64_to_f64
+ = build_function_type_list (double_type_node, double_type_node, NULL_TREE);
+ tree fn_type_f32_f32_to_f32
+ = build_function_type_list (float_type_node, float_type_node,
+ float_type_node, NULL_TREE);
+ tree fn_type_f64_f64_to_f64
+ = build_function_type_list (double_type_node, double_type_node,
+ double_type_node, NULL_TREE);
+ tree fn_type_f32_i32_to_f32
+ = build_function_type_list (float_type_node, float_type_node,
+ integer_type_node, NULL_TREE);
+ tree fn_type_f64_i32_to_f64
+ = build_function_type_list (double_type_node, double_type_node,
+ integer_type_node, NULL_TREE);
+
+ define_builtin ("sqrtf32", BUILT_IN_SQRTF, "__builtin_sqrtf", "sqrtf",
+ fn_type_f32_to_f32, builtin_const);
+ define_builtin ("sqrtf64", BUILT_IN_SQRT, "__builtin_sqrt", "sqrt",
+ fn_type_f64_to_f64, builtin_const);
+
+ define_builtin ("powif32", BUILT_IN_POWIF, "__builtin_powif", "powif",
+ fn_type_f32_i32_to_f32, builtin_const);
+ define_builtin ("powif64", BUILT_IN_POWI, "__builtin_powi", "powi",
+ fn_type_f64_i32_to_f64, builtin_const);
+
+ define_builtin ("sinf32", BUILT_IN_SINF, "__builtin_sinf", "sinf",
+ fn_type_f32_to_f32, builtin_const);
+ define_builtin ("sinf64", BUILT_IN_SIN, "__builtin_sin", "sin",
+ fn_type_f64_to_f64, builtin_const);
+
+ define_builtin ("cosf32", BUILT_IN_COSF, "__builtin_cosf", "cosf",
+ fn_type_f32_to_f32, builtin_const);
+ define_builtin ("cosf64", BUILT_IN_COS, "__builtin_cos", "cos",
+ fn_type_f64_to_f64, builtin_const);
+
+ define_builtin ("powf32", BUILT_IN_POWF, "__builtin_powf", "powf",
+ fn_type_f32_f32_to_f32, builtin_const);
+ define_builtin ("powf64", BUILT_IN_POW, "__builtin_pow", "pow",
+ fn_type_f64_f64_to_f64, builtin_const);
+
+ define_builtin ("expf32", BUILT_IN_EXPF, "__builtin_expf", "expf",
+ fn_type_f32_to_f32, builtin_const);
+ define_builtin ("expf64", BUILT_IN_EXP, "__builtin_exp", "exp",
+ fn_type_f64_to_f64, builtin_const);
+
+ define_builtin ("exp2f32", BUILT_IN_EXP2F, "__builtin_exp2f", "exp2f",
+ fn_type_f32_to_f32, builtin_const);
+ define_builtin ("exp2f64", BUILT_IN_EXP2, "__builtin_exp2", "exp2",
+ fn_type_f64_to_f64, builtin_const);
+
+ define_builtin ("logf32", BUILT_IN_LOGF, "__builtin_logf", "logf",
+ fn_type_f32_to_f32, builtin_const);
+ define_builtin ("logf64", BUILT_IN_LOG, "__builtin_log", "log",
+ fn_type_f64_to_f64, builtin_const);
+
+ define_builtin ("log10f32", BUILT_IN_LOG10F, "__builtin_log10f", "log10f",
+ fn_type_f32_to_f32, builtin_const);
+ define_builtin ("log10f64", BUILT_IN_LOG10, "__builtin_log10", "log10",
+ fn_type_f64_to_f64, builtin_const);
+
+ define_builtin ("log2f32", BUILT_IN_LOG2F, "__builtin_log2f", "log2f",
+ fn_type_f32_to_f32, builtin_const);
+ define_builtin ("log2f64", BUILT_IN_LOG2, "__builtin_log2", "log2",
+ fn_type_f64_to_f64, builtin_const);
+
+ define_builtin ("fmaf32", BUILT_IN_FMAF, "__builtin_fmaf", "fmaf",
+ fn_type_f32_f32_to_f32, builtin_const);
+ define_builtin ("fmaf64", BUILT_IN_FMA, "__builtin_fma", "fma",
+ fn_type_f64_f64_to_f64, builtin_const);
+
+ define_builtin ("fabsf32", BUILT_IN_FABSF, "__builtin_fabsf", "fabsf",
+ fn_type_f32_to_f32, builtin_const);
+ define_builtin ("fabsf64", BUILT_IN_FABS, "__builtin_fabs", "fabs",
+ fn_type_f64_to_f64, builtin_const);
+
+ define_builtin ("minnumf32", BUILT_IN_FMINF, "__builtin_fminf", "fminf",
+ fn_type_f32_f32_to_f32, builtin_const);
+ define_builtin ("minnumf64", BUILT_IN_FMIN, "__builtin_fmin", "fmin",
+ fn_type_f64_f64_to_f64, builtin_const);
+
+ define_builtin ("maxnumf32", BUILT_IN_FMAXF, "__builtin_fmaxf", "fmaxf",
+ fn_type_f32_f32_to_f32, builtin_const);
+ define_builtin ("maxnumf64", BUILT_IN_FMAX, "__builtin_fmax", "fmax",
+ fn_type_f64_f64_to_f64, builtin_const);
+
+ define_builtin ("copysignf32", BUILT_IN_COPYSIGNF, "__builtin_copysignf",
+ "copysignf", fn_type_f32_f32_to_f32, builtin_const);
+ define_builtin ("copysignf64", BUILT_IN_COPYSIGN, "__builtin_copysign",
+ "copysign", fn_type_f64_f64_to_f64, builtin_const);
+
+ define_builtin ("floorf32", BUILT_IN_FLOORF, "__builtin_floorf", "floorf",
+ fn_type_f32_to_f32, builtin_const);
+ define_builtin ("floorf64", BUILT_IN_FLOOR, "__builtin_floor", "floor",
+ fn_type_f64_to_f64, builtin_const);
+
+ define_builtin ("ceilf32", BUILT_IN_CEILF, "__builtin_ceilf", "ceilf",
+ fn_type_f32_to_f32, builtin_const);
+ define_builtin ("ceilf64", BUILT_IN_CEIL, "__builtin_ceil", "ceil",
+ fn_type_f64_to_f64, builtin_const);
+
+ define_builtin ("truncf32", BUILT_IN_TRUNCF, "__builtin_truncf", "truncf",
+ fn_type_f32_to_f32, builtin_const);
+ define_builtin ("truncf64", BUILT_IN_TRUNC, "__builtin_trunc", "trunc",
+ fn_type_f64_to_f64, builtin_const);
+
+ define_builtin ("rintf32", BUILT_IN_RINTF, "__builtin_rintf", "rintf",
+ fn_type_f32_to_f32, builtin_const);
+ define_builtin ("rintf64", BUILT_IN_RINT, "__builtin_rint", "rint",
+ fn_type_f64_to_f64, builtin_const);
+
+ define_builtin ("nearbyintf32", BUILT_IN_NEARBYINTF, "__builtin_nearbyintf",
+ "nearbyintf", fn_type_f32_to_f32, builtin_const);
+ define_builtin ("nearbyintf64", BUILT_IN_NEARBYINT, "__builtin_nearbyint",
+ "nearbyint", fn_type_f64_to_f64, builtin_const);
+
+ define_builtin ("roundf32", BUILT_IN_ROUNDF, "__builtin_roundf", "roundf",
+ fn_type_f32_to_f32, builtin_const);
+ define_builtin ("roundf64", BUILT_IN_ROUND, "__builtin_round", "round",
+ fn_type_f64_to_f64, builtin_const);
+}
+
+void
+BuiltinsContext::setup_atomic_fns ()
+{
+ auto atomic_store_type
+ = build_varargs_function_type_list (void_type_node, NULL_TREE);
+ auto atomic_load_type = [] (tree ret_type_node) {
+ return build_function_type_list (ret_type_node,
+ ptr_type_node, // const_ptr_type_node?
+ integer_type_node, NULL_TREE);
+ };
+
+  // FIXME: These should be the definitions for the generic versions of the
+  // atomic_store builtins, but I cannot get them to work properly. Revisit
+  // later.
+  // define_builtin ("atomic_store", BUILT_IN_ATOMIC_STORE,
+  //                 "__atomic_store", NULL, atomic_store_type, 0);
+  // define_builtin ("atomic_store_n", BUILT_IN_ATOMIC_STORE_N,
+  //                 "__atomic_store_n", NULL, atomic_store_type, 0);
+
+ define_builtin ("atomic_store_1", BUILT_IN_ATOMIC_STORE_1, "__atomic_store_1",
+ NULL, atomic_store_type, 0);
+ define_builtin ("atomic_store_2", BUILT_IN_ATOMIC_STORE_2, "__atomic_store_2",
+ NULL, atomic_store_type, 0);
+ define_builtin ("atomic_store_4", BUILT_IN_ATOMIC_STORE_4, "__atomic_store_4",
+ NULL, atomic_store_type, 0);
+ define_builtin ("atomic_store_8", BUILT_IN_ATOMIC_STORE_8, "__atomic_store_8",
+ NULL, atomic_store_type, 0);
+ define_builtin ("atomic_store_16", BUILT_IN_ATOMIC_STORE_16,
+ "__atomic_store_16", NULL, atomic_store_type, 0);
+
+ define_builtin ("atomic_load_1", BUILT_IN_ATOMIC_LOAD_1, "__atomic_load_1",
+ NULL, atomic_load_type (integer_type_node), 0);
+ define_builtin ("atomic_load_2", BUILT_IN_ATOMIC_LOAD_2, "__atomic_load_2",
+ NULL, atomic_load_type (integer_type_node), 0);
+ define_builtin ("atomic_load_4", BUILT_IN_ATOMIC_LOAD_4, "__atomic_load_4",
+ NULL, atomic_load_type (integer_type_node), 0);
+ define_builtin ("atomic_load_8", BUILT_IN_ATOMIC_LOAD_8, "__atomic_load_8",
+ NULL, atomic_load_type (integer_type_node), 0);
+}
+
+void
+BuiltinsContext::setup ()
+{
+ setup_math_fns ();
+ setup_overflow_fns ();
+ setup_atomic_fns ();
+
+ define_builtin ("unreachable", BUILT_IN_UNREACHABLE, "__builtin_unreachable",
+ NULL, build_function_type (void_type_node, void_list_node),
+ builtin_const | builtin_noreturn);
+
+ define_builtin ("abort", BUILT_IN_ABORT, "__builtin_abort", "abort",
+ build_function_type (void_type_node, void_list_node),
+ builtin_const | builtin_noreturn);
+
+ define_builtin ("breakpoint", BUILT_IN_TRAP, "__builtin_trap", "breakpoint",
+ build_function_type (void_type_node, void_list_node),
+ builtin_const | builtin_noreturn);
+
+ define_builtin ("memcpy", BUILT_IN_MEMCPY, "__builtin_memcpy", "memcpy",
+ build_function_type_list (build_pointer_type (void_type_node),
+ build_pointer_type (void_type_node),
+ build_pointer_type (void_type_node),
+ size_type_node, NULL_TREE),
+ 0);
+
+ define_builtin ("prefetch", BUILT_IN_PREFETCH, "__builtin_prefetch",
+ "prefetch",
+ build_varargs_function_type_list (
+ build_pointer_type (const_ptr_type_node), NULL_TREE),
+ builtin_const);
+}
+
+static void
+handle_flags (tree decl, int flags)
+{
+ if (flags & builtin_const)
+ TREE_READONLY (decl) = 1;
+  if (flags & builtin_noreturn)
+    TREE_THIS_VOLATILE (decl) = 1;
+ if (flags & builtin_novops)
+ DECL_IS_NOVOPS (decl) = 1;
+}
+
+void
+BuiltinsContext::define_builtin (const std::string rust_name,
+ built_in_function bcode, const char *name,
+ const char *libname, tree fntype, int flags)
+{
+ tree decl = add_builtin_function (name, fntype, bcode, BUILT_IN_NORMAL,
+ libname, NULL_TREE);
+ handle_flags (decl, flags);
+ set_builtin_decl (bcode, decl, true);
+
+ this->builtin_functions_[name] = decl;
+ if (libname != NULL)
+ {
+ decl = add_builtin_function (libname, fntype, bcode, BUILT_IN_NORMAL,
+ NULL, NULL_TREE);
+ handle_flags (decl, flags);
+
+ this->builtin_functions_[libname] = decl;
+ }
+
+ rust_intrinsic_to_gcc_builtin[rust_name] = name;
+}
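+
+// For example, the call in setup_math_fns
+//
+//   define_builtin ("sqrtf32", BUILT_IN_SQRTF, "__builtin_sqrtf", "sqrtf",
+//                   fn_type_f32_to_f32, builtin_const);
+//
+// records decls under both "__builtin_sqrtf" and "sqrtf" in
+// builtin_functions_, and maps the Rust intrinsic name "sqrtf32" to
+// "__builtin_sqrtf" for later lookup_simple_builtin calls.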
+
+bool
+BuiltinsContext::lookup_gcc_builtin (const std::string &name, tree *builtin)
+{
+ auto it = builtin_functions_.find (name);
+ if (it == builtin_functions_.end ())
+ return false;
+
+ *builtin = it->second;
+ return true;
+}
+
+} // namespace Compile
+} // namespace Rust
diff --git a/gcc/rust/backend/rust-builtins.h b/gcc/rust/backend/rust-builtins.h
new file mode 100644
index 0000000..c282510
--- /dev/null
+++ b/gcc/rust/backend/rust-builtins.h
@@ -0,0 +1,114 @@
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_BUILTINS_H
+#define RUST_BUILTINS_H
+
+#include "rust-system.h"
+#include "rust-tree.h"
+#include "langhooks.h"
+#include "tree.h"
+
+namespace Rust {
+namespace Compile {
+
+// https://github.com/rust-lang/rust/blob/master/library/core/src/intrinsics.rs
+// https://github.com/rust-lang/rust/blob/master/compiler/rustc_codegen_llvm/src/intrinsic.rs
+// https://github.com/Rust-GCC/gccrs/issues/658
+//
+// let llvm_name = match name {
+// sym::sqrtf32 => "llvm.sqrt.f32",
+// sym::sqrtf64 => "llvm.sqrt.f64",
+// sym::powif32 => "llvm.powi.f32",
+// sym::powif64 => "llvm.powi.f64",
+// sym::sinf32 => "llvm.sin.f32",
+// sym::sinf64 => "llvm.sin.f64",
+// sym::cosf32 => "llvm.cos.f32",
+// sym::cosf64 => "llvm.cos.f64",
+// sym::powf32 => "llvm.pow.f32",
+// sym::powf64 => "llvm.pow.f64",
+// sym::expf32 => "llvm.exp.f32",
+// sym::expf64 => "llvm.exp.f64",
+// sym::exp2f32 => "llvm.exp2.f32",
+// sym::exp2f64 => "llvm.exp2.f64",
+// sym::logf32 => "llvm.log.f32",
+// sym::logf64 => "llvm.log.f64",
+// sym::log10f32 => "llvm.log10.f32",
+// sym::log10f64 => "llvm.log10.f64",
+// sym::log2f32 => "llvm.log2.f32",
+// sym::log2f64 => "llvm.log2.f64",
+// sym::fmaf32 => "llvm.fma.f32",
+// sym::fmaf64 => "llvm.fma.f64",
+// sym::fabsf32 => "llvm.fabs.f32",
+// sym::fabsf64 => "llvm.fabs.f64",
+// sym::minnumf32 => "llvm.minnum.f32",
+// sym::minnumf64 => "llvm.minnum.f64",
+// sym::maxnumf32 => "llvm.maxnum.f32",
+// sym::maxnumf64 => "llvm.maxnum.f64",
+// sym::copysignf32 => "llvm.copysign.f32",
+// sym::copysignf64 => "llvm.copysign.f64",
+// sym::floorf32 => "llvm.floor.f32",
+// sym::floorf64 => "llvm.floor.f64",
+// sym::ceilf32 => "llvm.ceil.f32",
+// sym::ceilf64 => "llvm.ceil.f64",
+// sym::truncf32 => "llvm.trunc.f32",
+// sym::truncf64 => "llvm.trunc.f64",
+// sym::rintf32 => "llvm.rint.f32",
+// sym::rintf64 => "llvm.rint.f64",
+// sym::nearbyintf32 => "llvm.nearbyint.f32",
+// sym::nearbyintf64 => "llvm.nearbyint.f64",
+// sym::roundf32 => "llvm.round.f32",
+// sym::roundf64 => "llvm.round.f64",
+// _ => return None,
+// };
+// Some(cx.get_intrinsic(&llvm_name))
+class BuiltinsContext
+{
+public:
+ static BuiltinsContext &get ();
+
+ bool lookup_simple_builtin (const std::string &name, tree *builtin);
+
+private:
+ BuiltinsContext ();
+
+ void setup_overflow_fns ();
+ void setup_math_fns ();
+ void setup_atomic_fns ();
+
+ void setup ();
+
+  // Define a builtin function. BCODE is the builtin function code
+  // defined by builtins.def. NAME is the name of the builtin function.
+  // LIBNAME is the name of the corresponding library function, and is
+  // NULL if there isn't one. FNTYPE is the type of the function.
+  // FLAGS is a bitmask of builtin_const, builtin_noreturn and
+  // builtin_novops describing the attributes to set on the decl.
+ void define_builtin (const std::string rust_name, built_in_function bcode,
+ const char *name, const char *libname, tree fntype,
+ int flags);
+
+ bool lookup_gcc_builtin (const std::string &name, tree *builtin);
+
+ // A mapping of the GCC built-ins exposed to GCC Rust.
+ std::map<std::string, tree> builtin_functions_;
+ std::map<std::string, std::string> rust_intrinsic_to_gcc_builtin;
+};
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_BUILTINS_H
diff --git a/gcc/rust/backend/rust-compile-base.cc b/gcc/rust/backend/rust-compile-base.cc
new file mode 100644
index 0000000..568abf9
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-base.cc
@@ -0,0 +1,745 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-compile-base.h"
+#include "rust-abi.h"
+#include "rust-compile-item.h"
+#include "rust-compile-stmt.h"
+#include "rust-compile-expr.h"
+#include "rust-compile-fnparam.h"
+#include "rust-compile-var-decl.h"
+#include "rust-constexpr.h"
+#include "rust-diagnostics.h"
+#include "rust-expr.h" // for AST::AttrInputLiteral
+#include "rust-macro.h" // for AST::MetaNameValueStr
+
+#include "fold-const.h"
+#include "stringpool.h"
+#include "attribs.h"
+#include "tree.h"
+
+namespace Rust {
+namespace Compile {
+
+bool inline should_mangle_item (const tree fndecl)
+{
+ return lookup_attribute ("no_mangle", DECL_ATTRIBUTES (fndecl)) == NULL_TREE;
+}
+
+void
+HIRCompileBase::setup_fndecl (tree fndecl, bool is_main_entry_point,
+ bool is_generic_fn, HIR::Visibility &visibility,
+ const HIR::FunctionQualifiers &qualifiers,
+ const AST::AttrVec &attrs)
+{
+  // if it is the main fn, or it has pub visibility, mark it as TREE_PUBLIC
+ // please see https://github.com/Rust-GCC/gccrs/pull/137
+ bool is_pub = visibility.get_vis_type () == HIR::Visibility::VisType::PUBLIC;
+ if (is_main_entry_point || (is_pub && !is_generic_fn))
+ {
+ TREE_PUBLIC (fndecl) = 1;
+ }
+
+ // is it a const fn
+ DECL_DECLARED_CONSTEXPR_P (fndecl) = qualifiers.is_const ();
+ if (qualifiers.is_const ())
+ {
+ TREE_READONLY (fndecl) = 1;
+ }
+
+ // is it inline?
+ for (const auto &attr : attrs)
+ {
+ bool is_inline = attr.get_path ().as_string ().compare ("inline") == 0;
+ bool is_must_use
+ = attr.get_path ().as_string ().compare ("must_use") == 0;
+ bool is_cold = attr.get_path ().as_string ().compare ("cold") == 0;
+ bool is_link_section
+ = attr.get_path ().as_string ().compare ("link_section") == 0;
+ bool no_mangle = attr.get_path ().as_string ().compare ("no_mangle") == 0;
+ bool is_deprecated
+ = attr.get_path ().as_string ().compare ("deprecated") == 0;
+
+ if (is_inline)
+ {
+ handle_inline_attribute_on_fndecl (fndecl, attr);
+ }
+ else if (is_must_use)
+ {
+ handle_must_use_attribute_on_fndecl (fndecl, attr);
+ }
+ else if (is_cold)
+ {
+ handle_cold_attribute_on_fndecl (fndecl, attr);
+ }
+ else if (is_link_section)
+ {
+ handle_link_section_attribute_on_fndecl (fndecl, attr);
+ }
+ else if (is_deprecated)
+ {
+ handle_deprecated_attribute_on_fndecl (fndecl, attr);
+ }
+ else if (no_mangle)
+ {
+ handle_no_mangle_attribute_on_fndecl (fndecl, attr);
+ }
+ }
+}
+
+void
+HIRCompileBase::handle_cold_attribute_on_fndecl (tree fndecl,
+ const AST::Attribute &attr)
+{
+ // simple #[cold]
+ if (!attr.has_attr_input ())
+ {
+ tree cold = get_identifier ("cold");
+ // this will get handled by the GCC backend later
+ DECL_ATTRIBUTES (fndecl)
+ = tree_cons (cold, NULL_TREE, DECL_ATTRIBUTES (fndecl));
+ return;
+ }
+
+ rust_error_at (attr.get_locus (),
+ "attribute %<cold%> does not accept any arguments");
+}
+
+void
+HIRCompileBase::handle_link_section_attribute_on_fndecl (
+ tree fndecl, const AST::Attribute &attr)
+{
+ if (!attr.has_attr_input ())
+ {
+      rust_error_at (attr.get_locus (),
+		     "%<link_section%> expects exactly one argument");
+ return;
+ }
+
+ rust_assert (attr.get_attr_input ().get_attr_input_type ()
+ == AST::AttrInput::AttrInputType::LITERAL);
+
+ auto &literal = static_cast<AST::AttrInputLiteral &> (attr.get_attr_input ());
+ const auto &msg_str = literal.get_literal ().as_string ();
+
+ if (decl_section_name (fndecl))
+ {
+ rust_warning_at (attr.get_locus (), 0, "section name redefined");
+ }
+
+ set_decl_section_name (fndecl, msg_str.c_str ());
+}
+
+void
+HIRCompileBase::handle_no_mangle_attribute_on_fndecl (
+ tree fndecl, const AST::Attribute &attr)
+{
+ if (attr.has_attr_input ())
+ {
+ rust_error_at (attr.get_locus (),
+ "attribute %<no_mangle%> does not accept any arguments");
+ return;
+ }
+
+ DECL_ATTRIBUTES (fndecl) = tree_cons (get_identifier ("no_mangle"), NULL_TREE,
+ DECL_ATTRIBUTES (fndecl));
+}
+
+void
+HIRCompileBase::handle_deprecated_attribute_on_fndecl (
+ tree fndecl, const AST::Attribute &attr)
+{
+ tree value = NULL_TREE;
+ TREE_DEPRECATED (fndecl) = 1;
+
+ // simple #[deprecated]
+ if (!attr.has_attr_input ())
+ return;
+
+ const AST::AttrInput &input = attr.get_attr_input ();
+ auto input_type = input.get_attr_input_type ();
+
+ if (input_type == AST::AttrInput::AttrInputType::LITERAL)
+ {
+ // handle #[deprecated = "message"]
+ auto &literal
+ = static_cast<AST::AttrInputLiteral &> (attr.get_attr_input ());
+ const auto &msg_str = literal.get_literal ().as_string ();
+ value = build_string (msg_str.size (), msg_str.c_str ());
+ }
+ else if (input_type == AST::AttrInput::AttrInputType::TOKEN_TREE)
+ {
+ // handle #[deprecated(since = "...", note = "...")]
+ const auto &option = static_cast<const AST::DelimTokenTree &> (input);
+ AST::AttrInputMetaItemContainer *meta_item = option.parse_to_meta_item ();
+ for (const auto &item : meta_item->get_items ())
+ {
+ auto converted_item = item->to_meta_name_value_str ();
+ if (!converted_item)
+ continue;
+ auto key_value = converted_item->get_name_value_pair ();
+ if (key_value.first.compare ("since") == 0)
+ {
+ // valid, but this is handled by Cargo and some third-party audit
+ // tools
+ continue;
+ }
+ else if (key_value.first.compare ("note") == 0)
+ {
+ const auto &msg_str = key_value.second;
+ if (value)
+ rust_error_at (attr.get_locus (), "multiple %<note%> items");
+ value = build_string (msg_str.size (), msg_str.c_str ());
+ }
+ else
+ {
+ rust_error_at (attr.get_locus (), "unknown meta item %qs",
+ key_value.first.c_str ());
+ }
+ }
+ }
+
+ if (value)
+ {
+ tree attr_list = build_tree_list (NULL_TREE, value);
+ DECL_ATTRIBUTES (fndecl)
+ = tree_cons (get_identifier ("deprecated"), attr_list,
+ DECL_ATTRIBUTES (fndecl));
+ }
+}
+
+void
+HIRCompileBase::handle_inline_attribute_on_fndecl (tree fndecl,
+ const AST::Attribute &attr)
+{
+ // simple #[inline]
+ if (!attr.has_attr_input ())
+ {
+ DECL_DECLARED_INLINE_P (fndecl) = 1;
+ return;
+ }
+
+ const AST::AttrInput &input = attr.get_attr_input ();
+ bool is_token_tree
+ = input.get_attr_input_type () == AST::AttrInput::AttrInputType::TOKEN_TREE;
+ rust_assert (is_token_tree);
+ const auto &option = static_cast<const AST::DelimTokenTree &> (input);
+ AST::AttrInputMetaItemContainer *meta_item = option.parse_to_meta_item ();
+ if (meta_item->get_items ().size () != 1)
+ {
+ rust_error_at (attr.get_locus (), "invalid number of arguments");
+ return;
+ }
+
+ const std::string inline_option
+ = meta_item->get_items ().at (0)->as_string ();
+
+  // we only care about "never" and "always"; anything else is an error
+ bool is_always = inline_option.compare ("always") == 0;
+ bool is_never = inline_option.compare ("never") == 0;
+
+ // #[inline(never)]
+ if (is_never)
+ {
+ DECL_UNINLINABLE (fndecl) = 1;
+ }
+ // #[inline(always)]
+ else if (is_always)
+ {
+ DECL_DECLARED_INLINE_P (fndecl) = 1;
+ DECL_ATTRIBUTES (fndecl) = tree_cons (get_identifier ("always_inline"),
+ NULL, DECL_ATTRIBUTES (fndecl));
+ }
+ else
+ {
+ rust_error_at (attr.get_locus (), "unknown inline option");
+ }
+}
+
+void
+HIRCompileBase::handle_must_use_attribute_on_fndecl (tree fndecl,
+ const AST::Attribute &attr)
+{
+ tree nodiscard = get_identifier ("nodiscard");
+ tree value = NULL_TREE;
+
+ if (attr.has_attr_input ())
+ {
+ rust_assert (attr.get_attr_input ().get_attr_input_type ()
+ == AST::AttrInput::AttrInputType::LITERAL);
+
+ auto &literal
+ = static_cast<AST::AttrInputLiteral &> (attr.get_attr_input ());
+ const auto &msg_str = literal.get_literal ().as_string ();
+ tree message = build_string (msg_str.size (), msg_str.c_str ());
+
+ value = tree_cons (nodiscard, message, NULL_TREE);
+ }
+
+ DECL_ATTRIBUTES (fndecl)
+ = tree_cons (nodiscard, value, DECL_ATTRIBUTES (fndecl));
+}
+
+void
+HIRCompileBase::setup_abi_options (tree fndecl, ABI abi)
+{
+ tree abi_tree = NULL_TREE;
+
+ switch (abi)
+ {
+ case Rust::ABI::RUST:
+ case Rust::ABI::INTRINSIC:
+ case Rust::ABI::C:
+ case Rust::ABI::CDECL:
+ // `decl_attributes` function (not the macro) has the side-effect of
+ // actually switching the codegen backend to use the ABI we annotated.
+ // However, since `cdecl` is the default ABI GCC will be using, explicitly
+ // specifying that ABI will cause GCC to emit a warning saying the
+ // attribute is useless (which is confusing to the user as the attribute
+ // is added by us).
+ DECL_ATTRIBUTES (fndecl)
+ = tree_cons (get_identifier ("cdecl"), NULL, DECL_ATTRIBUTES (fndecl));
+
+ return;
+
+ case Rust::ABI::STDCALL:
+ abi_tree = get_identifier ("stdcall");
+
+ break;
+
+ case Rust::ABI::FASTCALL:
+ abi_tree = get_identifier ("fastcall");
+
+ break;
+
+ case Rust::ABI::SYSV64:
+ abi_tree = get_identifier ("sysv_abi");
+
+ break;
+
+ case Rust::ABI::WIN64:
+ abi_tree = get_identifier ("ms_abi");
+
+ break;
+
+ default:
+ break;
+ }
+
+ decl_attributes (&fndecl, build_tree_list (abi_tree, NULL_TREE), 0);
+}
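+
+// For instance, a Rust declaration such as (illustrative input)
+//
+//   extern "stdcall" { fn f (); }
+//
+// reaches here with Rust::ABI::STDCALL, so fndecl receives the GCC
+// "stdcall" attribute through decl_attributes above.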
+
+// ported from gcc/c/c-typeck.cc
+//
+// Mark EXP saying that we need to be able to take the
+// address of it; it should not be allocated in a register.
+// Returns true if successful. ARRAY_REF_P is true if this
+// is for ARRAY_REF construction - in that case we don't want
+// to look through VIEW_CONVERT_EXPR from VECTOR_TYPE to ARRAY_TYPE,
+// it is fine to use ARRAY_REFs for vector subscripts on vector
+// register variables.
+bool
+HIRCompileBase::mark_addressable (tree exp, Location locus)
+{
+ tree x = exp;
+
+ while (1)
+ switch (TREE_CODE (x))
+ {
+ case VIEW_CONVERT_EXPR:
+ if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE
+ && VECTOR_TYPE_P (TREE_TYPE (TREE_OPERAND (x, 0))))
+ return true;
+ x = TREE_OPERAND (x, 0);
+ break;
+
+ case COMPONENT_REF:
+ // TODO
+ // if (DECL_C_BIT_FIELD (TREE_OPERAND (x, 1)))
+ // {
+ // error ("cannot take address of bit-field %qD", TREE_OPERAND (x,
+ // 1)); return false;
+ // }
+
+ /* FALLTHRU */
+ case ADDR_EXPR:
+ case ARRAY_REF:
+ case REALPART_EXPR:
+ case IMAGPART_EXPR:
+ x = TREE_OPERAND (x, 0);
+ break;
+
+ case COMPOUND_LITERAL_EXPR:
+ TREE_ADDRESSABLE (x) = 1;
+ TREE_ADDRESSABLE (COMPOUND_LITERAL_EXPR_DECL (x)) = 1;
+ return true;
+
+ case CONSTRUCTOR:
+ TREE_ADDRESSABLE (x) = 1;
+ return true;
+
+ case VAR_DECL:
+ case CONST_DECL:
+ case PARM_DECL:
+ case RESULT_DECL:
+      // (we don't have a concept of a "register" declaration)
+
+      /* FALLTHRU */
+ case FUNCTION_DECL:
+ TREE_ADDRESSABLE (x) = 1;
+
+ /* FALLTHRU */
+ default:
+ return true;
+ }
+
+ return false;
+}
+
+tree
+HIRCompileBase::address_expression (tree expr, Location location)
+{
+ if (expr == error_mark_node)
+ return error_mark_node;
+
+ if (!mark_addressable (expr, location))
+ return error_mark_node;
+
+ return build_fold_addr_expr_loc (location.gcc_location (), expr);
+}
+
+tree
+HIRCompileBase::indirect_expression (tree expr, Location locus)
+{
+ if (expr == error_mark_node)
+ return error_mark_node;
+
+ return build_fold_indirect_ref_loc (locus.gcc_location (), expr);
+}
+
+std::vector<Bvariable *>
+HIRCompileBase::compile_locals_for_block (Context *ctx, Resolver::Rib &rib,
+ tree fndecl)
+{
+ std::vector<Bvariable *> locals;
+ for (auto it : rib.get_declarations ())
+ {
+ NodeId node_id = it.first;
+ HirId ref = UNKNOWN_HIRID;
+ if (!ctx->get_mappings ()->lookup_node_to_hir (node_id, &ref))
+ continue;
+
+ // we only care about local patterns
+ HIR::Pattern *pattern = ctx->get_mappings ()->lookup_hir_pattern (ref);
+ if (pattern == nullptr)
+ continue;
+
+ // lookup the type
+ TyTy::BaseType *tyty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (ref, &tyty))
+ continue;
+
+ // compile the local
+ tree type = TyTyResolveCompile::compile (ctx, tyty);
+ Bvariable *compiled
+ = CompileVarDecl::compile (fndecl, type, pattern, ctx);
+ locals.push_back (compiled);
+ }
+ return locals;
+}
+
+void
+HIRCompileBase::compile_function_body (Context *ctx, tree fndecl,
+ HIR::BlockExpr &function_body,
+ bool has_return_type)
+{
+ for (auto &s : function_body.get_statements ())
+ {
+ auto compiled_expr = CompileStmt::Compile (s.get (), ctx);
+ if (compiled_expr != nullptr)
+ {
+ tree s = convert_to_void (compiled_expr, ICV_STATEMENT);
+ ctx->add_statement (s);
+ }
+ }
+
+ if (function_body.has_expr ())
+ {
+ // the previous passes will ensure this is a valid return
+ // or a valid trailing expression
+ tree compiled_expr
+ = CompileExpr::Compile (function_body.expr.get (), ctx);
+
+ if (compiled_expr != nullptr)
+ {
+ if (has_return_type)
+ {
+ std::vector<tree> retstmts;
+ retstmts.push_back (compiled_expr);
+
+ auto ret = ctx->get_backend ()->return_statement (
+ fndecl, retstmts,
+ function_body.get_final_expr ()->get_locus ());
+ ctx->add_statement (ret);
+ }
+ else
+ {
+ // FIXME can this actually happen?
+ ctx->add_statement (compiled_expr);
+ }
+ }
+ }
+}
+
+tree
+HIRCompileBase::compile_function (
+ Context *ctx, const std::string &fn_name, HIR::SelfParam &self_param,
+ std::vector<HIR::FunctionParam> &function_params,
+ const HIR::FunctionQualifiers &qualifiers, HIR::Visibility &visibility,
+ AST::AttrVec &outer_attrs, Location locus, HIR::BlockExpr *function_body,
+ const Resolver::CanonicalPath *canonical_path, TyTy::FnType *fntype,
+ bool function_has_return)
+{
+ tree compiled_fn_type = TyTyResolveCompile::compile (ctx, fntype);
+ std::string ir_symbol_name
+ = canonical_path->get () + fntype->subst_as_string ();
+
+ // we don't mangle the main fn since we haven't implemented the main shim
+ bool is_main_fn = fn_name.compare ("main") == 0;
+ std::string asm_name = fn_name;
+
+ unsigned int flags = 0;
+ tree fndecl = ctx->get_backend ()->function (compiled_fn_type, ir_symbol_name,
+ "" /* asm_name */, flags, locus);
+
+ setup_fndecl (fndecl, is_main_fn, fntype->has_subsititions_defined (),
+ visibility, qualifiers, outer_attrs);
+ setup_abi_options (fndecl, qualifiers.get_abi ());
+
+ // conditionally mangle the function name
+ bool should_mangle = should_mangle_item (fndecl);
+ if (!is_main_fn && should_mangle)
+ asm_name = ctx->mangle_item (fntype, *canonical_path);
+ SET_DECL_ASSEMBLER_NAME (fndecl,
+ get_identifier_with_length (asm_name.data (),
+ asm_name.length ()));
+
+ // insert into the context
+ ctx->insert_function_decl (fntype, fndecl);
+
+ // setup the params
+ TyTy::BaseType *tyret = fntype->get_return_type ();
+ std::vector<Bvariable *> param_vars;
+ if (!self_param.is_error ())
+ {
+ rust_assert (fntype->is_method ());
+ TyTy::BaseType *self_tyty_lookup = fntype->get_self_type ();
+
+ tree self_type = TyTyResolveCompile::compile (ctx, self_tyty_lookup);
+ Bvariable *compiled_self_param
+ = CompileSelfParam::compile (ctx, fndecl, self_param, self_type,
+ self_param.get_locus ());
+
+ param_vars.push_back (compiled_self_param);
+ ctx->insert_var_decl (self_param.get_mappings ().get_hirid (),
+ compiled_self_param);
+ }
+
+  // when this is a method, offset the index into the TyTy::FnType params
+  // by +1 to skip over the implicit Self parameter
+ bool is_method = !self_param.is_error ();
+ size_t i = is_method ? 1 : 0;
+ for (auto &referenced_param : function_params)
+ {
+ auto tyty_param = fntype->param_at (i++);
+ auto param_tyty = tyty_param.second;
+ auto compiled_param_type = TyTyResolveCompile::compile (ctx, param_tyty);
+
+ Location param_locus = referenced_param.get_locus ();
+ Bvariable *compiled_param_var
+ = CompileFnParam::compile (ctx, fndecl, &referenced_param,
+ compiled_param_type, param_locus);
+
+ param_vars.push_back (compiled_param_var);
+
+ const HIR::Pattern &param_pattern = *referenced_param.get_param_name ();
+ ctx->insert_var_decl (param_pattern.get_pattern_mappings ().get_hirid (),
+ compiled_param_var);
+ }
+
+ if (!ctx->get_backend ()->function_set_parameters (fndecl, param_vars))
+ return error_mark_node;
+
+ // lookup locals
+ auto body_mappings = function_body->get_mappings ();
+ Resolver::Rib *rib = nullptr;
+ bool ok
+ = ctx->get_resolver ()->find_name_rib (body_mappings.get_nodeid (), &rib);
+ rust_assert (ok);
+
+ std::vector<Bvariable *> locals
+ = compile_locals_for_block (ctx, *rib, fndecl);
+
+ tree enclosing_scope = NULL_TREE;
+ Location start_location = function_body->get_locus ();
+ Location end_location = function_body->get_end_locus ();
+
+ tree code_block = ctx->get_backend ()->block (fndecl, enclosing_scope, locals,
+ start_location, end_location);
+ ctx->push_block (code_block);
+
+ Bvariable *return_address = nullptr;
+ if (function_has_return)
+ {
+ tree return_type = TyTyResolveCompile::compile (ctx, tyret);
+
+ bool address_is_taken = false;
+ tree ret_var_stmt = NULL_TREE;
+
+ return_address
+ = ctx->get_backend ()->temporary_variable (fndecl, code_block,
+ return_type, NULL,
+ address_is_taken, locus,
+ &ret_var_stmt);
+
+ ctx->add_statement (ret_var_stmt);
+ }
+
+ ctx->push_fn (fndecl, return_address);
+ compile_function_body (ctx, fndecl, *function_body, function_has_return);
+ tree bind_tree = ctx->pop_block ();
+
+ gcc_assert (TREE_CODE (bind_tree) == BIND_EXPR);
+ DECL_SAVED_TREE (fndecl) = bind_tree;
+
+ ctx->pop_fn ();
+ ctx->push_function (fndecl);
+
+ if (DECL_DECLARED_CONSTEXPR_P (fndecl))
+ {
+ maybe_save_constexpr_fundef (fndecl);
+ }
+
+ return fndecl;
+}
+
+tree
+HIRCompileBase::compile_constant_item (
+ Context *ctx, TyTy::BaseType *resolved_type,
+ const Resolver::CanonicalPath *canonical_path, HIR::Expr *const_value_expr,
+ Location locus)
+{
+ const std::string &ident = canonical_path->get ();
+ tree type = TyTyResolveCompile::compile (ctx, resolved_type);
+ tree const_type = build_qualified_type (type, TYPE_QUAL_CONST);
+
+ bool is_block_expr
+ = const_value_expr->get_expression_type () == HIR::Expr::ExprType::Block;
+
+  // in order to compile a block expr we want to reuse as much existing
+  // machinery as we can. This means the best approach is to make a _fake_
+  // function with a block so it can hold onto temps, then use our constexpr
+  // code to fold it completely, or produce error_mark_node on failure
+ Backend::typed_identifier receiver;
+ tree compiled_fn_type = ctx->get_backend ()->function_type (
+ receiver, {}, {Backend::typed_identifier ("_", const_type, locus)}, NULL,
+ locus);
+
+ tree fndecl
+ = ctx->get_backend ()->function (compiled_fn_type, ident, "", 0, locus);
+ TREE_READONLY (fndecl) = 1;
+
+ tree enclosing_scope = NULL_TREE;
+
+ Location start_location = const_value_expr->get_locus ();
+ Location end_location = const_value_expr->get_locus ();
+ if (is_block_expr)
+ {
+ HIR::BlockExpr *function_body
+ = static_cast<HIR::BlockExpr *> (const_value_expr);
+ start_location = function_body->get_locus ();
+ end_location = function_body->get_end_locus ();
+ }
+
+ tree code_block = ctx->get_backend ()->block (fndecl, enclosing_scope, {},
+ start_location, end_location);
+ ctx->push_block (code_block);
+
+ bool address_is_taken = false;
+ tree ret_var_stmt = NULL_TREE;
+ Bvariable *return_address
+ = ctx->get_backend ()->temporary_variable (fndecl, code_block, const_type,
+ NULL, address_is_taken, locus,
+ &ret_var_stmt);
+
+ ctx->add_statement (ret_var_stmt);
+ ctx->push_fn (fndecl, return_address);
+
+ if (is_block_expr)
+ {
+ HIR::BlockExpr *function_body
+ = static_cast<HIR::BlockExpr *> (const_value_expr);
+ compile_function_body (ctx, fndecl, *function_body, true);
+ }
+ else
+ {
+ tree value = CompileExpr::Compile (const_value_expr, ctx);
+ tree return_expr = ctx->get_backend ()->return_statement (
+ fndecl, {value}, const_value_expr->get_locus ());
+ ctx->add_statement (return_expr);
+ }
+
+ tree bind_tree = ctx->pop_block ();
+
+ gcc_assert (TREE_CODE (bind_tree) == BIND_EXPR);
+ DECL_SAVED_TREE (fndecl) = bind_tree;
+ DECL_DECLARED_CONSTEXPR_P (fndecl) = 1;
+ maybe_save_constexpr_fundef (fndecl);
+
+ ctx->pop_fn ();
+
+  // let's fold it into a call expr
+ tree call
+ = build_call_array_loc (locus.gcc_location (), const_type, fndecl, 0, NULL);
+ tree folded_expr = fold_expr (call);
+
+ return named_constant_expression (const_type, ident, folded_expr, locus);
+}
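+
+// A sketch of the effect (illustrative input): for
+//
+//   const FOO: i32 = { let a = 1; a + 2 };
+//
+// we build a fake constexpr function returning i32 whose body is the block,
+// fold a call to it down to the value 3, and hand that folded initializer
+// to named_constant_expression to produce the CONST_DECL.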
+
+tree
+HIRCompileBase::named_constant_expression (tree type_tree,
+ const std::string &name,
+ tree const_val, Location location)
+{
+ if (type_tree == error_mark_node || const_val == error_mark_node)
+ return error_mark_node;
+
+ tree name_tree = get_identifier_with_length (name.data (), name.length ());
+ tree decl
+ = build_decl (location.gcc_location (), CONST_DECL, name_tree, type_tree);
+ DECL_INITIAL (decl) = const_val;
+ TREE_CONSTANT (decl) = 1;
+ TREE_READONLY (decl) = 1;
+
+ rust_preserve_from_gc (decl);
+ return decl;
+}
+
+} // namespace Compile
+} // namespace Rust
diff --git a/gcc/rust/backend/rust-compile-base.h b/gcc/rust/backend/rust-compile-base.h
new file mode 100644
index 0000000..4f039d2
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-base.h
@@ -0,0 +1,146 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_COMPILE_BASE
+#define RUST_COMPILE_BASE
+
+#include "rust-compile-context.h"
+#include "rust-compile-type.h"
+#include "rust-hir-visitor.h"
+#include "rust-hir-full.h"
+
+namespace Rust {
+namespace Compile {
+
+class HIRCompileBase
+{
+public:
+ virtual ~HIRCompileBase () {}
+
+protected:
+ HIRCompileBase (Context *ctx) : ctx (ctx) {}
+
+ Context *ctx;
+
+protected:
+ Context *get_context () { return ctx; }
+
+ tree coercion_site (HirId id, tree rvalue, const TyTy::BaseType *actual,
+ const TyTy::BaseType *expected, Location lvalue_locus,
+ Location rvalue_locus);
+ tree coercion_site1 (tree rvalue, const TyTy::BaseType *actual,
+ const TyTy::BaseType *expected, Location lvalue_locus,
+ Location rvalue_locus);
+
+ tree coerce_to_dyn_object (tree compiled_ref, const TyTy::BaseType *actual,
+ const TyTy::DynamicObjectType *ty, Location locus);
+
+ tree compute_address_for_trait_item (
+ const Resolver::TraitItemReference *ref,
+ const TyTy::TypeBoundPredicate *predicate,
+ std::vector<std::pair<Resolver::TraitReference *, HIR::ImplBlock *>>
+ &receiver_bounds,
+ const TyTy::BaseType *receiver, const TyTy::BaseType *root, Location locus);
+
+ bool verify_array_capacities (tree ltype, tree rtype, Location ltype_locus,
+ Location rtype_locus);
+
+ tree query_compile (HirId ref, TyTy::BaseType *lookup,
+ const HIR::PathIdentSegment &final_segment,
+ const Analysis::NodeMapping &mappings,
+ Location expr_locus, bool is_qualified_path);
+
+ tree resolve_adjustements (std::vector<Resolver::Adjustment> &adjustments,
+ tree expression, Location locus);
+
+ tree resolve_deref_adjustment (Resolver::Adjustment &adjustment,
+ tree expression, Location locus);
+
+ tree resolve_indirection_adjustment (Resolver::Adjustment &adjustment,
+ tree expression, Location locus);
+
+ tree resolve_unsized_adjustment (Resolver::Adjustment &adjustment,
+ tree expression, Location locus);
+
+ tree resolve_unsized_slice_adjustment (Resolver::Adjustment &adjustment,
+ tree expression, Location locus);
+
+ tree resolve_unsized_dyn_adjustment (Resolver::Adjustment &adjustment,
+ tree expression, Location locus);
+
+ static void setup_fndecl (tree fndecl, bool is_main_entry_point,
+ bool is_generic_fn, HIR::Visibility &visibility,
+ const HIR::FunctionQualifiers &qualifiers,
+ const AST::AttrVec &attrs);
+
+ static void handle_inline_attribute_on_fndecl (tree fndecl,
+ const AST::Attribute &attr);
+
+ static void handle_cold_attribute_on_fndecl (tree fndecl,
+ const AST::Attribute &attr);
+
+ static void handle_must_use_attribute_on_fndecl (tree fndecl,
+ const AST::Attribute &attr);
+
+ static void
+ handle_link_section_attribute_on_fndecl (tree fndecl,
+ const AST::Attribute &attr);
+ static void
+ handle_deprecated_attribute_on_fndecl (tree fndecl,
+ const AST::Attribute &attr);
+
+ static void handle_no_mangle_attribute_on_fndecl (tree fndecl,
+ const AST::Attribute &attr);
+
+ static void setup_abi_options (tree fndecl, ABI abi);
+
+ static tree address_expression (tree expr, Location locus);
+
+ static tree indirect_expression (tree expr, Location locus);
+
+ static bool mark_addressable (tree, Location);
+
+ static std::vector<Bvariable *>
+ compile_locals_for_block (Context *ctx, Resolver::Rib &rib, tree fndecl);
+
+ static void compile_function_body (Context *ctx, tree fndecl,
+ HIR::BlockExpr &function_body,
+ bool has_return_type);
+
+ static tree compile_function (
+ Context *ctx, const std::string &fn_name, HIR::SelfParam &self_param,
+ std::vector<HIR::FunctionParam> &function_params,
+ const HIR::FunctionQualifiers &qualifiers, HIR::Visibility &visibility,
+ AST::AttrVec &outer_attrs, Location locus, HIR::BlockExpr *function_body,
+ const Resolver::CanonicalPath *canonical_path, TyTy::FnType *fntype,
+ bool function_has_return);
+
+ static tree
+ compile_constant_item (Context *ctx, TyTy::BaseType *resolved_type,
+ const Resolver::CanonicalPath *canonical_path,
+ HIR::Expr *const_value_expr, Location locus);
+
+ static tree named_constant_expression (tree type_tree,
+ const std::string &name,
+ tree const_val, Location location);
+};
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_COMPILE_BASE
diff --git a/gcc/rust/backend/rust-compile-block.cc b/gcc/rust/backend/rust-compile-block.cc
new file mode 100644
index 0000000..83f2a37
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-block.cc
@@ -0,0 +1,158 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-compile-block.h"
+#include "rust-compile-stmt.h"
+#include "rust-compile-expr.h"
+
+namespace Rust {
+namespace Compile {
+
+CompileBlock::CompileBlock (Context *ctx, Bvariable *result)
+ : HIRCompileBase (ctx), translated (nullptr), result (result)
+{}
+
+tree
+CompileBlock::compile (HIR::BlockExpr *expr, Context *ctx, Bvariable *result)
+{
+ CompileBlock compiler (ctx, result);
+ compiler.visit (*expr);
+ return compiler.translated;
+}
+
+void
+CompileBlock::visit (HIR::BlockExpr &expr)
+{
+ fncontext fnctx = ctx->peek_fn ();
+ tree fndecl = fnctx.fndecl;
+ Location start_location = expr.get_locus ();
+ Location end_location = expr.get_end_locus ();
+ auto body_mappings = expr.get_mappings ();
+
+ Resolver::Rib *rib = nullptr;
+ if (!ctx->get_resolver ()->find_name_rib (body_mappings.get_nodeid (), &rib))
+ {
+ rust_fatal_error (expr.get_locus (), "failed to setup locals per block");
+ return;
+ }
+
+ std::vector<Bvariable *> locals
+ = compile_locals_for_block (ctx, *rib, fndecl);
+
+ tree enclosing_scope = ctx->peek_enclosing_scope ();
+ tree new_block = ctx->get_backend ()->block (fndecl, enclosing_scope, locals,
+ start_location, end_location);
+ ctx->push_block (new_block);
+
+ for (auto &s : expr.get_statements ())
+ {
+ auto compiled_expr = CompileStmt::Compile (s.get (), ctx);
+ if (compiled_expr != nullptr)
+ {
+ tree s = convert_to_void (compiled_expr, ICV_STATEMENT);
+ ctx->add_statement (s);
+ }
+ }
+
+ if (expr.has_expr ())
+ {
+ // the previous passes will ensure this is a valid return or
+ // a valid trailing expression
+ tree compiled_expr = CompileExpr::Compile (expr.expr.get (), ctx);
+ if (compiled_expr != nullptr)
+ {
+ if (result == nullptr)
+ {
+ ctx->add_statement (compiled_expr);
+ }
+ else
+ {
+ tree result_reference = ctx->get_backend ()->var_expression (
+ result, expr.get_final_expr ()->get_locus ());
+
+ tree assignment
+ = ctx->get_backend ()->assignment_statement (result_reference,
+ compiled_expr,
+ expr.get_locus ());
+ ctx->add_statement (assignment);
+ }
+ }
+ }
+
+ ctx->pop_block ();
+ translated = new_block;
+}
+
+void
+CompileConditionalBlocks::visit (HIR::IfExpr &expr)
+{
+ fncontext fnctx = ctx->peek_fn ();
+ tree fndecl = fnctx.fndecl;
+ tree condition_expr = CompileExpr::Compile (expr.get_if_condition (), ctx);
+ tree then_block = CompileBlock::compile (expr.get_if_block (), ctx, result);
+
+ translated
+ = ctx->get_backend ()->if_statement (fndecl, condition_expr, then_block,
+ NULL, expr.get_locus ());
+}
+
+void
+CompileConditionalBlocks::visit (HIR::IfExprConseqElse &expr)
+{
+ fncontext fnctx = ctx->peek_fn ();
+ tree fndecl = fnctx.fndecl;
+ tree condition_expr = CompileExpr::Compile (expr.get_if_condition (), ctx);
+ tree then_block = CompileBlock::compile (expr.get_if_block (), ctx, result);
+ tree else_block = CompileBlock::compile (expr.get_else_block (), ctx, result);
+
+ translated
+ = ctx->get_backend ()->if_statement (fndecl, condition_expr, then_block,
+ else_block, expr.get_locus ());
+}
+
+void
+CompileConditionalBlocks::visit (HIR::IfExprConseqIf &expr)
+{
+ fncontext fnctx = ctx->peek_fn ();
+ tree fndecl = fnctx.fndecl;
+ tree condition_expr = CompileExpr::Compile (expr.get_if_condition (), ctx);
+ tree then_block = CompileBlock::compile (expr.get_if_block (), ctx, result);
+
+ // else block
+ std::vector<Bvariable *> locals;
+ Location start_location = expr.get_conseq_if_expr ()->get_locus ();
+ Location end_location = expr.get_conseq_if_expr ()->get_locus (); // FIXME
+ tree enclosing_scope = ctx->peek_enclosing_scope ();
+ tree else_block = ctx->get_backend ()->block (fndecl, enclosing_scope, locals,
+ start_location, end_location);
+ ctx->push_block (else_block);
+
+ tree else_stmt_decl
+ = CompileConditionalBlocks::compile (expr.get_conseq_if_expr (), ctx,
+ result);
+ ctx->add_statement (else_stmt_decl);
+
+ ctx->pop_block ();
+
+ translated
+ = ctx->get_backend ()->if_statement (fndecl, condition_expr, then_block,
+ else_block, expr.get_locus ());
+}
+
+} // namespace Compile
+} // namespace Rust
diff --git a/gcc/rust/backend/rust-compile-block.h b/gcc/rust/backend/rust-compile-block.h
new file mode 100644
index 0000000..0ccf81f
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-block.h
@@ -0,0 +1,209 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_COMPILE_BLOCK
+#define RUST_COMPILE_BLOCK
+
+#include "rust-compile-base.h"
+
+namespace Rust {
+namespace Compile {
+
+class CompileBlock : private HIRCompileBase
+{
+public:
+ static tree compile (HIR::BlockExpr *expr, Context *ctx, Bvariable *result);
+
+protected:
+ void visit (HIR::BlockExpr &expr);
+
+private:
+ CompileBlock (Context *ctx, Bvariable *result);
+
+ tree translated;
+ Bvariable *result;
+};
+
+class CompileConditionalBlocks : public HIRCompileBase,
+ public HIR::HIRExpressionVisitor
+{
+public:
+ static tree compile (HIR::IfExpr *expr, Context *ctx, Bvariable *result)
+ {
+ CompileConditionalBlocks resolver (ctx, result);
+ expr->accept_vis (resolver);
+ return resolver.translated;
+ }
+
+ void visit (HIR::IfExpr &expr) override;
+ void visit (HIR::IfExprConseqElse &expr) override;
+ void visit (HIR::IfExprConseqIf &expr) override;
+
+ // Empty visit for unused Expression HIR nodes.
+ void visit (HIR::PathInExpression &) override {}
+ void visit (HIR::QualifiedPathInExpression &) override {}
+ void visit (HIR::ClosureExpr &) override {}
+ void visit (HIR::StructExprFieldIdentifier &) override {}
+ void visit (HIR::StructExprFieldIdentifierValue &) override {}
+ void visit (HIR::StructExprFieldIndexValue &) override {}
+ void visit (HIR::StructExprStruct &) override {}
+ void visit (HIR::StructExprStructFields &) override {}
+ void visit (HIR::LiteralExpr &) override {}
+ void visit (HIR::BorrowExpr &) override {}
+ void visit (HIR::DereferenceExpr &) override {}
+ void visit (HIR::ErrorPropagationExpr &) override {}
+ void visit (HIR::NegationExpr &) override {}
+ void visit (HIR::ArithmeticOrLogicalExpr &) override {}
+ void visit (HIR::ComparisonExpr &) override {}
+ void visit (HIR::LazyBooleanExpr &) override {}
+ void visit (HIR::TypeCastExpr &) override {}
+ void visit (HIR::AssignmentExpr &) override {}
+ void visit (HIR::CompoundAssignmentExpr &) override {}
+ void visit (HIR::GroupedExpr &) override {}
+ void visit (HIR::ArrayExpr &) override {}
+ void visit (HIR::ArrayIndexExpr &) override {}
+ void visit (HIR::TupleExpr &) override {}
+ void visit (HIR::TupleIndexExpr &) override {}
+ void visit (HIR::CallExpr &) override {}
+ void visit (HIR::MethodCallExpr &) override {}
+ void visit (HIR::FieldAccessExpr &) override {}
+ void visit (HIR::BlockExpr &) override {}
+ void visit (HIR::ContinueExpr &) override {}
+ void visit (HIR::BreakExpr &) override {}
+ void visit (HIR::RangeFromToExpr &) override {}
+ void visit (HIR::RangeFromExpr &) override {}
+ void visit (HIR::RangeToExpr &) override {}
+ void visit (HIR::RangeFullExpr &) override {}
+ void visit (HIR::RangeFromToInclExpr &) override {}
+ void visit (HIR::RangeToInclExpr &) override {}
+ void visit (HIR::ReturnExpr &) override {}
+ void visit (HIR::UnsafeBlockExpr &) override {}
+ void visit (HIR::LoopExpr &) override {}
+ void visit (HIR::WhileLoopExpr &) override {}
+ void visit (HIR::WhileLetLoopExpr &) override {}
+ void visit (HIR::ForLoopExpr &) override {}
+ void visit (HIR::IfExprConseqIfLet &) override {}
+ void visit (HIR::IfLetExpr &) override {}
+ void visit (HIR::IfLetExprConseqElse &) override {}
+ void visit (HIR::IfLetExprConseqIf &) override {}
+ void visit (HIR::IfLetExprConseqIfLet &) override {}
+ void visit (HIR::MatchExpr &) override {}
+ void visit (HIR::AwaitExpr &) override {}
+ void visit (HIR::AsyncBlockExpr &) override {}
+
+private:
+ CompileConditionalBlocks (Context *ctx, Bvariable *result)
+ : HIRCompileBase (ctx), translated (nullptr), result (result)
+ {}
+
+ tree translated;
+ Bvariable *result;
+};
+
+class CompileExprWithBlock : public HIRCompileBase,
+ public HIR::HIRExpressionVisitor
+{
+public:
+ static tree compile (HIR::ExprWithBlock *expr, Context *ctx,
+ Bvariable *result)
+ {
+ CompileExprWithBlock resolver (ctx, result);
+ expr->accept_vis (resolver);
+ return resolver.translated;
+ }
+
+ void visit (HIR::IfExpr &expr) override
+ {
+ translated = CompileConditionalBlocks::compile (&expr, ctx, result);
+ }
+
+ void visit (HIR::IfExprConseqElse &expr) override
+ {
+ translated = CompileConditionalBlocks::compile (&expr, ctx, result);
+ }
+
+ void visit (HIR::IfExprConseqIf &expr) override
+ {
+ translated = CompileConditionalBlocks::compile (&expr, ctx, result);
+ }
+
+ // Empty visit for unused Expression HIR nodes.
+ void visit (HIR::PathInExpression &) override {}
+ void visit (HIR::QualifiedPathInExpression &) override {}
+ void visit (HIR::ClosureExpr &) override {}
+ void visit (HIR::StructExprFieldIdentifier &) override {}
+ void visit (HIR::StructExprFieldIdentifierValue &) override {}
+ void visit (HIR::StructExprFieldIndexValue &) override {}
+ void visit (HIR::StructExprStruct &) override {}
+ void visit (HIR::StructExprStructFields &) override {}
+ void visit (HIR::LiteralExpr &) override {}
+ void visit (HIR::BorrowExpr &) override {}
+ void visit (HIR::DereferenceExpr &) override {}
+ void visit (HIR::ErrorPropagationExpr &) override {}
+ void visit (HIR::NegationExpr &) override {}
+ void visit (HIR::ArithmeticOrLogicalExpr &) override {}
+ void visit (HIR::ComparisonExpr &) override {}
+ void visit (HIR::LazyBooleanExpr &) override {}
+ void visit (HIR::TypeCastExpr &) override {}
+ void visit (HIR::AssignmentExpr &) override {}
+ void visit (HIR::CompoundAssignmentExpr &) override {}
+ void visit (HIR::GroupedExpr &) override {}
+ void visit (HIR::ArrayExpr &) override {}
+ void visit (HIR::ArrayIndexExpr &) override {}
+ void visit (HIR::TupleExpr &) override {}
+ void visit (HIR::TupleIndexExpr &) override {}
+ void visit (HIR::CallExpr &) override {}
+ void visit (HIR::MethodCallExpr &) override {}
+ void visit (HIR::FieldAccessExpr &) override {}
+ void visit (HIR::BlockExpr &) override {}
+ void visit (HIR::ContinueExpr &) override {}
+ void visit (HIR::BreakExpr &) override {}
+ void visit (HIR::RangeFromToExpr &) override {}
+ void visit (HIR::RangeFromExpr &) override {}
+ void visit (HIR::RangeToExpr &) override {}
+ void visit (HIR::RangeFullExpr &) override {}
+ void visit (HIR::RangeFromToInclExpr &) override {}
+ void visit (HIR::RangeToInclExpr &) override {}
+ void visit (HIR::ReturnExpr &) override {}
+ void visit (HIR::UnsafeBlockExpr &) override {}
+ void visit (HIR::LoopExpr &) override {}
+ void visit (HIR::WhileLoopExpr &) override {}
+ void visit (HIR::WhileLetLoopExpr &) override {}
+ void visit (HIR::ForLoopExpr &) override {}
+ void visit (HIR::IfExprConseqIfLet &) override {}
+ void visit (HIR::IfLetExpr &) override {}
+ void visit (HIR::IfLetExprConseqElse &) override {}
+ void visit (HIR::IfLetExprConseqIf &) override {}
+ void visit (HIR::IfLetExprConseqIfLet &) override {}
+ void visit (HIR::MatchExpr &) override {}
+ void visit (HIR::AwaitExpr &) override {}
+ void visit (HIR::AsyncBlockExpr &) override {}
+
+private:
+ CompileExprWithBlock (Context *ctx, Bvariable *result)
+ : HIRCompileBase (ctx), translated (nullptr), result (result)
+ {}
+
+ tree translated;
+ Bvariable *result;
+};
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_COMPILE_BLOCK
diff --git a/gcc/rust/backend/rust-compile-context.cc b/gcc/rust/backend/rust-compile-context.cc
new file mode 100644
index 0000000..018897e
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-context.cc
@@ -0,0 +1,193 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-compile-context.h"
+#include "rust-compile-type.h"
+
+namespace Rust {
+namespace Compile {
+
+Context::Context (::Backend *backend)
+ : backend (backend), resolver (Resolver::Resolver::get ()),
+ tyctx (Resolver::TypeCheckContext::get ()),
+ mappings (Analysis::Mappings::get ()), mangler (Mangler ())
+{
+ setup_builtins ();
+}
+
+void
+Context::setup_builtins ()
+{
+ auto builtins = resolver->get_builtin_types ();
+ for (auto it = builtins.begin (); it != builtins.end (); it++)
+ {
+ HirId ref;
+ bool ok = tyctx->lookup_type_by_node_id ((*it)->get_node_id (), &ref);
+ rust_assert (ok);
+
+ TyTy::BaseType *lookup;
+ ok = tyctx->lookup_type (ref, &lookup);
+ rust_assert (ok);
+
+ TyTyResolveCompile::compile (this, lookup);
+ }
+}
+
+hashval_t
+Context::type_hasher (tree type)
+{
+ inchash::hash hstate;
+
+ hstate.add_int (TREE_CODE (type));
+
+ if (TYPE_NAME (type))
+ {
+ hashval_t record_name_hash
+ = IDENTIFIER_HASH_VALUE (DECL_NAME (TYPE_NAME (type)));
+ hstate.add_object (record_name_hash);
+ }
+
+ for (tree t = TYPE_ATTRIBUTES (type); t; t = TREE_CHAIN (t))
+ /* Just the identifier is adequate to distinguish. */
+ hstate.add_object (IDENTIFIER_HASH_VALUE (TREE_PURPOSE (t)));
+
+ switch (TREE_CODE (type))
+ {
+ case METHOD_TYPE:
+ hstate.add_object (TYPE_HASH (TYPE_METHOD_BASETYPE (type)));
+ /* FALLTHROUGH. */
+ case FUNCTION_TYPE:
+ for (tree t = TYPE_ARG_TYPES (type); t; t = TREE_CHAIN (t))
+ if (TREE_VALUE (t) != error_mark_node)
+ hstate.add_object (TYPE_HASH (TREE_VALUE (t)));
+ break;
+
+ case OFFSET_TYPE:
+ hstate.add_object (TYPE_HASH (TYPE_OFFSET_BASETYPE (type)));
+ break;
+
+ case ARRAY_TYPE: {
+ if (TYPE_DOMAIN (type))
+ hstate.add_object (TYPE_HASH (TYPE_DOMAIN (type)));
+ if (!AGGREGATE_TYPE_P (TREE_TYPE (type)))
+ {
+ unsigned typeless = TYPE_TYPELESS_STORAGE (type);
+ hstate.add_object (typeless);
+ }
+ }
+ break;
+
+ case INTEGER_TYPE: {
+ tree t = TYPE_MAX_VALUE (type);
+ if (!t)
+ t = TYPE_MIN_VALUE (type);
+ for (int i = 0; i < TREE_INT_CST_NUNITS (t); i++)
+ hstate.add_object (TREE_INT_CST_ELT (t, i));
+ break;
+ }
+
+ case REAL_TYPE:
+ case FIXED_POINT_TYPE: {
+ unsigned prec = TYPE_PRECISION (type);
+ hstate.add_object (prec);
+ break;
+ }
+
+ case VECTOR_TYPE:
+ hstate.add_poly_int (TYPE_VECTOR_SUBPARTS (type));
+ break;
+
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ case QUAL_UNION_TYPE: {
+ for (tree t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t))
+ {
+ hashval_t name_hash = IDENTIFIER_HASH_VALUE (DECL_NAME (t));
+ hashval_t type_hash = type_hasher (TREE_TYPE (t));
+ hstate.add_object (name_hash);
+ hstate.add_object (type_hash);
+ }
+ }
+ break;
+
+ case BOOLEAN_TYPE:
+ break;
+
+ case REFERENCE_TYPE:
+ case POINTER_TYPE: {
+ hashval_t type_hash = type_hasher (TREE_TYPE (type));
+ hstate.add_object (type_hash);
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return hstate.end ();
+}
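+
+// For example, two structurally identical RECORD_TYPE trees that share the
+// same TYPE_NAME and the same (name, type) field sequence hash equal here,
+// which is what allows compiled types to be interned and reused. A rough
+// sketch of the expected behaviour (the trees themselves are hypothetical):
+//
+//   tree a = ...; // RECORD_TYPE "Foo" with fields (x: i32, y: i32)
+//   tree b = ...; // a second, structurally identical RECORD_TYPE "Foo"
+//   rust_assert (Context::type_hasher (a) == Context::type_hasher (b));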
+
+void
+Context::push_closure_context (HirId id)
+{
+ auto it = closure_bindings.find (id);
+ rust_assert (it == closure_bindings.end ());
+
+ closure_bindings.insert ({id, {}});
+ closure_scope_bindings.push_back (id);
+}
+
+void
+Context::pop_closure_context ()
+{
+ rust_assert (!closure_scope_bindings.empty ());
+
+ HirId ref = closure_scope_bindings.back ();
+ closure_scope_bindings.pop_back ();
+ closure_bindings.erase (ref);
+}
+
+void
+Context::insert_closure_binding (HirId id, tree expr)
+{
+ rust_assert (!closure_scope_bindings.empty ());
+
+ HirId ref = closure_scope_bindings.back ();
+ closure_bindings[ref].insert ({id, expr});
+}
+
+bool
+Context::lookup_closure_binding (HirId id, tree *expr)
+{
+ if (closure_scope_bindings.empty ())
+ return false;
+
+ HirId ref = closure_scope_bindings.back ();
+ auto it = closure_bindings.find (ref);
+ rust_assert (it != closure_bindings.end ());
+
+ auto iy = it->second.find (id);
+ if (iy == it->second.end ())
+ return false;
+
+ *expr = iy->second;
+ return true;
+}
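+
+// A minimal usage sketch of the closure-binding protocol above (the ids and
+// the capture expression are hypothetical): bindings are scoped to the
+// innermost closure context and are discarded when it is popped.
+//
+//   ctx->push_closure_context (closure_id);
+//   ctx->insert_closure_binding (capture_id, capture_expr);
+//   tree e = NULL_TREE;
+//   if (ctx->lookup_closure_binding (capture_id, &e))
+//     {
+//       // e is the compiled expression bound for this capture
+//     }
+//   ctx->pop_closure_context ();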
+
+} // namespace Compile
+} // namespace Rust
diff --git a/gcc/rust/backend/rust-compile-context.h b/gcc/rust/backend/rust-compile-context.h
new file mode 100644
index 0000000..8e8fac8
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-context.h
@@ -0,0 +1,402 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_COMPILE_CONTEXT
+#define RUST_COMPILE_CONTEXT
+
+#include "rust-system.h"
+#include "rust-hir-map.h"
+#include "rust-name-resolver.h"
+#include "rust-hir-type-check.h"
+#include "rust-backend.h"
+#include "rust-hir-full.h"
+#include "rust-mangle.h"
+#include "rust-tree.h"
+
+namespace Rust {
+namespace Compile {
+
+struct fncontext
+{
+ tree fndecl;
+ ::Bvariable *ret_addr;
+};
+
+class Context
+{
+public:
+ Context (::Backend *backend);
+
+ void setup_builtins ();
+
+ bool lookup_compiled_types (tree t, tree *type)
+ {
+ hashval_t h = type_hasher (t);
+ auto it = compiled_type_map.find (h);
+ if (it == compiled_type_map.end ())
+ return false;
+
+ *type = it->second;
+ return true;
+ }
+
+ tree insert_compiled_type (tree type)
+ {
+ hashval_t h = type_hasher (type);
+ auto it = compiled_type_map.find (h);
+ if (it != compiled_type_map.end ())
+ return it->second;
+
+ compiled_type_map.insert ({h, type});
+ push_type (type);
+ return type;
+ }
+
+ tree insert_main_variant (tree type)
+ {
+ hashval_t h = type_hasher (type);
+ auto it = main_variants.find (h);
+ if (it != main_variants.end ())
+ return it->second;
+
+ main_variants.insert ({h, type});
+ return type;
+ }
+
+ ::Backend *get_backend () { return backend; }
+ Resolver::Resolver *get_resolver () { return resolver; }
+ Resolver::TypeCheckContext *get_tyctx () { return tyctx; }
+ Analysis::Mappings *get_mappings () { return mappings; }
+
+ void push_block (tree scope)
+ {
+ scope_stack.push_back (scope);
+ statements.push_back ({});
+ }
+
+ tree pop_block ()
+ {
+ auto block = scope_stack.back ();
+ scope_stack.pop_back ();
+
+ auto stmts = statements.back ();
+ statements.pop_back ();
+
+ backend->block_add_statements (block, stmts);
+
+ return block;
+ }
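+
+ // A rough sketch of the intended discipline for the two helpers above (the
+ // scope tree and statement are hypothetical): every push_block () is paired
+ // with a pop_block (), and statements added in between land in the
+ // innermost block.
+ //
+ //   ctx->push_block (scope);
+ //   ctx->add_statement (stmt);
+ //   tree block = ctx->pop_block (); // block now contains stmt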
+
+ tree peek_enclosing_scope ()
+ {
+ if (scope_stack.size () == 0)
+ return nullptr;
+
+ return scope_stack.back ();
+ }
+
+ void add_statement_to_enclosing_scope (tree stmt)
+ {
+ statements.at (statements.size () - 2).push_back (stmt);
+ }
+
+ void add_statement (tree stmt) { statements.back ().push_back (stmt); }
+
+ void insert_var_decl (HirId id, ::Bvariable *decl)
+ {
+ compiled_var_decls[id] = decl;
+ }
+
+ bool lookup_var_decl (HirId id, ::Bvariable **decl)
+ {
+ auto it = compiled_var_decls.find (id);
+ if (it == compiled_var_decls.end ())
+ return false;
+
+ *decl = it->second;
+ return true;
+ }
+
+ void insert_function_decl (const TyTy::FnType *ref, tree fn)
+ {
+ auto id = ref->get_ty_ref ();
+ auto dId = ref->get_id ();
+
+ rust_assert (compiled_fn_map.find (id) == compiled_fn_map.end ());
+ compiled_fn_map[id] = fn;
+
+ auto it = mono_fns.find (dId);
+ if (it == mono_fns.end ())
+ mono_fns[dId] = {};
+
+ mono_fns[dId].push_back ({ref, fn});
+ }
+
+ void insert_closure_decl (const TyTy::ClosureType *ref, tree fn)
+ {
+ auto dId = ref->get_def_id ();
+ auto it = mono_closure_fns.find (dId);
+ if (it == mono_closure_fns.end ())
+ mono_closure_fns[dId] = {};
+
+ mono_closure_fns[dId].push_back ({ref, fn});
+ }
+
+ tree lookup_closure_decl (const TyTy::ClosureType *ref)
+ {
+ auto dId = ref->get_def_id ();
+ auto it = mono_closure_fns.find (dId);
+ if (it == mono_closure_fns.end ())
+ return error_mark_node;
+
+ for (auto &i : it->second)
+ {
+ const TyTy::ClosureType *t = i.first;
+ tree fn = i.second;
+
+ if (ref->is_equal (*t))
+ return fn;
+ }
+
+ return error_mark_node;
+ }
+
+ bool lookup_function_decl (HirId id, tree *fn, DefId dId = UNKNOWN_DEFID,
+ const TyTy::BaseType *ref = nullptr,
+ const std::string &asm_name = std::string ())
+ {
+ // for any monomorphized fns
+ if (ref != nullptr)
+ {
+ rust_assert (dId != UNKNOWN_DEFID);
+
+ auto it = mono_fns.find (dId);
+ if (it == mono_fns.end ())
+ return false;
+
+ for (auto &e : mono_fns[dId])
+ {
+ const TyTy::BaseType *r = e.first;
+ tree f = e.second;
+
+ if (ref->is_equal (*r))
+ {
+ *fn = f;
+ return true;
+ }
+
+ if (DECL_ASSEMBLER_NAME_SET_P (f) && !asm_name.empty ())
+ {
+ tree raw = DECL_ASSEMBLER_NAME_RAW (f);
+ const char *rptr = IDENTIFIER_POINTER (raw);
+
+ bool lengths_match_p
+ = IDENTIFIER_LENGTH (raw) == asm_name.size ();
+ if (lengths_match_p
+ && strncmp (rptr, asm_name.c_str (),
+ IDENTIFIER_LENGTH (raw))
+ == 0)
+ {
+ *fn = f;
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
+ auto it = compiled_fn_map.find (id);
+ if (it == compiled_fn_map.end ())
+ return false;
+
+ *fn = it->second;
+ return true;
+ }
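+
+ // As an illustration (the Rust input is hypothetical): a generic function
+ // such as
+ //
+ //   fn identity<T> (x: T) -> T { x }
+ //
+ // monomorphized for both i32 and f64 yields two entries in mono_fns under
+ // the same DefId; the lookup above distinguishes them by comparing the
+ // concrete TyTy::BaseType, falling back to the assembler name.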
+
+ void insert_const_decl (HirId id, tree expr) { compiled_consts[id] = expr; }
+
+ bool lookup_const_decl (HirId id, tree *expr)
+ {
+ auto it = compiled_consts.find (id);
+ if (it == compiled_consts.end ())
+ return false;
+
+ *expr = it->second;
+ return true;
+ }
+
+ void insert_label_decl (HirId id, tree label) { compiled_labels[id] = label; }
+
+ bool lookup_label_decl (HirId id, tree *label)
+ {
+ auto it = compiled_labels.find (id);
+ if (it == compiled_labels.end ())
+ return false;
+
+ *label = it->second;
+ return true;
+ }
+
+ void insert_pattern_binding (HirId id, tree binding)
+ {
+ implicit_pattern_bindings[id] = binding;
+ }
+
+ bool lookup_pattern_binding (HirId id, tree *binding)
+ {
+ auto it = implicit_pattern_bindings.find (id);
+ if (it == implicit_pattern_bindings.end ())
+ return false;
+
+ *binding = it->second;
+ return true;
+ }
+
+ void push_fn (tree fn, ::Bvariable *ret_addr)
+ {
+ fn_stack.push_back (fncontext{fn, ret_addr});
+ }
+ void pop_fn () { fn_stack.pop_back (); }
+
+ bool in_fn () { return fn_stack.size () != 0; }
+
+ // Note: peek_fn () must not be called with an empty fn_stack; this is
+ // enforced by the assert below.
+ fncontext peek_fn ()
+ {
+ rust_assert (!fn_stack.empty ());
+ return fn_stack.back ();
+ }
+
+ void push_type (tree t) { type_decls.push_back (t); }
+ void push_var (::Bvariable *v) { var_decls.push_back (v); }
+ void push_const (tree c) { const_decls.push_back (c); }
+ void push_function (tree f) { func_decls.push_back (f); }
+
+ void write_to_backend ()
+ {
+ backend->write_global_definitions (type_decls, const_decls, func_decls,
+ var_decls);
+ }
+
+ bool function_completed (tree fn)
+ {
+ for (auto it = func_decls.begin (); it != func_decls.end (); it++)
+ {
+ tree i = (*it);
+ if (i == fn)
+ {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ void push_loop_context (Bvariable *var) { loop_value_stack.push_back (var); }
+
+ Bvariable *peek_loop_context () { return loop_value_stack.back (); }
+
+ Bvariable *pop_loop_context ()
+ {
+ auto back = loop_value_stack.back ();
+ loop_value_stack.pop_back ();
+ return back;
+ }
+
+ void push_loop_begin_label (tree label)
+ {
+ loop_begin_labels.push_back (label);
+ }
+
+ tree peek_loop_begin_label () { return loop_begin_labels.back (); }
+
+ tree pop_loop_begin_label ()
+ {
+ tree pop = loop_begin_labels.back ();
+ loop_begin_labels.pop_back ();
+ return pop;
+ }
+
+ void push_const_context (void) { const_context++; }
+ void pop_const_context (void)
+ {
+ if (const_context > 0)
+ const_context--;
+ }
+ bool const_context_p (void) { return (const_context > 0); }
+
+ std::string mangle_item (const TyTy::BaseType *ty,
+ const Resolver::CanonicalPath &path) const
+ {
+ return mangler.mangle_item (ty, path);
+ }
+
+ void push_closure_context (HirId id);
+ void pop_closure_context ();
+ void insert_closure_binding (HirId id, tree expr);
+ bool lookup_closure_binding (HirId id, tree *expr);
+
+ std::vector<tree> &get_type_decls () { return type_decls; }
+ std::vector<::Bvariable *> &get_var_decls () { return var_decls; }
+ std::vector<tree> &get_const_decls () { return const_decls; }
+ std::vector<tree> &get_func_decls () { return func_decls; }
+
+ static hashval_t type_hasher (tree type);
+
+private:
+ ::Backend *backend;
+ Resolver::Resolver *resolver;
+ Resolver::TypeCheckContext *tyctx;
+ Analysis::Mappings *mappings;
+ Mangler mangler;
+
+ // state
+ std::vector<fncontext> fn_stack;
+ std::map<HirId, ::Bvariable *> compiled_var_decls;
+ std::map<hashval_t, tree> compiled_type_map;
+ std::map<HirId, tree> compiled_fn_map;
+ std::map<HirId, tree> compiled_consts;
+ std::map<HirId, tree> compiled_labels;
+ std::vector<::std::vector<tree>> statements;
+ std::vector<tree> scope_stack;
+ std::vector<::Bvariable *> loop_value_stack;
+ std::vector<tree> loop_begin_labels;
+ std::map<DefId, std::vector<std::pair<const TyTy::BaseType *, tree>>>
+ mono_fns;
+ std::map<DefId, std::vector<std::pair<const TyTy::ClosureType *, tree>>>
+ mono_closure_fns;
+ std::map<HirId, tree> implicit_pattern_bindings;
+ std::map<hashval_t, tree> main_variants;
+
+ // closure bindings
+ std::vector<HirId> closure_scope_bindings;
+ std::map<HirId, std::map<HirId, tree>> closure_bindings;
+
+ // To GCC middle-end
+ std::vector<tree> type_decls;
+ std::vector<::Bvariable *> var_decls;
+ std::vector<tree> const_decls;
+ std::vector<tree> func_decls;
+
+ // Nonzero iff we are currently compiling something inside a constant context.
+ unsigned int const_context = 0;
+};
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_COMPILE_CONTEXT
diff --git a/gcc/rust/backend/rust-compile-expr.cc b/gcc/rust/backend/rust-compile-expr.cc
new file mode 100644
index 0000000..436fc92
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-expr.cc
@@ -0,0 +1,3139 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-compile-expr.h"
+#include "rust-compile-struct-field-expr.h"
+#include "rust-hir-trait-resolve.h"
+#include "rust-hir-path-probe.h"
+#include "rust-hir-type-bounds.h"
+#include "rust-compile-pattern.h"
+#include "rust-compile-resolve-path.h"
+#include "rust-compile-block.h"
+#include "rust-compile-implitem.h"
+#include "rust-constexpr.h"
+#include "rust-gcc.h"
+
+#include "fold-const.h"
+#include "realmpfr.h"
+#include "convert.h"
+#include "print-tree.h"
+
+namespace Rust {
+namespace Compile {
+
+CompileExpr::CompileExpr (Context *ctx)
+ : HIRCompileBase (ctx), translated (error_mark_node)
+{}
+
+tree
+CompileExpr::Compile (HIR::Expr *expr, Context *ctx)
+{
+ CompileExpr compiler (ctx);
+ expr->accept_vis (compiler);
+ return compiler.translated;
+}
+
+void
+CompileExpr::visit (HIR::TupleIndexExpr &expr)
+{
+ HIR::Expr *tuple_expr = expr.get_tuple_expr ().get ();
+ TupleIndex index = expr.get_tuple_index ();
+
+ tree receiver_ref = CompileExpr::Compile (tuple_expr, ctx);
+
+ TyTy::BaseType *tuple_expr_ty = nullptr;
+ bool ok
+ = ctx->get_tyctx ()->lookup_type (tuple_expr->get_mappings ().get_hirid (),
+ &tuple_expr_ty);
+ rust_assert (ok);
+
+ // do we need to add an indirect reference
+ if (tuple_expr_ty->get_kind () == TyTy::TypeKind::REF)
+ {
+ tree indirect = indirect_expression (receiver_ref, expr.get_locus ());
+ receiver_ref = indirect;
+ }
+
+ translated
+ = ctx->get_backend ()->struct_field_expression (receiver_ref, index,
+ expr.get_locus ());
+}
+
+void
+CompileExpr::visit (HIR::TupleExpr &expr)
+{
+ if (expr.is_unit ())
+ {
+ translated = ctx->get_backend ()->unit_expression ();
+ return;
+ }
+
+ TyTy::BaseType *tyty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (),
+ &tyty))
+ {
+ rust_fatal_error (expr.get_locus (),
+ "did not resolve type for this TupleExpr");
+ return;
+ }
+
+ tree tuple_type = TyTyResolveCompile::compile (ctx, tyty);
+ rust_assert (tuple_type != nullptr);
+
+ // this assumes all fields are in order from type resolution
+ std::vector<tree> vals;
+ for (auto &elem : expr.get_tuple_elems ())
+ {
+ auto e = CompileExpr::Compile (elem.get (), ctx);
+ vals.push_back (e);
+ }
+
+ translated
+ = ctx->get_backend ()->constructor_expression (tuple_type, false, vals, -1,
+ expr.get_locus ());
+}
+
+void
+CompileExpr::visit (HIR::ReturnExpr &expr)
+{
+ auto fncontext = ctx->peek_fn ();
+
+ std::vector<tree> retstmts;
+ if (expr.has_return_expr ())
+ {
+ tree compiled_expr = CompileExpr::Compile (expr.return_expr.get (), ctx);
+ rust_assert (compiled_expr != nullptr);
+
+ retstmts.push_back (compiled_expr);
+ }
+
+ auto s = ctx->get_backend ()->return_statement (fncontext.fndecl, retstmts,
+ expr.get_locus ());
+ ctx->add_statement (s);
+}
+
+void
+CompileExpr::visit (HIR::ArithmeticOrLogicalExpr &expr)
+{
+ auto op = expr.get_expr_type ();
+ auto lhs = CompileExpr::Compile (expr.get_lhs (), ctx);
+ auto rhs = CompileExpr::Compile (expr.get_rhs (), ctx);
+
+ // this might be an operator overload situation, let's check
+ TyTy::FnType *fntype;
+ bool is_op_overload = ctx->get_tyctx ()->lookup_operator_overload (
+ expr.get_mappings ().get_hirid (), &fntype);
+ if (is_op_overload)
+ {
+ auto lang_item_type
+ = Analysis::RustLangItem::OperatorToLangItem (expr.get_expr_type ());
+ translated = resolve_operator_overload (lang_item_type, expr, lhs, rhs,
+ expr.get_lhs (), expr.get_rhs ());
+ return;
+ }
+
+ if (ctx->in_fn () && !ctx->const_context_p ())
+ {
+ auto receiver_tmp = NULL_TREE;
+ auto receiver
+ = ctx->get_backend ()->temporary_variable (ctx->peek_fn ().fndecl,
+ NULL_TREE, TREE_TYPE (lhs),
+ lhs, true, expr.get_locus (),
+ &receiver_tmp);
+ auto check
+ = ctx->get_backend ()->arithmetic_or_logical_expression_checked (
+ op, lhs, rhs, expr.get_locus (), receiver);
+
+ ctx->add_statement (check);
+ translated = receiver->get_tree (expr.get_locus ());
+ }
+ else
+ {
+ translated = ctx->get_backend ()->arithmetic_or_logical_expression (
+ op, lhs, rhs, expr.get_locus ());
+ }
+}
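+
+// Roughly, inside a non-const function body an expression such as `a + b`
+// is lowered through a temporary so that the checked variant can flag
+// overflow before the result is read. A sketch of the shape (not the exact
+// GENERIC that is produced):
+//
+//   tmp = a;
+//   <checked a + b, storing the result into tmp>
+//   ... tmp is then used as the value of the whole expression ...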
+
+void
+CompileExpr::visit (HIR::CompoundAssignmentExpr &expr)
+{
+ auto op = expr.get_expr_type ();
+ auto lhs = CompileExpr::Compile (expr.get_left_expr ().get (), ctx);
+ auto rhs = CompileExpr::Compile (expr.get_right_expr ().get (), ctx);
+
+ // this might be an operator overload situation, let's check
+ TyTy::FnType *fntype;
+ bool is_op_overload = ctx->get_tyctx ()->lookup_operator_overload (
+ expr.get_mappings ().get_hirid (), &fntype);
+ if (is_op_overload)
+ {
+ auto lang_item_type
+ = Analysis::RustLangItem::CompoundAssignmentOperatorToLangItem (
+ expr.get_expr_type ());
+ auto compound_assignment
+ = resolve_operator_overload (lang_item_type, expr, lhs, rhs,
+ expr.get_left_expr ().get (),
+ expr.get_right_expr ().get ());
+ ctx->add_statement (compound_assignment);
+
+ return;
+ }
+
+ if (ctx->in_fn () && !ctx->const_context_p ())
+ {
+ auto tmp = NULL_TREE;
+ auto receiver
+ = ctx->get_backend ()->temporary_variable (ctx->peek_fn ().fndecl,
+ NULL_TREE, TREE_TYPE (lhs),
+ lhs, true, expr.get_locus (),
+ &tmp);
+ auto check
+ = ctx->get_backend ()->arithmetic_or_logical_expression_checked (
+ op, lhs, rhs, expr.get_locus (), receiver);
+ ctx->add_statement (check);
+
+ translated = ctx->get_backend ()->assignment_statement (
+ lhs, receiver->get_tree (expr.get_locus ()), expr.get_locus ());
+ }
+ else
+ {
+ translated = ctx->get_backend ()->arithmetic_or_logical_expression (
+ op, lhs, rhs, expr.get_locus ());
+ }
+}
+
+void
+CompileExpr::visit (HIR::NegationExpr &expr)
+{
+ auto op = expr.get_expr_type ();
+ auto negated_expr = CompileExpr::Compile (expr.get_expr ().get (), ctx);
+ auto location = expr.get_locus ();
+
+ // this might be an operator overload situation, let's check
+ TyTy::FnType *fntype;
+ bool is_op_overload = ctx->get_tyctx ()->lookup_operator_overload (
+ expr.get_mappings ().get_hirid (), &fntype);
+ if (is_op_overload)
+ {
+ auto lang_item_type
+ = Analysis::RustLangItem::NegationOperatorToLangItem (op);
+ translated
+ = resolve_operator_overload (lang_item_type, expr, negated_expr,
+ nullptr, expr.get_expr ().get (), nullptr);
+ return;
+ }
+
+ translated
+ = ctx->get_backend ()->negation_expression (op, negated_expr, location);
+}
+
+void
+CompileExpr::visit (HIR::ComparisonExpr &expr)
+{
+ auto op = expr.get_expr_type ();
+ auto lhs = CompileExpr::Compile (expr.get_lhs (), ctx);
+ auto rhs = CompileExpr::Compile (expr.get_rhs (), ctx);
+ auto location = expr.get_locus ();
+
+ translated
+ = ctx->get_backend ()->comparison_expression (op, lhs, rhs, location);
+}
+
+void
+CompileExpr::visit (HIR::LazyBooleanExpr &expr)
+{
+ auto op = expr.get_expr_type ();
+ auto lhs = CompileExpr::Compile (expr.get_lhs (), ctx);
+ auto rhs = CompileExpr::Compile (expr.get_rhs (), ctx);
+ auto location = expr.get_locus ();
+
+ translated
+ = ctx->get_backend ()->lazy_boolean_expression (op, lhs, rhs, location);
+}
+
+void
+CompileExpr::visit (HIR::TypeCastExpr &expr)
+{
+ TyTy::BaseType *type_to_cast_to_ty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (),
+ &type_to_cast_to_ty))
+ {
+ translated = error_mark_node;
+ return;
+ }
+
+ TyTy::BaseType *casted_tyty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (
+ expr.get_casted_expr ()->get_mappings ().get_hirid (), &casted_tyty))
+ {
+ translated = error_mark_node;
+ return;
+ }
+
+ auto type_to_cast_to = TyTyResolveCompile::compile (ctx, type_to_cast_to_ty);
+ auto casted_expr = CompileExpr::Compile (expr.get_casted_expr ().get (), ctx);
+
+ std::vector<Resolver::Adjustment> *adjustments = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_cast_autoderef_mappings (
+ expr.get_mappings ().get_hirid (), &adjustments);
+ if (ok)
+ {
+ casted_expr
+ = resolve_adjustements (*adjustments, casted_expr, expr.get_locus ());
+ }
+
+ translated
+ = type_cast_expression (type_to_cast_to, casted_expr, expr.get_locus ());
+}
+
+void
+CompileExpr::visit (HIR::IfExpr &expr)
+{
+ auto stmt = CompileConditionalBlocks::compile (&expr, ctx, nullptr);
+ ctx->add_statement (stmt);
+}
+
+void
+CompileExpr::visit (HIR::IfExprConseqElse &expr)
+{
+ TyTy::BaseType *if_type = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (),
+ &if_type))
+ {
+ rust_error_at (expr.get_locus (),
+ "failed to lookup type of IfExprConseqElse");
+ return;
+ }
+
+ Bvariable *tmp = NULL;
+ bool needs_temp = !if_type->is_unit ();
+ if (needs_temp)
+ {
+ fncontext fnctx = ctx->peek_fn ();
+ tree enclosing_scope = ctx->peek_enclosing_scope ();
+ tree block_type = TyTyResolveCompile::compile (ctx, if_type);
+
+ bool is_address_taken = false;
+ tree ret_var_stmt = nullptr;
+ tmp = ctx->get_backend ()->temporary_variable (
+ fnctx.fndecl, enclosing_scope, block_type, NULL, is_address_taken,
+ expr.get_locus (), &ret_var_stmt);
+ ctx->add_statement (ret_var_stmt);
+ }
+
+ auto stmt = CompileConditionalBlocks::compile (&expr, ctx, tmp);
+ ctx->add_statement (stmt);
+
+ if (tmp != NULL)
+ {
+ translated = ctx->get_backend ()->var_expression (tmp, expr.get_locus ());
+ }
+}
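+
+// E.g. (hypothetical Rust input) for `let x = if c { 1 } else { 2 };` the
+// if-expression has the non-unit type i32, so a temporary is created above,
+// the compiled conditional blocks assign their value into it, and reading
+// that temporary back yields the translated expression.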
+
+void
+CompileExpr::visit (HIR::IfExprConseqIf &expr)
+{
+ TyTy::BaseType *if_type = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (),
+ &if_type))
+ {
+ rust_error_at (expr.get_locus (),
+ "failed to lookup type of IfExprConseqElse");
+ return;
+ }
+
+ Bvariable *tmp = NULL;
+ bool needs_temp = !if_type->is_unit ();
+ if (needs_temp)
+ {
+ fncontext fnctx = ctx->peek_fn ();
+ tree enclosing_scope = ctx->peek_enclosing_scope ();
+ tree block_type = TyTyResolveCompile::compile (ctx, if_type);
+
+ bool is_address_taken = false;
+ tree ret_var_stmt = nullptr;
+ tmp = ctx->get_backend ()->temporary_variable (
+ fnctx.fndecl, enclosing_scope, block_type, NULL, is_address_taken,
+ expr.get_locus (), &ret_var_stmt);
+ ctx->add_statement (ret_var_stmt);
+ }
+
+ auto stmt = CompileConditionalBlocks::compile (&expr, ctx, tmp);
+ ctx->add_statement (stmt);
+
+ if (tmp != NULL)
+ {
+ translated = ctx->get_backend ()->var_expression (tmp, expr.get_locus ());
+ }
+}
+
+void
+CompileExpr::visit (HIR::BlockExpr &expr)
+{
+ TyTy::BaseType *block_tyty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (),
+ &block_tyty))
+ {
+ rust_error_at (expr.get_locus (), "failed to lookup type of BlockExpr");
+ return;
+ }
+
+ Bvariable *tmp = NULL;
+ bool needs_temp = !block_tyty->is_unit ();
+ if (needs_temp)
+ {
+ fncontext fnctx = ctx->peek_fn ();
+ tree enclosing_scope = ctx->peek_enclosing_scope ();
+ tree block_type = TyTyResolveCompile::compile (ctx, block_tyty);
+
+ bool is_address_taken = false;
+ tree ret_var_stmt = nullptr;
+ tmp = ctx->get_backend ()->temporary_variable (
+ fnctx.fndecl, enclosing_scope, block_type, NULL, is_address_taken,
+ expr.get_locus (), &ret_var_stmt);
+ ctx->add_statement (ret_var_stmt);
+ }
+
+ auto block_stmt = CompileBlock::compile (&expr, ctx, tmp);
+ rust_assert (TREE_CODE (block_stmt) == BIND_EXPR);
+ ctx->add_statement (block_stmt);
+
+ if (tmp != NULL)
+ {
+ translated = ctx->get_backend ()->var_expression (tmp, expr.get_locus ());
+ }
+}
+
+void
+CompileExpr::visit (HIR::UnsafeBlockExpr &expr)
+{
+ expr.get_block_expr ()->accept_vis (*this);
+}
+
+void
+CompileExpr::visit (HIR::StructExprStruct &struct_expr)
+{
+ TyTy::BaseType *tyty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (struct_expr.get_mappings ().get_hirid (),
+ &tyty))
+ {
+ rust_error_at (struct_expr.get_locus (), "unknown type");
+ return;
+ }
+
+ rust_assert (tyty->is_unit ());
+ translated = ctx->get_backend ()->unit_expression ();
+}
+
+void
+CompileExpr::visit (HIR::StructExprStructFields &struct_expr)
+{
+ TyTy::BaseType *tyty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (struct_expr.get_mappings ().get_hirid (),
+ &tyty))
+ {
+ rust_error_at (struct_expr.get_locus (), "unknown type");
+ return;
+ }
+
+ // it must be an ADT
+ rust_assert (tyty->get_kind () == TyTy::TypeKind::ADT);
+ TyTy::ADTType *adt = static_cast<TyTy::ADTType *> (tyty);
+
+ // what variant is it?
+ int union_discriminator = struct_expr.union_index;
+ TyTy::VariantDef *variant = nullptr;
+ if (!adt->is_enum ())
+ {
+ rust_assert (adt->number_of_variants () == 1);
+ variant = adt->get_variants ().at (0);
+ }
+ else
+ {
+ HirId variant_id;
+ bool ok = ctx->get_tyctx ()->lookup_variant_definition (
+ struct_expr.get_struct_name ().get_mappings ().get_hirid (),
+ &variant_id);
+ rust_assert (ok);
+
+ ok
+ = adt->lookup_variant_by_id (variant_id, &variant, &union_discriminator);
+ rust_assert (ok);
+ }
+
+ // compile it
+ tree compiled_adt_type = TyTyResolveCompile::compile (ctx, tyty);
+
+ std::vector<tree> arguments;
+ if (adt->is_union ())
+ {
+ rust_assert (struct_expr.get_fields ().size () == 1);
+
+ // assignments are coercion sites, so let's convert the rvalue if
+ // necessary
+ auto respective_field = variant->get_field_at_index (union_discriminator);
+ auto expected = respective_field->get_field_type ();
+
+ // process arguments
+ auto &argument = struct_expr.get_fields ().at (0);
+ auto lvalue_locus
+ = ctx->get_mappings ()->lookup_location (expected->get_ty_ref ());
+ auto rvalue_locus = argument->get_locus ();
+ auto rvalue = CompileStructExprField::Compile (argument.get (), ctx);
+
+ TyTy::BaseType *actual = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_type (
+ argument->get_mappings ().get_hirid (), &actual);
+
+ if (ok)
+ {
+ rvalue
+ = coercion_site (argument->get_mappings ().get_hirid (), rvalue,
+ actual, expected, lvalue_locus, rvalue_locus);
+ }
+
+ // add it to the list
+ arguments.push_back (rvalue);
+ }
+ else
+ {
+ // this assumes all fields are in order from type resolution, and if a
+ // base struct was specified those fields are filled in via accessors
+ for (size_t i = 0; i < struct_expr.get_fields ().size (); i++)
+ {
+ // assignments are coercion sites, so let's convert the rvalue if
+ // necessary
+ auto respective_field = variant->get_field_at_index (i);
+ auto expected = respective_field->get_field_type ();
+
+ // process arguments
+ auto &argument = struct_expr.get_fields ().at (i);
+ auto lvalue_locus
+ = ctx->get_mappings ()->lookup_location (expected->get_ty_ref ());
+ auto rvalue_locus = argument->get_locus ();
+ auto rvalue = CompileStructExprField::Compile (argument.get (), ctx);
+
+ TyTy::BaseType *actual = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_type (
+ argument->get_mappings ().get_hirid (), &actual);
+
+ // coerce it if required/possible see
+ // compile/torture/struct_base_init_1.rs
+ if (ok)
+ {
+ rvalue
+ = coercion_site (argument->get_mappings ().get_hirid (), rvalue,
+ actual, expected, lvalue_locus, rvalue_locus);
+ }
+
+ // add it to the list
+ arguments.push_back (rvalue);
+ }
+ }
+
+ // the constructor depends on whether this is actually an enum or not: if
+ // it's an enum we need to set up the discriminant
+ std::vector<tree> ctor_arguments;
+ if (adt->is_enum ())
+ {
+ HIR::Expr *discrim_expr = variant->get_discriminant ();
+ tree discrim_expr_node = CompileExpr::Compile (discrim_expr, ctx);
+ tree folded_discrim_expr = fold_expr (discrim_expr_node);
+ tree qualifier = folded_discrim_expr;
+
+ ctor_arguments.push_back (qualifier);
+ }
+ for (auto &arg : arguments)
+ ctor_arguments.push_back (arg);
+
+ translated = ctx->get_backend ()->constructor_expression (
+ compiled_adt_type, adt->is_enum (), ctor_arguments, union_discriminator,
+ struct_expr.get_locus ());
+}
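+
+// As an illustration (hypothetical Rust input), for
+//
+//   enum Foo { A (i32), B }
+//   let x = Foo::A (1);
+//
+// the ADT is an enum, so ctor_arguments above holds the folded discriminant
+// of variant A followed by the coerced field value 1.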
+
+void
+CompileExpr::visit (HIR::GroupedExpr &expr)
+{
+ translated = CompileExpr::Compile (expr.get_expr_in_parens ().get (), ctx);
+}
+
+void
+CompileExpr::visit (HIR::FieldAccessExpr &expr)
+{
+ HIR::Expr *receiver_expr = expr.get_receiver_expr ().get ();
+ tree receiver_ref = CompileExpr::Compile (receiver_expr, ctx);
+
+ // resolve the receiver back to ADT type
+ TyTy::BaseType *receiver = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (
+ expr.get_receiver_expr ()->get_mappings ().get_hirid (), &receiver))
+ {
+ rust_error_at (expr.get_receiver_expr ()->get_locus (),
+ "unresolved type for receiver");
+ return;
+ }
+
+ size_t field_index = 0;
+ if (receiver->get_kind () == TyTy::TypeKind::ADT)
+ {
+ TyTy::ADTType *adt = static_cast<TyTy::ADTType *> (receiver);
+ rust_assert (!adt->is_enum ());
+ rust_assert (adt->number_of_variants () == 1);
+
+ TyTy::VariantDef *variant = adt->get_variants ().at (0);
+ bool ok
+ = variant->lookup_field (expr.get_field_name (), nullptr, &field_index);
+ rust_assert (ok);
+ }
+ else if (receiver->get_kind () == TyTy::TypeKind::REF)
+ {
+ TyTy::ReferenceType *r = static_cast<TyTy::ReferenceType *> (receiver);
+ TyTy::BaseType *b = r->get_base ();
+ rust_assert (b->get_kind () == TyTy::TypeKind::ADT);
+
+ TyTy::ADTType *adt = static_cast<TyTy::ADTType *> (b);
+ rust_assert (!adt->is_enum ());
+ rust_assert (adt->number_of_variants () == 1);
+
+ TyTy::VariantDef *variant = adt->get_variants ().at (0);
+ bool ok
+ = variant->lookup_field (expr.get_field_name (), nullptr, &field_index);
+ rust_assert (ok);
+
+ tree indirect = indirect_expression (receiver_ref, expr.get_locus ());
+ receiver_ref = indirect;
+ }
+
+ translated
+ = ctx->get_backend ()->struct_field_expression (receiver_ref, field_index,
+ expr.get_locus ());
+}
+
+void
+CompileExpr::visit (HIR::QualifiedPathInExpression &expr)
+{
+ translated = ResolvePathRef::Compile (expr, ctx);
+}
+
+void
+CompileExpr::visit (HIR::PathInExpression &expr)
+{
+ translated = ResolvePathRef::Compile (expr, ctx);
+}
+
+void
+CompileExpr::visit (HIR::LoopExpr &expr)
+{
+ TyTy::BaseType *block_tyty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (),
+ &block_tyty))
+ {
+ rust_error_at (expr.get_locus (), "failed to lookup type of BlockExpr");
+ return;
+ }
+
+ fncontext fnctx = ctx->peek_fn ();
+ tree enclosing_scope = ctx->peek_enclosing_scope ();
+ tree block_type = TyTyResolveCompile::compile (ctx, block_tyty);
+
+ bool is_address_taken = false;
+ tree ret_var_stmt = NULL_TREE;
+ Bvariable *tmp = ctx->get_backend ()->temporary_variable (
+ fnctx.fndecl, enclosing_scope, block_type, NULL, is_address_taken,
+ expr.get_locus (), &ret_var_stmt);
+ ctx->add_statement (ret_var_stmt);
+ ctx->push_loop_context (tmp);
+
+ if (expr.has_loop_label ())
+ {
+ HIR::LoopLabel &loop_label = expr.get_loop_label ();
+ tree label
+ = ctx->get_backend ()->label (fnctx.fndecl,
+ loop_label.get_lifetime ().get_name (),
+ loop_label.get_locus ());
+ tree label_decl = ctx->get_backend ()->label_definition_statement (label);
+ ctx->add_statement (label_decl);
+ ctx->insert_label_decl (
+ loop_label.get_lifetime ().get_mappings ().get_hirid (), label);
+ }
+
+ tree loop_begin_label
+ = ctx->get_backend ()->label (fnctx.fndecl, "", expr.get_locus ());
+ tree loop_begin_label_decl
+ = ctx->get_backend ()->label_definition_statement (loop_begin_label);
+ ctx->add_statement (loop_begin_label_decl);
+ ctx->push_loop_begin_label (loop_begin_label);
+
+ tree code_block
+ = CompileBlock::compile (expr.get_loop_block ().get (), ctx, nullptr);
+ tree loop_expr
+ = ctx->get_backend ()->loop_expression (code_block, expr.get_locus ());
+ ctx->add_statement (loop_expr);
+
+ ctx->pop_loop_context ();
+ translated = ctx->get_backend ()->var_expression (tmp, expr.get_locus ());
+
+ ctx->pop_loop_begin_label ();
+}
+
+void
+CompileExpr::visit (HIR::WhileLoopExpr &expr)
+{
+ fncontext fnctx = ctx->peek_fn ();
+ if (expr.has_loop_label ())
+ {
+ HIR::LoopLabel &loop_label = expr.get_loop_label ();
+ tree label
+ = ctx->get_backend ()->label (fnctx.fndecl,
+ loop_label.get_lifetime ().get_name (),
+ loop_label.get_locus ());
+ tree label_decl = ctx->get_backend ()->label_definition_statement (label);
+ ctx->add_statement (label_decl);
+ ctx->insert_label_decl (
+ loop_label.get_lifetime ().get_mappings ().get_hirid (), label);
+ }
+
+ std::vector<Bvariable *> locals;
+ Location start_location = expr.get_loop_block ()->get_locus ();
+ Location end_location = expr.get_loop_block ()->get_locus (); // FIXME
+
+ tree enclosing_scope = ctx->peek_enclosing_scope ();
+ tree loop_block
+ = ctx->get_backend ()->block (fnctx.fndecl, enclosing_scope, locals,
+ start_location, end_location);
+ ctx->push_block (loop_block);
+
+ tree loop_begin_label
+ = ctx->get_backend ()->label (fnctx.fndecl, "", expr.get_locus ());
+ tree loop_begin_label_decl
+ = ctx->get_backend ()->label_definition_statement (loop_begin_label);
+ ctx->add_statement (loop_begin_label_decl);
+ ctx->push_loop_begin_label (loop_begin_label);
+
+ tree condition
+ = CompileExpr::Compile (expr.get_predicate_expr ().get (), ctx);
+ tree exit_condition
+ = fold_build1_loc (expr.get_locus ().gcc_location (), TRUTH_NOT_EXPR,
+ boolean_type_node, condition);
+ tree exit_expr
+ = ctx->get_backend ()->exit_expression (exit_condition, expr.get_locus ());
+ ctx->add_statement (exit_expr);
+
+ tree code_block_stmt
+ = CompileBlock::compile (expr.get_loop_block ().get (), ctx, nullptr);
+ rust_assert (TREE_CODE (code_block_stmt) == BIND_EXPR);
+ ctx->add_statement (code_block_stmt);
+
+ ctx->pop_loop_begin_label ();
+ ctx->pop_block ();
+
+ tree loop_expr
+ = ctx->get_backend ()->loop_expression (loop_block, expr.get_locus ());
+ ctx->add_statement (loop_expr);
+}
+
+void
+CompileExpr::visit (HIR::BreakExpr &expr)
+{
+ if (expr.has_break_expr ())
+ {
+ tree compiled_expr = CompileExpr::Compile (expr.get_expr ().get (), ctx);
+
+ Bvariable *loop_result_holder = ctx->peek_loop_context ();
+ tree result_reference
+ = ctx->get_backend ()->var_expression (loop_result_holder,
+ expr.get_expr ()->get_locus ());
+
+ tree assignment
+ = ctx->get_backend ()->assignment_statement (result_reference,
+ compiled_expr,
+ expr.get_locus ());
+ ctx->add_statement (assignment);
+ }
+
+ if (expr.has_label ())
+ {
+ NodeId resolved_node_id = UNKNOWN_NODEID;
+ if (!ctx->get_resolver ()->lookup_resolved_label (
+ expr.get_label ().get_mappings ().get_nodeid (), &resolved_node_id))
+ {
+ rust_error_at (
+ expr.get_label ().get_locus (),
+ "failed to resolve compiled label for label %s",
+ expr.get_label ().get_mappings ().as_string ().c_str ());
+ return;
+ }
+
+ HirId ref = UNKNOWN_HIRID;
+ if (!ctx->get_mappings ()->lookup_node_to_hir (resolved_node_id, &ref))
+ {
+ rust_fatal_error (expr.get_locus (), "reverse lookup label failure");
+ return;
+ }
+
+ tree label = NULL_TREE;
+ if (!ctx->lookup_label_decl (ref, &label))
+ {
+ rust_error_at (expr.get_label ().get_locus (),
+ "failed to lookup compiled label");
+ return;
+ }
+
+ tree goto_label
+ = ctx->get_backend ()->goto_statement (label, expr.get_locus ());
+ ctx->add_statement (goto_label);
+ }
+ else
+ {
+ tree exit_expr = ctx->get_backend ()->exit_expression (
+ ctx->get_backend ()->boolean_constant_expression (true),
+ expr.get_locus ());
+ ctx->add_statement (exit_expr);
+ }
+}
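+
+// E.g. (hypothetical Rust input) `break 42` inside `let x = loop { ... };`
+// compiles to an assignment of 42 into the loop's result temporary followed
+// by an unconditional exit_expression, while `break 'outer` instead becomes
+// a goto to the compiled label for 'outer.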
+
+void
+CompileExpr::visit (HIR::ContinueExpr &expr)
+{
+ tree label = ctx->peek_loop_begin_label ();
+ if (expr.has_label ())
+ {
+ NodeId resolved_node_id = UNKNOWN_NODEID;
+ if (!ctx->get_resolver ()->lookup_resolved_label (
+ expr.get_label ().get_mappings ().get_nodeid (), &resolved_node_id))
+ {
+ rust_error_at (
+ expr.get_label ().get_locus (),
+ "failed to resolve compiled label for label %s",
+ expr.get_label ().get_mappings ().as_string ().c_str ());
+ return;
+ }
+
+ HirId ref = UNKNOWN_HIRID;
+ if (!ctx->get_mappings ()->lookup_node_to_hir (resolved_node_id, &ref))
+ {
+ rust_fatal_error (expr.get_locus (), "reverse lookup label failure");
+ return;
+ }
+
+ if (!ctx->lookup_label_decl (ref, &label))
+ {
+ rust_error_at (expr.get_label ().get_locus (),
+ "failed to lookup compiled label");
+ return;
+ }
+ }
+
+ translated = ctx->get_backend ()->goto_statement (label, expr.get_locus ());
+}
+
+void
+CompileExpr::visit (HIR::BorrowExpr &expr)
+{
+ tree main_expr = CompileExpr::Compile (expr.get_expr ().get (), ctx);
+ if (SLICE_TYPE_P (TREE_TYPE (main_expr)))
+ {
+ translated = main_expr;
+ return;
+ }
+
+ TyTy::BaseType *tyty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (),
+ &tyty))
+ return;
+
+ translated = address_expression (main_expr, expr.get_locus ());
+}
+
+void
+CompileExpr::visit (HIR::DereferenceExpr &expr)
+{
+ TyTy::BaseType *tyty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (),
+ &tyty))
+ {
+ rust_fatal_error (expr.get_locus (),
+ "did not resolve type for this TupleExpr");
+ return;
+ }
+
+ tree main_expr = CompileExpr::Compile (expr.get_expr ().get (), ctx);
+
+ // this might be an operator overload situation, let's check
+ TyTy::FnType *fntype;
+ bool is_op_overload = ctx->get_tyctx ()->lookup_operator_overload (
+ expr.get_mappings ().get_hirid (), &fntype);
+ if (is_op_overload)
+ {
+ auto lang_item_type = Analysis::RustLangItem::ItemType::DEREF;
+ tree operator_overload_call
+ = resolve_operator_overload (lang_item_type, expr, main_expr, nullptr,
+ expr.get_expr ().get (), nullptr);
+
+ // rust deref always returns a reference from this overload then we can
+ // actually do the indirection
+ main_expr = operator_overload_call;
+ }
+
+ tree expected_type = TyTyResolveCompile::compile (ctx, tyty);
+ if (SLICE_TYPE_P (TREE_TYPE (main_expr)) && SLICE_TYPE_P (expected_type))
+ {
+ translated = main_expr;
+ return;
+ }
+
+ translated = indirect_expression (main_expr, expr.get_locus ());
+}
+
+void
+CompileExpr::visit (HIR::LiteralExpr &expr)
+{
+ TyTy::BaseType *tyty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (),
+ &tyty))
+ return;
+
+ switch (expr.get_lit_type ())
+ {
+ case HIR::Literal::BOOL:
+ translated = compile_bool_literal (expr, tyty);
+ return;
+
+ case HIR::Literal::INT:
+ translated = compile_integer_literal (expr, tyty);
+ return;
+
+ case HIR::Literal::FLOAT:
+ translated = compile_float_literal (expr, tyty);
+ return;
+
+ case HIR::Literal::CHAR:
+ translated = compile_char_literal (expr, tyty);
+ return;
+
+ case HIR::Literal::BYTE:
+ translated = compile_byte_literal (expr, tyty);
+ return;
+
+ case HIR::Literal::STRING:
+ translated = compile_string_literal (expr, tyty);
+ return;
+
+ case HIR::Literal::BYTE_STRING:
+ translated = compile_byte_string_literal (expr, tyty);
+ return;
+ }
+}
+
+void
+CompileExpr::visit (HIR::AssignmentExpr &expr)
+{
+ auto lvalue = CompileExpr::Compile (expr.get_lhs (), ctx);
+ auto rvalue = CompileExpr::Compile (expr.get_rhs (), ctx);
+
+ // assignments are coercion sites, so let's convert the rvalue if necessary
+ TyTy::BaseType *expected = nullptr;
+ TyTy::BaseType *actual = nullptr;
+
+ bool ok;
+ ok = ctx->get_tyctx ()->lookup_type (
+ expr.get_lhs ()->get_mappings ().get_hirid (), &expected);
+ rust_assert (ok);
+
+ ok = ctx->get_tyctx ()->lookup_type (
+ expr.get_rhs ()->get_mappings ().get_hirid (), &actual);
+ rust_assert (ok);
+
+ rvalue = coercion_site (expr.get_mappings ().get_hirid (), rvalue, actual,
+ expected, expr.get_lhs ()->get_locus (),
+ expr.get_rhs ()->get_locus ());
+
+ tree assignment
+ = ctx->get_backend ()->assignment_statement (lvalue, rvalue,
+ expr.get_locus ());
+
+ ctx->add_statement (assignment);
+}
+
+// Helper for sort_tuple_patterns.
+// Determine whether Patterns a and b are really the same pattern.
+// FIXME: This is a nasty hack to avoid properly implementing a comparison
+// for Patterns, which we really probably do want at some point.
+static bool
+patterns_mergeable (HIR::Pattern *a, HIR::Pattern *b)
+{
+ if (!a || !b)
+ return false;
+
+ HIR::Pattern::PatternType pat_type = a->get_pattern_type ();
+ if (b->get_pattern_type () != pat_type)
+ return false;
+
+ switch (pat_type)
+ {
+ case HIR::Pattern::PatternType::PATH: {
+ // FIXME: this is far too naive
+ HIR::PathPattern &aref = *static_cast<HIR::PathPattern *> (a);
+ HIR::PathPattern &bref = *static_cast<HIR::PathPattern *> (b);
+ if (aref.get_num_segments () != bref.get_num_segments ())
+ return false;
+
+ const auto &asegs = aref.get_segments ();
+ const auto &bsegs = bref.get_segments ();
+ for (size_t i = 0; i < asegs.size (); i++)
+ {
+ if (asegs[i].as_string () != bsegs[i].as_string ())
+ return false;
+ }
+ return true;
+ }
+ break;
+ case HIR::Pattern::PatternType::LITERAL: {
+ HIR::LiteralPattern &aref = *static_cast<HIR::LiteralPattern *> (a);
+ HIR::LiteralPattern &bref = *static_cast<HIR::LiteralPattern *> (b);
+ return aref.get_literal ().is_equal (bref.get_literal ());
+ }
+ break;
+ case HIR::Pattern::PatternType::IDENTIFIER: {
+ // TODO
+ }
+ break;
+ case HIR::Pattern::PatternType::WILDCARD:
+ return true;
+ break;
+
+ // TODO
+
+ default:;
+ }
+ return false;
+}
+
+// A little container for rearranging the patterns and cases in a match
+// expression while simplifying.
+struct PatternMerge
+{
+ std::unique_ptr<HIR::MatchCase> wildcard;
+ std::vector<std::unique_ptr<HIR::Pattern>> heads;
+ std::vector<std::vector<HIR::MatchCase>> cases;
+};
+
+// Helper for simplify_tuple_match.
+// For each tuple pattern in a given match, pull out the first elt of the
+// tuple and construct a new MatchCase with the remaining tuple elts as the
+// pattern. Return a mapping from each _unique_ first tuple element to a
+// vec of cases for a new match.
+//
+// FIXME: This used to be a std::map<Pattern, Vec<MatchCase>>, but it doesn't
+// actually work like we want - the Pattern includes an HIR ID, which is unique
+// per Pattern object. This means we don't have a good means for comparing
+// Patterns. It would probably be best to actually implement a means of
+// properly comparing patterns, and then use an actual map.
+//
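+// For example, the arms of
+//   match (tupA, tupB, tupC) {
+//     (a1, b1, c1) => { blk1 },
+//     (a2, b2, c2) => { blk2 },
+//     (a1, b3, c3) => { blk3 },
+//   }
+// sort into heads [a1, a2] with cases
+//   a1 -> [(b1, c1) => { blk1 }, (b3, c3) => { blk3 }]
+//   a2 -> [(b2, c2) => { blk2 }]
+//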
+static struct PatternMerge
+sort_tuple_patterns (HIR::MatchExpr &expr)
+{
+ rust_assert (expr.get_scrutinee_expr ()->get_expression_type ()
+ == HIR::Expr::ExprType::Tuple);
+
+ struct PatternMerge result;
+ result.wildcard = nullptr;
+ result.heads = std::vector<std::unique_ptr<HIR::Pattern>> ();
+ result.cases = std::vector<std::vector<HIR::MatchCase>> ();
+
+ for (auto &match_case : expr.get_match_cases ())
+ {
+ HIR::MatchArm &case_arm = match_case.get_arm ();
+
+ // FIXME: Note we are only dealing with the first pattern in the arm.
+ // The patterns vector in the arm might hold many patterns, which are the
+ // patterns separated by the '|' token. Rustc abstracts these as "Or"
+ // patterns, and part of its simplification process is to get rid of them.
+ // We should get rid of the ORs too, maybe here or earlier than here?
+ auto pat = case_arm.get_patterns ()[0]->clone_pattern ();
+
+ // Record wildcards so we can add them in inner matches.
+ if (pat->get_pattern_type () == HIR::Pattern::PatternType::WILDCARD)
+ {
+ // The *whole* pattern is a wild card (_).
+ result.wildcard
+ = std::unique_ptr<HIR::MatchCase> (new HIR::MatchCase (match_case));
+ continue;
+ }
+
+ rust_assert (pat->get_pattern_type ()
+ == HIR::Pattern::PatternType::TUPLE);
+
+ auto ref = *static_cast<HIR::TuplePattern *> (pat.get ());
+
+ rust_assert (ref.has_tuple_pattern_items ());
+
+ auto items
+ = HIR::TuplePattern (ref).get_items ()->clone_tuple_pattern_items ();
+ if (items->get_pattern_type ()
+ == HIR::TuplePatternItems::TuplePatternItemType::MULTIPLE)
+ {
+ auto items_ref
+ = *static_cast<HIR::TuplePatternItemsMultiple *> (items.get ());
+
+ // Pop the first pattern out
+ auto patterns = std::vector<std::unique_ptr<HIR::Pattern>> ();
+ auto first = items_ref.get_patterns ()[0]->clone_pattern ();
+ for (auto p = items_ref.get_patterns ().begin () + 1;
+ p != items_ref.get_patterns ().end (); p++)
+ {
+ patterns.push_back ((*p)->clone_pattern ());
+ }
+
+ // if there is only one pattern left, don't make a tuple out of it
+ std::unique_ptr<HIR::Pattern> result_pattern;
+ if (patterns.size () == 1)
+ {
+ result_pattern = std::move (patterns[0]);
+ }
+ else
+ {
+ auto new_items = std::unique_ptr<HIR::TuplePatternItems> (
+ new HIR::TuplePatternItemsMultiple (std::move (patterns)));
+
+ // Construct a TuplePattern from the rest of the patterns
+ result_pattern = std::unique_ptr<HIR::Pattern> (
+ new HIR::TuplePattern (ref.get_pattern_mappings (),
+ std::move (new_items),
+ ref.get_locus ()));
+ }
+
+ // It is unclear why foo has to be built separately here, but
+ // using the { new_tuple } syntax in the new_arm constructor does not
+ // compile.
+ auto foo = std::vector<std::unique_ptr<HIR::Pattern>> ();
+ foo.emplace_back (std::move (result_pattern));
+ HIR::MatchArm new_arm (std::move (foo), Location (), nullptr,
+ AST::AttrVec ());
+
+ HIR::MatchCase new_case (match_case.get_mappings (), new_arm,
+ match_case.get_expr ()->clone_expr ());
+
+ bool pushed = false;
+ for (size_t i = 0; i < result.heads.size (); i++)
+ {
+ if (patterns_mergeable (result.heads[i].get (), first.get ()))
+ {
+ result.cases[i].push_back (new_case);
+ pushed = true;
+ }
+ }
+
+ if (!pushed)
+ {
+ result.heads.push_back (std::move (first));
+ result.cases.push_back ({new_case});
+ }
+ }
+ else /* TuplePatternItemType::RANGED */
+ {
+ // FIXME
+ gcc_unreachable ();
+ }
+ }
+
+ return result;
+}
+
+// Helper for CompileExpr::visit (HIR::MatchExpr).
+// Given a MatchExpr where the scrutinee is some kind of tuple, build an
+// equivalent match where only one element of the tuple is examined at a time.
+// This resulting match can then be lowered to a SWITCH_EXPR tree directly.
+//
+// The approach is as follows:
+// 1. Split the scrutinee and each pattern into the first (head) and the
+// rest (tail).
+// 2. Build a mapping of unique pattern heads to the cases (tail and expr)
+// that shared that pattern head in the original match.
+// (This is the job of sort_tuple_patterns ()).
+// 3. For each unique pattern head, build a new MatchCase where the pattern
+// is the unique head, and the expression is a new match where:
+// - The scrutinee is the tail of the original scrutinee
+// - The cases are those built by the mapping in step 2, i.e. the
+// tails of the patterns and the corresponding expressions from the
+// original match expression.
+// 4. Do this recursively for each inner match, until there is nothing more
+// to simplify.
+// 5. Build the resulting match which scrutinizes the head of the original
+// scrutinee, using the cases built in step 3.
+static HIR::MatchExpr
+simplify_tuple_match (HIR::MatchExpr &expr)
+{
+ if (expr.get_scrutinee_expr ()->get_expression_type ()
+ != HIR::Expr::ExprType::Tuple)
+ return expr;
+
+ auto ref = *static_cast<HIR::TupleExpr *> (expr.get_scrutinee_expr ().get ());
+
+ auto &tail = ref.get_tuple_elems ();
+ rust_assert (tail.size () > 1);
+
+ auto head = std::move (tail[0]);
+ tail.erase (tail.begin (), tail.begin () + 1);
+
+ // e.g.
+ // match (tupA, tupB, tupC) {
+ // (a1, b1, c1) => { blk1 },
+ // (a2, b2, c2) => { blk2 },
+ // (a1, b3, c3) => { blk3 },
+ // }
+ // tail = (tupB, tupC)
+ // head = tupA
+
+ // Make sure the tail is only a tuple if it consists of at least 2 elements.
+ std::unique_ptr<HIR::Expr> remaining;
+ if (tail.size () == 1)
+ remaining = std::move (tail[0]);
+ else
+ remaining = std::unique_ptr<HIR::Expr> (
+ new HIR::TupleExpr (ref.get_mappings (), std::move (tail),
+ AST::AttrVec (), ref.get_outer_attrs (),
+ ref.get_locus ()));
+
+ // e.g.
+ // a1 -> [(b1, c1) => { blk1 },
+ // (b3, c3) => { blk3 }]
+ // a2 -> [(b2, c2) => { blk2 }]
+ struct PatternMerge map = sort_tuple_patterns (expr);
+
+ std::vector<HIR::MatchCase> cases;
+ // Construct the inner match for each unique first elt of the tuple
+ // patterns
+ for (size_t i = 0; i < map.heads.size (); i++)
+ {
+ auto inner_match_cases = map.cases[i];
+
+ // If there is a wildcard at the outer match level, then we need to
+ // propagate the wildcard case into *every* inner match.
+ // FIXME: It is probably not correct to add this unconditionally; what if
+ // we have a pattern like (a, _, c)? Then there is already a wildcard in
+ // the inner matches, and having two will cause two 'default:' blocks,
+ // which is an error.
+ if (map.wildcard != nullptr)
+ {
+ inner_match_cases.push_back (*(map.wildcard.get ()));
+ }
+
+ // match (tupB, tupC) {
+ // (b1, c1) => { blk1 },
+ // (b3, c3) => { blk3 }
+ // }
+ HIR::MatchExpr inner_match (expr.get_mappings (),
+ remaining->clone_expr (), inner_match_cases,
+ AST::AttrVec (), expr.get_outer_attrs (),
+ expr.get_locus ());
+
+ inner_match = simplify_tuple_match (inner_match);
+
+ auto outer_arm_pat = std::vector<std::unique_ptr<HIR::Pattern>> ();
+ outer_arm_pat.emplace_back (map.heads[i]->clone_pattern ());
+
+ HIR::MatchArm outer_arm (std::move (outer_arm_pat), expr.get_locus ());
+
+ // Need to move the inner match to the heap and put it in a unique_ptr to
+ // build the actual match case of the outer expression
+ // auto inner_expr = std::unique_ptr<HIR::Expr> (new HIR::MatchExpr
+ // (inner_match));
+ auto inner_expr = inner_match.clone_expr ();
+
+ // a1 => match (tupB, tupC) { ... }
+ HIR::MatchCase outer_case (expr.get_mappings (), outer_arm,
+ std::move (inner_expr));
+
+ cases.push_back (outer_case);
+ }
+
+ // If there was a wildcard, make sure to include it at the outer match level
+ // too.
+ if (map.wildcard != nullptr)
+ {
+ cases.push_back (*(map.wildcard.get ()));
+ }
+
+ // match tupA {
+ // a1 => match (tupB, tupC) {
+ // (b1, c1) => { blk1 },
+ // (b3, c3) => { blk3 }
+ // }
+ // a2 => match (tupB, tupC) {
+ // (b2, c2) => { blk2 }
+ // }
+ // }
+ HIR::MatchExpr outer_match (expr.get_mappings (), std::move (head), cases,
+ AST::AttrVec (), expr.get_outer_attrs (),
+ expr.get_locus ());
+
+ return outer_match;
+}
+
+// Helper for CompileExpr::visit (HIR::MatchExpr).
+// Check that the scrutinee of EXPR is a valid kind of expression to match on.
+// Return the TypeKind of the scrutinee if it is valid, or TyTy::TypeKind::ERROR
+// if not.
+static TyTy::TypeKind
+check_match_scrutinee (HIR::MatchExpr &expr, Context *ctx)
+{
+ TyTy::BaseType *scrutinee_expr_tyty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (
+ expr.get_scrutinee_expr ()->get_mappings ().get_hirid (),
+ &scrutinee_expr_tyty))
+ {
+ return TyTy::TypeKind::ERROR;
+ }
+
+ TyTy::TypeKind scrutinee_kind = scrutinee_expr_tyty->get_kind ();
+ rust_assert ((TyTy::is_primitive_type_kind (scrutinee_kind)
+ && scrutinee_kind != TyTy::TypeKind::NEVER)
+ || scrutinee_kind == TyTy::TypeKind::ADT
+ || scrutinee_kind == TyTy::TypeKind::TUPLE);
+
+ if (scrutinee_kind == TyTy::TypeKind::ADT)
+ {
+ // this will need to change, but for now, for the first-pass
+ // implementation, let's assert this is the case
+ TyTy::ADTType *adt = static_cast<TyTy::ADTType *> (scrutinee_expr_tyty);
+ rust_assert (adt->is_enum ());
+ rust_assert (adt->number_of_variants () > 0);
+ }
+ else if (scrutinee_kind == TyTy::TypeKind::FLOAT)
+ {
+ // FIXME: CASE_LABEL_EXPR does not support floating point types.
+ // Find another way to compile these.
+ rust_sorry_at (expr.get_locus (),
+ "match on floating-point types is not yet supported");
+ }
+
+ TyTy::BaseType *expr_tyty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (),
+ &expr_tyty))
+ {
+ return TyTy::TypeKind::ERROR;
+ }
+
+ return scrutinee_kind;
+}
+
+void
+CompileExpr::visit (HIR::MatchExpr &expr)
+{
+ // https://gcc.gnu.org/onlinedocs/gccint/Basic-Statements.html#Basic-Statements
+ // TODO
+ // SWITCH_ALL_CASES_P is true if the switch includes a default label or the
+ // case label ranges cover all possible values of the condition expression
+
+ /* Switch expression.
+
+ TREE_TYPE is the original type of the condition, before any
+ language required type conversions. It may be NULL, in which case
+ the original type and final types are assumed to be the same.
+
+ Operand 0 is the expression used to perform the branch,
+ Operand 1 is the body of the switch, which probably contains
+ CASE_LABEL_EXPRs. It may also be NULL, in which case operand 2
+ must not be NULL. */
+ // DEFTREECODE (SWITCH_EXPR, "switch_expr", tcc_statement, 2)
+
+ /* Used to represent a case label.
+
+ Operand 0 is CASE_LOW. It may be NULL_TREE, in which case the label
+ is a 'default' label.
+ Operand 1 is CASE_HIGH. If it is NULL_TREE, the label is a simple
+ (one-value) case label. If it is non-NULL_TREE, the case is a range.
+ Operand 2 is CASE_LABEL, which has the corresponding LABEL_DECL.
+ Operand 3 is CASE_CHAIN. This operand is only used in tree-cfg.cc to
+ speed up the lookup of case labels which use a particular edge in
+ the control flow graph. */
+ // DEFTREECODE (CASE_LABEL_EXPR, "case_label_expr", tcc_statement, 4)
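+
+  /* The lowering built below is therefore, as an illustrative sketch only
+     (not the exact GENERIC we emit):
+
+	 tmp;                       // only when the match produces a value
+	 switch (<discriminant of scrutinee>)
+	   {
+	   case <pattern-1>: <bindings>; tmp = <arm-1>; goto end;
+	   case <pattern-2>: <bindings>; tmp = <arm-2>; goto end;
+	   }
+	 end:;                      // result is tmp, when present  */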
+
+ TyTy::TypeKind scrutinee_kind = check_match_scrutinee (expr, ctx);
+ if (scrutinee_kind == TyTy::TypeKind::ERROR)
+ {
+ translated = error_mark_node;
+ return;
+ }
+
+ TyTy::BaseType *expr_tyty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (),
+ &expr_tyty))
+ {
+ translated = error_mark_node;
+ return;
+ }
+
+ fncontext fnctx = ctx->peek_fn ();
+ Bvariable *tmp = NULL;
+ bool needs_temp = !expr_tyty->is_unit ();
+ if (needs_temp)
+ {
+ tree enclosing_scope = ctx->peek_enclosing_scope ();
+ tree block_type = TyTyResolveCompile::compile (ctx, expr_tyty);
+
+ bool is_address_taken = false;
+ tree ret_var_stmt = nullptr;
+ tmp = ctx->get_backend ()->temporary_variable (
+ fnctx.fndecl, enclosing_scope, block_type, NULL, is_address_taken,
+ expr.get_locus (), &ret_var_stmt);
+ ctx->add_statement (ret_var_stmt);
+ }
+
+  // let's compile the scrutinee expression
+ tree match_scrutinee_expr
+ = CompileExpr::Compile (expr.get_scrutinee_expr ().get (), ctx);
+
+ tree match_scrutinee_expr_qualifier_expr;
+ if (TyTy::is_primitive_type_kind (scrutinee_kind))
+ {
+ match_scrutinee_expr_qualifier_expr = match_scrutinee_expr;
+ }
+ else if (scrutinee_kind == TyTy::TypeKind::ADT)
+ {
+      // need to access the qualifier field; if we used QUAL_UNION_TYPE this
+      // would be DECL_QUALIFIER, I think. For now this just accesses the
+      // first record field and its respective qualifier, which will always
+      // be set because this is all one big special union
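+      //
+      // Illustrative example (an assumption for exposition; the real layout
+      // comes from TyTyResolveCompile): for
+      //   enum Foo { A (i32), B (f32) }
+      // each variant is a record whose first field is the discriminant, and
+      // the enum is a union of those records, so field 0 of field 0 of the
+      // scrutinee always yields the discriminant we can switch on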
+ tree scrutinee_first_record_expr
+ = ctx->get_backend ()->struct_field_expression (
+ match_scrutinee_expr, 0, expr.get_scrutinee_expr ()->get_locus ());
+ match_scrutinee_expr_qualifier_expr
+ = ctx->get_backend ()->struct_field_expression (
+ scrutinee_first_record_expr, 0,
+ expr.get_scrutinee_expr ()->get_locus ());
+ }
+ else if (scrutinee_kind == TyTy::TypeKind::TUPLE)
+ {
+ // match on tuple becomes a series of nested switches, with one level
+ // for each element of the tuple from left to right.
+ auto exprtype = expr.get_scrutinee_expr ()->get_expression_type ();
+ switch (exprtype)
+ {
+ case HIR::Expr::ExprType::Tuple: {
+ // Build an equivalent expression which is nicer to lower.
+ HIR::MatchExpr outer_match = simplify_tuple_match (expr);
+
+ // We've rearranged the match into something that lowers better
+ // to GENERIC trees.
+ // For actually doing the lowering we need to compile the match
+ // we've just made. But we're half-way through compiling the
+ // original one.
+ // ...
+ // For now, let's just replace the original with the rearranged one
+ // we just made, and compile that instead. What could go wrong? :)
+ //
+ // FIXME: What about when we decide a temporary is needed above?
+ // We might have already pushed a statement for it that
+ // we no longer need. Probably need to rearrange the order
+ // of these steps.
+ expr = outer_match;
+
+ scrutinee_kind = check_match_scrutinee (expr, ctx);
+ if (scrutinee_kind == TyTy::TypeKind::ERROR)
+ {
+ translated = error_mark_node;
+ return;
+ }
+
+ // Now compile the scrutinee of the simplified match.
+ // FIXME: this part is duplicated from above.
+ match_scrutinee_expr
+ = CompileExpr::Compile (expr.get_scrutinee_expr ().get (), ctx);
+
+ if (TyTy::is_primitive_type_kind (scrutinee_kind))
+ {
+ match_scrutinee_expr_qualifier_expr = match_scrutinee_expr;
+ }
+ else if (scrutinee_kind == TyTy::TypeKind::ADT)
+ {
+		  // need to access the qualifier field; if we used
+		  // QUAL_UNION_TYPE this would be DECL_QUALIFIER, I think.
+		  // For now this just accesses the first record field and
+		  // its respective qualifier, which will always be set
+		  // because this is all one big special union
+ tree scrutinee_first_record_expr
+ = ctx->get_backend ()->struct_field_expression (
+ match_scrutinee_expr, 0,
+ expr.get_scrutinee_expr ()->get_locus ());
+ match_scrutinee_expr_qualifier_expr
+ = ctx->get_backend ()->struct_field_expression (
+ scrutinee_first_record_expr, 0,
+ expr.get_scrutinee_expr ()->get_locus ());
+ }
+ else
+ {
+ // FIXME: There are other cases, but it better not be a Tuple
+ gcc_unreachable ();
+ }
+ }
+ break;
+
+ case HIR::Expr::ExprType::Path: {
+ // FIXME
+ gcc_unreachable ();
+ }
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ }
+ else
+ {
+ // FIXME: match on other types of expressions not yet implemented.
+ gcc_unreachable ();
+ }
+
+ // setup the end label so the cases can exit properly
+ tree fndecl = fnctx.fndecl;
+ Location end_label_locus = expr.get_locus (); // FIXME
+ tree end_label
+ = ctx->get_backend ()->label (fndecl,
+ "" /* empty creates an artificial label */,
+ end_label_locus);
+ tree end_label_decl_statement
+ = ctx->get_backend ()->label_definition_statement (end_label);
+
+ // setup the switch-body-block
+ Location start_location; // FIXME
+ Location end_location; // FIXME
+ tree enclosing_scope = ctx->peek_enclosing_scope ();
+ tree switch_body_block
+ = ctx->get_backend ()->block (fndecl, enclosing_scope, {}, start_location,
+ end_location);
+ ctx->push_block (switch_body_block);
+
+ for (auto &kase : expr.get_match_cases ())
+ {
+      // for now let's just get single patterns working
+ HIR::MatchArm &kase_arm = kase.get_arm ();
+ rust_assert (kase_arm.get_patterns ().size () > 0);
+
+ // generate implicit label
+ Location arm_locus = kase_arm.get_locus ();
+ tree case_label = ctx->get_backend ()->label (
+ fndecl, "" /* empty creates an artificial label */, arm_locus);
+
+ // setup the bindings for the block
+ for (auto &kase_pattern : kase_arm.get_patterns ())
+ {
+ tree switch_kase_expr
+ = CompilePatternCaseLabelExpr::Compile (kase_pattern.get (),
+ case_label, ctx);
+ ctx->add_statement (switch_kase_expr);
+
+ CompilePatternBindings::Compile (kase_pattern.get (),
+ match_scrutinee_expr, ctx);
+ }
+
+ // compile the expr and setup the assignment if required when tmp != NULL
+ tree kase_expr_tree = CompileExpr::Compile (kase.get_expr ().get (), ctx);
+ if (tmp != NULL)
+ {
+ tree result_reference
+ = ctx->get_backend ()->var_expression (tmp, arm_locus);
+ tree assignment
+ = ctx->get_backend ()->assignment_statement (result_reference,
+ kase_expr_tree,
+ arm_locus);
+ ctx->add_statement (assignment);
+ }
+
+ // go to end label
+ tree goto_end_label = build1_loc (arm_locus.gcc_location (), GOTO_EXPR,
+ void_type_node, end_label);
+ ctx->add_statement (goto_end_label);
+ }
+
+ // setup the switch expression
+ tree match_body = ctx->pop_block ();
+ tree match_expr_stmt
+ = build2_loc (expr.get_locus ().gcc_location (), SWITCH_EXPR,
+ TREE_TYPE (match_scrutinee_expr_qualifier_expr),
+ match_scrutinee_expr_qualifier_expr, match_body);
+ ctx->add_statement (match_expr_stmt);
+ ctx->add_statement (end_label_decl_statement);
+
+ if (tmp != NULL)
+ {
+ translated = ctx->get_backend ()->var_expression (tmp, expr.get_locus ());
+ }
+}
+
+void
+CompileExpr::visit (HIR::CallExpr &expr)
+{
+ TyTy::BaseType *tyty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (
+ expr.get_fnexpr ()->get_mappings ().get_hirid (), &tyty))
+ {
+ rust_error_at (expr.get_locus (), "unknown type");
+ return;
+ }
+
+  // if the type of the function expression is an ADT, this must be a
+  // tuple-struct or tuple-variant constructor
+ bool is_adt_ctor = tyty->get_kind () == TyTy::TypeKind::ADT;
+ if (is_adt_ctor)
+ {
+ rust_assert (tyty->get_kind () == TyTy::TypeKind::ADT);
+ TyTy::ADTType *adt = static_cast<TyTy::ADTType *> (tyty);
+ tree compiled_adt_type = TyTyResolveCompile::compile (ctx, tyty);
+
+ // what variant is it?
+ int union_disriminator = -1;
+ TyTy::VariantDef *variant = nullptr;
+ if (!adt->is_enum ())
+ {
+ rust_assert (adt->number_of_variants () == 1);
+ variant = adt->get_variants ().at (0);
+ }
+ else
+ {
+ HirId variant_id;
+ bool ok = ctx->get_tyctx ()->lookup_variant_definition (
+ expr.get_fnexpr ()->get_mappings ().get_hirid (), &variant_id);
+ rust_assert (ok);
+
+ ok = adt->lookup_variant_by_id (variant_id, &variant,
+ &union_disriminator);
+ rust_assert (ok);
+ }
+
+      // this assumes all fields are in order from type resolution, and if a
+      // base struct was specified those fields are filled in via accessors
+ std::vector<tree> arguments;
+ for (size_t i = 0; i < expr.get_arguments ().size (); i++)
+ {
+ auto &argument = expr.get_arguments ().at (i);
+ auto rvalue = CompileExpr::Compile (argument.get (), ctx);
+
+	  // assignments are coercion sites so let's convert the rvalue if
+	  // necessary
+ auto respective_field = variant->get_field_at_index (i);
+ auto expected = respective_field->get_field_type ();
+
+ TyTy::BaseType *actual = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_type (
+ argument->get_mappings ().get_hirid (), &actual);
+ rust_assert (ok);
+
+ // coerce it if required
+ Location lvalue_locus
+ = ctx->get_mappings ()->lookup_location (expected->get_ty_ref ());
+ Location rvalue_locus = argument->get_locus ();
+ rvalue
+ = coercion_site (argument->get_mappings ().get_hirid (), rvalue,
+ actual, expected, lvalue_locus, rvalue_locus);
+
+ // add it to the list
+ arguments.push_back (rvalue);
+ }
+
+      // the constructor depends on whether this is actually an enum or not;
+      // if it's an enum we need to set up the discriminant
+ std::vector<tree> ctor_arguments;
+ if (adt->is_enum ())
+ {
+ HIR::Expr *discrim_expr = variant->get_discriminant ();
+ tree discrim_expr_node = CompileExpr::Compile (discrim_expr, ctx);
+ tree folded_discrim_expr = fold_expr (discrim_expr_node);
+ tree qualifier = folded_discrim_expr;
+
+ ctor_arguments.push_back (qualifier);
+ }
+ for (auto &arg : arguments)
+ ctor_arguments.push_back (arg);
+
+ translated = ctx->get_backend ()->constructor_expression (
+ compiled_adt_type, adt->is_enum (), ctor_arguments, union_disriminator,
+ expr.get_locus ());
+
+ return;
+ }
+
+ auto get_parameter_tyty_at_index
+ = [] (const TyTy::BaseType *base, size_t index,
+ TyTy::BaseType **result) -> bool {
+ bool is_fn = base->get_kind () == TyTy::TypeKind::FNDEF
+ || base->get_kind () == TyTy::TypeKind::FNPTR;
+ rust_assert (is_fn);
+
+ if (base->get_kind () == TyTy::TypeKind::FNPTR)
+ {
+ const TyTy::FnPtr *fn = static_cast<const TyTy::FnPtr *> (base);
+ *result = fn->param_at (index);
+
+ return true;
+ }
+
+ const TyTy::FnType *fn = static_cast<const TyTy::FnType *> (base);
+ auto param = fn->param_at (index);
+ *result = param.second;
+
+ return true;
+ };
+
+ auto fn_address = CompileExpr::Compile (expr.get_fnexpr (), ctx);
+
+ // is this a closure call?
+ bool possible_trait_call
+ = generate_possible_fn_trait_call (expr, fn_address, &translated);
+ if (possible_trait_call)
+ return;
+
+ bool is_varadic = false;
+ if (tyty->get_kind () == TyTy::TypeKind::FNDEF)
+ {
+ const TyTy::FnType *fn = static_cast<const TyTy::FnType *> (tyty);
+ is_varadic = fn->is_varadic ();
+ }
+
+ size_t required_num_args = expr.get_arguments ().size ();
+ if (tyty->get_kind () == TyTy::TypeKind::FNDEF)
+ {
+ const TyTy::FnType *fn = static_cast<const TyTy::FnType *> (tyty);
+ required_num_args = fn->num_params ();
+ }
+ else if (tyty->get_kind () == TyTy::TypeKind::FNPTR)
+ {
+ const TyTy::FnPtr *fn = static_cast<const TyTy::FnPtr *> (tyty);
+ required_num_args = fn->num_params ();
+ }
+
+ std::vector<tree> args;
+ for (size_t i = 0; i < expr.get_arguments ().size (); i++)
+ {
+ auto &argument = expr.get_arguments ().at (i);
+ auto rvalue = CompileExpr::Compile (argument.get (), ctx);
+
+ if (is_varadic && i >= required_num_args)
+ {
+ args.push_back (rvalue);
+ continue;
+ }
+
+      // assignments are coercion sites so let's convert the rvalue if
+      // necessary
+ bool ok;
+ TyTy::BaseType *expected = nullptr;
+ ok = get_parameter_tyty_at_index (tyty, i, &expected);
+ rust_assert (ok);
+
+ TyTy::BaseType *actual = nullptr;
+ ok = ctx->get_tyctx ()->lookup_type (
+ argument->get_mappings ().get_hirid (), &actual);
+ rust_assert (ok);
+
+ // coerce it if required
+ Location lvalue_locus
+ = ctx->get_mappings ()->lookup_location (expected->get_ty_ref ());
+ Location rvalue_locus = argument->get_locus ();
+ rvalue = coercion_site (argument->get_mappings ().get_hirid (), rvalue,
+ actual, expected, lvalue_locus, rvalue_locus);
+
+ // add it to the list
+ args.push_back (rvalue);
+ }
+
+ // must be a regular call to a function
+ translated = ctx->get_backend ()->call_expression (fn_address, args, nullptr,
+ expr.get_locus ());
+}
+
+void
+CompileExpr::visit (HIR::MethodCallExpr &expr)
+{
+ // method receiver
+ tree self = CompileExpr::Compile (expr.get_receiver ().get (), ctx);
+
+ // lookup the resolved name
+ NodeId resolved_node_id = UNKNOWN_NODEID;
+ if (!ctx->get_resolver ()->lookup_resolved_name (
+ expr.get_mappings ().get_nodeid (), &resolved_node_id))
+ {
+ rust_error_at (expr.get_locus (), "failed to lookup resolved MethodCall");
+ return;
+ }
+
+ // reverse lookup
+ HirId ref;
+ if (!ctx->get_mappings ()->lookup_node_to_hir (resolved_node_id, &ref))
+ {
+ rust_fatal_error (expr.get_locus (), "reverse lookup failure");
+ return;
+ }
+
+ // lookup the expected function type
+ TyTy::BaseType *lookup_fntype = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_type (
+ expr.get_method_name ().get_mappings ().get_hirid (), &lookup_fntype);
+ rust_assert (ok);
+ rust_assert (lookup_fntype->get_kind () == TyTy::TypeKind::FNDEF);
+ TyTy::FnType *fntype = static_cast<TyTy::FnType *> (lookup_fntype);
+
+ TyTy::BaseType *receiver = nullptr;
+ ok = ctx->get_tyctx ()->lookup_receiver (expr.get_mappings ().get_hirid (),
+ &receiver);
+ rust_assert (ok);
+
+ bool is_dyn_dispatch
+ = receiver->get_root ()->get_kind () == TyTy::TypeKind::DYNAMIC;
+ bool is_generic_receiver = receiver->get_kind () == TyTy::TypeKind::PARAM;
+ if (is_generic_receiver)
+ {
+ TyTy::ParamType *p = static_cast<TyTy::ParamType *> (receiver);
+ receiver = p->resolve ();
+ }
+
+ tree fn_expr = error_mark_node;
+ if (is_dyn_dispatch)
+ {
+ const TyTy::DynamicObjectType *dyn
+ = static_cast<const TyTy::DynamicObjectType *> (receiver->get_root ());
+
+ std::vector<HIR::Expr *> arguments;
+ for (auto &arg : expr.get_arguments ())
+ arguments.push_back (arg.get ());
+
+ fn_expr
+ = get_fn_addr_from_dyn (dyn, receiver, fntype, self, expr.get_locus ());
+ self = get_receiver_from_dyn (dyn, receiver, fntype, self,
+ expr.get_locus ());
+ }
+ else
+ {
+      // look up the compiled function since it may have already been compiled
+ HIR::PathExprSegment method_name = expr.get_method_name ();
+ HIR::PathIdentSegment segment_name = method_name.get_segment ();
+ fn_expr
+ = resolve_method_address (fntype, ref, receiver, segment_name,
+ expr.get_mappings (), expr.get_locus ());
+ }
+
+ // lookup the autoderef mappings
+ HirId autoderef_mappings_id
+ = expr.get_receiver ()->get_mappings ().get_hirid ();
+ std::vector<Resolver::Adjustment> *adjustments = nullptr;
+ ok = ctx->get_tyctx ()->lookup_autoderef_mappings (autoderef_mappings_id,
+ &adjustments);
+ rust_assert (ok);
+
+ // apply adjustments for the fn call
+ self = resolve_adjustements (*adjustments, self,
+ expr.get_receiver ()->get_locus ());
+
+ std::vector<tree> args;
+ args.push_back (self); // adjusted self
+
+ // normal args
+ for (size_t i = 0; i < expr.get_arguments ().size (); i++)
+ {
+ auto &argument = expr.get_arguments ().at (i);
+ auto rvalue = CompileExpr::Compile (argument.get (), ctx);
+
+      // assignments are coercion sites so let's convert the rvalue if
+      // necessary, offset from the already adjusted implicit self
+ bool ok;
+ TyTy::BaseType *expected = fntype->param_at (i + 1).second;
+
+ TyTy::BaseType *actual = nullptr;
+ ok = ctx->get_tyctx ()->lookup_type (
+ argument->get_mappings ().get_hirid (), &actual);
+ rust_assert (ok);
+
+ // coerce it if required
+ Location lvalue_locus
+ = ctx->get_mappings ()->lookup_location (expected->get_ty_ref ());
+ Location rvalue_locus = argument->get_locus ();
+ rvalue = coercion_site (argument->get_mappings ().get_hirid (), rvalue,
+ actual, expected, lvalue_locus, rvalue_locus);
+
+ // add it to the list
+ args.push_back (rvalue);
+ }
+
+ translated = ctx->get_backend ()->call_expression (fn_expr, args, nullptr,
+ expr.get_locus ());
+}
+
+tree
+CompileExpr::get_fn_addr_from_dyn (const TyTy::DynamicObjectType *dyn,
+ TyTy::BaseType *receiver,
+ TyTy::FnType *fntype, tree receiver_ref,
+ Location expr_locus)
+{
+ size_t offs = 0;
+ const Resolver::TraitItemReference *ref = nullptr;
+ for (auto &bound : dyn->get_object_items ())
+ {
+ const Resolver::TraitItemReference *item = bound.first;
+ auto t = item->get_tyty ();
+ rust_assert (t->get_kind () == TyTy::TypeKind::FNDEF);
+ auto ft = static_cast<TyTy::FnType *> (t);
+
+ if (ft->get_id () == fntype->get_id ())
+ {
+ ref = item;
+ break;
+ }
+ offs++;
+ }
+
+ if (ref == nullptr)
+ return error_mark_node;
+
+ // get any indirection sorted out
+ if (receiver->get_kind () == TyTy::TypeKind::REF)
+ {
+ tree indirect = indirect_expression (receiver_ref, expr_locus);
+ receiver_ref = indirect;
+ }
+
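+  // A dyn receiver is assumed here to be a fat pointer laid out as
+  // { receiver_obj, vtable_ptr } (fields 0 and 1 respectively; see
+  // get_receiver_from_dyn below), so the method address is vtable_ptr[offs].
+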
+ // cast it to the correct fntype
+ tree expected_fntype = TyTyResolveCompile::compile (ctx, fntype, true);
+ tree idx = build_int_cst (size_type_node, offs);
+
+ tree vtable_ptr
+ = ctx->get_backend ()->struct_field_expression (receiver_ref, 1,
+ expr_locus);
+ tree vtable_array_access = build4_loc (expr_locus.gcc_location (), ARRAY_REF,
+ TREE_TYPE (TREE_TYPE (vtable_ptr)),
+ vtable_ptr, idx, NULL_TREE, NULL_TREE);
+
+ tree vcall
+ = build3_loc (expr_locus.gcc_location (), OBJ_TYPE_REF, expected_fntype,
+ vtable_array_access, receiver_ref, idx);
+
+ return vcall;
+}
+
+tree
+CompileExpr::get_receiver_from_dyn (const TyTy::DynamicObjectType *dyn,
+ TyTy::BaseType *receiver,
+ TyTy::FnType *fntype, tree receiver_ref,
+ Location expr_locus)
+{
+ // get any indirection sorted out
+ if (receiver->get_kind () == TyTy::TypeKind::REF)
+ {
+ tree indirect = indirect_expression (receiver_ref, expr_locus);
+ receiver_ref = indirect;
+ }
+
+  // the fat pointer stores the receiver object at offset 0 and the fnptr
+  // vtable at offset 1; here we access offset 0 for the receiver obj
+ return ctx->get_backend ()->struct_field_expression (receiver_ref, 0,
+ expr_locus);
+}
+
+tree
+CompileExpr::resolve_method_address (TyTy::FnType *fntype, HirId ref,
+ TyTy::BaseType *receiver,
+ HIR::PathIdentSegment &segment,
+ Analysis::NodeMapping expr_mappings,
+ Location expr_locus)
+{
+  // look up the compiled function since it may have already been compiled
+ tree fn = NULL_TREE;
+ if (ctx->lookup_function_decl (fntype->get_ty_ref (), &fn))
+ {
+ return address_expression (fn, expr_locus);
+ }
+
+  // Now we can try to resolve the address, since this might be a forward
+  // declared function, a generic function which has not been compiled yet,
+  // or a not-yet-bound trait function
+ HIR::ImplItem *resolved_item
+ = ctx->get_mappings ()->lookup_hir_implitem (ref, nullptr);
+ if (resolved_item != nullptr)
+ {
+ if (!fntype->has_subsititions_defined ())
+ return CompileInherentImplItem::Compile (resolved_item, ctx);
+
+ return CompileInherentImplItem::Compile (resolved_item, ctx, fntype);
+ }
+
+ // it might be resolved to a trait item
+ HIR::TraitItem *trait_item
+ = ctx->get_mappings ()->lookup_hir_trait_item (ref);
+ HIR::Trait *trait = ctx->get_mappings ()->lookup_trait_item_mapping (
+ trait_item->get_mappings ().get_hirid ());
+
+ Resolver::TraitReference *trait_ref
+ = &Resolver::TraitReference::error_node ();
+ bool ok = ctx->get_tyctx ()->lookup_trait_reference (
+ trait->get_mappings ().get_defid (), &trait_ref);
+ rust_assert (ok);
+
+  // the type resolver can only resolve type bounds to their trait
+  // item, so it's up to us to figure out if this path should resolve
+  // to a trait-impl-block-item or if it can be defaulted to the
+  // trait-impl-item's definition
+
+ auto root = receiver->get_root ();
+ auto candidates
+ = Resolver::PathProbeType::Probe (root, segment, true /* probe_impls */,
+ false /* probe_bounds */,
+ true /* ignore_mandatory_trait_items */);
+ if (candidates.size () == 0)
+ {
+ // this means we are defaulting back to the trait_item if
+ // possible
+ Resolver::TraitItemReference *trait_item_ref = nullptr;
+ bool ok = trait_ref->lookup_hir_trait_item (*trait_item, &trait_item_ref);
+ rust_assert (ok); // found
+ rust_assert (trait_item_ref->is_optional ()); // has definition
+
+      // FIXME: Optional means it has a definition and an associated
+      // block which can be a default implementation; if it does not
+      // contain an implementation we should actually return
+      // error_mark_node
+
+ return CompileTraitItem::Compile (trait_item_ref->get_hir_trait_item (),
+ ctx, fntype, true, expr_locus);
+ }
+ else
+ {
+      // FIXME: this will be a case to return error_mark_node; there is
+      // an error scenario where a trait Foo has a method Bar, but this
+      // receiver does not implement this trait or has an incompatible
+      // implementation, and we should just return error_mark_node
+
+ rust_assert (candidates.size () == 1);
+ auto &candidate = *candidates.begin ();
+ rust_assert (candidate.is_impl_candidate ());
+ rust_assert (candidate.ty->get_kind () == TyTy::TypeKind::FNDEF);
+ TyTy::FnType *candidate_call = static_cast<TyTy::FnType *> (candidate.ty);
+
+ HIR::ImplItem *impl_item = candidate.item.impl.impl_item;
+ if (!candidate_call->has_subsititions_defined ())
+ return CompileInherentImplItem::Compile (impl_item, ctx);
+
+ TyTy::BaseType *monomorphized = candidate_call;
+ if (candidate_call->needs_generic_substitutions ())
+ {
+ TyTy::BaseType *infer_impl_call
+ = candidate_call->infer_substitions (expr_locus);
+ monomorphized = infer_impl_call->unify (fntype);
+ }
+
+ return CompileInherentImplItem::Compile (impl_item, ctx, monomorphized);
+ }
+}
+
+tree
+CompileExpr::resolve_operator_overload (
+ Analysis::RustLangItem::ItemType lang_item_type, HIR::OperatorExprMeta expr,
+ tree lhs, tree rhs, HIR::Expr *lhs_expr, HIR::Expr *rhs_expr)
+{
+ TyTy::FnType *fntype;
+ bool is_op_overload = ctx->get_tyctx ()->lookup_operator_overload (
+ expr.get_mappings ().get_hirid (), &fntype);
+ rust_assert (is_op_overload);
+
+ // lookup the resolved name
+ NodeId resolved_node_id = UNKNOWN_NODEID;
+ bool ok = ctx->get_resolver ()->lookup_resolved_name (
+ expr.get_mappings ().get_nodeid (), &resolved_node_id);
+ rust_assert (ok);
+
+ // reverse lookup
+ HirId ref;
+ ok = ctx->get_mappings ()->lookup_node_to_hir (resolved_node_id, &ref);
+ rust_assert (ok);
+
+ TyTy::BaseType *receiver = nullptr;
+ ok = ctx->get_tyctx ()->lookup_receiver (expr.get_mappings ().get_hirid (),
+ &receiver);
+ rust_assert (ok);
+
+ bool is_generic_receiver = receiver->get_kind () == TyTy::TypeKind::PARAM;
+ if (is_generic_receiver)
+ {
+ TyTy::ParamType *p = static_cast<TyTy::ParamType *> (receiver);
+ receiver = p->resolve ();
+ }
+
+  // look up the compiled function since it may have already been compiled
+ HIR::PathIdentSegment segment_name (
+ Analysis::RustLangItem::ToString (lang_item_type));
+ tree fn_expr
+ = resolve_method_address (fntype, ref, receiver, segment_name,
+ expr.get_mappings (), expr.get_locus ());
+
+ // lookup the autoderef mappings
+ std::vector<Resolver::Adjustment> *adjustments = nullptr;
+ ok = ctx->get_tyctx ()->lookup_autoderef_mappings (
+ expr.get_lvalue_mappings ().get_hirid (), &adjustments);
+ rust_assert (ok);
+
+ // apply adjustments for the fn call
+ tree self = resolve_adjustements (*adjustments, lhs, lhs_expr->get_locus ());
+
+ std::vector<tree> args;
+ args.push_back (self); // adjusted self
+ if (rhs != nullptr) // can be null for negation_expr (unary ones)
+ args.push_back (rhs);
+
+ return ctx->get_backend ()->call_expression (fn_expr, args, nullptr,
+ expr.get_locus ());
+}
+
+tree
+CompileExpr::compile_bool_literal (const HIR::LiteralExpr &expr,
+ const TyTy::BaseType *tyty)
+{
+ rust_assert (expr.get_lit_type () == HIR::Literal::BOOL);
+
+ const auto literal_value = expr.get_literal ();
+ bool bval = literal_value.as_string ().compare ("true") == 0;
+ return ctx->get_backend ()->boolean_constant_expression (bval);
+}
+
+tree
+CompileExpr::compile_integer_literal (const HIR::LiteralExpr &expr,
+ const TyTy::BaseType *tyty)
+{
+ rust_assert (expr.get_lit_type () == HIR::Literal::INT);
+ const auto literal_value = expr.get_literal ();
+
+ tree type = TyTyResolveCompile::compile (ctx, tyty);
+
+ mpz_t ival;
+ if (mpz_init_set_str (ival, literal_value.as_string ().c_str (), 10) != 0)
+ {
+ rust_error_at (expr.get_locus (), "bad number in literal");
+ return error_mark_node;
+ }
+
+ mpz_t type_min;
+ mpz_t type_max;
+ mpz_init (type_min);
+ mpz_init (type_max);
+ get_type_static_bounds (type, type_min, type_max);
+
+ if (mpz_cmp (ival, type_min) < 0 || mpz_cmp (ival, type_max) > 0)
+ {
+ rust_error_at (expr.get_locus (),
+ "integer overflows the respective type %<%s%>",
+ tyty->get_name ().c_str ());
+ return error_mark_node;
+ }
+
+ tree result = wide_int_to_tree (type, wi::from_mpz (type, ival, true));
+
+ mpz_clear (type_min);
+ mpz_clear (type_max);
+ mpz_clear (ival);
+
+ return result;
+}
+
+tree
+CompileExpr::compile_float_literal (const HIR::LiteralExpr &expr,
+ const TyTy::BaseType *tyty)
+{
+ rust_assert (expr.get_lit_type () == HIR::Literal::FLOAT);
+ const auto literal_value = expr.get_literal ();
+
+ mpfr_t fval;
+ if (mpfr_init_set_str (fval, literal_value.as_string ().c_str (), 10,
+ MPFR_RNDN)
+ != 0)
+ {
+ rust_error_at (expr.get_locus (), "bad number in literal");
+ return error_mark_node;
+ }
+
+ tree type = TyTyResolveCompile::compile (ctx, tyty);
+
+  // taken from go/gofrontend/expressions.cc:check_float_type
+ mpfr_exp_t exp = mpfr_get_exp (fval);
+ bool real_value_overflow = exp > TYPE_PRECISION (type);
+
+ REAL_VALUE_TYPE r1;
+ real_from_mpfr (&r1, fval, type, GMP_RNDN);
+ REAL_VALUE_TYPE r2;
+ real_convert (&r2, TYPE_MODE (type), &r1);
+
+ tree real_value = build_real (type, r2);
+ if (TREE_OVERFLOW (real_value) || real_value_overflow)
+ {
+ rust_error_at (expr.get_locus (),
+ "decimal overflows the respective type %<%s%>",
+ tyty->get_name ().c_str ());
+ return error_mark_node;
+ }
+
+ return real_value;
+}
+
+tree
+CompileExpr::compile_char_literal (const HIR::LiteralExpr &expr,
+ const TyTy::BaseType *tyty)
+{
+ rust_assert (expr.get_lit_type () == HIR::Literal::CHAR);
+ const auto literal_value = expr.get_literal ();
+
+ // FIXME needs wchar_t
+ char c = literal_value.as_string ().c_str ()[0];
+ return ctx->get_backend ()->wchar_constant_expression (c);
+}
+
+tree
+CompileExpr::compile_byte_literal (const HIR::LiteralExpr &expr,
+ const TyTy::BaseType *tyty)
+{
+ rust_assert (expr.get_lit_type () == HIR::Literal::BYTE);
+ const auto literal_value = expr.get_literal ();
+
+ tree type = TyTyResolveCompile::compile (ctx, tyty);
+ char c = literal_value.as_string ().c_str ()[0];
+ return build_int_cst (type, c);
+}
+
+tree
+CompileExpr::compile_string_literal (const HIR::LiteralExpr &expr,
+ const TyTy::BaseType *tyty)
+{
+ tree fat_pointer = TyTyResolveCompile::compile (ctx, tyty);
+
+ rust_assert (expr.get_lit_type () == HIR::Literal::STRING);
+ const auto literal_value = expr.get_literal ();
+
+ auto base = ctx->get_backend ()->string_constant_expression (
+ literal_value.as_string ());
+ tree data = address_expression (base, expr.get_locus ());
+
+ TyTy::BaseType *usize = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_builtin ("usize", &usize);
+ rust_assert (ok);
+ tree type = TyTyResolveCompile::compile (ctx, usize);
+
+ tree size = build_int_cstu (type, literal_value.as_string ().size ());
+
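+  // e.g. "hello" illustratively becomes the fat pointer { data, 5 }, where
+  // data is the address of the string constant and 5 its length in bytes;
+  // the exact field layout comes from the compiled &str type
+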
+ return ctx->get_backend ()->constructor_expression (fat_pointer, false,
+ {data, size}, -1,
+ expr.get_locus ());
+}
+
+tree
+CompileExpr::compile_byte_string_literal (const HIR::LiteralExpr &expr,
+ const TyTy::BaseType *tyty)
+{
+ rust_assert (expr.get_lit_type () == HIR::Literal::BYTE_STRING);
+
+ // the type here is &[ty; capacity]
+ rust_assert (tyty->get_kind () == TyTy::TypeKind::REF);
+ const auto ref_tyty = static_cast<const TyTy::ReferenceType *> (tyty);
+ auto base_tyty = ref_tyty->get_base ();
+ rust_assert (base_tyty->get_kind () == TyTy::TypeKind::ARRAY);
+ auto array_tyty = static_cast<TyTy::ArrayType *> (base_tyty);
+
+ std::string value_str = expr.get_literal ().as_string ();
+ std::vector<tree> vals;
+ std::vector<unsigned long> indexes;
+ for (size_t i = 0; i < value_str.size (); i++)
+ {
+ char b = value_str.at (i);
+ tree bb = ctx->get_backend ()->char_constant_expression (b);
+ vals.push_back (bb);
+ indexes.push_back (i);
+ }
+
+ tree array_type = TyTyResolveCompile::compile (ctx, array_tyty);
+ tree constructed
+ = ctx->get_backend ()->array_constructor_expression (array_type, indexes,
+ vals,
+ expr.get_locus ());
+
+ return address_expression (constructed, expr.get_locus ());
+}
+
+tree
+CompileExpr::type_cast_expression (tree type_to_cast_to, tree expr_tree,
+ Location location)
+{
+ if (type_to_cast_to == error_mark_node || expr_tree == error_mark_node
+ || TREE_TYPE (expr_tree) == error_mark_node)
+ return error_mark_node;
+
+ if (ctx->get_backend ()->type_size (type_to_cast_to) == 0
+ || TREE_TYPE (expr_tree) == void_type_node)
+ {
+ // Do not convert zero-sized types.
+ return expr_tree;
+ }
+ else if (TREE_CODE (type_to_cast_to) == INTEGER_TYPE)
+ {
+ tree cast = convert_to_integer (type_to_cast_to, expr_tree);
+ // FIXME check for TREE_OVERFLOW?
+ return cast;
+ }
+ else if (TREE_CODE (type_to_cast_to) == REAL_TYPE)
+ {
+ tree cast = convert_to_real (type_to_cast_to, expr_tree);
+ // FIXME
+      // We might need to check whether the tree is the MAX value and, if
+      // so, saturate it to inf; we can get the bounds and check whether the
+      // value is >= or <= the min and max bounds
+ //
+ // https://github.com/Rust-GCC/gccrs/issues/635
+ return cast;
+ }
+ else if (TREE_CODE (type_to_cast_to) == COMPLEX_TYPE)
+ {
+ return convert_to_complex (type_to_cast_to, expr_tree);
+ }
+ else if (TREE_CODE (type_to_cast_to) == POINTER_TYPE
+ && TREE_CODE (TREE_TYPE (expr_tree)) == INTEGER_TYPE)
+ {
+ return convert_to_pointer (type_to_cast_to, expr_tree);
+ }
+ else if (TREE_CODE (type_to_cast_to) == RECORD_TYPE
+ || TREE_CODE (type_to_cast_to) == ARRAY_TYPE)
+ {
+ return fold_build1_loc (location.gcc_location (), VIEW_CONVERT_EXPR,
+ type_to_cast_to, expr_tree);
+ }
+ else if (TREE_CODE (type_to_cast_to) == POINTER_TYPE
+ && SLICE_TYPE_P (TREE_TYPE (expr_tree)))
+ {
+      // returning a raw cast using NOP_EXPR seems to result in an ICE:
+ //
+ // Analyzing compilation unit
+ // Performing interprocedural optimizations
+ // <*free_lang_data> {heap 2644k} <visibility> {heap 2644k}
+ // <build_ssa_passes> {heap 2644k} <opt_local_passes> {heap 2644k}during
+ // GIMPLE pass: cddce
+ // In function ‘*T::as_ptr<i32>’:
+ // rust1: internal compiler error: in propagate_necessity, at
+ // tree-ssa-dce.cc:984 0x1d5b43e propagate_necessity
+ // ../../gccrs/gcc/tree-ssa-dce.cc:984
+ // 0x1d5e180 perform_tree_ssa_dce
+ // ../../gccrs/gcc/tree-ssa-dce.cc:1876
+ // 0x1d5e2c8 tree_ssa_cd_dce
+ // ../../gccrs/gcc/tree-ssa-dce.cc:1920
+ // 0x1d5e49a execute
+ // ../../gccrs/gcc/tree-ssa-dce.cc:1992
+
+      // this returns the raw data pointer of the slice and assumes a very
+      // specific layout
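+      // (a slice is assumed here to be laid out as a { data_ptr, len }
+      // pair, so field 0 is the data pointer)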
+ return ctx->get_backend ()->struct_field_expression (expr_tree, 0,
+ location);
+ }
+
+ return fold_convert_loc (location.gcc_location (), type_to_cast_to,
+ expr_tree);
+}
+
+void
+CompileExpr::visit (HIR::ArrayExpr &expr)
+{
+ TyTy::BaseType *tyty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (),
+ &tyty))
+ {
+ rust_fatal_error (expr.get_locus (),
+ "did not resolve type for this array expr");
+ return;
+ }
+
+ tree array_type = TyTyResolveCompile::compile (ctx, tyty);
+ if (TREE_CODE (array_type) != ARRAY_TYPE)
+ {
+ translated = error_mark_node;
+ return;
+ }
+
+ rust_assert (tyty->get_kind () == TyTy::TypeKind::ARRAY);
+ const TyTy::ArrayType &array_tyty
+ = static_cast<const TyTy::ArrayType &> (*tyty);
+
+ HIR::ArrayElems &elements = *expr.get_internal_elements ();
+ switch (elements.get_array_expr_type ())
+ {
+ case HIR::ArrayElems::ArrayExprType::VALUES: {
+ HIR::ArrayElemsValues &elems
+ = static_cast<HIR::ArrayElemsValues &> (elements);
+ translated
+ = array_value_expr (expr.get_locus (), array_tyty, array_type, elems);
+ }
+ return;
+
+    case HIR::ArrayElems::ArrayExprType::COPIED: {
+      HIR::ArrayElemsCopied &elems
+	= static_cast<HIR::ArrayElemsCopied &> (elements);
+      translated
+	= array_copied_expr (expr.get_locus (), array_tyty, array_type, elems);
+    }
+      return;
+ }
+}
+
+tree
+CompileExpr::array_value_expr (Location expr_locus,
+ const TyTy::ArrayType &array_tyty,
+ tree array_type, HIR::ArrayElemsValues &elems)
+{
+ std::vector<unsigned long> indexes;
+ std::vector<tree> constructor;
+ size_t i = 0;
+ for (auto &elem : elems.get_values ())
+ {
+ tree translated_expr = CompileExpr::Compile (elem.get (), ctx);
+ constructor.push_back (translated_expr);
+ indexes.push_back (i++);
+ }
+
+ return ctx->get_backend ()->array_constructor_expression (array_type, indexes,
+ constructor,
+ expr_locus);
+}
+
+tree
+CompileExpr::array_copied_expr (Location expr_locus,
+ const TyTy::ArrayType &array_tyty,
+ tree array_type, HIR::ArrayElemsCopied &elems)
+{
+ // see gcc/cp/typeck2.cc:1369-1401
+ gcc_assert (TREE_CODE (array_type) == ARRAY_TYPE);
+ tree domain = TYPE_DOMAIN (array_type);
+ if (!domain)
+ return error_mark_node;
+
+ if (!TREE_CONSTANT (TYPE_MAX_VALUE (domain)))
+ {
+ rust_error_at (expr_locus, "non const capacity domain %qT", array_type);
+ return error_mark_node;
+ }
+
+ ctx->push_const_context ();
+ tree capacity_expr = CompileExpr::Compile (elems.get_num_copies_expr (), ctx);
+ ctx->pop_const_context ();
+
+ if (!TREE_CONSTANT (capacity_expr))
+ {
+ rust_error_at (expr_locus, "non const num copies %qT", array_type);
+ return error_mark_node;
+ }
+
+ // get the compiled value
+ tree translated_expr = CompileExpr::Compile (elems.get_elem_to_copy (), ctx);
+
+ tree max_domain = TYPE_MAX_VALUE (domain);
+ tree min_domain = TYPE_MIN_VALUE (domain);
+
+ auto max = wi::to_offset (max_domain);
+ auto min = wi::to_offset (min_domain);
+ auto precision = TYPE_PRECISION (TREE_TYPE (domain));
+ auto sign = TYPE_SIGN (TREE_TYPE (domain));
+ unsigned HOST_WIDE_INT len
+ = wi::ext (max - min + 1, precision, sign).to_uhwi ();
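+  // e.g. for [x; 5] the domain is [0, 4], so len = 4 - 0 + 1 = 5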
+
+ // In a const context we must initialize the entire array, which entails
+ // allocating for each element. If the user wants a huge array, we will OOM
+ // and die horribly.
+ if (ctx->const_context_p ())
+ {
+ size_t idx = 0;
+ std::vector<unsigned long> indexes;
+ std::vector<tree> constructor;
+ for (unsigned HOST_WIDE_INT i = 0; i < len; i++)
+ {
+ constructor.push_back (translated_expr);
+ indexes.push_back (idx++);
+ }
+
+ return ctx->get_backend ()->array_constructor_expression (array_type,
+ indexes,
+ constructor,
+ expr_locus);
+ }
+
+ else
+ {
+ // Create a new block scope in which to initialize the array
+ tree fndecl = NULL_TREE;
+ if (ctx->in_fn ())
+ fndecl = ctx->peek_fn ().fndecl;
+
+ std::vector<Bvariable *> locals;
+ tree enclosing_scope = ctx->peek_enclosing_scope ();
+ tree init_block
+ = ctx->get_backend ()->block (fndecl, enclosing_scope, locals,
+ expr_locus, expr_locus);
+ ctx->push_block (init_block);
+
+ tree tmp;
+ tree stmts
+ = ctx->get_backend ()->array_initializer (fndecl, init_block,
+ array_type, capacity_expr,
+ translated_expr, &tmp,
+ expr_locus);
+ ctx->add_statement (stmts);
+
+ tree block = ctx->pop_block ();
+
+ // The result is a compound expression which creates a temporary array,
+      // initializes all the elements in a loop, and then yields the array.
+ return ctx->get_backend ()->compound_expression (block, tmp, expr_locus);
+ }
+}
+
+tree
+HIRCompileBase::resolve_adjustements (
+ std::vector<Resolver::Adjustment> &adjustments, tree expression,
+ Location locus)
+{
+ tree e = expression;
+ for (auto &adjustment : adjustments)
+ {
+ switch (adjustment.get_type ())
+ {
+ case Resolver::Adjustment::AdjustmentType::ERROR:
+ return error_mark_node;
+
+ case Resolver::Adjustment::AdjustmentType::IMM_REF:
+ case Resolver::Adjustment::AdjustmentType::MUT_REF: {
+ if (!SLICE_TYPE_P (TREE_TYPE (e)))
+ {
+ e = address_expression (e, locus);
+ }
+ }
+ break;
+
+ case Resolver::Adjustment::AdjustmentType::DEREF:
+ case Resolver::Adjustment::AdjustmentType::DEREF_MUT:
+ e = resolve_deref_adjustment (adjustment, e, locus);
+ break;
+
+ case Resolver::Adjustment::AdjustmentType::INDIRECTION:
+ e = resolve_indirection_adjustment (adjustment, e, locus);
+ break;
+
+ case Resolver::Adjustment::AdjustmentType::UNSIZE:
+ e = resolve_unsized_adjustment (adjustment, e, locus);
+ break;
+ }
+ }
+
+ return e;
+}
+
+tree
+HIRCompileBase::resolve_deref_adjustment (Resolver::Adjustment &adjustment,
+ tree expression, Location locus)
+{
+ rust_assert (adjustment.is_deref_adjustment ()
+ || adjustment.is_deref_mut_adjustment ());
+ rust_assert (adjustment.has_operator_overload ());
+
+ TyTy::FnType *lookup = adjustment.get_deref_operator_fn ();
+ HIR::ImplItem *resolved_item = adjustment.get_deref_hir_item ();
+
+ tree fn_address = error_mark_node;
+ if (!lookup->has_subsititions_defined ())
+ fn_address = CompileInherentImplItem::Compile (resolved_item, ctx, nullptr,
+ true, locus);
+ else
+ fn_address = CompileInherentImplItem::Compile (resolved_item, ctx, lookup,
+ true, locus);
+
+ // does it need a reference to call
+ tree adjusted_argument = expression;
+ bool needs_borrow = adjustment.get_deref_adjustment_type ()
+ != Resolver::Adjustment::AdjustmentType::ERROR;
+ if (needs_borrow)
+ {
+ adjusted_argument = address_expression (expression, locus);
+ }
+
+ // make the call
+ return ctx->get_backend ()->call_expression (fn_address, {adjusted_argument},
+ nullptr, locus);
+}
+
+tree
+HIRCompileBase::resolve_indirection_adjustment (
+ Resolver::Adjustment &adjustment, tree expression, Location locus)
+{
+ return indirect_expression (expression, locus);
+}
+
+tree
+HIRCompileBase::resolve_unsized_adjustment (Resolver::Adjustment &adjustment,
+ tree expression, Location locus)
+{
+ bool expect_slice
+ = adjustment.get_expected ()->get_kind () == TyTy::TypeKind::SLICE;
+ bool expect_dyn
+ = adjustment.get_expected ()->get_kind () == TyTy::TypeKind::DYNAMIC;
+
+ // assumes this is an array
+ tree expr_type = TREE_TYPE (expression);
+ if (expect_slice)
+ {
+ rust_assert (TREE_CODE (expr_type) == ARRAY_TYPE);
+ return resolve_unsized_slice_adjustment (adjustment, expression, locus);
+ }
+
+ rust_assert (expect_dyn);
+ return resolve_unsized_dyn_adjustment (adjustment, expression, locus);
+}
+
+tree
+HIRCompileBase::resolve_unsized_slice_adjustment (
+ Resolver::Adjustment &adjustment, tree expression, Location locus)
+{
+ // assumes this is an array
+ tree expr_type = TREE_TYPE (expression);
+ rust_assert (TREE_CODE (expr_type) == ARRAY_TYPE);
+
+ // takes an array and returns a fat-pointer so this becomes a constructor
+ // expression
+ rust_assert (adjustment.get_expected ()->get_kind ()
+ == TyTy::TypeKind::SLICE);
+ tree fat_pointer
+ = TyTyResolveCompile::compile (ctx, adjustment.get_expected ());
+
+ // make a constructor for this
+ tree data = address_expression (expression, locus);
+
+ // fetch the size from the domain
+ tree domain = TYPE_DOMAIN (expr_type);
+ unsigned HOST_WIDE_INT array_size
+ = wi::ext (wi::to_offset (TYPE_MAX_VALUE (domain))
+ - wi::to_offset (TYPE_MIN_VALUE (domain)) + 1,
+ TYPE_PRECISION (TREE_TYPE (domain)),
+ TYPE_SIGN (TREE_TYPE (domain)))
+ .to_uhwi ();
+ tree size = build_int_cstu (size_type_node, array_size);
+
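+  // e.g. unsizing [i32; 5] to &[i32] illustratively builds the fat pointer
+  // { &array[0], 5 }; the exact field layout comes from the compiled slice
+  // type
+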
+ return ctx->get_backend ()->constructor_expression (fat_pointer, false,
+ {data, size}, -1, locus);
+}
+
+tree
+HIRCompileBase::resolve_unsized_dyn_adjustment (
+ Resolver::Adjustment &adjustment, tree expression, Location locus)
+{
+ tree rvalue = expression;
+ Location rvalue_locus = locus;
+
+ const TyTy::BaseType *actual = adjustment.get_actual ();
+ const TyTy::BaseType *expected = adjustment.get_expected ();
+
+ const TyTy::DynamicObjectType *dyn
+ = static_cast<const TyTy::DynamicObjectType *> (expected);
+
+ rust_debug ("resolve_unsized_dyn_adjustment actual={%s} dyn={%s}",
+ actual->debug_str ().c_str (), dyn->debug_str ().c_str ());
+
+ return coerce_to_dyn_object (rvalue, actual, dyn, rvalue_locus);
+}
+
+void
+CompileExpr::visit (HIR::RangeFromToExpr &expr)
+{
+ tree from = CompileExpr::Compile (expr.get_from_expr ().get (), ctx);
+ tree to = CompileExpr::Compile (expr.get_to_expr ().get (), ctx);
+ if (from == error_mark_node || to == error_mark_node)
+ {
+ translated = error_mark_node;
+ return;
+ }
+
+ TyTy::BaseType *tyty = nullptr;
+ bool ok
+ = ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (), &tyty);
+ rust_assert (ok);
+
+ tree adt = TyTyResolveCompile::compile (ctx, tyty);
+
+ // make the constructor
+ translated
+ = ctx->get_backend ()->constructor_expression (adt, false, {from, to}, -1,
+ expr.get_locus ());
+}
+
+void
+CompileExpr::visit (HIR::RangeFromExpr &expr)
+{
+ tree from = CompileExpr::Compile (expr.get_from_expr ().get (), ctx);
+ if (from == error_mark_node)
+ {
+ translated = error_mark_node;
+ return;
+ }
+
+ TyTy::BaseType *tyty = nullptr;
+ bool ok
+ = ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (), &tyty);
+ rust_assert (ok);
+
+ tree adt = TyTyResolveCompile::compile (ctx, tyty);
+
+ // make the constructor
+ translated
+ = ctx->get_backend ()->constructor_expression (adt, false, {from}, -1,
+ expr.get_locus ());
+}
+
+void
+CompileExpr::visit (HIR::RangeToExpr &expr)
+{
+ tree to = CompileExpr::Compile (expr.get_to_expr ().get (), ctx);
+ if (to == error_mark_node)
+ {
+ translated = error_mark_node;
+ return;
+ }
+
+ TyTy::BaseType *tyty = nullptr;
+ bool ok
+ = ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (), &tyty);
+ rust_assert (ok);
+
+ tree adt = TyTyResolveCompile::compile (ctx, tyty);
+
+ // make the constructor
+ translated
+ = ctx->get_backend ()->constructor_expression (adt, false, {to}, -1,
+ expr.get_locus ());
+}
+
+void
+CompileExpr::visit (HIR::RangeFullExpr &expr)
+{
+ TyTy::BaseType *tyty = nullptr;
+ bool ok
+ = ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (), &tyty);
+ rust_assert (ok);
+
+ tree adt = TyTyResolveCompile::compile (ctx, tyty);
+ translated = ctx->get_backend ()->constructor_expression (adt, false, {}, -1,
+ expr.get_locus ());
+}
+
+void
+CompileExpr::visit (HIR::RangeFromToInclExpr &expr)
+{
+ tree from = CompileExpr::Compile (expr.get_from_expr ().get (), ctx);
+ tree to = CompileExpr::Compile (expr.get_to_expr ().get (), ctx);
+ if (from == error_mark_node || to == error_mark_node)
+ {
+ translated = error_mark_node;
+ return;
+ }
+
+ TyTy::BaseType *tyty = nullptr;
+ bool ok
+ = ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (), &tyty);
+ rust_assert (ok);
+
+ tree adt = TyTyResolveCompile::compile (ctx, tyty);
+
+ // make the constructor
+ translated
+ = ctx->get_backend ()->constructor_expression (adt, false, {from, to}, -1,
+ expr.get_locus ());
+}
+
+void
+CompileExpr::visit (HIR::ArrayIndexExpr &expr)
+{
+ tree array_reference = CompileExpr::Compile (expr.get_array_expr (), ctx);
+ tree index = CompileExpr::Compile (expr.get_index_expr (), ctx);
+
+  // this might be a core::ops::index lang item situation
+ TyTy::FnType *fntype;
+ bool is_op_overload = ctx->get_tyctx ()->lookup_operator_overload (
+ expr.get_mappings ().get_hirid (), &fntype);
+ if (is_op_overload)
+ {
+ auto lang_item_type = Analysis::RustLangItem::ItemType::INDEX;
+ tree operator_overload_call
+ = resolve_operator_overload (lang_item_type, expr, array_reference,
+ index, expr.get_array_expr (),
+ expr.get_index_expr ());
+
+ tree actual_type = TREE_TYPE (operator_overload_call);
+ bool can_indirect = TYPE_PTR_P (actual_type) || TYPE_REF_P (actual_type);
+ if (!can_indirect)
+ {
+ // nothing to do
+ translated = operator_overload_call;
+ return;
+ }
+
+      // rust deref always returns a reference from this overload, so we can
+      // actually do the indirection
+ translated
+ = indirect_expression (operator_overload_call, expr.get_locus ());
+ return;
+ }
+
+  // let's check if the array is a reference type; then we can add an
+  // indirection if required
+ TyTy::BaseType *array_expr_ty = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_type (
+ expr.get_array_expr ()->get_mappings ().get_hirid (), &array_expr_ty);
+ rust_assert (ok);
+
+ // do we need to add an indirect reference
+ if (array_expr_ty->get_kind () == TyTy::TypeKind::REF)
+ {
+ array_reference
+ = indirect_expression (array_reference, expr.get_locus ());
+ }
+
+ translated
+ = ctx->get_backend ()->array_index_expression (array_reference, index,
+ expr.get_locus ());
+}
+
+void
+CompileExpr::visit (HIR::ClosureExpr &expr)
+{
+ TyTy::BaseType *closure_expr_ty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (expr.get_mappings ().get_hirid (),
+ &closure_expr_ty))
+ {
+ rust_fatal_error (expr.get_locus (),
+ "did not resolve type for this ClosureExpr");
+ return;
+ }
+ rust_assert (closure_expr_ty->get_kind () == TyTy::TypeKind::CLOSURE);
+ TyTy::ClosureType *closure_tyty
+ = static_cast<TyTy::ClosureType *> (closure_expr_ty);
+ tree compiled_closure_tyty = TyTyResolveCompile::compile (ctx, closure_tyty);
+
+ // generate closure function
+ generate_closure_function (expr, *closure_tyty, compiled_closure_tyty);
+
+  // let's ignore state capture for now; we need to instantiate the struct
+  // anyway, then generate the function
+ std::vector<tree> vals;
+ for (const auto &capture : closure_tyty->get_captures ())
+ {
+ // lookup the HirId
+ HirId ref = UNKNOWN_HIRID;
+ bool ok = ctx->get_mappings ()->lookup_node_to_hir (capture, &ref);
+ rust_assert (ok);
+
+ // lookup the var decl
+ Bvariable *var = nullptr;
+ bool found = ctx->lookup_var_decl (ref, &var);
+ rust_assert (found);
+
+      // FIXME: this should be based on the closure move-ability
+ tree var_expr = var->get_tree (expr.get_locus ());
+ tree val = address_expression (var_expr, expr.get_locus ());
+ vals.push_back (val);
+ }
+
+ translated
+ = ctx->get_backend ()->constructor_expression (compiled_closure_tyty, false,
+ vals, -1, expr.get_locus ());
+}
+
+tree
+CompileExpr::generate_closure_function (HIR::ClosureExpr &expr,
+ TyTy::ClosureType &closure_tyty,
+ tree compiled_closure_tyty)
+{
+ TyTy::FnType *fn_tyty = nullptr;
+ tree compiled_fn_type
+ = generate_closure_fntype (expr, closure_tyty, compiled_closure_tyty,
+ &fn_tyty);
+ if (compiled_fn_type == error_mark_node)
+ return error_mark_node;
+
+ const Resolver::CanonicalPath &parent_canonical_path
+ = closure_tyty.get_ident ().path;
+ Resolver::CanonicalPath path = parent_canonical_path.append (
+ Resolver::CanonicalPath::new_seg (UNKNOWN_NODEID, "{{closure}}"));
+
+ std::string ir_symbol_name = path.get ();
+ std::string asm_name = ctx->mangle_item (&closure_tyty, path);
+
+ unsigned int flags = 0;
+ tree fndecl
+ = ctx->get_backend ()->function (compiled_fn_type, ir_symbol_name, asm_name,
+ flags, expr.get_locus ());
+
+ // insert into the context
+ ctx->insert_function_decl (fn_tyty, fndecl);
+ ctx->insert_closure_decl (&closure_tyty, fndecl);
+
+ // setup the parameters
+ std::vector<Bvariable *> param_vars;
+
+ // closure self
+ Bvariable *self_param
+ = ctx->get_backend ()->parameter_variable (fndecl, "$closure",
+ compiled_closure_tyty,
+ expr.get_locus ());
+ DECL_ARTIFICIAL (self_param->get_decl ()) = 1;
+ param_vars.push_back (self_param);
+
+ // push a new context
+ ctx->push_closure_context (expr.get_mappings ().get_hirid ());
+
+ // setup the implicit argument captures
+ size_t idx = 0;
+ for (const auto &capture : closure_tyty.get_captures ())
+ {
+ // lookup the HirId
+ HirId ref = UNKNOWN_HIRID;
+ bool ok = ctx->get_mappings ()->lookup_node_to_hir (capture, &ref);
+ rust_assert (ok);
+
+      // get the accessor
+ tree binding = ctx->get_backend ()->struct_field_expression (
+ self_param->get_tree (expr.get_locus ()), idx, expr.get_locus ());
+ tree indirection = indirect_expression (binding, expr.get_locus ());
+
+ // insert bindings
+ ctx->insert_closure_binding (ref, indirection);
+
+ // continue
+ idx++;
+ }
+
+ // args tuple
+ tree args_type
+ = TyTyResolveCompile::compile (ctx, &closure_tyty.get_parameters ());
+ Bvariable *args_param
+ = ctx->get_backend ()->parameter_variable (fndecl, "args", args_type,
+ expr.get_locus ());
+ param_vars.push_back (args_param);
+
+  // set up the implicit mappings for the arguments. Arguments are passed to
+  // closure functions as a single tuple, but the closure body expects normal
+  // arguments, so we need to destructure them, similar to what we do in
+  // MatchExpr's. That is, for a closure-param `a` we set up the destructure
+  // to take `a` from the args tuple
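+  //
+  // Illustrative example: for |a, b| a + b the generated function receives
+  // ($closure, args) where args is the tuple (a, b); the loop below binds
+  // a to args.0 and b to args.1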
+
+ tree args_param_expr = args_param->get_tree (expr.get_locus ());
+ size_t i = 0;
+ for (auto &closure_param : expr.get_params ())
+ {
+ tree compiled_param_var = ctx->get_backend ()->struct_field_expression (
+ args_param_expr, i, closure_param.get_locus ());
+
+ const HIR::Pattern &param_pattern = *closure_param.get_pattern ();
+ ctx->insert_pattern_binding (
+ param_pattern.get_pattern_mappings ().get_hirid (), compiled_param_var);
+ i++;
+ }
+
+ if (!ctx->get_backend ()->function_set_parameters (fndecl, param_vars))
+ {
+ ctx->pop_closure_context ();
+ return error_mark_node;
+ }
+
+ // lookup locals
+ HIR::Expr *function_body = expr.get_expr ().get ();
+ auto body_mappings = function_body->get_mappings ();
+ Resolver::Rib *rib = nullptr;
+ bool ok
+ = ctx->get_resolver ()->find_name_rib (body_mappings.get_nodeid (), &rib);
+ rust_assert (ok);
+
+ std::vector<Bvariable *> locals
+ = compile_locals_for_block (ctx, *rib, fndecl);
+
+ tree enclosing_scope = NULL_TREE;
+ Location start_location = function_body->get_locus ();
+ Location end_location = function_body->get_locus ();
+ bool is_block_expr
+ = function_body->get_expression_type () == HIR::Expr::ExprType::Block;
+ if (is_block_expr)
+ {
+ HIR::BlockExpr *body = static_cast<HIR::BlockExpr *> (function_body);
+ start_location = body->get_locus ();
+ end_location = body->get_end_locus ();
+ }
+
+ tree code_block = ctx->get_backend ()->block (fndecl, enclosing_scope, locals,
+ start_location, end_location);
+ ctx->push_block (code_block);
+
+ TyTy::BaseType *tyret = &closure_tyty.get_result_type ();
+ bool function_has_return = !closure_tyty.get_result_type ().is_unit ();
+ Bvariable *return_address = nullptr;
+ if (function_has_return)
+ {
+ tree return_type = TyTyResolveCompile::compile (ctx, tyret);
+
+ bool address_is_taken = false;
+ tree ret_var_stmt = NULL_TREE;
+
+ return_address = ctx->get_backend ()->temporary_variable (
+ fndecl, code_block, return_type, NULL, address_is_taken,
+ expr.get_locus (), &ret_var_stmt);
+
+ ctx->add_statement (ret_var_stmt);
+ }
+
+ ctx->push_fn (fndecl, return_address);
+
+ if (is_block_expr)
+ {
+ HIR::BlockExpr *body = static_cast<HIR::BlockExpr *> (function_body);
+ compile_function_body (ctx, fndecl, *body, true);
+ }
+ else
+ {
+ tree value = CompileExpr::Compile (function_body, ctx);
+ tree return_expr
+ = ctx->get_backend ()->return_statement (fndecl, {value},
+ function_body->get_locus ());
+ ctx->add_statement (return_expr);
+ }
+
+ tree bind_tree = ctx->pop_block ();
+
+ gcc_assert (TREE_CODE (bind_tree) == BIND_EXPR);
+ DECL_SAVED_TREE (fndecl) = bind_tree;
+
+ ctx->pop_closure_context ();
+ ctx->pop_fn ();
+ ctx->push_function (fndecl);
+
+ return fndecl;
+}
+
+tree
+CompileExpr::generate_closure_fntype (HIR::ClosureExpr &expr,
+ const TyTy::ClosureType &closure_tyty,
+ tree compiled_closure_tyty,
+ TyTy::FnType **fn_tyty)
+{
+ // grab the specified_bound
+ rust_assert (closure_tyty.num_specified_bounds () == 1);
+ const TyTy::TypeBoundPredicate &predicate
+ = *closure_tyty.get_specified_bounds ().begin ();
+
+ // ensure the fn_once_output associated type is set
+ closure_tyty.setup_fn_once_output ();
+
+ // the function signature is based on the trait bound that the closure
+ // implements which is determined at the type resolution time
+ //
+ // https://github.com/rust-lang/rust/blob/7807a694c2f079fd3f395821bcc357eee8650071/library/core/src/ops/function.rs#L54-L71
+
+ TyTy::TypeBoundPredicateItem item = TyTy::TypeBoundPredicateItem::error ();
+ if (predicate.get_name ().compare ("FnOnce") == 0)
+ {
+ item = predicate.lookup_associated_item ("call_once");
+ }
+ else if (predicate.get_name ().compare ("FnMut") == 0)
+ {
+ item = predicate.lookup_associated_item ("call_mut");
+ }
+ else if (predicate.get_name ().compare ("Fn") == 0)
+ {
+ item = predicate.lookup_associated_item ("call");
+ }
+ else
+ {
+ // FIXME error message?
+ gcc_unreachable ();
+ return error_mark_node;
+ }
+
+ rust_assert (!item.is_error ());
+
+ TyTy::BaseType *item_tyty = item.get_tyty_for_receiver (&closure_tyty);
+ rust_assert (item_tyty->get_kind () == TyTy::TypeKind::FNDEF);
+ *fn_tyty = static_cast<TyTy::FnType *> (item_tyty);
+ return TyTyResolveCompile::compile (ctx, item_tyty);
+}
+
+bool
+CompileExpr::generate_possible_fn_trait_call (HIR::CallExpr &expr,
+ tree receiver, tree *result)
+{
+ TyTy::FnType *fn_sig = nullptr;
+ bool found_overload = ctx->get_tyctx ()->lookup_operator_overload (
+ expr.get_mappings ().get_hirid (), &fn_sig);
+ if (!found_overload)
+ return false;
+
+ auto id = fn_sig->get_ty_ref ();
+ auto dId = fn_sig->get_id ();
+
+ tree function = error_mark_node;
+ bool found_closure = ctx->lookup_function_decl (id, &function, dId, fn_sig);
+ if (!found_closure)
+ {
+      // something went wrong; we still return true as this was meant to be
+      // an fn trait call
+ *result = error_mark_node;
+ return true;
+ }
+
+ // need to apply any autoderef's to the self argument
+ HirId autoderef_mappings_id = expr.get_mappings ().get_hirid ();
+ std::vector<Resolver::Adjustment> *adjustments = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_autoderef_mappings (autoderef_mappings_id,
+ &adjustments);
+ rust_assert (ok);
+
+ // apply adjustments for the fn call
+ tree self = resolve_adjustements (*adjustments, receiver, expr.get_locus ());
+
+ // resolve the arguments
+ std::vector<tree> tuple_arg_vals;
+ for (auto &argument : expr.get_arguments ())
+ {
+ auto rvalue = CompileExpr::Compile (argument.get (), ctx);
+ tuple_arg_vals.push_back (rvalue);
+ }
+
+ // this is always the 2nd argument in the function signature
+ tree fnty = TREE_TYPE (function);
+ tree fn_arg_tys = TYPE_ARG_TYPES (fnty);
+ tree tuple_args_tyty_chain = TREE_CHAIN (fn_arg_tys);
+ tree tuple_args_tyty = TREE_VALUE (tuple_args_tyty_chain);
+
+ tree tuple_args
+ = ctx->get_backend ()->constructor_expression (tuple_args_tyty, false,
+ tuple_arg_vals, -1,
+ expr.get_locus ());
+
+  // the arguments are always self, plus the tuple of the args we are
+  // passing, where self is the path of the call-expr, in this case the
+  // fn_address
+ std::vector<tree> args;
+ args.push_back (self);
+ args.push_back (tuple_args);
+
+ tree call_address = address_expression (function, expr.get_locus ());
+ *result = ctx->get_backend ()->call_expression (call_address, args,
+ nullptr /* static chain ?*/,
+ expr.get_locus ());
+ return true;
+}
+
+} // namespace Compile
+} // namespace Rust
diff --git a/gcc/rust/backend/rust-compile-expr.h b/gcc/rust/backend/rust-compile-expr.h
new file mode 100644
index 0000000..150a7a4
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-expr.h
@@ -0,0 +1,167 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_COMPILE_EXPR
+#define RUST_COMPILE_EXPR
+
+#include "rust-compile-base.h"
+
+namespace Rust {
+namespace Compile {
+
+class CompileExpr : private HIRCompileBase, protected HIR::HIRExpressionVisitor
+{
+public:
+ static tree Compile (HIR::Expr *expr, Context *ctx);
+
+ void visit (HIR::TupleIndexExpr &expr) override;
+ void visit (HIR::TupleExpr &expr) override;
+ void visit (HIR::ReturnExpr &expr) override;
+ void visit (HIR::CallExpr &expr) override;
+ void visit (HIR::MethodCallExpr &expr) override;
+ void visit (HIR::LiteralExpr &expr) override;
+ void visit (HIR::AssignmentExpr &expr) override;
+ void visit (HIR::CompoundAssignmentExpr &expr) override;
+ void visit (HIR::ArrayIndexExpr &expr) override;
+ void visit (HIR::ArrayExpr &expr) override;
+ void visit (HIR::ArithmeticOrLogicalExpr &expr) override;
+ void visit (HIR::ComparisonExpr &expr) override;
+ void visit (HIR::LazyBooleanExpr &expr) override;
+ void visit (HIR::NegationExpr &expr) override;
+ void visit (HIR::TypeCastExpr &expr) override;
+ void visit (HIR::IfExpr &expr) override;
+ void visit (HIR::IfExprConseqIf &expr) override;
+ void visit (HIR::IfExprConseqElse &expr) override;
+ void visit (HIR::BlockExpr &expr) override;
+ void visit (HIR::UnsafeBlockExpr &expr) override;
+ void visit (HIR::StructExprStruct &struct_expr) override;
+ void visit (HIR::StructExprStructFields &struct_expr) override;
+ void visit (HIR::GroupedExpr &expr) override;
+ void visit (HIR::FieldAccessExpr &expr) override;
+ void visit (HIR::QualifiedPathInExpression &expr) override;
+ void visit (HIR::PathInExpression &expr) override;
+ void visit (HIR::LoopExpr &expr) override;
+ void visit (HIR::WhileLoopExpr &expr) override;
+ void visit (HIR::BreakExpr &expr) override;
+ void visit (HIR::ContinueExpr &expr) override;
+ void visit (HIR::BorrowExpr &expr) override;
+ void visit (HIR::DereferenceExpr &expr) override;
+ void visit (HIR::MatchExpr &expr) override;
+ void visit (HIR::RangeFromToExpr &expr) override;
+ void visit (HIR::RangeFromExpr &expr) override;
+ void visit (HIR::RangeToExpr &expr) override;
+ void visit (HIR::RangeFullExpr &expr) override;
+ void visit (HIR::RangeFromToInclExpr &expr) override;
+ void visit (HIR::ClosureExpr &expr) override;
+
+ // TODO
+ void visit (HIR::ErrorPropagationExpr &) override {}
+ void visit (HIR::RangeToInclExpr &) override {}
+ void visit (HIR::ForLoopExpr &) override {}
+
+ // TODO
+  // these need to be desugared in the HIR into if statements and a match
+ void visit (HIR::WhileLetLoopExpr &) override {}
+ void visit (HIR::IfExprConseqIfLet &) override {}
+ void visit (HIR::IfLetExpr &) override {}
+ void visit (HIR::IfLetExprConseqElse &) override {}
+ void visit (HIR::IfLetExprConseqIf &) override {}
+ void visit (HIR::IfLetExprConseqIfLet &) override {}
+
+  // let's not worry about async yet...
+ void visit (HIR::AwaitExpr &) override {}
+ void visit (HIR::AsyncBlockExpr &) override {}
+
+ // nothing to do for these
+ void visit (HIR::StructExprFieldIdentifier &) override {}
+ void visit (HIR::StructExprFieldIdentifierValue &) override {}
+ void visit (HIR::StructExprFieldIndexValue &) override {}
+
+protected:
+ tree get_fn_addr_from_dyn (const TyTy::DynamicObjectType *dyn,
+ TyTy::BaseType *receiver, TyTy::FnType *fntype,
+ tree receiver_ref, Location expr_locus);
+
+ tree get_receiver_from_dyn (const TyTy::DynamicObjectType *dyn,
+ TyTy::BaseType *receiver, TyTy::FnType *fntype,
+ tree receiver_ref, Location expr_locus);
+
+ tree resolve_method_address (TyTy::FnType *fntype, HirId ref,
+ TyTy::BaseType *receiver,
+ HIR::PathIdentSegment &segment,
+ Analysis::NodeMapping expr_mappings,
+ Location expr_locus);
+
+ tree
+ resolve_operator_overload (Analysis::RustLangItem::ItemType lang_item_type,
+ HIR::OperatorExprMeta expr, tree lhs, tree rhs,
+ HIR::Expr *lhs_expr, HIR::Expr *rhs_expr);
+
+ tree compile_bool_literal (const HIR::LiteralExpr &expr,
+ const TyTy::BaseType *tyty);
+
+ tree compile_integer_literal (const HIR::LiteralExpr &expr,
+ const TyTy::BaseType *tyty);
+
+ tree compile_float_literal (const HIR::LiteralExpr &expr,
+ const TyTy::BaseType *tyty);
+
+ tree compile_char_literal (const HIR::LiteralExpr &expr,
+ const TyTy::BaseType *tyty);
+
+ tree compile_byte_literal (const HIR::LiteralExpr &expr,
+ const TyTy::BaseType *tyty);
+
+ tree compile_string_literal (const HIR::LiteralExpr &expr,
+ const TyTy::BaseType *tyty);
+
+ tree compile_byte_string_literal (const HIR::LiteralExpr &expr,
+ const TyTy::BaseType *tyty);
+
+ tree type_cast_expression (tree type_to_cast_to, tree expr, Location locus);
+
+ tree array_value_expr (Location expr_locus, const TyTy::ArrayType &array_tyty,
+ tree array_type, HIR::ArrayElemsValues &elems);
+
+ tree array_copied_expr (Location expr_locus,
+ const TyTy::ArrayType &array_tyty, tree array_type,
+ HIR::ArrayElemsCopied &elems);
+
+protected:
+ tree generate_closure_function (HIR::ClosureExpr &expr,
+ TyTy::ClosureType &closure_tyty,
+ tree compiled_closure_tyty);
+
+ tree generate_closure_fntype (HIR::ClosureExpr &expr,
+ const TyTy::ClosureType &closure_tyty,
+ tree compiled_closure_tyty,
+ TyTy::FnType **fn_tyty);
+
+ bool generate_possible_fn_trait_call (HIR::CallExpr &expr, tree receiver,
+ tree *result);
+
+private:
+ CompileExpr (Context *ctx);
+
+ tree translated;
+};
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_COMPILE_EXPR
diff --git a/gcc/rust/backend/rust-compile-extern.h b/gcc/rust/backend/rust-compile-extern.h
new file mode 100644
index 0000000..b42878e
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-extern.h
@@ -0,0 +1,172 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_COMPILE_EXTERN_ITEM
+#define RUST_COMPILE_EXTERN_ITEM
+
+#include "rust-compile-base.h"
+#include "rust-compile-intrinsic.h"
+
+namespace Rust {
+namespace Compile {
+
+class CompileExternItem : public HIRCompileBase,
+ public HIR::HIRExternalItemVisitor
+{
+public:
+ static tree compile (HIR::ExternalItem *item, Context *ctx,
+ TyTy::BaseType *concrete = nullptr,
+ bool is_query_mode = false,
+ Location ref_locus = Location ())
+ {
+ CompileExternItem compiler (ctx, concrete, ref_locus);
+ item->accept_vis (compiler);
+
+ if (is_query_mode && compiler.reference == error_mark_node)
+ rust_internal_error_at (ref_locus, "failed to compile extern item: %s",
+ item->as_string ().c_str ());
+
+ return compiler.reference;
+ }
+
+ void visit (HIR::ExternalStaticItem &item) override
+ {
+    // check if it's already been compiled
+ Bvariable *lookup = ctx->get_backend ()->error_variable ();
+ if (ctx->lookup_var_decl (item.get_mappings ().get_hirid (), &lookup))
+ {
+ reference = ctx->get_backend ()->var_expression (lookup, ref_locus);
+ return;
+ }
+
+ TyTy::BaseType *resolved_type = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_type (item.get_mappings ().get_hirid (),
+ &resolved_type);
+ rust_assert (ok);
+
+ std::string name = item.get_item_name ();
+ // FIXME this is assuming C ABI
+ std::string asm_name = name;
+
+ tree type = TyTyResolveCompile::compile (ctx, resolved_type);
+ bool is_external = true;
+ bool is_hidden = false;
+ bool in_unique_section = false;
+
+ Bvariable *static_global
+ = ctx->get_backend ()->global_variable (name, asm_name, type, is_external,
+ is_hidden, in_unique_section,
+ item.get_locus ());
+ ctx->insert_var_decl (item.get_mappings ().get_hirid (), static_global);
+ ctx->push_var (static_global);
+
+ reference = ctx->get_backend ()->var_expression (static_global, ref_locus);
+ }
+
+ void visit (HIR::ExternalFunctionItem &function) override
+ {
+ TyTy::BaseType *fntype_tyty;
+ if (!ctx->get_tyctx ()->lookup_type (function.get_mappings ().get_hirid (),
+ &fntype_tyty))
+ {
+ rust_fatal_error (function.get_locus (),
+ "failed to lookup function type");
+ return;
+ }
+
+ rust_assert (fntype_tyty->get_kind () == TyTy::TypeKind::FNDEF);
+ TyTy::FnType *fntype = static_cast<TyTy::FnType *> (fntype_tyty);
+ if (fntype->has_subsititions_defined ())
+ {
+	  // we can't do anything for this until it is used and a concrete type
+	  // is given
+ if (concrete == nullptr)
+ return;
+ else
+ {
+ rust_assert (concrete->get_kind () == TyTy::TypeKind::FNDEF);
+ fntype = static_cast<TyTy::FnType *> (concrete);
+ }
+ }
+
+  // items can be forward compiled, which means we may not need to invoke this
+  // code. We might also have already compiled this generic function.
+ tree lookup = NULL_TREE;
+ if (ctx->lookup_function_decl (fntype->get_ty_ref (), &lookup,
+ fntype->get_id (), fntype))
+ {
+ reference = address_expression (lookup, ref_locus);
+ return;
+ }
+
+ if (fntype->has_subsititions_defined ())
+ {
+	  // override the HIR lookups for the substitutions in this context
+ fntype->override_context ();
+ }
+
+ if (fntype->get_abi () == ABI::INTRINSIC)
+ {
+ Intrinsics compile (ctx);
+ tree fndecl = compile.compile (fntype);
+ ctx->insert_function_decl (fntype, fndecl);
+ return;
+ }
+
+ tree compiled_fn_type = TyTyResolveCompile::compile (ctx, fntype);
+ std::string ir_symbol_name = function.get_item_name ();
+ std::string asm_name = function.get_item_name ();
+ if (fntype->get_abi () == ABI::RUST)
+ {
+ // then we need to get the canonical path of it and mangle it
+ const Resolver::CanonicalPath *canonical_path = nullptr;
+ bool ok = ctx->get_mappings ()->lookup_canonical_path (
+ function.get_mappings ().get_nodeid (), &canonical_path);
+ rust_assert (ok);
+
+ ir_symbol_name = canonical_path->get () + fntype->subst_as_string ();
+ asm_name = ctx->mangle_item (fntype, *canonical_path);
+ }
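+    // Sketch of the effect (assuming the default legacy mangling scheme): an
+    // item with canonical path `foo::bar` gets an asm name along the lines
+    // of `_ZN3foo3bar...E`, while non-Rust ABIs keep the plain item name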
+
+ const unsigned int flags = Backend::function_is_declaration;
+ tree fndecl
+ = ctx->get_backend ()->function (compiled_fn_type, ir_symbol_name,
+ asm_name, flags, function.get_locus ());
+ TREE_PUBLIC (fndecl) = 1;
+ setup_abi_options (fndecl, fntype->get_abi ());
+
+ ctx->insert_function_decl (fntype, fndecl);
+
+ reference = address_expression (fndecl, ref_locus);
+ }
+
+private:
+ CompileExternItem (Context *ctx, TyTy::BaseType *concrete, Location ref_locus)
+ : HIRCompileBase (ctx), concrete (concrete), reference (error_mark_node),
+ ref_locus (ref_locus)
+ {}
+
+ TyTy::BaseType *concrete;
+ tree reference;
+ Location ref_locus;
+};
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_COMPILE_EXTERN_ITEM
diff --git a/gcc/rust/backend/rust-compile-fnparam.cc b/gcc/rust/backend/rust-compile-fnparam.cc
new file mode 100644
index 0000000..dda47bd
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-fnparam.cc
@@ -0,0 +1,121 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-compile-fnparam.h"
+#include "rust-compile-pattern.h"
+
+#include "gimple-expr.h"
+
+namespace Rust {
+namespace Compile {
+
+CompileFnParam::CompileFnParam (Context *ctx, tree fndecl, tree decl_type,
+ Location locus)
+ : HIRCompileBase (ctx), fndecl (fndecl), decl_type (decl_type), locus (locus),
+ compiled_param (ctx->get_backend ()->error_variable ())
+{}
+
+Bvariable *
+CompileFnParam::compile (Context *ctx, tree fndecl, HIR::FunctionParam *param,
+ tree decl_type, Location locus)
+{
+ CompileFnParam compiler (ctx, fndecl, decl_type, locus);
+ param->get_param_name ()->accept_vis (compiler);
+ return compiler.compiled_param;
+}
+
+Bvariable *
+CompileFnParam::compile (Context *ctx, tree fndecl, HIR::Pattern *param,
+ tree decl_type, Location locus)
+{
+ CompileFnParam compiler (ctx, fndecl, decl_type, locus);
+ param->accept_vis (compiler);
+ return compiler.compiled_param;
+}
+
+void
+CompileFnParam::visit (HIR::IdentifierPattern &pattern)
+{
+ if (!pattern.is_mut ())
+ decl_type = ctx->get_backend ()->immutable_type (decl_type);
+
+ compiled_param
+ = ctx->get_backend ()->parameter_variable (fndecl,
+ pattern.get_identifier (),
+ decl_type, locus);
+}
+
+void
+CompileFnParam::visit (HIR::WildcardPattern &pattern)
+{
+ decl_type = ctx->get_backend ()->immutable_type (decl_type);
+
+ compiled_param
+ = ctx->get_backend ()->parameter_variable (fndecl, "_", decl_type, locus);
+}
+
+void
+CompileFnParam::visit (HIR::StructPattern &pattern)
+{
+ // generate the anon param
+ tree tmp_ident = create_tmp_var_name ("RSTPRM");
+ std::string cpp_str_identifier = std::string (IDENTIFIER_POINTER (tmp_ident));
+
+ decl_type = ctx->get_backend ()->immutable_type (decl_type);
+ compiled_param
+ = ctx->get_backend ()->parameter_variable (fndecl, cpp_str_identifier,
+ decl_type, locus);
+
+ // setup the pattern bindings
+ tree anon_param = ctx->get_backend ()->var_expression (compiled_param, locus);
+ CompilePatternBindings::Compile (&pattern, anon_param, ctx);
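+  // e.g. (hypothetical input) given `fn f (Foo { a, b }: Foo)`, `a` and `b`
+  // are now bound to field accesses on the hidden RSTPRM parameter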
+}
+
+void
+CompileFnParam::visit (HIR::TupleStructPattern &pattern)
+{
+ // generate the anon param
+ tree tmp_ident = create_tmp_var_name ("RSTPRM");
+ std::string cpp_str_identifier = std::string (IDENTIFIER_POINTER (tmp_ident));
+
+ decl_type = ctx->get_backend ()->immutable_type (decl_type);
+ compiled_param
+ = ctx->get_backend ()->parameter_variable (fndecl, cpp_str_identifier,
+ decl_type, locus);
+
+ // setup the pattern bindings
+ tree anon_param = ctx->get_backend ()->var_expression (compiled_param, locus);
+ CompilePatternBindings::Compile (&pattern, anon_param, ctx);
+}
+
+Bvariable *
+CompileSelfParam::compile (Context *ctx, tree fndecl, HIR::SelfParam &self,
+ tree decl_type, Location locus)
+{
+ bool is_immutable
+ = self.get_self_kind () == HIR::SelfParam::ImplicitSelfKind::IMM
+ || self.get_self_kind () == HIR::SelfParam::ImplicitSelfKind::IMM_REF;
+ if (is_immutable)
+ decl_type = ctx->get_backend ()->immutable_type (decl_type);
+
+ return ctx->get_backend ()->parameter_variable (fndecl, "self", decl_type,
+ locus);
+}
+
+} // namespace Compile
+} // namespace Rust
diff --git a/gcc/rust/backend/rust-compile-fnparam.h b/gcc/rust/backend/rust-compile-fnparam.h
new file mode 100644
index 0000000..0d99814
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-fnparam.h
@@ -0,0 +1,70 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_COMPILE_FNPARAM
+#define RUST_COMPILE_FNPARAM
+
+#include "rust-compile-base.h"
+
+namespace Rust {
+namespace Compile {
+
+class CompileFnParam : private HIRCompileBase, protected HIR::HIRPatternVisitor
+{
+public:
+ static Bvariable *compile (Context *ctx, tree fndecl,
+ HIR::FunctionParam *param, tree decl_type,
+ Location locus);
+ static Bvariable *compile (Context *ctx, tree fndecl, HIR::Pattern *param,
+ tree decl_type, Location locus);
+
+ void visit (HIR::IdentifierPattern &pattern) override;
+ void visit (HIR::WildcardPattern &pattern) override;
+ void visit (HIR::StructPattern &) override;
+ void visit (HIR::TupleStructPattern &) override;
+
+ // Empty visit for unused Pattern HIR nodes.
+ void visit (HIR::GroupedPattern &) override {}
+ void visit (HIR::LiteralPattern &) override {}
+ void visit (HIR::PathInExpression &) override {}
+ void visit (HIR::QualifiedPathInExpression &) override {}
+ void visit (HIR::RangePattern &) override {}
+ void visit (HIR::ReferencePattern &) override {}
+ void visit (HIR::SlicePattern &) override {}
+ void visit (HIR::TuplePattern &) override {}
+
+private:
+ CompileFnParam (Context *ctx, tree fndecl, tree decl_type, Location locus);
+
+ tree fndecl;
+ tree decl_type;
+ Location locus;
+ Bvariable *compiled_param;
+};
+
+class CompileSelfParam : private HIRCompileBase
+{
+public:
+ static Bvariable *compile (Context *ctx, tree fndecl, HIR::SelfParam &self,
+ tree decl_type, Location locus);
+};
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_COMPILE_FNPARAM
diff --git a/gcc/rust/backend/rust-compile-implitem.cc b/gcc/rust/backend/rust-compile-implitem.cc
new file mode 100644
index 0000000..759b925
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-implitem.cc
@@ -0,0 +1,101 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-compile-implitem.h"
+#include "rust-compile-expr.h"
+#include "rust-compile-fnparam.h"
+
+namespace Rust {
+namespace Compile {
+
+void
+CompileTraitItem::visit (HIR::TraitItemConst &constant)
+{
+ rust_assert (concrete != nullptr);
+ TyTy::BaseType *resolved_type = concrete;
+
+ const Resolver::CanonicalPath *canonical_path = nullptr;
+ bool ok = ctx->get_mappings ()->lookup_canonical_path (
+ constant.get_mappings ().get_nodeid (), &canonical_path);
+ rust_assert (ok);
+
+ HIR::Expr *const_value_expr = constant.get_expr ().get ();
+ tree const_expr
+ = compile_constant_item (ctx, resolved_type, canonical_path,
+ const_value_expr, constant.get_locus ());
+ ctx->push_const (const_expr);
+ ctx->insert_const_decl (constant.get_mappings ().get_hirid (), const_expr);
+
+ reference = const_expr;
+}
+
+void
+CompileTraitItem::visit (HIR::TraitItemFunc &func)
+{
+ rust_assert (func.has_block_defined ());
+
+ rust_assert (concrete->get_kind () == TyTy::TypeKind::FNDEF);
+ TyTy::FnType *fntype = static_cast<TyTy::FnType *> (concrete);
+ fntype->monomorphize ();
+
+  // items can be forward compiled, which means we may not need to invoke this
+  // code. We might also have already compiled this generic function.
+ tree lookup = NULL_TREE;
+ if (ctx->lookup_function_decl (fntype->get_ty_ref (), &lookup,
+ fntype->get_id (), fntype))
+ {
+      // if this has been added to the list then it must be finished
+ if (ctx->function_completed (lookup))
+ {
+ tree dummy = NULL_TREE;
+ if (!ctx->lookup_function_decl (fntype->get_ty_ref (), &dummy))
+ {
+ ctx->insert_function_decl (fntype, lookup);
+ }
+
+ reference = address_expression (lookup, ref_locus);
+ return;
+ }
+ }
+
+ if (fntype->has_subsititions_defined ())
+ {
+      // override the HIR lookups for the substitutions in this context
+ fntype->override_context ();
+ }
+
+ const Resolver::CanonicalPath *canonical_path = nullptr;
+ bool ok = ctx->get_mappings ()->lookup_canonical_path (
+ func.get_mappings ().get_nodeid (), &canonical_path);
+ rust_assert (ok);
+
+ // FIXME: How do we get the proper visibility here?
+ auto vis = HIR::Visibility (HIR::Visibility::VisType::PUBLIC);
+ HIR::TraitFunctionDecl &function = func.get_decl ();
+ tree fndecl
+ = compile_function (ctx, function.get_function_name (),
+ function.get_self (), function.get_function_params (),
+ function.get_qualifiers (), vis,
+ func.get_outer_attrs (), func.get_locus (),
+ func.get_block_expr ().get (), canonical_path, fntype,
+ function.has_return_type ());
+ reference = address_expression (fndecl, ref_locus);
+}
+
+} // namespace Compile
+} // namespace Rust
diff --git a/gcc/rust/backend/rust-compile-implitem.h b/gcc/rust/backend/rust-compile-implitem.h
new file mode 100644
index 0000000..24cb3d6
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-implitem.h
@@ -0,0 +1,91 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_COMPILE_IMPLITEM_H
+#define RUST_COMPILE_IMPLITEM_H
+
+#include "rust-compile-item.h"
+#include "rust-compile-expr.h"
+#include "rust-compile-fnparam.h"
+
+namespace Rust {
+namespace Compile {
+
+// this is a proxy for HIR::ImplItems to reuse the normal HIR::Item path
+class CompileInherentImplItem : public CompileItem
+{
+public:
+ static tree Compile (HIR::ImplItem *item, Context *ctx,
+ TyTy::BaseType *concrete = nullptr,
+ bool is_query_mode = false,
+ Location ref_locus = Location ())
+ {
+ CompileInherentImplItem compiler (ctx, concrete, ref_locus);
+ item->accept_vis (compiler);
+
+ if (is_query_mode && compiler.reference == error_mark_node)
+ rust_internal_error_at (ref_locus, "failed to compile impl item: %s",
+ item->as_string ().c_str ());
+
+ return compiler.reference;
+ }
+
+private:
+ CompileInherentImplItem (Context *ctx, TyTy::BaseType *concrete,
+ Location ref_locus)
+ : CompileItem (ctx, concrete, ref_locus)
+ {}
+};
+
+class CompileTraitItem : public HIRCompileBase, public HIR::HIRTraitItemVisitor
+{
+public:
+ static tree Compile (HIR::TraitItem *item, Context *ctx,
+ TyTy::BaseType *concrete, bool is_query_mode = false,
+ Location ref_locus = Location ())
+ {
+ CompileTraitItem compiler (ctx, concrete, ref_locus);
+ item->accept_vis (compiler);
+
+ if (is_query_mode && compiler.reference == error_mark_node)
+ rust_internal_error_at (ref_locus, "failed to compile trait item: %s",
+ item->as_string ().c_str ());
+
+ return compiler.reference;
+ }
+
+ void visit (HIR::TraitItemConst &constant) override;
+ void visit (HIR::TraitItemFunc &func) override;
+
+  void visit (HIR::TraitItemType &) override {}
+
+private:
+ CompileTraitItem (Context *ctx, TyTy::BaseType *concrete, Location ref_locus)
+ : HIRCompileBase (ctx), concrete (concrete), reference (error_mark_node),
+ ref_locus (ref_locus)
+ {}
+
+ TyTy::BaseType *concrete;
+ tree reference;
+ Location ref_locus;
+};
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_COMPILE_IMPLITEM_H
diff --git a/gcc/rust/backend/rust-compile-intrinsic.cc b/gcc/rust/backend/rust-compile-intrinsic.cc
new file mode 100644
index 0000000..5522211
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-intrinsic.cc
@@ -0,0 +1,886 @@
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-compile-intrinsic.h"
+#include "rust-compile-context.h"
+#include "rust-compile-type.h"
+#include "rust-compile-expr.h"
+#include "rust-compile-fnparam.h"
+#include "rust-builtins.h"
+#include "rust-diagnostics.h"
+#include "rust-location.h"
+#include "rust-constexpr.h"
+#include "rust-tree.h"
+#include "tree-core.h"
+#include "print-tree.h"
+#include "fold-const.h"
+#include "langhooks.h"
+
+namespace Rust {
+namespace Compile {
+
+static bool
+is_basic_integer_type (TyTy::BaseType *type)
+{
+ switch (type->get_kind ())
+ {
+ case TyTy::INT:
+ case TyTy::UINT:
+ case TyTy::USIZE:
+ case TyTy::ISIZE:
+ return true;
+ default:
+      return false;
+ }
+}
+
+static bool
+check_for_basic_integer_type (const std::string &intrinsic_str, Location locus,
+ TyTy::BaseType *type)
+{
+ auto is_basic_integer = is_basic_integer_type (type);
+ if (!is_basic_integer)
+ {
+ rust_error_at (
+ locus,
+ "%s intrinsics can only be used with basic integer types (got %qs)",
+ intrinsic_str.c_str (), type->get_name ().c_str ());
+ }
+
+ return is_basic_integer;
+}
+
+static tree
+offset_handler (Context *ctx, TyTy::FnType *fntype);
+static tree
+sizeof_handler (Context *ctx, TyTy::FnType *fntype);
+static tree
+transmute_handler (Context *ctx, TyTy::FnType *fntype);
+static tree
+rotate_handler (Context *ctx, TyTy::FnType *fntype, tree_code op);
+static tree
+wrapping_op_handler_inner (Context *ctx, TyTy::FnType *fntype, tree_code op);
+static tree
+copy_nonoverlapping_handler (Context *ctx, TyTy::FnType *fntype);
+
+enum class Prefetch
+{
+ Read,
+ Write
+};
+
+static tree
+prefetch_data_handler (Context *ctx, TyTy::FnType *fntype, Prefetch kind);
+
+static inline tree
+rotate_left_handler (Context *ctx, TyTy::FnType *fntype)
+{
+ return rotate_handler (ctx, fntype, LROTATE_EXPR);
+}
+static inline tree
+rotate_right_handler (Context *ctx, TyTy::FnType *fntype)
+{
+ return rotate_handler (ctx, fntype, RROTATE_EXPR);
+}
+
+const static std::function<tree (Context *, TyTy::FnType *)>
+wrapping_op_handler (tree_code op)
+{
+ return [op] (Context *ctx, TyTy::FnType *fntype) {
+ return wrapping_op_handler_inner (ctx, fntype, op);
+ };
+}
+
+static inline tree
+prefetch_read_data (Context *ctx, TyTy::FnType *fntype)
+{
+ return prefetch_data_handler (ctx, fntype, Prefetch::Read);
+}
+static inline tree
+prefetch_write_data (Context *ctx, TyTy::FnType *fntype)
+{
+ return prefetch_data_handler (ctx, fntype, Prefetch::Write);
+}
+
+static tree
+atomic_store_handler_inner (Context *ctx, TyTy::FnType *fntype, int ordering);
+static tree
+atomic_load_handler_inner (Context *ctx, TyTy::FnType *fntype, int ordering);
+
+static inline std::function<tree (Context *, TyTy::FnType *)>
+atomic_store_handler (int ordering)
+{
+ return [ordering] (Context *ctx, TyTy::FnType *fntype) {
+ return atomic_store_handler_inner (ctx, fntype, ordering);
+ };
+}
+
+static inline std::function<tree (Context *, TyTy::FnType *)>
+atomic_load_handler (int ordering)
+{
+ return [ordering] (Context *ctx, TyTy::FnType *fntype) {
+ return atomic_load_handler_inner (ctx, fntype, ordering);
+ };
+}
+
+static inline tree
+unchecked_op_inner (Context *ctx, TyTy::FnType *fntype, tree_code op);
+
+const static std::function<tree (Context *, TyTy::FnType *)>
+unchecked_op_handler (tree_code op)
+{
+ return [op] (Context *ctx, TyTy::FnType *fntype) {
+ return unchecked_op_inner (ctx, fntype, op);
+ };
+}
+
+inline tree
+sorry_handler (Context *ctx, TyTy::FnType *fntype)
+{
+ rust_sorry_at (fntype->get_locus (), "intrinsic %qs is not yet implemented",
+ fntype->get_identifier ().c_str ());
+
+ return error_mark_node;
+}
+
+static const std::map<std::string,
+ std::function<tree (Context *, TyTy::FnType *)>>
+ generic_intrinsics = {
+ {"offset", offset_handler},
+ {"size_of", sizeof_handler},
+ {"transmute", transmute_handler},
+ {"rotate_left", rotate_left_handler},
+ {"rotate_right", rotate_right_handler},
+ {"wrapping_add", wrapping_op_handler (PLUS_EXPR)},
+ {"wrapping_sub", wrapping_op_handler (MINUS_EXPR)},
+ {"wrapping_mul", wrapping_op_handler (MULT_EXPR)},
+ {"copy_nonoverlapping", copy_nonoverlapping_handler},
+ {"prefetch_read_data", prefetch_read_data},
+ {"prefetch_write_data", prefetch_write_data},
+ {"atomic_store_seqcst", atomic_store_handler (__ATOMIC_SEQ_CST)},
+ {"atomic_store_release", atomic_store_handler (__ATOMIC_RELEASE)},
+ {"atomic_store_relaxed", atomic_store_handler (__ATOMIC_RELAXED)},
+ {"atomic_store_unordered", atomic_store_handler (__ATOMIC_RELAXED)},
+ {"atomic_load_seqcst", atomic_load_handler (__ATOMIC_SEQ_CST)},
+ {"atomic_load_acquire", atomic_load_handler (__ATOMIC_ACQUIRE)},
+ {"atomic_load_relaxed", atomic_load_handler (__ATOMIC_RELAXED)},
+ {"atomic_load_unordered", atomic_load_handler (__ATOMIC_RELAXED)},
+ {"unchecked_add", unchecked_op_handler (PLUS_EXPR)},
+ {"unchecked_sub", unchecked_op_handler (MINUS_EXPR)},
+ {"unchecked_mul", unchecked_op_handler (MULT_EXPR)},
+ {"unchecked_div", unchecked_op_handler (TRUNC_DIV_EXPR)},
+ {"unchecked_rem", unchecked_op_handler (TRUNC_MOD_EXPR)},
+ {"unchecked_shl", unchecked_op_handler (LSHIFT_EXPR)},
+ {"unchecked_shr", unchecked_op_handler (RSHIFT_EXPR)},
+};
+
+Intrinsics::Intrinsics (Context *ctx) : ctx (ctx) {}
+
+tree
+Intrinsics::compile (TyTy::FnType *fntype)
+{
+ rust_assert (fntype->get_abi () == ABI::INTRINSIC);
+
+ tree builtin = error_mark_node;
+ BuiltinsContext &builtin_ctx = BuiltinsContext::get ();
+ if (builtin_ctx.lookup_simple_builtin (fntype->get_identifier (), &builtin))
+ return builtin;
+
+  // is it a generic builtin?
+ auto it = generic_intrinsics.find (fntype->get_identifier ());
+ if (it != generic_intrinsics.end ())
+ return it->second (ctx, fntype);
+
+ Location locus = ctx->get_mappings ()->lookup_location (fntype->get_ref ());
+ rust_error_at (locus, "unknown builtin intrinsic: %s",
+ fntype->get_identifier ().c_str ());
+
+ return error_mark_node;
+}
+
+/**
+ * Items can be forward compiled which means we may not need to invoke this
+ * code. We might also have already compiled this generic function as well.
+ */
+static bool
+check_for_cached_intrinsic (Context *ctx, TyTy::FnType *fntype, tree *lookup)
+{
+ if (ctx->lookup_function_decl (fntype->get_ty_ref (), lookup,
+ fntype->get_id (), fntype))
+ {
+ // Has this been added to the list? Then it must be finished
+ if (ctx->function_completed (*lookup))
+ {
+ tree dummy = NULL_TREE;
+ if (!ctx->lookup_function_decl (fntype->get_ty_ref (), &dummy))
+ ctx->insert_function_decl (fntype, *lookup);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/**
+ * Maybe override the Hir Lookups for the substituions in this context
+ */
+static void
+maybe_override_ctx (TyTy::FnType *fntype)
+{
+ if (fntype->has_subsititions_defined ())
+ fntype->override_context ();
+}
+
+/**
+ * Compile and setup a function's parameters
+ */
+static void
+compile_fn_params (Context *ctx, TyTy::FnType *fntype, tree fndecl,
+ std::vector<Bvariable *> *compiled_param_variables,
+ std::vector<tree_node *> *compiled_param_types = nullptr)
+{
+ for (auto &parm : fntype->get_params ())
+ {
+ auto &referenced_param = parm.first;
+ auto &param_tyty = parm.second;
+ auto compiled_param_type = TyTyResolveCompile::compile (ctx, param_tyty);
+
+ Location param_locus = referenced_param->get_locus ();
+ Bvariable *compiled_param_var
+ = CompileFnParam::compile (ctx, fndecl, referenced_param,
+ compiled_param_type, param_locus);
+
+ compiled_param_variables->push_back (compiled_param_var);
+ if (compiled_param_types)
+ compiled_param_types->push_back (compiled_param_type);
+ }
+}
+
+static tree
+compile_intrinsic_function (Context *ctx, TyTy::FnType *fntype)
+{
+ maybe_override_ctx (fntype);
+
+ const Resolver::CanonicalPath &canonical_path = fntype->get_ident ().path;
+
+ tree compiled_fn_type = TyTyResolveCompile::compile (ctx, fntype);
+ std::string ir_symbol_name
+ = canonical_path.get () + fntype->subst_as_string ();
+ std::string asm_name = ctx->mangle_item (fntype, canonical_path);
+
+ unsigned int flags = 0;
+ tree fndecl
+ = ctx->get_backend ()->function (compiled_fn_type, ir_symbol_name, asm_name,
+ flags, fntype->get_ident ().locus);
+
+ TREE_PUBLIC (fndecl) = 0;
+ TREE_READONLY (fndecl) = 1;
+ DECL_ARTIFICIAL (fndecl) = 1;
+ DECL_EXTERNAL (fndecl) = 0;
+ DECL_DECLARED_INLINE_P (fndecl) = 1;
+
+ return fndecl;
+}
+
+static void
+enter_intrinsic_block (Context *ctx, tree fndecl)
+{
+ tree enclosing_scope = NULL_TREE;
+ Location start_location = Location ();
+ Location end_location = Location ();
+
+ auto block = ctx->get_backend ()->block (fndecl, enclosing_scope, {},
+ start_location, end_location);
+
+ ctx->push_block (block);
+}
+
+static void
+finalize_intrinsic_block (Context *ctx, tree fndecl)
+{
+ tree bind_tree = ctx->pop_block ();
+
+ gcc_assert (TREE_CODE (bind_tree) == BIND_EXPR);
+
+ DECL_SAVED_TREE (fndecl) = bind_tree;
+
+ ctx->push_function (fndecl);
+
+ DECL_DECLARED_CONSTEXPR_P (fndecl) = 1;
+ maybe_save_constexpr_fundef (fndecl);
+}
+
+static tree
+offset_handler (Context *ctx, TyTy::FnType *fntype)
+{
+ // offset intrinsic has two params dst pointer and offset isize
+ rust_assert (fntype->get_params ().size () == 2);
+
+ auto fndecl = compile_intrinsic_function (ctx, fntype);
+
+ std::vector<Bvariable *> param_vars;
+ compile_fn_params (ctx, fntype, fndecl, &param_vars);
+
+ auto &dst_param = param_vars.at (0);
+ auto &size_param = param_vars.at (1);
+ rust_assert (param_vars.size () == 2);
+ if (!ctx->get_backend ()->function_set_parameters (fndecl, param_vars))
+ return error_mark_node;
+
+ enter_intrinsic_block (ctx, fndecl);
+
+ // BUILTIN offset FN BODY BEGIN
+ tree dst = ctx->get_backend ()->var_expression (dst_param, Location ());
+ tree size = ctx->get_backend ()->var_expression (size_param, Location ());
+ tree pointer_offset_expr
+ = pointer_offset_expression (dst, size, BUILTINS_LOCATION);
+ auto return_statement
+ = ctx->get_backend ()->return_statement (fndecl, {pointer_offset_expr},
+ Location ());
+ ctx->add_statement (return_statement);
+ // BUILTIN offset FN BODY END
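+  // In effect (a semantic sketch, not additional codegen):
+  //   offset::<T> (dst, n)  ==>  dst + n * size_of::<T> ()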
+
+ finalize_intrinsic_block (ctx, fndecl);
+
+ return fndecl;
+}
+
+static tree
+sizeof_handler (Context *ctx, TyTy::FnType *fntype)
+{
+  // size_of has _zero_ parameters; its input is the generic type parameter
+ rust_assert (fntype->get_params ().size () == 0);
+
+ tree lookup = NULL_TREE;
+ if (check_for_cached_intrinsic (ctx, fntype, &lookup))
+ return lookup;
+
+ auto fndecl = compile_intrinsic_function (ctx, fntype);
+
+ // get the template parameter type tree fn size_of<T>();
+ rust_assert (fntype->get_num_substitutions () == 1);
+ auto &param_mapping = fntype->get_substs ().at (0);
+ const TyTy::ParamType *param_tyty = param_mapping.get_param_ty ();
+ TyTy::BaseType *resolved_tyty = param_tyty->resolve ();
+ tree template_parameter_type
+ = TyTyResolveCompile::compile (ctx, resolved_tyty);
+
+ enter_intrinsic_block (ctx, fndecl);
+
+ // BUILTIN size_of FN BODY BEGIN
+ tree size_expr = TYPE_SIZE_UNIT (template_parameter_type);
+ auto return_statement
+ = ctx->get_backend ()->return_statement (fndecl, {size_expr}, Location ());
+ ctx->add_statement (return_statement);
+ // BUILTIN size_of FN BODY END
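+  // e.g. size_of::<u32> () returns the TYPE_SIZE_UNIT constant of the
+  // compiled type, 4 on typical targets (illustrative; target-dependent)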
+
+ finalize_intrinsic_block (ctx, fndecl);
+
+ return fndecl;
+}
+
+static tree
+transmute_handler (Context *ctx, TyTy::FnType *fntype)
+{
+ // transmute intrinsic has one parameter
+ rust_assert (fntype->get_params ().size () == 1);
+
+ tree lookup = NULL_TREE;
+ if (check_for_cached_intrinsic (ctx, fntype, &lookup))
+ return lookup;
+
+ auto fndecl = compile_intrinsic_function (ctx, fntype);
+
+ std::vector<Bvariable *> param_vars;
+ std::vector<tree_node *> compiled_types;
+ compile_fn_params (ctx, fntype, fndecl, &param_vars, &compiled_types);
+
+ if (!ctx->get_backend ()->function_set_parameters (fndecl, param_vars))
+ return error_mark_node;
+
+ // param to convert
+ Bvariable *convert_me_param = param_vars.at (0);
+ tree convert_me_expr
+ = ctx->get_backend ()->var_expression (convert_me_param, Location ());
+
+ // check for transmute pre-conditions
+ tree target_type_expr = TREE_TYPE (DECL_RESULT (fndecl));
+ tree source_type_expr = compiled_types.at (0);
+ tree target_size_expr = TYPE_SIZE (target_type_expr);
+ tree source_size_expr = TYPE_SIZE (source_type_expr);
+ // for some reason, unit types and other zero-sized types return NULL for the
+ // size expressions
+ unsigned HOST_WIDE_INT target_size
+ = target_size_expr ? TREE_INT_CST_LOW (target_size_expr) : 0;
+ unsigned HOST_WIDE_INT source_size
+ = source_size_expr ? TREE_INT_CST_LOW (source_size_expr) : 0;
+
+ // size check for concrete types
+ // TODO(liushuyu): check alignment for pointers; check for dependently-sized
+ // types
+ if (target_size != source_size)
+ {
+ rust_error_at (fntype->get_locus (),
+ "cannot transmute between types of different sizes, or "
+ "dependently-sized types");
+ rust_inform (fntype->get_ident ().locus, "source type: %qs (%lu bits)",
+ fntype->get_params ().at (0).second->as_string ().c_str (),
+ (unsigned long) source_size);
+ rust_inform (fntype->get_ident ().locus, "target type: %qs (%lu bits)",
+ fntype->get_return_type ()->as_string ().c_str (),
+ (unsigned long) target_size);
+ }
+
+ enter_intrinsic_block (ctx, fndecl);
+
+ // BUILTIN transmute FN BODY BEGIN
+
+  // Return *((target_type *) &convert_me_expr)
+
+ tree t
+ = build_fold_addr_expr_loc (Location ().gcc_location (), convert_me_expr);
+ t = fold_build1_loc (Location ().gcc_location (), NOP_EXPR,
+ build_pointer_type (target_type_expr), t);
+ tree result_expr
+ = build_fold_indirect_ref_loc (Location ().gcc_location (), t);
+
+ auto return_statement
+ = ctx->get_backend ()->return_statement (fndecl, {result_expr},
+ Location ());
+ ctx->add_statement (return_statement);
+ // BUILTIN transmute FN BODY END
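+  // Sketch of the effect: transmute::<f32, u32> (x) compiles to roughly
+  //   *((u32 *) &x)
+  // a pure bit reinterpretation, valid here only because the size check
+  // above already rejected mismatched sizes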
+
+ finalize_intrinsic_block (ctx, fndecl);
+
+ return fndecl;
+}
+
+static tree
+rotate_handler (Context *ctx, TyTy::FnType *fntype, tree_code op)
+{
+  // rotate intrinsics have two parameters
+ rust_assert (fntype->get_params ().size () == 2);
+
+ tree lookup = NULL_TREE;
+ if (check_for_cached_intrinsic (ctx, fntype, &lookup))
+ return lookup;
+
+ auto fndecl = compile_intrinsic_function (ctx, fntype);
+
+ // setup the params
+ std::vector<Bvariable *> param_vars;
+ compile_fn_params (ctx, fntype, fndecl, &param_vars);
+
+ auto &x_param = param_vars.at (0);
+ auto &y_param = param_vars.at (1);
+ rust_assert (param_vars.size () == 2);
+ if (!ctx->get_backend ()->function_set_parameters (fndecl, param_vars))
+ return error_mark_node;
+
+ enter_intrinsic_block (ctx, fndecl);
+
+ // BUILTIN rotate FN BODY BEGIN
+ tree x = ctx->get_backend ()->var_expression (x_param, Location ());
+ tree y = ctx->get_backend ()->var_expression (y_param, Location ());
+ tree rotate_expr
+ = fold_build2_loc (BUILTINS_LOCATION, op, TREE_TYPE (x), x, y);
+ auto return_statement
+ = ctx->get_backend ()->return_statement (fndecl, {rotate_expr},
+ Location ());
+ ctx->add_statement (return_statement);
+ // BUILTIN rotate FN BODY END
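+  // e.g. rotate_left (0b1000_0001u8, 1) == 0b0000_0011: the shifted-out bit
+  // re-enters at the other end via LROTATE_EXPR / RROTATE_EXPR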
+
+ finalize_intrinsic_block (ctx, fndecl);
+
+ return fndecl;
+}
+
+/**
+ * pub fn wrapping_{add, sub, mul}<T>(lhs: T, rhs: T) -> T;
+ */
+static tree
+wrapping_op_handler_inner (Context *ctx, TyTy::FnType *fntype, tree_code op)
+{
+  // wrapping_<op> intrinsics have two parameters
+ rust_assert (fntype->get_params ().size () == 2);
+
+ tree lookup = NULL_TREE;
+ if (check_for_cached_intrinsic (ctx, fntype, &lookup))
+ return lookup;
+
+ auto fndecl = compile_intrinsic_function (ctx, fntype);
+
+ // setup the params
+ std::vector<Bvariable *> param_vars;
+ compile_fn_params (ctx, fntype, fndecl, &param_vars);
+
+ auto &lhs_param = param_vars.at (0);
+ auto &rhs_param = param_vars.at (1);
+
+ if (!ctx->get_backend ()->function_set_parameters (fndecl, param_vars))
+ return error_mark_node;
+
+ enter_intrinsic_block (ctx, fndecl);
+
+ // BUILTIN wrapping_<op> FN BODY BEGIN
+ auto lhs = ctx->get_backend ()->var_expression (lhs_param, Location ());
+ auto rhs = ctx->get_backend ()->var_expression (rhs_param, Location ());
+
+  // Operations are always wrapping in Rust, as we have -fwrapv enabled by
+  // default. The difference between a wrapping_{add, sub, mul} and a regular
+  // arithmetic operation is that these intrinsics do not panic: they always
+  // wrap around.
+ auto wrap_expr = build2 (op, TREE_TYPE (lhs), lhs, rhs);
+
+ auto return_statement
+ = ctx->get_backend ()->return_statement (fndecl, {wrap_expr}, Location ());
+ ctx->add_statement (return_statement);
+ // BUILTIN wrapping_<op> FN BODY END
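+  // e.g. wrapping_add (255u8, 1u8) == 0u8: the result wraps modulo 2^8
+  // rather than panicking like a checked `+` would in a debug build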
+
+ finalize_intrinsic_block (ctx, fndecl);
+
+ return fndecl;
+}
+
+/**
+ * fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize);
+ */
+static tree
+copy_nonoverlapping_handler (Context *ctx, TyTy::FnType *fntype)
+{
+ rust_assert (fntype->get_params ().size () == 3);
+ rust_assert (fntype->get_num_substitutions () == 1);
+
+ tree lookup = NULL_TREE;
+ if (check_for_cached_intrinsic (ctx, fntype, &lookup))
+ return lookup;
+
+ auto fndecl = compile_intrinsic_function (ctx, fntype);
+
+  // Most intrinsic functions are pure, but not `copy_nonoverlapping`
+ TREE_READONLY (fndecl) = 0;
+ TREE_SIDE_EFFECTS (fndecl) = 1;
+
+ // setup the params
+ std::vector<Bvariable *> param_vars;
+ compile_fn_params (ctx, fntype, fndecl, &param_vars);
+
+ if (!ctx->get_backend ()->function_set_parameters (fndecl, param_vars))
+ return error_mark_node;
+
+ enter_intrinsic_block (ctx, fndecl);
+
+ // BUILTIN copy_nonoverlapping BODY BEGIN
+
+ auto src = ctx->get_backend ()->var_expression (param_vars[0], Location ());
+ auto dst = ctx->get_backend ()->var_expression (param_vars[1], Location ());
+ auto count = ctx->get_backend ()->var_expression (param_vars[2], Location ());
+
+  // We want to create the following call
+  //   memcpy (dst, src, count * size_of::<T> ())
+  // where the byte count is computed below as size_expr
+
+ auto *resolved_ty = fntype->get_substs ().at (0).get_param_ty ()->resolve ();
+ auto param_type = TyTyResolveCompile::compile (ctx, resolved_ty);
+
+ tree size_expr
+ = build2 (MULT_EXPR, size_type_node, TYPE_SIZE_UNIT (param_type), count);
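+  // e.g. (illustrative) with T = u32 and count = 4 this is
+  // memcpy (dst, src, 16)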
+
+ tree memcpy_raw = nullptr;
+ BuiltinsContext::get ().lookup_simple_builtin ("memcpy", &memcpy_raw);
+ rust_assert (memcpy_raw);
+ auto memcpy
+ = build_fold_addr_expr_loc (Location ().gcc_location (), memcpy_raw);
+
+ auto copy_call
+ = ctx->get_backend ()->call_expression (memcpy, {dst, src, size_expr},
+ nullptr, Location ());
+
+ ctx->add_statement (copy_call);
+
+ // BUILTIN copy_nonoverlapping BODY END
+
+ finalize_intrinsic_block (ctx, fndecl);
+
+ return fndecl;
+}
+
+static tree
+make_unsigned_long_tree (Context *ctx, unsigned long value)
+{
+ mpz_t mpz_value;
+ mpz_init_set_ui (mpz_value, value);
+
+ return ctx->get_backend ()->integer_constant_expression (integer_type_node,
+ mpz_value);
+}
+
+static tree
+prefetch_data_handler (Context *ctx, TyTy::FnType *fntype, Prefetch kind)
+{
+ rust_assert (fntype->get_params ().size () == 2);
+
+ tree lookup = NULL_TREE;
+ if (check_for_cached_intrinsic (ctx, fntype, &lookup))
+ return lookup;
+
+ auto fndecl = compile_intrinsic_function (ctx, fntype);
+
+ // prefetching isn't pure and shouldn't be discarded after GIMPLE
+ TREE_READONLY (fndecl) = 0;
+ TREE_SIDE_EFFECTS (fndecl) = 1;
+
+ std::vector<Bvariable *> args;
+ compile_fn_params (ctx, fntype, fndecl, &args);
+
+ if (!ctx->get_backend ()->function_set_parameters (fndecl, args))
+ return error_mark_node;
+
+ enter_intrinsic_block (ctx, fndecl);
+
+ auto addr = ctx->get_backend ()->var_expression (args[0], Location ());
+ auto locality = ctx->get_backend ()->var_expression (args[1], Location ());
+ auto rw_flag = make_unsigned_long_tree (ctx, kind == Prefetch::Write ? 1 : 0);
+
+ auto prefetch_raw = NULL_TREE;
+ auto ok
+ = BuiltinsContext::get ().lookup_simple_builtin ("prefetch", &prefetch_raw);
+ rust_assert (ok);
+ auto prefetch
+ = build_fold_addr_expr_loc (Location ().gcc_location (), prefetch_raw);
+
+ auto prefetch_call
+ = ctx->get_backend ()->call_expression (prefetch, {addr, rw_flag, locality},
+ nullptr, Location ());
+
+ TREE_READONLY (prefetch_call) = 0;
+ TREE_SIDE_EFFECTS (prefetch_call) = 1;
+
+ ctx->add_statement (prefetch_call);
+
+ finalize_intrinsic_block (ctx, fndecl);
+
+ return fndecl;
+}
+
+static std::string
+build_atomic_builtin_name (const std::string &prefix, Location locus,
+ TyTy::BaseType *operand_type)
+{
+ static const std::map<std::string, std::string> allowed_types = {
+ {"i8", "1"}, {"i16", "2"}, {"i32", "4"}, {"i64", "8"},
+ {"i128", "16"}, {"isize", "8"}, {"u8", "1"}, {"u16", "2"},
+ {"u32", "4"}, {"u64", "8"}, {"u128", "16"}, {"usize", "8"},
+ };
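+  // The mapped digit becomes the builtin suffix: an i32 operand with prefix
+  // "atomic_store_" yields "atomic_store_4", which is assumed to resolve to
+  // the sized GCC builtin (__atomic_store_4) in the callers' lookups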
+
+ // TODO: Can we maybe get the generic version (atomic_store_n) to work... This
+ // would be so much better
+
+ std::string result = prefix;
+
+ auto type_name = operand_type->get_name ();
+ if (type_name == "usize" || type_name == "isize")
+ {
+ rust_sorry_at (
+ locus, "atomics are not yet available for size types (usize, isize)");
+ return "";
+ }
+
+  if (!check_for_basic_integer_type ("atomic", locus, operand_type))
+    return "";
+
+  auto type_size_str = allowed_types.find (type_name);
+  rust_assert (type_size_str != allowed_types.end ());
+
+  result += type_size_str->second;
+
+ return result;
+}
+
+static tree
+atomic_store_handler_inner (Context *ctx, TyTy::FnType *fntype, int ordering)
+{
+ rust_assert (fntype->get_params ().size () == 2);
+ rust_assert (fntype->get_num_substitutions () == 1);
+
+ tree lookup = NULL_TREE;
+ if (check_for_cached_intrinsic (ctx, fntype, &lookup))
+ return lookup;
+
+ auto fndecl = compile_intrinsic_function (ctx, fntype);
+
+ // Most intrinsic functions are pure but not the atomic ones
+ TREE_READONLY (fndecl) = 0;
+ TREE_SIDE_EFFECTS (fndecl) = 1;
+
+ // setup the params
+ std::vector<Bvariable *> param_vars;
+ std::vector<tree> types;
+ compile_fn_params (ctx, fntype, fndecl, &param_vars, &types);
+
+ auto ok = ctx->get_backend ()->function_set_parameters (fndecl, param_vars);
+ rust_assert (ok);
+
+ enter_intrinsic_block (ctx, fndecl);
+
+ auto dst = ctx->get_backend ()->var_expression (param_vars[0], Location ());
+ TREE_READONLY (dst) = 0;
+
+ auto value = ctx->get_backend ()->var_expression (param_vars[1], Location ());
+ auto memorder = make_unsigned_long_tree (ctx, ordering);
+
+ auto monomorphized_type
+ = fntype->get_substs ()[0].get_param_ty ()->resolve ();
+
+ auto builtin_name
+ = build_atomic_builtin_name ("atomic_store_", fntype->get_locus (),
+ monomorphized_type);
+ if (builtin_name.empty ())
+ return error_mark_node;
+
+ tree atomic_store_raw = nullptr;
+ BuiltinsContext::get ().lookup_simple_builtin (builtin_name,
+ &atomic_store_raw);
+ rust_assert (atomic_store_raw);
+
+ auto atomic_store
+ = build_fold_addr_expr_loc (Location ().gcc_location (), atomic_store_raw);
+
+ auto store_call
+ = ctx->get_backend ()->call_expression (atomic_store,
+ {dst, value, memorder}, nullptr,
+ Location ());
+ TREE_READONLY (store_call) = 0;
+ TREE_SIDE_EFFECTS (store_call) = 1;
+
+ ctx->add_statement (store_call);
+ finalize_intrinsic_block (ctx, fndecl);
+
+ return fndecl;
+}
+
+static tree
+atomic_load_handler_inner (Context *ctx, TyTy::FnType *fntype, int ordering)
+{
+ rust_assert (fntype->get_params ().size () == 1);
+ rust_assert (fntype->get_num_substitutions () == 1);
+
+ tree lookup = NULL_TREE;
+ if (check_for_cached_intrinsic (ctx, fntype, &lookup))
+ return lookup;
+
+ auto fndecl = compile_intrinsic_function (ctx, fntype);
+
+ // Most intrinsic functions are pure but not the atomic ones
+  // FIXME: Is atomic_load_* pure? It feels like it should not be
+ TREE_READONLY (fndecl) = 0;
+ TREE_SIDE_EFFECTS (fndecl) = 1;
+
+ // setup the params
+ std::vector<Bvariable *> param_vars;
+ std::vector<tree> types;
+ compile_fn_params (ctx, fntype, fndecl, &param_vars, &types);
+
+ auto ok = ctx->get_backend ()->function_set_parameters (fndecl, param_vars);
+ rust_assert (ok);
+
+ enter_intrinsic_block (ctx, fndecl);
+
+ auto src = ctx->get_backend ()->var_expression (param_vars[0], Location ());
+ auto memorder = make_unsigned_long_tree (ctx, ordering);
+
+ auto monomorphized_type
+ = fntype->get_substs ()[0].get_param_ty ()->resolve ();
+
+ auto builtin_name
+ = build_atomic_builtin_name ("atomic_load_", fntype->get_locus (),
+ monomorphized_type);
+ if (builtin_name.empty ())
+ return error_mark_node;
+
+ tree atomic_load_raw = nullptr;
+ BuiltinsContext::get ().lookup_simple_builtin (builtin_name,
+ &atomic_load_raw);
+ rust_assert (atomic_load_raw);
+
+ auto atomic_load
+ = build_fold_addr_expr_loc (Location ().gcc_location (), atomic_load_raw);
+
+ auto load_call
+ = ctx->get_backend ()->call_expression (atomic_load, {src, memorder},
+ nullptr, Location ());
+ auto return_statement
+ = ctx->get_backend ()->return_statement (fndecl, {load_call}, Location ());
+
+ TREE_READONLY (load_call) = 0;
+ TREE_SIDE_EFFECTS (load_call) = 1;
+
+ ctx->add_statement (return_statement);
+ finalize_intrinsic_block (ctx, fndecl);
+
+ return fndecl;
+}
+
+static inline tree
+unchecked_op_inner (Context *ctx, TyTy::FnType *fntype, tree_code op)
+{
+ rust_assert (fntype->get_params ().size () == 2);
+ rust_assert (fntype->get_num_substitutions () == 1);
+
+ tree lookup = NULL_TREE;
+ if (check_for_cached_intrinsic (ctx, fntype, &lookup))
+ return lookup;
+
+ auto fndecl = compile_intrinsic_function (ctx, fntype);
+
+ // setup the params
+ std::vector<Bvariable *> param_vars;
+ compile_fn_params (ctx, fntype, fndecl, &param_vars);
+
+ if (!ctx->get_backend ()->function_set_parameters (fndecl, param_vars))
+ return error_mark_node;
+
+ enter_intrinsic_block (ctx, fndecl);
+
+ // BUILTIN unchecked_<op> BODY BEGIN
+
+ auto x = ctx->get_backend ()->var_expression (param_vars[0], Location ());
+ auto y = ctx->get_backend ()->var_expression (param_vars[1], Location ());
+
+ auto *monomorphized_type
+ = fntype->get_substs ().at (0).get_param_ty ()->resolve ();
+
+ check_for_basic_integer_type ("unchecked operation", fntype->get_locus (),
+ monomorphized_type);
+
+ auto expr = build2 (op, TREE_TYPE (x), x, y);
+ auto return_statement
+ = ctx->get_backend ()->return_statement (fndecl, {expr}, Location ());
+
+ ctx->add_statement (return_statement);
+
+ // BUILTIN unchecked_<op> BODY END
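+  // e.g. unchecked_div maps straight to TRUNC_DIV_EXPR: no division-by-zero
+  // or overflow checks are emitted, matching the intrinsic's contract that
+  // misuse is undefined behaviour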
+
+ finalize_intrinsic_block (ctx, fndecl);
+
+ return fndecl;
+}
+
+} // namespace Compile
+} // namespace Rust
diff --git a/gcc/rust/backend/rust-compile-intrinsic.h b/gcc/rust/backend/rust-compile-intrinsic.h
new file mode 100644
index 0000000..dceb086
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-intrinsic.h
@@ -0,0 +1,40 @@
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_COMPILE_INTRINSIC
+#define RUST_COMPILE_INTRINSIC
+
+#include "rust-compile-context.h"
+#include "langhooks.h"
+
+namespace Rust {
+namespace Compile {
+
+class Intrinsics
+{
+public:
+ Intrinsics (Context *ctx);
+
+ tree compile (TyTy::FnType *fntype);
+
+private:
+ Context *ctx;
+};
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_COMPILE_INTRINSIC
diff --git a/gcc/rust/backend/rust-compile-item.cc b/gcc/rust/backend/rust-compile-item.cc
new file mode 100644
index 0000000..b2e9b3f
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-item.cc
@@ -0,0 +1,219 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-compile-item.h"
+#include "rust-compile-implitem.h"
+#include "rust-compile-expr.h"
+#include "rust-compile-extern.h"
+#include "rust-constexpr.h"
+
+namespace Rust {
+namespace Compile {
+
+void
+CompileItem::visit (HIR::StaticItem &var)
+{
+ // have we already compiled this?
+ Bvariable *static_decl_ref = nullptr;
+ if (ctx->lookup_var_decl (var.get_mappings ().get_hirid (), &static_decl_ref))
+ {
+ reference
+ = ctx->get_backend ()->var_expression (static_decl_ref, ref_locus);
+ return;
+ }
+
+ TyTy::BaseType *resolved_type = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_type (var.get_mappings ().get_hirid (),
+ &resolved_type);
+ rust_assert (ok);
+
+ tree type = TyTyResolveCompile::compile (ctx, resolved_type);
+
+ const Resolver::CanonicalPath *canonical_path = nullptr;
+ ok = ctx->get_mappings ()->lookup_canonical_path (
+ var.get_mappings ().get_nodeid (), &canonical_path);
+ rust_assert (ok);
+
+ HIR::Expr *const_value_expr = var.get_expr ();
+ ctx->push_const_context ();
+ tree value = compile_constant_item (ctx, resolved_type, canonical_path,
+ const_value_expr, var.get_locus ());
+ ctx->pop_const_context ();
+
+ std::string name = canonical_path->get ();
+ std::string asm_name = ctx->mangle_item (resolved_type, *canonical_path);
+
+ bool is_external = false;
+ bool is_hidden = false;
+ bool in_unique_section = true;
+
+ Bvariable *static_global
+ = ctx->get_backend ()->global_variable (name, asm_name, type, is_external,
+ is_hidden, in_unique_section,
+ var.get_locus ());
+ ctx->get_backend ()->global_variable_set_init (static_global, value);
+
+ ctx->insert_var_decl (var.get_mappings ().get_hirid (), static_global);
+ ctx->push_var (static_global);
+
+ reference = ctx->get_backend ()->var_expression (static_global, ref_locus);
+}
+
+void
+CompileItem::visit (HIR::ConstantItem &constant)
+{
+ if (ctx->lookup_const_decl (constant.get_mappings ().get_hirid (),
+ &reference))
+ return;
+
+ // resolve the type
+ TyTy::BaseType *resolved_type = nullptr;
+ bool ok
+ = ctx->get_tyctx ()->lookup_type (constant.get_mappings ().get_hirid (),
+ &resolved_type);
+ rust_assert (ok);
+
+ // canonical path
+ const Resolver::CanonicalPath *canonical_path = nullptr;
+ ok = ctx->get_mappings ()->lookup_canonical_path (
+ constant.get_mappings ().get_nodeid (), &canonical_path);
+ rust_assert (ok);
+
+ HIR::Expr *const_value_expr = constant.get_expr ();
+ ctx->push_const_context ();
+ tree const_expr
+ = compile_constant_item (ctx, resolved_type, canonical_path,
+ const_value_expr, constant.get_locus ());
+ ctx->pop_const_context ();
+
+ ctx->push_const (const_expr);
+ ctx->insert_const_decl (constant.get_mappings ().get_hirid (), const_expr);
+ reference = const_expr;
+}
+
+void
+CompileItem::visit (HIR::Function &function)
+{
+ TyTy::BaseType *fntype_tyty;
+ if (!ctx->get_tyctx ()->lookup_type (function.get_mappings ().get_hirid (),
+ &fntype_tyty))
+ {
+ rust_fatal_error (function.get_locus (),
+ "failed to lookup function type");
+ return;
+ }
+
+ rust_assert (fntype_tyty->get_kind () == TyTy::TypeKind::FNDEF);
+ TyTy::FnType *fntype = static_cast<TyTy::FnType *> (fntype_tyty);
+ if (fntype->has_subsititions_defined ())
+ {
+      // we can't do anything for this until it is used and a concrete type
+      // is given
+ if (concrete == nullptr)
+ return;
+ else
+ {
+ rust_assert (concrete->get_kind () == TyTy::TypeKind::FNDEF);
+ fntype = static_cast<TyTy::FnType *> (concrete);
+ fntype->monomorphize ();
+ }
+ }
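+  // Sketch: for a generic `fn identity<T> (x: T) -> T` nothing is emitted at
+  // the definition site; each use such as `identity::<i32>` re-enters this
+  // visitor with `concrete` set so the monomorphized FnType can be compiled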
+
+ const Resolver::CanonicalPath *canonical_path = nullptr;
+ bool ok = ctx->get_mappings ()->lookup_canonical_path (
+ function.get_mappings ().get_nodeid (), &canonical_path);
+ rust_assert (ok);
+
+ const std::string asm_name = ctx->mangle_item (fntype, *canonical_path);
+
+  // items can be forward compiled, which means we may not need to invoke this
+  // code. We might also have already compiled this generic function.
+ tree lookup = NULL_TREE;
+ if (ctx->lookup_function_decl (fntype->get_ty_ref (), &lookup,
+ fntype->get_id (), fntype, asm_name))
+ {
+      // if this has been added to the list then it must be finished
+ if (ctx->function_completed (lookup))
+ {
+ tree dummy = NULL_TREE;
+ if (!ctx->lookup_function_decl (fntype->get_ty_ref (), &dummy))
+ {
+ ctx->insert_function_decl (fntype, lookup);
+ }
+
+ reference = address_expression (lookup, ref_locus);
+ return;
+ }
+ }
+
+ if (fntype->has_subsititions_defined ())
+ {
+      // override the HIR lookups for the substitutions in this context
+ fntype->override_context ();
+ }
+
+ if (function.get_qualifiers ().is_const ())
+ ctx->push_const_context ();
+
+ tree fndecl
+ = compile_function (ctx, function.get_function_name (),
+ function.get_self_param (),
+ function.get_function_params (),
+ function.get_qualifiers (), function.get_visibility (),
+ function.get_outer_attrs (), function.get_locus (),
+ function.get_definition ().get (), canonical_path,
+ fntype, function.has_function_return_type ());
+ reference = address_expression (fndecl, ref_locus);
+
+ if (function.get_qualifiers ().is_const ())
+ ctx->pop_const_context ();
+}
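+
+// Sketch of the monomorphization rule above (illustrative only): a
+// generic function is skipped until a concrete substitution is known,
+// then compiled once per instantiation.
+//
+//   fn identity<T> (x: T) -> T { x }
+//
+//   fn main () {
+//     let a = identity (5i32); // instantiates identity::<i32>
+//     let b = identity (true); // instantiates identity::<bool>
+//     println! ("{} {}", a, b);
+//   }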
+
+void
+CompileItem::visit (HIR::ImplBlock &impl_block)
+{
+ TyTy::BaseType *self_lookup = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (
+ impl_block.get_type ()->get_mappings ().get_hirid (), &self_lookup))
+ {
+ rust_error_at (impl_block.get_locus (), "failed to resolve type of impl");
+ return;
+ }
+
+ for (auto &impl_item : impl_block.get_impl_items ())
+ CompileInherentImplItem::Compile (impl_item.get (), ctx);
+}
+
+void
+CompileItem::visit (HIR::ExternBlock &extern_block)
+{
+ for (auto &item : extern_block.get_extern_items ())
+ {
+ CompileExternItem::compile (item.get (), ctx, concrete);
+ }
+}
+
+void
+CompileItem::visit (HIR::Module &module)
+{
+ for (auto &item : module.get_items ())
+ CompileItem::compile (item.get (), ctx);
+}
+
+} // namespace Compile
+} // namespace Rust
diff --git a/gcc/rust/backend/rust-compile-item.h b/gcc/rust/backend/rust-compile-item.h
new file mode 100644
index 0000000..ae3fdf6
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-item.h
@@ -0,0 +1,88 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_COMPILE_ITEM
+#define RUST_COMPILE_ITEM
+
+#include "rust-compile-base.h"
+
+namespace Rust {
+namespace Compile {
+
+class CompileItem : private HIRCompileBase, protected HIR::HIRStmtVisitor
+{
+protected:
+public:
+ static tree compile (HIR::Item *item, Context *ctx,
+ TyTy::BaseType *concrete = nullptr,
+ bool is_query_mode = false,
+ Location ref_locus = Location ())
+ {
+ CompileItem compiler (ctx, concrete, ref_locus);
+ item->accept_vis (compiler);
+
+ if (is_query_mode && compiler.reference == error_mark_node)
+ rust_internal_error_at (ref_locus, "failed to compile item: %s",
+ item->as_string ().c_str ());
+
+ return compiler.reference;
+ }
+
+ void visit (HIR::StaticItem &var) override;
+ void visit (HIR::ConstantItem &constant) override;
+ void visit (HIR::Function &function) override;
+ void visit (HIR::ImplBlock &impl_block) override;
+ void visit (HIR::ExternBlock &extern_block) override;
+ void visit (HIR::Module &module) override;
+
+ // Empty visit for unused Stmt HIR nodes.
+ void visit (HIR::TupleStruct &) override {}
+ void visit (HIR::EnumItem &) override {}
+ void visit (HIR::EnumItemTuple &) override {}
+ void visit (HIR::EnumItemStruct &) override {}
+ void visit (HIR::EnumItemDiscriminant &) override {}
+ void visit (HIR::TypePathSegmentFunction &) override {}
+ void visit (HIR::TypePath &) override {}
+ void visit (HIR::QualifiedPathInType &) override {}
+ void visit (HIR::ExternCrate &) override {}
+ void visit (HIR::UseDeclaration &) override {}
+ void visit (HIR::TypeAlias &) override {}
+ void visit (HIR::StructStruct &) override {}
+ void visit (HIR::Enum &) override {}
+ void visit (HIR::Union &) override {}
+ void visit (HIR::Trait &) override {}
+ void visit (HIR::EmptyStmt &) override {}
+ void visit (HIR::LetStmt &) override {}
+ void visit (HIR::ExprStmtWithoutBlock &) override {}
+ void visit (HIR::ExprStmtWithBlock &) override {}
+
+protected:
+ CompileItem (Context *ctx, TyTy::BaseType *concrete, Location ref_locus)
+ : HIRCompileBase (ctx), concrete (concrete), reference (error_mark_node),
+ ref_locus (ref_locus)
+ {}
+
+ TyTy::BaseType *concrete;
+ tree reference;
+ Location ref_locus;
+};
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_COMPILE_ITEM
diff --git a/gcc/rust/backend/rust-compile-pattern.cc b/gcc/rust/backend/rust-compile-pattern.cc
new file mode 100644
index 0000000..fc70d4b
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-pattern.cc
@@ -0,0 +1,333 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-compile-pattern.h"
+#include "rust-compile-expr.h"
+#include "rust-compile-resolve-path.h"
+#include "rust-constexpr.h"
+
+namespace Rust {
+namespace Compile {
+
+void
+CompilePatternCaseLabelExpr::visit (HIR::PathInExpression &pattern)
+{
+ // lookup the type
+ TyTy::BaseType *lookup = nullptr;
+ bool ok
+ = ctx->get_tyctx ()->lookup_type (pattern.get_mappings ().get_hirid (),
+ &lookup);
+ rust_assert (ok);
+
+ // this must be an enum
+ rust_assert (lookup->get_kind () == TyTy::TypeKind::ADT);
+ TyTy::ADTType *adt = static_cast<TyTy::ADTType *> (lookup);
+ rust_assert (adt->is_enum ());
+
+ // lookup the variant
+ HirId variant_id;
+ ok = ctx->get_tyctx ()->lookup_variant_definition (
+ pattern.get_mappings ().get_hirid (), &variant_id);
+ rust_assert (ok);
+
+ TyTy::VariantDef *variant = nullptr;
+ ok = adt->lookup_variant_by_id (variant_id, &variant);
+ rust_assert (ok);
+
+ HIR::Expr *discrim_expr = variant->get_discriminant ();
+ tree discrim_expr_node = CompileExpr::Compile (discrim_expr, ctx);
+ tree folded_discrim_expr = fold_expr (discrim_expr_node);
+ tree case_low = folded_discrim_expr;
+
+ case_label_expr
+ = build_case_label (case_low, NULL_TREE, associated_case_label);
+}
+
+void
+CompilePatternCaseLabelExpr::visit (HIR::StructPattern &pattern)
+{
+ CompilePatternCaseLabelExpr::visit (pattern.get_path ());
+}
+
+void
+CompilePatternCaseLabelExpr::visit (HIR::TupleStructPattern &pattern)
+{
+ CompilePatternCaseLabelExpr::visit (pattern.get_path ());
+}
+
+void
+CompilePatternCaseLabelExpr::visit (HIR::WildcardPattern &pattern)
+{
+  // operand 0 being NULL_TREE signifies this is the default case label; see
+  // tree.def for the documentation of CASE_LABEL_EXPR
+ case_label_expr
+ = build_case_label (NULL_TREE, NULL_TREE, associated_case_label);
+}
+
+void
+CompilePatternCaseLabelExpr::visit (HIR::LiteralPattern &pattern)
+{
+ // Compile the literal
+ HIR::LiteralExpr *litexpr
+ = new HIR::LiteralExpr (pattern.get_pattern_mappings (),
+ pattern.get_literal (), pattern.get_locus (),
+ std::vector<AST::Attribute> ());
+
+ // Note: Floating point literals are currently accepted but will likely be
+ // forbidden in LiteralPatterns in a future version of Rust.
+ // See: https://github.com/rust-lang/rust/issues/41620
+ // For now, we cannot compile them anyway as CASE_LABEL_EXPR does not support
+ // floating point types.
+ if (pattern.get_literal ().get_lit_type () == HIR::Literal::LitType::FLOAT)
+ {
+ rust_sorry_at (pattern.get_locus (), "floating-point literal in pattern");
+ }
+
+ tree lit = CompileExpr::Compile (litexpr, ctx);
+
+ case_label_expr = build_case_label (lit, NULL_TREE, associated_case_label);
+}
+
+static tree
+compile_range_pattern_bound (HIR::RangePatternBound *bound,
+ Analysis::NodeMapping mappings, Location locus,
+ Context *ctx)
+{
+ tree result = NULL_TREE;
+ switch (bound->get_bound_type ())
+ {
+ case HIR::RangePatternBound::RangePatternBoundType::LITERAL: {
+ HIR::RangePatternBoundLiteral &ref
+ = *static_cast<HIR::RangePatternBoundLiteral *> (bound);
+
+ HIR::LiteralExpr *litexpr
+ = new HIR::LiteralExpr (mappings, ref.get_literal (), locus,
+ std::vector<AST::Attribute> ());
+
+ result = CompileExpr::Compile (litexpr, ctx);
+ }
+ break;
+
+ case HIR::RangePatternBound::RangePatternBoundType::PATH: {
+ HIR::RangePatternBoundPath &ref
+ = *static_cast<HIR::RangePatternBoundPath *> (bound);
+
+ result = ResolvePathRef::Compile (ref.get_path (), ctx);
+
+ // If the path resolves to a const expression, fold it.
+ result = fold_expr (result);
+ }
+ break;
+
+ case HIR::RangePatternBound::RangePatternBoundType::QUALPATH: {
+ HIR::RangePatternBoundQualPath &ref
+ = *static_cast<HIR::RangePatternBoundQualPath *> (bound);
+
+ result = ResolvePathRef::Compile (ref.get_qualified_path (), ctx);
+
+ // If the path resolves to a const expression, fold it.
+ result = fold_expr (result);
+ }
+ }
+
+ return result;
+}
+
+void
+CompilePatternCaseLabelExpr::visit (HIR::RangePattern &pattern)
+{
+ tree upper = compile_range_pattern_bound (pattern.get_upper_bound ().get (),
+ pattern.get_pattern_mappings (),
+ pattern.get_locus (), ctx);
+ tree lower = compile_range_pattern_bound (pattern.get_lower_bound ().get (),
+ pattern.get_pattern_mappings (),
+ pattern.get_locus (), ctx);
+
+ case_label_expr = build_case_label (lower, upper, associated_case_label);
+}
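+
+// Taken together, the visitors above lower a match like this invented
+// example into a switch whose case labels carry the folded low/high
+// bounds, with the wildcard arm becoming the default label:
+//
+//   fn classify (c: u8) -> &'static str {
+//     match c {
+//       b'0'..=b'9' => "digit",
+//       b'a'..=b'z' => "lower",
+//       _ => "other",
+//     }
+//   }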
+
+// set up the bindings
+
+void
+CompilePatternBindings::visit (HIR::TupleStructPattern &pattern)
+{
+ // lookup the type
+ TyTy::BaseType *lookup = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_type (
+ pattern.get_path ().get_mappings ().get_hirid (), &lookup);
+ rust_assert (ok);
+
+  // this must be an ADT with at least one variant (a tuple struct or an enum)
+ rust_assert (lookup->get_kind () == TyTy::TypeKind::ADT);
+ TyTy::ADTType *adt = static_cast<TyTy::ADTType *> (lookup);
+ rust_assert (adt->number_of_variants () > 0);
+
+ int variant_index = 0;
+ TyTy::VariantDef *variant = adt->get_variants ().at (0);
+ if (adt->is_enum ())
+ {
+ HirId variant_id = UNKNOWN_HIRID;
+ bool ok = ctx->get_tyctx ()->lookup_variant_definition (
+ pattern.get_path ().get_mappings ().get_hirid (), &variant_id);
+ rust_assert (ok);
+
+ ok = adt->lookup_variant_by_id (variant_id, &variant, &variant_index);
+ rust_assert (ok);
+ }
+
+ rust_assert (variant->get_variant_type ()
+ == TyTy::VariantDef::VariantType::TUPLE);
+
+ std::unique_ptr<HIR::TupleStructItems> &items = pattern.get_items ();
+ switch (items->get_item_type ())
+ {
+ case HIR::TupleStructItems::RANGE: {
+ // TODO
+ gcc_unreachable ();
+ }
+ break;
+
+ case HIR::TupleStructItems::NO_RANGE: {
+ HIR::TupleStructItemsNoRange &items_no_range
+ = static_cast<HIR::TupleStructItemsNoRange &> (*items.get ());
+
+ rust_assert (items_no_range.get_patterns ().size ()
+ == variant->num_fields ());
+
+ if (adt->is_enum ())
+ {
+ // we are offsetting by + 1 here since the first field in the record
+ // is always the discriminator
+ size_t tuple_field_index = 1;
+ for (auto &pattern : items_no_range.get_patterns ())
+ {
+ tree variant_accessor
+ = ctx->get_backend ()->struct_field_expression (
+ match_scrutinee_expr, variant_index, pattern->get_locus ());
+
+ tree binding = ctx->get_backend ()->struct_field_expression (
+ variant_accessor, tuple_field_index++, pattern->get_locus ());
+
+ ctx->insert_pattern_binding (
+ pattern->get_pattern_mappings ().get_hirid (), binding);
+ }
+ }
+ else
+ {
+ size_t tuple_field_index = 0;
+ for (auto &pattern : items_no_range.get_patterns ())
+ {
+ tree variant_accessor = match_scrutinee_expr;
+
+ tree binding = ctx->get_backend ()->struct_field_expression (
+ variant_accessor, tuple_field_index++, pattern->get_locus ());
+
+ ctx->insert_pattern_binding (
+ pattern->get_pattern_mappings ().get_hirid (), binding);
+ }
+ }
+ }
+ break;
+ }
+}
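+
+// For a tuple variant such as the following (a hypothetical example),
+// the bindings r, w and h map to fields 1..N of the variant record,
+// since field 0 always holds the RUST$ENUM$DISR discriminant:
+//
+//   enum Shape {
+//     Circle (f64),
+//     Rect (f64, f64),
+//   }
+//
+//   fn area (s: &Shape) -> f64 {
+//     match s {
+//       Shape::Circle (r) => 3.14 * r * r,
+//       Shape::Rect (w, h) => w * h,
+//     }
+//   }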
+
+void
+CompilePatternBindings::visit (HIR::StructPattern &pattern)
+{
+ // lookup the type
+ TyTy::BaseType *lookup = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_type (
+ pattern.get_path ().get_mappings ().get_hirid (), &lookup);
+ rust_assert (ok);
+
+  // this must be an ADT with at least one variant (a struct or an enum)
+ rust_assert (lookup->get_kind () == TyTy::TypeKind::ADT);
+ TyTy::ADTType *adt = static_cast<TyTy::ADTType *> (lookup);
+ rust_assert (adt->number_of_variants () > 0);
+
+ int variant_index = 0;
+ TyTy::VariantDef *variant = adt->get_variants ().at (0);
+ if (adt->is_enum ())
+ {
+ HirId variant_id = UNKNOWN_HIRID;
+ bool ok = ctx->get_tyctx ()->lookup_variant_definition (
+ pattern.get_path ().get_mappings ().get_hirid (), &variant_id);
+ rust_assert (ok);
+
+ ok = adt->lookup_variant_by_id (variant_id, &variant, &variant_index);
+ rust_assert (ok);
+ }
+
+ rust_assert (variant->get_variant_type ()
+ == TyTy::VariantDef::VariantType::STRUCT);
+
+ auto &struct_pattern_elems = pattern.get_struct_pattern_elems ();
+ for (auto &field : struct_pattern_elems.get_struct_pattern_fields ())
+ {
+ switch (field->get_item_type ())
+ {
+ case HIR::StructPatternField::ItemType::TUPLE_PAT: {
+ // TODO
+ gcc_unreachable ();
+ }
+ break;
+
+ case HIR::StructPatternField::ItemType::IDENT_PAT: {
+ // TODO
+ gcc_unreachable ();
+ }
+ break;
+
+ case HIR::StructPatternField::ItemType::IDENT: {
+ HIR::StructPatternFieldIdent &ident
+ = static_cast<HIR::StructPatternFieldIdent &> (*field.get ());
+
+ size_t offs = 0;
+ ok
+ = variant->lookup_field (ident.get_identifier (), nullptr, &offs);
+ rust_assert (ok);
+
+ tree binding = error_mark_node;
+ if (adt->is_enum ())
+ {
+ tree variant_accessor
+ = ctx->get_backend ()->struct_field_expression (
+ match_scrutinee_expr, variant_index, ident.get_locus ());
+
+ // we are offsetting by + 1 here since the first field in the
+ // record is always the discriminator
+ binding = ctx->get_backend ()->struct_field_expression (
+ variant_accessor, offs + 1, ident.get_locus ());
+ }
+ else
+ {
+ tree variant_accessor = match_scrutinee_expr;
+ binding = ctx->get_backend ()->struct_field_expression (
+ variant_accessor, offs, ident.get_locus ());
+ }
+
+ ctx->insert_pattern_binding (ident.get_mappings ().get_hirid (),
+ binding);
+ }
+ break;
+ }
+ }
+}
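+
+// The struct-pattern case resolves each named field to its offset; in
+// an invented example like this, x and y are bound through field
+// accesses on the matched variant (offset by one for the discriminant
+// because the ADT is an enum):
+//
+//   enum Event {
+//     Move { x: i64, y: i64 },
+//     Quit,
+//   }
+//
+//   fn sum (e: &Event) -> i64 {
+//     match e {
+//       Event::Move { x, y } => x + y,
+//       Event::Quit => 0,
+//     }
+//   }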
+
+} // namespace Compile
+} // namespace Rust
diff --git a/gcc/rust/backend/rust-compile-pattern.h b/gcc/rust/backend/rust-compile-pattern.h
new file mode 100644
index 0000000..22812a4
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-pattern.h
@@ -0,0 +1,95 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-compile-base.h"
+
+namespace Rust {
+namespace Compile {
+
+class CompilePatternCaseLabelExpr : public HIRCompileBase,
+ public HIR::HIRPatternVisitor
+{
+public:
+ static tree Compile (HIR::Pattern *pattern, tree associated_case_label,
+ Context *ctx)
+ {
+ CompilePatternCaseLabelExpr compiler (ctx, associated_case_label);
+ pattern->accept_vis (compiler);
+ return compiler.case_label_expr;
+ }
+
+  void visit (HIR::PathInExpression &pattern) override;
+  void visit (HIR::StructPattern &pattern) override;
+  void visit (HIR::TupleStructPattern &pattern) override;
+  void visit (HIR::WildcardPattern &pattern) override;
+  void visit (HIR::RangePattern &pattern) override;
+  void visit (HIR::LiteralPattern &pattern) override;
+
+  // Empty visit for unused Pattern HIR nodes.
+  void visit (HIR::GroupedPattern &) override {}
+  void visit (HIR::IdentifierPattern &) override {}
+  void visit (HIR::QualifiedPathInExpression &) override {}
+  void visit (HIR::ReferencePattern &) override {}
+  void visit (HIR::SlicePattern &) override {}
+  void visit (HIR::TuplePattern &) override {}
+
+ CompilePatternCaseLabelExpr (Context *ctx, tree associated_case_label)
+ : HIRCompileBase (ctx), case_label_expr (error_mark_node),
+ associated_case_label (associated_case_label)
+ {}
+
+ tree case_label_expr;
+ tree associated_case_label;
+};
+
+class CompilePatternBindings : public HIRCompileBase,
+ public HIR::HIRPatternVisitor
+{
+public:
+ static void Compile (HIR::Pattern *pattern, tree match_scrutinee_expr,
+ Context *ctx)
+ {
+ CompilePatternBindings compiler (ctx, match_scrutinee_expr);
+ pattern->accept_vis (compiler);
+ }
+
+ void visit (HIR::StructPattern &pattern) override;
+ void visit (HIR::TupleStructPattern &pattern) override;
+
+ // Empty visit for unused Pattern HIR nodes.
+ void visit (HIR::GroupedPattern &) override {}
+ void visit (HIR::IdentifierPattern &) override {}
+ void visit (HIR::LiteralPattern &) override {}
+ void visit (HIR::PathInExpression &) override {}
+ void visit (HIR::QualifiedPathInExpression &) override {}
+ void visit (HIR::RangePattern &) override {}
+ void visit (HIR::ReferencePattern &) override {}
+ void visit (HIR::SlicePattern &) override {}
+ void visit (HIR::TuplePattern &) override {}
+ void visit (HIR::WildcardPattern &) override {}
+
+protected:
+ CompilePatternBindings (Context *ctx, tree match_scrutinee_expr)
+ : HIRCompileBase (ctx), match_scrutinee_expr (match_scrutinee_expr)
+ {}
+
+ tree match_scrutinee_expr;
+};
+
+} // namespace Compile
+} // namespace Rust
diff --git a/gcc/rust/backend/rust-compile-resolve-path.cc b/gcc/rust/backend/rust-compile-resolve-path.cc
new file mode 100644
index 0000000..8857df2
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-resolve-path.cc
@@ -0,0 +1,309 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-compile-resolve-path.h"
+#include "rust-compile-intrinsic.h"
+#include "rust-compile-item.h"
+#include "rust-compile-implitem.h"
+#include "rust-compile-expr.h"
+#include "rust-hir-trait-resolve.h"
+#include "rust-hir-path-probe.h"
+#include "rust-compile-extern.h"
+#include "rust-constexpr.h"
+
+namespace Rust {
+namespace Compile {
+
+void
+ResolvePathRef::visit (HIR::QualifiedPathInExpression &expr)
+{
+ resolved = resolve (expr.get_final_segment ().get_segment (),
+ expr.get_mappings (), expr.get_locus (), true);
+}
+
+void
+ResolvePathRef::visit (HIR::PathInExpression &expr)
+{
+ resolved = resolve (expr.get_final_segment ().get_segment (),
+ expr.get_mappings (), expr.get_locus (), false);
+}
+
+tree
+ResolvePathRef::resolve (const HIR::PathIdentSegment &final_segment,
+ const Analysis::NodeMapping &mappings,
+ Location expr_locus, bool is_qualified_path)
+{
+ TyTy::BaseType *lookup = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_type (mappings.get_hirid (), &lookup);
+ rust_assert (ok);
+
+ // need to look up the reference for this identifier
+ NodeId ref_node_id = UNKNOWN_NODEID;
+ if (!ctx->get_resolver ()->lookup_resolved_name (mappings.get_nodeid (),
+ &ref_node_id))
+ {
+      // this can fail because it might be a constructor for something;
+      // in that case the caller should attempt ResolvePathType::Compile
+
+      // it might be a data-less enum variant
+ if (lookup->get_kind () != TyTy::TypeKind::ADT)
+ return error_mark_node;
+
+ TyTy::ADTType *adt = static_cast<TyTy::ADTType *> (lookup);
+
+ // it might be a unit-struct
+ if (adt->is_unit ())
+ {
+ return ctx->get_backend ()->unit_expression ();
+ }
+
+ if (!adt->is_enum ())
+ return error_mark_node;
+
+ HirId variant_id;
+ if (!ctx->get_tyctx ()->lookup_variant_definition (mappings.get_hirid (),
+ &variant_id))
+ return error_mark_node;
+
+      int union_discriminator = -1;
+      TyTy::VariantDef *variant = nullptr;
+      if (!adt->lookup_variant_by_id (variant_id, &variant,
+				      &union_discriminator))
+ return error_mark_node;
+
+      // this can only be for discriminant variants; the others are built up
+      // using call-expr or struct-init
+ rust_assert (variant->get_variant_type ()
+ == TyTy::VariantDef::VariantType::NUM);
+
+ // we need the actual gcc type
+ tree compiled_adt_type = TyTyResolveCompile::compile (ctx, adt);
+
+ // make the ctor for the union
+ HIR::Expr *discrim_expr = variant->get_discriminant ();
+ tree discrim_expr_node = CompileExpr::Compile (discrim_expr, ctx);
+ tree folded_discrim_expr = fold_expr (discrim_expr_node);
+ tree qualifier = folded_discrim_expr;
+
+ return ctx->get_backend ()->constructor_expression (compiled_adt_type,
+ true, {qualifier},
+							   union_discriminator,
+ expr_locus);
+ }
+
+ HirId ref;
+ if (!ctx->get_mappings ()->lookup_node_to_hir (ref_node_id, &ref))
+ {
+ rust_error_at (expr_locus, "reverse call path lookup failure");
+ return error_mark_node;
+ }
+
+ // might be a constant
+ tree constant_expr;
+ if (ctx->lookup_const_decl (ref, &constant_expr))
+ {
+ TREE_USED (constant_expr) = 1;
+ return constant_expr;
+ }
+
+ // maybe closure binding
+ tree closure_binding = error_mark_node;
+ if (ctx->lookup_closure_binding (ref, &closure_binding))
+ {
+ TREE_USED (closure_binding) = 1;
+ return closure_binding;
+ }
+
+ // this might be a variable reference or a function reference
+ Bvariable *var = nullptr;
+ if (ctx->lookup_var_decl (ref, &var))
+ {
+      // TREE_USED is set up in the gcc abstraction here
+ return ctx->get_backend ()->var_expression (var, expr_locus);
+ }
+
+ // might be a match pattern binding
+ tree binding = error_mark_node;
+ if (ctx->lookup_pattern_binding (ref, &binding))
+ {
+ TREE_USED (binding) = 1;
+ return binding;
+ }
+
+ // it might be a function call
+ if (lookup->get_kind () == TyTy::TypeKind::FNDEF)
+ {
+ TyTy::FnType *fntype = static_cast<TyTy::FnType *> (lookup);
+ tree fn = NULL_TREE;
+ if (ctx->lookup_function_decl (fntype->get_ty_ref (), &fn))
+ {
+ TREE_USED (fn) = 1;
+ return address_expression (fn, expr_locus);
+ }
+ else if (fntype->get_abi () == ABI::INTRINSIC)
+ {
+ Intrinsics compile (ctx);
+ fn = compile.compile (fntype);
+ TREE_USED (fn) = 1;
+ return address_expression (fn, expr_locus);
+ }
+ }
+
+ // let the query system figure it out
+ tree resolved_item = query_compile (ref, lookup, final_segment, mappings,
+ expr_locus, is_qualified_path);
+ if (resolved_item != error_mark_node)
+ {
+ TREE_USED (resolved_item) = 1;
+ }
+ return resolved_item;
+}
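+
+// Two of the fallback cases above, shown on an invented example: a
+// unit-struct path compiles to a unit expression, and a data-less enum
+// variant compiles to a constructor carrying only its discriminant.
+//
+//   struct Unit;
+//   enum Mode { Read, Write }
+//
+//   fn main () {
+//     let _u = Unit;        // unit expression
+//     let _m = Mode::Read;  // constructor with only RUST$ENUM$DISR set
+//   }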
+
+tree
+HIRCompileBase::query_compile (HirId ref, TyTy::BaseType *lookup,
+ const HIR::PathIdentSegment &final_segment,
+ const Analysis::NodeMapping &mappings,
+ Location expr_locus, bool is_qualified_path)
+{
+ HIR::Item *resolved_item = ctx->get_mappings ()->lookup_hir_item (ref);
+ HirId parent_block;
+ HIR::ExternalItem *resolved_extern_item
+ = ctx->get_mappings ()->lookup_hir_extern_item (ref, &parent_block);
+ bool is_hir_item = resolved_item != nullptr;
+ bool is_hir_extern_item = resolved_extern_item != nullptr;
+ if (is_hir_item)
+ {
+ if (!lookup->has_subsititions_defined ())
+ return CompileItem::compile (resolved_item, ctx, nullptr, true,
+ expr_locus);
+ else
+ return CompileItem::compile (resolved_item, ctx, lookup, true,
+ expr_locus);
+ }
+ else if (is_hir_extern_item)
+ {
+ if (!lookup->has_subsititions_defined ())
+ return CompileExternItem::compile (resolved_extern_item, ctx, nullptr,
+ true, expr_locus);
+ else
+ return CompileExternItem::compile (resolved_extern_item, ctx, lookup,
+ true, expr_locus);
+ }
+ else
+ {
+ HirId parent_impl_id = UNKNOWN_HIRID;
+ HIR::ImplItem *resolved_item
+ = ctx->get_mappings ()->lookup_hir_implitem (ref, &parent_impl_id);
+ bool is_impl_item = resolved_item != nullptr;
+ if (is_impl_item)
+ {
+ rust_assert (parent_impl_id != UNKNOWN_HIRID);
+ HIR::Item *impl_ref
+ = ctx->get_mappings ()->lookup_hir_item (parent_impl_id);
+ rust_assert (impl_ref != nullptr);
+ HIR::ImplBlock *impl = static_cast<HIR::ImplBlock *> (impl_ref);
+
+ TyTy::BaseType *self = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_type (
+ impl->get_type ()->get_mappings ().get_hirid (), &self);
+ rust_assert (ok);
+
+ if (!lookup->has_subsititions_defined ())
+ return CompileInherentImplItem::Compile (resolved_item, ctx,
+ nullptr, true, expr_locus);
+ else
+ return CompileInherentImplItem::Compile (resolved_item, ctx, lookup,
+ true, expr_locus);
+ }
+ else
+ {
+ // it might be resolved to a trait item
+ HIR::TraitItem *trait_item
+ = ctx->get_mappings ()->lookup_hir_trait_item (ref);
+ HIR::Trait *trait = ctx->get_mappings ()->lookup_trait_item_mapping (
+ trait_item->get_mappings ().get_hirid ());
+
+ Resolver::TraitReference *trait_ref
+ = &Resolver::TraitReference::error_node ();
+ bool ok = ctx->get_tyctx ()->lookup_trait_reference (
+ trait->get_mappings ().get_defid (), &trait_ref);
+ rust_assert (ok);
+
+ TyTy::BaseType *receiver = nullptr;
+ ok = ctx->get_tyctx ()->lookup_receiver (mappings.get_hirid (),
+ &receiver);
+ rust_assert (ok);
+
+ if (receiver->get_kind () == TyTy::TypeKind::PARAM)
+ {
+ TyTy::ParamType *p = static_cast<TyTy::ParamType *> (receiver);
+ receiver = p->resolve ();
+ }
+
+	  // the type resolver can only resolve type bounds to their trait
+	  // item, so it's up to us to figure out if this path should resolve
+	  // to a trait-impl-block-item or if it can be defaulted to the
+	  // trait-impl-item's definition
+ auto candidates
+ = Resolver::PathProbeImplTrait::Probe (receiver, final_segment,
+ trait_ref);
+ if (candidates.size () == 0)
+ {
+ // this means we are defaulting back to the trait_item if
+ // possible
+ Resolver::TraitItemReference *trait_item_ref = nullptr;
+ bool ok = trait_ref->lookup_hir_trait_item (*trait_item,
+ &trait_item_ref);
+ rust_assert (ok); // found
+ rust_assert (trait_item_ref->is_optional ()); // has definition
+
+ return CompileTraitItem::Compile (
+ trait_item_ref->get_hir_trait_item (), ctx, lookup, true,
+ expr_locus);
+ }
+ else
+ {
+ rust_assert (candidates.size () == 1);
+
+ auto candidate = *candidates.begin ();
+ rust_assert (candidate.is_impl_candidate ());
+
+ HIR::ImplBlock *impl = candidate.item.impl.parent;
+ HIR::ImplItem *impl_item = candidate.item.impl.impl_item;
+
+ TyTy::BaseType *self = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_type (
+ impl->get_type ()->get_mappings ().get_hirid (), &self);
+ rust_assert (ok);
+
+ if (!lookup->has_subsititions_defined ())
+ return CompileInherentImplItem::Compile (impl_item, ctx,
+ nullptr, true,
+ expr_locus);
+ else
+ return CompileInherentImplItem::Compile (impl_item, ctx, lookup,
+ true, expr_locus);
+ }
+ }
+ }
+
+ return error_mark_node;
+}
+
+} // namespace Compile
+} // namespace Rust
diff --git a/gcc/rust/backend/rust-compile-resolve-path.h b/gcc/rust/backend/rust-compile-resolve-path.h
new file mode 100644
index 0000000..67ff7ee
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-resolve-path.h
@@ -0,0 +1,73 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_COMPILE_RESOLVE_PATH
+#define RUST_COMPILE_RESOLVE_PATH
+
+#include "rust-compile-base.h"
+
+namespace Rust {
+namespace Compile {
+
+class ResolvePathRef : public HIRCompileBase, public HIR::HIRPatternVisitor
+{
+public:
+ static tree Compile (HIR::QualifiedPathInExpression &expr, Context *ctx)
+ {
+ ResolvePathRef resolver (ctx);
+ expr.accept_vis (resolver);
+ return resolver.resolved;
+ }
+
+ static tree Compile (HIR::PathInExpression &expr, Context *ctx)
+ {
+ ResolvePathRef resolver (ctx);
+ expr.accept_vis (resolver);
+ return resolver.resolved;
+ }
+
+ void visit (HIR::PathInExpression &expr) override;
+ void visit (HIR::QualifiedPathInExpression &expr) override;
+
+ // Empty visit for unused Pattern HIR nodes.
+ void visit (HIR::GroupedPattern &) override {}
+ void visit (HIR::IdentifierPattern &) override {}
+ void visit (HIR::LiteralPattern &) override {}
+ void visit (HIR::RangePattern &) override {}
+ void visit (HIR::ReferencePattern &) override {}
+ void visit (HIR::SlicePattern &) override {}
+ void visit (HIR::StructPattern &) override {}
+ void visit (HIR::TuplePattern &) override {}
+ void visit (HIR::TupleStructPattern &) override {}
+ void visit (HIR::WildcardPattern &) override {}
+
+ ResolvePathRef (Context *ctx)
+ : HIRCompileBase (ctx), resolved (error_mark_node)
+ {}
+
+ tree resolve (const HIR::PathIdentSegment &final_segment,
+ const Analysis::NodeMapping &mappings, Location locus,
+ bool is_qualified_path);
+
+ tree resolved;
+};
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_COMPILE_RESOLVE_PATH
diff --git a/gcc/rust/backend/rust-compile-stmt.cc b/gcc/rust/backend/rust-compile-stmt.cc
new file mode 100644
index 0000000..3fc2528
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-stmt.cc
@@ -0,0 +1,115 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-compile-stmt.h"
+#include "rust-compile-expr.h"
+
+namespace Rust {
+namespace Compile {
+
+CompileStmt::CompileStmt (Context *ctx)
+ : HIRCompileBase (ctx), translated (nullptr)
+{}
+
+tree
+CompileStmt::Compile (HIR::Stmt *stmt, Context *ctx)
+{
+ CompileStmt compiler (ctx);
+ stmt->accept_vis (compiler);
+ return compiler.translated;
+}
+
+void
+CompileStmt::visit (HIR::ExprStmtWithBlock &stmt)
+{
+ translated = CompileExpr::Compile (stmt.get_expr (), ctx);
+}
+
+void
+CompileStmt::visit (HIR::ExprStmtWithoutBlock &stmt)
+{
+ translated = CompileExpr::Compile (stmt.get_expr (), ctx);
+}
+
+void
+CompileStmt::visit (HIR::LetStmt &stmt)
+{
+ // nothing to do
+ if (!stmt.has_init_expr ())
+ return;
+
+ const HIR::Pattern &stmt_pattern = *stmt.get_pattern ();
+ HirId stmt_id = stmt_pattern.get_pattern_mappings ().get_hirid ();
+
+ TyTy::BaseType *ty = nullptr;
+ if (!ctx->get_tyctx ()->lookup_type (stmt_id, &ty))
+ {
+ // FIXME this should be an assertion instead
+ rust_fatal_error (stmt.get_locus (),
+ "failed to lookup variable declaration type");
+ return;
+ }
+
+ Bvariable *var = nullptr;
+ if (!ctx->lookup_var_decl (stmt_id, &var))
+ {
+ // FIXME this should be an assertion instead and use error mark node
+ rust_fatal_error (stmt.get_locus (),
+ "failed to lookup compiled variable declaration");
+ return;
+ }
+
+ tree init = CompileExpr::Compile (stmt.get_init_expr (), ctx);
+ // FIXME use error_mark_node, check that CompileExpr returns error_mark_node
+ // on failure and make this an assertion
+ if (init == nullptr)
+ return;
+
+ TyTy::BaseType *actual = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_type (
+ stmt.get_init_expr ()->get_mappings ().get_hirid (), &actual);
+ rust_assert (ok);
+ tree stmt_type = TyTyResolveCompile::compile (ctx, ty);
+
+ Location lvalue_locus = stmt.get_pattern ()->get_locus ();
+ Location rvalue_locus = stmt.get_init_expr ()->get_locus ();
+ TyTy::BaseType *expected = ty;
+ init = coercion_site (stmt.get_mappings ().get_hirid (), init, actual,
+ expected, lvalue_locus, rvalue_locus);
+
+ auto fnctx = ctx->peek_fn ();
+ if (ty->is_unit ())
+ {
+ ctx->add_statement (init);
+
+ auto unit_type_init_expr
+ = ctx->get_backend ()->constructor_expression (stmt_type, false, {}, -1,
+ rvalue_locus);
+ auto s = ctx->get_backend ()->init_statement (fnctx.fndecl, var,
+ unit_type_init_expr);
+ ctx->add_statement (s);
+ }
+ else
+ {
+ auto s = ctx->get_backend ()->init_statement (fnctx.fndecl, var, init);
+ ctx->add_statement (s);
+ }
+}
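+
+// The unit-type special case above, on an invented example: the
+// initializer is emitted for its side effects as a plain statement and
+// the variable is then initialized with an empty constructor.
+//
+//   fn side_effect () {}
+//
+//   fn main () {
+//     let _u: () = side_effect (); // unit: init is split in two
+//     let _x: i64 = 42;            // non-unit: single init statement
+//   }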
+
+} // namespace Compile
+} // namespace Rust
diff --git a/gcc/rust/backend/rust-compile-stmt.h b/gcc/rust/backend/rust-compile-stmt.h
new file mode 100644
index 0000000..1f06d54
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-stmt.h
@@ -0,0 +1,69 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_COMPILE_STMT
+#define RUST_COMPILE_STMT
+
+#include "rust-compile-base.h"
+
+namespace Rust {
+namespace Compile {
+
+class CompileStmt : private HIRCompileBase, protected HIR::HIRStmtVisitor
+{
+public:
+ static tree Compile (HIR::Stmt *stmt, Context *ctx);
+
+ void visit (HIR::ExprStmtWithBlock &stmt) override;
+ void visit (HIR::ExprStmtWithoutBlock &stmt) override;
+ void visit (HIR::LetStmt &stmt) override;
+
+ // Empty visit for unused Stmt HIR nodes.
+ void visit (HIR::TupleStruct &) override {}
+ void visit (HIR::EnumItem &) override {}
+ void visit (HIR::EnumItemTuple &) override {}
+ void visit (HIR::EnumItemStruct &) override {}
+ void visit (HIR::EnumItemDiscriminant &) override {}
+ void visit (HIR::TypePathSegmentFunction &) override {}
+ void visit (HIR::TypePath &) override {}
+ void visit (HIR::QualifiedPathInType &) override {}
+ void visit (HIR::Module &) override {}
+ void visit (HIR::ExternCrate &) override {}
+ void visit (HIR::UseDeclaration &) override {}
+ void visit (HIR::Function &) override {}
+ void visit (HIR::TypeAlias &) override {}
+ void visit (HIR::StructStruct &) override {}
+ void visit (HIR::Enum &) override {}
+ void visit (HIR::Union &) override {}
+ void visit (HIR::ConstantItem &) override {}
+ void visit (HIR::StaticItem &) override {}
+ void visit (HIR::Trait &) override {}
+ void visit (HIR::ImplBlock &) override {}
+ void visit (HIR::ExternBlock &) override {}
+ void visit (HIR::EmptyStmt &) override {}
+
+private:
+ CompileStmt (Context *ctx);
+
+ tree translated;
+};
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_COMPILE_STMT
diff --git a/gcc/rust/backend/rust-compile-struct-field-expr.cc b/gcc/rust/backend/rust-compile-struct-field-expr.cc
new file mode 100644
index 0000000..8f10c24
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-struct-field-expr.cc
@@ -0,0 +1,81 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-compile-struct-field-expr.h"
+#include "rust-compile-expr.h"
+
+namespace Rust {
+namespace Compile {
+
+CompileStructExprField::CompileStructExprField (Context *ctx)
+ : HIRCompileBase (ctx), translated (error_mark_node)
+{}
+
+tree
+CompileStructExprField::Compile (HIR::StructExprField *field, Context *ctx)
+{
+ CompileStructExprField compiler (ctx);
+ switch (field->get_kind ())
+ {
+ case HIR::StructExprField::StructExprFieldKind::IDENTIFIER:
+ compiler.visit (static_cast<HIR::StructExprFieldIdentifier &> (*field));
+ break;
+
+ case HIR::StructExprField::StructExprFieldKind::IDENTIFIER_VALUE:
+ compiler.visit (
+ static_cast<HIR::StructExprFieldIdentifierValue &> (*field));
+ break;
+
+ case HIR::StructExprField::StructExprFieldKind::INDEX_VALUE:
+ compiler.visit (static_cast<HIR::StructExprFieldIndexValue &> (*field));
+ break;
+ }
+ return compiler.translated;
+}
+
+void
+CompileStructExprField::visit (HIR::StructExprFieldIdentifierValue &field)
+{
+ translated = CompileExpr::Compile (field.get_value (), ctx);
+}
+
+void
+CompileStructExprField::visit (HIR::StructExprFieldIndexValue &field)
+{
+ translated = CompileExpr::Compile (field.get_value (), ctx);
+}
+
+void
+CompileStructExprField::visit (HIR::StructExprFieldIdentifier &field)
+{
+ // we can make the field look like a path expr to take advantage of existing
+ // code
+
+ Analysis::NodeMapping mappings_copy1 = field.get_mappings ();
+ Analysis::NodeMapping mappings_copy2 = field.get_mappings ();
+
+ HIR::PathIdentSegment ident_seg (field.get_field_name ());
+ HIR::PathExprSegment seg (mappings_copy1, ident_seg, field.get_locus (),
+ HIR::GenericArgs::create_empty ());
+ HIR::PathInExpression expr (mappings_copy2, {seg}, field.get_locus (), false,
+ {});
+ translated = CompileExpr::Compile (&expr, ctx);
+}
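+
+// The shorthand rewrite above, shown on an invented struct: the bare
+// x and y are treated as one-segment path expressions, so
+// Point { x, y } compiles exactly like Point { x: x, y: y }.
+//
+//   struct Point { x: i32, y: i32 }
+//
+//   fn main () {
+//     let (x, y) = (1, 2);
+//     let p = Point { x, y };
+//     println! ("{} {}", p.x, p.y);
+//   }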
+
+} // namespace Compile
+} // namespace Rust
diff --git a/gcc/rust/backend/rust-compile-struct-field-expr.h b/gcc/rust/backend/rust-compile-struct-field-expr.h
new file mode 100644
index 0000000..e11eb95
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-struct-field-expr.h
@@ -0,0 +1,46 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_COMPILE_STRUCT_FIELD_EXPR
+#define RUST_COMPILE_STRUCT_FIELD_EXPR
+
+#include "rust-compile-base.h"
+
+namespace Rust {
+namespace Compile {
+
+class CompileStructExprField : private HIRCompileBase
+{
+public:
+ static tree Compile (HIR::StructExprField *field, Context *ctx);
+
+protected:
+ void visit (HIR::StructExprFieldIdentifierValue &field);
+ void visit (HIR::StructExprFieldIndexValue &field);
+ void visit (HIR::StructExprFieldIdentifier &field);
+
+private:
+ CompileStructExprField (Context *ctx);
+
+ tree translated;
+};
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_COMPILE_STRUCT_FIELD_EXPR
diff --git a/gcc/rust/backend/rust-compile-type.cc b/gcc/rust/backend/rust-compile-type.cc
new file mode 100644
index 0000000..a1db6ad
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-type.cc
@@ -0,0 +1,752 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-compile-type.h"
+#include "rust-compile-expr.h"
+#include "rust-constexpr.h"
+#include "rust-gcc.h"
+
+#include "tree.h"
+
+namespace Rust {
+namespace Compile {
+
+static const std::string RUST_ENUM_DISR_FIELD_NAME = "RUST$ENUM$DISR";
+
+TyTyResolveCompile::TyTyResolveCompile (Context *ctx, bool trait_object_mode)
+ : ctx (ctx), trait_object_mode (trait_object_mode),
+ translated (error_mark_node), recurisve_ops (0)
+{}
+
+tree
+TyTyResolveCompile::compile (Context *ctx, const TyTy::BaseType *ty,
+ bool trait_object_mode)
+{
+ TyTyResolveCompile compiler (ctx, trait_object_mode);
+ ty->accept_vis (compiler);
+
+ if (compiler.translated != error_mark_node
+ && TYPE_NAME (compiler.translated) != NULL)
+ {
+ // canonicalize the type
+ compiler.translated = ctx->insert_compiled_type (compiler.translated);
+ }
+
+ return compiler.translated;
+}
+
+// see: gcc/c/c-decl.cc:8230-8241
+// https://github.com/Rust-GCC/gccrs/blob/0024bc2f028369b871a65ceb11b2fddfb0f9c3aa/gcc/c/c-decl.c#L8229-L8241
+tree
+TyTyResolveCompile::get_implicit_enumeral_node_type (Context *ctx)
+{
+ // static tree enum_node = NULL_TREE;
+ // if (enum_node == NULL_TREE)
+ // {
+ // enum_node = make_node (ENUMERAL_TYPE);
+ // SET_TYPE_MODE (enum_node, TYPE_MODE (unsigned_type_node));
+ // SET_TYPE_ALIGN (enum_node, TYPE_ALIGN (unsigned_type_node));
+ // TYPE_USER_ALIGN (enum_node) = 0;
+ // TYPE_UNSIGNED (enum_node) = 1;
+ // TYPE_PRECISION (enum_node) = TYPE_PRECISION (unsigned_type_node);
+ // TYPE_MIN_VALUE (enum_node) = TYPE_MIN_VALUE (unsigned_type_node);
+ // TYPE_MAX_VALUE (enum_node) = TYPE_MAX_VALUE (unsigned_type_node);
+
+ // // tree identifier = ctx->get_backend ()->get_identifier_node
+ // // ("enumeral"); tree enum_decl
+ // // = build_decl (BUILTINS_LOCATION, TYPE_DECL, identifier,
+ // enum_node);
+ // // TYPE_NAME (enum_node) = enum_decl;
+ // }
+ // return enum_node;
+
+ static tree enum_node = NULL_TREE;
+ if (enum_node == NULL_TREE)
+ {
+ enum_node = ctx->get_backend ()->named_type (
+ "enumeral", ctx->get_backend ()->integer_type (false, 64),
+ Linemap::predeclared_location ());
+ }
+ return enum_node;
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::ErrorType &)
+{
+ translated = error_mark_node;
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::InferType &)
+{
+ translated = error_mark_node;
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::ClosureType &type)
+{
+ auto mappings = ctx->get_mappings ();
+
+ std::vector<Backend::typed_identifier> fields;
+
+ size_t i = 0;
+ for (const auto &capture : type.get_captures ())
+ {
+ // lookup the HirId
+ HirId ref = UNKNOWN_HIRID;
+ bool ok = mappings->lookup_node_to_hir (capture, &ref);
+ rust_assert (ok);
+
+ // lookup the var decl type
+ TyTy::BaseType *lookup = nullptr;
+ bool found = ctx->get_tyctx ()->lookup_type (ref, &lookup);
+ rust_assert (found);
+
+ // FIXME get the var pattern name
+      std::string mappings_name = "capture_" + std::to_string (i++);
+
+ // FIXME
+      // this should be based on the closure's movability
+ tree decl_type = TyTyResolveCompile::compile (ctx, lookup);
+ tree capture_type = build_reference_type (decl_type);
+ fields.push_back (Backend::typed_identifier (mappings_name, capture_type,
+ type.get_ident ().locus));
+ }
+
+ tree type_record = ctx->get_backend ()->struct_type (fields);
+ RS_CLOSURE_FLAG (type_record) = 1;
+
+ std::string named_struct_str
+ = type.get_ident ().path.get () + "::{{closure}}";
+ translated = ctx->get_backend ()->named_type (named_struct_str, type_record,
+ type.get_ident ().locus);
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::ProjectionType &type)
+{
+ type.get ()->accept_vis (*this);
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::PlaceholderType &type)
+{
+ type.resolve ()->accept_vis (*this);
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::ParamType &param)
+{
+ if (recurisve_ops++ >= rust_max_recursion_depth)
+ {
+ rust_error_at (Location (),
+ "%<recursion depth%> count exceeds limit of %i (use "
+ "%<frust-max-recursion-depth=%> to increase the limit)",
+ rust_max_recursion_depth);
+ translated = error_mark_node;
+ return;
+ }
+
+ param.resolve ()->accept_vis (*this);
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::FnType &type)
+{
+ Backend::typed_identifier receiver;
+ std::vector<Backend::typed_identifier> parameters;
+ std::vector<Backend::typed_identifier> results;
+
+ if (!type.get_return_type ()->is_unit ())
+ {
+ auto hir_type = type.get_return_type ();
+ auto ret = TyTyResolveCompile::compile (ctx, hir_type, trait_object_mode);
+ results.push_back (Backend::typed_identifier (
+ "_", ret,
+ ctx->get_mappings ()->lookup_location (hir_type->get_ref ())));
+ }
+
+ for (auto &param_pair : type.get_params ())
+ {
+ auto param_tyty = param_pair.second;
+ auto compiled_param_type
+ = TyTyResolveCompile::compile (ctx, param_tyty, trait_object_mode);
+
+ auto compiled_param = Backend::typed_identifier (
+ param_pair.first->as_string (), compiled_param_type,
+ ctx->get_mappings ()->lookup_location (param_tyty->get_ref ()));
+
+ parameters.push_back (compiled_param);
+ }
+
+ if (!type.is_varadic ())
+ translated
+ = ctx->get_backend ()->function_type (receiver, parameters, results, NULL,
+ type.get_ident ().locus);
+ else
+ translated
+ = ctx->get_backend ()->function_type_varadic (receiver, parameters,
+ results, NULL,
+ type.get_ident ().locus);
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::FnPtr &type)
+{
+ tree result_type = TyTyResolveCompile::compile (ctx, type.get_return_type ());
+
+ std::vector<tree> parameters;
+
+ auto &params = type.get_params ();
+ for (auto &p : params)
+ {
+ tree pty = TyTyResolveCompile::compile (ctx, p.get_tyty ());
+ parameters.push_back (pty);
+ }
+
+ translated = ctx->get_backend ()->function_ptr_type (result_type, parameters,
+ type.get_ident ().locus);
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::ADTType &type)
+{
+ tree type_record = error_mark_node;
+ if (!type.is_enum ())
+ {
+ rust_assert (type.number_of_variants () == 1);
+
+ TyTy::VariantDef &variant = *type.get_variants ().at (0);
+ std::vector<Backend::typed_identifier> fields;
+ for (size_t i = 0; i < variant.num_fields (); i++)
+ {
+ const TyTy::StructFieldType *field = variant.get_field_at_index (i);
+ tree compiled_field_ty
+ = TyTyResolveCompile::compile (ctx, field->get_field_type ());
+
+ Backend::typed_identifier f (field->get_name (), compiled_field_ty,
+ ctx->get_mappings ()->lookup_location (
+ type.get_ty_ref ()));
+ fields.push_back (std::move (f));
+ }
+
+ type_record = type.is_union ()
+ ? ctx->get_backend ()->union_type (fields)
+ : ctx->get_backend ()->struct_type (fields);
+ }
+ else
+ {
+ // see:
+ // https://github.com/bminor/binutils-gdb/blob/527b8861cd472385fa9160a91dd6d65a25c41987/gdb/dwarf2/read.c#L9010-L9241
+ //
+ // enums are actually a big union so for example the rust enum:
+ //
+ // enum AnEnum {
+ // A,
+ // B,
+ // C (char),
+ // D { x: i64, y: i64 },
+ // }
+ //
+ // we actually turn this into
+ //
+ // union {
+ // struct A { int RUST$ENUM$DISR; }; <- this is a data-less variant
+ // struct B { int RUST$ENUM$DISR; }; <- this is a data-less variant
+ // struct C { int RUST$ENUM$DISR; char __0; };
+ // struct D { int RUST$ENUM$DISR; i64 x; i64 y; };
+ // }
+ //
+      // Ada's qual_union_types might still work for this but I am not 100%
+      // sure. I ran into some issues, so let's reuse our normal union and
+      // ask the Ada people about it.
+
+ std::vector<tree> variant_records;
+ for (auto &variant : type.get_variants ())
+ {
+ std::vector<Backend::typed_identifier> fields;
+
+ // add in the qualifier field for the variant
+ tree enumeral_type
+ = TyTyResolveCompile::get_implicit_enumeral_node_type (ctx);
+ Backend::typed_identifier f (RUST_ENUM_DISR_FIELD_NAME, enumeral_type,
+ ctx->get_mappings ()->lookup_location (
+ variant->get_id ()));
+ fields.push_back (std::move (f));
+
+ // compile the rest of the fields
+ for (size_t i = 0; i < variant->num_fields (); i++)
+ {
+ const TyTy::StructFieldType *field
+ = variant->get_field_at_index (i);
+ tree compiled_field_ty
+ = TyTyResolveCompile::compile (ctx, field->get_field_type ());
+
+ std::string field_name = field->get_name ();
+ if (variant->get_variant_type ()
+ == TyTy::VariantDef::VariantType::TUPLE)
+ field_name = "__" + field->get_name ();
+
+ Backend::typed_identifier f (
+ field_name, compiled_field_ty,
+ ctx->get_mappings ()->lookup_location (type.get_ty_ref ()));
+ fields.push_back (std::move (f));
+ }
+
+ tree variant_record = ctx->get_backend ()->struct_type (fields);
+ tree named_variant_record = ctx->get_backend ()->named_type (
+ variant->get_ident ().path.get (), variant_record,
+ variant->get_ident ().locus);
+
+ // set the qualifier to be a builtin
+ DECL_ARTIFICIAL (TYPE_FIELDS (variant_record)) = 1;
+
+ // add them to the list
+ variant_records.push_back (named_variant_record);
+ }
+
+ // now we need to make the actual union, but first we need to make
+ // named_type TYPE_DECL's out of the variants
+
+ size_t i = 0;
+ std::vector<Backend::typed_identifier> enum_fields;
+ for (auto &variant_record : variant_records)
+ {
+ TyTy::VariantDef *variant = type.get_variants ().at (i++);
+ std::string implicit_variant_name = variant->get_identifier ();
+
+ Backend::typed_identifier f (implicit_variant_name, variant_record,
+ ctx->get_mappings ()->lookup_location (
+ type.get_ty_ref ()));
+ enum_fields.push_back (std::move (f));
+ }
+
+ // finally make the union or the enum
+ type_record = ctx->get_backend ()->union_type (enum_fields);
+ }
+
+ // Handle repr options
+ // TODO: "packed" should only narrow type alignment and "align" should only
+ // widen it. Do we need to check and enforce this here, or is it taken care of
+ // later on in the gcc middle-end?
+ TyTy::ADTType::ReprOptions repr = type.get_repr_options ();
+ if (repr.pack)
+ {
+ TYPE_PACKED (type_record) = 1;
+ if (repr.pack > 1)
+ {
+ SET_TYPE_ALIGN (type_record, repr.pack * 8);
+ TYPE_USER_ALIGN (type_record) = 1;
+ }
+ }
+ else if (repr.align)
+ {
+ SET_TYPE_ALIGN (type_record, repr.align * 8);
+ TYPE_USER_ALIGN (type_record) = 1;
+ }
+
+ std::string named_struct_str
+ = type.get_ident ().path.get () + type.subst_as_string ();
+ translated = ctx->get_backend ()->named_type (named_struct_str, type_record,
+ type.get_ident ().locus);
+}
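+
+// How the repr handling above maps onto Rust source, as an illustrative
+// sketch (exact attribute support depends on the frontend):
+//
+//   #[repr(packed(2))]
+//   struct Packed { a: u8, b: u32 } // alignment narrowed to 2 bytes
+//
+//   #[repr(align(16))]
+//   struct Aligned { a: u8 }        // alignment widened to 16 bytes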
+
+void
+TyTyResolveCompile::visit (const TyTy::TupleType &type)
+{
+ if (type.num_fields () == 0)
+ {
+ translated = ctx->get_backend ()->unit_type ();
+ return;
+ }
+
+ // create implicit struct
+ std::vector<Backend::typed_identifier> fields;
+ for (size_t i = 0; i < type.num_fields (); i++)
+ {
+ TyTy::BaseType *field = type.get_field (i);
+ tree compiled_field_ty = TyTyResolveCompile::compile (ctx, field);
+
+ // rustc uses the convention __N, where N is an integer, to
+ // name the fields of a tuple. We follow this as well,
+ // because this is used by GDB. One further reason to prefer
+ // this, rather than simply emitting the integer, is that this
+ // approach makes it simpler to use a C-only debugger, or
+ // GDB's C mode, when debugging Rust.
+ Backend::typed_identifier f ("__" + std::to_string (i), compiled_field_ty,
+ ctx->get_mappings ()->lookup_location (
+ type.get_ty_ref ()));
+ fields.push_back (std::move (f));
+ }
+
+ tree struct_type_record = ctx->get_backend ()->struct_type (fields);
+ translated
+ = ctx->get_backend ()->named_type (type.as_string (), struct_type_record,
+ type.get_ident ().locus);
+}
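+
+// A one-line illustration of the naming rule above (invented example):
+//
+//   let t: (i32, bool) = (7, true); // lowered fields are __0 and __1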
+
+void
+TyTyResolveCompile::visit (const TyTy::ArrayType &type)
+{
+ tree element_type
+ = TyTyResolveCompile::compile (ctx, type.get_element_type ());
+
+ ctx->push_const_context ();
+ tree capacity_expr = CompileExpr::Compile (&type.get_capacity_expr (), ctx);
+ ctx->pop_const_context ();
+
+ tree folded_capacity_expr = fold_expr (capacity_expr);
+
+ translated
+ = ctx->get_backend ()->array_type (element_type, folded_capacity_expr);
+}
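+
+// Sketch of the capacity handling above (invented names): the capacity
+// expression is compiled in a const context and folded before the
+// array type is built, so a computed length works.
+//
+//   const N: usize = 4 * 2;
+//
+//   fn main () {
+//     let buf = [0u8; N];
+//     println! ("{}", buf.len ());
+//   }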
+
+void
+TyTyResolveCompile::visit (const TyTy::SliceType &type)
+{
+ tree type_record = create_slice_type_record (type);
+
+ std::string named_struct_str
+ = std::string ("[") + type.get_element_type ()->get_name () + "]";
+ translated = ctx->get_backend ()->named_type (named_struct_str, type_record,
+ type.get_ident ().locus);
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::BoolType &)
+{
+ translated
+ = ctx->get_backend ()->named_type ("bool",
+ ctx->get_backend ()->bool_type (),
+ Linemap::predeclared_location ());
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::IntType &type)
+{
+ switch (type.get_int_kind ())
+ {
+ case TyTy::IntType::I8:
+ translated = ctx->get_backend ()->named_type (
+ "i8", ctx->get_backend ()->integer_type (false, 8),
+ Linemap::predeclared_location ());
+ return;
+
+ case TyTy::IntType::I16:
+ translated = ctx->get_backend ()->named_type (
+ "i16", ctx->get_backend ()->integer_type (false, 16),
+ Linemap::predeclared_location ());
+ return;
+
+ case TyTy::IntType::I32:
+ translated = ctx->get_backend ()->named_type (
+ "i32", ctx->get_backend ()->integer_type (false, 32),
+ Linemap::predeclared_location ());
+ return;
+
+ case TyTy::IntType::I64:
+ translated = ctx->get_backend ()->named_type (
+ "i64", ctx->get_backend ()->integer_type (false, 64),
+ Linemap::predeclared_location ());
+ return;
+
+ case TyTy::IntType::I128:
+ translated = ctx->get_backend ()->named_type (
+ "i128", ctx->get_backend ()->integer_type (false, 128),
+ Linemap::predeclared_location ());
+ return;
+ }
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::UintType &type)
+{
+ switch (type.get_uint_kind ())
+ {
+ case TyTy::UintType::U8:
+ translated = ctx->get_backend ()->named_type (
+ "u8", ctx->get_backend ()->integer_type (true, 8),
+ Linemap::predeclared_location ());
+ return;
+
+ case TyTy::UintType::U16:
+ translated = ctx->get_backend ()->named_type (
+ "u16", ctx->get_backend ()->integer_type (true, 16),
+ Linemap::predeclared_location ());
+ return;
+
+ case TyTy::UintType::U32:
+ translated = ctx->get_backend ()->named_type (
+ "u32", ctx->get_backend ()->integer_type (true, 32),
+ Linemap::predeclared_location ());
+ return;
+
+ case TyTy::UintType::U64:
+ translated = ctx->get_backend ()->named_type (
+ "u64", ctx->get_backend ()->integer_type (true, 64),
+ Linemap::predeclared_location ());
+ return;
+
+ case TyTy::UintType::U128:
+ translated = ctx->get_backend ()->named_type (
+ "u128", ctx->get_backend ()->integer_type (true, 128),
+ Linemap::predeclared_location ());
+ return;
+ }
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::FloatType &type)
+{
+ switch (type.get_float_kind ())
+ {
+ case TyTy::FloatType::F32:
+ translated
+ = ctx->get_backend ()->named_type ("f32",
+ ctx->get_backend ()->float_type (32),
+ Linemap::predeclared_location ());
+ return;
+
+ case TyTy::FloatType::F64:
+ translated
+ = ctx->get_backend ()->named_type ("f64",
+ ctx->get_backend ()->float_type (64),
+ Linemap::predeclared_location ());
+ return;
+ }
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::USizeType &)
+{
+ translated = ctx->get_backend ()->named_type (
+ "usize",
+ ctx->get_backend ()->integer_type (
+ true, ctx->get_backend ()->get_pointer_size ()),
+ Linemap::predeclared_location ());
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::ISizeType &)
+{
+ translated = ctx->get_backend ()->named_type (
+ "isize",
+ ctx->get_backend ()->integer_type (
+ false, ctx->get_backend ()->get_pointer_size ()),
+ Linemap::predeclared_location ());
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::CharType &)
+{
+ translated
+ = ctx->get_backend ()->named_type ("char",
+ ctx->get_backend ()->wchar_type (),
+ Linemap::predeclared_location ());
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::ReferenceType &type)
+{
+ const TyTy::SliceType *slice = nullptr;
+ const TyTy::StrType *str = nullptr;
+ if (type.is_dyn_slice_type (&slice))
+ {
+ tree type_record = create_slice_type_record (*slice);
+ std::string dyn_slice_type_str
+ = std::string (type.is_mutable () ? "&mut " : "&") + "["
+ + slice->get_element_type ()->get_name () + "]";
+
+ translated
+ = ctx->get_backend ()->named_type (dyn_slice_type_str, type_record,
+ slice->get_locus ());
+
+ return;
+ }
+ else if (type.is_dyn_str_type (&str))
+ {
+ tree type_record = create_str_type_record (*str);
+ std::string dyn_str_type_str
+ = std::string (type.is_mutable () ? "&mut " : "&") + "str";
+
+ translated
+ = ctx->get_backend ()->named_type (dyn_str_type_str, type_record,
+ str->get_locus ());
+
+ return;
+ }
+
+ tree base_compiled_type
+ = TyTyResolveCompile::compile (ctx, type.get_base (), trait_object_mode);
+ if (type.is_mutable ())
+ {
+ translated = ctx->get_backend ()->reference_type (base_compiled_type);
+ }
+ else
+ {
+ auto base = ctx->get_backend ()->immutable_type (base_compiled_type);
+ translated = ctx->get_backend ()->reference_type (base);
+ }
+}
+
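+// For illustration: references to sized types lower to plain pointers
+// (const-qualified when not mutable), while `&[T]` and `&str` lower to the
+// two-word data/len record built by the create_*_type_record helpers below,
+// so "fat" references carry their length alongside the data pointer.
+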
+void
+TyTyResolveCompile::visit (const TyTy::PointerType &type)
+{
+ const TyTy::SliceType *slice = nullptr;
+ const TyTy::StrType *str = nullptr;
+ if (type.is_dyn_slice_type (&slice))
+ {
+ tree type_record = create_slice_type_record (*slice);
+ std::string dyn_slice_type_str
+ = std::string (type.is_mutable () ? "*mut " : "*const ") + "["
+ + slice->get_element_type ()->get_name () + "]";
+
+ translated
+ = ctx->get_backend ()->named_type (dyn_slice_type_str, type_record,
+ slice->get_locus ());
+
+ return;
+ }
+ else if (type.is_dyn_str_type (&str))
+ {
+ tree type_record = create_str_type_record (*str);
+ std::string dyn_str_type_str
+ = std::string (type.is_mutable () ? "*mut " : "*const ") + "str";
+
+ translated
+ = ctx->get_backend ()->named_type (dyn_str_type_str, type_record,
+ str->get_locus ());
+
+ return;
+ }
+
+ tree base_compiled_type
+ = TyTyResolveCompile::compile (ctx, type.get_base (), trait_object_mode);
+ if (type.is_mutable ())
+ {
+ translated = ctx->get_backend ()->pointer_type (base_compiled_type);
+ }
+ else
+ {
+ auto base = ctx->get_backend ()->immutable_type (base_compiled_type);
+ translated = ctx->get_backend ()->pointer_type (base);
+ }
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::StrType &type)
+{
+ tree raw_str = create_str_type_record (type);
+ translated
+ = ctx->get_backend ()->named_type ("str", raw_str,
+ Linemap::predeclared_location ());
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::NeverType &)
+{
+ translated = ctx->get_backend ()->unit_type ();
+}
+
+void
+TyTyResolveCompile::visit (const TyTy::DynamicObjectType &type)
+{
+ if (trait_object_mode)
+ {
+ translated = ctx->get_backend ()->integer_type (
+ true, ctx->get_backend ()->get_pointer_size ());
+ return;
+ }
+
+ // create implicit struct
+ auto items = type.get_object_items ();
+ std::vector<Backend::typed_identifier> fields;
+
+ tree uint = ctx->get_backend ()->integer_type (
+ true, ctx->get_backend ()->get_pointer_size ());
+ tree uintptr_ty = build_pointer_type (uint);
+
+ Backend::typed_identifier f ("pointer", uintptr_ty,
+ ctx->get_mappings ()->lookup_location (
+ type.get_ty_ref ()));
+ fields.push_back (std::move (f));
+
+ tree vtable_size = build_int_cst (size_type_node, items.size ());
+ tree vtable_type = ctx->get_backend ()->array_type (uintptr_ty, vtable_size);
+ Backend::typed_identifier vtf ("vtable", vtable_type,
+ ctx->get_mappings ()->lookup_location (
+ type.get_ty_ref ()));
+ fields.push_back (std::move (vtf));
+
+ tree type_record = ctx->get_backend ()->struct_type (fields);
+ translated = ctx->get_backend ()->named_type (type.get_name (), type_record,
+ type.get_ident ().locus);
+}
+
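+// For illustration: the record built above makes a trait object roughly
+// layout-equivalent to this hypothetical struct:
+//
+//   struct dyn_object
+//   {
+//     uintptr_t *pointer;   /* address of the concrete receiver */
+//     uintptr_t vtable[N];  /* one slot per object item */
+//   };
+//
+// See coerce_to_dyn_object in rust-compile.cc for how instances of this
+// record are constructed.
+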
+tree
+TyTyResolveCompile::create_slice_type_record (const TyTy::SliceType &type)
+{
+ // lookup usize
+ TyTy::BaseType *usize = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_builtin ("usize", &usize);
+ rust_assert (ok);
+
+ tree element_type
+ = TyTyResolveCompile::compile (ctx, type.get_element_type ());
+ tree data_field_ty = build_pointer_type (element_type);
+ Backend::typed_identifier data_field ("data", data_field_ty,
+ type.get_locus ());
+
+ tree len_field_ty = TyTyResolveCompile::compile (ctx, usize);
+ Backend::typed_identifier len_field ("len", len_field_ty, type.get_locus ());
+
+ tree record = ctx->get_backend ()->struct_type ({data_field, len_field});
+ SLICE_FLAG (record) = 1;
+ TYPE_MAIN_VARIANT (record) = ctx->insert_main_variant (record);
+
+ return record;
+}
+
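+// For illustration: the record built above gives `[T]` its fat-pointer
+// payload, e.g. `[u8]` becomes roughly `{ uint8_t *data; size_t len; }`;
+// SLICE_FLAG marks the record so later passes can recognize it via
+// SLICE_TYPE_P.
+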
+tree
+TyTyResolveCompile::create_str_type_record (const TyTy::StrType &type)
+{
+ // lookup usize
+ TyTy::BaseType *usize = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_builtin ("usize", &usize);
+ rust_assert (ok);
+
+ tree char_ptr = build_pointer_type (char_type_node);
+ tree const_char_type = build_qualified_type (char_ptr, TYPE_QUAL_CONST);
+
+ tree element_type = const_char_type;
+ tree data_field_ty = build_pointer_type (element_type);
+ Backend::typed_identifier data_field ("data", data_field_ty,
+ type.get_locus ());
+
+ tree len_field_ty = TyTyResolveCompile::compile (ctx, usize);
+ Backend::typed_identifier len_field ("len", len_field_ty, type.get_locus ());
+
+ tree record = ctx->get_backend ()->struct_type ({data_field, len_field});
+ SLICE_FLAG (record) = 1;
+ TYPE_MAIN_VARIANT (record) = ctx->insert_main_variant (record);
+
+ return record;
+}
+
+} // namespace Compile
+} // namespace Rust
diff --git a/gcc/rust/backend/rust-compile-type.h b/gcc/rust/backend/rust-compile-type.h
new file mode 100644
index 0000000..4fea6ba
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-type.h
@@ -0,0 +1,79 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_COMPILE_TYPE
+#define RUST_COMPILE_TYPE
+
+#include "rust-compile-context.h"
+
+namespace Rust {
+namespace Compile {
+
+class TyTyResolveCompile : protected TyTy::TyConstVisitor
+{
+public:
+ static tree compile (Context *ctx, const TyTy::BaseType *ty,
+ bool trait_object_mode = false);
+
+ static tree get_implicit_enumeral_node_type (Context *ctx);
+
+ void visit (const TyTy::InferType &) override;
+ void visit (const TyTy::ADTType &) override;
+ void visit (const TyTy::TupleType &) override;
+ void visit (const TyTy::FnType &) override;
+ void visit (const TyTy::FnPtr &) override;
+ void visit (const TyTy::ArrayType &) override;
+ void visit (const TyTy::SliceType &) override;
+ void visit (const TyTy::BoolType &) override;
+ void visit (const TyTy::IntType &) override;
+ void visit (const TyTy::UintType &) override;
+ void visit (const TyTy::FloatType &) override;
+ void visit (const TyTy::USizeType &) override;
+ void visit (const TyTy::ISizeType &) override;
+ void visit (const TyTy::ErrorType &) override;
+ void visit (const TyTy::CharType &) override;
+ void visit (const TyTy::ReferenceType &) override;
+ void visit (const TyTy::PointerType &) override;
+ void visit (const TyTy::ParamType &) override;
+ void visit (const TyTy::StrType &) override;
+ void visit (const TyTy::NeverType &) override;
+ void visit (const TyTy::PlaceholderType &) override;
+ void visit (const TyTy::ProjectionType &) override;
+ void visit (const TyTy::DynamicObjectType &) override;
+ void visit (const TyTy::ClosureType &) override;
+
+public:
+ static hashval_t type_hasher (tree type);
+
+protected:
+ tree create_slice_type_record (const TyTy::SliceType &type);
+ tree create_str_type_record (const TyTy::StrType &type);
+
+private:
+ TyTyResolveCompile (Context *ctx, bool trait_object_mode);
+
+ Context *ctx;
+ bool trait_object_mode;
+ tree translated;
+  int recursive_ops;
+};
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_COMPILE_TYPE
diff --git a/gcc/rust/backend/rust-compile-var-decl.h b/gcc/rust/backend/rust-compile-var-decl.h
new file mode 100644
index 0000000..00146a4
--- /dev/null
+++ b/gcc/rust/backend/rust-compile-var-decl.h
@@ -0,0 +1,95 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_COMPILE_VAR_DECL
+#define RUST_COMPILE_VAR_DECL
+
+#include "rust-compile-base.h"
+#include "rust-hir-visitor.h"
+
+namespace Rust {
+namespace Compile {
+
+class CompileVarDecl : public HIRCompileBase, public HIR::HIRPatternVisitor
+{
+ using HIR::HIRPatternVisitor::visit;
+
+public:
+ static ::Bvariable *compile (tree fndecl, tree translated_type,
+ HIR::Pattern *pattern, Context *ctx)
+ {
+ CompileVarDecl compiler (ctx, fndecl, translated_type);
+ pattern->accept_vis (compiler);
+ return compiler.compiled_variable;
+ }
+
+ void visit (HIR::IdentifierPattern &pattern) override
+ {
+ if (!pattern.is_mut ())
+ translated_type = ctx->get_backend ()->immutable_type (translated_type);
+
+ compiled_variable
+ = ctx->get_backend ()->local_variable (fndecl, pattern.get_identifier (),
+ translated_type, NULL /*decl_var*/,
+ pattern.get_locus ());
+
+ HirId stmt_id = pattern.get_pattern_mappings ().get_hirid ();
+ ctx->insert_var_decl (stmt_id, compiled_variable);
+ }
+
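+  // For illustration: `let x: i32` wraps the declared type as immutable,
+  // while `let mut x: i32` keeps the plain type, so mutability is encoded
+  // directly in the GCC type of the local.
+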
+ void visit (HIR::WildcardPattern &pattern) override
+ {
+ translated_type = ctx->get_backend ()->immutable_type (translated_type);
+
+ compiled_variable
+ = ctx->get_backend ()->local_variable (fndecl, "_", translated_type,
+ NULL /*decl_var*/,
+ pattern.get_locus ());
+
+ HirId stmt_id = pattern.get_pattern_mappings ().get_hirid ();
+ ctx->insert_var_decl (stmt_id, compiled_variable);
+ }
+
+ // Empty visit for unused Pattern HIR nodes.
+ void visit (HIR::GroupedPattern &) override {}
+ void visit (HIR::LiteralPattern &) override {}
+ void visit (HIR::PathInExpression &) override {}
+ void visit (HIR::QualifiedPathInExpression &) override {}
+ void visit (HIR::RangePattern &) override {}
+ void visit (HIR::ReferencePattern &) override {}
+ void visit (HIR::SlicePattern &) override {}
+ void visit (HIR::StructPattern &) override {}
+ void visit (HIR::TuplePattern &) override {}
+ void visit (HIR::TupleStructPattern &) override {}
+
+private:
+ CompileVarDecl (Context *ctx, tree fndecl, tree translated_type)
+ : HIRCompileBase (ctx), fndecl (fndecl), translated_type (translated_type),
+ compiled_variable (ctx->get_backend ()->error_variable ())
+ {}
+
+ tree fndecl;
+ tree translated_type;
+
+ Bvariable *compiled_variable;
+};
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_COMPILE_VAR_DECL
diff --git a/gcc/rust/backend/rust-compile.cc b/gcc/rust/backend/rust-compile.cc
new file mode 100644
index 0000000..db08b3d
--- /dev/null
+++ b/gcc/rust/backend/rust-compile.cc
@@ -0,0 +1,416 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-compile.h"
+#include "rust-compile-item.h"
+#include "rust-compile-implitem.h"
+#include "rust-compile-expr.h"
+#include "rust-compile-struct-field-expr.h"
+#include "rust-compile-stmt.h"
+#include "rust-hir-trait-resolve.h"
+#include "rust-hir-path-probe.h"
+#include "rust-hir-type-bounds.h"
+#include "rust-hir-dot-operator.h"
+#include "rust-compile-block.h"
+
+namespace Rust {
+namespace Compile {
+
+CompileCrate::CompileCrate (HIR::Crate &crate, Context *ctx)
+ : crate (crate), ctx (ctx)
+{}
+
+CompileCrate::~CompileCrate () {}
+
+void
+CompileCrate::Compile (HIR::Crate &crate, Context *ctx)
+{
+ CompileCrate c (crate, ctx);
+ c.go ();
+}
+
+void
+CompileCrate::go ()
+{
+ for (auto &item : crate.items)
+ CompileItem::compile (item.get (), ctx);
+}
+
+// Shared methods in compilation
+
+tree
+HIRCompileBase::coercion_site (HirId id, tree rvalue,
+ const TyTy::BaseType *rval,
+ const TyTy::BaseType *lval,
+ Location lvalue_locus, Location rvalue_locus)
+{
+ std::vector<Resolver::Adjustment> *adjustments = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_autoderef_mappings (id, &adjustments);
+ if (ok)
+ {
+ rvalue = resolve_adjustements (*adjustments, rvalue, rvalue_locus);
+ }
+
+ return coercion_site1 (rvalue, rval, lval, lvalue_locus, rvalue_locus);
+}
+
+tree
+HIRCompileBase::coercion_site1 (tree rvalue, const TyTy::BaseType *rval,
+ const TyTy::BaseType *lval,
+ Location lvalue_locus, Location rvalue_locus)
+{
+ if (rvalue == error_mark_node)
+ return error_mark_node;
+
+ const TyTy::BaseType *actual = rval->destructure ();
+ const TyTy::BaseType *expected = lval->destructure ();
+
+ if (expected->get_kind () == TyTy::TypeKind::REF)
+ {
+ // this is a dyn object
+ if (SLICE_TYPE_P (TREE_TYPE (rvalue)))
+ {
+ return rvalue;
+ }
+
+ // bad coercion... of something to a reference
+ if (actual->get_kind () != TyTy::TypeKind::REF)
+ return error_mark_node;
+
+ const TyTy::ReferenceType *exp
+ = static_cast<const TyTy::ReferenceType *> (expected);
+ const TyTy::ReferenceType *act
+ = static_cast<const TyTy::ReferenceType *> (actual);
+
+ tree deref_rvalue = indirect_expression (rvalue, rvalue_locus);
+ tree coerced
+ = coercion_site1 (deref_rvalue, act->get_base (), exp->get_base (),
+ lvalue_locus, rvalue_locus);
+ if (exp->is_dyn_object () && SLICE_TYPE_P (TREE_TYPE (coerced)))
+ return coerced;
+
+ return address_expression (coerced, rvalue_locus);
+ }
+ else if (expected->get_kind () == TyTy::TypeKind::POINTER)
+ {
+ // this is a dyn object
+ if (SLICE_TYPE_P (TREE_TYPE (rvalue)))
+ {
+ return rvalue;
+ }
+
+      // bad coercion... of something to a pointer
+ bool valid_coercion = actual->get_kind () == TyTy::TypeKind::REF
+ || actual->get_kind () == TyTy::TypeKind::POINTER;
+ if (!valid_coercion)
+ return error_mark_node;
+
+ const TyTy::ReferenceType *exp
+ = static_cast<const TyTy::ReferenceType *> (expected);
+
+ TyTy::BaseType *actual_base = nullptr;
+ if (actual->get_kind () == TyTy::TypeKind::REF)
+ {
+ const TyTy::ReferenceType *act
+ = static_cast<const TyTy::ReferenceType *> (actual);
+
+ actual_base = act->get_base ();
+ }
+ else if (actual->get_kind () == TyTy::TypeKind::POINTER)
+ {
+ const TyTy::PointerType *act
+ = static_cast<const TyTy::PointerType *> (actual);
+
+ actual_base = act->get_base ();
+ }
+ rust_assert (actual_base != nullptr);
+
+ tree deref_rvalue = indirect_expression (rvalue, rvalue_locus);
+ tree coerced
+ = coercion_site1 (deref_rvalue, actual_base, exp->get_base (),
+ lvalue_locus, rvalue_locus);
+
+ if (exp->is_dyn_object () && SLICE_TYPE_P (TREE_TYPE (coerced)))
+ return coerced;
+
+ return address_expression (coerced, rvalue_locus);
+ }
+ else if (expected->get_kind () == TyTy::TypeKind::ARRAY)
+ {
+ if (actual->get_kind () != TyTy::TypeKind::ARRAY)
+ return error_mark_node;
+
+ tree tree_rval_type = TyTyResolveCompile::compile (ctx, actual);
+ tree tree_lval_type = TyTyResolveCompile::compile (ctx, expected);
+ if (!verify_array_capacities (tree_lval_type, tree_rval_type,
+ lvalue_locus, rvalue_locus))
+ return error_mark_node;
+ }
+ else if (expected->get_kind () == TyTy::TypeKind::SLICE)
+ {
+ // bad coercion
+ bool valid_coercion = actual->get_kind () == TyTy::TypeKind::SLICE
+ || actual->get_kind () == TyTy::TypeKind::ARRAY;
+ if (!valid_coercion)
+ return error_mark_node;
+
+ // nothing to do here
+ if (actual->get_kind () == TyTy::TypeKind::SLICE)
+ return rvalue;
+
+ // return an unsized coercion
+ Resolver::Adjustment unsize_adj (
+ Resolver::Adjustment::AdjustmentType::UNSIZE, actual, expected);
+ return resolve_unsized_adjustment (unsize_adj, rvalue, rvalue_locus);
+ }
+
+ return rvalue;
+}
+
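+// For illustration: the SLICE arm above implements unsized coercions, e.g.
+// passing a `&[i32; 4]` where `&[i32]` is expected dereferences the array
+// reference and then wraps the array into a data/len slice record via
+// resolve_unsized_adjustment.
+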
+tree
+HIRCompileBase::coerce_to_dyn_object (tree compiled_ref,
+ const TyTy::BaseType *actual,
+ const TyTy::DynamicObjectType *ty,
+ Location locus)
+{
+ tree dynamic_object = TyTyResolveCompile::compile (ctx, ty);
+ tree dynamic_object_fields = TYPE_FIELDS (dynamic_object);
+ tree vtable_field = DECL_CHAIN (dynamic_object_fields);
+ rust_assert (TREE_CODE (TREE_TYPE (vtable_field)) == ARRAY_TYPE);
+
+  // This assumes ordering; currently the structure is:
+  //   __trait_object_ptr
+  //   [list of function ptrs]
+
+ std::vector<std::pair<Resolver::TraitReference *, HIR::ImplBlock *>>
+ probed_bounds_for_receiver = Resolver::TypeBoundsProbe::Probe (actual);
+
+ tree address_of_compiled_ref = null_pointer_node;
+ if (!actual->is_unit ())
+ address_of_compiled_ref = address_expression (compiled_ref, locus);
+
+ std::vector<tree> vtable_ctor_elems;
+ std::vector<unsigned long> vtable_ctor_idx;
+ unsigned long i = 0;
+ for (auto &bound : ty->get_object_items ())
+ {
+ const Resolver::TraitItemReference *item = bound.first;
+ const TyTy::TypeBoundPredicate *predicate = bound.second;
+
+ auto address = compute_address_for_trait_item (item, predicate,
+ probed_bounds_for_receiver,
+ actual, actual, locus);
+ vtable_ctor_elems.push_back (address);
+ vtable_ctor_idx.push_back (i++);
+ }
+
+ tree vtable_ctor = ctx->get_backend ()->array_constructor_expression (
+ TREE_TYPE (vtable_field), vtable_ctor_idx, vtable_ctor_elems, locus);
+
+ std::vector<tree> dyn_ctor = {address_of_compiled_ref, vtable_ctor};
+ return ctx->get_backend ()->constructor_expression (dynamic_object, false,
+ dyn_ctor, -1, locus);
+}
+
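+// For illustration: for `let d: &dyn Trait = &c;` the constructor built
+// above is roughly `{ &c, { item_0_addr, item_1_addr, ... } }`, with one
+// vtable slot per object item in the order returned by get_object_items.
+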
+tree
+HIRCompileBase::compute_address_for_trait_item (
+ const Resolver::TraitItemReference *ref,
+ const TyTy::TypeBoundPredicate *predicate,
+ std::vector<std::pair<Resolver::TraitReference *, HIR::ImplBlock *>>
+ &receiver_bounds,
+ const TyTy::BaseType *receiver, const TyTy::BaseType *root, Location locus)
+{
+  // There are two cases here: one where the item has an implementation
+  // within a trait-impl-block, and one where the trait itself provides a
+  // default implementation.
+  //
+  // The awkward part is that this might be a generic trait, so we need to
+  // figure out the correct monomorphized type in order to resolve the
+  // address of the function; this is stored as part of the
+  // type-bound-predicate.
+  //
+  // Algo:
+  //   1. check if there is an impl-item for this trait-item-ref
+  //   2. else assert that the trait-item-ref has a default implementation
+  //
+  // FIXME: this does not support super traits
+
+ TyTy::TypeBoundPredicateItem predicate_item
+ = predicate->lookup_associated_item (ref->get_identifier ());
+ rust_assert (!predicate_item.is_error ());
+
+ // this is the expected end type
+ TyTy::BaseType *trait_item_type = predicate_item.get_tyty_for_receiver (root);
+ rust_assert (trait_item_type->get_kind () == TyTy::TypeKind::FNDEF);
+ TyTy::FnType *trait_item_fntype
+ = static_cast<TyTy::FnType *> (trait_item_type);
+
+ // find impl-block for this trait-item-ref
+ HIR::ImplBlock *associated_impl_block = nullptr;
+ const Resolver::TraitReference *predicate_trait_ref = predicate->get ();
+ for (auto &item : receiver_bounds)
+ {
+ Resolver::TraitReference *trait_ref = item.first;
+ HIR::ImplBlock *impl_block = item.second;
+ if (predicate_trait_ref->is_equal (*trait_ref))
+ {
+ associated_impl_block = impl_block;
+ break;
+ }
+ }
+
+  // FIXME: this should probably just return error_mark_node, but the assert
+  // helps debugging for now, since we wrongly return early on
+  // type-resolution failures; revisit once we make fuller use of error
+  // types and error_mark_node.
+ rust_assert (associated_impl_block != nullptr);
+
+ // lookup self for the associated impl
+ std::unique_ptr<HIR::Type> &self_type_path
+ = associated_impl_block->get_type ();
+ TyTy::BaseType *self = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_type (
+ self_type_path->get_mappings ().get_hirid (), &self);
+ rust_assert (ok);
+
+ // lookup the predicate item from the self
+ TyTy::TypeBoundPredicate *self_bound = nullptr;
+ for (auto &bound : self->get_specified_bounds ())
+ {
+ const Resolver::TraitReference *bound_ref = bound.get ();
+ const Resolver::TraitReference *specified_ref = predicate->get ();
+ if (bound_ref->is_equal (*specified_ref))
+ {
+ self_bound = &bound;
+ break;
+ }
+ }
+ rust_assert (self_bound != nullptr);
+
+ // lookup the associated item from the associated impl block
+ TyTy::TypeBoundPredicateItem associated_self_item
+ = self_bound->lookup_associated_item (ref->get_identifier ());
+ rust_assert (!associated_self_item.is_error ());
+
+ TyTy::BaseType *mono1 = associated_self_item.get_tyty_for_receiver (self);
+ rust_assert (mono1 != nullptr);
+ rust_assert (mono1->get_kind () == TyTy::TypeKind::FNDEF);
+  TyTy::FnType *associated_item_ty1 = static_cast<TyTy::FnType *> (mono1);
+
+ // Lookup the impl-block for the associated impl_item if it exists
+ HIR::Function *associated_function = nullptr;
+ for (auto &impl_item : associated_impl_block->get_impl_items ())
+ {
+ bool is_function = impl_item->get_impl_item_type ()
+ == HIR::ImplItem::ImplItemType::FUNCTION;
+ if (!is_function)
+ continue;
+
+ HIR::Function *fn = static_cast<HIR::Function *> (impl_item.get ());
+ bool found_associated_item
+ = fn->get_function_name ().compare (ref->get_identifier ()) == 0;
+ if (found_associated_item)
+ associated_function = fn;
+ }
+
+ // we found an impl_item for this
+ if (associated_function != nullptr)
+ {
+ // lookup the associated type for this item
+ TyTy::BaseType *lookup = nullptr;
+ bool ok = ctx->get_tyctx ()->lookup_type (
+ associated_function->get_mappings ().get_hirid (), &lookup);
+ rust_assert (ok);
+ rust_assert (lookup->get_kind () == TyTy::TypeKind::FNDEF);
+ TyTy::FnType *lookup_fntype = static_cast<TyTy::FnType *> (lookup);
+
+ if (lookup_fntype->needs_substitution ())
+ {
+ TyTy::SubstitutionArgumentMappings mappings
+	    = associated_item_ty1->solve_missing_mappings_from_this (
+ *trait_item_fntype, *lookup_fntype);
+ lookup_fntype = lookup_fntype->handle_substitions (mappings);
+ }
+
+ return CompileInherentImplItem::Compile (associated_function, ctx,
+ lookup_fntype, true, locus);
+ }
+
+ // we can only compile trait-items with a body
+ bool trait_item_has_definition = ref->is_optional ();
+ rust_assert (trait_item_has_definition);
+
+ HIR::TraitItem *trait_item = ref->get_hir_trait_item ();
+ return CompileTraitItem::Compile (trait_item, ctx, trait_item_fntype, true,
+ locus);
+}
+
+bool
+HIRCompileBase::verify_array_capacities (tree ltype, tree rtype,
+ Location lvalue_locus,
+ Location rvalue_locus)
+{
+ rust_assert (ltype != NULL_TREE);
+ rust_assert (rtype != NULL_TREE);
+
+  // let's just return true as other errors have already occurred
+ if (ltype == error_mark_node || rtype == error_mark_node)
+ return true;
+
+ tree ltype_domain = TYPE_DOMAIN (ltype);
+ if (!ltype_domain)
+ return false;
+
+ if (!TREE_CONSTANT (TYPE_MAX_VALUE (ltype_domain)))
+ return false;
+
+ unsigned HOST_WIDE_INT ltype_length
+ = wi::ext (wi::to_offset (TYPE_MAX_VALUE (ltype_domain))
+ - wi::to_offset (TYPE_MIN_VALUE (ltype_domain)) + 1,
+ TYPE_PRECISION (TREE_TYPE (ltype_domain)),
+ TYPE_SIGN (TREE_TYPE (ltype_domain)))
+ .to_uhwi ();
+
+ tree rtype_domain = TYPE_DOMAIN (rtype);
+ if (!rtype_domain)
+ return false;
+
+ if (!TREE_CONSTANT (TYPE_MAX_VALUE (rtype_domain)))
+ return false;
+
+ unsigned HOST_WIDE_INT rtype_length
+ = wi::ext (wi::to_offset (TYPE_MAX_VALUE (rtype_domain))
+ - wi::to_offset (TYPE_MIN_VALUE (rtype_domain)) + 1,
+ TYPE_PRECISION (TREE_TYPE (rtype_domain)),
+ TYPE_SIGN (TREE_TYPE (rtype_domain)))
+ .to_uhwi ();
+
+ if (ltype_length != rtype_length)
+ {
+ rust_error_at (
+ rvalue_locus,
+ "expected an array with a fixed size of " HOST_WIDE_INT_PRINT_UNSIGNED
+ " elements, found one with " HOST_WIDE_INT_PRINT_UNSIGNED " elements",
+ ltype_length, rtype_length);
+ return false;
+ }
+
+ return true;
+}
+
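+// For illustration: each length above is TYPE_MAX_VALUE - TYPE_MIN_VALUE + 1
+// over the array's index domain, so a domain of [0, 3] yields 4 elements;
+// a length mismatch is reported via rust_error_at rather than silently
+// accepted.
+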
+} // namespace Compile
+} // namespace Rust
diff --git a/gcc/rust/backend/rust-compile.h b/gcc/rust/backend/rust-compile.h
new file mode 100644
index 0000000..31cc086
--- /dev/null
+++ b/gcc/rust/backend/rust-compile.h
@@ -0,0 +1,47 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_COMPILE_H
+#define RUST_COMPILE_H
+
+#include "rust-system.h"
+#include "rust-hir-full.h"
+#include "rust-compile-context.h"
+
+namespace Rust {
+namespace Compile {
+
+class CompileCrate
+{
+public:
+ static void Compile (HIR::Crate &crate, Context *ctx);
+
+ ~CompileCrate ();
+
+private:
+ CompileCrate (HIR::Crate &crate, Context *ctx);
+ void go ();
+
+ HIR::Crate &crate;
+ Context *ctx;
+};
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_COMPILE_H
diff --git a/gcc/rust/backend/rust-constexpr.cc b/gcc/rust/backend/rust-constexpr.cc
new file mode 100644
index 0000000..4e581a3
--- /dev/null
+++ b/gcc/rust/backend/rust-constexpr.cc
@@ -0,0 +1,6480 @@
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-constexpr.h"
+#include "rust-location.h"
+#include "rust-diagnostics.h"
+#include "rust-tree.h"
+#include "fold-const.h"
+#include "realmpfr.h"
+#include "convert.h"
+#include "print-tree.h"
+#include "gimplify.h"
+#include "tree-iterator.h"
+#include "timevar.h"
+#include "varasm.h"
+#include "cgraph.h"
+#include "tree-inline.h"
+#include "vec.h"
+#include "function.h"
+#include "diagnostic.h"
+#include "target.h"
+#include "builtins.h"
+
+#define VERIFY_CONSTANT(X) \
+ do \
+ { \
+ if (verify_constant ((X), ctx->quiet, non_constant_p, overflow_p)) \
+ return t; \
+ } \
+ while (0)
+
+namespace Rust {
+namespace Compile {
+
+/* Returns true iff FUN is an instantiation of a constexpr function
+ template or a defaulted constexpr function. */
+
+bool
+is_instantiation_of_constexpr (tree fun)
+{
+ return DECL_DECLARED_CONSTEXPR_P (fun);
+}
+
+/* Return true if T is a literal type. */
+
+bool
+literal_type_p (tree t)
+{
+ if (SCALAR_TYPE_P (t) || VECTOR_TYPE_P (t) || TYPE_REF_P (t)
+ || (VOID_TYPE_P (t)))
+ return true;
+
+ if (TREE_CODE (t) == ARRAY_TYPE)
+ return literal_type_p (strip_array_types (t));
+ return false;
+}
+
+static bool
+verify_constant (tree, bool, bool *, bool *);
+
+static HOST_WIDE_INT
+find_array_ctor_elt (tree ary, tree dindex, bool insert = false);
+static int
+array_index_cmp (tree key, tree index);
+static bool
+potential_constant_expression_1 (tree t, bool want_rval, bool strict, bool now,
+ tsubst_flags_t flags, tree *jump_target);
+bool
+potential_constant_expression_1 (tree t, bool want_rval, bool strict, bool now,
+ tsubst_flags_t flags);
+tree
+unshare_constructor (tree t MEM_STAT_DECL);
+void
+maybe_save_constexpr_fundef (tree fun);
+
+static bool
+returns (tree *jump_target);
+static bool
+breaks (tree *jump_target);
+static bool
+continues (tree *jump_target);
+static bool
+switches (tree *jump_target);
+
+struct constexpr_global_ctx
+{
+ /* Values for any temporaries or local variables within the
+ constant-expression. */
+ hash_map<tree, tree> values;
+ /* Number of cxx_eval_constant_expression calls (except skipped ones,
+ on simple constants or location wrappers) encountered during current
+ cxx_eval_outermost_constant_expr call. */
+ HOST_WIDE_INT constexpr_ops_count;
+ /* Heap VAR_DECLs created during the evaluation of the outermost constant
+ expression. */
+ auto_vec<tree, 16> heap_vars;
+ /* Cleanups that need to be evaluated at the end of CLEANUP_POINT_EXPR. */
+ vec<tree> *cleanups;
+ /* Number of heap VAR_DECL deallocations. */
+ unsigned heap_dealloc_count;
+ /* Constructor. */
+ constexpr_global_ctx ()
+ : constexpr_ops_count (0), cleanups (NULL), heap_dealloc_count (0)
+ {}
+};
+
+/* In constexpr.cc */
+/* Representation of entries in the constexpr function definition table. */
+
+struct GTY ((for_user)) constexpr_fundef
+{
+ tree decl;
+ tree body;
+ tree parms;
+ tree result;
+};
+
+/* Objects of this type represent calls to constexpr functions
+ along with the bindings of parameters to their arguments, for
+ the purpose of compile time evaluation. */
+
+struct GTY ((for_user)) constexpr_call
+{
+ /* Description of the constexpr function definition. */
+ constexpr_fundef *fundef;
+ /* Parameter bindings environment. A TREE_VEC of arguments. */
+ tree bindings;
+ /* Result of the call.
+ NULL means the call is being evaluated.
+ error_mark_node means that the evaluation was erroneous;
+     otherwise, the actual value of the call. */
+ tree result;
+ /* The hash of this call; we remember it here to avoid having to
+ recalculate it when expanding the hash table. */
+ hashval_t hash;
+ /* Whether __builtin_is_constant_evaluated() should evaluate to true. */
+ bool manifestly_const_eval;
+};
+
+struct constexpr_call_hasher : ggc_ptr_hash<constexpr_call>
+{
+ static hashval_t hash (constexpr_call *);
+ static bool equal (constexpr_call *, constexpr_call *);
+};
+
+enum constexpr_switch_state
+{
+ /* Used when processing a switch for the first time by cxx_eval_switch_expr
+ and default: label for that switch has not been seen yet. */
+ css_default_not_seen,
+ /* Used when processing a switch for the first time by cxx_eval_switch_expr
+ and default: label for that switch has been seen already. */
+ css_default_seen,
+ /* Used when processing a switch for the second time by
+ cxx_eval_switch_expr, where default: label should match. */
+ css_default_processing
+};
+
+struct constexpr_ctx
+{
+ /* The part of the context that needs to be unique to the whole
+ cxx_eval_outermost_constant_expr invocation. */
+ constexpr_global_ctx *global;
+ /* The innermost call we're evaluating. */
+ constexpr_call *call;
+ /* SAVE_EXPRs and TARGET_EXPR_SLOT vars of TARGET_EXPRs that we've seen
+ within the current LOOP_EXPR. NULL if we aren't inside a loop. */
+ vec<tree> *save_exprs;
+ /* The CONSTRUCTOR we're currently building up for an aggregate
+ initializer. */
+ tree ctor;
+ /* The object we're building the CONSTRUCTOR for. */
+ tree object;
+ /* If inside SWITCH_EXPR. */
+ constexpr_switch_state *css_state;
+ /* The aggregate initialization context inside which this one is nested. This
+ is used by lookup_placeholder to resolve PLACEHOLDER_EXPRs. */
+ const constexpr_ctx *parent;
+
+ /* Whether we should error on a non-constant expression or fail quietly.
+ This flag needs to be here, but some of the others could move to global
+ if they get larger than a word. */
+ bool quiet;
+ /* Whether we are strictly conforming to constant expression rules or
+ trying harder to get a constant value. */
+ bool strict;
+ /* Whether __builtin_is_constant_evaluated () should be true. */
+ bool manifestly_const_eval;
+};
+
+struct constexpr_fundef_hasher : ggc_ptr_hash<constexpr_fundef>
+{
+ static hashval_t hash (const constexpr_fundef *);
+ static bool equal (const constexpr_fundef *, const constexpr_fundef *);
+};
+
+/* This table holds all constexpr function definitions seen in
+ the current translation unit. */
+
+static GTY (()) hash_table<constexpr_fundef_hasher> *constexpr_fundef_table;
+
+/* Utility function used for managing the constexpr function table.
+ Return true if the entries pointed to by P and Q are for the
+ same constexpr function. */
+
+inline bool
+constexpr_fundef_hasher::equal (const constexpr_fundef *lhs,
+ const constexpr_fundef *rhs)
+{
+ return lhs->decl == rhs->decl;
+}
+
+/* Utility function used for managing the constexpr function table.
+ Return a hash value for the entry pointed to by Q. */
+
+inline hashval_t
+constexpr_fundef_hasher::hash (const constexpr_fundef *fundef)
+{
+ return DECL_UID (fundef->decl);
+}
+
+/* Return a previously saved definition of function FUN. */
+
+constexpr_fundef *
+retrieve_constexpr_fundef (tree fun)
+{
+ if (constexpr_fundef_table == NULL)
+ return NULL;
+
+ constexpr_fundef fundef = {fun, NULL_TREE, NULL_TREE, NULL_TREE};
+ return constexpr_fundef_table->find (&fundef);
+}
+
+/* This internal flag controls whether we should avoid doing anything during
+ constexpr evaluation that would cause extra DECL_UID generation, such as
+ template instantiation and function body copying. */
+
+static bool uid_sensitive_constexpr_evaluation_value;
+
+/* An internal counter that keeps track of the number of times
+ uid_sensitive_constexpr_evaluation_p returned true. */
+
+static unsigned uid_sensitive_constexpr_evaluation_true_counter;
+
+/* The accessor for uid_sensitive_constexpr_evaluation_value which also
+ increments the corresponding counter. */
+
+static bool
+uid_sensitive_constexpr_evaluation_p ()
+{
+ if (uid_sensitive_constexpr_evaluation_value)
+ {
+ ++uid_sensitive_constexpr_evaluation_true_counter;
+ return true;
+ }
+ else
+ return false;
+}
+
+/* RAII sentinel that saves the value of a variable, optionally
+ overrides it right away, and restores its value when the sentinel
+   is destructed. */
+
+template <typename T> class temp_override
+{
+ T &overridden_variable;
+ T saved_value;
+
+public:
+ temp_override (T &var) : overridden_variable (var), saved_value (var) {}
+ temp_override (T &var, T overrider)
+ : overridden_variable (var), saved_value (var)
+ {
+ overridden_variable = overrider;
+ }
+ ~temp_override () { overridden_variable = saved_value; }
+};
+
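+/* Usage sketch:
+
+     temp_override<bool> ovr (some_flag, true);
+
+   SOME_FLAG (a stand-in for any bool variable) becomes true for the
+   lifetime of OVR and is restored to its previous value when OVR goes
+   out of scope.  */
+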
+/* An RAII sentinel used to restrict constexpr evaluation so that it
+ doesn't do anything that causes extra DECL_UID generation. */
+
+struct uid_sensitive_constexpr_evaluation_sentinel
+{
+ temp_override<bool> ovr;
+ uid_sensitive_constexpr_evaluation_sentinel ();
+};
+
+/* Used to determine whether uid_sensitive_constexpr_evaluation_p was
+ called and returned true, indicating that we've restricted constexpr
+ evaluation in order to avoid UID generation. We use this to control
+ updates to the fold_cache and cv_cache. */
+
+struct uid_sensitive_constexpr_evaluation_checker
+{
+ const unsigned saved_counter;
+ uid_sensitive_constexpr_evaluation_checker ();
+ bool evaluation_restricted_p () const;
+};
+
+/* The default constructor for uid_sensitive_constexpr_evaluation_sentinel
+ enables the internal flag for uid_sensitive_constexpr_evaluation_p
+ during the lifetime of the sentinel object. Upon its destruction, the
+ previous value of uid_sensitive_constexpr_evaluation_p is restored. */
+
+uid_sensitive_constexpr_evaluation_sentinel ::
+ uid_sensitive_constexpr_evaluation_sentinel ()
+ : ovr (uid_sensitive_constexpr_evaluation_value, true)
+{}
+
+/* The default constructor for uid_sensitive_constexpr_evaluation_checker
+ records the current number of times that uid_sensitive_constexpr_evaluation_p
+ has been called and returned true. */
+
+uid_sensitive_constexpr_evaluation_checker ::
+ uid_sensitive_constexpr_evaluation_checker ()
+ : saved_counter (uid_sensitive_constexpr_evaluation_true_counter)
+{}
+
+/* Returns true iff uid_sensitive_constexpr_evaluation_p is true, and
+ some constexpr evaluation was restricted due to u_s_c_e_p being called
+ and returning true during the lifetime of this checker object. */
+
+bool
+uid_sensitive_constexpr_evaluation_checker::evaluation_restricted_p () const
+{
+ return (uid_sensitive_constexpr_evaluation_value
+ && saved_counter != uid_sensitive_constexpr_evaluation_true_counter);
+}
+
+/* A table of all constexpr calls that have been evaluated by the
+ compiler in this translation unit. */
+
+static GTY (()) hash_table<constexpr_call_hasher> *constexpr_call_table;
+
+/* Compute a hash value for a constexpr call representation. */
+
+inline hashval_t
+constexpr_call_hasher::hash (constexpr_call *info)
+{
+ return info->hash;
+}
+
+/* Return true if the objects pointed to by P and Q represent calls
+ to the same constexpr function with the same arguments.
+ Otherwise, return false. */
+
+bool
+constexpr_call_hasher::equal (constexpr_call *lhs, constexpr_call *rhs)
+{
+ if (lhs == rhs)
+ return true;
+ if (lhs->hash != rhs->hash)
+ return false;
+ if (lhs->manifestly_const_eval != rhs->manifestly_const_eval)
+ return false;
+ if (!constexpr_fundef_hasher::equal (lhs->fundef, rhs->fundef))
+ return false;
+ return rs_tree_equal (lhs->bindings, rhs->bindings);
+}
+
+/* Initialize the constexpr call table, if needed. */
+
+static void
+maybe_initialize_constexpr_call_table (void)
+{
+ if (constexpr_call_table == NULL)
+ constexpr_call_table = hash_table<constexpr_call_hasher>::create_ggc (101);
+}
+
+/* During constexpr CALL_EXPR evaluation, to avoid issues with sharing when
+ a function happens to get called recursively, we unshare the callee
+ function's body and evaluate this unshared copy instead of evaluating the
+ original body.
+
+ FUNDEF_COPIES_TABLE is a per-function freelist of these unshared function
+ copies. The underlying data structure of FUNDEF_COPIES_TABLE is a hash_map
+ that's keyed off of the original FUNCTION_DECL and whose value is a
+ TREE_LIST of this function's unused copies awaiting reuse.
+
+ This is not GC-deletable to avoid GC affecting UID generation. */
+
+static GTY (()) decl_tree_map *fundef_copies_table;
+
+/* Reuse a copy or create a new unshared copy of the function FUN.
+ Return this copy. We use a TREE_LIST whose PURPOSE is body, VALUE
+ is parms, TYPE is result. */
+
+static tree
+get_fundef_copy (constexpr_fundef *fundef)
+{
+ tree copy;
+ bool existed;
+ tree *slot
+ = &(hash_map_safe_get_or_insert<hm_ggc> (fundef_copies_table, fundef->decl,
+ &existed, 127));
+
+ if (!existed)
+ {
+      /* There is no cached function available, or in use.  We can use
+	 the function directly.  The newly created slot records that this
+	 function is now in use.  */
+ copy = build_tree_list (fundef->body, fundef->parms);
+ TREE_TYPE (copy) = fundef->result;
+ }
+ else if (*slot == NULL_TREE)
+ {
+ if (uid_sensitive_constexpr_evaluation_p ())
+ return NULL_TREE;
+
+ /* We've already used the function itself, so make a copy. */
+ copy = build_tree_list (NULL, NULL);
+ tree saved_body = DECL_SAVED_TREE (fundef->decl);
+ tree saved_parms = DECL_ARGUMENTS (fundef->decl);
+ tree saved_result = DECL_RESULT (fundef->decl);
+ tree saved_fn = current_function_decl;
+ DECL_SAVED_TREE (fundef->decl) = fundef->body;
+ DECL_ARGUMENTS (fundef->decl) = fundef->parms;
+ DECL_RESULT (fundef->decl) = fundef->result;
+ current_function_decl = fundef->decl;
+ TREE_PURPOSE (copy)
+ = copy_fn (fundef->decl, TREE_VALUE (copy), TREE_TYPE (copy));
+ current_function_decl = saved_fn;
+ DECL_RESULT (fundef->decl) = saved_result;
+ DECL_ARGUMENTS (fundef->decl) = saved_parms;
+ DECL_SAVED_TREE (fundef->decl) = saved_body;
+ }
+ else
+ {
+ /* We have a cached function available. */
+ copy = *slot;
+ *slot = TREE_CHAIN (copy);
+ }
+
+ return copy;
+}
+
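+/* For illustration: the freelist is a chain of TREE_LISTs used as a stack;
+   get_fundef_copy pops an unused copy (or creates one), and save_fundef_copy
+   below pushes it back once the recursive evaluation is done with it.  */
+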
+/* Save the copy COPY of function FUN for later reuse by
+ get_fundef_copy(). By construction, there will always be an entry
+ to find. */
+
+static void
+save_fundef_copy (tree fun, tree copy)
+{
+ tree *slot = fundef_copies_table->get (fun);
+ TREE_CHAIN (copy) = *slot;
+ *slot = copy;
+}
+
+static tree
+constant_value_1 (tree decl, bool strict_p, bool return_aggregate_cst_ok_p,
+ bool unshare_p);
+tree
+decl_constant_value (tree decl, bool unshare_p);
+
+static void
+non_const_var_error (location_t loc, tree r);
+
+static tree
+eval_constant_expression (const constexpr_ctx *ctx, tree, bool, bool *, bool *,
+ tree * = NULL);
+
+static tree
+constexpr_fn_retval (const constexpr_ctx *ctx, tree r);
+
+static tree
+eval_store_expression (const constexpr_ctx *ctx, tree r, bool, bool *, bool *);
+
+static tree
+eval_call_expression (const constexpr_ctx *ctx, tree r, bool, bool *, bool *);
+
+static tree
+eval_binary_expression (const constexpr_ctx *ctx, tree r, bool, bool *, bool *);
+
+static tree
+get_function_named_in_call (tree t);
+
+static tree
+eval_statement_list (const constexpr_ctx *ctx, tree t, bool *non_constant_p,
+ bool *overflow_p, tree *jump_target);
+static tree
+extract_string_elt (tree string, unsigned chars_per_elt, unsigned index);
+
+static tree
+eval_conditional_expression (const constexpr_ctx *ctx, tree t, bool lval,
+ bool *non_constant_p, bool *overflow_p,
+ tree *jump_target);
+
+static tree
+eval_bit_field_ref (const constexpr_ctx *ctx, tree t, bool lval,
+ bool *non_constant_p, bool *overflow_p);
+
+static tree
+eval_loop_expr (const constexpr_ctx *ctx, tree t, bool *non_constant_p,
+ bool *overflow_p, tree *jump_target);
+
+static tree
+eval_switch_expr (const constexpr_ctx *ctx, tree t, bool *non_constant_p,
+ bool *overflow_p, tree *jump_target);
+
+static tree
+eval_unary_expression (const constexpr_ctx *ctx, tree t, bool /*lval*/,
+ bool *non_constant_p, bool *overflow_p);
+
+/* Variables and functions to manage constexpr call expansion context.
+ These do not need to be marked for PCH or GC. */
+
+/* FIXME remember and print actual constant arguments. */
+static vec<tree> call_stack;
+static int call_stack_tick;
+static int last_cx_error_tick;
+
+static int
+push_cx_call_context (tree call)
+{
+ ++call_stack_tick;
+ if (!EXPR_HAS_LOCATION (call))
+ SET_EXPR_LOCATION (call, input_location);
+ call_stack.safe_push (call);
+ int len = call_stack.length ();
+ if (len > max_constexpr_depth)
+ return false;
+ return len;
+}
+
+static void
+pop_cx_call_context (void)
+{
+ ++call_stack_tick;
+ call_stack.pop ();
+}
+
+vec<tree>
+cx_error_context (void)
+{
+ vec<tree> r = vNULL;
+ if (call_stack_tick != last_cx_error_tick && !call_stack.is_empty ())
+ r = call_stack;
+ last_cx_error_tick = call_stack_tick;
+ return r;
+}
+
+// this is ported from cxx_eval_outermost_constant_expr
+tree
+fold_expr (tree expr)
+{
+ bool allow_non_constant = false;
+ bool strict = true;
+ bool manifestly_const_eval = false;
+
+ constexpr_global_ctx global_ctx;
+ constexpr_ctx ctx
+ = {&global_ctx, NULL,
+ NULL, NULL,
+ NULL, NULL,
+ NULL, allow_non_constant,
+ strict, manifestly_const_eval || !allow_non_constant};
+
+ auto_vec<tree, 16> cleanups;
+ global_ctx.cleanups = &cleanups;
+
+ bool non_constant_p = false;
+ bool overflow_p = false;
+
+ tree folded = eval_constant_expression (&ctx, expr, false, &non_constant_p,
+ &overflow_p);
+ rust_assert (folded != NULL_TREE);
+
+ // more logic here to possibly port
+ return folded;
+}
+
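+/* Usage sketch: fold_expr is the entry point used elsewhere in the backend,
+   e.g. TyTyResolveCompile::visit (const TyTy::ArrayType &) in
+   rust-compile-type.cc folds the compiled capacity expression down to a
+   constant before building the GCC array type.  */
+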
+static bool
+same_type_ignoring_tlq_and_bounds_p (tree type1, tree type2)
+{
+ while (TREE_CODE (type1) == ARRAY_TYPE && TREE_CODE (type2) == ARRAY_TYPE
+ && (!TYPE_DOMAIN (type1) || !TYPE_DOMAIN (type2)))
+ {
+ type1 = TREE_TYPE (type1);
+ type2 = TREE_TYPE (type2);
+ }
+ return same_type_ignoring_top_level_qualifiers_p (type1, type2);
+}
+
+// forked from gcc/cp/constexpr.cc cxx_union_active_member
+
+/* Try to determine the currently active union member for an expression
+ with UNION_TYPE. If it can be determined, return the FIELD_DECL,
+ otherwise return NULL_TREE. */
+
+static tree
+union_active_member (const constexpr_ctx *ctx, tree t)
+{
+ constexpr_ctx new_ctx = *ctx;
+ new_ctx.quiet = true;
+ bool non_constant_p = false, overflow_p = false;
+ tree ctor = eval_constant_expression (&new_ctx, t, false, &non_constant_p,
+ &overflow_p);
+ if (TREE_CODE (ctor) == CONSTRUCTOR && CONSTRUCTOR_NELTS (ctor) == 1
+ && CONSTRUCTOR_ELT (ctor, 0)->index
+ && TREE_CODE (CONSTRUCTOR_ELT (ctor, 0)->index) == FIELD_DECL)
+ return CONSTRUCTOR_ELT (ctor, 0)->index;
+ return NULL_TREE;
+}
+
+// forked from gcc/cp/constexpr.cc cxx_fold_indirect_ref_1
+
+static tree
+fold_indirect_ref_1 (const constexpr_ctx *ctx, location_t loc, tree type,
+ tree op, unsigned HOST_WIDE_INT off, bool *empty_base)
+{
+ tree optype = TREE_TYPE (op);
+ unsigned HOST_WIDE_INT const_nunits;
+ if (off == 0 && similar_type_p (optype, type))
+ return op;
+ else if (TREE_CODE (optype) == COMPLEX_TYPE
+ && similar_type_p (type, TREE_TYPE (optype)))
+ {
+ /* *(foo *)&complexfoo => __real__ complexfoo */
+ if (off == 0)
+ return build1_loc (loc, REALPART_EXPR, type, op);
+ /* ((foo*)&complexfoo)[1] => __imag__ complexfoo */
+ else if (tree_to_uhwi (TYPE_SIZE_UNIT (type)) == off)
+ return build1_loc (loc, IMAGPART_EXPR, type, op);
+ }
+ /* ((foo*)&vectorfoo)[x] => BIT_FIELD_REF<vectorfoo,...> */
+ else if (VECTOR_TYPE_P (optype) && similar_type_p (type, TREE_TYPE (optype))
+ && TYPE_VECTOR_SUBPARTS (optype).is_constant (&const_nunits))
+ {
+ unsigned HOST_WIDE_INT part_width = tree_to_uhwi (TYPE_SIZE_UNIT (type));
+ unsigned HOST_WIDE_INT max_offset = part_width * const_nunits;
+ if (off < max_offset && off % part_width == 0)
+ {
+ tree index = bitsize_int (off * BITS_PER_UNIT);
+ return build3_loc (loc, BIT_FIELD_REF, type, op, TYPE_SIZE (type),
+ index);
+ }
+ }
+ /* ((foo *)&fooarray)[x] => fooarray[x] */
+ else if (TREE_CODE (optype) == ARRAY_TYPE
+ && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (optype)))
+ && !integer_zerop (TYPE_SIZE_UNIT (TREE_TYPE (optype))))
+ {
+ tree type_domain = TYPE_DOMAIN (optype);
+ tree min_val = size_zero_node;
+ if (type_domain && TYPE_MIN_VALUE (type_domain))
+ min_val = TYPE_MIN_VALUE (type_domain);
+ unsigned HOST_WIDE_INT el_sz
+ = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (optype)));
+ unsigned HOST_WIDE_INT idx = off / el_sz;
+ unsigned HOST_WIDE_INT rem = off % el_sz;
+ if (tree_fits_uhwi_p (min_val))
+ {
+ tree index = size_int (idx + tree_to_uhwi (min_val));
+ op = build4_loc (loc, ARRAY_REF, TREE_TYPE (optype), op, index,
+ NULL_TREE, NULL_TREE);
+ return fold_indirect_ref_1 (ctx, loc, type, op, rem, empty_base);
+ }
+ }
+ /* ((foo *)&struct_with_foo_field)[x] => COMPONENT_REF */
+ else if (TREE_CODE (optype) == RECORD_TYPE
+ || TREE_CODE (optype) == UNION_TYPE)
+ {
+ if (TREE_CODE (optype) == UNION_TYPE)
+ /* For unions prefer the currently active member. */
+ if (tree field = union_active_member (ctx, op))
+ {
+ unsigned HOST_WIDE_INT el_sz
+ = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (field)));
+ if (off < el_sz)
+ {
+ tree cop = build3 (COMPONENT_REF, TREE_TYPE (field), op, field,
+ NULL_TREE);
+ if (tree ret = fold_indirect_ref_1 (ctx, loc, type, cop, off,
+ empty_base))
+ return ret;
+ }
+ }
+ for (tree field = TYPE_FIELDS (optype); field; field = DECL_CHAIN (field))
+ if (TREE_CODE (field) == FIELD_DECL
+ && TREE_TYPE (field) != error_mark_node
+ && tree_fits_uhwi_p (TYPE_SIZE_UNIT (TREE_TYPE (field))))
+ {
+ tree pos = byte_position (field);
+ if (!tree_fits_uhwi_p (pos))
+ continue;
+ unsigned HOST_WIDE_INT upos = tree_to_uhwi (pos);
+ unsigned HOST_WIDE_INT el_sz
+ = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (field)));
+ if (upos <= off && off < upos + el_sz)
+ {
+ tree cop = build3 (COMPONENT_REF, TREE_TYPE (field), op, field,
+ NULL_TREE);
+ if (tree ret = fold_indirect_ref_1 (ctx, loc, type, cop,
+ off - upos, empty_base))
+ return ret;
+ }
+ }
+ /* Also handle conversion to an empty base class, which
+ is represented with a NOP_EXPR. */
+ if (is_empty_class (type) && CLASS_TYPE_P (optype))
+ {
+ *empty_base = true;
+ return op;
+ }
+ }
+
+ return NULL_TREE;
+}
+
+// forked from gcc/cp/constexpr.cc cxx_fold_indirect_ref
+
+/* A less strict version of fold_indirect_ref_1, which requires cv-quals to
+ match. We want to be less strict for simple *& folding; if we have a
+ non-const temporary that we access through a const pointer, that should
+ work. We handle this here rather than change fold_indirect_ref_1
+ because we're dealing with things like ADDR_EXPR of INTEGER_CST which
+ don't really make sense outside of constant expression evaluation. Also
+ we want to allow folding to COMPONENT_REF, which could cause trouble
+ with TBAA in fold_indirect_ref_1. */
+
+static tree
+rs_fold_indirect_ref (const constexpr_ctx *ctx, location_t loc, tree type,
+ tree op0, bool *empty_base)
+{
+ tree sub = op0;
+ tree subtype;
+ poly_uint64 const_op01;
+
+ /* STRIP_NOPS, but stop if REINTERPRET_CAST_P. */
+ while (CONVERT_EXPR_P (sub) || TREE_CODE (sub) == NON_LVALUE_EXPR
+ || TREE_CODE (sub) == VIEW_CONVERT_EXPR)
+ {
+ if (TREE_CODE (sub) == NOP_EXPR && REINTERPRET_CAST_P (sub))
+ return NULL_TREE;
+ sub = TREE_OPERAND (sub, 0);
+ }
+
+ subtype = TREE_TYPE (sub);
+ if (!INDIRECT_TYPE_P (subtype))
+ return NULL_TREE;
+
+ /* Canonicalizes the given OBJ/OFF pair by iteratively absorbing
+ the innermost component into the offset until it would make the
+ offset positive, so that cxx_fold_indirect_ref_1 can identify
+ more folding opportunities. */
+ auto canonicalize_obj_off = [] (tree &obj, tree &off) {
+ while (TREE_CODE (obj) == COMPONENT_REF
+ && (tree_int_cst_sign_bit (off) || integer_zerop (off)))
+ {
+ tree field = TREE_OPERAND (obj, 1);
+ tree pos = byte_position (field);
+ if (integer_zerop (off) && integer_nonzerop (pos))
+ /* If the offset is already 0, keep going as long as the
+ component is at position 0. */
+ break;
+ off = int_const_binop (PLUS_EXPR, off, pos);
+ obj = TREE_OPERAND (obj, 0);
+ }
+ };
+
+ if (TREE_CODE (sub) == ADDR_EXPR)
+ {
+ tree op = TREE_OPERAND (sub, 0);
+ tree optype = TREE_TYPE (op);
+
+ /* *&CONST_DECL -> to the value of the const decl. */
+ if (TREE_CODE (op) == CONST_DECL)
+ return DECL_INITIAL (op);
+ /* *&p => p; make sure to handle *&"str"[cst] here. */
+ if (similar_type_p (optype, type))
+ {
+ tree fop = fold_read_from_constant_string (op);
+ if (fop)
+ return fop;
+ else
+ return op;
+ }
+ else
+ {
+ tree off = integer_zero_node;
+ canonicalize_obj_off (op, off);
+ gcc_assert (integer_zerop (off));
+ return fold_indirect_ref_1 (ctx, loc, type, op, 0, empty_base);
+ }
+ }
+ else if (TREE_CODE (sub) == POINTER_PLUS_EXPR
+ && tree_fits_uhwi_p (TREE_OPERAND (sub, 1)))
+ {
+ tree op00 = TREE_OPERAND (sub, 0);
+ tree off = TREE_OPERAND (sub, 1);
+
+ STRIP_NOPS (op00);
+ if (TREE_CODE (op00) == ADDR_EXPR)
+ {
+ tree obj = TREE_OPERAND (op00, 0);
+ canonicalize_obj_off (obj, off);
+ return fold_indirect_ref_1 (ctx, loc, type, obj, tree_to_uhwi (off),
+ empty_base);
+ }
+ }
+ /* *(foo *)fooarrptr => (*fooarrptr)[0] */
+ else if (TREE_CODE (TREE_TYPE (subtype)) == ARRAY_TYPE
+ && similar_type_p (type, TREE_TYPE (TREE_TYPE (subtype))))
+ {
+ tree type_domain;
+ tree min_val = size_zero_node;
+ tree newsub
+ = rs_fold_indirect_ref (ctx, loc, TREE_TYPE (subtype), sub, NULL);
+ if (newsub)
+ sub = newsub;
+ else
+ sub = build1_loc (loc, INDIRECT_REF, TREE_TYPE (subtype), sub);
+ type_domain = TYPE_DOMAIN (TREE_TYPE (sub));
+ if (type_domain && TYPE_MIN_VALUE (type_domain))
+ min_val = TYPE_MIN_VALUE (type_domain);
+ return build4_loc (loc, ARRAY_REF, type, sub, min_val, NULL_TREE,
+ NULL_TREE);
+ }
+
+ return NULL_TREE;
+}
+
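+/* For illustration: this folder turns memory operations into direct
+   references, e.g. `*&x` => `x`, `*(foo *)&complexfoo` => `__real__
+   complexfoo`, and `((foo *)&fooarray)[x]` => `fooarray[x]`, so constexpr
+   evaluation can proceed on values rather than addresses.  */
+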
+// forked from gcc/cp/constexpr.cc cxx_eval_indirect_ref
+
+static tree
+rs_eval_indirect_ref (const constexpr_ctx *ctx, tree t, bool lval,
+ bool *non_constant_p, bool *overflow_p)
+{
+ tree orig_op0 = TREE_OPERAND (t, 0);
+ bool empty_base = false;
+
+ /* We can handle a MEM_REF like an INDIRECT_REF, if MEM_REF's second
+ operand is an integer-zero. Otherwise reject the MEM_REF for now. */
+
+ if (TREE_CODE (t) == MEM_REF
+ && (!TREE_OPERAND (t, 1) || !integer_zerop (TREE_OPERAND (t, 1))))
+ {
+ gcc_assert (ctx->quiet);
+ *non_constant_p = true;
+ return t;
+ }
+
+ /* First try to simplify it directly. */
+ tree r = rs_fold_indirect_ref (ctx, EXPR_LOCATION (t), TREE_TYPE (t),
+ orig_op0, &empty_base);
+ if (!r)
+ {
+ /* If that didn't work, evaluate the operand first. */
+ tree op0
+ = eval_constant_expression (ctx, orig_op0,
+ /*lval*/ false, non_constant_p, overflow_p);
+ /* Don't VERIFY_CONSTANT here. */
+ if (*non_constant_p)
+ return t;
+
+ if (!lval && integer_zerop (op0))
+ {
+ if (!ctx->quiet)
+ error ("dereferencing a null pointer");
+ *non_constant_p = true;
+ return t;
+ }
+
+ r = rs_fold_indirect_ref (ctx, EXPR_LOCATION (t), TREE_TYPE (t), op0,
+ &empty_base);
+ if (r == NULL_TREE)
+ {
+ /* We couldn't fold to a constant value. Make sure it's not
+ something we should have been able to fold. */
+ tree sub = op0;
+ STRIP_NOPS (sub);
+ if (TREE_CODE (sub) == ADDR_EXPR)
+ {
+ gcc_assert (
+ !similar_type_p (TREE_TYPE (TREE_TYPE (sub)), TREE_TYPE (t)));
+ /* DR 1188 says we don't have to deal with this. */
+ if (!ctx->quiet)
+ error_at (rs_expr_loc_or_input_loc (t),
+ "accessing value of %qE through a %qT glvalue in a "
+ "constant expression",
+ build_fold_indirect_ref (sub), TREE_TYPE (t));
+ *non_constant_p = true;
+ return t;
+ }
+
+ if (lval && op0 != orig_op0)
+ return build1 (INDIRECT_REF, TREE_TYPE (t), op0);
+ if (!lval)
+ VERIFY_CONSTANT (t);
+ return t;
+ }
+ }
+
+ r = eval_constant_expression (ctx, r, lval, non_constant_p, overflow_p);
+ if (*non_constant_p)
+ return t;
+
+ /* If we're pulling out the value of an empty base, just return an empty
+ CONSTRUCTOR. */
+ if (empty_base && !lval)
+ {
+ r = build_constructor (TREE_TYPE (t), NULL);
+ TREE_CONSTANT (r) = true;
+ }
+
+ return r;
+}
+
+// forked from gcc/cp/constexpr.cc cxx_eval_logical_expression
+
+/* Subroutine of cxx_eval_constant_expression.
+ Evaluate a short-circuited logical expression T in the context
+ of a given constexpr CALL. BAILOUT_VALUE is the value for
+ early return. CONTINUE_VALUE is used here purely for
+ sanity check purposes. */
+
+static tree
+eval_logical_expression (const constexpr_ctx *ctx, tree t, tree bailout_value,
+ tree continue_value, bool lval, bool *non_constant_p,
+ bool *overflow_p)
+{
+ tree r;
+ tree lhs = eval_constant_expression (ctx, TREE_OPERAND (t, 0), lval,
+ non_constant_p, overflow_p);
+ VERIFY_CONSTANT (lhs);
+ if (tree_int_cst_equal (lhs, bailout_value))
+ return lhs;
+ gcc_assert (tree_int_cst_equal (lhs, continue_value));
+ r = eval_constant_expression (ctx, TREE_OPERAND (t, 1), lval, non_constant_p,
+ overflow_p);
+ VERIFY_CONSTANT (r);
+ return r;
+}
+
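+/* For illustration: for a TRUTH_ANDIF_EXPR the caller is expected to pass
+   BAILOUT_VALUE = boolean_false_node and CONTINUE_VALUE = boolean_true_node,
+   so `a && b` stops at a constant-false LHS without evaluating the RHS;
+   TRUTH_ORIF_EXPR swaps the two values.  */
+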
+// forked from gcc/cp/constexpr.cc lookup_placeholder
+
+/* Find the object of TYPE under initialization in CTX. */
+
+static tree
+lookup_placeholder (const constexpr_ctx *ctx, bool lval, tree type)
+{
+ if (!ctx)
+ return NULL_TREE;
+
+ /* Prefer the outermost matching object, but don't cross
+ CONSTRUCTOR_PLACEHOLDER_BOUNDARY constructors. */
+ if (ctx->ctor && !CONSTRUCTOR_PLACEHOLDER_BOUNDARY (ctx->ctor))
+ if (tree outer_ob = lookup_placeholder (ctx->parent, lval, type))
+ return outer_ob;
+
+ /* We could use ctx->object unconditionally, but using ctx->ctor when we
+ can is a minor optimization. */
+ if (!lval && ctx->ctor && same_type_p (TREE_TYPE (ctx->ctor), type))
+ return ctx->ctor;
+
+ if (!ctx->object)
+ return NULL_TREE;
+
+ /* Since an object cannot have a field of its own type, we can search outward
+ from ctx->object to find the unique containing object of TYPE. */
+ tree ob = ctx->object;
+ while (ob)
+ {
+ if (same_type_ignoring_top_level_qualifiers_p (TREE_TYPE (ob), type))
+ break;
+ if (handled_component_p (ob))
+ ob = TREE_OPERAND (ob, 0);
+ else
+ ob = NULL_TREE;
+ }
+
+ return ob;
+}
+
+// forked from gcc/cp/constexpr.cc inline_asm_in_constexpr_error
+
+/* Complain about an attempt to evaluate inline assembly. */
+
+static void
+inline_asm_in_constexpr_error (location_t loc)
+{
+ auto_diagnostic_group d;
+ error_at (loc, "inline assembly is not a constant expression");
+ inform (loc, "only unevaluated inline assembly is allowed in a "
+ "%<constexpr%> function in C++20");
+}
+
+// forked from gcc/cp/constexpr.cc verify_ctor_sanity
+
+/* We're about to process an initializer for a class or array TYPE. Make
+ sure that CTX is set up appropriately. */
+
+static void
+verify_ctor_sanity (const constexpr_ctx *ctx, tree type)
+{
+ /* We don't bother building a ctor for an empty base subobject. */
+ if (is_empty_class (type))
+ return;
+
+ /* We're in the middle of an initializer that might involve placeholders;
+ our caller should have created a CONSTRUCTOR for us to put the
+ initializer into. We will either return that constructor or T. */
+ gcc_assert (ctx->ctor);
+ gcc_assert (
+ same_type_ignoring_top_level_qualifiers_p (type, TREE_TYPE (ctx->ctor)));
+ /* We used to check that ctx->ctor was empty, but that isn't the case when
+ the object is zero-initialized before calling the constructor. */
+ if (ctx->object)
+ {
+ tree otype = TREE_TYPE (ctx->object);
+ gcc_assert (same_type_ignoring_top_level_qualifiers_p (type, otype)
+ /* Handle flexible array members. */
+ || (TREE_CODE (otype) == ARRAY_TYPE
+ && TYPE_DOMAIN (otype) == NULL_TREE
+ && TREE_CODE (type) == ARRAY_TYPE
+ && (same_type_ignoring_top_level_qualifiers_p (
+ TREE_TYPE (type), TREE_TYPE (otype)))));
+ }
+ gcc_assert (!ctx->object || !DECL_P (ctx->object)
+ || *(ctx->global->values.get (ctx->object)) == ctx->ctor);
+}
+
+// forked from gcc/cp/constexpr.cc initialized_type
+
+/* Some of the expressions fed to the constexpr mechanism are calls to
+ constructors, which have type void. In that case, return the type being
+ initialized by the constructor. */
+
+static tree
+initialized_type (tree t)
+{
+ if (TYPE_P (t))
+ return t;
+ tree type = TREE_TYPE (t);
+ if (TREE_CODE (t) == CALL_EXPR)
+ {
+ /* A constructor call has void type, so we need to look deeper. */
+ tree fn = get_function_named_in_call (t);
+ if (fn && TREE_CODE (fn) == FUNCTION_DECL && DECL_CXX_CONSTRUCTOR_P (fn))
+ type = DECL_CONTEXT (fn);
+ }
+ else if (TREE_CODE (t) == COMPOUND_EXPR)
+ return initialized_type (TREE_OPERAND (t, 1));
+
+ return cv_unqualified (type);
+}
+
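+// Illustrative example (not part of the fork): a constructor CALL_EXPR has
+// void type, so for a call tree like
+//
+//   Foo::Foo (&obj)        // TREE_TYPE is void
+//
+// initialized_type returns the class type Foo (the constructor's
+// DECL_CONTEXT) rather than void; for a COMPOUND_EXPR it recurses into the
+// second operand, which carries the actual initialization.
+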
+// forked from gcc/cp/constexpr.cc init_subob_ctx
+
+/* We're about to initialize element INDEX of an array or class from VALUE.
+ Set up NEW_CTX appropriately by adjusting .object to refer to the
+ subobject and creating a new CONSTRUCTOR if the element is itself
+ a class or array. */
+
+static void
+init_subob_ctx (const constexpr_ctx *ctx, constexpr_ctx &new_ctx, tree index,
+ tree &value)
+{
+ new_ctx = *ctx;
+
+ if (index && TREE_CODE (index) != INTEGER_CST
+ && TREE_CODE (index) != FIELD_DECL && TREE_CODE (index) != RANGE_EXPR)
+ /* This won't have an element in the new CONSTRUCTOR. */
+ return;
+
+ tree type = initialized_type (value);
+ if (!AGGREGATE_TYPE_P (type) && !VECTOR_TYPE_P (type))
+ /* A non-aggregate member doesn't get its own CONSTRUCTOR. */
+ return;
+
+ /* The sub-aggregate initializer might contain a placeholder;
+ update object to refer to the subobject and ctor to refer to
+ the (newly created) sub-initializer. */
+ if (ctx->object)
+ {
+ if (index == NULL_TREE || TREE_CODE (index) == RANGE_EXPR)
+ /* There's no well-defined subobject for this index. */
+ new_ctx.object = NULL_TREE;
+ else
+ {
+	  // Faisal: commenting this out as not sure if it's needed and it's
+	  // huge.
+	  // new_ctx.object = build_ctor_subob_ref (index, type, ctx->object);
+ }
+ }
+ tree elt = build_constructor (type, NULL);
+ CONSTRUCTOR_NO_CLEARING (elt) = true;
+ new_ctx.ctor = elt;
+
+ if (TREE_CODE (value) == TARGET_EXPR)
+ /* Avoid creating another CONSTRUCTOR when we expand the TARGET_EXPR. */
+ value = TARGET_EXPR_INITIAL (value);
+}
+
+// forked from gcc/cp/constexpr.cc base_field_constructor_elt
+
+/* REF is a COMPONENT_REF designating a particular field. V is a vector of
+ CONSTRUCTOR elements to initialize (part of) an object containing that
+ field. Return a pointer to the constructor_elt corresponding to the
+ initialization of the field. */
+
+static constructor_elt *
+base_field_constructor_elt (vec<constructor_elt, va_gc> *v, tree ref)
+{
+ tree aggr = TREE_OPERAND (ref, 0);
+ tree field = TREE_OPERAND (ref, 1);
+ HOST_WIDE_INT i;
+ constructor_elt *ce;
+
+ gcc_assert (TREE_CODE (ref) == COMPONENT_REF);
+
+ if (TREE_CODE (aggr) == COMPONENT_REF)
+ {
+ constructor_elt *base_ce = base_field_constructor_elt (v, aggr);
+ v = CONSTRUCTOR_ELTS (base_ce->value);
+ }
+
+ for (i = 0; vec_safe_iterate (v, i, &ce); ++i)
+ if (ce->index == field)
+ return ce;
+
+ gcc_unreachable ();
+ return NULL;
+}
+
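+// Illustrative example (not part of the fork): for a nested reference such
+// as COMPONENT_REF (COMPONENT_REF (d, base_field), vfield), the recursive
+// call above first locates base_field's constructor_elt inside D's
+// CONSTRUCTOR, then searches that sub-CONSTRUCTOR for vfield; reaching
+// gcc_unreachable means the field was never initialized, which callers
+// treat as a hard bug.
+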
+/* Return a pointer to the constructor_elt of CTOR which matches INDEX. If no
+ matching constructor_elt exists, then add one to CTOR.
+
+ As an optimization, if POS_HINT is non-negative then it is used as a guess
+ for the (integer) index of the matching constructor_elt within CTOR. */
+
+static constructor_elt *
+get_or_insert_ctor_field (tree ctor, tree index, int pos_hint = -1)
+{
+ /* Check the hint first. */
+ if (pos_hint >= 0 && (unsigned) pos_hint < CONSTRUCTOR_NELTS (ctor)
+ && CONSTRUCTOR_ELT (ctor, pos_hint)->index == index)
+ return CONSTRUCTOR_ELT (ctor, pos_hint);
+
+ tree type = TREE_TYPE (ctor);
+ if (TREE_CODE (type) == VECTOR_TYPE && index == NULL_TREE)
+ {
+ CONSTRUCTOR_APPEND_ELT (CONSTRUCTOR_ELTS (ctor), index, NULL_TREE);
+ return &CONSTRUCTOR_ELTS (ctor)->last ();
+ }
+ else if (TREE_CODE (type) == ARRAY_TYPE || TREE_CODE (type) == VECTOR_TYPE)
+ {
+ if (TREE_CODE (index) == RANGE_EXPR)
+ {
+ /* Support for RANGE_EXPR index lookups is currently limited to
+ accessing an existing element via POS_HINT, or appending a new
+ element to the end of CTOR. ??? Support for other access
+ patterns may also be needed. */
+ vec<constructor_elt, va_gc> *elts = CONSTRUCTOR_ELTS (ctor);
+ if (vec_safe_length (elts))
+ {
+ tree lo = TREE_OPERAND (index, 0);
+ gcc_assert (array_index_cmp (elts->last ().index, lo) < 0);
+ }
+ CONSTRUCTOR_APPEND_ELT (elts, index, NULL_TREE);
+ return &elts->last ();
+ }
+
+ HOST_WIDE_INT i = find_array_ctor_elt (ctor, index, /*insert*/ true);
+ gcc_assert (i >= 0);
+ constructor_elt *cep = CONSTRUCTOR_ELT (ctor, i);
+ gcc_assert (cep->index == NULL_TREE
+ || TREE_CODE (cep->index) != RANGE_EXPR);
+ return cep;
+ }
+ else
+ {
+ gcc_assert (
+ TREE_CODE (index) == FIELD_DECL
+ && (same_type_ignoring_top_level_qualifiers_p (DECL_CONTEXT (index),
+ TREE_TYPE (ctor))));
+
+ /* We must keep the CONSTRUCTOR's ELTS in FIELD order.
+ Usually we meet initializers in that order, but it is
+ possible for base types to be placed not in program
+ order. */
+ tree fields = TYPE_FIELDS (DECL_CONTEXT (index));
+ unsigned HOST_WIDE_INT idx = 0;
+ constructor_elt *cep = NULL;
+
+ /* Check if we're changing the active member of a union. */
+ if (TREE_CODE (type) == UNION_TYPE && CONSTRUCTOR_NELTS (ctor)
+ && CONSTRUCTOR_ELT (ctor, 0)->index != index)
+ vec_safe_truncate (CONSTRUCTOR_ELTS (ctor), 0);
+ /* If the bit offset of INDEX is larger than that of the last
+ constructor_elt, then we can just immediately append a new
+ constructor_elt to the end of CTOR. */
+ else if (CONSTRUCTOR_NELTS (ctor)
+ && tree_int_cst_compare (
+ bit_position (index),
+ bit_position (CONSTRUCTOR_ELTS (ctor)->last ().index))
+ > 0)
+ {
+ idx = CONSTRUCTOR_NELTS (ctor);
+ goto insert;
+ }
+
+ /* Otherwise, we need to iterate over CTOR to find or insert INDEX
+ appropriately. */
+
+ for (; vec_safe_iterate (CONSTRUCTOR_ELTS (ctor), idx, &cep);
+ idx++, fields = DECL_CHAIN (fields))
+ {
+ if (index == cep->index)
+ goto found;
+
+ /* The field we're initializing must be on the field
+ list. Look to see if it is present before the
+ field the current ELT initializes. */
+ for (; fields != cep->index; fields = DECL_CHAIN (fields))
+ if (index == fields)
+ goto insert;
+ }
+ /* We fell off the end of the CONSTRUCTOR, so insert a new
+ entry at the end. */
+
+ insert : {
+ constructor_elt ce = {index, NULL_TREE};
+
+ vec_safe_insert (CONSTRUCTOR_ELTS (ctor), idx, ce);
+ cep = CONSTRUCTOR_ELT (ctor, idx);
+ }
+ found:;
+
+ return cep;
+ }
+}
+
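+// Illustrative note (not part of the fork): POS_HINT makes the common case
+// O(1).  When eval_bare_aggregate processes initializers in declaration
+// order, the element for the i-th field already sits at position i, so
+//
+//   constructor_elt *cep = get_or_insert_ctor_field (ctor, field, i);
+//
+// returns from the hint check at the top without walking CONSTRUCTOR_ELTS;
+// only out-of-order or missing entries reach the search-and-insert loop.
+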
+// forked from gcc/cp/constexpr.cc cxx_eval_vector_conditional_expression
+
+/* Subroutine of cxx_eval_constant_expression.
+ Attempt to evaluate vector condition expressions. Unlike
+ cxx_eval_conditional_expression, VEC_COND_EXPR acts like a normal
+ ternary arithmetics operation, where all 3 arguments have to be
+ evaluated as constants and then folding computes the result from
+ them. */
+
+static tree
+eval_vector_conditional_expression (const constexpr_ctx *ctx, tree t,
+ bool *non_constant_p, bool *overflow_p)
+{
+ tree arg1
+ = eval_constant_expression (ctx, TREE_OPERAND (t, 0),
+ /*lval*/ false, non_constant_p, overflow_p);
+ VERIFY_CONSTANT (arg1);
+ tree arg2
+ = eval_constant_expression (ctx, TREE_OPERAND (t, 1),
+ /*lval*/ false, non_constant_p, overflow_p);
+ VERIFY_CONSTANT (arg2);
+ tree arg3
+ = eval_constant_expression (ctx, TREE_OPERAND (t, 2),
+ /*lval*/ false, non_constant_p, overflow_p);
+ VERIFY_CONSTANT (arg3);
+ location_t loc = EXPR_LOCATION (t);
+ tree type = TREE_TYPE (t);
+ tree r = fold_ternary_loc (loc, VEC_COND_EXPR, type, arg1, arg2, arg3);
+ if (r == NULL_TREE)
+ {
+ if (arg1 == TREE_OPERAND (t, 0) && arg2 == TREE_OPERAND (t, 1)
+ && arg3 == TREE_OPERAND (t, 2))
+ r = t;
+ else
+ r = build3_loc (loc, VEC_COND_EXPR, type, arg1, arg2, arg3);
+ }
+ VERIFY_CONSTANT (r);
+ return r;
+}
+
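+// Illustrative semantics (not part of the fork): VEC_COND_EXPR selects per
+// element, so with the constant operands
+//
+//   mask = {-1, 0, -1, 0}, a = {1, 2, 3, 4}, b = {5, 6, 7, 8}
+//
+// fold_ternary_loc above reduces the whole expression to {1, 6, 3, 8}; if
+// any operand fails to fold, VERIFY_CONSTANT flags the expression as
+// non-constant instead.
+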
+// forked from gcc/cp/constexpr.cc cxx_eval_bare_aggregate
+
+/* Subroutine of cxx_eval_constant_expression.
+ The expression tree T denotes a C-style array or a C-style
+ aggregate. Reduce it to a constant expression. */
+
+static tree
+eval_bare_aggregate (const constexpr_ctx *ctx, tree t, bool lval,
+ bool *non_constant_p, bool *overflow_p)
+{
+ vec<constructor_elt, va_gc> *v = CONSTRUCTOR_ELTS (t);
+ bool changed = false;
+ gcc_assert (!BRACE_ENCLOSED_INITIALIZER_P (t));
+ tree type = TREE_TYPE (t);
+
+ constexpr_ctx new_ctx;
+ if (TYPE_PTRMEMFUNC_P (type) || VECTOR_TYPE_P (type))
+ {
+ /* We don't really need the ctx->ctor business for a PMF or
+ vector, but it's simpler to use the same code. */
+ new_ctx = *ctx;
+ new_ctx.ctor = build_constructor (type, NULL);
+ new_ctx.object = NULL_TREE;
+ ctx = &new_ctx;
+    }
+ verify_ctor_sanity (ctx, type);
+ vec<constructor_elt, va_gc> **p = &CONSTRUCTOR_ELTS (ctx->ctor);
+ vec_alloc (*p, vec_safe_length (v));
+
+ if (CONSTRUCTOR_PLACEHOLDER_BOUNDARY (t))
+ CONSTRUCTOR_PLACEHOLDER_BOUNDARY (ctx->ctor) = 1;
+
+ unsigned i;
+ tree index, value;
+ bool constant_p = true;
+ bool side_effects_p = false;
+ FOR_EACH_CONSTRUCTOR_ELT (v, i, index, value)
+ {
+ tree orig_value = value;
+ /* Like in cxx_eval_store_expression, omit entries for empty fields. */
+ bool no_slot = TREE_CODE (type) == RECORD_TYPE && is_empty_field (index);
+ if (no_slot)
+ new_ctx = *ctx;
+ else
+ init_subob_ctx (ctx, new_ctx, index, value);
+ int pos_hint = -1;
+ if (new_ctx.ctor != ctx->ctor)
+ {
+ /* If we built a new CONSTRUCTOR, attach it now so that other
+ initializers can refer to it. */
+ constructor_elt *cep = get_or_insert_ctor_field (ctx->ctor, index);
+ cep->value = new_ctx.ctor;
+ pos_hint = cep - (*p)->begin ();
+ }
+ else if (TREE_CODE (type) == UNION_TYPE)
+ /* Otherwise if we're constructing a non-aggregate union member, set
+ the active union member now so that we can later detect and diagnose
+ if its initializer attempts to activate another member. */
+ get_or_insert_ctor_field (ctx->ctor, index);
+ tree elt = eval_constant_expression (&new_ctx, value, lval,
+ non_constant_p, overflow_p);
+ /* Don't VERIFY_CONSTANT here. */
+ if (ctx->quiet && *non_constant_p)
+ break;
+ if (elt != orig_value)
+ changed = true;
+
+ if (!TREE_CONSTANT (elt))
+ constant_p = false;
+ if (TREE_SIDE_EFFECTS (elt))
+ side_effects_p = true;
+ if (index && TREE_CODE (index) == COMPONENT_REF)
+ {
+ /* This is an initialization of a vfield inside a base
+ subaggregate that we already initialized; push this
+ initialization into the previous initialization. */
+ constructor_elt *inner = base_field_constructor_elt (*p, index);
+ inner->value = elt;
+ changed = true;
+ }
+ else if (index
+ && (TREE_CODE (index) == NOP_EXPR
+ || TREE_CODE (index) == POINTER_PLUS_EXPR))
+ {
+ /* This is an initializer for an empty base; now that we've
+ checked that it's constant, we can ignore it. */
+ gcc_assert (is_empty_class (TREE_TYPE (TREE_TYPE (index))));
+ changed = true;
+ }
+ else if (no_slot)
+ changed = true;
+ else
+ {
+ if (TREE_CODE (type) == UNION_TYPE && (*p)->last ().index != index)
+ /* The initializer erroneously changed the active union member that
+ we're initializing. */
+ gcc_assert (*non_constant_p);
+ else
+ {
+ /* The initializer might have mutated the underlying CONSTRUCTOR,
+		 so recompute the location of the target constructor_elt. */
+ constructor_elt *cep
+ = get_or_insert_ctor_field (ctx->ctor, index, pos_hint);
+ cep->value = elt;
+ }
+
+ /* Adding or replacing an element might change the ctor's flags. */
+ TREE_CONSTANT (ctx->ctor) = constant_p;
+ TREE_SIDE_EFFECTS (ctx->ctor) = side_effects_p;
+ }
+ }
+ if (*non_constant_p || !changed)
+ return t;
+ t = ctx->ctor;
+ /* We're done building this CONSTRUCTOR, so now we can interpret an
+ element without an explicit initializer as value-initialized. */
+ CONSTRUCTOR_NO_CLEARING (t) = false;
+ TREE_CONSTANT (t) = constant_p;
+ TREE_SIDE_EFFECTS (t) = side_effects_p;
+ if (VECTOR_TYPE_P (type))
+ t = fold (t);
+ return t;
+}
+
+/* Subroutine of cxx_eval_constant_expression.
+ Like cxx_eval_unary_expression, except for trinary expressions. */
+
+static tree
+cxx_eval_trinary_expression (const constexpr_ctx *ctx, tree t, bool lval,
+ bool *non_constant_p, bool *overflow_p)
+{
+ int i;
+ tree args[3];
+ tree val;
+
+ for (i = 0; i < 3; i++)
+ {
+ args[i] = eval_constant_expression (ctx, TREE_OPERAND (t, i), lval,
+ non_constant_p, overflow_p);
+ VERIFY_CONSTANT (args[i]);
+ }
+
+ val = fold_ternary_loc (EXPR_LOCATION (t), TREE_CODE (t), TREE_TYPE (t),
+ args[0], args[1], args[2]);
+ if (val == NULL_TREE)
+ return t;
+ VERIFY_CONSTANT (val);
+ return val;
+}
+
+/* Return true if T is a valid constant initializer. If a CONSTRUCTOR
+ initializes all the members, the CONSTRUCTOR_NO_CLEARING flag will be
+ cleared.
+ FIXME speed this up, it's taking 16% of compile time on sieve testcase. */
+
+bool
+reduced_constant_expression_p (tree t)
+{
+ if (t == NULL_TREE)
+ return false;
+
+ switch (TREE_CODE (t))
+ {
+ case PTRMEM_CST:
+ /* Even if we can't lower this yet, it's constant. */
+ return true;
+
+ case CONSTRUCTOR:
+ /* And we need to handle PTRMEM_CST wrapped in a CONSTRUCTOR. */
+ tree field;
+ if (CONSTRUCTOR_NO_CLEARING (t))
+ {
+ if (TREE_CODE (TREE_TYPE (t)) == VECTOR_TYPE)
+ /* An initialized vector would have a VECTOR_CST. */
+ return false;
+ else if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)
+ {
+ /* There must be a valid constant initializer at every array
+ index. */
+ tree min = TYPE_MIN_VALUE (TYPE_DOMAIN (TREE_TYPE (t)));
+ tree max = TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (t)));
+ tree cursor = min;
+ for (auto &e : CONSTRUCTOR_ELTS (t))
+ {
+ if (!reduced_constant_expression_p (e.value))
+ return false;
+ if (array_index_cmp (cursor, e.index) != 0)
+ return false;
+ if (TREE_CODE (e.index) == RANGE_EXPR)
+ cursor = TREE_OPERAND (e.index, 1);
+ cursor = int_const_binop (PLUS_EXPR, cursor, size_one_node);
+ }
+ if (find_array_ctor_elt (t, max) == -1)
+ return false;
+ goto ok;
+ }
+ else if (TREE_CODE (TREE_TYPE (t)) == UNION_TYPE)
+ {
+ if (CONSTRUCTOR_NELTS (t) == 0)
+ /* An initialized union has a constructor element. */
+ return false;
+ /* And it only initializes one member. */
+ field = NULL_TREE;
+ }
+ else
+ field = next_initializable_field (TYPE_FIELDS (TREE_TYPE (t)));
+ }
+ else
+ field = NULL_TREE;
+ for (auto &e : CONSTRUCTOR_ELTS (t))
+ {
+ /* If VAL is null, we're in the middle of initializing this
+ element. */
+ if (!reduced_constant_expression_p (e.value))
+ return false;
+ /* Empty class field may or may not have an initializer. */
+ for (; field && e.index != field;
+ field = next_initializable_field (DECL_CHAIN (field)))
+ if (!is_really_empty_class (TREE_TYPE (field),
+ /*ignore_vptr*/ false))
+ return false;
+ if (field)
+ field = next_initializable_field (DECL_CHAIN (field));
+ }
+ /* There could be a non-empty field at the end. */
+ for (; field; field = next_initializable_field (DECL_CHAIN (field)))
+ if (!is_really_empty_class (TREE_TYPE (field), /*ignore_vptr*/ false))
+ return false;
+ ok:
+ if (CONSTRUCTOR_NO_CLEARING (t))
+ /* All the fields are initialized. */
+ CONSTRUCTOR_NO_CLEARING (t) = false;
+ return true;
+
+ default:
+ /* FIXME are we calling this too much? */
+ return initializer_constant_valid_p (t, TREE_TYPE (t)) != NULL_TREE;
+ }
+}
+
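+// Illustrative example (not part of the fork): with CONSTRUCTOR_NO_CLEARING
+// set on "int a[3]", the cursor walk above accepts {0: 1, 1: 2, 2: 3} and
+// then clears the flag, but rejects a gappy initializer like {0: 1, 2: 3},
+// since element 1 has no constant value yet and the object cannot be
+// treated as fully reduced.
+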
+/* TEMP is the constant value of a temporary object of type TYPE. Adjust
+ the type of the value to match. */
+
+static tree
+adjust_temp_type (tree type, tree temp)
+{
+ if (same_type_p (TREE_TYPE (temp), type))
+ return temp;
+
+ gcc_assert (scalarish_type_p (type));
+ /* Now we know we're dealing with a scalar, and a prvalue of non-class
+ type is cv-unqualified. */
+ return fold_convert (cv_unqualified (type), temp);
+}
+
+// forked from gcc/cp/constexpr.cc free_constructor
+
+/* If T is a CONSTRUCTOR, ggc_free T and any sub-CONSTRUCTORs. */
+
+static void
+free_constructor (tree t)
+{
+ if (!t || TREE_CODE (t) != CONSTRUCTOR)
+ return;
+ releasing_vec ctors;
+ vec_safe_push (ctors, t);
+ while (!ctors->is_empty ())
+ {
+ tree c = ctors->pop ();
+ if (vec<constructor_elt, va_gc> *elts = CONSTRUCTOR_ELTS (c))
+ {
+ constructor_elt *ce;
+ for (HOST_WIDE_INT i = 0; vec_safe_iterate (elts, i, &ce); ++i)
+ if (TREE_CODE (ce->value) == CONSTRUCTOR)
+ vec_safe_push (ctors, ce->value);
+ ggc_free (elts);
+ }
+ ggc_free (c);
+ }
+}
+
+static tree
+eval_and_check_array_index (const constexpr_ctx *ctx, tree t,
+ bool allow_one_past, bool *non_constant_p,
+ bool *overflow_p);
+
+// forked from gcc/cp/constexpr.cc cxx_eval_array_reference
+
+/* Subroutine of cxx_eval_constant_expression.
+ Attempt to reduce a reference to an array slot. */
+
+static tree
+eval_array_reference (const constexpr_ctx *ctx, tree t, bool lval,
+ bool *non_constant_p, bool *overflow_p)
+{
+ tree oldary = TREE_OPERAND (t, 0);
+ tree ary
+ = eval_constant_expression (ctx, oldary, lval, non_constant_p, overflow_p);
+ if (*non_constant_p)
+ return t;
+ if (!lval && TREE_CODE (ary) == VIEW_CONVERT_EXPR
+ && VECTOR_TYPE_P (TREE_TYPE (TREE_OPERAND (ary, 0)))
+ && TREE_TYPE (t) == TREE_TYPE (TREE_TYPE (TREE_OPERAND (ary, 0))))
+ ary = TREE_OPERAND (ary, 0);
+
+ tree oldidx = TREE_OPERAND (t, 1);
+ tree index
+ = eval_and_check_array_index (ctx, t, lval, non_constant_p, overflow_p);
+ if (*non_constant_p)
+ return t;
+
+ if (lval && ary == oldary && index == oldidx)
+ return t;
+ else if (lval)
+ return build4 (ARRAY_REF, TREE_TYPE (t), ary, index, NULL, NULL);
+
+ unsigned len = 0, elem_nchars = 1;
+ tree elem_type = TREE_TYPE (TREE_TYPE (ary));
+ if (TREE_CODE (ary) == CONSTRUCTOR)
+ len = CONSTRUCTOR_NELTS (ary);
+ else if (TREE_CODE (ary) == STRING_CST)
+ {
+ elem_nchars
+ = (TYPE_PRECISION (elem_type) / TYPE_PRECISION (char_type_node));
+ len = (unsigned) TREE_STRING_LENGTH (ary) / elem_nchars;
+ }
+ else if (TREE_CODE (ary) == VECTOR_CST)
+ /* We don't create variable-length VECTOR_CSTs. */
+ len = VECTOR_CST_NELTS (ary).to_constant ();
+ else
+ {
+ /* We can't do anything with other tree codes, so use
+ VERIFY_CONSTANT to complain and fail. */
+ VERIFY_CONSTANT (ary);
+ gcc_unreachable ();
+ }
+
+ bool found;
+ HOST_WIDE_INT i = 0;
+ if (TREE_CODE (ary) == CONSTRUCTOR)
+ {
+ HOST_WIDE_INT ix = find_array_ctor_elt (ary, index);
+ found = (ix >= 0);
+ if (found)
+ i = ix;
+ }
+ else
+ {
+ i = tree_to_shwi (index);
+ found = (i < len);
+ }
+
+ if (found)
+ {
+ tree r;
+ if (TREE_CODE (ary) == CONSTRUCTOR)
+ r = (*CONSTRUCTOR_ELTS (ary))[i].value;
+ else if (TREE_CODE (ary) == VECTOR_CST)
+ r = VECTOR_CST_ELT (ary, i);
+ else
+ r = extract_string_elt (ary, elem_nchars, i);
+
+ if (r)
+ /* Don't VERIFY_CONSTANT here. */
+ return r;
+
+ /* Otherwise the element doesn't have a value yet. */
+ }
+
+ /* Not found. */
+
+ if (TREE_CODE (ary) == CONSTRUCTOR && CONSTRUCTOR_NO_CLEARING (ary))
+ {
+ /* 'ary' is part of the aggregate initializer we're currently
+ building; if there's no initializer for this element yet,
+ that's an error. */
+ if (!ctx->quiet)
+ error ("accessing uninitialized array element");
+ *non_constant_p = true;
+ return t;
+ }
+
+ /* If it's within the array bounds but doesn't have an explicit
+ initializer, it's initialized from {}. But use build_value_init
+ directly for non-aggregates to avoid creating a garbage CONSTRUCTOR. */
+ tree val = NULL_TREE;
+ sorry ("array size expression is not supported yet");
+
+ constexpr_ctx new_ctx;
+ if (is_really_empty_class (elem_type, /*ignore_vptr*/ false))
+ return build_constructor (elem_type, NULL);
+ // else if (CP_AGGREGATE_TYPE_P (elem_type))
+ // {
+ // tree empty_ctor = build_constructor (init_list_type_node, NULL);
+ // //val = digest_init (elem_type, empty_ctor, tf_warning_or_error);
+ // }
+ // else
+ // val = build_value_init (elem_type, tf_warning_or_error);
+
+ if (!SCALAR_TYPE_P (elem_type))
+ {
+ new_ctx = *ctx;
+ if (ctx->object)
+ /* If there was no object, don't add one: it could confuse us
+ into thinking we're modifying a const object. */
+ new_ctx.object = t;
+ new_ctx.ctor = build_constructor (elem_type, NULL);
+ ctx = &new_ctx;
+ }
+ t = eval_constant_expression (ctx, val, lval, non_constant_p, overflow_p);
+ if (!SCALAR_TYPE_P (elem_type) && t != ctx->ctor)
+ free_constructor (ctx->ctor);
+ return t;
+}
+
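+// Illustrative example (not part of the fork): an rvalue index into a
+// STRING_CST such as "abc"[1] folds through extract_string_elt to the
+// character constant 'b', while a CONSTRUCTOR-backed array goes through
+// find_array_ctor_elt; an in-bounds element of a CONSTRUCTOR still marked
+// CONSTRUCTOR_NO_CLEARING is diagnosed as an uninitialized read above.
+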
+// forked from gcc/cp/constexpr.cc cxx_eval_component_reference
+
+/* Subroutine of cxx_eval_constant_expression.
+ Attempt to reduce a field access of a value of class type. */
+
+static tree
+eval_component_reference (const constexpr_ctx *ctx, tree t, bool lval,
+ bool *non_constant_p, bool *overflow_p)
+{
+ unsigned HOST_WIDE_INT i;
+ tree field;
+ tree value;
+ tree part = TREE_OPERAND (t, 1);
+ tree orig_whole = TREE_OPERAND (t, 0);
+ tree whole = eval_constant_expression (ctx, orig_whole, lval, non_constant_p,
+ overflow_p);
+ if (INDIRECT_REF_P (whole) && integer_zerop (TREE_OPERAND (whole, 0)))
+ {
+ if (!ctx->quiet)
+ error ("dereferencing a null pointer in %qE", orig_whole);
+ *non_constant_p = true;
+ return t;
+ }
+
+ if (whole == orig_whole)
+ return t;
+ if (lval)
+ return fold_build3 (COMPONENT_REF, TREE_TYPE (t), whole, part, NULL_TREE);
+ /* Don't VERIFY_CONSTANT here; we only want to check that we got a
+ CONSTRUCTOR. */
+ if (!*non_constant_p && TREE_CODE (whole) != CONSTRUCTOR)
+ {
+ if (!ctx->quiet)
+ error ("%qE is not a constant expression", orig_whole);
+ *non_constant_p = true;
+ }
+ if (DECL_MUTABLE_P (part))
+ {
+ if (!ctx->quiet)
+ error ("mutable %qD is not usable in a constant expression", part);
+ *non_constant_p = true;
+ }
+ if (*non_constant_p)
+ return t;
+ bool pmf = TYPE_PTRMEMFUNC_P (TREE_TYPE (whole));
+ FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (whole), i, field, value)
+ {
+ /* Use name match for PMF fields, as a variant will have a
+ different FIELD_DECL with a different type. */
+ if (pmf ? DECL_NAME (field) == DECL_NAME (part) : field == part)
+ {
+ if (value)
+ {
+ STRIP_ANY_LOCATION_WRAPPER (value);
+ return value;
+ }
+ else
+ /* We're in the middle of initializing it. */
+ break;
+ }
+ }
+ if (TREE_CODE (TREE_TYPE (whole)) == UNION_TYPE
+ && CONSTRUCTOR_NELTS (whole) > 0)
+ {
+ /* DR 1188 says we don't have to deal with this. */
+ if (!ctx->quiet)
+ {
+ constructor_elt *cep = CONSTRUCTOR_ELT (whole, 0);
+ if (cep->value == NULL_TREE)
+ error ("accessing uninitialized member %qD", part);
+ else
+ error ("accessing %qD member instead of initialized %qD member in "
+ "constant expression",
+ part, cep->index);
+ }
+ *non_constant_p = true;
+ return t;
+ }
+
+ /* We only create a CONSTRUCTOR for a subobject when we modify it, so empty
+ classes never get represented; throw together a value now. */
+ if (is_really_empty_class (TREE_TYPE (t), /*ignore_vptr*/ false))
+ return build_constructor (TREE_TYPE (t), NULL);
+
+ gcc_assert (DECL_CONTEXT (part) == TYPE_MAIN_VARIANT (TREE_TYPE (whole)));
+
+ if (CONSTRUCTOR_NO_CLEARING (whole))
+ {
+ /* 'whole' is part of the aggregate initializer we're currently
+ building; if there's no initializer for this member yet, that's an
+ error. */
+ if (!ctx->quiet)
+ error ("accessing uninitialized member %qD", part);
+ *non_constant_p = true;
+ return t;
+ }
+
+ value = NULL_TREE;
+ sorry ("constant folding not supported for this tree code");
+ /* If there's no explicit init for this field, it's value-initialized. */
+  // Faisal: commenting this out as it's unclear whether we need it; we must
+  // come back and assign a suitable value to VALUE before passing it to
+  // eval_constant_expression below.
+ // value = build_value_init (TREE_TYPE (t), tf_warning_or_error);
+ return eval_constant_expression (ctx, value, lval, non_constant_p,
+ overflow_p);
+}
+
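+// Illustrative example (not part of the fork): once the object operand is
+// reduced to a CONSTRUCTOR such as {.x = 1, .y = 2}, folding the
+// COMPONENT_REF "s.y" simply returns the stored value 2, while reading a
+// union member other than the active one is diagnosed via the DR 1188
+// handling above.
+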
+/* Subroutine of cxx_eval_statement_list. Determine whether the statement
+ STMT matches *jump_target. If we're looking for a case label and we see
+ the default label, note it in ctx->css_state. */
+
+static bool
+label_matches (const constexpr_ctx *ctx, tree *jump_target, tree stmt)
+{
+ switch (TREE_CODE (*jump_target))
+ {
+ case LABEL_DECL:
+ if (TREE_CODE (stmt) == LABEL_EXPR
+ && LABEL_EXPR_LABEL (stmt) == *jump_target)
+ return true;
+ break;
+
+ case INTEGER_CST:
+ if (TREE_CODE (stmt) == CASE_LABEL_EXPR)
+ {
+ gcc_assert (ctx->css_state != NULL);
+ if (!CASE_LOW (stmt))
+ {
+ /* default: should appear just once in a SWITCH_EXPR
+ body (excluding nested SWITCH_EXPR). */
+ gcc_assert (*ctx->css_state != css_default_seen);
+ /* When evaluating SWITCH_EXPR body for the second time,
+ return true for the default: label. */
+ if (*ctx->css_state == css_default_processing)
+ return true;
+ *ctx->css_state = css_default_seen;
+ }
+ else if (CASE_HIGH (stmt))
+ {
+ if (tree_int_cst_le (CASE_LOW (stmt), *jump_target)
+ && tree_int_cst_le (*jump_target, CASE_HIGH (stmt)))
+ return true;
+ }
+ else if (tree_int_cst_equal (*jump_target, CASE_LOW (stmt)))
+ return true;
+ }
+ break;
+
+ case BREAK_STMT:
+ case CONTINUE_STMT:
+ /* These two are handled directly in cxx_eval_loop_expr by testing
+ breaks (jump_target) or continues (jump_target). */
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+ return false;
+}
+
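+// Illustrative example (not part of the fork): while replaying a
+// SWITCH_EXPR body with *jump_target == 3, a range label "case 1 ... 5:"
+// matches through the CASE_HIGH branch (1 <= 3 && 3 <= 5), a plain
+// "case 3:" matches via tree_int_cst_equal, and "default:" only matches on
+// the second pass, once css_state records that no other label claimed the
+// value.
+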
+static tree
+eval_constant_expression (const constexpr_ctx *ctx, tree t, bool lval,
+ bool *non_constant_p, bool *overflow_p,
+ tree *jump_target /* = NULL */)
+{
+ if (jump_target && *jump_target)
+ {
+ /* If we are jumping, ignore all statements/expressions except those
+ that could have LABEL_EXPR or CASE_LABEL_EXPR in their bodies. */
+ switch (TREE_CODE (t))
+ {
+ case BIND_EXPR:
+ case STATEMENT_LIST:
+ case LOOP_EXPR:
+ case COND_EXPR:
+ case IF_STMT:
+ case DO_STMT:
+ case WHILE_STMT:
+ case FOR_STMT:
+ break;
+ case LABEL_EXPR:
+ case CASE_LABEL_EXPR:
+ if (label_matches (ctx, jump_target, t))
+ /* Found it. */
+ *jump_target = NULL_TREE;
+ return NULL_TREE;
+ default:
+ return NULL_TREE;
+ }
+ }
+ if (error_operand_p (t))
+ {
+ *non_constant_p = true;
+ return t;
+ }
+
+ location_t loc = EXPR_LOCATION (t);
+
+ if (CONSTANT_CLASS_P (t))
+ {
+ if (TREE_OVERFLOW (t))
+ {
+ error_at (loc, "overflow in constant expression");
+ return t;
+ }
+
+ return t;
+ }
+
+ // Avoid excessively long constexpr evaluations
+ if (++ctx->global->constexpr_ops_count >= constexpr_ops_limit)
+ {
+ rust_error_at (
+ Location (loc),
+ "%<constexpr%> evaluation operation count exceeds limit of "
+ "%wd (use %<-fconstexpr-ops-limit=%> to increase the limit)",
+ constexpr_ops_limit);
+
+ return t;
+ }
+
+ constexpr_ctx new_ctx;
+ tree r = t;
+ tree_code tcode = TREE_CODE (t);
+ switch (tcode)
+ {
+ case VAR_DECL:
+ if (DECL_HAS_VALUE_EXPR_P (t))
+ {
+ r = DECL_VALUE_EXPR (t);
+ return eval_constant_expression (ctx, r, lval, non_constant_p,
+ overflow_p);
+ }
+ /* fall through */
+ case CONST_DECL: {
+ /* We used to not check lval for CONST_DECL, but darwin.cc uses
+ CONST_DECL for aggregate constants. */
+ if (lval)
+ return t;
+ else if (t == ctx->object)
+ return ctx->ctor;
+ if (VAR_P (t))
+ if (tree *p = ctx->global->values.get (t))
+ if (*p != NULL_TREE)
+ {
+ r = *p;
+ break;
+ }
+ r = decl_constant_value (t, /*unshare_p=*/false);
+ if (TREE_CODE (r) == TARGET_EXPR
+ && TREE_CODE (TARGET_EXPR_INITIAL (r)) == CONSTRUCTOR)
+ r = TARGET_EXPR_INITIAL (r);
+ if (DECL_P (r))
+ {
+ non_const_var_error (loc, r);
+ return r;
+ }
+ }
+ break;
+
+ case PARM_DECL:
+ if (lval && !TYPE_REF_P (TREE_TYPE (t)))
+ /* glvalue use. */;
+ else if (tree *p = ctx->global->values.get (r))
+ r = *p;
+ else if (lval)
+ /* Defer in case this is only used for its type. */;
+ else if (COMPLETE_TYPE_P (TREE_TYPE (t))
+ && is_really_empty_class (TREE_TYPE (t), /*ignore_vptr*/ false))
+ {
+ /* If the class is empty, we aren't actually loading anything. */
+ r = build_constructor (TREE_TYPE (t), NULL);
+ TREE_CONSTANT (r) = true;
+ }
+ else
+ {
+ if (!ctx->quiet)
+ error ("%qE is not a constant expression", t);
+ *non_constant_p = true;
+ }
+ break;
+
+ case POINTER_PLUS_EXPR:
+ case POINTER_DIFF_EXPR:
+ case PLUS_EXPR:
+ case MINUS_EXPR:
+ case MULT_EXPR:
+ case TRUNC_DIV_EXPR:
+ case CEIL_DIV_EXPR:
+ case FLOOR_DIV_EXPR:
+ case ROUND_DIV_EXPR:
+ case TRUNC_MOD_EXPR:
+ case CEIL_MOD_EXPR:
+ case ROUND_MOD_EXPR:
+ case RDIV_EXPR:
+ case EXACT_DIV_EXPR:
+ case MIN_EXPR:
+ case MAX_EXPR:
+ case LSHIFT_EXPR:
+ case RSHIFT_EXPR:
+ case LROTATE_EXPR:
+ case RROTATE_EXPR:
+ case BIT_IOR_EXPR:
+ case BIT_XOR_EXPR:
+ case BIT_AND_EXPR:
+ case TRUTH_XOR_EXPR:
+ case LT_EXPR:
+ case LE_EXPR:
+ case GT_EXPR:
+ case GE_EXPR:
+ case EQ_EXPR:
+ case NE_EXPR:
+ case SPACESHIP_EXPR:
+ case UNORDERED_EXPR:
+ case ORDERED_EXPR:
+ case UNLT_EXPR:
+ case UNLE_EXPR:
+ case UNGT_EXPR:
+ case UNGE_EXPR:
+ case UNEQ_EXPR:
+ case LTGT_EXPR:
+ case RANGE_EXPR:
+ case COMPLEX_EXPR:
+ r = eval_binary_expression (ctx, t, lval, non_constant_p, overflow_p);
+ break;
+
+ /* fold can introduce non-IF versions of these; still treat them as
+ short-circuiting. */
+ case TRUTH_AND_EXPR:
+ case TRUTH_ANDIF_EXPR:
+ r = eval_logical_expression (ctx, t, boolean_false_node,
+ boolean_true_node, lval, non_constant_p,
+ overflow_p);
+ break;
+
+ case TRUTH_OR_EXPR:
+ case TRUTH_ORIF_EXPR:
+ r = eval_logical_expression (ctx, t, boolean_true_node,
+ boolean_false_node, lval, non_constant_p,
+ overflow_p);
+ break;
+
+ case TARGET_EXPR: {
+ tree type = TREE_TYPE (t);
+
+ if (!literal_type_p (type))
+ {
+ if (!ctx->quiet)
+ {
+ auto_diagnostic_group d;
+ error ("temporary of non-literal type %qT in a "
+ "constant expression",
+ type);
+ explain_non_literal_class (type);
+ }
+ *non_constant_p = true;
+ break;
+ }
+ gcc_checking_assert (!TARGET_EXPR_DIRECT_INIT_P (t));
+ /* Avoid evaluating a TARGET_EXPR more than once. */
+ tree slot = TARGET_EXPR_SLOT (t);
+ if (tree *p = ctx->global->values.get (slot))
+ {
+ if (lval)
+ return slot;
+ r = *p;
+ break;
+ }
+ if ((AGGREGATE_TYPE_P (type) || VECTOR_TYPE_P (type)))
+ {
+ /* We're being expanded without an explicit target, so start
+ initializing a new object; expansion with an explicit target
+ strips the TARGET_EXPR before we get here. */
+ new_ctx = *ctx;
+ /* Link CTX to NEW_CTX so that lookup_placeholder can resolve
+ any PLACEHOLDER_EXPR within the initializer that refers to the
+ former object under construction. */
+ new_ctx.parent = ctx;
+ new_ctx.ctor = build_constructor (type, NULL);
+ CONSTRUCTOR_NO_CLEARING (new_ctx.ctor) = true;
+ new_ctx.object = slot;
+ ctx->global->values.put (new_ctx.object, new_ctx.ctor);
+ ctx = &new_ctx;
+ }
+ /* Pass false for 'lval' because this indicates
+ initialization of a temporary. */
+ r = eval_constant_expression (ctx, TREE_OPERAND (t, 1), false,
+ non_constant_p, overflow_p);
+ if (*non_constant_p)
+ break;
+ /* Adjust the type of the result to the type of the temporary. */
+ r = adjust_temp_type (type, r);
+ if (TARGET_EXPR_CLEANUP (t) && !CLEANUP_EH_ONLY (t))
+ ctx->global->cleanups->safe_push (TARGET_EXPR_CLEANUP (t));
+ r = unshare_constructor (r);
+ ctx->global->values.put (slot, r);
+ if (ctx->save_exprs)
+ ctx->save_exprs->safe_push (slot);
+ if (lval)
+ return slot;
+ }
+ break;
+
+ case CALL_EXPR:
+ r = eval_call_expression (ctx, t, lval, non_constant_p, overflow_p);
+ break;
+
+ case RETURN_EXPR:
+ if (TREE_OPERAND (t, 0) != NULL_TREE)
+ r = eval_constant_expression (ctx, TREE_OPERAND (t, 0), lval,
+ non_constant_p, overflow_p);
+ /* FALLTHRU */
+ case BREAK_STMT:
+ case CONTINUE_STMT:
+ if (jump_target)
+ *jump_target = t;
+ else
+ {
+ /* Can happen with ({ return true; }) && false; passed to
+ maybe_constant_value. There is nothing to jump over in this
+ case, and the bug will be diagnosed later. */
+ gcc_assert (ctx->quiet);
+ *non_constant_p = true;
+ }
+ break;
+
+ case DECL_EXPR: {
+ r = DECL_EXPR_DECL (t);
+
+ if (AGGREGATE_TYPE_P (TREE_TYPE (r)) || VECTOR_TYPE_P (TREE_TYPE (r)))
+ {
+ new_ctx = *ctx;
+ new_ctx.object = r;
+ new_ctx.ctor = build_constructor (TREE_TYPE (r), NULL);
+ CONSTRUCTOR_NO_CLEARING (new_ctx.ctor) = true;
+ ctx->global->values.put (r, new_ctx.ctor);
+ ctx = &new_ctx;
+ }
+
+ if (tree init = DECL_INITIAL (r))
+ {
+ init = eval_constant_expression (ctx, init, false, non_constant_p,
+ overflow_p);
+ /* Don't share a CONSTRUCTOR that might be changed. */
+ init = unshare_constructor (init);
+ /* Remember that a constant object's constructor has already
+ run. */
+ if (CLASS_TYPE_P (TREE_TYPE (r)) && RS_TYPE_CONST_P (TREE_TYPE (r)))
+ TREE_READONLY (init) = true;
+ ctx->global->values.put (r, init);
+ }
+ else if (ctx == &new_ctx)
+ /* We gave it a CONSTRUCTOR above. */;
+ else
+ ctx->global->values.put (r, NULL_TREE);
+ }
+ break;
+
+ /* These differ from cxx_eval_unary_expression in that this doesn't
+ check for a constant operand or result; an address can be
+ constant without its operand being, and vice versa. */
+ case MEM_REF:
+ case INDIRECT_REF:
+ r = rs_eval_indirect_ref (ctx, t, lval, non_constant_p, overflow_p);
+ break;
+
+ case VEC_PERM_EXPR:
+ r = cxx_eval_trinary_expression (ctx, t, lval, non_constant_p,
+ overflow_p);
+ break;
+
+ case PAREN_EXPR:
+ gcc_assert (!REF_PARENTHESIZED_P (t));
+ /* A PAREN_EXPR resulting from __builtin_assoc_barrier has no effect in
+ constant expressions since it's unaffected by -fassociative-math. */
+ r = eval_constant_expression (ctx, TREE_OPERAND (t, 0), lval,
+ non_constant_p, overflow_p);
+ break;
+
+ case MODIFY_EXPR:
+ r = eval_store_expression (ctx, t, false, non_constant_p, overflow_p);
+ break;
+
+ case STATEMENT_LIST:
+ new_ctx = *ctx;
+ new_ctx.ctor = new_ctx.object = NULL_TREE;
+ return eval_statement_list (&new_ctx, t, non_constant_p, overflow_p,
+ jump_target);
+
+ case BIND_EXPR:
+ return eval_constant_expression (ctx, BIND_EXPR_BODY (t), lval,
+ non_constant_p, overflow_p, jump_target);
+
+ case OBJ_TYPE_REF:
+ /* Virtual function lookup. We don't need to do anything fancy. */
+ return eval_constant_expression (ctx, OBJ_TYPE_REF_EXPR (t), lval,
+ non_constant_p, overflow_p);
+
+ case EXIT_EXPR: {
+ tree cond = TREE_OPERAND (t, 0);
+ cond = eval_constant_expression (ctx, cond, /*lval*/ false,
+ non_constant_p, overflow_p);
+ VERIFY_CONSTANT (cond);
+ if (integer_nonzerop (cond))
+ *jump_target = t;
+ }
+ break;
+
+ case RESULT_DECL:
+ if (lval)
+ return t;
+ /* We ask for an rvalue for the RESULT_DECL when indirecting
+ through an invisible reference, or in named return value
+ optimization. */
+ if (tree *p = ctx->global->values.get (t))
+ return *p;
+ else
+ {
+ if (!ctx->quiet)
+ error ("%qE is not a constant expression", t);
+ *non_constant_p = true;
+ }
+ break;
+
+ case SAVE_EXPR:
+ /* Avoid evaluating a SAVE_EXPR more than once. */
+ if (tree *p = ctx->global->values.get (t))
+ r = *p;
+ else
+ {
+ r = eval_constant_expression (ctx, TREE_OPERAND (t, 0), false,
+ non_constant_p, overflow_p);
+ if (*non_constant_p)
+ break;
+ ctx->global->values.put (t, r);
+ if (ctx->save_exprs)
+ ctx->save_exprs->safe_push (t);
+ }
+ break;
+
+ case ADDR_EXPR: {
+ tree oldop = TREE_OPERAND (t, 0);
+ tree op = eval_constant_expression (ctx, oldop,
+ /*lval*/ true, non_constant_p,
+ overflow_p);
+ /* Don't VERIFY_CONSTANT here. */
+ if (*non_constant_p)
+ return t;
+ gcc_checking_assert (TREE_CODE (op) != CONSTRUCTOR);
+ /* This function does more aggressive folding than fold itself. */
+ r = build_fold_addr_expr_with_type (op, TREE_TYPE (t));
+ if (TREE_CODE (r) == ADDR_EXPR && TREE_OPERAND (r, 0) == oldop)
+ {
+ ggc_free (r);
+ return t;
+ }
+ break;
+ }
+
+ case COMPOUND_EXPR: {
+ /* check_return_expr sometimes wraps a TARGET_EXPR in a
+ COMPOUND_EXPR; don't get confused. Also handle EMPTY_CLASS_EXPR
+ introduced by build_call_a. */
+ tree op0 = TREE_OPERAND (t, 0);
+ tree op1 = TREE_OPERAND (t, 1);
+ STRIP_NOPS (op1);
+ if ((TREE_CODE (op0) == TARGET_EXPR && op1 == TARGET_EXPR_SLOT (op0))
+ || TREE_CODE (op1) == EMPTY_CLASS_EXPR)
+ r = eval_constant_expression (ctx, op0, lval, non_constant_p,
+ overflow_p, jump_target);
+ else
+ {
+ /* Check that the LHS is constant and then discard it. */
+ eval_constant_expression (ctx, op0, true, non_constant_p,
+ overflow_p, jump_target);
+ if (*non_constant_p)
+ return t;
+ op1 = TREE_OPERAND (t, 1);
+ r = eval_constant_expression (ctx, op1, lval, non_constant_p,
+ overflow_p, jump_target);
+ }
+ }
+ break;
+
+ case REALPART_EXPR:
+ case IMAGPART_EXPR:
+ if (lval)
+ {
+ r = eval_constant_expression (ctx, TREE_OPERAND (t, 0), lval,
+ non_constant_p, overflow_p);
+ if (r == error_mark_node)
+ ;
+ else if (r == TREE_OPERAND (t, 0))
+ r = t;
+ else
+ r = fold_build1 (TREE_CODE (t), TREE_TYPE (t), r);
+ break;
+ }
+ /* FALLTHRU */
+ case CONJ_EXPR:
+ case FIX_TRUNC_EXPR:
+ case FLOAT_EXPR:
+ case NEGATE_EXPR:
+ case ABS_EXPR:
+ case ABSU_EXPR:
+ case BIT_NOT_EXPR:
+ case TRUTH_NOT_EXPR:
+ case FIXED_CONVERT_EXPR:
+ r = eval_unary_expression (ctx, t, lval, non_constant_p, overflow_p);
+ break;
+
+ case LOOP_EXPR:
+ case WHILE_STMT:
+ case FOR_STMT:
+ eval_loop_expr (ctx, t, non_constant_p, overflow_p, jump_target);
+ break;
+
+ case SWITCH_EXPR:
+ case SWITCH_STMT:
+ eval_switch_expr (ctx, t, non_constant_p, overflow_p, jump_target);
+ break;
+
+ case ARRAY_REF:
+ r = eval_array_reference (ctx, t, lval, non_constant_p, overflow_p);
+ break;
+
+ case COMPONENT_REF:
+ if (is_overloaded_fn (t))
+ {
+ /* We can only get here in checking mode via
+ build_non_dependent_expr, because any expression that
+ calls or takes the address of the function will have
+ pulled a FUNCTION_DECL out of the COMPONENT_REF. */
+ gcc_checking_assert (ctx->quiet || errorcount);
+ *non_constant_p = true;
+ return t;
+ }
+ r = eval_component_reference (ctx, t, lval, non_constant_p, overflow_p);
+ break;
+
+ case BIT_FIELD_REF:
+ r = eval_bit_field_ref (ctx, t, lval, non_constant_p, overflow_p);
+ break;
+
+ case COND_EXPR:
+ case IF_STMT: // comes from cp-tree.def
+ if (jump_target && *jump_target)
+ {
+ tree orig_jump = *jump_target;
+ tree arg = ((TREE_CODE (t) != IF_STMT || TREE_OPERAND (t, 1))
+ ? TREE_OPERAND (t, 1)
+ : void_node);
+ /* When jumping to a label, the label might be either in the
+ then or else blocks, so process then block first in skipping
+ mode first, and if we are still in the skipping mode at its end,
+ process the else block too. */
+ r = eval_constant_expression (ctx, arg, lval, non_constant_p,
+ overflow_p, jump_target);
+ /* It's possible that we found the label in the then block. But
+ it could have been followed by another jumping statement, e.g.
+ say we're looking for case 1:
+ if (cond)
+ {
+ // skipped statements
+ case 1:; // clears up *jump_target
+ return 1; // and sets it to a RETURN_EXPR
+ }
+ else { ... }
+ in which case we need not go looking to the else block.
+ (goto is not allowed in a constexpr function.) */
+ if (*jump_target == orig_jump)
+ {
+ arg = ((TREE_CODE (t) != IF_STMT || TREE_OPERAND (t, 2))
+ ? TREE_OPERAND (t, 2)
+ : void_node);
+ r = eval_constant_expression (ctx, arg, lval, non_constant_p,
+ overflow_p, jump_target);
+ }
+ break;
+ }
+ r = eval_conditional_expression (ctx, t, lval, non_constant_p, overflow_p,
+ jump_target);
+ break;
+
+ case VEC_COND_EXPR:
+ r = eval_vector_conditional_expression (ctx, t, non_constant_p,
+ overflow_p);
+ break;
+
+ case TRY_CATCH_EXPR:
+ if (TREE_OPERAND (t, 0) == NULL_TREE)
+ {
+ r = void_node;
+ break;
+ }
+ r = eval_constant_expression (ctx, TREE_OPERAND (t, 0), lval,
+ non_constant_p, overflow_p, jump_target);
+ break;
+
+ case CLEANUP_POINT_EXPR: {
+ auto_vec<tree, 2> cleanups;
+ vec<tree> *prev_cleanups = ctx->global->cleanups;
+ ctx->global->cleanups = &cleanups;
+ r = eval_constant_expression (ctx, TREE_OPERAND (t, 0), lval,
+ non_constant_p, overflow_p, jump_target);
+ ctx->global->cleanups = prev_cleanups;
+ unsigned int i;
+ tree cleanup;
+ /* Evaluate the cleanups. */
+ FOR_EACH_VEC_ELT_REVERSE (cleanups, i, cleanup)
+ eval_constant_expression (ctx, cleanup, false, non_constant_p,
+ overflow_p);
+ }
+ break;
+
+ case TRY_FINALLY_EXPR:
+ r = eval_constant_expression (ctx, TREE_OPERAND (t, 0), lval,
+ non_constant_p, overflow_p, jump_target);
+ if (!*non_constant_p)
+ /* Also evaluate the cleanup. */
+ eval_constant_expression (ctx, TREE_OPERAND (t, 1), true,
+ non_constant_p, overflow_p);
+ break;
+
+ case CONSTRUCTOR:
+ if (TREE_CONSTANT (t) && reduced_constant_expression_p (t))
+ {
+ /* Don't re-process a constant CONSTRUCTOR, but do fold it to
+ VECTOR_CST if applicable. */
+ verify_constructor_flags (t);
+ if (TREE_CONSTANT (t))
+ return fold (t);
+ }
+ r = eval_bare_aggregate (ctx, t, lval, non_constant_p, overflow_p);
+ break;
+
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case VIEW_CONVERT_EXPR: {
+ tree oldop = TREE_OPERAND (t, 0);
+
+ tree op = eval_constant_expression (ctx, oldop, lval, non_constant_p,
+ overflow_p);
+ if (*non_constant_p)
+ return t;
+ tree type = TREE_TYPE (t);
+
+ if (VOID_TYPE_P (type))
+ return void_node;
+
+ if (TREE_CODE (t) == CONVERT_EXPR && ARITHMETIC_TYPE_P (type)
+ && INDIRECT_TYPE_P (TREE_TYPE (op)) && ctx->manifestly_const_eval)
+ {
+ if (!ctx->quiet)
+ error_at (loc,
+ "conversion from pointer type %qT to arithmetic type "
+ "%qT in a constant expression",
+ TREE_TYPE (op), type);
+ *non_constant_p = true;
+ return t;
+ }
+
+ if (TYPE_PTROB_P (type) && TYPE_PTR_P (TREE_TYPE (op))
+ && VOID_TYPE_P (TREE_TYPE (TREE_TYPE (op))))
+ {
+ /* Likewise, don't error when casting from void* when OP is
+ &heap uninit and similar. */
+ tree sop = tree_strip_nop_conversions (op);
+ if (TREE_CODE (sop) == ADDR_EXPR && VAR_P (TREE_OPERAND (sop, 0))
+ && DECL_ARTIFICIAL (TREE_OPERAND (sop, 0)))
+ /* OK */;
+ else
+ {
+ if (!ctx->quiet)
+ error_at (loc, "cast from %qT is not allowed",
+ TREE_TYPE (op));
+ *non_constant_p = true;
+ return t;
+ }
+ }
+
+ if (INDIRECT_TYPE_P (type) && TREE_CODE (op) == INTEGER_CST)
+ {
+ if (integer_zerop (op))
+ {
+ if (TYPE_REF_P (type))
+ {
+ if (!ctx->quiet)
+ error_at (loc, "dereferencing a null pointer");
+ *non_constant_p = true;
+ return t;
+ }
+ }
+ else
+ {
+ /* This detects for example:
+ reinterpret_cast<void*>(sizeof 0)
+ */
+ if (!ctx->quiet)
+ error_at (loc,
+ "%<reinterpret_cast<%T>(%E)%> is not "
+ "a constant expression",
+ type, op);
+ *non_constant_p = true;
+ return t;
+ }
+ }
+
+ if (INDIRECT_TYPE_P (type) && TREE_CODE (op) == NOP_EXPR
+ && TREE_TYPE (op) == ptr_type_node
+ && TREE_CODE (TREE_OPERAND (op, 0)) == ADDR_EXPR
+ && VAR_P (TREE_OPERAND (TREE_OPERAND (op, 0), 0))
+ && (DECL_NAME (TREE_OPERAND (TREE_OPERAND (op, 0), 0))
+ == heap_uninit_identifier
+ || DECL_NAME (TREE_OPERAND (TREE_OPERAND (op, 0), 0))
+ == heap_vec_uninit_identifier))
+ {
+ tree var = TREE_OPERAND (TREE_OPERAND (op, 0), 0);
+ tree var_size = TYPE_SIZE_UNIT (TREE_TYPE (var));
+ tree elt_type = TREE_TYPE (type);
+ tree cookie_size = NULL_TREE;
+ if (TREE_CODE (elt_type) == RECORD_TYPE
+ && TYPE_NAME (elt_type) == heap_identifier)
+ {
+ tree fld1 = TYPE_FIELDS (elt_type);
+ tree fld2 = DECL_CHAIN (fld1);
+ elt_type = TREE_TYPE (TREE_TYPE (fld2));
+ cookie_size = TYPE_SIZE_UNIT (TREE_TYPE (fld1));
+ }
+ DECL_NAME (var) = (DECL_NAME (var) == heap_uninit_identifier
+ ? heap_identifier
+ : heap_vec_identifier);
+ TREE_TYPE (var)
+ = build_new_constexpr_heap_type (elt_type, cookie_size, var_size);
+ TREE_TYPE (TREE_OPERAND (op, 0))
+ = build_pointer_type (TREE_TYPE (var));
+ }
+
+ if (op == oldop)
+ /* We didn't fold at the top so we could check for ptr-int
+ conversion. */
+ return fold (t);
+
+ tree sop;
+
+ /* Handle an array's bounds having been deduced after we built
+ the wrapping expression. */
+ if (same_type_ignoring_tlq_and_bounds_p (type, TREE_TYPE (op)))
+ r = op;
+ else if (sop = tree_strip_nop_conversions (op),
+ sop != op
+ && (same_type_ignoring_tlq_and_bounds_p (type,
+ TREE_TYPE (sop))))
+ r = sop;
+ else
+ r = fold_build1 (tcode, type, op);
+
+ /* Conversion of an out-of-range value has implementation-defined
+ behavior; the language considers it different from arithmetic
+ overflow, which is undefined. */
+ if (TREE_OVERFLOW_P (r) && !TREE_OVERFLOW_P (op))
+ TREE_OVERFLOW (r) = false;
+ }
+ break;
+
+ case PLACEHOLDER_EXPR:
+ /* Use of the value or address of the current object. */
+ if (tree ctor = lookup_placeholder (ctx, lval, TREE_TYPE (t)))
+ {
+ if (TREE_CODE (ctor) == CONSTRUCTOR)
+ return ctor;
+ else
+ return eval_constant_expression (ctx, ctor, lval, non_constant_p,
+ overflow_p);
+ }
+ /* A placeholder without a referent. We can get here when
+ checking whether NSDMIs are noexcept, or in massage_init_elt;
+ just say it's non-constant for now. */
+ gcc_assert (ctx->quiet);
+ *non_constant_p = true;
+ break;
+
+ case ANNOTATE_EXPR:
+ r = eval_constant_expression (ctx, TREE_OPERAND (t, 0), lval,
+ non_constant_p, overflow_p, jump_target);
+ break;
+
+ case ASM_EXPR:
+ if (!ctx->quiet)
+ inline_asm_in_constexpr_error (loc);
+ *non_constant_p = true;
+ return t;
+
+ default:
+ break;
+ }
+
+ return r;
+}
+
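+// End-to-end sketch (illustrative, not part of the fork): for a Rust item
+// such as
+//
+//   const N: i32 = 2 + 3 * 4;
+//
+// the dispatcher above routes the MULT_EXPR and then the PLUS_EXPR through
+// eval_binary_expression, each operand reduces to an INTEGER_CST, and the
+// whole tree folds to 14 with *non_constant_p and *overflow_p left false.
+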
+/* Complain about a const object OBJ being modified in a constant expression.
+ EXPR is the MODIFY_EXPR expression performing the modification. */
+
+static void
+modifying_const_object_error (tree expr, tree obj)
+{
+ location_t loc = EXPR_LOCATION (expr);
+ auto_diagnostic_group d;
+ error_at (loc,
+ "modifying a const object %qE is not allowed in "
+ "a constant expression",
+ TREE_OPERAND (expr, 0));
+ inform (location_of (obj), "originally declared %<const%> here");
+}
+
+/* Return true iff DECL is an empty field, either for an empty base or a
+ [[no_unique_address]] data member. */
+
+bool
+is_empty_field (tree decl)
+{
+ if (!decl || TREE_CODE (decl) != FIELD_DECL)
+ return false;
+
+ bool r = is_empty_class (TREE_TYPE (decl));
+
+ /* Empty fields should have size zero. */
+ gcc_checking_assert (!r || integer_zerop (DECL_SIZE (decl)));
+
+ return r;
+}
+
+static tree
+eval_store_expression (const constexpr_ctx *ctx, tree t, bool lval,
+ bool *non_constant_p, bool *overflow_p)
+{
+ constexpr_ctx new_ctx = *ctx;
+
+ tree init = TREE_OPERAND (t, 1);
+ if (TREE_CLOBBER_P (init))
+ /* Just ignore clobbers. */
+ return void_node;
+
+ /* First we figure out where we're storing to. */
+ tree target = TREE_OPERAND (t, 0);
+
+ tree type = TREE_TYPE (target);
+ bool preeval = SCALAR_TYPE_P (type) || TREE_CODE (t) == MODIFY_EXPR;
+ if (preeval)
+ {
+ /* Evaluate the value to be stored without knowing what object it will be
+ stored in, so that any side-effects happen first. */
+ if (!SCALAR_TYPE_P (type))
+ new_ctx.ctor = new_ctx.object = NULL_TREE;
+ init = eval_constant_expression (&new_ctx, init, false, non_constant_p,
+ overflow_p);
+ if (*non_constant_p)
+ return t;
+ }
+
+ bool evaluated = false;
+ if (lval)
+ {
+ /* If we want to return a reference to the target, we need to evaluate it
+ as a whole; otherwise, only evaluate the innermost piece to avoid
+ building up unnecessary *_REFs. */
+ target = eval_constant_expression (ctx, target, true, non_constant_p,
+ overflow_p);
+ evaluated = true;
+ if (*non_constant_p)
+ return t;
+ }
+
+ /* Find the underlying variable. */
+ releasing_vec refs;
+ tree object = NULL_TREE;
+ /* If we're modifying a const object, save it. */
+ tree const_object_being_modified = NULL_TREE;
+ // bool mutable_p = false;
+ for (tree probe = target; object == NULL_TREE;)
+ {
+ switch (TREE_CODE (probe))
+ {
+ case BIT_FIELD_REF:
+ case COMPONENT_REF:
+ case ARRAY_REF: {
+ tree ob = TREE_OPERAND (probe, 0);
+ tree elt = TREE_OPERAND (probe, 1);
+ if (TREE_CODE (elt) == FIELD_DECL /*&& DECL_MUTABLE_P (elt)*/)
+ {
+ // mutable_p = true;
+ }
+ if (TREE_CODE (probe) == ARRAY_REF)
+ {
+ // TODO
+ gcc_unreachable ();
+ // elt = eval_and_check_array_index (ctx, probe, false,
+ // non_constant_p, overflow_p);
+ if (*non_constant_p)
+ return t;
+ }
+ /* We don't check modifying_const_object_p for ARRAY_REFs. Given
+ "int a[10]", an ARRAY_REF "a[2]" can be "const int", even though
+ the array isn't const. Instead, check "a" in the next iteration;
+ that will detect modifying "const int a[10]". */
+ // else if (evaluated
+ // && modifying_const_object_p (TREE_CODE (t), probe,
+ // mutable_p)
+ // && const_object_being_modified == NULL_TREE)
+ // const_object_being_modified = probe;
+ vec_safe_push (refs, elt);
+ vec_safe_push (refs, TREE_TYPE (probe));
+ probe = ob;
+ }
+ break;
+
+ default:
+ if (evaluated)
+ object = probe;
+ else
+ {
+ probe = eval_constant_expression (ctx, probe, true,
+ non_constant_p, overflow_p);
+ evaluated = true;
+ if (*non_constant_p)
+ return t;
+ }
+ break;
+ }
+ }
+
+ // if (modifying_const_object_p (TREE_CODE (t), object, mutable_p)
+ // && const_object_being_modified == NULL_TREE)
+ // const_object_being_modified = object;
+
+ /* And then find/build up our initializer for the path to the subobject
+ we're initializing. */
+ tree *valp;
+ if (DECL_P (object))
+ valp = ctx->global->values.get (object);
+ else
+ valp = NULL;
+ if (!valp)
+ {
+ /* A constant-expression cannot modify objects from outside the
+ constant-expression. */
+ if (!ctx->quiet)
+ error ("modification of %qE is not a constant expression", object);
+ *non_constant_p = true;
+ return t;
+ }
+ type = TREE_TYPE (object);
+ bool no_zero_init = true;
+
+ releasing_vec ctors, indexes;
+ auto_vec<int> index_pos_hints;
+ bool activated_union_member_p = false;
+ while (!refs->is_empty ())
+ {
+ if (*valp == NULL_TREE)
+ {
+ *valp = build_constructor (type, NULL);
+ CONSTRUCTOR_NO_CLEARING (*valp) = no_zero_init;
+ }
+ else if (TREE_CODE (*valp) == STRING_CST)
+ {
+ /* An array was initialized with a string constant, and now
+ we're writing into one of its elements. Explode the
+ single initialization into a set of element
+ initializations. */
+ gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
+
+ tree string = *valp;
+ tree elt_type = TREE_TYPE (type);
+ unsigned chars_per_elt
+ = (TYPE_PRECISION (elt_type) / TYPE_PRECISION (char_type_node));
+ unsigned num_elts = TREE_STRING_LENGTH (string) / chars_per_elt;
+ tree ary_ctor = build_constructor (type, NULL);
+
+ vec_safe_reserve (CONSTRUCTOR_ELTS (ary_ctor), num_elts);
+ for (unsigned ix = 0; ix != num_elts; ix++)
+ {
+ constructor_elt elt
+ = {build_int_cst (size_type_node, ix),
+ extract_string_elt (string, chars_per_elt, ix)};
+ CONSTRUCTOR_ELTS (ary_ctor)->quick_push (elt);
+ }
+
+ *valp = ary_ctor;
+ }
+
+ /* If the value of object is already zero-initialized, any new ctors for
+ subobjects will also be zero-initialized. */
+ no_zero_init = CONSTRUCTOR_NO_CLEARING (*valp);
+
+ enum tree_code code = TREE_CODE (type);
+ type = refs->pop ();
+ tree index = refs->pop ();
+
+ if (code == RECORD_TYPE && is_empty_field (index))
+ /* Don't build a sub-CONSTRUCTOR for an empty base or field, as they
+ have no data and might have an offset lower than previously declared
+ fields, which confuses the middle-end. The code below will notice
+ that we don't have a CONSTRUCTOR for our inner target and just
+ return init. */
+ break;
+
+ if (code == UNION_TYPE && CONSTRUCTOR_NELTS (*valp)
+ && CONSTRUCTOR_ELT (*valp, 0)->index != index)
+ {
+ if (TREE_CODE (t) == MODIFY_EXPR && CONSTRUCTOR_NO_CLEARING (*valp))
+ {
+ /* Diagnose changing the active union member while the union
+ is in the process of being initialized. */
+ if (!ctx->quiet)
+ error_at (EXPR_LOCATION (t),
+ "change of the active member of a union "
+ "from %qD to %qD during initialization",
+ CONSTRUCTOR_ELT (*valp, 0)->index, index);
+ *non_constant_p = true;
+ }
+ no_zero_init = true;
+ }
+
+ vec_safe_push (ctors, *valp);
+ vec_safe_push (indexes, index);
+
+ constructor_elt *cep = get_or_insert_ctor_field (*valp, index);
+ index_pos_hints.safe_push (cep - CONSTRUCTOR_ELTS (*valp)->begin ());
+
+ if (code == UNION_TYPE)
+ activated_union_member_p = true;
+
+ valp = &cep->value;
+ }
+
+ /* Detect modifying a constant object in constexpr evaluation.
+ We have found a const object that is being modified. Figure out
+ if we need to issue an error. Consider
+
+ struct A {
+ int n;
+ constexpr A() : n(1) { n = 2; } // #1
+ };
+ struct B {
+ const A a;
+ constexpr B() { a.n = 3; } // #2
+ };
+ constexpr B b{};
+
+ #1 is OK, since we're modifying an object under construction, but
+ #2 is wrong, since "a" is const and has been fully constructed.
+ To track it, we use the TREE_READONLY bit in the object's CONSTRUCTOR
+ which means that the object is read-only. For the example above, the
+ *ctors stack at the point of #2 will look like:
+
+ ctors[0] = {.a={.n=2}} TREE_READONLY = 0
+ ctors[1] = {.n=2} TREE_READONLY = 1
+
+ and we're modifying "b.a", so we search the stack and see if the
+ constructor for "b.a" has already run. */
+ if (const_object_being_modified)
+ {
+ bool fail = false;
+ tree const_objtype
+ = strip_array_types (TREE_TYPE (const_object_being_modified));
+ if (!CLASS_TYPE_P (const_objtype))
+ fail = true;
+ else
+ {
+ /* [class.ctor]p5 "A constructor can be invoked for a const,
+ volatile, or const volatile object. const and volatile
+ semantics are not applied on an object under construction.
+ They come into effect when the constructor for the most
+ derived object ends." */
+ for (tree elt : *ctors)
+ if (same_type_ignoring_top_level_qualifiers_p (
+ TREE_TYPE (const_object_being_modified), TREE_TYPE (elt)))
+ {
+ fail = TREE_READONLY (elt);
+ break;
+ }
+ }
+ if (fail)
+ {
+ if (!ctx->quiet)
+ modifying_const_object_error (t, const_object_being_modified);
+ *non_constant_p = true;
+ return t;
+ }
+ }
+
+ if (!preeval)
+ {
+ /* We're handling an INIT_EXPR of class type, so the value of the
+ initializer can depend on the object it's initializing. */
+
+ /* Create a new CONSTRUCTOR in case evaluation of the initializer
+ wants to modify it. */
+ if (*valp == NULL_TREE)
+ {
+ *valp = build_constructor (type, NULL);
+ CONSTRUCTOR_NO_CLEARING (*valp) = no_zero_init;
+ }
+ new_ctx.ctor = *valp;
+ new_ctx.object = target;
+ /* Avoid temporary materialization when initializing from a TARGET_EXPR.
+	 We don't need to mess with AGGR_INIT_EXPR_SLOT/VEC_INIT_EXPR_SLOT
+	 because expansion of those trees uses ctx instead. */
+ if (TREE_CODE (init) == TARGET_EXPR)
+ if (tree tinit = TARGET_EXPR_INITIAL (init))
+ init = tinit;
+ init = eval_constant_expression (&new_ctx, init, false, non_constant_p,
+ overflow_p);
+ /* The hash table might have moved since the get earlier, and the
+ initializer might have mutated the underlying CONSTRUCTORs, so we must
+ recompute VALP. */
+ valp = ctx->global->values.get (object);
+ for (unsigned i = 0; i < vec_safe_length (indexes); i++)
+ {
+ constructor_elt *cep
+ = get_or_insert_ctor_field (*valp, indexes[i], index_pos_hints[i]);
+ valp = &cep->value;
+ }
+ }
+
+ /* Don't share a CONSTRUCTOR that might be changed later. */
+ init = unshare_constructor (init);
+
+ if (*valp && TREE_CODE (*valp) == CONSTRUCTOR
+ && TREE_CODE (init) == CONSTRUCTOR)
+ {
+ /* An outer ctx->ctor might be pointing to *valp, so replace
+ its contents. */
+ if (!same_type_ignoring_top_level_qualifiers_p (TREE_TYPE (init),
+ TREE_TYPE (*valp)))
+ {
+ /* For initialization of an empty base, the original target will be
+ *(base*)this, evaluation of which resolves to the object
+ argument, which has the derived type rather than the base type. In
+ this situation, just evaluate the initializer and return, since
+ there's no actual data to store. */
+ gcc_assert (is_empty_class (TREE_TYPE (init)));
+ return lval ? target : init;
+ }
+ CONSTRUCTOR_ELTS (*valp) = CONSTRUCTOR_ELTS (init);
+ TREE_CONSTANT (*valp) = TREE_CONSTANT (init);
+ TREE_SIDE_EFFECTS (*valp) = TREE_SIDE_EFFECTS (init);
+ CONSTRUCTOR_NO_CLEARING (*valp) = CONSTRUCTOR_NO_CLEARING (init);
+ }
+ // else if (TREE_CODE (init) == CONSTRUCTOR
+ // && !same_type_ignoring_top_level_qualifiers_p (TREE_TYPE (init),
+ // type))
+ // {
+ // /* See above on initialization of empty bases. */
+ // // gcc_assert (is_empty_class (TREE_TYPE (init)) && !lval);
+ // return init;
+ // }
+ else
+ *valp = init;
+
+ /* After initialization, 'const' semantics apply to the value of the
+ object. Make a note of this fact by marking the CONSTRUCTOR
+ TREE_READONLY. */
+ if (TREE_CODE (t) == INIT_EXPR && TREE_CODE (*valp) == CONSTRUCTOR
+ && TYPE_READONLY (type))
+ {
+ // this vs self? can rust's self be anything other than self or &self in
+ // constexpr mode? if (INDIRECT_REF_P (target)
+ // && (is_this_parameter (
+ // tree_strip_nop_conversions (TREE_OPERAND (target, 0)))))
+ /* We've just initialized '*this' (perhaps via the target
+ constructor of a delegating constructor). Leave it up to the
+ caller that set 'this' to set TREE_READONLY appropriately. */
+ // gcc_checking_assert (
+ // same_type_ignoring_top_level_qualifiers_p (TREE_TYPE (target),
+ // type));
+ // else
+ // TREE_READONLY (*valp) = true;
+ }
+
+ /* Update TREE_CONSTANT and TREE_SIDE_EFFECTS on enclosing
+ CONSTRUCTORs, if any. */
+ bool c = TREE_CONSTANT (init);
+ bool s = TREE_SIDE_EFFECTS (init);
+ if (!c || s || activated_union_member_p)
+ for (tree elt : *ctors)
+ {
+ if (!c)
+ TREE_CONSTANT (elt) = false;
+ if (s)
+ TREE_SIDE_EFFECTS (elt) = true;
+ /* Clear CONSTRUCTOR_NO_CLEARING since we've activated a member of
+ this union. */
+ if (TREE_CODE (TREE_TYPE (elt)) == UNION_TYPE)
+ CONSTRUCTOR_NO_CLEARING (elt) = false;
+ }
+
+ if (*non_constant_p)
+ return t;
+ else if (lval)
+ return target;
+ else
+ return init;
+}
+
+/* Subroutine of cxx_eval_constant_expression.
+ Like cxx_eval_unary_expression, except for binary expressions. */
+static tree
+eval_binary_expression (const constexpr_ctx *ctx, tree t, bool lval,
+ bool *non_constant_p, bool *overflow_p)
+{
+ tree orig_lhs = TREE_OPERAND (t, 0);
+ tree orig_rhs = TREE_OPERAND (t, 1);
+ tree lhs, rhs;
+
+ lhs = eval_constant_expression (ctx, orig_lhs, lval, non_constant_p,
+ overflow_p);
+ rhs = eval_constant_expression (ctx, orig_rhs, lval, non_constant_p,
+ overflow_p);
+
+ location_t loc = EXPR_LOCATION (t);
+ enum tree_code code = TREE_CODE (t);
+ tree type = TREE_TYPE (t);
+
+ return fold_binary_loc (loc, code, type, lhs, rhs);
+}
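+
+/* Illustrative sketch (not part of the evaluator): for a Rust constant such
+   as
+
+     const N: i32 = 40 + 2;
+
+   the PLUS_EXPR reaches eval_binary_expression with both operands already
+   reduced to INTEGER_CSTs, and fold_binary_loc collapses the whole node into
+   the single INTEGER_CST 42.  When an operand does not reduce to a constant,
+   fold_binary_loc yields NULL_TREE or a non-constant tree, and the caller is
+   responsible for diagnosing it.  */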
+
+/* Helper function of cxx_bind_parameters_in_call. Return non-NULL
+   if *TP is the address of a static variable (or part of it) currently being
+ constructed or of a heap artificial variable. */
+
+static tree
+addr_of_non_const_var (tree *tp, int *walk_subtrees, void *data)
+{
+ if (TREE_CODE (*tp) == ADDR_EXPR)
+ if (tree var = get_base_address (TREE_OPERAND (*tp, 0)))
+ if (VAR_P (var) && TREE_STATIC (var))
+ {
+ if (DECL_NAME (var) == heap_uninit_identifier
+ || DECL_NAME (var) == heap_identifier
+ || DECL_NAME (var) == heap_vec_uninit_identifier
+ || DECL_NAME (var) == heap_vec_identifier)
+ return var;
+
+ constexpr_global_ctx *global = (constexpr_global_ctx *) data;
+ if (global->values.get (var))
+ return var;
+ }
+ if (TYPE_P (*tp))
+ *walk_subtrees = false;
+ return NULL_TREE;
+}
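+
+/* Illustrative sketch: given a TREE_STATIC variable S that is still being
+   constexpr-initialized (so S has an entry in the global values table), an
+   argument expression containing &S is an ADDR_EXPR whose base is that
+   VAR_DECL, and this callback returns it; the caller then refrains from
+   caching the call, since S may change between evaluations.  */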
+
+/* Subroutine of cxx_eval_call_expression.
+ We are processing a call expression (either CALL_EXPR or
+ AGGR_INIT_EXPR) in the context of CTX. Evaluate
+   all arguments and bind their values to the corresponding
+   parameters, making up the NEW_CALL context.  */
+
+static tree
+rs_bind_parameters_in_call (const constexpr_ctx *ctx, tree t, tree fun,
+ bool *non_constant_p, bool *overflow_p,
+ bool *non_constant_args)
+{
+ const int nargs = call_expr_nargs (t);
+ tree parms = DECL_ARGUMENTS (fun);
+ int i;
+ /* We don't record ellipsis args below. */
+ int nparms = list_length (parms);
+ int nbinds = nargs < nparms ? nargs : nparms;
+ tree binds = make_tree_vec (nbinds);
+ for (i = 0; i < nargs; ++i)
+ {
+ tree x, arg;
+ tree type = parms ? TREE_TYPE (parms) : void_type_node;
+ if (parms && DECL_BY_REFERENCE (parms))
+ type = TREE_TYPE (type);
+ x = CALL_EXPR_ARG (t, i);
+
+ if (TREE_ADDRESSABLE (type))
+ /* Undo convert_for_arg_passing work here. */
+ x = convert_from_reference (x);
+ /* Normally we would strip a TARGET_EXPR in an initialization context
+ such as this, but here we do the elision differently: we keep the
+ TARGET_EXPR, and use its CONSTRUCTOR as the value of the parm. */
+ arg = eval_constant_expression (ctx, x, /*lval=*/false, non_constant_p,
+ overflow_p);
+ /* Don't VERIFY_CONSTANT here. */
+ if (*non_constant_p && ctx->quiet)
+ break;
+ /* Just discard ellipsis args after checking their constantitude. */
+ if (!parms)
+ continue;
+
+ if (!*non_constant_p)
+ {
+ /* Make sure the binding has the same type as the parm. But
+ only for constant args. */
+ if (!TYPE_REF_P (type))
+ arg = adjust_temp_type (type, arg);
+ if (!TREE_CONSTANT (arg))
+ *non_constant_args = true;
+ else if (TYPE_HAS_NONTRIVIAL_DESTRUCTOR (type))
+ /* The destructor needs to see any modifications the callee makes
+ to the argument. */
+ *non_constant_args = true;
+ /* If arg is or contains address of a heap artificial variable or
+ of a static variable being constructed, avoid caching the
+ function call, as those variables might be modified by the
+	     the cached function call and just read by the function.  */
+ the cached function and just read by the function. */
+ else if (!*non_constant_args
+ && rs_walk_tree (&arg, addr_of_non_const_var, ctx->global,
+ NULL))
+ *non_constant_args = true;
+
+ // /* For virtual calls, adjust the this argument, so that it is
+ // the object on which the method is called, rather than
+ // one of its bases. */
+ // if (i == 0 && DECL_VIRTUAL_P (fun))
+ // {
+ // tree addr = arg;
+ // STRIP_NOPS (addr);
+ // if (TREE_CODE (addr) == ADDR_EXPR)
+ // {
+ // tree obj = TREE_OPERAND (addr, 0);
+ // while (TREE_CODE (obj) == COMPONENT_REF
+ // && DECL_FIELD_IS_BASE (TREE_OPERAND (obj, 1))
+ // && !same_type_ignoring_top_level_qualifiers_p (
+ // TREE_TYPE (obj), DECL_CONTEXT (fun)))
+ // obj = TREE_OPERAND (obj, 0);
+ // if (obj != TREE_OPERAND (addr, 0))
+ // arg = build_fold_addr_expr_with_type (obj, TREE_TYPE
+ // (arg));
+ // }
+ // }
+ TREE_VEC_ELT (binds, i) = arg;
+ }
+ parms = TREE_CHAIN (parms);
+ }
+
+ return binds;
+}
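+
+/* Illustrative sketch: for a call f (1, x) to a hypothetical
+
+     const fn f (a: i32, b: i32) -> i32 { a + b }
+
+   where x has already evaluated to 7, the returned TREE_VEC holds the two
+   INTEGER_CSTs {1, 7}, each adjusted to its parameter type.  Had x not
+   reduced to a constant, *NON_CONSTANT_ARGS would have been set instead,
+   which prevents the call result from being cached.  */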
+
+// forked from gcc/cp/constexpr.cc cxx_eval_builtin_function_call
+
+/* Attempt to evaluate T which represents a call to a builtin function.
+ We assume here that all builtin functions evaluate to scalar types
+ represented by _CST nodes. */
+
+static tree
+eval_builtin_function_call (const constexpr_ctx *ctx, tree t, tree fun,
+ bool lval, bool *non_constant_p, bool *overflow_p)
+{
+ const int nargs = call_expr_nargs (t);
+ tree *args = (tree *) alloca (nargs * sizeof (tree));
+ tree new_call;
+ int i;
+
+ /* Don't fold __builtin_constant_p within a constexpr function. */
+ bool bi_const_p = DECL_IS_BUILTIN_CONSTANT_P (fun);
+
+ /* If we aren't requiring a constant expression, defer __builtin_constant_p
+ in a constexpr function until we have values for the parameters. */
+ if (bi_const_p && !ctx->manifestly_const_eval && current_function_decl
+ && DECL_DECLARED_CONSTEXPR_P (current_function_decl))
+ {
+ *non_constant_p = true;
+ return t;
+ }
+
+  /* For __builtin_is_constant_evaluated, defer it if not
+     ctx->manifestly_const_eval (we sometimes try to constant-evaluate
+     expressions, or parts of them, without manifestly_const_eval even
+     though they will later be evaluated with it); otherwise fold it to
+     true.  */
+ if (fndecl_built_in_p (fun, CP_BUILT_IN_IS_CONSTANT_EVALUATED,
+ BUILT_IN_FRONTEND))
+ {
+ if (!ctx->manifestly_const_eval)
+ {
+ *non_constant_p = true;
+ return t;
+ }
+ return boolean_true_node;
+ }
+
+ if (fndecl_built_in_p (fun, CP_BUILT_IN_SOURCE_LOCATION, BUILT_IN_FRONTEND))
+ {
+ temp_override<tree> ovr (current_function_decl);
+ if (ctx->call && ctx->call->fundef)
+ current_function_decl = ctx->call->fundef->decl;
+ return fold_builtin_source_location (EXPR_LOCATION (t));
+ }
+
+ int strops = 0;
+ int strret = 0;
+ if (fndecl_built_in_p (fun, BUILT_IN_NORMAL))
+ switch (DECL_FUNCTION_CODE (fun))
+ {
+ case BUILT_IN_STRLEN:
+ case BUILT_IN_STRNLEN:
+ strops = 1;
+ break;
+ case BUILT_IN_MEMCHR:
+ case BUILT_IN_STRCHR:
+ case BUILT_IN_STRRCHR:
+ strops = 1;
+ strret = 1;
+ break;
+ case BUILT_IN_MEMCMP:
+ case BUILT_IN_STRCMP:
+ strops = 2;
+ break;
+ case BUILT_IN_STRSTR:
+ strops = 2;
+ strret = 1;
+ break;
+ case BUILT_IN_ASAN_POINTER_COMPARE:
+ case BUILT_IN_ASAN_POINTER_SUBTRACT:
+ /* These builtins shall be ignored during constant expression
+ evaluation. */
+ return void_node;
+ default:
+ break;
+ }
+
+ /* Be permissive for arguments to built-ins; __builtin_constant_p should
+ return constant false for a non-constant argument. */
+ constexpr_ctx new_ctx = *ctx;
+ new_ctx.quiet = true;
+ for (i = 0; i < nargs; ++i)
+ {
+ tree arg = CALL_EXPR_ARG (t, i);
+ tree oarg = arg;
+
+ /* To handle string built-ins we need to pass ADDR_EXPR<STRING_CST> since
+ expand_builtin doesn't know how to look in the values table. */
+ bool strop = i < strops;
+ if (strop)
+ {
+ STRIP_NOPS (arg);
+ if (TREE_CODE (arg) == ADDR_EXPR)
+ arg = TREE_OPERAND (arg, 0);
+ else
+ strop = false;
+ }
+
+ /* If builtin_valid_in_constant_expr_p is true,
+ potential_constant_expression_1 has not recursed into the arguments
+ of the builtin, verify it here. */
+ if (!builtin_valid_in_constant_expr_p (fun)
+ || potential_constant_expression (arg))
+ {
+ bool dummy1 = false, dummy2 = false;
+ arg
+ = eval_constant_expression (&new_ctx, arg, false, &dummy1, &dummy2);
+ }
+
+ if (bi_const_p)
+ /* For __builtin_constant_p, fold all expressions with constant values
+ even if they aren't C++ constant-expressions. */
+ arg = cp_fold_rvalue (arg);
+ else if (strop)
+ {
+ if (TREE_CODE (arg) == CONSTRUCTOR)
+ arg = braced_lists_to_strings (TREE_TYPE (arg), arg);
+ if (TREE_CODE (arg) == STRING_CST)
+ arg = build_address (arg);
+ else
+ arg = oarg;
+ }
+
+ args[i] = arg;
+ }
+
+ bool save_ffbcp = force_folding_builtin_constant_p;
+ force_folding_builtin_constant_p |= ctx->manifestly_const_eval;
+ tree save_cur_fn = current_function_decl;
+ /* Return name of ctx->call->fundef->decl for __builtin_FUNCTION (). */
+ if (fndecl_built_in_p (fun, BUILT_IN_FUNCTION) && ctx->call
+ && ctx->call->fundef)
+ current_function_decl = ctx->call->fundef->decl;
+ if (fndecl_built_in_p (fun,
+ CP_BUILT_IN_IS_POINTER_INTERCONVERTIBLE_WITH_CLASS,
+ BUILT_IN_FRONTEND))
+ {
+ location_t loc = EXPR_LOCATION (t);
+ if (nargs >= 1)
+ VERIFY_CONSTANT (args[0]);
+ new_call
+ = fold_builtin_is_pointer_inverconvertible_with_class (loc, nargs,
+ args);
+ }
+ else if (fndecl_built_in_p (fun, CP_BUILT_IN_IS_CORRESPONDING_MEMBER,
+ BUILT_IN_FRONTEND))
+ {
+ location_t loc = EXPR_LOCATION (t);
+ if (nargs >= 2)
+ {
+ VERIFY_CONSTANT (args[0]);
+ VERIFY_CONSTANT (args[1]);
+ }
+ new_call = fold_builtin_is_corresponding_member (loc, nargs, args);
+ }
+ else
+ new_call = fold_builtin_call_array (EXPR_LOCATION (t), TREE_TYPE (t),
+ CALL_EXPR_FN (t), nargs, args);
+ current_function_decl = save_cur_fn;
+ force_folding_builtin_constant_p = save_ffbcp;
+ if (new_call == NULL)
+ {
+ if (!*non_constant_p && !ctx->quiet)
+ {
+	  /* Do not allow __builtin_unreachable in a constexpr function.
+ The __builtin_unreachable call with BUILTINS_LOCATION
+ comes from cp_maybe_instrument_return. */
+ if (fndecl_built_in_p (fun, BUILT_IN_UNREACHABLE)
+ && EXPR_LOCATION (t) == BUILTINS_LOCATION)
+ error ("%<constexpr%> call flows off the end of the function");
+ else
+ {
+ new_call = build_call_array_loc (EXPR_LOCATION (t), TREE_TYPE (t),
+ CALL_EXPR_FN (t), nargs, args);
+ error ("%q+E is not a constant expression", new_call);
+ }
+ }
+ *non_constant_p = true;
+ return t;
+ }
+
+ if (!potential_constant_expression (new_call))
+ {
+ if (!*non_constant_p && !ctx->quiet)
+ error ("%q+E is not a constant expression", new_call);
+ *non_constant_p = true;
+ return t;
+ }
+
+ if (strret)
+ {
+ /* memchr returns a pointer into the first argument, but we replaced the
+	 argument above with a STRING_CST; put it back now.  */
+ tree op = CALL_EXPR_ARG (t, strret - 1);
+ STRIP_NOPS (new_call);
+ if (TREE_CODE (new_call) == POINTER_PLUS_EXPR)
+ TREE_OPERAND (new_call, 0) = op;
+ else if (TREE_CODE (new_call) == ADDR_EXPR)
+ new_call = op;
+ }
+
+ return eval_constant_expression (&new_ctx, new_call, lval, non_constant_p,
+ overflow_p);
+}
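+
+/* Illustrative sketch: a call to __builtin_is_constant_evaluated () folds to
+   boolean_true_node above when ctx->manifestly_const_eval is set.  During a
+   speculative (non-manifest) evaluation it is deferred instead by setting
+   *NON_CONSTANT_P, so that a later manifest evaluation of the same
+   expression can still fold it to true.  */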
+
+// Subroutine of cxx_eval_constant_expression.
+// Evaluate the call expression tree T in the context of OLD_CALL expression
+// evaluation.
+static tree
+eval_call_expression (const constexpr_ctx *ctx, tree t, bool lval,
+ bool *non_constant_p, bool *overflow_p)
+{
+ location_t loc = EXPR_LOCATION (t);
+ tree fun = get_function_named_in_call (t);
+ constexpr_call new_call = {NULL, NULL, NULL, 0, ctx->manifestly_const_eval};
+ int depth_ok;
+
+ if (fun == NULL_TREE)
+ {
+ // return cxx_eval_internal_function (ctx, t, lval,
+ // non_constant_p, overflow_p);
+ gcc_unreachable ();
+ return error_mark_node;
+ }
+
+ if (TREE_CODE (fun) != FUNCTION_DECL)
+ {
+ if (!ctx->quiet && !*non_constant_p)
+ error_at (loc,
+ "expression %qE does not designate a %<constexpr%> "
+ "function",
+ fun);
+ *non_constant_p = true;
+ return t;
+ }
+
+ if (fndecl_built_in_p (fun))
+ return eval_builtin_function_call (ctx, t, fun, lval, non_constant_p,
+ overflow_p);
+
+ bool non_constant_args = false;
+ new_call.bindings
+ = rs_bind_parameters_in_call (ctx, t, fun, non_constant_p, overflow_p,
+ &non_constant_args);
+
+ /* We build up the bindings list before we know whether we already have this
+ call cached. If we don't end up saving these bindings, ggc_free them when
+ this function exits. */
+ class free_bindings
+ {
+ tree *bindings;
+
+ public:
+ free_bindings (tree &b) : bindings (&b) {}
+ ~free_bindings ()
+ {
+ if (bindings)
+ ggc_free (*bindings);
+ }
+ void preserve () { bindings = NULL; }
+ } fb (new_call.bindings);
+
+ if (*non_constant_p)
+ return t;
+
+ /* If in direct recursive call, optimize definition search. */
+ if (ctx && ctx->call && ctx->call->fundef && ctx->call->fundef->decl == fun)
+ new_call.fundef = ctx->call->fundef;
+ else
+ {
+ new_call.fundef = retrieve_constexpr_fundef (fun);
+ if (new_call.fundef == NULL || new_call.fundef->body == NULL
+ || new_call.fundef->result == error_mark_node
+ || fun == current_function_decl)
+ {
+ if (!ctx->quiet)
+ {
+ /* We need to check for current_function_decl here in case we're
+ being called during cp_fold_function, because at that point
+ DECL_INITIAL is set properly and we have a fundef but we
+ haven't lowered invisirefs yet (c++/70344). */
+ if (DECL_INITIAL (fun) == error_mark_node
+ || fun == current_function_decl)
+ error_at (loc,
+ "%qD called in a constant expression before its "
+ "definition is complete",
+ fun);
+ else if (DECL_INITIAL (fun))
+ {
+ // /* The definition of fun was somehow unsuitable. But
+ // pretend
+ // that lambda static thunks don't exist. */
+ // if (!lambda_static_thunk_p (fun))
+ // error_at (loc, "%qD called in a constant expression",
+ // fun);
+ explain_invalid_constexpr_fn (fun);
+ }
+ else
+ error_at (loc, "%qD used before its definition", fun);
+ }
+ *non_constant_p = true;
+ return t;
+ }
+ }
+
+ depth_ok = push_cx_call_context (t);
+
+ tree result = NULL_TREE;
+ constexpr_call *entry = NULL;
+ if (depth_ok && !non_constant_args && ctx->strict)
+ {
+ new_call.hash = constexpr_fundef_hasher::hash (new_call.fundef);
+ new_call.hash = iterative_hash_object (new_call.bindings, new_call.hash);
+ new_call.hash
+ = iterative_hash_object (ctx->manifestly_const_eval, new_call.hash);
+
+ /* If we have seen this call before, we are done. */
+ maybe_initialize_constexpr_call_table ();
+ constexpr_call **slot
+ = constexpr_call_table->find_slot (&new_call, INSERT);
+ entry = *slot;
+ if (entry == NULL)
+ {
+ /* Only cache up to constexpr_cache_depth to limit memory use. */
+ if (depth_ok < constexpr_cache_depth)
+ {
+ /* We need to keep a pointer to the entry, not just the slot, as
+ the slot can move during evaluation of the body. */
+ *slot = entry = ggc_alloc<constexpr_call> ();
+ *entry = new_call;
+ fb.preserve ();
+ }
+ }
+ /* Calls that are in progress have their result set to NULL, so that we
+ can detect circular dependencies. Now that we only cache up to
+ constexpr_cache_depth this won't catch circular dependencies that
+ start deeper, but they'll hit the recursion or ops limit. */
+ else if (entry->result == NULL)
+ {
+ if (!ctx->quiet)
+ error ("call has circular dependency");
+ *non_constant_p = true;
+ entry->result = result = error_mark_node;
+ }
+ else
+ result = entry->result;
+ }
+
+ if (!depth_ok)
+ {
+ if (!ctx->quiet)
+ error ("%<constexpr%> evaluation depth exceeds maximum of %d (use "
+ "%<-fconstexpr-depth=%> to increase the maximum)",
+ max_constexpr_depth);
+ *non_constant_p = true;
+ result = error_mark_node;
+ }
+ else
+ {
+ bool cacheable = true;
+ if (result && result != error_mark_node)
+ /* OK */;
+ else if (!DECL_SAVED_TREE (fun))
+ {
+ /* When at_eof >= 2, cgraph has started throwing away
+ DECL_SAVED_TREE, so fail quietly. FIXME we get here because of
+ late code generation for VEC_INIT_EXPR, which needs to be
+ completely reconsidered. */
+ // gcc_assert (at_eof >= 2 && ctx->quiet);
+ *non_constant_p = true;
+ }
+ else if (tree copy = get_fundef_copy (new_call.fundef))
+ {
+ tree body, parms, res;
+ releasing_vec ctors;
+
+ /* Reuse or create a new unshared copy of this function's body. */
+ body = TREE_PURPOSE (copy);
+ parms = TREE_VALUE (copy);
+ res = TREE_TYPE (copy);
+
+ /* Associate the bindings with the remapped parms. */
+ tree bound = new_call.bindings;
+ tree remapped = parms;
+ for (int i = 0; i < TREE_VEC_LENGTH (bound); ++i)
+ {
+ tree arg = TREE_VEC_ELT (bound, i);
+ if (entry)
+ {
+ /* Unshare args going into the hash table to separate them
+ from the caller's context, for better GC and to avoid
+ problems with verify_gimple. */
+ arg = unshare_expr_without_location (arg);
+ TREE_VEC_ELT (bound, i) = arg;
+
+ /* And then unshare again so the callee doesn't change the
+ argument values in the hash table. XXX Could we unshare
+ lazily in cxx_eval_store_expression? */
+ arg = unshare_constructor (arg);
+ if (TREE_CODE (arg) == CONSTRUCTOR)
+ vec_safe_push (ctors, arg);
+ }
+
+ ctx->global->values.put (remapped, arg);
+ remapped = DECL_CHAIN (remapped);
+ }
+ /* Add the RESULT_DECL to the values map, too. */
+ gcc_assert (!DECL_BY_REFERENCE (res));
+ ctx->global->values.put (res, NULL_TREE);
+
+ /* Track the callee's evaluated SAVE_EXPRs and TARGET_EXPRs so that
+ we can forget their values after the call. */
+ constexpr_ctx ctx_with_save_exprs = *ctx;
+ auto_vec<tree, 10> save_exprs;
+ ctx_with_save_exprs.save_exprs = &save_exprs;
+ ctx_with_save_exprs.call = &new_call;
+ unsigned save_heap_alloc_count = ctx->global->heap_vars.length ();
+ unsigned save_heap_dealloc_count = ctx->global->heap_dealloc_count;
+
+ tree jump_target = NULL_TREE;
+ eval_constant_expression (&ctx_with_save_exprs, body, lval,
+ non_constant_p, overflow_p, &jump_target);
+
+ if (VOID_TYPE_P (TREE_TYPE (res)))
+ result = void_node;
+ else
+ {
+ result = *ctx->global->values.get (res);
+ if (result == NULL_TREE && !*non_constant_p)
+ {
+ if (!ctx->quiet)
+ error ("%<constexpr%> call flows off the end "
+ "of the function");
+ *non_constant_p = true;
+ }
+ }
+
+ /* Forget the saved values of the callee's SAVE_EXPRs and
+ TARGET_EXPRs. */
+ for (tree save_expr : save_exprs)
+ ctx->global->values.remove (save_expr);
+
+ /* Remove the parms/result from the values map. Is it worth
+ bothering to do this when the map itself is only live for
+ one constexpr evaluation? If so, maybe also clear out
+ other vars from call, maybe in BIND_EXPR handling? */
+ ctx->global->values.remove (res);
+ for (tree parm = parms; parm; parm = TREE_CHAIN (parm))
+ ctx->global->values.remove (parm);
+
+ /* Make the unshared function copy we used available for re-use. */
+ save_fundef_copy (fun, copy);
+
+ /* If the call allocated some heap object that hasn't been
+ deallocated during the call, or if it deallocated some heap
+ object it has not allocated, the call isn't really stateless
+ for the constexpr evaluation and should not be cached.
+ It is fine if the call allocates something and deallocates it
+ too. */
+ if (entry
+ && (save_heap_alloc_count != ctx->global->heap_vars.length ()
+ || (save_heap_dealloc_count
+ != ctx->global->heap_dealloc_count)))
+ {
+ tree heap_var;
+ unsigned int i;
+ if ((ctx->global->heap_vars.length ()
+ - ctx->global->heap_dealloc_count)
+ != save_heap_alloc_count - save_heap_dealloc_count)
+ cacheable = false;
+ else
+ FOR_EACH_VEC_ELT_FROM (ctx->global->heap_vars, i, heap_var,
+ save_heap_alloc_count)
+ if (DECL_NAME (heap_var) != heap_deleted_identifier)
+ {
+ cacheable = false;
+ break;
+ }
+ }
+ }
+ else
+ /* Couldn't get a function copy to evaluate. */
+ *non_constant_p = true;
+
+ if (result == error_mark_node)
+ *non_constant_p = true;
+ if (*non_constant_p || *overflow_p)
+ result = error_mark_node;
+ else if (!result)
+ result = void_node;
+ if (entry)
+ entry->result = cacheable ? result : error_mark_node;
+ }
+
+ pop_cx_call_context ();
+ return result;
+}
+
+/* Subroutine of build_data_member_initialization. MEMBER is a COMPONENT_REF
+ for a member of an anonymous aggregate, INIT is the initializer for that
+ member, and VEC_OUTER is the vector of constructor elements for the class
+ whose constructor we are processing. Add the initializer to the vector
+ and return true to indicate success. */
+
+// static bool
+// build_anon_member_initialization (tree member, tree init,
+// vec<constructor_elt, va_gc> **vec_outer)
+// {
+// /* MEMBER presents the relevant fields from the inside out, but we need
+// to build up the initializer from the outside in so that we can reuse
+// previously built CONSTRUCTORs if this is, say, the second field in an
+// anonymous struct. So we use a vec as a stack. */
+// auto_vec<tree, 2> fields;
+// do
+// {
+// fields.safe_push (TREE_OPERAND (member, 1));
+// member = TREE_OPERAND (member, 0);
+// } while (ANON_AGGR_TYPE_P (TREE_TYPE (member))
+// && TREE_CODE (member) == COMPONENT_REF);
+//
+// /* VEC has the constructor elements vector for the context of FIELD.
+// If FIELD is an anonymous aggregate, we will push inside it. */
+// vec<constructor_elt, va_gc> **vec = vec_outer;
+// tree field;
+// while (field = fields.pop (), ANON_AGGR_TYPE_P (TREE_TYPE (field)))
+// {
+// tree ctor;
+// /* If there is already an outer constructor entry for the anonymous
+// aggregate FIELD, use it; otherwise, insert one. */
+// if (vec_safe_is_empty (*vec) || (*vec)->last ().index != field)
+// {
+// ctor = build_constructor (TREE_TYPE (field), NULL);
+// CONSTRUCTOR_APPEND_ELT (*vec, field, ctor);
+// }
+// else
+// ctor = (*vec)->last ().value;
+// vec = &CONSTRUCTOR_ELTS (ctor);
+// }
+//
+// /* Now we're at the innermost field, the one that isn't an anonymous
+// aggregate. Add its initializer to the CONSTRUCTOR and we're done. */
+// gcc_assert (fields.is_empty ());
+// CONSTRUCTOR_APPEND_ELT (*vec, field, init);
+//
+// return true;
+// }
+
+///* V is a vector of constructor elements built up for the base and member
+// initializers of a constructor for TYPE. They need to be in increasing
+// offset order, which they might not be yet if TYPE has a primary base
+// which is not first in the base-clause or a vptr and at least one base
+// all of which are non-primary. */
+//
+// static vec<constructor_elt, va_gc> *
+// sort_constexpr_mem_initializers (tree type, vec<constructor_elt, va_gc> *v)
+//{
+// tree pri = CLASSTYPE_PRIMARY_BINFO (type);
+// tree field_type;
+// unsigned i;
+// constructor_elt *ce;
+//
+// if (pri)
+// field_type = BINFO_TYPE (pri);
+// else if (TYPE_CONTAINS_VPTR_P (type))
+// field_type = vtbl_ptr_type_node;
+// else
+// return v;
+//
+// /* Find the element for the primary base or vptr and move it to the
+// beginning of the vec. */
+// for (i = 0; vec_safe_iterate (v, i, &ce); ++i)
+// if (TREE_TYPE (ce->index) == field_type)
+// break;
+//
+// if (i > 0 && i < vec_safe_length (v))
+// {
+// vec<constructor_elt, va_gc> &vref = *v;
+// constructor_elt elt = vref[i];
+// for (; i > 0; --i)
+// vref[i] = vref[i - 1];
+// vref[0] = elt;
+// }
+//
+// return v;
+//}
+
+/* Subroutine of build_constexpr_constructor_member_initializers.
+ The expression tree T represents a data member initialization
+ in a (constexpr) constructor definition. Build a pairing of
+   the data member with its initializer, and append that pair
+   to the vector of initializer pairs VEC.  */
+
+static bool
+build_data_member_initialization (tree t, vec<constructor_elt, va_gc> **vec)
+{
+ tree member;
+ if (TREE_CODE (t) == CLEANUP_POINT_EXPR)
+ t = TREE_OPERAND (t, 0);
+ if (TREE_CODE (t) == EXPR_STMT)
+ t = TREE_OPERAND (t, 0);
+ if (t == error_mark_node)
+ return false;
+ if (TREE_CODE (t) == STATEMENT_LIST)
+ {
+ for (tree stmt : tsi_range (t))
+ if (!build_data_member_initialization (stmt, vec))
+ return false;
+ return true;
+ }
+ if (TREE_CODE (t) == CONVERT_EXPR)
+ t = TREE_OPERAND (t, 0);
+ if (TREE_CODE (t) == INIT_EXPR
+ /* vptr initialization shows up as a MODIFY_EXPR. In C++14 we only
+ use what this function builds for cx_check_missing_mem_inits, and
+ assignment in the ctor body doesn't count. */
+ || (TREE_CODE (t) == MODIFY_EXPR))
+ {
+ member = TREE_OPERAND (t, 0);
+ // Faisal: not sure if we need to port over break_out_target_exprs
+ // if not, then not sure how to handle init in this case
+ // init = break_out_target_exprs (TREE_OPERAND (t, 1));
+ }
+ else if (TREE_CODE (t) == CALL_EXPR)
+ {
+ tree fn = get_callee_fndecl (t);
+ if (!fn || !DECL_CONSTRUCTOR_P (fn))
+ /* We're only interested in calls to subobject constructors. */
+ return true;
+ member = CALL_EXPR_ARG (t, 0);
+ /* We don't use build_cplus_new here because it complains about
+ abstract bases. Leaving the call unwrapped means that it has the
+ wrong type, but cxx_eval_constant_expression doesn't care. */
+ // Faisal: not sure if we need to port over break_out_target_exprs
+ // if not, then not sure how to handle init in this case
+ // init = break_out_target_exprs (t);
+ }
+ else if (TREE_CODE (t) == BIND_EXPR)
+ return build_data_member_initialization (BIND_EXPR_BODY (t), vec);
+ else
+ /* Don't add anything else to the CONSTRUCTOR. */
+ return true;
+ if (INDIRECT_REF_P (member))
+ member = TREE_OPERAND (member, 0);
+ if (TREE_CODE (member) == NOP_EXPR)
+ {
+ tree op = member;
+ STRIP_NOPS (op);
+ if (TREE_CODE (op) == ADDR_EXPR)
+ {
+ gcc_assert (same_type_ignoring_top_level_qualifiers_p (
+ TREE_TYPE (TREE_TYPE (op)), TREE_TYPE (TREE_TYPE (member))));
+ /* Initializing a cv-qualified member; we need to look through
+ the const_cast. */
+ member = op;
+ }
+ else if (op == current_class_ptr
+ && (same_type_ignoring_top_level_qualifiers_p (
+ TREE_TYPE (TREE_TYPE (member)), current_class_type)))
+ /* Delegating constructor. */
+ member = op;
+ else
+ {
+ /* This is an initializer for an empty base; keep it for now so
+ we can check it in cxx_eval_bare_aggregate. */
+ gcc_assert (is_empty_class (TREE_TYPE (TREE_TYPE (member))));
+ }
+ }
+ if (TREE_CODE (member) == ADDR_EXPR)
+ member = TREE_OPERAND (member, 0);
+ if (TREE_CODE (member) == COMPONENT_REF)
+ {
+ tree aggr = TREE_OPERAND (member, 0);
+ if (TREE_CODE (aggr) == VAR_DECL)
+ /* Initializing a local variable, don't add anything. */
+ return true;
+ if (TREE_CODE (aggr) != COMPONENT_REF)
+ /* Normal member initialization. */
+ member = TREE_OPERAND (member, 1);
+ else if (ANON_AGGR_TYPE_P (TREE_TYPE (aggr)))
+ /* Initializing a member of an anonymous union. */
+ rust_sorry_at (Location (), "cannot handle value initialization yet");
+ // return build_anon_member_initialization (member, init, vec);
+ else
+ /* We're initializing a vtable pointer in a base. Leave it as
+ COMPONENT_REF so we remember the path to get to the vfield. */
+ gcc_assert (TREE_TYPE (member) == vtbl_ptr_type_node);
+ }
+
+ /* Value-initialization can produce multiple initializers for the
+ same field; use the last one. */
+ if (!vec_safe_is_empty (*vec) && (*vec)->last ().index == member)
+ rust_sorry_at (Location (), "cannot handle value initialization yet");
+ // (*vec)->last ().value = init;
+ else
+ rust_sorry_at (Location (), "cannot handle value initialization yet");
+ // CONSTRUCTOR_APPEND_ELT (*vec, member, init);
+ return true;
+}
+
+///* Build compile-time evaluable representations of the member-initializer
+//   list for a constexpr constructor. */
+//
+// static tree
+// build_constexpr_constructor_member_initializers (tree type, tree body)
+//{
+// vec<constructor_elt, va_gc> *vec = NULL;
+// bool ok = true;
+// while (true)
+// switch (TREE_CODE (body))
+// {
+// case STATEMENT_LIST:
+// for (tree stmt : tsi_range (body))
+// {
+// body = stmt;
+// if (TREE_CODE (body) == BIND_EXPR)
+// break;
+// }
+// break;
+//
+// case BIND_EXPR:
+// body = BIND_EXPR_BODY (body);
+// goto found;
+//
+// default:
+// gcc_unreachable ();
+// }
+// found:
+//
+// if (TREE_CODE (body) == CLEANUP_POINT_EXPR)
+// {
+// body = TREE_OPERAND (body, 0);
+// if (TREE_CODE (body) == EXPR_STMT)
+// body = TREE_OPERAND (body, 0);
+// if (TREE_CODE (body) == INIT_EXPR
+// && (same_type_ignoring_top_level_qualifiers_p (
+// TREE_TYPE (TREE_OPERAND (body, 0)), current_class_type)))
+// {
+// /* Trivial copy. */
+// return TREE_OPERAND (body, 1);
+// }
+// ok = build_data_member_initialization (body, &vec);
+// }
+// else if (TREE_CODE (body) == STATEMENT_LIST)
+// {
+// for (tree stmt : tsi_range (body))
+// {
+// ok = build_data_member_initialization (stmt, &vec);
+// if (!ok)
+// break;
+// }
+// }
+// else if (EXPR_P (body))
+// ok = build_data_member_initialization (body, &vec);
+// else
+// gcc_assert (errorcount > 0);
+// if (ok)
+// {
+// if (vec_safe_length (vec) > 0)
+// {
+// /* In a delegating constructor, return the target. */
+// constructor_elt *ce = &(*vec)[0];
+// if (ce->index == current_class_ptr)
+// {
+// body = ce->value;
+// vec_free (vec);
+// return body;
+// }
+// }
+// vec = sort_constexpr_mem_initializers (type, vec);
+// return build_constructor (type, vec);
+// }
+// else
+// return error_mark_node;
+//}
+
+// Subroutine of check_constexpr_fundef. BODY is the body of a function
+// declared to be constexpr, or a sub-statement thereof. Returns the
+// return value if suitable, error_mark_node for a statement not allowed in
+// a constexpr function, or NULL_TREE if no return value was found.
+static tree
+constexpr_fn_retval (const constexpr_ctx *ctx, tree body)
+{
+ switch (TREE_CODE (body))
+ {
+ case STATEMENT_LIST: {
+ tree expr = NULL_TREE;
+ for (tree stmt : tsi_range (body))
+ {
+ tree s = constexpr_fn_retval (ctx, stmt);
+ if (s == error_mark_node)
+ return error_mark_node;
+ else if (s == NULL_TREE)
+ /* Keep iterating. */;
+ else if (expr)
+ /* Multiple return statements. */
+ return error_mark_node;
+ else
+ expr = s;
+ }
+ return expr;
+ }
+
+ case RETURN_EXPR: {
+ bool non_constant_p = false;
+ bool overflow_p = false;
+ return eval_constant_expression (ctx, body, false, &non_constant_p,
+ &overflow_p);
+ }
+ case DECL_EXPR: {
+ tree decl = DECL_EXPR_DECL (body);
+ if (TREE_CODE (decl) == USING_DECL
+ /* Accept __func__, __FUNCTION__, and __PRETTY_FUNCTION__. */
+ || DECL_ARTIFICIAL (decl))
+ return NULL_TREE;
+ return error_mark_node;
+ }
+
+ case CLEANUP_POINT_EXPR:
+ return constexpr_fn_retval (ctx, TREE_OPERAND (body, 0));
+
+ case BIND_EXPR: {
+ tree b = BIND_EXPR_BODY (body);
+ return constexpr_fn_retval (ctx, b);
+ }
+ break;
+
+ default:
+ return error_mark_node;
+ }
+ return error_mark_node;
+}
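+
+/* Illustrative sketch: a body equivalent to { return 42; } walks the
+   STATEMENT_LIST case above, evaluates the lone RETURN_EXPR and yields the
+   INTEGER_CST 42.  A second RETURN_EXPR in the same list, or a statement
+   outside the accepted kinds, yields error_mark_node.  */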
+
+// Taken from cp/constexpr.cc
+//
+// If DECL is a scalar enumeration constant or variable with a
+// constant initializer, return the initializer (or, recursively, its
+// initializer); otherwise, return DECL. If STRICT_P, the
+// initializer is only returned if DECL is a
+// constant-expression. If RETURN_AGGREGATE_CST_OK_P, it is ok to
+// return an aggregate constant. If UNSHARE_P, return an unshared
+// copy of the initializer.
+static tree
+constant_value_1 (tree decl, bool, bool, bool unshare_p)
+{
+ while (TREE_CODE (decl) == CONST_DECL)
+ {
+ tree init;
+ /* If DECL is a static data member in a template
+ specialization, we must instantiate it here. The
+ initializer for the static data member is not processed
+ until needed; we need it now. */
+
+ init = DECL_INITIAL (decl);
+ if (init == error_mark_node)
+ {
+ if (TREE_CODE (decl) == CONST_DECL)
+ /* Treat the error as a constant to avoid cascading errors on
+ excessively recursive template instantiation (c++/9335). */
+ return init;
+ else
+ return decl;
+ }
+
+ decl = init;
+ }
+ return unshare_p ? unshare_expr (decl) : decl;
+}
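+
+/* Illustrative sketch: for a CONST_DECL such as an enum discriminant whose
+   DECL_INITIAL is the INTEGER_CST 0, the loop above chases the initializer
+   and returns that constant directly (unshared when UNSHARE_P).  */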
+
+// A more relaxed version of decl_really_constant_value, used by the
+// common C/C++ code.
+tree
+decl_constant_value (tree decl, bool unshare_p)
+{
+ return constant_value_1 (decl, /*strict_p=*/false,
+ /*return_aggregate_cst_ok_p=*/true,
+ /*unshare_p=*/unshare_p);
+}
+
+static void
+non_const_var_error (location_t loc, tree r)
+{
+ error_at (loc,
+ "the value of %qD is not usable in a constant "
+ "expression",
+ r);
+ /* Avoid error cascade. */
+ if (DECL_INITIAL (r) == error_mark_node)
+ return;
+
+ // more in cp/constexpr.cc
+}
+
+static tree
+get_callee (tree call)
+{
+ if (call == NULL_TREE)
+ return call;
+ else if (TREE_CODE (call) == CALL_EXPR)
+ return CALL_EXPR_FN (call);
+
+ return NULL_TREE;
+}
+
+// We have an expression tree T that represents a call, either CALL_EXPR
+// or AGGR_INIT_EXPR. If the call is lexically to a named function,
+// return the _DECL for that function.
+static tree
+get_function_named_in_call (tree t)
+{
+ tree fun = get_callee (t);
+ if (fun && TREE_CODE (fun) == ADDR_EXPR
+ && TREE_CODE (TREE_OPERAND (fun, 0)) == FUNCTION_DECL)
+ fun = TREE_OPERAND (fun, 0);
+ return fun;
+}
+
+// forked from gcc/cp/constexpr.cc maybe_constexpr_fn
+
+/* True if a function might be declared constexpr.  */
+
+bool
+maybe_constexpr_fn (tree t)
+{
+ return (DECL_DECLARED_CONSTEXPR_P (t));
+}
+
+// forked from gcc/cp/constexpr.cc var_in_maybe_constexpr_fn
+
+/* True if T was declared in a function that might be constexpr, i.e. a
+   function that was declared constexpr.  */
+
+bool
+var_in_maybe_constexpr_fn (tree t)
+{
+ return (DECL_FUNCTION_SCOPE_P (t) && maybe_constexpr_fn (DECL_CONTEXT (t)));
+}
+
+/* P0859: A function is needed for constant evaluation if it is a constexpr
+ function that is named by an expression ([basic.def.odr]) that is
+ potentially constant evaluated.
+
+ So we need to instantiate any constexpr functions mentioned by the
+ expression even if the definition isn't needed for evaluating the
+ expression. */
+
+static tree
+instantiate_cx_fn_r (tree *tp, int *walk_subtrees, void * /*data*/)
+{
+ if (TREE_CODE (*tp) == CALL_EXPR)
+ {
+ if (EXPR_HAS_LOCATION (*tp))
+ input_location = EXPR_LOCATION (*tp);
+ }
+
+ if (!EXPR_P (*tp))
+ *walk_subtrees = 0;
+
+ return NULL_TREE;
+}
+
+static void
+instantiate_constexpr_fns (tree t)
+{
+ location_t loc = input_location;
+ rs_walk_tree_without_duplicates (&t, instantiate_cx_fn_r, NULL);
+ input_location = loc;
+}
+
+/* Returns less than, equal to, or greater than zero if KEY is found to be
+ less than, to match, or to be greater than the constructor_elt's INDEX. */
+
+static int
+array_index_cmp (tree key, tree index)
+{
+ gcc_assert (TREE_CODE (key) == INTEGER_CST);
+
+ switch (TREE_CODE (index))
+ {
+ case INTEGER_CST:
+ return tree_int_cst_compare (key, index);
+ case RANGE_EXPR: {
+ tree lo = TREE_OPERAND (index, 0);
+ tree hi = TREE_OPERAND (index, 1);
+ if (tree_int_cst_lt (key, lo))
+ return -1;
+ else if (tree_int_cst_lt (hi, key))
+ return 1;
+ else
+ return 0;
+ }
+ default:
+ gcc_unreachable ();
+ }
+}
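+
+/* For example, with KEY 5: comparing against INTEGER_CST 9 returns -1,
+   against INTEGER_CST 2 returns 1, and against RANGE_EXPR [3, 7] returns 0,
+   since 5 falls inside the range.  */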
+
+/* If T is a CONSTRUCTOR, return an unshared copy of T and any
+ sub-CONSTRUCTORs. Otherwise return T.
+
+ We use this whenever we initialize an object as a whole, whether it's a
+ parameter, a local variable, or a subobject, so that subsequent
+ modifications don't affect other places where it was used. */
+
+tree
+unshare_constructor (tree t MEM_STAT_DECL)
+{
+ if (!t || TREE_CODE (t) != CONSTRUCTOR)
+ return t;
+ auto_vec<tree *, 4> ptrs;
+ ptrs.safe_push (&t);
+ while (!ptrs.is_empty ())
+ {
+ tree *p = ptrs.pop ();
+ tree n = copy_node (*p PASS_MEM_STAT);
+ CONSTRUCTOR_ELTS (n)
+ = vec_safe_copy (CONSTRUCTOR_ELTS (*p) PASS_MEM_STAT);
+ *p = n;
+ vec<constructor_elt, va_gc> *v = CONSTRUCTOR_ELTS (n);
+ constructor_elt *ce;
+ for (HOST_WIDE_INT i = 0; vec_safe_iterate (v, i, &ce); ++i)
+ if (ce->value && TREE_CODE (ce->value) == CONSTRUCTOR)
+ ptrs.safe_push (&ce->value);
+ }
+ return t;
+}
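+
+/* Illustrative sketch: if two locals are initialized from the same cached
+   CONSTRUCTOR value, say
+
+     let mut a = FOO;   // FOO some constant struct value
+     let b = FOO;
+
+   then without this deep copy a later constexpr store into "a" would mutate
+   the CONSTRUCTOR recorded for "b" (and for the cache itself); unsharing
+   gives each use its own tree.  */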
+
+/* Returns the index of the constructor_elt of ARY which matches DINDEX, or -1
+ if none. If INSERT is true, insert a matching element rather than fail. */
+
+static HOST_WIDE_INT
+find_array_ctor_elt (tree ary, tree dindex, bool insert)
+{
+ if (tree_int_cst_sgn (dindex) < 0)
+ return -1;
+
+ unsigned HOST_WIDE_INT i = tree_to_uhwi (dindex);
+ vec<constructor_elt, va_gc> *elts = CONSTRUCTOR_ELTS (ary);
+ unsigned HOST_WIDE_INT len = vec_safe_length (elts);
+
+ unsigned HOST_WIDE_INT end = len;
+ unsigned HOST_WIDE_INT begin = 0;
+
+ /* If the last element of the CONSTRUCTOR has its own index, we can assume
+ that the same is true of the other elements and index directly. */
+ if (end > 0)
+ {
+ tree cindex = (*elts)[end - 1].index;
+ if (cindex == NULL_TREE)
+ {
+ /* Verify that if the last index is missing, all indexes
+ are missing. */
+ if (flag_checking)
+ for (unsigned int j = 0; j < len - 1; ++j)
+ gcc_assert ((*elts)[j].index == NULL_TREE);
+ if (i < end)
+ return i;
+ else
+ {
+ begin = end;
+ if (i == end)
+ /* If the element is to be added right at the end,
+ make sure it is added with cleared index too. */
+ dindex = NULL_TREE;
+ else if (insert)
+ /* Otherwise, in order not to break the assumption
+ that CONSTRUCTOR either has all indexes or none,
+ we need to add indexes to all elements. */
+ for (unsigned int j = 0; j < len; ++j)
+ (*elts)[j].index = build_int_cst (TREE_TYPE (dindex), j);
+ }
+ }
+ else if (TREE_CODE (cindex) == INTEGER_CST
+ && compare_tree_int (cindex, end - 1) == 0)
+ {
+ if (i < end)
+ return i;
+ else
+ begin = end;
+ }
+ }
+
+ /* Otherwise, find a matching index by means of a binary search. */
+ while (begin != end)
+ {
+ unsigned HOST_WIDE_INT middle = (begin + end) / 2;
+ constructor_elt &elt = (*elts)[middle];
+ tree idx = elt.index;
+
+ int cmp = array_index_cmp (dindex, idx);
+ if (cmp < 0)
+ end = middle;
+ else if (cmp > 0)
+ begin = middle + 1;
+ else
+ {
+ if (insert && TREE_CODE (idx) == RANGE_EXPR)
+ {
+ /* We need to split the range. */
+ constructor_elt e;
+ tree lo = TREE_OPERAND (idx, 0);
+ tree hi = TREE_OPERAND (idx, 1);
+ tree value = elt.value;
+ dindex = fold_convert (sizetype, dindex);
+ if (tree_int_cst_lt (lo, dindex))
+ {
+ /* There are still some lower elts; shorten the range. */
+ tree new_hi
+ = int_const_binop (MINUS_EXPR, dindex, size_one_node);
+ if (tree_int_cst_equal (lo, new_hi))
+ /* Only one element left, no longer a range. */
+ elt.index = lo;
+ else
+ TREE_OPERAND (idx, 1) = new_hi;
+ /* Append the element we want to insert. */
+ ++middle;
+ e.index = dindex;
+ e.value = unshare_constructor (value);
+ vec_safe_insert (CONSTRUCTOR_ELTS (ary), middle, e);
+ }
+ else
+ /* No lower elts, the range elt is now ours. */
+ elt.index = dindex;
+
+ if (tree_int_cst_lt (dindex, hi))
+ {
+ /* There are still some higher elts; append a range. */
+ tree new_lo
+ = int_const_binop (PLUS_EXPR, dindex, size_one_node);
+ if (tree_int_cst_equal (new_lo, hi))
+ e.index = hi;
+ else
+ e.index = build2 (RANGE_EXPR, sizetype, new_lo, hi);
+ e.value = unshare_constructor (value);
+ vec_safe_insert (CONSTRUCTOR_ELTS (ary), middle + 1, e);
+ }
+ }
+ return middle;
+ }
+ }
+
+ if (insert)
+ {
+ constructor_elt e = {dindex, NULL_TREE};
+ vec_safe_insert (CONSTRUCTOR_ELTS (ary), end, e);
+ return end;
+ }
+
+ return -1;
+}
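+
+/* Illustrative sketch of the RANGE_EXPR split above: an array CONSTRUCTOR
+   {[0..9] = 0} asked to INSERT index 4 becomes
+
+     {[0..3] = 0, [4] = 0, [5..9] = 0}
+
+   and the function returns the position of the new middle element, so the
+   caller can overwrite exactly that slot.  */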
+
+/* Some expressions may have constant operands but are not constant
+ themselves, such as 1/0. Call this function to check for that
+ condition.
+
+ We only call this in places that require an arithmetic constant, not in
+ places where we might have a non-constant expression that can be a
+ component of a constant expression, such as the address of a constexpr
+ variable that might be dereferenced later. */
+
+static bool
+verify_constant (tree t, bool allow_non_constant, bool *non_constant_p,
+ bool *overflow_p)
+{
+ if (!*non_constant_p && !reduced_constant_expression_p (t) && t != void_node)
+ {
+ if (!allow_non_constant)
+ error ("%q+E is not a constant expression", t);
+ *non_constant_p = true;
+ }
+ if (TREE_OVERFLOW_P (t))
+ {
+ if (!allow_non_constant)
+ {
+ permerror (input_location, "overflow in constant expression");
+ /* If we're being permissive (and are in an enforcing
+ context), ignore the overflow. */
+ if (flag_permissive)
+ return *non_constant_p;
+ }
+ *overflow_p = true;
+ }
+ return *non_constant_p;
+}
+
+// forked from gcc/cp/constexpr.cc find_heap_var_refs
+
+/* Look for heap variables in the expression *TP. */
+
+static tree
+find_heap_var_refs (tree *tp, int *walk_subtrees, void * /*data*/)
+{
+ if (VAR_P (*tp)
+ && (DECL_NAME (*tp) == heap_uninit_identifier
+ || DECL_NAME (*tp) == heap_identifier
+ || DECL_NAME (*tp) == heap_vec_uninit_identifier
+ || DECL_NAME (*tp) == heap_vec_identifier
+ || DECL_NAME (*tp) == heap_deleted_identifier))
+ return *tp;
+
+ if (TYPE_P (*tp))
+ *walk_subtrees = 0;
+ return NULL_TREE;
+}
+
+// forked from gcc/cp/constexpr.cc find_immediate_fndecl
+
+/* Find immediate function decls in *TP if any. */
+
+static tree
+find_immediate_fndecl (tree *tp, int * /*walk_subtrees*/, void * /*data*/)
+{
+ if (TREE_CODE (*tp) == FUNCTION_DECL && DECL_IMMEDIATE_FUNCTION_P (*tp))
+ return *tp;
+ if (TREE_CODE (*tp) == PTRMEM_CST
+ && TREE_CODE (PTRMEM_CST_MEMBER (*tp)) == FUNCTION_DECL
+ && DECL_IMMEDIATE_FUNCTION_P (PTRMEM_CST_MEMBER (*tp)))
+ return PTRMEM_CST_MEMBER (*tp);
+ return NULL_TREE;
+}
+
+// forked in gcc/cp/constexpr.cc diag_array_subscript
+
+/* Under the control of CTX, issue a detailed diagnostic for
+ an out-of-bounds subscript INDEX into the expression ARRAY. */
+
+static void
+diag_array_subscript (location_t loc, const constexpr_ctx *ctx, tree array,
+ tree index)
+{
+ if (!ctx->quiet)
+ {
+ tree arraytype = TREE_TYPE (array);
+
+ /* Convert the unsigned array subscript to a signed integer to avoid
+ printing huge numbers for small negative values. */
+ tree sidx = fold_convert (ssizetype, index);
+ STRIP_ANY_LOCATION_WRAPPER (array);
+ if (DECL_P (array))
+ {
+ if (TYPE_DOMAIN (arraytype))
+ error_at (loc,
+ "array subscript value %qE is outside the bounds "
+ "of array %qD of type %qT",
+ sidx, array, arraytype);
+ else
+ error_at (loc,
+ "nonzero array subscript %qE is used with array %qD of "
+ "type %qT with unknown bounds",
+ sidx, array, arraytype);
+ inform (DECL_SOURCE_LOCATION (array), "declared here");
+ }
+ else if (TYPE_DOMAIN (arraytype))
+ error_at (loc,
+ "array subscript value %qE is outside the bounds "
+ "of array type %qT",
+ sidx, arraytype);
+ else
+ error_at (loc,
+ "nonzero array subscript %qE is used with array of type %qT "
+ "with unknown bounds",
+ sidx, arraytype);
+ }
+}
+
+// forked from gcc/cp/constexpr.cc get_array_or_vector_nelts
+
+/* Return the number of elements for TYPE (which is an ARRAY_TYPE or
+ a VECTOR_TYPE). */
+
+static tree
+get_array_or_vector_nelts (const constexpr_ctx *ctx, tree type,
+ bool *non_constant_p, bool *overflow_p)
+{
+ tree nelts;
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ {
+ if (TYPE_DOMAIN (type))
+ nelts = array_type_nelts_top (type);
+ else
+ nelts = size_zero_node;
+ }
+ else if (VECTOR_TYPE_P (type))
+ nelts = size_int (TYPE_VECTOR_SUBPARTS (type));
+ else
+ gcc_unreachable ();
+
+ /* For VLAs, the number of elements won't be an integer constant. */
+ nelts
+ = eval_constant_expression (ctx, nelts, false, non_constant_p, overflow_p);
+ return nelts;
+}
+
+// forked from gcc/cp/constexpr.cc eval_and_check_array_index
+
+/* Subroutine of cxx_eval_array_reference. T is an ARRAY_REF; evaluate the
+ subscript, diagnose any problems with it, and return the result. */
+
+static tree
+eval_and_check_array_index (const constexpr_ctx *ctx, tree t,
+ bool allow_one_past, bool *non_constant_p,
+ bool *overflow_p)
+{
+ location_t loc = rs_expr_loc_or_input_loc (t);
+ tree ary = TREE_OPERAND (t, 0);
+ t = TREE_OPERAND (t, 1);
+ tree index = eval_constant_expression (ctx, t, allow_one_past, non_constant_p,
+ overflow_p);
+ VERIFY_CONSTANT (index);
+
+ if (!tree_fits_shwi_p (index) || tree_int_cst_sgn (index) < 0)
+ {
+ diag_array_subscript (loc, ctx, ary, index);
+ *non_constant_p = true;
+ return t;
+ }
+
+ tree nelts = get_array_or_vector_nelts (ctx, TREE_TYPE (ary), non_constant_p,
+ overflow_p);
+ VERIFY_CONSTANT (nelts);
+ if (allow_one_past ? !tree_int_cst_le (index, nelts)
+ : !tree_int_cst_lt (index, nelts))
+ {
+ diag_array_subscript (loc, ctx, ary, index);
+ *non_constant_p = true;
+ return t;
+ }
+
+ return index;
+}
+
+// forked from gcc/cp/constexpr.cc extract_string_elt
+
+/* Extract element INDEX consisting of CHARS_PER_ELT chars from
+ STRING_CST STRING. */
+
+static tree
+extract_string_elt (tree string, unsigned chars_per_elt, unsigned index)
+{
+ tree type = cv_unqualified (TREE_TYPE (TREE_TYPE (string)));
+ tree r;
+
+ if (chars_per_elt == 1)
+ r = build_int_cst (type, TREE_STRING_POINTER (string)[index]);
+ else
+ {
+ const unsigned char *ptr
+ = ((const unsigned char *) TREE_STRING_POINTER (string)
+ + index * chars_per_elt);
+ r = native_interpret_expr (type, ptr, chars_per_elt);
+ }
+ return r;
+}
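+
+/* For example, for the STRING_CST "abc" with CHARS_PER_ELT 1 and INDEX 1,
+   this returns the INTEGER_CST 'b'; for wider element types the bytes are
+   reassembled with native_interpret_expr instead.  */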
+
+/* Check whether the parameter and return types of FUN are valid for a
+ constexpr function, and complain if COMPLAIN. */
+
+bool
+is_valid_constexpr_fn (tree fun, bool complain)
+{
+ bool ret = true;
+
+ for (tree parm = FUNCTION_FIRST_USER_PARM (fun); parm != NULL_TREE;
+ parm = TREE_CHAIN (parm))
+ if (!literal_type_p (TREE_TYPE (parm)))
+ {
+ ret = false;
+ if (complain)
+ {
+ // auto_diagnostic_group d;
+ // error ("invalid type for parameter %d of %<constexpr%> "
+ // "function %q+#D",
+ // DECL_PARM_INDEX (parm), fun);
+ Location locus = Location (DECL_SOURCE_LOCATION (fun));
+ rust_error_at (
+ locus, "invalid type for parameter %d of %<constexpr%> function",
+ DECL_PARM_INDEX (parm));
+ }
+ }
+
+ return ret;
+}
+
+void
+explain_invalid_constexpr_fn (tree fun)
+{
+ static hash_set<tree> *diagnosed;
+ // tree body;
+
+ if (diagnosed == NULL)
+ diagnosed = new hash_set<tree>;
+ if (diagnosed->add (fun))
+ /* Already explained. */
+ return;
+
+ iloc_sentinel ils = input_location;
+ // if (!lambda_static_thunk_p (fun))
+ // {
+ // /* Diagnostics should completely ignore the static thunk, so leave
+ // input_location set to our caller's location. */
+ // input_location = DECL_SOURCE_LOCATION (fun);
+ // inform (input_location,
+ // "%qD is not usable as a %<constexpr%> function because:",
+ // fun);
+ // }
+
+ /* First check the declaration. */
+ if (is_valid_constexpr_fn (fun, true))
+ {
+ // /* Then if it's OK, the body. */
+ // if (!DECL_DECLARED_CONSTEXPR_P (fun))
+ // explain_implicit_non_constexpr (fun);
+ // else
+ // {
+ // if (constexpr_fundef *fd = retrieve_constexpr_fundef (fun))
+ // body = fd->body;
+ // else
+ // body = DECL_SAVED_TREE (fun);
+ // body = massage_constexpr_body (fun, body);
+ // require_potential_rvalue_constant_expression (body);
+ // }
+ }
+}
+
+/* BODY is a validated and massaged definition of a constexpr
+ function. Register it in the hash table. */
+
+void
+register_constexpr_fundef (const constexpr_fundef &value)
+{
+ /* Create the constexpr function table if necessary. */
+ if (constexpr_fundef_table == NULL)
+ constexpr_fundef_table
+ = hash_table<constexpr_fundef_hasher>::create_ggc (101);
+
+ constexpr_fundef **slot = constexpr_fundef_table->find_slot (
+ const_cast<constexpr_fundef *> (&value), INSERT);
+
+ gcc_assert (*slot == NULL);
+ *slot = ggc_alloc<constexpr_fundef> ();
+ **slot = value;
+}
+
+/* We are processing the definition of the constexpr function FUN.
+   Check that its body fulfills the appropriate requirements and
+ enter it in the constexpr function definition table. */
+
+void
+maybe_save_constexpr_fundef (tree fun)
+{
+ // FIXME
+
+ constexpr_fundef entry = {fun, NULL_TREE, NULL_TREE, NULL_TREE};
+ bool clear_ctx = false;
+ if (DECL_RESULT (fun) && DECL_CONTEXT (DECL_RESULT (fun)) == NULL_TREE)
+ {
+ clear_ctx = true;
+ DECL_CONTEXT (DECL_RESULT (fun)) = fun;
+ }
+ tree saved_fn = current_function_decl;
+ current_function_decl = fun;
+ entry.body = copy_fn (entry.decl, entry.parms, entry.result);
+ current_function_decl = saved_fn;
+ if (clear_ctx)
+ DECL_CONTEXT (DECL_RESULT (entry.decl)) = NULL_TREE;
+
+ register_constexpr_fundef (entry);
+}
+
+/* Evaluate a STATEMENT_LIST for side-effects. Handles various jump
+ semantics, for switch, break, continue, and return. */
+
+static tree
+eval_statement_list (const constexpr_ctx *ctx, tree t, bool *non_constant_p,
+ bool *overflow_p, tree *jump_target)
+{
+ tree local_target;
+ /* In a statement-expression we want to return the last value.
+     For an empty statement expression, return void_node.  */
+ tree r = void_node;
+ if (!jump_target)
+ {
+ local_target = NULL_TREE;
+ jump_target = &local_target;
+ }
+ for (tree stmt : tsi_range (t))
+ {
+ /* We've found a continue, so skip everything until we reach
+	 the label it's jumping to.  */
+ if (continues (jump_target))
+ {
+ if (label_matches (ctx, jump_target, stmt))
+ /* Found it. */
+ *jump_target = NULL_TREE;
+ else
+ continue;
+ }
+ if (TREE_CODE (stmt) == DEBUG_BEGIN_STMT)
+ continue;
+ r = eval_constant_expression (ctx, stmt, false, non_constant_p,
+ overflow_p, jump_target);
+ if (*non_constant_p)
+ break;
+ if (returns (jump_target) || breaks (jump_target))
+ break;
+ }
+ if (*jump_target && jump_target == &local_target)
+ {
+ /* We aren't communicating the jump to our caller, so give up. We don't
+ need to support evaluation of jumps out of statement-exprs. */
+ if (!ctx->quiet)
+ error_at (EXPR_LOCATION (r), "statement is not a constant expression");
+ *non_constant_p = true;
+ }
+ return r;
+}
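+
+/* Illustrative sketch: while evaluating a loop body whose statement list
+   contains a continue, the continue sets *JUMP_TARGET; on the next pass
+   every following statement is skipped by the continues () check above
+   until the matching label clears it, so the remainder of that iteration
+   is never evaluated.  */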
+
+// forked from gcc/cp/constexpr.cc cxx_eval_conditional_expression
+
+/* Subroutine of cxx_eval_constant_expression.
+ Attempt to evaluate condition expressions. Dead branches are not
+ looked into. */
+
+static tree
+eval_conditional_expression (const constexpr_ctx *ctx, tree t, bool lval,
+ bool *non_constant_p, bool *overflow_p,
+ tree *jump_target)
+{
+ tree val
+ = eval_constant_expression (ctx, TREE_OPERAND (t, 0),
+ /*lval*/ false, non_constant_p, overflow_p);
+ VERIFY_CONSTANT (val);
+ if (TREE_CODE (t) == IF_STMT && IF_STMT_CONSTEVAL_P (t))
+ {
+      /* Evaluate the condition as if it were
+	 if (__builtin_is_constant_evaluated ()), i.e. defer it if not
+	 ctx->manifestly_const_eval (we sometimes try to constant-evaluate
+	 expressions, or parts of them, without manifestly_const_eval even
+	 though they will later be evaluated with it); otherwise fold it to
+	 true.  */
+ if (ctx->manifestly_const_eval)
+ val = boolean_true_node;
+ else
+ {
+ *non_constant_p = true;
+ return t;
+ }
+ }
+ /* Don't VERIFY_CONSTANT the other operands. */
+ if (integer_zerop (val))
+ val = TREE_OPERAND (t, 2);
+ else
+ val = TREE_OPERAND (t, 1);
+ if (/*TREE_CODE (t) == IF_STMT && */ !val)
+ val = void_node;
+ return eval_constant_expression (ctx, val, lval, non_constant_p, overflow_p,
+ jump_target);
+}
+
+// forked from gcc/cp/constexpr.cc cxx_eval_bit_field_ref
+
+/* Subroutine of cxx_eval_constant_expression.
+ Attempt to reduce a field access of a value of class type that is
+ expressed as a BIT_FIELD_REF. */
+
+static tree
+eval_bit_field_ref (const constexpr_ctx *ctx, tree t, bool lval,
+ bool *non_constant_p, bool *overflow_p)
+{
+ tree orig_whole = TREE_OPERAND (t, 0);
+ tree retval, fldval, utype, mask;
+ bool fld_seen = false;
+ HOST_WIDE_INT istart, isize;
+ tree whole = eval_constant_expression (ctx, orig_whole, lval, non_constant_p,
+ overflow_p);
+ tree start, field, value;
+ unsigned HOST_WIDE_INT i;
+
+ if (whole == orig_whole)
+ return t;
+ /* Don't VERIFY_CONSTANT here; we only want to check that we got a
+ CONSTRUCTOR. */
+ if (!*non_constant_p && TREE_CODE (whole) != VECTOR_CST
+ && TREE_CODE (whole) != CONSTRUCTOR)
+ {
+ if (!ctx->quiet)
+ error ("%qE is not a constant expression", orig_whole);
+ *non_constant_p = true;
+ }
+ if (*non_constant_p)
+ return t;
+
+ if (TREE_CODE (whole) == VECTOR_CST)
+ return fold_ternary (BIT_FIELD_REF, TREE_TYPE (t), whole,
+ TREE_OPERAND (t, 1), TREE_OPERAND (t, 2));
+
+ start = TREE_OPERAND (t, 2);
+ istart = tree_to_shwi (start);
+ isize = tree_to_shwi (TREE_OPERAND (t, 1));
+ utype = TREE_TYPE (t);
+ if (!TYPE_UNSIGNED (utype))
+ utype = build_nonstandard_integer_type (TYPE_PRECISION (utype), 1);
+ retval = build_int_cst (utype, 0);
+ FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (whole), i, field, value)
+ {
+ tree bitpos = bit_position (field);
+ STRIP_ANY_LOCATION_WRAPPER (value);
+ if (bitpos == start && DECL_SIZE (field) == TREE_OPERAND (t, 1))
+ return value;
+ if (TREE_CODE (TREE_TYPE (field)) == INTEGER_TYPE
+ && TREE_CODE (value) == INTEGER_CST && tree_fits_shwi_p (bitpos)
+ && tree_fits_shwi_p (DECL_SIZE (field)))
+ {
+ HOST_WIDE_INT bit = tree_to_shwi (bitpos);
+ HOST_WIDE_INT sz = tree_to_shwi (DECL_SIZE (field));
+ HOST_WIDE_INT shift;
+ if (bit >= istart && bit + sz <= istart + isize)
+ {
+ fldval = fold_convert (utype, value);
+ mask = build_int_cst_type (utype, -1);
+ mask = fold_build2 (LSHIFT_EXPR, utype, mask,
+ size_int (TYPE_PRECISION (utype) - sz));
+ mask = fold_build2 (RSHIFT_EXPR, utype, mask,
+ size_int (TYPE_PRECISION (utype) - sz));
+ fldval = fold_build2 (BIT_AND_EXPR, utype, fldval, mask);
+ shift = bit - istart;
+ if (BYTES_BIG_ENDIAN)
+ shift = TYPE_PRECISION (utype) - shift - sz;
+ fldval
+ = fold_build2 (LSHIFT_EXPR, utype, fldval, size_int (shift));
+ retval = fold_build2 (BIT_IOR_EXPR, utype, retval, fldval);
+ fld_seen = true;
+ }
+ }
+ }
+ if (fld_seen)
+ return fold_convert (TREE_TYPE (t), retval);
+ gcc_unreachable ();
+ return error_mark_node;
+}
+
+// forked from gcc/cp/constexpr.cc returns
+
+/* Predicates for the meaning of *jump_target. */
+
+static bool
+returns (tree *jump_target)
+{
+ return *jump_target
+ && (TREE_CODE (*jump_target) == RETURN_EXPR
+ || (TREE_CODE (*jump_target) == LABEL_DECL
+ && LABEL_DECL_CDTOR (*jump_target)));
+}
+
+// forked from gcc/cp/constexpr.cc breaks
+
+static bool
+breaks (tree *jump_target)
+{
+ return *jump_target
+ && ((TREE_CODE (*jump_target) == LABEL_DECL
+ && LABEL_DECL_BREAK (*jump_target))
+ || TREE_CODE (*jump_target) == BREAK_STMT
+ || TREE_CODE (*jump_target) == EXIT_EXPR);
+}
+
+// forked from gcc/cp/constexpr.cc continues
+
+static bool
+continues (tree *jump_target)
+{
+ return *jump_target
+ && ((TREE_CODE (*jump_target) == LABEL_DECL
+ && LABEL_DECL_CONTINUE (*jump_target))
+ || TREE_CODE (*jump_target) == CONTINUE_STMT);
+}
+
+// forked from gcc/cp/constexpr.cc switches
+
+static bool
+switches (tree *jump_target)
+{
+ return *jump_target && TREE_CODE (*jump_target) == INTEGER_CST;
+}
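+
+/* Summarizing the four predicates above: *jump_target holds a RETURN_EXPR
+ (or a cdtor LABEL_DECL) while unwinding to the caller, a break/continue
+ LABEL_DECL or BREAK_STMT/CONTINUE_STMT/EXIT_EXPR while unwinding the
+ innermost loop or switch, and the switch condition's INTEGER_CST while
+ scanning for a matching case label. */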
+
+// forked from gcc/cp/constexpr.cc cxx_eval_loop_expr
+
+/* Evaluate a LOOP_EXPR for side-effects. Handles break and return
+ semantics; continue semantics are covered by cxx_eval_statement_list. */
+
+static tree
+eval_loop_expr (const constexpr_ctx *ctx, tree t, bool *non_constant_p,
+ bool *overflow_p, tree *jump_target)
+{
+ constexpr_ctx new_ctx = *ctx;
+ tree local_target;
+ if (!jump_target)
+ {
+ local_target = NULL_TREE;
+ jump_target = &local_target;
+ }
+
+ tree body, cond = NULL_TREE, expr = NULL_TREE;
+ int count = 0;
+ switch (TREE_CODE (t))
+ {
+ case LOOP_EXPR:
+ body = LOOP_EXPR_BODY (t);
+ break;
+ case WHILE_STMT:
+ body = WHILE_BODY (t);
+ cond = WHILE_COND (t);
+ count = -1;
+ break;
+ case FOR_STMT:
+ if (FOR_INIT_STMT (t))
+ eval_constant_expression (ctx, FOR_INIT_STMT (t), /*lval*/ false,
+ non_constant_p, overflow_p, jump_target);
+ if (*non_constant_p)
+ return NULL_TREE;
+ body = FOR_BODY (t);
+ cond = FOR_COND (t);
+ expr = FOR_EXPR (t);
+ count = -1;
+ break;
+ default:
+ gcc_unreachable ();
+ }
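+ /* For WHILE_STMT and FOR_STMT, COUNT starts at -1 so the first pass of
+ the loop below skips the body and evaluates the controlling condition
+ first; LOOP_EXPR has no condition, so it starts at 0 and its body runs
+ from the first iteration. */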
+ auto_vec<tree, 10> save_exprs;
+ new_ctx.save_exprs = &save_exprs;
+ do
+ {
+ if (count != -1)
+ {
+ if (body)
+ eval_constant_expression (&new_ctx, body, /*lval*/ false,
+ non_constant_p, overflow_p, jump_target);
+ if (breaks (jump_target))
+ {
+ *jump_target = NULL_TREE;
+ break;
+ }
+
+ if (TREE_CODE (t) != LOOP_EXPR && continues (jump_target))
+ *jump_target = NULL_TREE;
+
+ if (expr)
+ eval_constant_expression (&new_ctx, expr, /*lval*/ false,
+ non_constant_p, overflow_p, jump_target);
+ }
+
+ if (cond)
+ {
+ tree res = eval_constant_expression (&new_ctx, cond, /*lval*/ false,
+ non_constant_p, overflow_p,
+ jump_target);
+ if (res)
+ {
+ if (verify_constant (res, ctx->quiet, non_constant_p, overflow_p))
+ break;
+ if (integer_zerop (res))
+ break;
+ }
+ else
+ gcc_assert (*jump_target);
+ }
+
+ /* Forget saved values of SAVE_EXPRs and TARGET_EXPRs. */
+ for (tree save_expr : save_exprs)
+ ctx->global->values.remove (save_expr);
+ save_exprs.truncate (0);
+
+ if (++count >= constexpr_loop_limit)
+ {
+ if (!ctx->quiet)
+ error_at (rs_expr_loc_or_input_loc (t),
+ "%<constexpr%> loop iteration count exceeds limit of %d "
+ "(use %<-fconstexpr-loop-limit=%> to increase the limit)",
+ constexpr_loop_limit);
+ *non_constant_p = true;
+ break;
+ }
+ }
+ while (!returns (jump_target) && !breaks (jump_target)
+ && !continues (jump_target) && (!switches (jump_target) || count == 0)
+ && !*non_constant_p);
+
+ /* Forget saved values of SAVE_EXPRs and TARGET_EXPRs. */
+ for (tree save_expr : save_exprs)
+ ctx->global->values.remove (save_expr);
+
+ return NULL_TREE;
+}
+
+// forked from gcc/cp/constexpr.cc cxx_eval_switch_expr
+
+/* Evaluate a SWITCH_EXPR for side-effects. Handles switch and break jump
+ semantics. */
+
+static tree
+eval_switch_expr (const constexpr_ctx *ctx, tree t, bool *non_constant_p,
+ bool *overflow_p, tree *jump_target)
+{
+ tree cond
+ = TREE_CODE (t) == SWITCH_STMT ? SWITCH_STMT_COND (t) : SWITCH_COND (t);
+ cond
+ = eval_constant_expression (ctx, cond, false, non_constant_p, overflow_p);
+ VERIFY_CONSTANT (cond);
+ *jump_target = cond;
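+ /* With an INTEGER_CST in *jump_target, switches (jump_target) is true:
+ evaluating the body below skips statements until a case label matching
+ the condition value is reached. */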
+
+ tree body
+ = TREE_CODE (t) == SWITCH_STMT ? SWITCH_STMT_BODY (t) : SWITCH_BODY (t);
+ constexpr_ctx new_ctx = *ctx;
+ constexpr_switch_state css = css_default_not_seen;
+ new_ctx.css_state = &css;
+ eval_constant_expression (&new_ctx, body, false, non_constant_p, overflow_p,
+ jump_target);
+ if (switches (jump_target) && css == css_default_seen)
+ {
+ /* If the SWITCH_EXPR body has default: label, process it once again,
+ this time instructing label_matches to return true for default:
+ label on switches (jump_target). */
+ css = css_default_processing;
+ eval_constant_expression (&new_ctx, body, false, non_constant_p,
+ overflow_p, jump_target);
+ }
+ if (breaks (jump_target) || switches (jump_target))
+ *jump_target = NULL_TREE;
+ return NULL_TREE;
+}
+
+// forked from gcc/cp/constexpr.cc eval_unary_expression
+
+/* Subroutine of cxx_eval_constant_expression.
+ Attempt to reduce the unary expression tree T to a compile time value.
+ If successful, return the value. Otherwise issue a diagnostic
+ and return error_mark_node. */
+
+static tree
+eval_unary_expression (const constexpr_ctx *ctx, tree t, bool /*lval*/,
+ bool *non_constant_p, bool *overflow_p)
+{
+ tree r;
+ tree orig_arg = TREE_OPERAND (t, 0);
+ tree arg = eval_constant_expression (ctx, orig_arg, /*lval*/ false,
+ non_constant_p, overflow_p);
+ VERIFY_CONSTANT (arg);
+ location_t loc = EXPR_LOCATION (t);
+ enum tree_code code = TREE_CODE (t);
+ tree type = TREE_TYPE (t);
+ r = fold_unary_loc (loc, code, type, arg);
+ if (r == NULL_TREE)
+ {
+ if (arg == orig_arg)
+ r = t;
+ else
+ r = build1_loc (loc, code, type, arg);
+ }
+ VERIFY_CONSTANT (r);
+ return r;
+}
+
+// forked from gcc/cp/constexpr.cc cxx_eval_outermost_constant_expr
+
+/* ALLOW_NON_CONSTANT is false if T is required to be a constant expression.
+ STRICT has the same sense as for constant_value_1: true if we only allow
+ conforming C++ constant expressions, or false if we want a constant value
+ even if it doesn't conform.
+ MANIFESTLY_CONST_EVAL is true if T is manifestly const-evaluated as
+ per P0595 even when ALLOW_NON_CONSTANT is true.
+ CONSTEXPR_DTOR is true when evaluating the dtor of a constexpr variable.
+ OBJECT must be non-NULL in that case. */
+
+static tree
+cxx_eval_outermost_constant_expr (tree t, bool allow_non_constant,
+ bool strict = true,
+ bool manifestly_const_eval = false,
+ bool constexpr_dtor = false,
+ tree object = NULL_TREE)
+{
+ auto_timevar time (TV_CONSTEXPR);
+
+ bool non_constant_p = false;
+ bool overflow_p = false;
+
+ if (BRACE_ENCLOSED_INITIALIZER_P (t))
+ {
+ gcc_checking_assert (allow_non_constant);
+ return t;
+ }
+
+ constexpr_global_ctx global_ctx;
+ constexpr_ctx ctx
+ = {&global_ctx, NULL,
+ NULL, NULL,
+ NULL, NULL,
+ NULL, allow_non_constant,
+ strict, manifestly_const_eval || !allow_non_constant};
+
+ /* Turn off -frounding-math for manifestly constant evaluation. */
+ warning_sentinel rm (flag_rounding_math, ctx.manifestly_const_eval);
+ tree type = initialized_type (t);
+ tree r = t;
+ bool is_consteval = false;
+ if (VOID_TYPE_P (type))
+ {
+ if (constexpr_dtor)
+ /* Used for destructors of array elements. */
+ type = TREE_TYPE (object);
+ else
+ {
+ if (TREE_CODE (t) != CALL_EXPR)
+ return t;
+ /* Calls to immediate functions returning void need to be
+ evaluated. */
+ tree fndecl = rs_get_callee_fndecl_nofold (t);
+ if (fndecl == NULL_TREE || !DECL_IMMEDIATE_FUNCTION_P (fndecl))
+ return t;
+ else
+ is_consteval = true;
+ }
+ }
+ else if ((TREE_CODE (t) == CALL_EXPR || TREE_CODE (t) == TARGET_EXPR))
+ {
+ /* For non-concept checks, determine if it is consteval. */
+ tree x = t;
+ if (TREE_CODE (x) == TARGET_EXPR)
+ x = TARGET_EXPR_INITIAL (x);
+ tree fndecl = rs_get_callee_fndecl_nofold (x);
+ if (fndecl && DECL_IMMEDIATE_FUNCTION_P (fndecl))
+ is_consteval = true;
+ }
+ if (AGGREGATE_TYPE_P (type) || VECTOR_TYPE_P (type))
+ {
+ /* In C++14 an NSDMI can participate in aggregate initialization,
+ and can refer to the address of the object being initialized, so
+ we need to pass in the relevant VAR_DECL if we want to do the
+ evaluation in a single pass. The evaluation will dynamically
+ update ctx.values for the VAR_DECL. We use the same strategy
+ for C++11 constexpr constructors that refer to the object being
+ initialized. */
+ if (constexpr_dtor)
+ {
+ gcc_assert (object && VAR_P (object));
+ gcc_assert (DECL_DECLARED_CONSTEXPR_P (object));
+ gcc_assert (DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (object));
+ if (error_operand_p (DECL_INITIAL (object)))
+ return t;
+ ctx.ctor = unshare_expr (DECL_INITIAL (object));
+ TREE_READONLY (ctx.ctor) = false;
+ /* Temporarily force decl_really_constant_value to return false
+ for it, we want to use ctx.ctor for the current value instead. */
+ DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (object) = false;
+ }
+ else
+ {
+ ctx.ctor = build_constructor (type, NULL);
+ CONSTRUCTOR_NO_CLEARING (ctx.ctor) = true;
+ }
+ if (!object)
+ {
+ if (TREE_CODE (t) == TARGET_EXPR)
+ object = TARGET_EXPR_SLOT (t);
+ }
+ ctx.object = object;
+ if (object)
+ gcc_assert (
+ same_type_ignoring_top_level_qualifiers_p (type, TREE_TYPE (object)));
+ if (object && DECL_P (object))
+ global_ctx.values.put (object, ctx.ctor);
+ if (TREE_CODE (r) == TARGET_EXPR)
+ /* Avoid creating another CONSTRUCTOR when we expand the
+ TARGET_EXPR. */
+ r = TARGET_EXPR_INITIAL (r);
+ }
+
+ auto_vec<tree, 16> cleanups;
+ global_ctx.cleanups = &cleanups;
+
+ if (manifestly_const_eval)
+ instantiate_constexpr_fns (r);
+ r = eval_constant_expression (&ctx, r, false, &non_constant_p, &overflow_p);
+
+ if (!constexpr_dtor)
+ verify_constant (r, allow_non_constant, &non_constant_p, &overflow_p);
+ else
+ DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (object) = true;
+
+ unsigned int i;
+ tree cleanup;
+ /* Evaluate the cleanups. */
+ FOR_EACH_VEC_ELT_REVERSE (cleanups, i, cleanup)
+ eval_constant_expression (&ctx, cleanup, false, &non_constant_p,
+ &overflow_p);
+
+ /* Mutable logic is a bit tricky: we want to allow initialization of
+ constexpr variables with mutable members, but we can't copy those
+ members to another constexpr variable. */
+ if (TREE_CODE (r) == CONSTRUCTOR && CONSTRUCTOR_MUTABLE_POISON (r))
+ {
+ if (!allow_non_constant)
+ error ("%qE is not a constant expression because it refers to "
+ "mutable subobjects of %qT",
+ t, type);
+ non_constant_p = true;
+ }
+
+ if (TREE_CODE (r) == CONSTRUCTOR && CONSTRUCTOR_NO_CLEARING (r))
+ {
+ if (!allow_non_constant)
+ error ("%qE is not a constant expression because it refers to "
+ "an incompletely initialized variable",
+ t);
+ TREE_CONSTANT (r) = false;
+ non_constant_p = true;
+ }
+
+ if (!global_ctx.heap_vars.is_empty ())
+ {
+ tree heap_var
+ = rs_walk_tree_without_duplicates (&r, find_heap_var_refs, NULL);
+ unsigned int i;
+ if (heap_var)
+ {
+ if (!allow_non_constant && !non_constant_p)
+ error_at (DECL_SOURCE_LOCATION (heap_var),
+ "%qE is not a constant expression because it refers to "
+ "a result of %<operator new%>",
+ t);
+ r = t;
+ non_constant_p = true;
+ }
+ FOR_EACH_VEC_ELT (global_ctx.heap_vars, i, heap_var)
+ {
+ if (DECL_NAME (heap_var) != heap_deleted_identifier)
+ {
+ if (!allow_non_constant && !non_constant_p)
+ error_at (DECL_SOURCE_LOCATION (heap_var),
+ "%qE is not a constant expression because allocated "
+ "storage has not been deallocated",
+ t);
+ r = t;
+ non_constant_p = true;
+ }
+ varpool_node::get (heap_var)->remove ();
+ }
+ }
+
+ /* Check that immediate invocation does not return an expression referencing
+ any immediate function decls. */
+ if (is_consteval || in_immediate_context ())
+ if (tree immediate_fndecl
+ = rs_walk_tree_without_duplicates (&r, find_immediate_fndecl, NULL))
+ {
+ if (!allow_non_constant && !non_constant_p)
+ error_at (rs_expr_loc_or_input_loc (t),
+ "immediate evaluation returns address of immediate "
+ "function %qD",
+ immediate_fndecl);
+ r = t;
+ non_constant_p = true;
+ }
+
+ if (non_constant_p)
+ /* If we saw something bad, go back to our argument. The wrapping below is
+ only for the cases of TREE_CONSTANT argument or overflow. */
+ r = t;
+
+ if (!non_constant_p && overflow_p)
+ non_constant_p = true;
+
+ /* Unshare the result. */
+ bool should_unshare = true;
+ if (r == t || (TREE_CODE (t) == TARGET_EXPR && TARGET_EXPR_INITIAL (t) == r))
+ should_unshare = false;
+
+ if (non_constant_p && !allow_non_constant)
+ return error_mark_node;
+ else if (constexpr_dtor)
+ return r;
+ else if (non_constant_p && TREE_CONSTANT (r))
+ {
+ /* This isn't actually constant, so unset TREE_CONSTANT.
+ Don't clear TREE_CONSTANT on ADDR_EXPR, as the middle-end requires
+ it to be set if it is invariant address, even when it is not
+ a valid C++ constant expression. Wrap it with a NOP_EXPR
+ instead. */
+ if (EXPR_P (r) && TREE_CODE (r) != ADDR_EXPR)
+ r = copy_node (r);
+ else if (TREE_CODE (r) == CONSTRUCTOR)
+ r = build1 (VIEW_CONVERT_EXPR, TREE_TYPE (r), r);
+ else
+ r = build_nop (TREE_TYPE (r), r);
+ TREE_CONSTANT (r) = false;
+ }
+ else if (non_constant_p)
+ return t;
+
+ if (should_unshare)
+ r = unshare_expr (r);
+
+ if (TREE_CODE (r) == CONSTRUCTOR && CLASS_TYPE_P (TREE_TYPE (r)))
+ {
+ r = adjust_temp_type (type, r);
+ if (TREE_CODE (t) == TARGET_EXPR && TARGET_EXPR_INITIAL (t) == r)
+ return t;
+ }
+
+ /* Remember the original location if that wouldn't need a wrapper. */
+ if (location_t loc = EXPR_LOCATION (t))
+ protected_set_expr_location (r, loc);
+
+ return r;
+}
+
+/* Like is_constant_expression, but allow const variables that are not allowed
+ under constexpr rules. */
+
+bool
+is_static_init_expression (tree t)
+{
+ return potential_constant_expression_1 (t, false, false, true, tf_none);
+}
+
+/* Like potential_constant_expression, but don't consider possible constexpr
+ substitution of the current function. That is, PARM_DECL qualifies under
+ potential_constant_expression, but not here.
+
+ This is basically what you can check when any actual constant values might
+ be value-dependent. */
+
+bool
+is_constant_expression (tree t)
+{
+ return potential_constant_expression_1 (t, false, true, true, tf_none);
+}
+
+/* Returns true if T is a potential static initializer expression that is not
+ instantiation-dependent. */
+
+bool
+is_nondependent_static_init_expression (tree t)
+{
+ return (!type_unknown_p (t) && is_static_init_expression (t));
+}
+
+/* Like maybe_constant_value, but returns a CONSTRUCTOR directly, rather
+ than wrapped in a TARGET_EXPR.
+ ALLOW_NON_CONSTANT is false if T is required to be a constant expression.
+ MANIFESTLY_CONST_EVAL is true if T is manifestly const-evaluated as
+ per P0595 even when ALLOW_NON_CONSTANT is true. */
+
+static tree
+maybe_constant_init_1 (tree t, tree decl, bool allow_non_constant,
+ bool manifestly_const_eval)
+{
+ if (!t)
+ return t;
+ if (TREE_CODE (t) == EXPR_STMT)
+ t = TREE_OPERAND (t, 0);
+ if (TREE_CODE (t) == CONVERT_EXPR && VOID_TYPE_P (TREE_TYPE (t)))
+ t = TREE_OPERAND (t, 0);
+ if (TREE_CODE (t) == INIT_EXPR)
+ t = TREE_OPERAND (t, 1);
+ if (TREE_CODE (t) == TARGET_EXPR)
+ t = TARGET_EXPR_INITIAL (t);
+ if (!is_nondependent_static_init_expression (t))
+ /* Don't try to evaluate it. */;
+ else if (CONSTANT_CLASS_P (t) && allow_non_constant)
+ /* No evaluation needed. */;
+ else
+ t = cxx_eval_outermost_constant_expr (t, allow_non_constant,
+ /*strict*/ false,
+ manifestly_const_eval, false, decl);
+ if (TREE_CODE (t) == TARGET_EXPR)
+ {
+ tree init = TARGET_EXPR_INITIAL (t);
+ if (TREE_CODE (init) == CONSTRUCTOR)
+ t = init;
+ }
+ return t;
+}
+
+/* Wrapper for maybe_constant_init_1 which permits non constants. */
+
+tree
+maybe_constant_init (tree t, tree decl, bool manifestly_const_eval)
+{
+ return maybe_constant_init_1 (t, decl, true, manifestly_const_eval);
+}
+
+/* Returns true if T is a potential constant expression that is not
+ instantiation-dependent, and therefore a candidate for constant folding even
+ in a template. */
+
+bool
+is_nondependent_constant_expression (tree t)
+{
+ return (!type_unknown_p (t) && is_constant_expression (t)
+ && !instantiation_dependent_expression_p (t));
+}
+
+// forked from gcc/cp/parser.cc cp_unevaluated_operand
+
+/* Nonzero if we are parsing an unevaluated operand: an operand to
+ sizeof, typeof, or alignof. */
+int cp_unevaluated_operand;
+
+// forked from gcc/cp/constexpr.cc cv_cache
+
+/* If T is a constant expression, returns its reduced value.
+ Otherwise, if T does not have TREE_CONSTANT set, returns T.
+ Otherwise, returns a version of T without TREE_CONSTANT.
+ MANIFESTLY_CONST_EVAL is true if T is manifestly const-evaluated
+ as per P0595. */
+
+static GTY ((deletable)) hash_map<tree, tree> *cv_cache;
+
+// forked from gcc/cp/constexpr.cc maybe_constant_value
+
+tree
+maybe_constant_value (tree t, tree decl, bool manifestly_const_eval)
+{
+ tree r;
+
+ if (!is_nondependent_constant_expression (t))
+ {
+ if (TREE_OVERFLOW_P (t))
+ {
+ t = build_nop (TREE_TYPE (t), t);
+ TREE_CONSTANT (t) = false;
+ }
+ return t;
+ }
+ else if (CONSTANT_CLASS_P (t))
+ /* No caching or evaluation needed. */
+ return t;
+
+ if (manifestly_const_eval)
+ return cxx_eval_outermost_constant_expr (t, true, true, true, false, decl);
+
+ if (cv_cache == NULL)
+ cv_cache = hash_map<tree, tree>::create_ggc (101);
+ if (tree *cached = cv_cache->get (t))
+ {
+ r = *cached;
+ if (r != t)
+ {
+	  // Faisal: commenting this out as I am not sure it is needed, and
+	  // it is huge. The call would be:
+	  // r = break_out_target_exprs (r, /*clear_loc*/true);
+ protected_set_expr_location (r, EXPR_LOCATION (t));
+ }
+ return r;
+ }
+
+ /* Don't evaluate an unevaluated operand. */
+ if (cp_unevaluated_operand)
+ return t;
+
+ uid_sensitive_constexpr_evaluation_checker c;
+ r = cxx_eval_outermost_constant_expr (t, true, true, false, false, decl);
+ gcc_checking_assert (
+ r == t || CONVERT_EXPR_P (t) || TREE_CODE (t) == VIEW_CONVERT_EXPR
+ || (TREE_CONSTANT (t) && !TREE_CONSTANT (r)) || !rs_tree_equal (r, t));
+ if (!c.evaluation_restricted_p ())
+ cv_cache->put (t, r);
+ return r;
+}
+
+// forked from gcc/cp/constexpr.cc
+
+bool
+potential_constant_expression (tree t)
+{
+ return potential_constant_expression_1 (t, false, true, false, tf_none);
+}
+
+/* Data structure for passing data from potential_constant_expression_1
+ to check_for_return_continue via cp_walk_tree. */
+struct check_for_return_continue_data
+{
+ hash_set<tree> *pset;
+ tree continue_stmt;
+ tree break_stmt;
+};
+
+/* Helper function for potential_constant_expression_1 SWITCH_STMT handling,
+ called through cp_walk_tree. Return the first RETURN_EXPR found, or note
+ the first CONTINUE_STMT and/or BREAK_STMT if RETURN_EXPR is not found. */
+static tree
+check_for_return_continue (tree *tp, int *walk_subtrees, void *data)
+{
+ tree t = *tp, s, b;
+ check_for_return_continue_data *d = (check_for_return_continue_data *) data;
+ switch (TREE_CODE (t))
+ {
+ case RETURN_EXPR:
+ return t;
+
+ case CONTINUE_STMT:
+ if (d->continue_stmt == NULL_TREE)
+ d->continue_stmt = t;
+ break;
+
+ case BREAK_STMT:
+ if (d->break_stmt == NULL_TREE)
+ d->break_stmt = t;
+ break;
+
+#define RECUR(x) \
+ if (tree r = rs_walk_tree (&x, check_for_return_continue, data, d->pset)) \
+ return r
+
+ /* For loops, walk subtrees manually, so that continue stmts found
+ inside of the bodies of the loops are ignored. */
+
+ case WHILE_STMT:
+ *walk_subtrees = 0;
+ RECUR (WHILE_COND (t));
+ s = d->continue_stmt;
+ b = d->break_stmt;
+ RECUR (WHILE_BODY (t));
+ d->continue_stmt = s;
+ d->break_stmt = b;
+ break;
+
+ case FOR_STMT:
+ *walk_subtrees = 0;
+ RECUR (FOR_INIT_STMT (t));
+ RECUR (FOR_COND (t));
+ RECUR (FOR_EXPR (t));
+ s = d->continue_stmt;
+ b = d->break_stmt;
+ RECUR (FOR_BODY (t));
+ d->continue_stmt = s;
+ d->break_stmt = b;
+ break;
+
+ case RANGE_FOR_STMT:
+ *walk_subtrees = 0;
+ RECUR (RANGE_FOR_EXPR (t));
+ s = d->continue_stmt;
+ b = d->break_stmt;
+ RECUR (RANGE_FOR_BODY (t));
+ d->continue_stmt = s;
+ d->break_stmt = b;
+ break;
+
+ case SWITCH_STMT:
+ *walk_subtrees = 0;
+ RECUR (SWITCH_STMT_COND (t));
+ b = d->break_stmt;
+ RECUR (SWITCH_STMT_BODY (t));
+ d->break_stmt = b;
+ break;
+#undef RECUR
+
+ case STATEMENT_LIST:
+ case CONSTRUCTOR:
+ break;
+
+ default:
+ if (!EXPR_P (t))
+ *walk_subtrees = 0;
+ break;
+ }
+
+ return NULL_TREE;
+}
+
+/* Returns the namespace that contains DECL, whether directly or
+ indirectly. */
+
+tree
+decl_namespace_context (tree decl)
+{
+ while (1)
+ {
+ if (TREE_CODE (decl) == NAMESPACE_DECL)
+ return decl;
+ else if (TYPE_P (decl))
+ decl = CP_DECL_CONTEXT (TYPE_MAIN_DECL (decl));
+ else
+ decl = CP_DECL_CONTEXT (decl);
+ }
+}
+
+/* Returns true if DECL is in the std namespace. */
+
+bool
+decl_in_std_namespace_p (tree decl)
+{
+ while (decl)
+ {
+ decl = decl_namespace_context (decl);
+ if (DECL_NAMESPACE_STD_P (decl))
+ return true;
+ /* Allow inline namespaces inside of std namespace, e.g. with
+ --enable-symvers=gnu-versioned-namespace std::forward would be
+ actually std::_8::forward. */
+ if (!DECL_NAMESPACE_INLINE_P (decl))
+ return false;
+ decl = CP_DECL_CONTEXT (decl);
+ }
+ return false;
+}
+
+/* Return true if FNDECL is std::construct_at. */
+
+static inline bool
+is_std_construct_at (tree fndecl)
+{
+ if (!decl_in_std_namespace_p (fndecl))
+ return false;
+
+ tree name = DECL_NAME (fndecl);
+ return name && id_equal (name, "construct_at");
+}
+
+/* Return true if FNDECL is __dynamic_cast. */
+
+static inline bool
+cxx_dynamic_cast_fn_p (tree fndecl)
+{
+ return (id_equal (DECL_NAME (fndecl), "__dynamic_cast")
+ && CP_DECL_CONTEXT (fndecl) == global_namespace);
+}
+
+/* Return true if FNDECL is std::allocator<T>::{,de}allocate. */
+
+static inline bool
+is_std_allocator_allocate (tree fndecl)
+{
+ tree name = DECL_NAME (fndecl);
+ if (name == NULL_TREE
+ || !(id_equal (name, "allocate") || id_equal (name, "deallocate")))
+ return false;
+
+ tree ctx = DECL_CONTEXT (fndecl);
+ if (ctx == NULL_TREE || !CLASS_TYPE_P (ctx) || !TYPE_MAIN_DECL (ctx))
+ return false;
+
+ tree decl = TYPE_MAIN_DECL (ctx);
+ name = DECL_NAME (decl);
+ if (name == NULL_TREE || !id_equal (name, "allocator"))
+ return false;
+
+ return decl_in_std_namespace_p (decl);
+}
+
+/* Overload for the above taking constexpr_call*. */
+
+static inline bool
+is_std_allocator_allocate (const constexpr_call *call)
+{
+ return (call && call->fundef
+ && is_std_allocator_allocate (call->fundef->decl));
+}
+
+/* Return true if T denotes a potentially constant expression. Issue
+ diagnostic as appropriate under control of FLAGS. If WANT_RVAL is true,
+ an lvalue-rvalue conversion is implied. If NOW is true, we want to
+ consider the expression in the current context, independent of constexpr
+ substitution.
+
+ C++0x [expr.const] used to say
+
+ 6 An expression is a potential constant expression if it is
+ a constant expression where all occurrences of function
+ parameters are replaced by arbitrary constant expressions
+ of the appropriate type.
+
+ 2 A conditional expression is a constant expression unless it
+ involves one of the following as a potentially evaluated
+ subexpression (3.2), but subexpressions of logical AND (5.14),
+ logical OR (5.15), and conditional (5.16) operations that are
+ not evaluated are not considered. */
+
+static bool
+potential_constant_expression_1 (tree t, bool want_rval, bool strict, bool now,
+ tsubst_flags_t flags, tree *jump_target)
+{
+#define RECUR(T, RV) \
+ potential_constant_expression_1 ((T), (RV), strict, now, flags, jump_target)
+
+ enum
+ {
+ any = false,
+ rval = true
+ };
+ int i;
+ tree tmp;
+
+ if (t == error_mark_node)
+ return false;
+ if (t == NULL_TREE)
+ return true;
+ location_t loc = rs_expr_loc_or_input_loc (t);
+
+ if (*jump_target)
+ /* If we are jumping, ignore everything. This is simpler than the
+ cxx_eval_constant_expression handling because we only need to be
+ conservatively correct, and we don't necessarily have a constant value
+ available, so we don't bother with switch tracking. */
+ return true;
+
+ if (TREE_THIS_VOLATILE (t) && want_rval)
+ {
+ if (flags & tf_error)
+ error_at (loc,
+ "lvalue-to-rvalue conversion of a volatile lvalue "
+ "%qE with type %qT",
+ t, TREE_TYPE (t));
+ return false;
+ }
+ if (CONSTANT_CLASS_P (t))
+ return true;
+ if (CODE_CONTAINS_STRUCT (TREE_CODE (t), TS_TYPED)
+ && TREE_TYPE (t) == error_mark_node)
+ return false;
+
+ switch (TREE_CODE (t))
+ {
+ case FUNCTION_DECL:
+ case OVERLOAD:
+ case LABEL_DECL:
+ case CASE_LABEL_EXPR:
+ case PREDICT_EXPR:
+ case CONST_DECL:
+ case IDENTIFIER_NODE:
+ /* We can see a FIELD_DECL in a pointer-to-member expression. */
+ case FIELD_DECL:
+ case RESULT_DECL:
+ case PLACEHOLDER_EXPR:
+ case STATIC_ASSERT:
+ return true;
+
+ case RETURN_EXPR:
+ if (!RECUR (TREE_OPERAND (t, 0), any))
+ return false;
+ /* FALLTHROUGH */
+
+ case BREAK_STMT:
+ case CONTINUE_STMT:
+ *jump_target = t;
+ return true;
+
+ case PARM_DECL:
+ if (now && want_rval)
+ {
+ tree type = TREE_TYPE (t);
+ if (is_really_empty_class (type, /*ignore_vptr*/ false))
+ /* An empty class has no data to read. */
+ return true;
+ if (flags & tf_error)
+ error ("%qE is not a constant expression", t);
+ return false;
+ }
+ return true;
+
+ case CALL_EXPR:
+ /* -- an invocation of a function other than a constexpr function
+ or a constexpr constructor. */
+ {
+ tree fun = get_function_named_in_call (t);
+ const int nargs = call_expr_nargs (t);
+ i = 0;
+
+ if (fun == NULL_TREE)
+ {
+ /* Reset to allow the function to continue past the end
+ of the block below. Otherwise return early. */
+ bool bail = true;
+
+ if (TREE_CODE (t) == CALL_EXPR && CALL_EXPR_FN (t) == NULL_TREE)
+ switch (CALL_EXPR_IFN (t))
+ {
+ /* These should be ignored, they are optimized away from
+ constexpr functions. */
+ case IFN_UBSAN_NULL:
+ case IFN_UBSAN_BOUNDS:
+ case IFN_UBSAN_VPTR:
+ case IFN_FALLTHROUGH:
+ return true;
+
+ case IFN_ADD_OVERFLOW:
+ case IFN_SUB_OVERFLOW:
+ case IFN_MUL_OVERFLOW:
+ case IFN_LAUNDER:
+ case IFN_VEC_CONVERT:
+ bail = false;
+ break;
+
+ default:
+ break;
+ }
+
+ if (bail)
+ {
+ /* fold_call_expr can't do anything with IFN calls. */
+ if (flags & tf_error)
+ error_at (loc, "call to internal function %qE", t);
+ return false;
+ }
+ }
+
+ if (fun && is_overloaded_fn (fun))
+ {
+ if (TREE_CODE (fun) == FUNCTION_DECL)
+ {
+ if (builtin_valid_in_constant_expr_p (fun))
+ return true;
+ if (!maybe_constexpr_fn (fun)
+ /* Allow any built-in function; if the expansion
+ isn't constant, we'll deal with that then. */
+ && !fndecl_built_in_p (fun)
+ /* In C++20, replaceable global allocation functions
+ are constant expressions. */
+ && (/* !cxx_replaceable_global_alloc_fn (fun)
+ ||*/ TREE_CODE (t) != CALL_EXPR
+ || (!CALL_FROM_NEW_OR_DELETE_P (t)
+ && (current_function_decl == NULL_TREE
+ /*|| !is_std_allocator_allocate(current_function_decl)*/)))
+ /* Allow placement new in std::construct_at. */
+ && (/*!cxx_placement_new_fn (fun)
+ ||*/ TREE_CODE (t) != CALL_EXPR
+ || current_function_decl == NULL_TREE
+ /*|| !is_std_construct_at (current_function_decl)*/)
+ /* && !cxx_dynamic_cast_fn_p (fun)*/)
+ {
+ if (flags & tf_error)
+ {
+ error_at (loc, "call to non-%<constexpr%> function %qD",
+ fun);
+ explain_invalid_constexpr_fn (fun);
+ }
+ return false;
+ }
+ /* A call to a non-static member function takes the address
+ of the object as the first argument. But in a constant
+ expression the address will be folded away, so look
+ through it now. */
+ if (DECL_NONSTATIC_MEMBER_FUNCTION_P (fun)
+ && !DECL_CONSTRUCTOR_P (fun))
+ {
+ tree x = CALL_EXPR_ARG (t, 0);
+
+ /* Don't require an immediately constant value, as
+ constexpr substitution might not use the value. */
+ bool sub_now = false;
+ if (!potential_constant_expression_1 (x, rval, strict,
+ sub_now, flags,
+ jump_target))
+ return false;
+ i = 1;
+ }
+ }
+ else
+ {
+ if (!RECUR (fun, true))
+ return false;
+ fun = get_first_fn (fun);
+ }
+ fun = DECL_ORIGIN (fun);
+ }
+ else if (fun)
+ {
+ if (RECUR (fun, rval))
+ /* Might end up being a constant function pointer. */;
+ else
+ return false;
+ }
+ for (; i < nargs; ++i)
+ {
+ tree x = CALL_EXPR_ARG (t, i);
+ /* In a template, reference arguments haven't been converted to
+ REFERENCE_TYPE and we might not even know if the parameter
+ is a reference, so accept lvalue constants too. */
+ bool rv = rval;
+ /* Don't require an immediately constant value, as constexpr
+ substitution might not use the value of the argument. */
+ bool sub_now = false;
+ if (!potential_constant_expression_1 (x, rv, strict, sub_now, flags,
+ jump_target))
+ return false;
+ }
+ return true;
+ }
+
+ case NON_LVALUE_EXPR:
+ /* -- an lvalue-to-rvalue conversion (4.1) unless it is applied to
+ -- an lvalue of integral type that refers to a non-volatile
+ const variable or static data member initialized with
+ constant expressions, or
+
+ -- an lvalue of literal type that refers to non-volatile
+ object defined with constexpr, or that refers to a
+ sub-object of such an object; */
+ return RECUR (TREE_OPERAND (t, 0), rval);
+
+ case VAR_DECL:
+ if (DECL_HAS_VALUE_EXPR_P (t))
+ {
+ return RECUR (DECL_VALUE_EXPR (t), rval);
+ }
+ if (want_rval && !var_in_maybe_constexpr_fn (t)
+ && !decl_maybe_constant_var_p (t)
+ && (strict || !RS_TYPE_CONST_NON_VOLATILE_P (TREE_TYPE (t))
+ || (DECL_INITIAL (t)
+ && !DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (t)))
+ && COMPLETE_TYPE_P (TREE_TYPE (t))
+ && !is_really_empty_class (TREE_TYPE (t), /*ignore_vptr*/ false))
+ {
+ if (flags & tf_error)
+ non_const_var_error (loc, t);
+ return false;
+ }
+ return true;
+
+ /* FALLTHRU */
+ case NOP_EXPR:
+ case CONVERT_EXPR:
+ case VIEW_CONVERT_EXPR:
+ /* -- a reinterpret_cast. FIXME not implemented, and this rule
+ may change to something more specific to type-punning (DR 1312). */
+ {
+ tree from = TREE_OPERAND (t, 0);
+ if (location_wrapper_p (t))
+ return (RECUR (from, want_rval));
+ if (INDIRECT_TYPE_P (TREE_TYPE (t)))
+ {
+ STRIP_ANY_LOCATION_WRAPPER (from);
+ if (TREE_CODE (from) == INTEGER_CST && !integer_zerop (from))
+ {
+ if (flags & tf_error)
+ error_at (loc,
+ "%<reinterpret_cast%> from integer to pointer");
+ return false;
+ }
+ }
+ return (RECUR (from, TREE_CODE (t) != VIEW_CONVERT_EXPR));
+ }
+
+ case ADDR_EXPR:
+ /* -- a unary operator & that is applied to an lvalue that
+ designates an object with thread or automatic storage
+ duration; */
+ t = TREE_OPERAND (t, 0);
+
+ if (TREE_CODE (t) == OFFSET_REF && PTRMEM_OK_P (t))
+ /* A pointer-to-member constant. */
+ return true;
+
+ // handle_addr_expr:
+#if 0
+ /* FIXME adjust when issue 1197 is fully resolved. For now don't do
+ any checking here, as we might dereference the pointer later. If
+ we remove this code, also remove check_automatic_or_tls. */
+ i = check_automatic_or_tls (t);
+ if (i == ck_ok)
+ return true;
+ if (i == ck_bad)
+ {
+ if (flags & tf_error)
+ error ("address-of an object %qE with thread local or "
+ "automatic storage is not a constant expression", t);
+ return false;
+ }
+#endif
+ return RECUR (t, any);
+
+ case COMPONENT_REF:
+ /* -- a class member access unless its postfix-expression is
+ of literal type or of pointer to literal type. */
+ /* This test would be redundant, as it follows from the
+ postfix-expression being a potential constant expression. */
+ if (type_unknown_p (t))
+ return true;
+ if (is_overloaded_fn (t))
+ /* In a template, a COMPONENT_REF of a function expresses ob.fn(),
+ which uses ob as an lvalue. */
+ want_rval = false;
+ gcc_fallthrough ();
+
+ case REALPART_EXPR:
+ case IMAGPART_EXPR:
+ case BIT_FIELD_REF:
+ return RECUR (TREE_OPERAND (t, 0), want_rval);
+
+ case INDIRECT_REF: {
+ tree x = TREE_OPERAND (t, 0);
+ STRIP_NOPS (x);
+ return RECUR (x, rval);
+ }
+
+ case STATEMENT_LIST:
+ for (tree stmt : tsi_range (t))
+ if (!RECUR (stmt, any))
+ return false;
+ return true;
+
+ case MODIFY_EXPR:
+ if (!RECUR (TREE_OPERAND (t, 0), any))
+ return false;
+ /* Just ignore clobbers. */
+ if (TREE_CLOBBER_P (TREE_OPERAND (t, 1)))
+ return true;
+ if (!RECUR (TREE_OPERAND (t, 1), rval))
+ return false;
+ return true;
+
+ case FOR_STMT:
+ if (!RECUR (FOR_INIT_STMT (t), any))
+ return false;
+ tmp = FOR_COND (t);
+ if (!RECUR (tmp, rval))
+ return false;
+ if (tmp)
+ {
+ tmp = cxx_eval_outermost_constant_expr (tmp, true);
+ /* If we couldn't evaluate the condition, it might not ever be
+ true. */
+ if (!integer_onep (tmp))
+ {
+ /* Before returning true, check if the for body can contain
+ a return. */
+ hash_set<tree> pset;
+ check_for_return_continue_data data
+ = {&pset, NULL_TREE, NULL_TREE};
+ if (tree ret_expr
+ = rs_walk_tree (&FOR_BODY (t), check_for_return_continue,
+ &data, &pset))
+ *jump_target = ret_expr;
+ return true;
+ }
+ }
+ if (!RECUR (FOR_EXPR (t), any))
+ return false;
+ if (!RECUR (FOR_BODY (t), any))
+ return false;
+ if (breaks (jump_target) || continues (jump_target))
+ *jump_target = NULL_TREE;
+ return true;
+
+ case WHILE_STMT:
+ tmp = WHILE_COND (t);
+ if (!RECUR (tmp, rval))
+ return false;
+
+ tmp = cxx_eval_outermost_constant_expr (tmp, true);
+ /* If we couldn't evaluate the condition, it might not ever be true. */
+ if (!integer_onep (tmp))
+ {
+ /* Before returning true, check if the while body can contain
+ a return. */
+ hash_set<tree> pset;
+ check_for_return_continue_data data = {&pset, NULL_TREE, NULL_TREE};
+ if (tree ret_expr
+ = rs_walk_tree (&WHILE_BODY (t), check_for_return_continue, &data,
+ &pset))
+ *jump_target = ret_expr;
+ return true;
+ }
+ if (!RECUR (WHILE_BODY (t), any))
+ return false;
+ if (breaks (jump_target) || continues (jump_target))
+ *jump_target = NULL_TREE;
+ return true;
+
+ case SWITCH_STMT:
+ if (!RECUR (SWITCH_STMT_COND (t), rval))
+ return false;
+ /* FIXME we don't check SWITCH_STMT_BODY currently, because even
+ unreachable labels would be checked and it is enough if there is
+ a single switch cond value for which it is a valid constant
+ expression. We need to check if there are any RETURN_EXPRs
+ or CONTINUE_STMTs inside of the body though, as in that case
+ we need to set *jump_target. */
+ else
+ {
+ hash_set<tree> pset;
+ check_for_return_continue_data data = {&pset, NULL_TREE, NULL_TREE};
+ if (tree ret_expr
+ = rs_walk_tree (&SWITCH_STMT_BODY (t), check_for_return_continue,
+ &data, &pset))
+ /* The switch might return. */
+ *jump_target = ret_expr;
+ else if (data.continue_stmt)
+ /* The switch can't return, but might continue. */
+ *jump_target = data.continue_stmt;
+ }
+ return true;
+
+ case DYNAMIC_CAST_EXPR:
+ case PSEUDO_DTOR_EXPR:
+ case NEW_EXPR:
+ case VEC_NEW_EXPR:
+ case DELETE_EXPR:
+ case VEC_DELETE_EXPR:
+ case THROW_EXPR:
+ case OMP_PARALLEL:
+ case OMP_TASK:
+ case OMP_FOR:
+ case OMP_SIMD:
+ case OMP_DISTRIBUTE:
+ case OMP_TASKLOOP:
+ case OMP_LOOP:
+ case OMP_TEAMS:
+ case OMP_TARGET_DATA:
+ case OMP_TARGET:
+ case OMP_SECTIONS:
+ case OMP_ORDERED:
+ case OMP_CRITICAL:
+ case OMP_SINGLE:
+ case OMP_SECTION:
+ case OMP_MASTER:
+ case OMP_MASKED:
+ case OMP_TASKGROUP:
+ case OMP_TARGET_UPDATE:
+ case OMP_TARGET_ENTER_DATA:
+ case OMP_TARGET_EXIT_DATA:
+ case OMP_ATOMIC:
+ case OMP_ATOMIC_READ:
+ case OMP_ATOMIC_CAPTURE_OLD:
+ case OMP_ATOMIC_CAPTURE_NEW:
+ case OMP_DEPOBJ:
+ case OACC_PARALLEL:
+ case OACC_KERNELS:
+ case OACC_SERIAL:
+ case OACC_DATA:
+ case OACC_HOST_DATA:
+ case OACC_LOOP:
+ case OACC_CACHE:
+ case OACC_DECLARE:
+ case OACC_ENTER_DATA:
+ case OACC_EXIT_DATA:
+ case OACC_UPDATE:
+ /* GCC internal stuff. */
+ case VA_ARG_EXPR:
+ case TRANSACTION_EXPR:
+ case AT_ENCODE_EXPR:
+
+ if (flags & tf_error)
+ error_at (loc, "expression %qE is not a constant expression", t);
+ return false;
+
+ case ASM_EXPR:
+ if (flags & tf_error)
+ inline_asm_in_constexpr_error (loc);
+ return false;
+
+ case OBJ_TYPE_REF:
+ return true;
+
+ case POINTER_DIFF_EXPR:
+ case MINUS_EXPR:
+ want_rval = true;
+ goto binary;
+
+ case LT_EXPR:
+ case LE_EXPR:
+ case GT_EXPR:
+ case GE_EXPR:
+ case EQ_EXPR:
+ case NE_EXPR:
+ case SPACESHIP_EXPR:
+ want_rval = true;
+ goto binary;
+
+ case PREINCREMENT_EXPR:
+ case POSTINCREMENT_EXPR:
+ case PREDECREMENT_EXPR:
+ case POSTDECREMENT_EXPR:
+ goto unary;
+
+ case BIT_NOT_EXPR:
+ /* A destructor. */
+ if (TYPE_P (TREE_OPERAND (t, 0)))
+ return true;
+ /* fall through. */
+
+ case CONJ_EXPR:
+ case SAVE_EXPR:
+ case FIX_TRUNC_EXPR:
+ case FLOAT_EXPR:
+ case NEGATE_EXPR:
+ case ABS_EXPR:
+ case ABSU_EXPR:
+ case TRUTH_NOT_EXPR:
+ case FIXED_CONVERT_EXPR:
+ case UNARY_PLUS_EXPR:
+ case UNARY_LEFT_FOLD_EXPR:
+ case UNARY_RIGHT_FOLD_EXPR:
+ unary:
+ return RECUR (TREE_OPERAND (t, 0), rval);
+
+ case BIND_EXPR:
+ return RECUR (BIND_EXPR_BODY (t), want_rval);
+
+ case CLEANUP_POINT_EXPR:
+ case EXPR_STMT:
+ case PAREN_EXPR:
+ case NON_DEPENDENT_EXPR:
+ /* For convenience. */
+ case LOOP_EXPR:
+ case EXIT_EXPR:
+ return RECUR (TREE_OPERAND (t, 0), want_rval);
+
+ case DECL_EXPR:
+ tmp = DECL_EXPR_DECL (t);
+ if (VAR_P (tmp) && !DECL_ARTIFICIAL (tmp))
+ {
+ if (RS_DECL_THREAD_LOCAL_P (tmp))
+ {
+ if (flags & tf_error)
+ error_at (DECL_SOURCE_LOCATION (tmp),
+ "%qD declared "
+ "%<thread_local%> in %<constexpr%> context",
+ tmp);
+ return false;
+ }
+ else if (TREE_STATIC (tmp))
+ {
+ if (flags & tf_error)
+ error_at (DECL_SOURCE_LOCATION (tmp),
+ "%qD declared "
+ "%<static%> in %<constexpr%> context",
+ tmp);
+ return false;
+ }
+ else if (!check_for_uninitialized_const_var (
+ tmp, /*constexpr_context_p=*/true, flags))
+ return false;
+ }
+ return RECUR (tmp, want_rval);
+
+ case TRY_FINALLY_EXPR:
+ return (RECUR (TREE_OPERAND (t, 0), want_rval)
+ && RECUR (TREE_OPERAND (t, 1), any));
+
+ case SCOPE_REF:
+ return RECUR (TREE_OPERAND (t, 1), want_rval);
+
+ case TARGET_EXPR:
+ if (!TARGET_EXPR_DIRECT_INIT_P (t) && !literal_type_p (TREE_TYPE (t)))
+ {
+ if (flags & tf_error)
+ {
+ auto_diagnostic_group d;
+ error_at (loc,
+ "temporary of non-literal type %qT in a "
+ "constant expression",
+ TREE_TYPE (t));
+ explain_non_literal_class (TREE_TYPE (t));
+ }
+ return false;
+ }
+ /* FALLTHRU */
+ case INIT_EXPR:
+ return RECUR (TREE_OPERAND (t, 1), rval);
+
+ case CONSTRUCTOR: {
+ vec<constructor_elt, va_gc> *v = CONSTRUCTOR_ELTS (t);
+ constructor_elt *ce;
+ for (i = 0; vec_safe_iterate (v, i, &ce); ++i)
+ if (!RECUR (ce->value, want_rval))
+ return false;
+ return true;
+ }
+
+ case TREE_LIST: {
+ gcc_assert (TREE_PURPOSE (t) == NULL_TREE || DECL_P (TREE_PURPOSE (t)));
+ if (!RECUR (TREE_VALUE (t), want_rval))
+ return false;
+ if (TREE_CHAIN (t) == NULL_TREE)
+ return true;
+ return RECUR (TREE_CHAIN (t), want_rval);
+ }
+
+ case TRUNC_DIV_EXPR:
+ case CEIL_DIV_EXPR:
+ case FLOOR_DIV_EXPR:
+ case ROUND_DIV_EXPR:
+ case TRUNC_MOD_EXPR:
+ case CEIL_MOD_EXPR:
+ case ROUND_MOD_EXPR: {
+ tree denom = TREE_OPERAND (t, 1);
+ if (!RECUR (denom, rval))
+ return false;
+ /* We can't call cxx_eval_outermost_constant_expr on an expression
+ that hasn't been through instantiate_non_dependent_expr yet. */
+ denom = cxx_eval_outermost_constant_expr (denom, true);
+ if (integer_zerop (denom))
+ {
+ if (flags & tf_error)
+ error ("division by zero is not a constant expression");
+ return false;
+ }
+ else
+ {
+ want_rval = true;
+ return RECUR (TREE_OPERAND (t, 0), want_rval);
+ }
+ }
+
+ case COMPOUND_EXPR: {
+ /* check_return_expr sometimes wraps a TARGET_EXPR in a
+ COMPOUND_EXPR; don't get confused. */
+ tree op0 = TREE_OPERAND (t, 0);
+ tree op1 = TREE_OPERAND (t, 1);
+ STRIP_NOPS (op1);
+ if (TREE_CODE (op0) == TARGET_EXPR && op1 == TARGET_EXPR_SLOT (op0))
+ return RECUR (op0, want_rval);
+ else
+ goto binary;
+ }
+
+ /* If the first operand is the non-short-circuit constant, look at
+ the second operand; otherwise we only care about the first one for
+ potentiality. */
+ case TRUTH_AND_EXPR:
+ case TRUTH_ANDIF_EXPR:
+ tmp = boolean_true_node;
+ goto truth;
+ case TRUTH_OR_EXPR:
+ case TRUTH_ORIF_EXPR:
+ tmp = boolean_false_node;
+ truth : {
+ tree op0 = TREE_OPERAND (t, 0);
+ tree op1 = TREE_OPERAND (t, 1);
+ if (!RECUR (op0, rval))
+ return false;
+ if (!(flags & tf_error) && RECUR (op1, rval))
+ /* When quiet, try to avoid expensive trial evaluation by first
+ checking potentiality of the second operand. */
+ return true;
+ op0 = cxx_eval_outermost_constant_expr (op0, true);
+ if (tree_int_cst_equal (op0, tmp))
+ return (flags & tf_error) ? RECUR (op1, rval) : false;
+ else
+ return true;
+ }
+
+ case PLUS_EXPR:
+ case MULT_EXPR:
+ case POINTER_PLUS_EXPR:
+ case RDIV_EXPR:
+ case EXACT_DIV_EXPR:
+ case MIN_EXPR:
+ case MAX_EXPR:
+ case LSHIFT_EXPR:
+ case RSHIFT_EXPR:
+ case LROTATE_EXPR:
+ case RROTATE_EXPR:
+ case BIT_IOR_EXPR:
+ case BIT_XOR_EXPR:
+ case BIT_AND_EXPR:
+ case TRUTH_XOR_EXPR:
+ case UNORDERED_EXPR:
+ case ORDERED_EXPR:
+ case UNLT_EXPR:
+ case UNLE_EXPR:
+ case UNGT_EXPR:
+ case UNGE_EXPR:
+ case UNEQ_EXPR:
+ case LTGT_EXPR:
+ case RANGE_EXPR:
+ case COMPLEX_EXPR:
+ want_rval = true;
+ /* Fall through. */
+ case ARRAY_REF:
+ case ARRAY_RANGE_REF:
+ case MEMBER_REF:
+ case DOTSTAR_EXPR:
+ case MEM_REF:
+ case BINARY_LEFT_FOLD_EXPR:
+ case BINARY_RIGHT_FOLD_EXPR:
+ binary:
+ for (i = 0; i < 2; ++i)
+ if (!RECUR (TREE_OPERAND (t, i), want_rval))
+ return false;
+ return true;
+
+ case VEC_PERM_EXPR:
+ for (i = 0; i < 3; ++i)
+ if (!RECUR (TREE_OPERAND (t, i), true))
+ return false;
+ return true;
+
+ case COND_EXPR:
+ if (COND_EXPR_IS_VEC_DELETE (t))
+ {
+ if (flags & tf_error)
+ error_at (loc, "%<delete[]%> is not a constant expression");
+ return false;
+ }
+ /* Fall through. */
+ case IF_STMT:
+ case VEC_COND_EXPR:
+ /* If the condition is a known constant, we know which of the legs we
+ care about; otherwise we only require that the condition and
+ either of the legs be potentially constant. */
+ tmp = TREE_OPERAND (t, 0);
+ if (!RECUR (tmp, rval))
+ return false;
+
+ tmp = cxx_eval_outermost_constant_expr (tmp, true);
+ /* potential_constant_expression* isn't told if it is called for
+ manifestly_const_eval or not, so for consteval if always
+ process both branches as if the condition is not a known
+ constant. */
+ if (TREE_CODE (t) != IF_STMT || !IF_STMT_CONSTEVAL_P (t))
+ {
+ if (integer_zerop (tmp))
+ return RECUR (TREE_OPERAND (t, 2), want_rval);
+ else if (TREE_CODE (tmp) == INTEGER_CST)
+ return RECUR (TREE_OPERAND (t, 1), want_rval);
+ }
+ tmp = *jump_target;
+ for (i = 1; i < 3; ++i)
+ {
+ tree this_jump_target = tmp;
+ if (potential_constant_expression_1 (TREE_OPERAND (t, i), want_rval,
+ strict, now, tf_none,
+ &this_jump_target))
+ {
+ if (returns (&this_jump_target))
+ *jump_target = this_jump_target;
+ else if (!returns (jump_target))
+ {
+ if (breaks (&this_jump_target)
+ || continues (&this_jump_target))
+ *jump_target = this_jump_target;
+ if (i == 1)
+ {
+ /* If the then branch is potentially constant, but
+ does not return, check if the else branch
+ couldn't return, break or continue. */
+ hash_set<tree> pset;
+ check_for_return_continue_data data
+ = {&pset, NULL_TREE, NULL_TREE};
+ if (tree ret_expr
+ = rs_walk_tree (&TREE_OPERAND (t, 2),
+ check_for_return_continue, &data,
+ &pset))
+ *jump_target = ret_expr;
+ else if (*jump_target == NULL_TREE)
+ {
+ if (data.continue_stmt)
+ *jump_target = data.continue_stmt;
+ else if (data.break_stmt)
+ *jump_target = data.break_stmt;
+ }
+ }
+ }
+ return true;
+ }
+ }
+ if (flags & tf_error)
+ error_at (loc, "expression %qE is not a constant expression", t);
+ return false;
+
+ case TYPE_DECL:
+ /* We can see these in statement-expressions. */
+ return true;
+
+ case LABEL_EXPR:
+ t = LABEL_EXPR_LABEL (t);
+ if (DECL_ARTIFICIAL (t))
+ return true;
+ else if (flags & tf_error)
+ error_at (loc, "label definition in %<constexpr%> function only "
+ "available with %<-std=c++2b%> or %<-std=gnu++2b%>");
+ return false;
+
+ case ANNOTATE_EXPR:
+ return RECUR (TREE_OPERAND (t, 0), rval);
+
+ case BIT_CAST_EXPR:
+ return RECUR (TREE_OPERAND (t, 0), rval);
+
+ default:
+ sorry ("unexpected AST of kind %s", get_tree_code_name (TREE_CODE (t)));
+ gcc_unreachable ();
+ return false;
+ }
+#undef RECUR
+}
+
+bool
+potential_constant_expression_1 (tree t, bool want_rval, bool strict, bool now,
+ tsubst_flags_t flags)
+{
+ if (flags & tf_error)
+ {
+ /* Check potentiality quietly first, as that could be performed more
+ efficiently in some cases (currently only for TRUTH_*_EXPR). If
+ that fails, replay the check noisily to give errors. */
+ flags &= ~tf_error;
+ if (potential_constant_expression_1 (t, want_rval, strict, now, flags))
+ return true;
+ flags |= tf_error;
+ }
+
+ tree target = NULL_TREE;
+ return potential_constant_expression_1 (t, want_rval, strict, now, flags,
+ &target);
+}
+
+// forked from gcc/cp/constexpr.cc fold_non_dependent_init
+
+/* Like maybe_constant_init but first fully instantiate the argument. */
+
+tree
+fold_non_dependent_init (tree t, tsubst_flags_t /*=tf_warning_or_error*/,
+ bool manifestly_const_eval /*=false*/,
+ tree object /* = NULL_TREE */)
+{
+ if (t == NULL_TREE)
+ return NULL_TREE;
+
+ return maybe_constant_init (t, object, manifestly_const_eval);
+}
+
+// #include "gt-rust-rust-constexpr.h"
+
+} // namespace Compile
+} // namespace Rust
diff --git a/gcc/rust/backend/rust-constexpr.h b/gcc/rust/backend/rust-constexpr.h
new file mode 100644
index 0000000..77a0797
--- /dev/null
+++ b/gcc/rust/backend/rust-constexpr.h
@@ -0,0 +1,33 @@
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_CONSTEXPR
+#define RUST_CONSTEXPR
+
+#include "rust-system.h"
+#include "tree.h"
+
+namespace Rust {
+namespace Compile {
+
+extern tree fold_expr (tree);
+extern void
+maybe_save_constexpr_fundef (tree fun);
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_CONSTEXPR
diff --git a/gcc/rust/backend/rust-mangle.cc b/gcc/rust/backend/rust-mangle.cc
new file mode 100644
index 0000000..83aefa7
--- /dev/null
+++ b/gcc/rust/backend/rust-mangle.cc
@@ -0,0 +1,313 @@
+#include "rust-mangle.h"
+#include "fnv-hash.h"
+#include "rust-base62.h"
+
+// FIXME: Rename those to legacy_*
+static const std::string kMangledSymbolPrefix = "_ZN";
+static const std::string kMangledSymbolDelim = "E";
+static const std::string kMangledGenericDelim = "$C$";
+static const std::string kMangledSubstBegin = "$LT$";
+static const std::string kMangledSubstEnd = "$GT$";
+static const std::string kMangledSpace = "$u20$";
+static const std::string kMangledRef = "$RF$";
+static const std::string kMangledPtr = "$BP$";
+static const std::string kMangledLeftSqParen = "$u5b$"; // [
+static const std::string kMangledRightSqParen = "$u5d$"; // ]
+static const std::string kMangledLeftBrace = "$u7b$"; // {
+static const std::string kMangledRightBrace = "$u7d$"; // }
+static const std::string kQualPathBegin = "_" + kMangledSubstBegin;
+static const std::string kMangledComma = "$C$";
+
+namespace Rust {
+namespace Compile {
+
+Mangler::MangleVersion Mangler::version = MangleVersion::LEGACY;
+
+static std::string
+legacy_mangle_name (const std::string &name)
+{
+ // example
+ // <&T as core::fmt::Debug>::fmt:
+ // _ZN42_$LT$$RF$T$u20$as$u20$core..fmt..Debug$GT$3fmt17h6dac924c0051eef7E
+ // replace all white space with $ and & with RF
+ //
+ // <example::Bar as example::A>::fooA:
+ // _ZN43_$LT$example..Bar$u20$as$u20$example..A$GT$4fooA17hfc615fa76c7db7a0E:
+ //
+ // core::ptr::const_ptr::<impl *const T>::cast:
+ // _ZN4core3ptr9const_ptr33_$LT$impl$u20$$BP$const$u20$T$GT$4cast17hb79f4617226f1d55E:
+ //
+ // core::ptr::const_ptr::<impl *const [T]>::as_ptr:
+ // _ZN4core3ptr9const_ptr43_$LT$impl$u20$$BP$const$u20$$u5b$T$u5d$$GT$6as_ptr17he16e0dcd9473b04fE:
+ //
+ // example::Foo<T>::new:
+ // _ZN7example12Foo$LT$T$GT$3new17h9a2aacb7fd783515E:
+ //
+ // <example::Identity as example::FnLike<&T,&T>>::call
+ // _ZN74_$LT$example..Identity$u20$as$u20$example..FnLike$LT$$RF$T$C$$RF$T$GT$$GT$4call17ha9ee58935895acb3E
+
+ std::string buffer;
+ for (size_t i = 0; i < name.size (); i++)
+ {
+ std::string m;
+ char c = name.at (i);
+
+ if (c == ' ')
+ m = kMangledSpace;
+ else if (c == '&')
+ m = kMangledRef;
+ else if (i == 0 && c == '<')
+ m = kQualPathBegin;
+ else if (c == '<')
+ m = kMangledSubstBegin;
+ else if (c == '>')
+ m = kMangledSubstEnd;
+ else if (c == '*')
+ m = kMangledPtr;
+ else if (c == '[')
+ m = kMangledLeftSqParen;
+ else if (c == ']')
+ m = kMangledRightSqParen;
+ else if (c == '{')
+ m = kMangledLeftBrace;
+ else if (c == '}')
+ m = kMangledRightBrace;
+ else if (c == ',')
+ m = kMangledComma;
+ else if (c == ':')
+ {
+ rust_assert (i + 1 < name.size ());
+ rust_assert (name.at (i + 1) == ':');
+ i++;
+ m = "..";
+ }
+ else
+ m.push_back (c);
+
+ buffer += m;
+ }
+
+ return std::to_string (buffer.size ()) + buffer;
+}
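+
+// For example, with the mapping above, legacy_mangle_name ("Foo<T>")
+// yields "12Foo$LT$T$GT$": "Foo<T>" expands to the 12-character
+// "Foo$LT$T$GT$", prefixed with its length.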
+
+static std::string
+legacy_mangle_canonical_path (const Resolver::CanonicalPath &path)
+{
+ std::string buffer;
+ for (size_t i = 0; i < path.size (); i++)
+ {
+ auto &seg = path.get_seg_at (i);
+ buffer += legacy_mangle_name (seg.second);
+ }
+ return buffer;
+}
+
+// rustc uses a sip128 hash for legacy mangling, but an fnv 128 was quicker to
+// implement for now
+static std::string
+legacy_hash (const std::string &fingerprint)
+{
+ Hash::FNV128 hasher;
+ hasher.write ((const unsigned char *) fingerprint.c_str (),
+ fingerprint.size ());
+
+ uint64_t hi, lo;
+ hasher.sum (&hi, &lo);
+
+ char hex[16 + 1];
+ memset (hex, 0, sizeof hex);
+ snprintf (hex, sizeof hex, "%08" PRIx64 "%08" PRIx64, lo, hi);
+
+ return "h" + std::string (hex, sizeof (hex) - 1);
+}
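+
+// The result is "h" followed by 16 hex digits, e.g. the
+// "h6dac924c0051eef7" tail in the examples above; legacy_mangle_name
+// then length-prefixes it to "17h6dac924c0051eef7".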
+
+static std::string
+v0_tuple_prefix (const TyTy::BaseType *ty)
+{
+ if (ty->is_unit ())
+ return "u";
+
+ // FIXME: ARTHUR: Add rest of algorithm
+ return "";
+}
+
+static std::string
+v0_numeric_prefix (const TyTy::BaseType *ty)
+{
+ static const std::map<std::string, std::string> num_prefixes = {
+ {"[i8]", "a"}, {"[u8]", "h"}, {"[i16]", "s"}, {"[u16]", "t"},
+ {"[i32]", "l"}, {"[u32]", "m"}, {"[i64]", "x"}, {"[u64]", "y"},
+ {"[isize]", "i"}, {"[usize]", "j"}, {"[f32]", "f"}, {"[f64]", "d"},
+ };
+
+ auto ty_kind = ty->get_kind ();
+ auto ty_str = ty->as_string ();
+ auto numeric_iter = num_prefixes.end ();
+
+ // Special numeric types
+ if (ty_kind == TyTy::TypeKind::ISIZE)
+ return "i";
+ else if (ty_kind == TyTy::TypeKind::USIZE)
+ return "j";
+
+ numeric_iter = num_prefixes.find (ty_str);
+ if (numeric_iter != num_prefixes.end ())
+ return numeric_iter->second;
+
+ return "";
+}
+
+static std::string
+v0_simple_type_prefix (const TyTy::BaseType *ty)
+{
+ switch (ty->get_kind ())
+ {
+ case TyTy::TypeKind::BOOL:
+ return "b";
+ case TyTy::TypeKind::CHAR:
+ return "c";
+ case TyTy::TypeKind::STR:
+ return "e";
+ case TyTy::TypeKind::NEVER:
+ return "z";
+
+ // Placeholder types
+ case TyTy::TypeKind::ERROR: // Fallthrough
+ case TyTy::TypeKind::INFER: // Fallthrough
+ case TyTy::TypeKind::PLACEHOLDER: // Fallthrough
+ case TyTy::TypeKind::PARAM:
+ // FIXME: TyTy::TypeKind::BOUND is also a valid variant in rustc
+ return "p";
+
+ case TyTy::TypeKind::TUPLE:
+ return v0_tuple_prefix (ty);
+
+ case TyTy::TypeKind::UINT: // Fallthrough
+ case TyTy::TypeKind::INT: // Fallthrough
+ case TyTy::TypeKind::FLOAT: // Fallthrough
+ case TyTy::TypeKind::ISIZE: // Fallthrough
+ case TyTy::TypeKind::USIZE: // Fallthrough
+ return v0_numeric_prefix (ty);
+
+ default:
+ return "";
+ }
+
+ gcc_unreachable ();
+}
+
+// Add an underscore-terminated base62 integer to the mangling string.
+// This corresponds to the `<base-62-number>` grammar in the v0 mangling RFC:
+// - 0 is encoded as "_"
+// - any other value is encoded as itself minus one in base 62, followed by
+// "_"
+static void
+v0_add_integer_62 (std::string &mangled, uint64_t x)
+{
+ if (x > 0)
+ mangled.append (base62_integer (x - 1));
+
+ mangled.append ("_");
+}
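+
+// For example, assuming the RFC's 0-9, a-z, A-Z digit order for
+// base62_integer:
+// v0_add_integer_62 (m, 0) appends "_"
+// v0_add_integer_62 (m, 1) appends "0_" (base62 of 0)
+// v0_add_integer_62 (m, 63) appends "10_" (base62 of 62)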
+
+// Add a tag-prefixed base62 integer to the mangling string when the
+// integer is greater than 0:
+// - 0 is encoded as "" (nothing)
+// - any other value is encoded as <tag> + v0_add_integer_62(itself), that is
+// <tag> + base62(itself - 1) + '_'
+static void
+v0_add_opt_integer_62 (std::string &mangled, std::string tag, uint64_t x)
+{
+ if (x > 0)
+ {
+ mangled.append (tag);
+ v0_add_integer_62 (mangled, x);
+ }
+}
+
+static void
+v0_add_disambiguator (std::string &mangled, uint64_t dis)
+{
+ v0_add_opt_integer_62 (mangled, "s", dis);
+}
+
+// Add an identifier to the mangled string. This corresponds to the
+// `<identifier>` grammar in the v0 mangling RFC.
+static void
+v0_add_identifier (std::string &mangled, const std::string &identifier)
+{
+ // FIXME: gccrs cannot handle unicode identifiers yet, so we never have to
+ // create mangling for unicode values for now. However, this is handled
+ // by the v0 mangling scheme. The grammar for unicode identifier is
+ // contained in <undisambiguated-identifier>, right under the <identifier>
+ // one. If the identifier contains unicode values, then an extra "u" needs
+ // to be added to the mangling string and `punycode` must be used to encode
+ // the characters.
+
+ mangled += std::to_string (identifier.size ());
+
+ // If the first character of the identifier is a digit or an underscore, we
+ // add an extra underscore
+ if (ISDIGIT (identifier[0]) || identifier[0] == '_')
+ mangled.append ("_");
+
+ mangled.append (identifier);
+}
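+
+// For example, v0_add_identifier (m, "core") appends "4core", while
+// v0_add_identifier (m, "_foo") appends "4__foo": the length, the
+// disambiguating underscore, then the identifier itself.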
+
+static std::string
+v0_type_prefix (const TyTy::BaseType *ty)
+{
+ auto ty_prefix = v0_simple_type_prefix (ty);
+ if (!ty_prefix.empty ())
+ return ty_prefix;
+
+ // FIXME: We need to fetch more type prefixes
+ gcc_unreachable ();
+}
+
+static std::string
+legacy_mangle_item (const TyTy::BaseType *ty,
+ const Resolver::CanonicalPath &path)
+{
+ const std::string hash = legacy_hash (ty->as_string ());
+ const std::string hash_sig = legacy_mangle_name (hash);
+
+ return kMangledSymbolPrefix + legacy_mangle_canonical_path (path) + hash_sig
+ + kMangledSymbolDelim;
+}
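+
+// Decomposed, the "example::Foo<T>::new" sample from legacy_mangle_name
+// reads "_ZN" + "7example" + "12Foo$LT$T$GT$" + "3new"
+// + "17h9a2aacb7fd783515" + "E".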
+
+static std::string
+v0_mangle_item (const TyTy::BaseType *ty, const Resolver::CanonicalPath &path)
+{
+ // we can get this from the canonical_path
+ auto mappings = Analysis::Mappings::get ();
+ std::string crate_name;
+ bool ok = mappings->get_crate_name (path.get_crate_num (), crate_name);
+ rust_assert (ok);
+
+ std::string mangled;
+ // FIXME: Add real algorithm once all pieces are implemented
+ auto ty_prefix = v0_type_prefix (ty);
+ v0_add_identifier (mangled, crate_name);
+ v0_add_disambiguator (mangled, 62);
+
+ gcc_unreachable ();
+}
+
+std::string
+Mangler::mangle_item (const TyTy::BaseType *ty,
+ const Resolver::CanonicalPath &path) const
+{
+ switch (version)
+ {
+ case Mangler::MangleVersion::LEGACY:
+ return legacy_mangle_item (ty, path);
+ case Mangler::MangleVersion::V0:
+ return v0_mangle_item (ty, path);
+ default:
+ gcc_unreachable ();
+ }
+}
+
+} // namespace Compile
+} // namespace Rust
diff --git a/gcc/rust/backend/rust-mangle.h b/gcc/rust/backend/rust-mangle.h
new file mode 100644
index 0000000..6d5a64f
--- /dev/null
+++ b/gcc/rust/backend/rust-mangle.h
@@ -0,0 +1,52 @@
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_MANGLE_H
+#define RUST_MANGLE_H
+
+#include "rust-system.h"
+#include "rust-tyty.h"
+
+namespace Rust {
+namespace Compile {
+
+class Mangler
+{
+public:
+ enum MangleVersion
+ {
+ // Values defined in rust/lang.opt
+ LEGACY = 0,
+ V0 = 1,
+ };
+
+ // this needs to support Legacy and V0 see github #429 or #305
+ std::string mangle_item (const TyTy::BaseType *ty,
+ const Resolver::CanonicalPath &path) const;
+
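+ // FRUST_MANGLING_VALUE is the enumerated value of the -frust-mangling=
+ // option declared in rust/lang.opt; it selects which scheme mangle_item
+ // uses.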
+ static void set_mangling (int frust_mangling_value)
+ {
+ version = static_cast<MangleVersion> (frust_mangling_value);
+ }
+
+private:
+ static enum MangleVersion version;
+};
+
+} // namespace Compile
+} // namespace Rust
+
+#endif // RUST_MANGLE_H
diff --git a/gcc/rust/backend/rust-tree.cc b/gcc/rust/backend/rust-tree.cc
new file mode 100644
index 0000000..8243d4c
--- /dev/null
+++ b/gcc/rust/backend/rust-tree.cc
@@ -0,0 +1,6157 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#include "rust-tree.h"
+#include "fold-const.h"
+#include "stringpool.h"
+#include "attribs.h"
+#include "escaped_string.h"
+#include "libiberty.h"
+#include "stor-layout.h"
+#include "hash-map.h"
+#include "diagnostic.h"
+#include "timevar.h"
+#include "convert.h"
+#include "gimple-expr.h"
+#include "gimplify.h"
+#include "function.h"
+#include "gcc-rich-location.h"
+#include "target.h"
+#include "file-prefix-map.h"
+#include "cgraph.h"
+#include "output.h"
+#include "memmodel.h"
+#include "tm_p.h"
+
+// forked from gcc/c-family/c-common.cc c_global_trees
+tree c_global_trees[CTI_MAX];
+// forked from gcc/cp/decl.cc cp_global_trees
+tree cp_global_trees[CPTI_MAX];
+
+struct saved_scope *scope_chain;
+
+namespace Rust {
+
+void
+mark_exp_read (tree exp)
+{
+ if (exp == NULL)
+ return;
+
+ switch (TREE_CODE (exp))
+ {
+ case VAR_DECL:
+ gcc_fallthrough ();
+ case PARM_DECL:
+ DECL_READ_P (exp) = 1;
+ break;
+ case ARRAY_REF:
+ case COMPONENT_REF:
+ case MODIFY_EXPR:
+ case REALPART_EXPR:
+ case IMAGPART_EXPR:
+ CASE_CONVERT:
+ case ADDR_EXPR:
+ case INDIRECT_REF:
+ case FLOAT_EXPR:
+ case NON_DEPENDENT_EXPR:
+ case VIEW_CONVERT_EXPR:
+ mark_exp_read (TREE_OPERAND (exp, 0));
+ break;
+ case COMPOUND_EXPR:
+ mark_exp_read (TREE_OPERAND (exp, 1));
+ break;
+ case COND_EXPR:
+ if (TREE_OPERAND (exp, 1))
+ mark_exp_read (TREE_OPERAND (exp, 1));
+ if (TREE_OPERAND (exp, 2))
+ mark_exp_read (TREE_OPERAND (exp, 2));
+ break;
+ default:
+ break;
+ }
+}
+
+tree
+convert_from_reference (tree val)
+{
+ if (TREE_TYPE (val) && TYPE_REF_P (TREE_TYPE (val)))
+ {
+ tree t = TREE_TYPE (TREE_TYPE (val));
+ tree ref = build1 (INDIRECT_REF, t, val);
+
+ mark_exp_read (val);
+
+ TREE_SIDE_EFFECTS (ref)
+ = (TREE_THIS_VOLATILE (ref) || TREE_SIDE_EFFECTS (val));
+ val = ref;
+ }
+
+ return val;
+}
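+
+// Note (added for clarity): given VAL of reference type `T &`, the
+// INDIRECT_REF built above yields an lvalue of type `T`, mirroring the
+// implicit dereference C++ applies whenever a reference is used.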
+
+tree
+mark_use (tree expr, bool rvalue_p, bool read_p,
+ location_t loc /* = UNKNOWN_LOCATION */,
+ bool reject_builtin /* = true */)
+{
+#define RECUR(t) mark_use ((t), rvalue_p, read_p, loc, reject_builtin)
+
+ if (expr == NULL_TREE || error_operand_p (expr))
+ return expr;
+
+  if (reject_builtin && reject_gcc_builtin (expr, loc))
+    return error_mark_node;
+
+ if (read_p)
+ mark_exp_read (expr);
+
+ bool recurse_op[3] = {false, false, false};
+ switch (TREE_CODE (expr))
+ {
+ case COMPONENT_REF:
+ case NON_DEPENDENT_EXPR:
+ recurse_op[0] = true;
+ break;
+ case COMPOUND_EXPR:
+ recurse_op[1] = true;
+ break;
+ case COND_EXPR:
+ recurse_op[2] = true;
+ if (TREE_OPERAND (expr, 1))
+ recurse_op[1] = true;
+ break;
+ case INDIRECT_REF:
+ if (REFERENCE_REF_P (expr))
+ {
+ /* Try to look through the reference. */
+ tree ref = TREE_OPERAND (expr, 0);
+ tree r = mark_rvalue_use (ref, loc, reject_builtin);
+ if (r != ref)
+ expr = convert_from_reference (r);
+ }
+ break;
+
+ case VIEW_CONVERT_EXPR:
+ if (location_wrapper_p (expr))
+ {
+ loc = EXPR_LOCATION (expr);
+ tree op = TREE_OPERAND (expr, 0);
+ tree nop = RECUR (op);
+ if (nop == error_mark_node)
+ return error_mark_node;
+ else if (op == nop)
+ /* No change. */;
+ else if (DECL_P (nop) || CONSTANT_CLASS_P (nop))
+ {
+ /* Reuse the location wrapper. */
+ TREE_OPERAND (expr, 0) = nop;
+ /* If we're replacing a DECL with a constant, we also need to
+ change the TREE_CODE of the location wrapper. */
+ if (rvalue_p)
+ TREE_SET_CODE (expr, NON_LVALUE_EXPR);
+ }
+ else
+ {
+ /* Drop the location wrapper. */
+ expr = nop;
+ protected_set_expr_location (expr, loc);
+ }
+ return expr;
+ }
+ gcc_fallthrough ();
+ CASE_CONVERT:
+ recurse_op[0] = true;
+ break;
+
+ default:
+ break;
+ }
+
+ for (int i = 0; i < 3; ++i)
+ if (recurse_op[i])
+ {
+ tree op = TREE_OPERAND (expr, i);
+ op = RECUR (op);
+ if (op == error_mark_node)
+ return error_mark_node;
+ TREE_OPERAND (expr, i) = op;
+ }
+
+ return expr;
+#undef RECUR
+}
+
+tree
+mark_rvalue_use (tree e, location_t loc /* = UNKNOWN_LOCATION */,
+ bool reject_builtin /* = true */)
+{
+ return mark_use (e, true, true, loc, reject_builtin);
+}
+
+tree
+mark_lvalue_use (tree expr)
+{
+ return mark_use (expr, false, true, input_location, false);
+}
+
+tree
+mark_lvalue_use_nonread (tree expr)
+{
+ return mark_use (expr, false, false, input_location, false);
+}
+
+tree
+mark_discarded_use (tree expr)
+{
+ if (expr == NULL_TREE)
+ return expr;
+
+ STRIP_ANY_LOCATION_WRAPPER (expr);
+
+ switch (TREE_CODE (expr))
+ {
+ case COND_EXPR:
+ TREE_OPERAND (expr, 2) = mark_discarded_use (TREE_OPERAND (expr, 2));
+ gcc_fallthrough ();
+ case COMPOUND_EXPR:
+ TREE_OPERAND (expr, 1) = mark_discarded_use (TREE_OPERAND (expr, 1));
+ return expr;
+
+ case COMPONENT_REF:
+ case ARRAY_REF:
+ case INDIRECT_REF:
+ case MEMBER_REF:
+ break;
+ default:
+ if (DECL_P (expr))
+ break;
+ else
+ return expr;
+ }
+
+ return mark_use (expr, true, true, input_location, false);
+}
+
+tree
+convert_to_void (tree expr, impl_conv_void implicit)
+{
+ location_t loc = expr_loc_or_input_loc (expr);
+ if (expr == error_mark_node || TREE_TYPE (expr) == error_mark_node)
+ return error_mark_node;
+
+ expr = mark_discarded_use (expr);
+ if (implicit == ICV_CAST)
+ /* An explicit cast to void avoids all -Wunused-but-set* warnings. */
+ mark_exp_read (expr);
+
+ if (!TREE_TYPE (expr))
+ return expr;
+
+ if (VOID_TYPE_P (TREE_TYPE (expr)))
+ return expr;
+ switch (TREE_CODE (expr))
+ {
+ case COND_EXPR: {
+ /* The two parts of a cond expr might be separate lvalues. */
+ tree op1 = TREE_OPERAND (expr, 1);
+ tree op2 = TREE_OPERAND (expr, 2);
+ bool side_effects
+ = ((op1 && TREE_SIDE_EFFECTS (op1)) || TREE_SIDE_EFFECTS (op2));
+ tree new_op1, new_op2;
+ new_op1 = NULL_TREE;
+ if (implicit != ICV_CAST && !side_effects)
+ {
+ if (op1)
+ new_op1 = convert_to_void (op1, ICV_SECOND_OF_COND);
+ new_op2 = convert_to_void (op2, ICV_THIRD_OF_COND);
+ }
+ else
+ {
+ if (op1)
+ new_op1 = convert_to_void (op1, ICV_CAST);
+ new_op2 = convert_to_void (op2, ICV_CAST);
+ }
+
+ expr = build3_loc (loc, COND_EXPR, TREE_TYPE (new_op2),
+ TREE_OPERAND (expr, 0), new_op1, new_op2);
+ break;
+ }
+
+ case COMPOUND_EXPR: {
+ /* The second part of a compound expr contains the value. */
+ tree op1 = TREE_OPERAND (expr, 1);
+ tree new_op1;
+ if (implicit != ICV_CAST
+ && !warning_suppressed_p (expr /* What warning? */))
+ new_op1 = convert_to_void (op1, ICV_RIGHT_OF_COMMA);
+ else
+ new_op1 = convert_to_void (op1, ICV_CAST);
+
+ if (new_op1 != op1)
+ {
+ tree t = build2_loc (loc, COMPOUND_EXPR, TREE_TYPE (new_op1),
+ TREE_OPERAND (expr, 0), new_op1);
+ expr = t;
+ }
+
+ break;
+ }
+
+ case NON_LVALUE_EXPR:
+ case NOP_EXPR:
+ /* These have already decayed to rvalue. */
+ break;
+
+ case CALL_EXPR:
+ maybe_warn_nodiscard (expr, implicit);
+ break;
+
+ case INDIRECT_REF: {
+ tree type = TREE_TYPE (expr);
+ int is_reference = TYPE_REF_P (TREE_TYPE (TREE_OPERAND (expr, 0)));
+ int is_volatile = TYPE_VOLATILE (type);
+ int is_complete = COMPLETE_TYPE_P (type);
+
+ /* Can't load the value if we don't know the type. */
+ if (is_volatile && !is_complete)
+ {
+ switch (implicit)
+ {
+ case ICV_CAST:
+ warning_at (loc, 0,
+ "conversion to void will not access "
+ "object of incomplete type %qT",
+ type);
+ break;
+ case ICV_SECOND_OF_COND:
+ warning_at (loc, 0,
+ "indirection will not access object of "
+ "incomplete type %qT in second operand "
+ "of conditional expression",
+ type);
+ break;
+ case ICV_THIRD_OF_COND:
+ warning_at (loc, 0,
+ "indirection will not access object of "
+ "incomplete type %qT in third operand "
+ "of conditional expression",
+ type);
+ break;
+ case ICV_RIGHT_OF_COMMA:
+ warning_at (loc, 0,
+ "indirection will not access object of "
+ "incomplete type %qT in right operand of "
+ "comma operator",
+ type);
+ break;
+ case ICV_LEFT_OF_COMMA:
+ warning_at (loc, 0,
+ "indirection will not access object of "
+ "incomplete type %qT in left operand of "
+ "comma operator",
+ type);
+ break;
+ case ICV_STATEMENT:
+ warning_at (loc, 0,
+ "indirection will not access object of "
+ "incomplete type %qT in statement",
+ type);
+ break;
+ case ICV_THIRD_IN_FOR:
+ warning_at (loc, 0,
+ "indirection will not access object of "
+ "incomplete type %qT in for increment "
+ "expression",
+ type);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+ /* Don't load the value if this is an implicit dereference, or if
+ the type needs to be handled by ctors/dtors. */
+ else if (is_volatile && is_reference)
+ {
+ switch (implicit)
+ {
+ case ICV_CAST:
+ warning_at (loc, 0,
+ "conversion to void will not access "
+ "object of type %qT",
+ type);
+ break;
+ case ICV_SECOND_OF_COND:
+ warning_at (loc, 0,
+ "implicit dereference will not access "
+ "object of type %qT in second operand of "
+ "conditional expression",
+ type);
+ break;
+ case ICV_THIRD_OF_COND:
+ warning_at (loc, 0,
+ "implicit dereference will not access "
+ "object of type %qT in third operand of "
+ "conditional expression",
+ type);
+ break;
+ case ICV_RIGHT_OF_COMMA:
+ warning_at (loc, 0,
+ "implicit dereference will not access "
+ "object of type %qT in right operand of "
+ "comma operator",
+ type);
+ break;
+ case ICV_LEFT_OF_COMMA:
+ warning_at (loc, 0,
+ "implicit dereference will not access "
+ "object of type %qT in left operand of comma "
+ "operator",
+ type);
+ break;
+ case ICV_STATEMENT:
+ warning_at (loc, 0,
+ "implicit dereference will not access "
+ "object of type %qT in statement",
+ type);
+ break;
+ case ICV_THIRD_IN_FOR:
+ warning_at (loc, 0,
+ "implicit dereference will not access "
+ "object of type %qT in for increment expression",
+ type);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+ else if (is_volatile && TREE_ADDRESSABLE (type))
+ {
+ switch (implicit)
+ {
+ case ICV_CAST:
+ warning_at (loc, 0,
+ "conversion to void will not access "
+ "object of non-trivially-copyable type %qT",
+ type);
+ break;
+ case ICV_SECOND_OF_COND:
+ warning_at (loc, 0,
+ "indirection will not access object of "
+ "non-trivially-copyable type %qT in second "
+ "operand of conditional expression",
+ type);
+ break;
+ case ICV_THIRD_OF_COND:
+ warning_at (loc, 0,
+ "indirection will not access object of "
+ "non-trivially-copyable type %qT in third "
+ "operand of conditional expression",
+ type);
+ break;
+ case ICV_RIGHT_OF_COMMA:
+ warning_at (loc, 0,
+ "indirection will not access object of "
+ "non-trivially-copyable type %qT in right "
+ "operand of comma operator",
+ type);
+ break;
+ case ICV_LEFT_OF_COMMA:
+ warning_at (loc, 0,
+ "indirection will not access object of "
+ "non-trivially-copyable type %qT in left "
+ "operand of comma operator",
+ type);
+ break;
+ case ICV_STATEMENT:
+ warning_at (loc, 0,
+ "indirection will not access object of "
+ "non-trivially-copyable type %qT in statement",
+ type);
+ break;
+ case ICV_THIRD_IN_FOR:
+ warning_at (loc, 0,
+ "indirection will not access object of "
+ "non-trivially-copyable type %qT in for "
+ "increment expression",
+ type);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ }
+ if (is_reference || !is_volatile || !is_complete
+ || TREE_ADDRESSABLE (type))
+ {
+ /* Emit a warning (if enabled) when the "effect-less" INDIRECT_REF
+ operation is stripped off. Note that we don't warn about
+ - an expression with TREE_NO_WARNING set. (For an example of
+ such expressions, see build_over_call in call.cc.)
+ - automatic dereferencing of references, since the user cannot
+ control it. (See also warn_if_unused_value() in c-common.cc.)
+ */
+ if (warn_unused_value && implicit != ICV_CAST
+ && !warning_suppressed_p (expr, OPT_Wunused_value)
+ && !is_reference)
+ warning_at (loc, OPT_Wunused_value, "value computed is not used");
+ expr = TREE_OPERAND (expr, 0);
+ if (TREE_CODE (expr) == CALL_EXPR)
+ maybe_warn_nodiscard (expr, implicit);
+ }
+
+ break;
+ }
+
+ case VAR_DECL: {
+ /* External variables might be incomplete. */
+ tree type = TREE_TYPE (expr);
+ int is_complete = COMPLETE_TYPE_P (type);
+
+ if (TYPE_VOLATILE (type) && !is_complete)
+ switch (implicit)
+ {
+ case ICV_CAST:
+ warning_at (loc, 0,
+ "conversion to void will not access "
+ "object %qE of incomplete type %qT",
+ expr, type);
+ break;
+ case ICV_SECOND_OF_COND:
+ warning_at (loc, 0,
+ "variable %qE of incomplete type %qT will "
+ "not be accessed in second operand of "
+ "conditional expression",
+ expr, type);
+ break;
+ case ICV_THIRD_OF_COND:
+ warning_at (loc, 0,
+ "variable %qE of incomplete type %qT will "
+ "not be accessed in third operand of "
+ "conditional expression",
+ expr, type);
+ break;
+ case ICV_RIGHT_OF_COMMA:
+ warning_at (loc, 0,
+ "variable %qE of incomplete type %qT will "
+ "not be accessed in right operand of comma operator",
+ expr, type);
+ break;
+ case ICV_LEFT_OF_COMMA:
+ warning_at (loc, 0,
+ "variable %qE of incomplete type %qT will "
+ "not be accessed in left operand of comma operator",
+ expr, type);
+ break;
+ case ICV_STATEMENT:
+ warning_at (loc, 0,
+ "variable %qE of incomplete type %qT will "
+ "not be accessed in statement",
+ expr, type);
+ break;
+ case ICV_THIRD_IN_FOR:
+ warning_at (loc, 0,
+ "variable %qE of incomplete type %qT will "
+ "not be accessed in for increment expression",
+ expr, type);
+ break;
+ default:
+ gcc_unreachable ();
+ }
+
+ break;
+ }
+
+ default:;
+ }
+
+ if (!TREE_SIDE_EFFECTS (expr))
+ expr = void_node;
+
+ return expr;
+}
+
+void
+maybe_warn_nodiscard (tree expr, impl_conv_void implicit)
+{
+ tree call = expr;
+ if (TREE_CODE (expr) == TARGET_EXPR)
+ call = TARGET_EXPR_INITIAL (expr);
+
+ location_t loc = expr_loc_or_input_loc (call);
+ tree callee = CALL_EXPR_FN (call);
+ if (!callee)
+ return;
+
+ tree type = TREE_TYPE (callee);
+ if (INDIRECT_TYPE_P (type))
+ type = TREE_TYPE (type);
+
+ tree rettype = TREE_TYPE (type);
+ tree fn = get_fndecl_from_callee (callee);
+ tree attr;
+ if (implicit != ICV_CAST && fn
+ && (attr = lookup_attribute ("nodiscard", DECL_ATTRIBUTES (fn))))
+ {
+ escaped_string msg;
+ tree args = TREE_VALUE (attr);
+ if (args)
+ msg.escape (TREE_STRING_POINTER (TREE_VALUE (args)));
+ const char *format
+ = (msg ? G_ ("ignoring return value of %qD, that must be used: %<%s%>")
+ : G_ ("ignoring return value of %qD, that must be used"));
+ const char *raw_msg = msg ? (const char *) msg : "";
+ auto_diagnostic_group d;
+ if (warning_at (loc, OPT_Wunused_result, format, fn, raw_msg))
+ inform (DECL_SOURCE_LOCATION (fn), "declared here");
+ }
+ else if (implicit != ICV_CAST
+ && (attr
+ = lookup_attribute ("nodiscard", TYPE_ATTRIBUTES (rettype))))
+ {
+ escaped_string msg;
+ tree args = TREE_VALUE (attr);
+ if (args)
+ msg.escape (TREE_STRING_POINTER (TREE_VALUE (args)));
+ const char *format
+ = (msg ? G_ (
+ "ignoring returned value of type %qT, that must be used: %<%s%>")
+ : G_ ("ignoring returned value of type %qT, that must be used"));
+ const char *raw_msg = msg ? (const char *) msg : "";
+ auto_diagnostic_group d;
+ if (warning_at (loc, OPT_Wunused_result, format, rettype, raw_msg))
+ {
+ if (fn)
+ inform (DECL_SOURCE_LOCATION (fn), "in call to %qD, declared here",
+ fn);
+ inform (DECL_SOURCE_LOCATION (TYPE_NAME (rettype)),
+ "%qT declared here", rettype);
+ }
+ }
+}
+
+location_t
+expr_loc_or_loc (const_tree t, location_t or_loc)
+{
+ location_t loc = EXPR_LOCATION (t);
+ if (loc == UNKNOWN_LOCATION)
+ loc = or_loc;
+ return loc;
+}
+
+location_t
+expr_loc_or_input_loc (const_tree t)
+{
+ return expr_loc_or_loc (t, input_location);
+}
+
+// FN is the callee of a CALL_EXPR or AGGR_INIT_EXPR; return the FUNCTION_DECL
+// if we can.
+tree
+get_fndecl_from_callee (tree fn)
+{
+ if (fn == NULL_TREE)
+ return fn;
+ if (TREE_CODE (fn) == FUNCTION_DECL)
+ return fn;
+ tree type = TREE_TYPE (fn);
+ if (type == NULL_TREE || !INDIRECT_TYPE_P (type))
+ return NULL_TREE;
+
+ STRIP_NOPS (fn);
+ if (TREE_CODE (fn) == ADDR_EXPR || TREE_CODE (fn) == FDESC_EXPR)
+ fn = TREE_OPERAND (fn, 0);
+ if (TREE_CODE (fn) == FUNCTION_DECL)
+ return fn;
+ return NULL_TREE;
+}
+
+tree
+pointer_offset_expression (tree base_tree, tree index_tree, location_t location)
+{
+ tree element_type_tree = TREE_TYPE (TREE_TYPE (base_tree));
+ if (base_tree == error_mark_node || TREE_TYPE (base_tree) == error_mark_node
+ || index_tree == error_mark_node || element_type_tree == error_mark_node)
+ return error_mark_node;
+
+ tree element_size = TYPE_SIZE_UNIT (element_type_tree);
+ index_tree = fold_convert_loc (location, sizetype, index_tree);
+ tree offset
+ = fold_build2_loc (location, MULT_EXPR, sizetype, index_tree, element_size);
+
+ return fold_build2_loc (location, POINTER_PLUS_EXPR, TREE_TYPE (base_tree),
+ base_tree, offset);
+}
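+
+// Illustration (an assumption, not from the original source): for an
+// `int32 *` BASE_TREE and an index expression I, the tree built above is
+//
+//   BASE_TREE p+ ((sizetype) I * 4)
+//
+// i.e. a POINTER_PLUS_EXPR whose offset is the index scaled by the
+// TYPE_SIZE_UNIT of the element type.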
+
+// forked from gcc/cp/tree.cc cp_walk_subtrees
+/* Apply FUNC to all language-specific sub-trees of TP in a pre-order
+ traversal. Called from walk_tree. */
+
+tree
+rs_walk_subtrees (tree *tp, int *walk_subtrees_p, walk_tree_fn func, void *data,
+ hash_set<tree> *pset)
+{
+ enum tree_code code = TREE_CODE (*tp);
+ tree result;
+
+#define WALK_SUBTREE(NODE) \
+ do \
+ { \
+ result = rs_walk_tree (&(NODE), func, data, pset); \
+ if (result) \
+ goto out; \
+ } \
+ while (0)
+
+ if (TYPE_P (*tp))
+ {
+ /* If *WALK_SUBTREES_P is 1, we're interested in the syntactic form of
+ the argument, so don't look through typedefs, but do walk into
+ template arguments for alias templates (and non-typedefed classes).
+
+ If *WALK_SUBTREES_P > 1, we're interested in type identity or
+ equivalence, so look through typedefs, ignoring template arguments for
+ alias templates, and walk into template args of classes.
+
+ See find_abi_tags_r for an example of setting *WALK_SUBTREES_P to 2
+ when that's the behavior the walk_tree_fn wants. */
+ if (*walk_subtrees_p == 1 && typedef_variant_p (*tp))
+ {
+ *walk_subtrees_p = 0;
+ return NULL_TREE;
+ }
+ }
+
+ /* Not one of the easy cases. We must explicitly go through the
+ children. */
+ result = NULL_TREE;
+ switch (code)
+ {
+ case TREE_LIST:
+ WALK_SUBTREE (TREE_PURPOSE (*tp));
+ break;
+
+ case RECORD_TYPE:
+ if (TYPE_PTRMEMFUNC_P (*tp))
+ WALK_SUBTREE (TYPE_PTRMEMFUNC_FN_TYPE_RAW (*tp));
+ break;
+
+ case CONSTRUCTOR:
+ if (COMPOUND_LITERAL_P (*tp))
+ WALK_SUBTREE (TREE_TYPE (*tp));
+ break;
+
+ case DECL_EXPR:
+ /* User variables should be mentioned in BIND_EXPR_VARS
+ and their initializers and sizes walked when walking
+ the containing BIND_EXPR. Compiler temporaries are
+ handled here. And also normal variables in templates,
+ since do_poplevel doesn't build a BIND_EXPR then. */
+ if (VAR_P (TREE_OPERAND (*tp, 0))
+ && (DECL_ARTIFICIAL (TREE_OPERAND (*tp, 0))
+ && !TREE_STATIC (TREE_OPERAND (*tp, 0))))
+ {
+ tree decl = TREE_OPERAND (*tp, 0);
+ WALK_SUBTREE (DECL_INITIAL (decl));
+ WALK_SUBTREE (DECL_SIZE (decl));
+ WALK_SUBTREE (DECL_SIZE_UNIT (decl));
+ }
+ break;
+
+ default:
+ return NULL_TREE;
+ }
+
+ /* We didn't find what we were looking for. */
+out:
+ return result;
+
+#undef WALK_SUBTREE
+}
+
+// forked from gcc/cp/tree.cc cp_expr_location
+
+/* Like EXPR_LOCATION, but also handle some tcc_exceptional that have
+ locations. */
+
+location_t
+rs_expr_location (const_tree t_)
+{
+ tree t = CONST_CAST_TREE (t_);
+ if (t == NULL_TREE)
+ return UNKNOWN_LOCATION;
+
+ return EXPR_LOCATION (t);
+}
+
+// forked from gcc/cp/class.cc is_really_empty_class
+
+/* Returns true if TYPE contains no actual data, just various
+ possible combinations of empty classes. If IGNORE_VPTR is true,
+ a vptr doesn't prevent the class from being considered empty. Typically
+ we want to ignore the vptr on assignment, and not on initialization. */
+
+bool
+is_really_empty_class (tree type, bool ignore_vptr)
+{
+ if (CLASS_TYPE_P (type))
+ {
+ tree field;
+ tree binfo;
+ tree base_binfo;
+ int i;
+
+ /* CLASSTYPE_EMPTY_P isn't set properly until the class is actually laid
+ out, but we'd like to be able to check this before then. */
+ if (COMPLETE_TYPE_P (type) && is_empty_class (type))
+ return true;
+
+ if (!ignore_vptr && TYPE_CONTAINS_VPTR_P (type))
+ return false;
+
+ for (binfo = TYPE_BINFO (type), i = 0;
+ BINFO_BASE_ITERATE (binfo, i, base_binfo); ++i)
+ if (!is_really_empty_class (BINFO_TYPE (base_binfo), ignore_vptr))
+ return false;
+ for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
+ if (TREE_CODE (field) == FIELD_DECL
+ && !DECL_ARTIFICIAL (field)
+ /* An unnamed bit-field is not a data member. */
+ && !DECL_UNNAMED_BIT_FIELD (field)
+ && !is_really_empty_class (TREE_TYPE (field), ignore_vptr))
+ return false;
+ return true;
+ }
+ else if (TREE_CODE (type) == ARRAY_TYPE)
+ return (integer_zerop (array_type_nelts_top (type))
+ || is_really_empty_class (TREE_TYPE (type), ignore_vptr));
+ return false;
+}
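+
+// Example (illustration only): with `struct A {};` and `struct B { A a; };`,
+// both A and B are really empty classes; adding a non-static `int` member
+// to A would make both non-empty.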
+
+// forked from gcc/cp/class.cc is_empty_class
+
+/* Returns 1 if TYPE contains only padding bytes. */
+
+int
+is_empty_class (tree type)
+{
+ if (type == error_mark_node)
+ return 0;
+
+ if (!CLASS_TYPE_P (type))
+ return 0;
+
+ return CLASSTYPE_EMPTY_P (type);
+}
+
+// forked from gcc/cp/tree.cc array_type_nelts_top
+
+/* Return, as an INTEGER_CST node, the number of elements for TYPE
+ (which is an ARRAY_TYPE). This counts only elements of the top
+ array. */
+
+tree
+array_type_nelts_top (tree type)
+{
+ return fold_build2_loc (input_location, PLUS_EXPR, sizetype,
+ array_type_nelts (type), size_one_node);
+}
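+
+// Example (illustration only): for `int a[4]`, array_type_nelts yields the
+// INTEGER_CST 3 (the highest valid index), so array_type_nelts_top returns
+// 4, the element count of the outermost array dimension.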
+
+// forked from gcc/cp/tree.cc builtin_valid_in_constant_expr_p
+
+/* Test whether DECL is a builtin that may appear in a
+ constant-expression. */
+
+bool
+builtin_valid_in_constant_expr_p (const_tree decl)
+{
+ STRIP_ANY_LOCATION_WRAPPER (decl);
+ if (TREE_CODE (decl) != FUNCTION_DECL)
+ /* Not a function. */
+ return false;
+ if (DECL_BUILT_IN_CLASS (decl) != BUILT_IN_NORMAL)
+ {
+ if (fndecl_built_in_p (decl, BUILT_IN_FRONTEND))
+ switch (DECL_FE_FUNCTION_CODE (decl))
+ {
+ case RS_BUILT_IN_IS_CONSTANT_EVALUATED:
+ case RS_BUILT_IN_SOURCE_LOCATION:
+ case RS_BUILT_IN_IS_CORRESPONDING_MEMBER:
+ case RS_BUILT_IN_IS_POINTER_INTERCONVERTIBLE_WITH_CLASS:
+ return true;
+ default:
+ break;
+ }
+ /* Not a built-in. */
+ return false;
+ }
+ switch (DECL_FUNCTION_CODE (decl))
+ {
+ /* These always have constant results like the corresponding
+ macros/symbol. */
+ case BUILT_IN_FILE:
+ case BUILT_IN_FUNCTION:
+ case BUILT_IN_LINE:
+
+ /* The following built-ins are valid in constant expressions
+ when their arguments are. */
+ case BUILT_IN_ADD_OVERFLOW_P:
+ case BUILT_IN_SUB_OVERFLOW_P:
+ case BUILT_IN_MUL_OVERFLOW_P:
+
+ /* These have constant results even if their operands are
+ non-constant. */
+ case BUILT_IN_CONSTANT_P:
+ case BUILT_IN_ATOMIC_ALWAYS_LOCK_FREE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// forked from gcc/cp/decl2.cc decl_maybe_constant_var_p
+
+/* Returns true if DECL could be a symbolic constant variable, depending on
+ its initializer. */
+
+bool
+decl_maybe_constant_var_p (tree decl)
+{
+ tree type = TREE_TYPE (decl);
+ if (!VAR_P (decl))
+ return false;
+ if (DECL_DECLARED_CONSTEXPR_P (decl))
+ return true;
+ if (DECL_HAS_VALUE_EXPR_P (decl))
+ /* A proxy isn't constant. */
+ return false;
+ if (TYPE_REF_P (type))
+ /* References can be constant. */;
+ else if (RS_TYPE_CONST_NON_VOLATILE_P (type)
+ && INTEGRAL_OR_ENUMERATION_TYPE_P (type))
+ /* And const integers. */;
+ else
+ return false;
+
+ if (DECL_INITIAL (decl) && !DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl))
+ /* We know the initializer, and it isn't constant. */
+ return false;
+ else
+ return true;
+}
+
+// forked from gcc/cp/typeck.cc cp_type_quals
+
+/* Returns the type qualifiers for this type, including the qualifiers on the
+ elements for an array type. */
+
+int
+rs_type_quals (const_tree type)
+{
+ int quals;
+ /* This CONST_CAST is okay because strip_array_types returns its
+ argument unmodified and we assign it to a const_tree. */
+ type = strip_array_types (CONST_CAST_TREE (type));
+ if (type == error_mark_node
+ /* Quals on a FUNCTION_TYPE are memfn quals. */
+ || TREE_CODE (type) == FUNCTION_TYPE)
+ return TYPE_UNQUALIFIED;
+ quals = TYPE_QUALS (type);
+ /* METHOD and REFERENCE_TYPEs should never have quals. */
+ // gcc_assert (
+ // (TREE_CODE (type) != METHOD_TYPE && !TYPE_REF_P (type))
+ // || ((quals & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)) ==
+ // TYPE_UNQUALIFIED));
+ return quals;
+}
+
+// forked from gcc/cp/decl.cc cp_global_trees
+
+/* The following symbols are subsumed in the cp_global_trees array, and
+ listed here individually for documentation purposes.
+
+ C++ extensions
+ tree wchar_decl_node;
+
+ tree vtable_entry_type;
+ tree delta_type_node;
+ tree __t_desc_type_node;
+
+ tree class_type_node;
+ tree unknown_type_node;
+
+ Array type `vtable_entry_type[]'
+
+ tree vtbl_type_node;
+ tree vtbl_ptr_type_node;
+
+ Namespaces,
+
+ tree std_node;
+ tree abi_node;
+
+ A FUNCTION_DECL which can call `abort'. Not necessarily the
+ one that the user will declare, but sufficient to be called
+ by routines that want to abort the program.
+
+ tree abort_fndecl;
+
+ Used by RTTI
+ tree type_info_type_node, tinfo_decl_id, tinfo_decl_type;
+ tree tinfo_var_id; */
+
+/* The following symbols are subsumed in the c_global_trees array, and
+ listed here individually for documentation purposes.
+
+ INTEGER_TYPE and REAL_TYPE nodes for the standard data types.
+
+ tree short_integer_type_node;
+ tree long_integer_type_node;
+ tree long_long_integer_type_node;
+
+ tree short_unsigned_type_node;
+ tree long_unsigned_type_node;
+ tree long_long_unsigned_type_node;
+
+ tree truthvalue_type_node;
+ tree truthvalue_false_node;
+ tree truthvalue_true_node;
+
+ tree ptrdiff_type_node;
+
+ tree unsigned_char_type_node;
+ tree signed_char_type_node;
+ tree wchar_type_node;
+
+ tree char8_type_node;
+ tree char16_type_node;
+ tree char32_type_node;
+
+ tree float_type_node;
+ tree double_type_node;
+ tree long_double_type_node;
+
+ tree complex_integer_type_node;
+ tree complex_float_type_node;
+ tree complex_double_type_node;
+ tree complex_long_double_type_node;
+
+ tree dfloat32_type_node;
+ tree dfloat64_type_node;
+   tree dfloat128_type_node;
+
+ tree intQI_type_node;
+ tree intHI_type_node;
+ tree intSI_type_node;
+ tree intDI_type_node;
+ tree intTI_type_node;
+
+ tree unsigned_intQI_type_node;
+ tree unsigned_intHI_type_node;
+ tree unsigned_intSI_type_node;
+ tree unsigned_intDI_type_node;
+ tree unsigned_intTI_type_node;
+
+ tree widest_integer_literal_type_node;
+ tree widest_unsigned_literal_type_node;
+
+ Nodes for types `void *' and `const void *'.
+
+ tree ptr_type_node, const_ptr_type_node;
+
+ Nodes for types `char *' and `const char *'.
+
+ tree string_type_node, const_string_type_node;
+
+ Type `char[SOMENUMBER]'.
+ Used when an array of char is needed and the size is irrelevant.
+
+ tree char_array_type_node;
+
+ Type `wchar_t[SOMENUMBER]' or something like it.
+ Used when a wide string literal is created.
+
+ tree wchar_array_type_node;
+
+ Type `char8_t[SOMENUMBER]' or something like it.
+ Used when a UTF-8 string literal is created.
+
+ tree char8_array_type_node;
+
+ Type `char16_t[SOMENUMBER]' or something like it.
+ Used when a UTF-16 string literal is created.
+
+ tree char16_array_type_node;
+
+ Type `char32_t[SOMENUMBER]' or something like it.
+ Used when a UTF-32 string literal is created.
+
+ tree char32_array_type_node;
+
+ Type `int ()' -- used for implicit declaration of functions.
+
+ tree default_function_type;
+
+ A VOID_TYPE node, packaged in a TREE_LIST.
+
+ tree void_list_node;
+
+ The lazily created VAR_DECLs for __FUNCTION__, __PRETTY_FUNCTION__,
+   and __func__. (C doesn't generate __FUNCTION__ and __PRETTY_FUNCTION__
+ VAR_DECLS, but C++ does.)
+
+ tree function_name_decl_node;
+ tree pretty_function_name_decl_node;
+ tree c99_function_name_decl_node;
+
+ Stack of nested function name VAR_DECLs.
+
+ tree saved_function_name_decls;
+
+*/
+
+// forked from gcc/cp/module.cc fixed_trees
+
+static GTY (()) vec<tree, va_gc> *fixed_trees;
+
+// forked from gcc/cp/module.cc maybe_add_global
+
+/* VAL is a global tree, add it to the global vec if it is
+ interesting. Add some of its targets, if they too are
+ interesting. We do not add identifiers, as they can be re-found
+ via the identifier hash table. There is a cost to the number of
+ global trees. */
+
+static int
+maybe_add_global (tree val, unsigned &crc)
+{
+ int v = 0;
+
+ if (val && !(TREE_CODE (val) == IDENTIFIER_NODE || TREE_VISITED (val)))
+ {
+ TREE_VISITED (val) = true;
+ crc = crc32_unsigned (crc, fixed_trees->length ());
+ vec_safe_push (fixed_trees, val);
+ v++;
+
+ if (CODE_CONTAINS_STRUCT (TREE_CODE (val), TS_TYPED))
+ v += maybe_add_global (TREE_TYPE (val), crc);
+ if (CODE_CONTAINS_STRUCT (TREE_CODE (val), TS_TYPE_COMMON))
+ v += maybe_add_global (TYPE_NAME (val), crc);
+ }
+
+ return v;
+}
+
+// forked from gcc/cp/module.cc global_tree_arys
+
+/* Global trees. */
+static const std::pair<tree *, unsigned> global_tree_arys[] = {
+ std::pair<tree *, unsigned> (cp_global_trees, CPTI_MODULE_HWM),
+ std::pair<tree *, unsigned> (c_global_trees, CTI_MODULE_HWM),
+};
+
+// forked from gcc/cp/module.cc init_modules
+
+void
+init_modules ()
+{
+ unsigned crc = 0;
+ vec_alloc (fixed_trees, 200);
+
+ const tree *ptr = global_tree_arys[0].first;
+ unsigned limit = global_tree_arys[0].second;
+ for (unsigned ix = 0; ix != limit; ix++, ptr++)
+ {
+ maybe_add_global (*ptr, crc);
+ }
+
+ ptr = global_tree_arys[1].first;
+ limit = global_tree_arys[1].second;
+ for (unsigned ix = 0; ix != limit; ix++, ptr++)
+ {
+ maybe_add_global (*ptr, crc);
+ }
+}
+
+// forked from gcc/cp/constexpr.cc var_in_constexpr_fn
+
+/* True if T was declared in a function declared to be constexpr, and
+ therefore potentially constant in C++14. */
+
+bool
+var_in_constexpr_fn (tree t)
+{
+ tree ctx = DECL_CONTEXT (t);
+ return (ctx && TREE_CODE (ctx) == FUNCTION_DECL
+ && DECL_DECLARED_CONSTEXPR_P (ctx));
+}
+
+// forked from gcc/cp/name-lookup.cc member_vec_linear_search
+
+/* Linear search of (unordered) MEMBER_VEC for NAME. */
+
+static tree
+member_vec_linear_search (vec<tree, va_gc> *member_vec, tree name)
+{
+ for (int ix = member_vec->length (); ix--;)
+ if (tree binding = (*member_vec)[ix])
+ if (OVL_NAME (binding) == name)
+ return binding;
+
+ return NULL_TREE;
+}
+
+// forked from gcc/cp/name-lookup.cc member_vec_binary_search
+
+/* Binary search of (ordered) MEMBER_VEC for NAME. */
+
+static tree
+member_vec_binary_search (vec<tree, va_gc> *member_vec, tree name)
+{
+ for (unsigned lo = 0, hi = member_vec->length (); lo < hi;)
+ {
+ unsigned mid = (lo + hi) / 2;
+ tree binding = (*member_vec)[mid];
+ tree binding_name = OVL_NAME (binding);
+
+ if (binding_name > name)
+ hi = mid;
+ else if (binding_name < name)
+ lo = mid + 1;
+ else
+ return binding;
+ }
+
+ return NULL_TREE;
+}
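+
+// Note (added for clarity, inferred from the comparisons above): the
+// ordering used here compares the raw pointer values of the IDENTIFIER
+// nodes, so the member vector is sorted by name identity, not by the
+// lexicographic order of the spellings.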
+
+// forked from gcc/cp/tree.cc is_overloaded_fn
+
+/* Returns nonzero if X is an expression for a (possibly overloaded)
+ function. If "f" is a function or function template, "f", "c->f",
+ "c.f", "C::f", and "f<int>" will all be considered possibly
+ overloaded functions. Returns 2 if the function is actually
+ overloaded, i.e., if it is impossible to know the type of the
+ function without performing overload resolution. */
+
+int
+is_overloaded_fn (tree x)
+{
+ STRIP_ANY_LOCATION_WRAPPER (x);
+
+ if (TREE_CODE (x) == COMPONENT_REF)
+ x = TREE_OPERAND (x, 1);
+
+ return OVL_P (x);
+}
+
+// forked from gcc/cp/tree.cc ovl_make
+
+/* Make a raw overload node containing FN. */
+
+tree
+ovl_make (tree fn, tree next)
+{
+ tree result = make_node (OVERLOAD);
+
+ if (TREE_CODE (fn) == OVERLOAD)
+ OVL_NESTED_P (result) = true;
+
+ TREE_TYPE (result) = (next ? unknown_type_node : TREE_TYPE (fn));
+ if (next && TREE_CODE (next) == OVERLOAD && OVL_DEDUP_P (next))
+ OVL_DEDUP_P (result) = true;
+ OVL_FUNCTION (result) = fn;
+ OVL_CHAIN (result) = next;
+ return result;
+}
+
+// forked from gcc/cp/name-lookup.cc lookup_add
+
+/* Add a set of new FNS into a lookup. */
+
+tree
+lookup_add (tree fns, tree lookup)
+{
+ if (fns == error_mark_node || lookup == error_mark_node)
+ return error_mark_node;
+
+  if (lookup)
+    lookup = ovl_make (fns, lookup);
+  else
+    lookup = fns;
+
+ return lookup;
+}
+
+// forked from gcc/cp/typeck.cc type_memfn_quals
+
+/* Returns the function-cv-quals for TYPE, which must be a FUNCTION_TYPE or
+ METHOD_TYPE. */
+
+int
+type_memfn_quals (const_tree type)
+{
+ if (TREE_CODE (type) == FUNCTION_TYPE)
+ return TYPE_QUALS (type);
+ else if (TREE_CODE (type) == METHOD_TYPE)
+ return rs_type_quals (class_of_this_parm (type));
+ else
+ gcc_unreachable ();
+}
+
+// forked from gcc/cp/pt.cc find_parameter_pack_data
+
+/* Structure used to track the progress of find_parameter_packs_r. */
+struct find_parameter_pack_data
+{
+ /* TREE_LIST that will contain all of the parameter packs found by
+ the traversal. */
+ tree *parameter_packs;
+
+ /* Set of AST nodes that have been visited by the traversal. */
+ hash_set<tree> *visited;
+
+ /* True iff we're making a type pack expansion. */
+ bool type_pack_expansion_p;
+
+ /* True iff we found a subtree that has the extra args mechanism. */
+ bool found_extra_args_tree_p = false;
+};
+
+// forked from gcc/cp/lex.cc conv_type_hasher
+
+/* Hasher for the conversion operator name hash table. */
+struct conv_type_hasher : ggc_ptr_hash<tree_node>
+{
+ /* Hash NODE, an identifier node in the table. TYPE_UID is
+ suitable, as we're not concerned about matching canonicalness
+ here. */
+ static hashval_t hash (tree node)
+ {
+ return (hashval_t) TYPE_UID (TREE_TYPE (node));
+ }
+
+ /* Compare NODE, an identifier node in the table, against TYPE, an
+ incoming TYPE being looked up. */
+ static bool equal (tree node, tree type) { return TREE_TYPE (node) == type; }
+};
+
+static GTY (()) hash_table<conv_type_hasher> *conv_type_names;
+
+// forked from gcc/cp/lex.cc make_conv_op_name
+
+/* Return an identifier for a conversion operator to TYPE. We can get
+ from the returned identifier to the type. We store TYPE, which is
+ not necessarily the canonical type, which allows us to report the
+   form the user used in error messages. All these identifiers are
+   outside the identifier hash table, and all have the same
+   IDENTIFIER_STRING. */
+
+tree
+make_conv_op_name (tree type)
+{
+ if (type == error_mark_node)
+ return error_mark_node;
+
+ if (conv_type_names == NULL)
+ conv_type_names = hash_table<conv_type_hasher>::create_ggc (31);
+
+ tree *slot
+ = conv_type_names->find_slot_with_hash (type, (hashval_t) TYPE_UID (type),
+ INSERT);
+ tree identifier = *slot;
+ if (!identifier)
+ {
+ /* Create a raw IDENTIFIER outside of the identifier hash
+ table. */
+ identifier = copy_node (conv_op_identifier);
+
+ /* Just in case something managed to bind. */
+ IDENTIFIER_BINDING (identifier) = NULL;
+
+ /* Hang TYPE off the identifier so it can be found easily later
+ when performing conversions. */
+ TREE_TYPE (identifier) = type;
+
+ *slot = identifier;
+ }
+
+ return identifier;
+}
+
+// forked from gcc/cp/pt.cc builtin_pack_fn_p
+
+/* True iff FN is a function representing a built-in variadic parameter
+ pack. */
+
+bool
+builtin_pack_fn_p (tree fn)
+{
+ if (!fn || TREE_CODE (fn) != FUNCTION_DECL
+ || !DECL_IS_UNDECLARED_BUILTIN (fn))
+ return false;
+
+ if (id_equal (DECL_NAME (fn), "__integer_pack"))
+ return true;
+
+ return false;
+}
+
+// forked from gcc/cp/pt.cc builtin_pack_call_p
+
+/* True iff CALL is a call to a function representing a built-in variadic
+ parameter pack. */
+
+static bool
+builtin_pack_call_p (tree call)
+{
+ if (TREE_CODE (call) != CALL_EXPR)
+ return false;
+ return builtin_pack_fn_p (CALL_EXPR_FN (call));
+}
+
+//// forked from gcc/cp/pt.cc has_extra_args_mechanism_p
+//
+///* Return true if the tree T has the extra args mechanism for
+// avoiding partial instantiation. */
+//
+// static bool
+// has_extra_args_mechanism_p (const_tree t)
+//{
+// return false;
+//}
+
+// forked from gcc/cp/pt.cc find_parameter_packs_r
+
+/* Identifies all of the argument packs that occur in a template
+ argument and appends them to the TREE_LIST inside DATA, which is a
+ find_parameter_pack_data structure. This is a subroutine of
+ make_pack_expansion and uses_parameter_packs. */
+static tree
+find_parameter_packs_r (tree *tp, int *walk_subtrees, void *data)
+{
+ tree t = *tp;
+ struct find_parameter_pack_data *ppd
+ = (struct find_parameter_pack_data *) data;
+ bool parameter_pack_p = false;
+
+#define WALK_SUBTREE(NODE) \
+ rs_walk_tree (&(NODE), &find_parameter_packs_r, ppd, ppd->visited)
+
+ /* Don't look through typedefs; we are interested in whether a
+ parameter pack is actually written in the expression/type we're
+ looking at, not the target type. */
+ if (TYPE_P (t) && typedef_variant_p (t))
+ {
+ *walk_subtrees = 0;
+ return NULL_TREE;
+ }
+
+ /* Identify whether this is a parameter pack or not. */
+ switch (TREE_CODE (t))
+ {
+ case FIELD_DECL:
+ case PARM_DECL:
+ break;
+
+ case VAR_DECL:
+ break;
+
+ case CALL_EXPR:
+ if (builtin_pack_call_p (t))
+ parameter_pack_p = true;
+ break;
+
+ case BASES:
+ parameter_pack_p = true;
+ break;
+ default:
+ /* Not a parameter pack. */
+ break;
+ }
+
+ if (parameter_pack_p)
+ {
+ /* Add this parameter pack to the list. */
+ *ppd->parameter_packs = tree_cons (NULL_TREE, t, *ppd->parameter_packs);
+ }
+
+ if (TYPE_P (t))
+ rs_walk_tree (&TYPE_CONTEXT (t), &find_parameter_packs_r, ppd,
+ ppd->visited);
+
+ /* This switch statement will return immediately if we don't find a
+ parameter pack. ??? Should some of these be in cp_walk_subtrees? */
+ switch (TREE_CODE (t))
+ {
+ case DECL_EXPR: {
+ tree decl = DECL_EXPR_DECL (t);
+ if (is_typedef_decl (decl))
+ /* Since we stop at typedefs above, we need to look through them at
+ the point of the DECL_EXPR. */
+ rs_walk_tree (&DECL_ORIGINAL_TYPE (decl), &find_parameter_packs_r,
+ ppd, ppd->visited);
+ return NULL_TREE;
+ }
+
+ case INTEGER_TYPE:
+ rs_walk_tree (&TYPE_MAX_VALUE (t), &find_parameter_packs_r, ppd,
+ ppd->visited);
+ *walk_subtrees = 0;
+ return NULL_TREE;
+
+ case IDENTIFIER_NODE:
+ rs_walk_tree (&TREE_TYPE (t), &find_parameter_packs_r, ppd, ppd->visited);
+ *walk_subtrees = 0;
+ return NULL_TREE;
+
+ case DECLTYPE_TYPE: {
+ /* When traversing a DECLTYPE_TYPE_EXPR, we need to set
+ type_pack_expansion_p to false so that any placeholders
+ within the expression don't get marked as parameter packs. */
+ bool type_pack_expansion_p = ppd->type_pack_expansion_p;
+ ppd->type_pack_expansion_p = false;
+ rs_walk_tree (&DECLTYPE_TYPE_EXPR (t), &find_parameter_packs_r, ppd,
+ ppd->visited);
+ ppd->type_pack_expansion_p = type_pack_expansion_p;
+ *walk_subtrees = 0;
+ return NULL_TREE;
+ }
+
+ case IF_STMT:
+ rs_walk_tree (&IF_COND (t), &find_parameter_packs_r, ppd, ppd->visited);
+ rs_walk_tree (&THEN_CLAUSE (t), &find_parameter_packs_r, ppd,
+ ppd->visited);
+ rs_walk_tree (&ELSE_CLAUSE (t), &find_parameter_packs_r, ppd,
+ ppd->visited);
+ /* Don't walk into IF_STMT_EXTRA_ARGS. */
+ *walk_subtrees = 0;
+ return NULL_TREE;
+
+ case FUNCTION_TYPE:
+ case METHOD_TYPE:
+ WALK_SUBTREE (TYPE_RAISES_EXCEPTIONS (t));
+ break;
+
+ default:
+ return NULL_TREE;
+ }
+
+#undef WALK_SUBTREE
+
+ return NULL_TREE;
+}
+
+// forked from gcc/cp/typeck.cc type_memfn_rqual
+
+/* Returns the function-ref-qualifier for TYPE */
+
+rs_ref_qualifier
+type_memfn_rqual (const_tree type)
+{
+ gcc_assert (FUNC_OR_METHOD_TYPE_P (type));
+
+ if (!FUNCTION_REF_QUALIFIED (type))
+ return REF_QUAL_NONE;
+ else if (FUNCTION_RVALUE_QUALIFIED (type))
+ return REF_QUAL_RVALUE;
+ else
+ return REF_QUAL_LVALUE;
+}
+
+// forked from gcc/cp/lex.cc maybe_add_lang_type_raw
+
+/* Add a raw lang_type to T, a type, should it need one. */
+
+bool
+maybe_add_lang_type_raw (tree t)
+{
+ if (!RECORD_OR_UNION_CODE_P (TREE_CODE (t)))
+ return false;
+
+ auto *lt = (struct lang_type *) (ggc_internal_cleared_alloc (
+ sizeof (struct lang_type)));
+ TYPE_LANG_SPECIFIC (t) = lt;
+
+ if (GATHER_STATISTICS)
+ {
+ tree_node_counts[(int) lang_type] += 1;
+ tree_node_sizes[(int) lang_type] += sizeof (struct lang_type);
+ }
+
+ return true;
+}
+
+// forked from gcc/c-family/c-lex.cc get_fileinfo
+
+static splay_tree file_info_tree;
+
+struct c_fileinfo *
+get_fileinfo (const char *name)
+{
+ splay_tree_node n;
+ struct c_fileinfo *fi;
+
+ if (!file_info_tree)
+ file_info_tree = splay_tree_new (splay_tree_compare_strings, 0,
+ splay_tree_delete_pointers);
+
+ n = splay_tree_lookup (file_info_tree, (splay_tree_key) name);
+ if (n)
+ return (struct c_fileinfo *) n->value;
+
+ fi = XNEW (struct c_fileinfo);
+ fi->time = 0;
+ fi->interface_only = 0;
+ fi->interface_unknown = 1;
+ splay_tree_insert (file_info_tree, (splay_tree_key) name,
+ (splay_tree_value) fi);
+ return fi;
+}
+
+// forked from gcc/cp/lex.cc cxx_make_type
+
+tree
+cxx_make_type (enum tree_code code MEM_STAT_DECL)
+{
+ tree t = make_node (code PASS_MEM_STAT);
+
+ if (maybe_add_lang_type_raw (t))
+ {
+ /* Set up some flags that give proper default behavior. */
+ struct c_fileinfo *finfo = get_fileinfo (LOCATION_FILE (input_location));
+ SET_CLASSTYPE_INTERFACE_UNKNOWN_X (t, finfo->interface_unknown);
+ CLASSTYPE_INTERFACE_ONLY (t) = finfo->interface_only;
+ }
+
+ if (code == RECORD_TYPE || code == UNION_TYPE)
+ TYPE_CXX_ODR_P (t) = 1;
+
+ return t;
+}
+
+// forked from gcc/cp/tree.cc build_min_array_type
+
+/* Build an ARRAY_TYPE without laying it out. */
+
+static tree
+build_min_array_type (tree elt_type, tree index_type)
+{
+ tree t = cxx_make_type (ARRAY_TYPE);
+ TREE_TYPE (t) = elt_type;
+ TYPE_DOMAIN (t) = index_type;
+ return t;
+}
+
+// forked from gcc/cp/name-lookup.cc fields_linear_search
+
+/* Linear search of (partially ordered) fields of KLASS for NAME. */
+
+static tree
+fields_linear_search (tree klass, tree name, bool want_type)
+{
+ for (tree fields = TYPE_FIELDS (klass); fields; fields = DECL_CHAIN (fields))
+ {
+ tree decl = fields;
+
+ if (DECL_NAME (decl) != name)
+ continue;
+
+ if (DECL_DECLARES_FUNCTION_P (decl))
+ /* Functions are found separately. */
+ continue;
+
+ if (!want_type || DECL_DECLARES_TYPE_P (decl))
+ return decl;
+ }
+
+ return NULL_TREE;
+}
+
+// forked from gcc/cp/except.cc nothrow_spec_p
+
+/* Return true iff SPEC is throw() or noexcept(true). */
+
+bool
+nothrow_spec_p (const_tree spec)
+{
+ if (spec == empty_except_spec || spec == noexcept_true_spec)
+ return true;
+
+ gcc_assert (!spec || TREE_VALUE (spec) || spec == noexcept_false_spec
+ || TREE_PURPOSE (spec) == error_mark_node);
+
+ return false;
+}
+
+// forked from gcc/cp/tree.cc maybe_get_fns
+
+/* Get the overload set FROM refers to. Returns NULL if it's not an
+ overload set. */
+
+tree
+maybe_get_fns (tree from)
+{
+ STRIP_ANY_LOCATION_WRAPPER (from);
+
+ /* A baselink is also considered an overloaded function. */
+ if (TREE_CODE (from) == COMPONENT_REF)
+ from = TREE_OPERAND (from, 1);
+
+ if (OVL_P (from))
+ return from;
+
+ return NULL;
+}
+
+// forked from gcc/cp/tree.cc get_fns
+
+/* FROM refers to an overload set. Return that set (or die). */
+
+tree
+get_fns (tree from)
+{
+ tree res = maybe_get_fns (from);
+
+ gcc_assert (res);
+ return res;
+}
+
+// forked from gcc/cp/tree.cc get_first_fn
+
+/* Return the first function of the overload set FROM refers to. */
+
+tree
+get_first_fn (tree from)
+{
+ return OVL_FIRST (get_fns (from));
+}
+
+// forked from gcc/cp/tree.cc dependent_name
+
+/* X is the CALL_EXPR_FN of a CALL_EXPR. If X represents a dependent name
+ (14.6.2), return the IDENTIFIER_NODE for that name. Otherwise, return
+ NULL_TREE. */
+
+tree
+dependent_name (tree x)
+{
+ /* FIXME a dependent name must be unqualified, but this function doesn't
+ distinguish between qualified and unqualified identifiers. */
+ if (identifier_p (x))
+ return x;
+
+ if (OVL_P (x))
+ return OVL_NAME (x);
+ return NULL_TREE;
+}
+
+// forked from gcc/cp/tree.cc called_fns_equal
+
+/* Subroutine of rs_tree_equal: t1 and t2 are the CALL_EXPR_FNs of two
+ CALL_EXPRS. Return whether they are equivalent. */
+
+static bool
+called_fns_equal (tree t1, tree t2)
+{
+ /* Core 1321: dependent names are equivalent even if the overload sets
+ are different. But do compare explicit template arguments. */
+ tree name1 = dependent_name (t1);
+ tree name2 = dependent_name (t2);
+ if (name1 || name2)
+ {
+ tree targs1 = NULL_TREE, targs2 = NULL_TREE;
+
+ if (name1 != name2)
+ return false;
+
+ /* FIXME dependent_name currently returns an unqualified name regardless
+ of whether the function was named with a qualified- or unqualified-id.
+ Until that's fixed, check that we aren't looking at overload sets from
+ different scopes. */
+ if (is_overloaded_fn (t1) && is_overloaded_fn (t2)
+ && (DECL_CONTEXT (get_first_fn (t1))
+ != DECL_CONTEXT (get_first_fn (t2))))
+ return false;
+
+ return rs_tree_equal (targs1, targs2);
+ }
+ else
+ return rs_tree_equal (t1, t2);
+}
+
+// forked from gcc/cp/tree.cc canonical_eh_spec
+
+/* Return the canonical version of exception-specification RAISES for a C++17
+ function type, for use in type comparison and building TYPE_CANONICAL. */
+
+tree
+canonical_eh_spec (tree raises)
+{
+ if (raises == NULL_TREE)
+ return raises;
+ else if (nothrow_spec_p (raises))
+ /* throw() -> noexcept. */
+ return noexcept_true_spec;
+ else
+ /* For C++17 type matching, anything else -> nothing. */
+ return NULL_TREE;
+}
+
+/* Like cp_tree_operand_length, but takes a tree_code CODE. */
+
+int
+rs_tree_code_length (enum tree_code code)
+{
+ gcc_assert (TREE_CODE_CLASS (code) != tcc_vl_exp);
+
+ switch (code)
+ {
+ case PREINCREMENT_EXPR:
+ case PREDECREMENT_EXPR:
+ case POSTINCREMENT_EXPR:
+ case POSTDECREMENT_EXPR:
+ return 1;
+
+ case ARRAY_REF:
+ return 2;
+
+ default:
+ return TREE_CODE_LENGTH (code);
+ }
+}
+
+// forked from gcc/cp/tree.cc rs_tree_operand_length
+
+/* Return the number of operands in T that we care about for things like
+ mangling. */
+
+int
+rs_tree_operand_length (const_tree t)
+{
+ enum tree_code code = TREE_CODE (t);
+
+ if (TREE_CODE_CLASS (code) == tcc_vl_exp)
+ return VL_EXP_OPERAND_LENGTH (t);
+
+ return rs_tree_code_length (code);
+}
+
+// forked from gcc/cp/tree.cc cp_tree_equal
+
+/* Return truthvalue of whether T1 is the same tree structure as T2.
+ Return 1 if they are the same. Return 0 if they are different. */
+
+bool
+rs_tree_equal (tree t1, tree t2)
+{
+ enum tree_code code1, code2;
+
+ if (t1 == t2)
+ return true;
+ if (!t1 || !t2)
+ return false;
+
+ code1 = TREE_CODE (t1);
+ code2 = TREE_CODE (t2);
+
+ if (code1 != code2)
+ return false;
+
+ if (CONSTANT_CLASS_P (t1) && !same_type_p (TREE_TYPE (t1), TREE_TYPE (t2)))
+ return false;
+
+ switch (code1)
+ {
+ case VOID_CST:
+ /* There's only a single VOID_CST node, so we should never reach
+ here. */
+ gcc_unreachable ();
+
+ case INTEGER_CST:
+ return tree_int_cst_equal (t1, t2);
+
+ case REAL_CST:
+ return real_identical (&TREE_REAL_CST (t1), &TREE_REAL_CST (t2));
+
+ case STRING_CST:
+ return TREE_STRING_LENGTH (t1) == TREE_STRING_LENGTH (t2)
+ && !memcmp (TREE_STRING_POINTER (t1), TREE_STRING_POINTER (t2),
+ TREE_STRING_LENGTH (t1));
+
+ case FIXED_CST:
+ return FIXED_VALUES_IDENTICAL (TREE_FIXED_CST (t1), TREE_FIXED_CST (t2));
+
+ case COMPLEX_CST:
+ return rs_tree_equal (TREE_REALPART (t1), TREE_REALPART (t2))
+ && rs_tree_equal (TREE_IMAGPART (t1), TREE_IMAGPART (t2));
+
+ case VECTOR_CST:
+ return operand_equal_p (t1, t2, OEP_ONLY_CONST);
+
+ case CONSTRUCTOR:
+ /* We need to do this when determining whether or not two
+ non-type pointer to member function template arguments
+ are the same. */
+ if (!same_type_p (TREE_TYPE (t1), TREE_TYPE (t2))
+ || CONSTRUCTOR_NELTS (t1) != CONSTRUCTOR_NELTS (t2))
+ return false;
+ {
+ tree field, value;
+ unsigned int i;
+ FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (t1), i, field, value)
+ {
+ constructor_elt *elt2 = CONSTRUCTOR_ELT (t2, i);
+ if (!rs_tree_equal (field, elt2->index)
+ || !rs_tree_equal (value, elt2->value))
+ return false;
+ }
+ }
+ return true;
+
+ case TREE_LIST:
+ if (!rs_tree_equal (TREE_PURPOSE (t1), TREE_PURPOSE (t2)))
+ return false;
+ if (!rs_tree_equal (TREE_VALUE (t1), TREE_VALUE (t2)))
+ return false;
+ return rs_tree_equal (TREE_CHAIN (t1), TREE_CHAIN (t2));
+
+ case SAVE_EXPR:
+ return rs_tree_equal (TREE_OPERAND (t1, 0), TREE_OPERAND (t2, 0));
+
+ case CALL_EXPR: {
+ if (KOENIG_LOOKUP_P (t1) != KOENIG_LOOKUP_P (t2))
+ return false;
+
+ if (!called_fns_equal (CALL_EXPR_FN (t1), CALL_EXPR_FN (t2)))
+ return false;
+
+ call_expr_arg_iterator iter1, iter2;
+ init_call_expr_arg_iterator (t1, &iter1);
+ init_call_expr_arg_iterator (t2, &iter2);
+ if (iter1.n != iter2.n)
+ return false;
+
+ while (more_call_expr_args_p (&iter1))
+ {
+ tree arg1 = next_call_expr_arg (&iter1);
+ tree arg2 = next_call_expr_arg (&iter2);
+
+ gcc_checking_assert (arg1 && arg2);
+ if (!rs_tree_equal (arg1, arg2))
+ return false;
+ }
+
+ return true;
+ }
+
+ case TARGET_EXPR: {
+ tree o1 = TREE_OPERAND (t1, 0);
+ tree o2 = TREE_OPERAND (t2, 0);
+
+ /* Special case: if either target is an unallocated VAR_DECL,
+ it means that it's going to be unified with whatever the
+ TARGET_EXPR is really supposed to initialize, so treat it
+ as being equivalent to anything. */
+ if (VAR_P (o1) && DECL_NAME (o1) == NULL_TREE && !DECL_RTL_SET_P (o1))
+ /*Nop*/;
+ else if (VAR_P (o2) && DECL_NAME (o2) == NULL_TREE
+ && !DECL_RTL_SET_P (o2))
+ /*Nop*/;
+ else if (!rs_tree_equal (o1, o2))
+ return false;
+
+ return rs_tree_equal (TREE_OPERAND (t1, 1), TREE_OPERAND (t2, 1));
+ }
+
+ case PARM_DECL:
+ /* For comparing uses of parameters in late-specified return types
+ with an out-of-class definition of the function, but can also come
+ up for expressions that involve 'this' in a member function
+ template. */
+
+ if (same_type_p (TREE_TYPE (t1), TREE_TYPE (t2)))
+ {
+ if (DECL_ARTIFICIAL (t1) ^ DECL_ARTIFICIAL (t2))
+ return false;
+ if (CONSTRAINT_VAR_P (t1) ^ CONSTRAINT_VAR_P (t2))
+ return false;
+ if (DECL_ARTIFICIAL (t1)
+ || (DECL_PARM_LEVEL (t1) == DECL_PARM_LEVEL (t2)
+ && DECL_PARM_INDEX (t1) == DECL_PARM_INDEX (t2)))
+ return true;
+ }
+ return false;
+
+ case VAR_DECL:
+ case CONST_DECL:
+ case FIELD_DECL:
+ case FUNCTION_DECL:
+ case IDENTIFIER_NODE:
+ case SSA_NAME:
+ return false;
+
+ case TREE_VEC:
+ return true;
+
+ case NON_LVALUE_EXPR:
+ case VIEW_CONVERT_EXPR:
+ /* Used for location wrappers with possibly NULL types. */
+ if (!TREE_TYPE (t1) || !TREE_TYPE (t2))
+ {
+ if (TREE_TYPE (t1) || TREE_TYPE (t2))
+ return false;
+ break;
+	}
+      /* FALLTHROUGH */
+
+ default:
+ break;
+ }
+
+ switch (TREE_CODE_CLASS (code1))
+ {
+ case tcc_unary:
+ case tcc_binary:
+ case tcc_comparison:
+ case tcc_expression:
+ case tcc_vl_exp:
+ case tcc_reference:
+ case tcc_statement: {
+ int n = rs_tree_operand_length (t1);
+ if (TREE_CODE_CLASS (code1) == tcc_vl_exp
+ && n != TREE_OPERAND_LENGTH (t2))
+ return false;
+
+ for (int i = 0; i < n; ++i)
+ if (!rs_tree_equal (TREE_OPERAND (t1, i), TREE_OPERAND (t2, i)))
+ return false;
+
+ return true;
+ }
+
+ case tcc_type:
+ return same_type_p (t1, t2);
+
+ default:
+ gcc_unreachable ();
+ }
+
+ /* We can get here with --disable-checking. */
+ return false;
+}
+
+// forked from gcc/cp/class.cc publicly_uniquely_derived_p
+
+/* TRUE iff TYPE is publicly & uniquely derived from PARENT. */
+
+bool publicly_uniquely_derived_p (tree, tree) { return false; }
+
+// forked from gcc/cp/typeck.cc comp_except_types
+
+/* Compare two exception specifier types for exactness or subsetness, if
+ allowed. Returns false for mismatch, true for match (same, or
+ derived and !exact).
+
+ [except.spec] "If a class X ... objects of class X or any class publicly
+ and unambiguously derived from X. Similarly, if a pointer type Y * ...
+ exceptions of type Y * or that are pointers to any type publicly and
+ unambiguously derived from Y. Otherwise a function only allows exceptions
+ that have the same type ..."
+ This does not mention cv qualifiers and is different to what throw
+ [except.throw] and catch [except.catch] will do. They will ignore the
+ top level cv qualifiers, and allow qualifiers in the pointer to class
+ example.
+
+ We implement the letter of the standard. */
+
+static bool
+comp_except_types (tree a, tree b, bool exact)
+{
+ if (same_type_p (a, b))
+ return true;
+ else if (!exact)
+ {
+ if (rs_type_quals (a) || rs_type_quals (b))
+ return false;
+
+ if (TYPE_PTR_P (a) && TYPE_PTR_P (b))
+ {
+ a = TREE_TYPE (a);
+ b = TREE_TYPE (b);
+ if (rs_type_quals (a) || rs_type_quals (b))
+ return false;
+ }
+
+ if (TREE_CODE (a) != RECORD_TYPE || TREE_CODE (b) != RECORD_TYPE)
+ return false;
+
+ if (publicly_uniquely_derived_p (a, b))
+ return true;
+ }
+ return false;
+}
+
+// forked from gcc/cp/typeck.cc comp_except_specs
+
+/* Return true if TYPE1 and TYPE2 are equivalent exception specifiers.
+ If EXACT is ce_derived, T2 can be stricter than T1 (according to 15.4/5).
+ If EXACT is ce_type, the C++17 type compatibility rules apply.
+ If EXACT is ce_normal, the compatibility rules in 15.4/3 apply.
+ If EXACT is ce_exact, the specs must be exactly the same. Exception lists
+ are unordered, but we've already filtered out duplicates. Most lists will
+ be in order, we should try to make use of that. */
+
+bool
+comp_except_specs (const_tree t1, const_tree t2, int exact)
+{
+ const_tree probe;
+ const_tree base;
+ int length = 0;
+
+ if (t1 == t2)
+ return true;
+
+ /* First handle noexcept. */
+ if (exact < ce_exact)
+ {
+ if (exact == ce_type
+ && (canonical_eh_spec (CONST_CAST_TREE (t1))
+ == canonical_eh_spec (CONST_CAST_TREE (t2))))
+ return true;
+
+ /* noexcept(false) is compatible with no exception-specification,
+ and less strict than any spec. */
+ if (t1 == noexcept_false_spec)
+ return t2 == NULL_TREE || exact == ce_derived;
+ /* Even a derived noexcept(false) is compatible with no
+ exception-specification. */
+ if (t2 == noexcept_false_spec)
+ return t1 == NULL_TREE;
+
+ /* Otherwise, if we aren't looking for an exact match, noexcept is
+ equivalent to throw(). */
+ if (t1 == noexcept_true_spec)
+ t1 = empty_except_spec;
+ if (t2 == noexcept_true_spec)
+ t2 = empty_except_spec;
+ }
+
+ /* If any noexcept is left, it is only comparable to itself;
+ either we're looking for an exact match or we're redeclaring a
+ template with dependent noexcept. */
+ if ((t1 && TREE_PURPOSE (t1)) || (t2 && TREE_PURPOSE (t2)))
+ return (t1 && t2 && rs_tree_equal (TREE_PURPOSE (t1), TREE_PURPOSE (t2)));
+
+ if (t1 == NULL_TREE) /* T1 is ... */
+ return t2 == NULL_TREE || exact == ce_derived;
+ if (!TREE_VALUE (t1)) /* t1 is EMPTY */
+ return t2 != NULL_TREE && !TREE_VALUE (t2);
+ if (t2 == NULL_TREE) /* T2 is ... */
+ return false;
+ if (TREE_VALUE (t1) && !TREE_VALUE (t2)) /* T2 is EMPTY, T1 is not */
+ return exact == ce_derived;
+
+ /* Neither set is ... or EMPTY, make sure each part of T2 is in T1.
+ Count how many we find, to determine exactness. For exact matching and
+ ordered T1, T2, this is an O(n) operation, otherwise its worst case is
+ O(nm). */
+ for (base = t1; t2 != NULL_TREE; t2 = TREE_CHAIN (t2))
+ {
+ for (probe = base; probe != NULL_TREE; probe = TREE_CHAIN (probe))
+ {
+ tree a = TREE_VALUE (probe);
+ tree b = TREE_VALUE (t2);
+
+ if (comp_except_types (a, b, exact))
+ {
+ if (probe == base && exact > ce_derived)
+ base = TREE_CHAIN (probe);
+ length++;
+ break;
+ }
+ }
+ if (probe == NULL_TREE)
+ return false;
+ }
+ return exact == ce_derived || base == NULL_TREE || length == list_length (t1);
+}
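+
+// Worked example (illustration only): with EXACT below ce_exact, a
+// noexcept(true) specification is canonicalized to throw() above, so
+// `void f() throw()` and `void f() noexcept` compare as equivalent
+// exception specifications.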
+
+// forked from gcc/cp/typeck.cc compparms
+
+/* Subroutines of `comptypes'. */
+
+/* Return true if two parameter type lists PARMS1 and PARMS2 are
+ equivalent in the sense that functions with those parameter types
+ can have equivalent types. The two lists must be equivalent,
+ element by element. */
+
+bool
+compparms (const_tree parms1, const_tree parms2)
+{
+ const_tree t1, t2;
+
+ /* An unspecified parmlist matches any specified parmlist
+ whose argument types don't need default promotions. */
+
+ for (t1 = parms1, t2 = parms2; t1 || t2;
+ t1 = TREE_CHAIN (t1), t2 = TREE_CHAIN (t2))
+ {
+ /* If one parmlist is shorter than the other,
+ they fail to match. */
+ if (!t1 || !t2)
+ return false;
+ if (!same_type_p (TREE_VALUE (t1), TREE_VALUE (t2)))
+ return false;
+ }
+ return true;
+}
+
+/* Set TYPE_CANONICAL like build_array_type_1, but using
+ build_cplus_array_type. */
+
+static void
+set_array_type_canon (tree t, tree elt_type, tree index_type, bool dep)
+{
+ /* Set the canonical type for this new node. */
+ if (TYPE_STRUCTURAL_EQUALITY_P (elt_type)
+ || (index_type && TYPE_STRUCTURAL_EQUALITY_P (index_type)))
+ SET_TYPE_STRUCTURAL_EQUALITY (t);
+ else if (TYPE_CANONICAL (elt_type) != elt_type
+ || (index_type && TYPE_CANONICAL (index_type) != index_type))
+ TYPE_CANONICAL (t)
+ = build_cplus_array_type (TYPE_CANONICAL (elt_type),
+ index_type ? TYPE_CANONICAL (index_type)
+ : index_type,
+ dep);
+ else
+ TYPE_CANONICAL (t) = t;
+}
+
+// forked from gcc/cp/tree.cc cplus_array_info
+
+struct cplus_array_info
+{
+ tree type;
+ tree domain;
+};
+
+// forked from gcc/cp/tree.cc cplus_array_hasher
+
+struct cplus_array_hasher : ggc_ptr_hash<tree_node>
+{
+ typedef cplus_array_info *compare_type;
+
+ static hashval_t hash (tree t);
+ static bool equal (tree, cplus_array_info *);
+};
+
+/* Hash the ARRAY_TYPE T. */
+
+hashval_t
+cplus_array_hasher::hash (tree t)
+{
+ hashval_t hash;
+
+ hash = TYPE_UID (TREE_TYPE (t));
+ if (TYPE_DOMAIN (t))
+ hash ^= TYPE_UID (TYPE_DOMAIN (t));
+ return hash;
+}
+
+/* Compare the ARRAY_TYPE T1 against the element type and domain
+   recorded in the `cplus_array_info' T2. */
+
+bool
+cplus_array_hasher::equal (tree t1, cplus_array_info *t2)
+{
+ return (TREE_TYPE (t1) == t2->type && TYPE_DOMAIN (t1) == t2->domain);
+}
+
+// forked from gcc/cp/tree.cc cplus_array_htab
+
+/* Hash table containing dependent array types, which are unsuitable for
+ the language-independent type hash table. */
+static GTY (()) hash_table<cplus_array_hasher> *cplus_array_htab;
+
+// forked from gcc/cp/tree.cc is_byte_access_type
+
+/* Returns true if TYPE is char, unsigned char, or std::byte. */
+
+bool
+is_byte_access_type (tree type)
+{
+ type = TYPE_MAIN_VARIANT (type);
+ if (type == char_type_node || type == unsigned_char_type_node)
+ return true;
+
+ return (TREE_CODE (type) == ENUMERAL_TYPE && TYPE_CONTEXT (type) == std_node
+ && !strcmp ("byte", TYPE_NAME_STRING (type)));
+}
+
+// forked from gcc/cp/tree.cc build_cplus_array_type
+
+/* Like build_array_type, but handle special C++ semantics: an array of a
+   variant element type is a variant of the array of the main variant of
+   the element type. DEPENDENT is negative if we should determine the
+   dependency ourselves; otherwise its boolean value indicates whether the
+   type is dependent. */
+
+tree
+build_cplus_array_type (tree elt_type, tree index_type, int dependent)
+{
+ tree t;
+
+ if (elt_type == error_mark_node || index_type == error_mark_node)
+ return error_mark_node;
+
+ if (dependent < 0)
+ dependent = 0;
+
+ if (elt_type != TYPE_MAIN_VARIANT (elt_type))
+ /* Start with an array of the TYPE_MAIN_VARIANT. */
+ t = build_cplus_array_type (TYPE_MAIN_VARIANT (elt_type), index_type,
+ dependent);
+ else if (dependent)
+ {
+ /* Since type_hash_canon calls layout_type, we need to use our own
+ hash table. */
+ cplus_array_info cai;
+ hashval_t hash;
+
+ if (cplus_array_htab == NULL)
+ cplus_array_htab = hash_table<cplus_array_hasher>::create_ggc (61);
+
+ hash = TYPE_UID (elt_type);
+ if (index_type)
+ hash ^= TYPE_UID (index_type);
+ cai.type = elt_type;
+ cai.domain = index_type;
+
+ tree *e = cplus_array_htab->find_slot_with_hash (&cai, hash, INSERT);
+ if (*e)
+ /* We have found the type: we're done. */
+ return (tree) *e;
+ else
+ {
+ /* Build a new array type. */
+ t = build_min_array_type (elt_type, index_type);
+
+ /* Store it in the hash table. */
+ *e = t;
+
+ /* Set the canonical type for this new node. */
+ set_array_type_canon (t, elt_type, index_type, dependent);
+
+ /* Mark it as dependent now, this saves time later. */
+ TYPE_DEPENDENT_P_VALID (t) = true;
+ TYPE_DEPENDENT_P (t) = true;
+ }
+ }
+ else
+ {
+ bool typeless_storage = is_byte_access_type (elt_type);
+ t = build_array_type (elt_type, index_type, typeless_storage);
+
+      /* Mark as non-dependent now, this will save time later. */
+ TYPE_DEPENDENT_P_VALID (t) = true;
+ }
+
+ /* Now check whether we already have this array variant. */
+ if (elt_type != TYPE_MAIN_VARIANT (elt_type))
+ {
+ tree m = t;
+ for (t = m; t; t = TYPE_NEXT_VARIANT (t))
+ if (TREE_TYPE (t) == elt_type && TYPE_NAME (t) == NULL_TREE
+ && TYPE_ATTRIBUTES (t) == NULL_TREE)
+ break;
+ if (!t)
+ {
+ t = build_min_array_type (elt_type, index_type);
+ /* Mark dependency now, this saves time later. */
+ TYPE_DEPENDENT_P_VALID (t) = true;
+ TYPE_DEPENDENT_P (t) = dependent;
+ set_array_type_canon (t, elt_type, index_type, dependent);
+ if (!dependent)
+ {
+ layout_type (t);
+ /* Make sure sizes are shared with the main variant.
+ layout_type can't be called after setting TYPE_NEXT_VARIANT,
+ as it will overwrite alignment etc. of all variants. */
+ TYPE_SIZE (t) = TYPE_SIZE (m);
+ TYPE_SIZE_UNIT (t) = TYPE_SIZE_UNIT (m);
+ TYPE_TYPELESS_STORAGE (t) = TYPE_TYPELESS_STORAGE (m);
+ }
+
+ TYPE_MAIN_VARIANT (t) = m;
+ TYPE_NEXT_VARIANT (t) = TYPE_NEXT_VARIANT (m);
+ TYPE_NEXT_VARIANT (m) = t;
+ }
+ }
+
+ /* Avoid spurious warnings with VLAs (c++/54583). */
+ if (TYPE_SIZE (t) && EXPR_P (TYPE_SIZE (t)))
+ suppress_warning (TYPE_SIZE (t), OPT_Wunused);
+
+ /* Push these needs up to the ARRAY_TYPE so that initialization takes
+ place more easily. */
+ bool needs_ctor
+ = (TYPE_NEEDS_CONSTRUCTING (t) = TYPE_NEEDS_CONSTRUCTING (elt_type));
+ bool needs_dtor = (TYPE_HAS_NONTRIVIAL_DESTRUCTOR (t)
+ = TYPE_HAS_NONTRIVIAL_DESTRUCTOR (elt_type));
+
+ if (!dependent && t == TYPE_MAIN_VARIANT (t) && !COMPLETE_TYPE_P (t)
+ && COMPLETE_TYPE_P (elt_type))
+ {
+ /* The element type has been completed since the last time we saw
+ this array type; update the layout and 'tor flags for any variants
+ that need it. */
+ layout_type (t);
+ for (tree v = TYPE_NEXT_VARIANT (t); v; v = TYPE_NEXT_VARIANT (v))
+ {
+ TYPE_NEEDS_CONSTRUCTING (v) = needs_ctor;
+ TYPE_HAS_NONTRIVIAL_DESTRUCTOR (v) = needs_dtor;
+ }
+ }
+
+ return t;
+}
+
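+// An illustrative sketch (non-dependent case; passing -1 lets the function
+// work the dependency out itself): build `int[4]`, then a const-qualified
+// variant of it, and both end up sharing one main variant.
+//
+//   tree idx = build_index_type (size_int (3)); /* domain 0..3 */
+//   tree arr = build_cplus_array_type (integer_type_node, idx, -1);
+//   tree carr = rs_build_qualified_type (arr, TYPE_QUAL_CONST);
+//   gcc_assert (TYPE_MAIN_VARIANT (carr) == TYPE_MAIN_VARIANT (arr));
+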
+// forked from gcc/cp/tree.cc cp_build_qualified_type_real
+
+/* Make a variant of TYPE, qualified with the TYPE_QUALS. Handles
+ arrays correctly. In particular, if TYPE is an array of T's, and
+ TYPE_QUALS is non-empty, returns an array of qualified T's.
+
+ FLAGS determines how to deal with ill-formed qualifications. If
+ tf_ignore_bad_quals is set, then bad qualifications are dropped
+ (this is permitted if TYPE was introduced via a typedef or template
+ type parameter). If bad qualifications are dropped and tf_warning
+ is set, then a warning is issued for non-const qualifications. If
+ tf_ignore_bad_quals is not set and tf_error is not set, we
+ return error_mark_node. Otherwise, we issue an error, and ignore
+ the qualifications.
+
+ Qualification of a reference type is valid when the reference came
+ via a typedef or template type argument. [dcl.ref] No such
+ dispensation is provided for qualifying a function type. [dcl.fct]
+ DR 295 queries this and the proposed resolution brings it into line
+ with qualifying a reference. We implement the DR. We also behave
+ in a similar manner for restricting non-pointer types. */
+
+tree
+rs_build_qualified_type_real (tree type, int type_quals,
+ tsubst_flags_t complain)
+{
+ tree result;
+ int bad_quals = TYPE_UNQUALIFIED;
+
+ if (type == error_mark_node)
+ return type;
+
+ if (type_quals == rs_type_quals (type))
+ return type;
+
+ if (TREE_CODE (type) == ARRAY_TYPE)
+ {
+ /* In C++, the qualification really applies to the array element
+ type. Obtain the appropriately qualified element type. */
+ tree t;
+ tree element_type
+ = rs_build_qualified_type_real (TREE_TYPE (type), type_quals, complain);
+
+ if (element_type == error_mark_node)
+ return error_mark_node;
+
+ /* See if we already have an identically qualified type. Tests
+ should be equivalent to those in check_qualified_type. */
+ for (t = TYPE_MAIN_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
+ if (TREE_TYPE (t) == element_type && TYPE_NAME (t) == TYPE_NAME (type)
+ && TYPE_CONTEXT (t) == TYPE_CONTEXT (type)
+ && attribute_list_equal (TYPE_ATTRIBUTES (t),
+ TYPE_ATTRIBUTES (type)))
+ break;
+
+ if (!t)
+ {
+ /* If we already know the dependentness, tell the array type
+ constructor. This is important for module streaming, as we cannot
+ dynamically determine that on read in. */
+ t = build_cplus_array_type (element_type, TYPE_DOMAIN (type),
+ TYPE_DEPENDENT_P_VALID (type)
+ ? int (TYPE_DEPENDENT_P (type))
+ : -1);
+
+ /* Keep the typedef name. */
+ if (TYPE_NAME (t) != TYPE_NAME (type))
+ {
+ t = build_variant_type_copy (t);
+ TYPE_NAME (t) = TYPE_NAME (type);
+ SET_TYPE_ALIGN (t, TYPE_ALIGN (type));
+ TYPE_USER_ALIGN (t) = TYPE_USER_ALIGN (type);
+ }
+ }
+
+ /* Even if we already had this variant, we update
+ TYPE_NEEDS_CONSTRUCTING and TYPE_HAS_NONTRIVIAL_DESTRUCTOR in case
+ they changed since the variant was originally created.
+
+ This seems hokey; if there is some way to use a previous
+ variant *without* coming through here,
+ TYPE_NEEDS_CONSTRUCTING will never be updated. */
+ TYPE_NEEDS_CONSTRUCTING (t)
+ = TYPE_NEEDS_CONSTRUCTING (TYPE_MAIN_VARIANT (element_type));
+ TYPE_HAS_NONTRIVIAL_DESTRUCTOR (t)
+ = TYPE_HAS_NONTRIVIAL_DESTRUCTOR (TYPE_MAIN_VARIANT (element_type));
+ return t;
+ }
+
+ /* A reference or method type shall not be cv-qualified.
+ [dcl.ref], [dcl.fct]. This used to be an error, but as of DR 295
+ (in CD1) we always ignore extra cv-quals on functions. */
+
+ /* [dcl.ref/1] Cv-qualified references are ill-formed except when
+ the cv-qualifiers are introduced through the use of a typedef-name
+ ([dcl.typedef], [temp.param]) or decltype-specifier
+ ([dcl.type.decltype]),in which case the cv-qualifiers are
+ ignored. */
+ if (type_quals & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)
+ && (TYPE_REF_P (type) || FUNC_OR_METHOD_TYPE_P (type)))
+ {
+ if (TYPE_REF_P (type)
+ && (!typedef_variant_p (type) || FUNC_OR_METHOD_TYPE_P (type)))
+ bad_quals |= type_quals & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE);
+ type_quals &= ~(TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE);
+ }
+
+ /* But preserve any function-cv-quals on a FUNCTION_TYPE. */
+ if (TREE_CODE (type) == FUNCTION_TYPE)
+ type_quals |= type_memfn_quals (type);
+
+ /* A restrict-qualified type must be a pointer (or reference)
+ to object or incomplete type. */
+ if ((type_quals & TYPE_QUAL_RESTRICT) && TREE_CODE (type) != TYPENAME_TYPE
+ && !INDIRECT_TYPE_P (type))
+ {
+ bad_quals |= TYPE_QUAL_RESTRICT;
+ type_quals &= ~TYPE_QUAL_RESTRICT;
+ }
+
+ if (bad_quals == TYPE_UNQUALIFIED || (complain & tf_ignore_bad_quals))
+ /*OK*/;
+ else if (!(complain & tf_error))
+ return error_mark_node;
+ else
+ {
+ tree bad_type = build_qualified_type (ptr_type_node, bad_quals);
+ error ("%qV qualifiers cannot be applied to %qT", bad_type, type);
+ }
+
+ /* Retrieve (or create) the appropriately qualified variant. */
+ result = build_qualified_type (type, type_quals);
+
+ return result;
+}
+
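+// A sketch of the ARRAY_TYPE branch above (assuming `arr` is the type
+// `int[4]`): the qualifiers land on the element type, so the result is an
+// array of `const int` rather than a const-qualified ARRAY_TYPE.
+//
+//   tree qarr = rs_build_qualified_type_real (arr, TYPE_QUAL_CONST,
+//                                             tf_warning_or_error);
+//   gcc_assert (RS_TYPE_CONST_P (TREE_TYPE (qarr)));
+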
+// forked from gcc/cp/c-common.cc vector_targets_convertible_p
+
+/* vector_targets_convertible_p is used for vector pointer types. The
+ callers perform various checks that the qualifiers are satisfactory,
+ while OTOH vector_targets_convertible_p ignores the number of elements
+ in the vectors. That's fine with vector pointers as we can consider,
+ say, a vector of 8 elements as two consecutive vectors of 4 elements,
+   and that does not require any conversion of the pointer values.
+ In contrast, vector_types_convertible_p and
+ vector_types_compatible_elements_p are used for vector value types. */
+/* True if pointers to distinct types T1 and T2 can be converted to
+ each other without an explicit cast. Only returns true for opaque
+ vector types. */
+bool
+vector_targets_convertible_p (const_tree t1, const_tree t2)
+{
+ if (VECTOR_TYPE_P (t1) && VECTOR_TYPE_P (t2)
+ && (TYPE_VECTOR_OPAQUE (t1) || TYPE_VECTOR_OPAQUE (t2))
+ && tree_int_cst_equal (TYPE_SIZE (t1), TYPE_SIZE (t2)))
+ return true;
+
+ return false;
+}
+
+// forked from gcc/cp/typeck.cc comp_array_types
+
+/* Compare the array types T1 and T2. CB says how we should behave when
+ comparing array bounds: bounds_none doesn't allow dimensionless arrays,
+   bounds_either says that any array can be [], bounds_first means that
+   only T1 can be an array with unknown bounds. STRICT is true if
+ qualifiers must match when comparing the types of the array elements. */
+
+static bool
+comp_array_types (const_tree t1, const_tree t2, compare_bounds_t cb,
+ bool strict)
+{
+ tree d1;
+ tree d2;
+ tree max1, max2;
+
+ if (t1 == t2)
+ return true;
+
+ /* The type of the array elements must be the same. */
+ if (strict ? !same_type_p (TREE_TYPE (t1), TREE_TYPE (t2))
+ : !similar_type_p (TREE_TYPE (t1), TREE_TYPE (t2)))
+ return false;
+
+ d1 = TYPE_DOMAIN (t1);
+ d2 = TYPE_DOMAIN (t2);
+
+ if (d1 == d2)
+ return true;
+
+ /* If one of the arrays is dimensionless, and the other has a
+ dimension, they are of different types. However, it is valid to
+ write:
+
+ extern int a[];
+ int a[3];
+
+ by [basic.link]:
+
+ declarations for an array object can specify
+ array types that differ by the presence or absence of a major
+ array bound (_dcl.array_). */
+ if (!d1 && d2)
+ return cb >= bounds_either;
+ else if (d1 && !d2)
+ return cb == bounds_either;
+
+ /* Check that the dimensions are the same. */
+
+ if (!rs_tree_equal (TYPE_MIN_VALUE (d1), TYPE_MIN_VALUE (d2)))
+ return false;
+ max1 = TYPE_MAX_VALUE (d1);
+ max2 = TYPE_MAX_VALUE (d2);
+
+ if (!rs_tree_equal (max1, max2))
+ return false;
+
+ return true;
+}
+
+// forked from gcc/cp/typeck.cc same_type_ignoring_top_level_qualifiers_p
+
+/* Returns nonzero iff TYPE1 and TYPE2 are the same type, ignoring
+ top-level qualifiers. */
+
+bool
+same_type_ignoring_top_level_qualifiers_p (tree type1, tree type2)
+{
+ if (type1 == error_mark_node || type2 == error_mark_node)
+ return false;
+ if (type1 == type2)
+ return true;
+
+ type1 = rs_build_qualified_type (type1, TYPE_UNQUALIFIED);
+ type2 = rs_build_qualified_type (type2, TYPE_UNQUALIFIED);
+ return same_type_p (type1, type2);
+}
+
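+// For example (sketch), `const int` and `int` differ only in top-level
+// qualification, so the predicate accepts them:
+//
+//   tree ci = rs_build_qualified_type (integer_type_node, TYPE_QUAL_CONST);
+//   gcc_assert (same_type_ignoring_top_level_qualifiers_p
+//                 (ci, integer_type_node));
+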
+// forked from gcc/cp/typeck.cc comp_ptr_ttypes_const
+
+/* Return true if TO and FROM (both of which are POINTER_TYPEs or
+ pointer-to-member types) are the same, ignoring cv-qualification at
+ all levels. CB says how we should behave when comparing array bounds. */
+
+bool
+comp_ptr_ttypes_const (tree to, tree from, compare_bounds_t cb)
+{
+ bool is_opaque_pointer = false;
+
+ for (;; to = TREE_TYPE (to), from = TREE_TYPE (from))
+ {
+ if (TREE_CODE (to) != TREE_CODE (from))
+ return false;
+
+ if (TREE_CODE (from) == OFFSET_TYPE
+ && same_type_p (TYPE_OFFSET_BASETYPE (from),
+ TYPE_OFFSET_BASETYPE (to)))
+ continue;
+
+ if (VECTOR_TYPE_P (to))
+ is_opaque_pointer = vector_targets_convertible_p (to, from);
+
+ if (TREE_CODE (to) == ARRAY_TYPE
+ /* Ignore cv-qualification, but if we see e.g. int[3] and int[4],
+ we must fail. */
+ && !comp_array_types (to, from, cb, /*strict=*/false))
+ return false;
+
+ /* CWG 330 says we need to look through arrays. */
+ if (!TYPE_PTR_P (to) && TREE_CODE (to) != ARRAY_TYPE)
+ return (is_opaque_pointer
+ || same_type_ignoring_top_level_qualifiers_p (to, from));
+ }
+}
+
+// forked from gcc/cp/typeck.cc similar_type_p
+
+/* Returns nonzero iff TYPE1 and TYPE2 are similar, as per [conv.qual]. */
+
+bool
+similar_type_p (tree type1, tree type2)
+{
+ if (type1 == error_mark_node || type2 == error_mark_node)
+ return false;
+
+ /* Informally, two types are similar if, ignoring top-level cv-qualification:
+ * they are the same type; or
+ * they are both pointers, and the pointed-to types are similar; or
+ * they are both pointers to member of the same class, and the types of
+ the pointed-to members are similar; or
+ * they are both arrays of the same size or both arrays of unknown bound,
+ and the array element types are similar. */
+
+ if (same_type_ignoring_top_level_qualifiers_p (type1, type2))
+ return true;
+
+ if ((TYPE_PTR_P (type1) && TYPE_PTR_P (type2))
+ || (TYPE_PTRDATAMEM_P (type1) && TYPE_PTRDATAMEM_P (type2))
+ || (TREE_CODE (type1) == ARRAY_TYPE && TREE_CODE (type2) == ARRAY_TYPE))
+ return comp_ptr_ttypes_const (type1, type2, bounds_either);
+
+ return false;
+}
+
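+// A sketch of [conv.qual] similarity: `int **` and `const int *const *`
+// differ only in cv-qualification at some level, so they are similar but
+// not the same type, whereas `int **` and `long **` are neither.
+//
+//   tree ci = rs_build_qualified_type (integer_type_node, TYPE_QUAL_CONST);
+//   tree cipc = rs_build_qualified_type (build_pointer_type (ci),
+//                                        TYPE_QUAL_CONST);
+//   tree t1 = build_pointer_type (build_pointer_type (integer_type_node));
+//   tree t2 = build_pointer_type (cipc);
+//   gcc_assert (similar_type_p (t1, t2) && !same_type_p (t1, t2));
+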
+// forked from gcc/cp/typeck.cc structural_comptypes
+// note: this fork only handles strict == COMPARE_STRICT.
+// If you pass in any other value for strict, e.g. COMPARE_BASE,
+// COMPARE_DERIVED, COMPARE_REDECLARATION or COMPARE_STRUCTURAL,
+// see the original function in gcc/cp/typeck.cc and port the required
+// bits, specifically those under case UNION_TYPE.
+
+/* Subroutine in comptypes. */
+
+static bool
+structural_comptypes (tree t1, tree t2, int strict)
+{
+ /* Both should be types that are not obviously the same. */
+ gcc_checking_assert (t1 != t2 && TYPE_P (t1) && TYPE_P (t2));
+
+ if (TYPE_PTRMEMFUNC_P (t1))
+ t1 = TYPE_PTRMEMFUNC_FN_TYPE (t1);
+ if (TYPE_PTRMEMFUNC_P (t2))
+ t2 = TYPE_PTRMEMFUNC_FN_TYPE (t2);
+
+ /* Different classes of types can't be compatible. */
+ if (TREE_CODE (t1) != TREE_CODE (t2))
+ return false;
+
+ /* Qualifiers must match. For array types, we will check when we
+ recur on the array element types. */
+ if (TREE_CODE (t1) != ARRAY_TYPE && rs_type_quals (t1) != rs_type_quals (t2))
+ return false;
+ if (TREE_CODE (t1) == FUNCTION_TYPE
+ && type_memfn_quals (t1) != type_memfn_quals (t2))
+ return false;
+ /* Need to check this before TYPE_MAIN_VARIANT.
+ FIXME function qualifiers should really change the main variant. */
+ if (FUNC_OR_METHOD_TYPE_P (t1))
+ {
+ if (type_memfn_rqual (t1) != type_memfn_rqual (t2))
+ return false;
+ if (/* cxx_dialect >= cxx17 && */
+ !comp_except_specs (TYPE_RAISES_EXCEPTIONS (t1),
+ TYPE_RAISES_EXCEPTIONS (t2), ce_type))
+ return false;
+ }
+
+ /* Allow for two different type nodes which have essentially the same
+ definition. Note that we already checked for equality of the type
+ qualifiers (just above). */
+ if (TREE_CODE (t1) != ARRAY_TYPE
+ && TYPE_MAIN_VARIANT (t1) == TYPE_MAIN_VARIANT (t2))
+ return true;
+
+ /* Compare the types. Return false on known not-same. Break on not
+ known. Never return true from this switch -- you'll break
+ specialization comparison. */
+ switch (TREE_CODE (t1))
+ {
+ case VOID_TYPE:
+ case BOOLEAN_TYPE:
+ /* All void and bool types are the same. */
+ break;
+
+ case OPAQUE_TYPE:
+ case INTEGER_TYPE:
+ case FIXED_POINT_TYPE:
+ case REAL_TYPE:
+ /* With these nodes, we can't determine type equivalence by
+ looking at what is stored in the nodes themselves, because
+ two nodes might have different TYPE_MAIN_VARIANTs but still
+ represent the same type. For example, wchar_t and int could
+ have the same properties (TYPE_PRECISION, TYPE_MIN_VALUE,
+ TYPE_MAX_VALUE, etc.), but have different TYPE_MAIN_VARIANTs
+ and are distinct types. On the other hand, int and the
+ following typedef
+
+ typedef int INT __attribute((may_alias));
+
+ have identical properties, different TYPE_MAIN_VARIANTs, but
+ represent the same type. The canonical type system keeps
+ track of equivalence in this case, so we fall back on it. */
+ if (TYPE_CANONICAL (t1) != TYPE_CANONICAL (t2))
+ return false;
+
+ /* We don't need or want the attribute comparison. */
+ return true;
+
+ case RECORD_TYPE:
+ case UNION_TYPE:
+ return false;
+
+ case OFFSET_TYPE:
+ if (!comptypes (TYPE_OFFSET_BASETYPE (t1), TYPE_OFFSET_BASETYPE (t2),
+ strict & ~COMPARE_REDECLARATION))
+ return false;
+ if (!same_type_p (TREE_TYPE (t1), TREE_TYPE (t2)))
+ return false;
+ break;
+
+ case REFERENCE_TYPE:
+ if (TYPE_REF_IS_RVALUE (t1) != TYPE_REF_IS_RVALUE (t2))
+ return false;
+ /* fall through to checks for pointer types */
+ gcc_fallthrough ();
+
+ case POINTER_TYPE:
+ if (TYPE_MODE (t1) != TYPE_MODE (t2)
+ || !same_type_p (TREE_TYPE (t1), TREE_TYPE (t2)))
+ return false;
+ break;
+
+ case METHOD_TYPE:
+ case FUNCTION_TYPE:
+ /* Exception specs and memfn_rquals were checked above. */
+ if (!same_type_p (TREE_TYPE (t1), TREE_TYPE (t2)))
+ return false;
+ if (!compparms (TYPE_ARG_TYPES (t1), TYPE_ARG_TYPES (t2)))
+ return false;
+ break;
+
+ case ARRAY_TYPE:
+ /* Target types must match incl. qualifiers. */
+ if (!comp_array_types (t1, t2,
+ ((strict & COMPARE_REDECLARATION) ? bounds_either
+ : bounds_none),
+ /*strict=*/true))
+ return false;
+ break;
+
+ case COMPLEX_TYPE:
+ if (!same_type_p (TREE_TYPE (t1), TREE_TYPE (t2)))
+ return false;
+ break;
+
+ case VECTOR_TYPE:
+ if (gnu_vector_type_p (t1) != gnu_vector_type_p (t2)
+ || maybe_ne (TYPE_VECTOR_SUBPARTS (t1), TYPE_VECTOR_SUBPARTS (t2))
+ || !same_type_p (TREE_TYPE (t1), TREE_TYPE (t2)))
+ return false;
+ break;
+
+ default:
+ return false;
+ }
+
+ /* If we get here, we know that from a target independent POV the
+ types are the same. Make sure the target attributes are also
+ the same. */
+ if (!comp_type_attributes (t1, t2))
+ return false;
+
+ return true;
+}
+
+// forked from gcc/cp/typeck.cc comptypes
+
+/* Return true if T1 and T2 are related as allowed by STRICT. STRICT
+ is a bitwise-or of the COMPARE_* flags. */
+
+bool
+comptypes (tree t1, tree t2, int strict)
+{
+ gcc_checking_assert (t1 && t2);
+
+ /* TYPE_ARGUMENT_PACKS are not really types. */
+ gcc_checking_assert (TREE_CODE (t1) != TYPE_ARGUMENT_PACK
+ && TREE_CODE (t2) != TYPE_ARGUMENT_PACK);
+
+ if (t1 == t2)
+ return true;
+
+ /* Suppress errors caused by previously reported errors. */
+ if (t1 == error_mark_node || t2 == error_mark_node)
+ return false;
+
+ if (strict == COMPARE_STRICT)
+ {
+ if (TYPE_STRUCTURAL_EQUALITY_P (t1) || TYPE_STRUCTURAL_EQUALITY_P (t2))
+ /* At least one of the types requires structural equality, so
+ perform a deep check. */
+ return structural_comptypes (t1, t2, strict);
+
+ if (!flag_checking)
+ return TYPE_CANONICAL (t1) == TYPE_CANONICAL (t2);
+ else
+ return structural_comptypes (t1, t2, strict);
+ }
+ else if (strict == COMPARE_STRUCTURAL)
+ return structural_comptypes (t1, t2, COMPARE_STRICT);
+ else
+ return structural_comptypes (t1, t2, strict);
+}
+
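+// In practice this entry point is mostly reached through the same_type_p
+// macro, which is comptypes with COMPARE_STRICT; e.g. (sketch):
+//
+//   same_type_p (integer_type_node, integer_type_node)       /* true */
+//   same_type_p (integer_type_node, long_integer_type_node)  /* false */
+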
+// forked from gcc/cp/decl.cc next_initializable_field
+
+/* FIELD is an element of TYPE_FIELDS or NULL. In the former case, the value
+ returned is the next FIELD_DECL (possibly FIELD itself) that can be
+ initialized. If there are no more such fields, the return value
+ will be NULL. */
+
+tree
+next_initializable_field (tree field)
+{
+ while (field
+ && (TREE_CODE (field) != FIELD_DECL || DECL_UNNAMED_BIT_FIELD (field)
+ || (DECL_ARTIFICIAL (field)
+ /* Don't skip vptr fields. We might see them when we're
+ called from reduced_constant_expression_p. */
+ && !DECL_VIRTUAL_P (field))))
+ field = DECL_CHAIN (field);
+
+ return field;
+}
+
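+// E.g. (sketch): walking the TYPE_FIELDS of `struct S { int : 0; int a; }`
+// skips the unnamed bit-field and yields the FIELD_DECL for `a`.
+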
+// forked from gcc/cp/call.cc sufficient_parms_p
+
+/* Returns nonzero if PARMLIST consists of only default parms,
+ ellipsis, and/or undeduced parameter packs. */
+
+bool
+sufficient_parms_p (const_tree parmlist)
+{
+ for (; parmlist && parmlist != void_list_node;
+ parmlist = TREE_CHAIN (parmlist))
+ if (!TREE_PURPOSE (parmlist))
+ return false;
+ return true;
+}
+
+// forked from gcc/cp/class.cc default_ctor_p
+
+/* Returns true if FN is a default constructor. */
+
+bool
+default_ctor_p (const_tree fn)
+{
+ return (DECL_CONSTRUCTOR_P (fn)
+ && sufficient_parms_p (FUNCTION_FIRST_USER_PARMTYPE (fn)));
+}
+
+// forked from gcc/cp/class.cc user_provided_p
+
+/* Returns true iff FN is a user-provided function, i.e. user-declared
+ and not defaulted at its first declaration. */
+
+bool
+user_provided_p (tree fn)
+{
+ return (!DECL_ARTIFICIAL (fn)
+ && !(DECL_INITIALIZED_IN_CLASS_P (fn)
+ && (DECL_DEFAULTED_FN (fn) || DECL_DELETED_FN (fn))));
+}
+
+// forked from gcc/cp/class.cc type_has_non_user_provided_default_constructor
+
+/* Returns true iff class T has a non-user-provided (i.e. implicitly
+ declared or explicitly defaulted in the class body) default
+ constructor. */
+
+bool
+type_has_non_user_provided_default_constructor (tree t)
+{
+ if (!TYPE_HAS_DEFAULT_CONSTRUCTOR (t))
+ return false;
+ if (CLASSTYPE_LAZY_DEFAULT_CTOR (t))
+ return true;
+
+ for (ovl_iterator iter (CLASSTYPE_CONSTRUCTORS (t)); iter; ++iter)
+ {
+ tree fn = *iter;
+ if (TREE_CODE (fn) == FUNCTION_DECL && default_ctor_p (fn)
+ && !user_provided_p (fn))
+ return true;
+ }
+
+ return false;
+}
+
+// forked from gcc/cp/class.cc default_init_uninitialized_part
+
+/* If default-initialization leaves part of TYPE uninitialized, returns
+ a DECL for the field or TYPE itself (DR 253). */
+
+tree
+default_init_uninitialized_part (tree type)
+{
+ tree t, r, binfo;
+ int i;
+
+ type = strip_array_types (type);
+ if (!CLASS_TYPE_P (type))
+ return type;
+ if (!type_has_non_user_provided_default_constructor (type))
+ return NULL_TREE;
+ for (binfo = TYPE_BINFO (type), i = 0; BINFO_BASE_ITERATE (binfo, i, t); ++i)
+ {
+ r = default_init_uninitialized_part (BINFO_TYPE (t));
+ if (r)
+ return r;
+ }
+ for (t = next_initializable_field (TYPE_FIELDS (type)); t;
+ t = next_initializable_field (DECL_CHAIN (t)))
+ if (!DECL_INITIAL (t) && !DECL_ARTIFICIAL (t))
+ {
+ r = default_init_uninitialized_part (TREE_TYPE (t));
+ if (r)
+ return DECL_P (r) ? r : t;
+ }
+
+ return NULL_TREE;
+}
+
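+// A sketch of both outcomes: for `struct S { int i; };` the implicit
+// default constructor leaves `i` untouched, so the function returns the
+// FIELD_DECL for `i`; for `struct T { int i = 0; };` every field is
+// covered and it returns NULL_TREE.
+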
+// forked from gcc/cp/name-lookup.cc extract_conversion_operator
+
+/* FNS is an overload set of conversion functions. Return the
+ overloads converting to TYPE. */
+
+static tree
+extract_conversion_operator (tree fns, tree type)
+{
+ tree convs = NULL_TREE;
+ tree tpls = NULL_TREE;
+
+ for (ovl_iterator iter (fns); iter; ++iter)
+ {
+ if (same_type_p (DECL_CONV_FN_TYPE (*iter), type))
+ convs = lookup_add (*iter, convs);
+ }
+
+ if (!convs)
+ convs = tpls;
+
+ return convs;
+}
+
+// forked from gcc/cp/name-lookup.cc
+
+/* Look for NAME as an immediate member of KLASS (including
+   anon-members or unscoped enum member). WANT_TYPE is true if we
+   should only return a type binding (if there is one); otherwise
+   perform the regular search.
+
+   Use this if you do not want lazy member creation. */
+
+tree
+get_class_binding_direct (tree klass, tree name, bool want_type)
+{
+ gcc_checking_assert (RECORD_OR_UNION_TYPE_P (klass));
+
+ /* Conversion operators can only be found by the marker conversion
+ operator name. */
+ bool conv_op = IDENTIFIER_CONV_OP_P (name);
+ tree lookup = conv_op ? conv_op_identifier : name;
+ tree val = NULL_TREE;
+ vec<tree, va_gc> *member_vec = CLASSTYPE_MEMBER_VEC (klass);
+
+ if (COMPLETE_TYPE_P (klass) && member_vec)
+ {
+ val = member_vec_binary_search (member_vec, lookup);
+ if (!val)
+ ;
+ else if (STAT_HACK_P (val))
+ val = want_type ? STAT_TYPE (val) : STAT_DECL (val);
+ else if (want_type && !DECL_DECLARES_TYPE_P (val))
+ val = NULL_TREE;
+ }
+ else
+ {
+ if (member_vec && !want_type)
+ val = member_vec_linear_search (member_vec, lookup);
+
+ if (!val || (TREE_CODE (val) == OVERLOAD && OVL_DEDUP_P (val)))
+ /* Dependent using declarations are a 'field', make sure we
+ return that even if we saw an overload already. */
+ if (tree field_val = fields_linear_search (klass, lookup, want_type))
+ {
+ if (!val)
+ val = field_val;
+ else if (TREE_CODE (field_val) == USING_DECL)
+ val = ovl_make (field_val, val);
+ }
+ }
+
+ /* Extract the conversion operators asked for, unless the general
+ conversion operator was requested. */
+ if (val && conv_op)
+ {
+ gcc_checking_assert (OVL_FUNCTION (val) == conv_op_marker);
+ val = OVL_CHAIN (val);
+ if (tree type = TREE_TYPE (name))
+ val = extract_conversion_operator (val, type);
+ }
+
+ return val;
+}
+
+#if defined ENABLE_TREE_CHECKING
+
+// forked from gcc/cp/tree.cc lang_check_failed
+
+/* Complain that some language-specific thing hanging off a tree
+ node has been accessed improperly. */
+
+void
+lang_check_failed (const char *file, int line, const char *function)
+{
+ internal_error ("%<lang_*%> check: failed in %s, at %s:%d", function,
+ trim_filename (file), line);
+}
+#endif /* ENABLE_TREE_CHECKING */
+
+// forked from gcc/cp/tree.cc skip_artificial_parms_for
+
+/* Given a FUNCTION_DECL FN and a chain LIST, skip as many elements of LIST
+ as there are artificial parms in FN. */
+
+tree
+skip_artificial_parms_for (const_tree fn, tree list)
+{
+ if (DECL_NONSTATIC_MEMBER_FUNCTION_P (fn))
+ list = TREE_CHAIN (list);
+ else
+ return list;
+
+ if (DECL_HAS_IN_CHARGE_PARM_P (fn))
+ list = TREE_CHAIN (list);
+ if (DECL_HAS_VTT_PARM_P (fn))
+ list = TREE_CHAIN (list);
+ return list;
+}
+
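+// A sketch: for a non-static member function, DECL_ARGUMENTS starts with
+// the implicit `this` parameter, so
+//
+//   tree parms = skip_artificial_parms_for (fn, DECL_ARGUMENTS (fn));
+//
+// leaves `parms` pointing at the first user-written parameter, after also
+// skipping any in-charge or VTT parameters of a constructor.
+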
+// forked from gcc/cp/class.cc in_class_defaulted_default_constructor
+
+/* Returns the defaulted constructor if T has one. Otherwise, returns
+ NULL_TREE. */
+
+tree
+in_class_defaulted_default_constructor (tree t)
+{
+ if (!TYPE_HAS_USER_CONSTRUCTOR (t))
+ return NULL_TREE;
+
+ for (ovl_iterator iter (CLASSTYPE_CONSTRUCTORS (t)); iter; ++iter)
+ {
+ tree fn = *iter;
+
+ if (DECL_DEFAULTED_IN_CLASS_P (fn) && default_ctor_p (fn))
+ return fn;
+ }
+
+ return NULL_TREE;
+}
+
+// forked from gcc/cp/constexpr.cc
+
+/* Returns true iff FUN is an instantiation of a constexpr function
+ template or a defaulted constexpr function. */
+
+bool
+is_instantiation_of_constexpr (tree fun)
+{
+  return (DECL_DEFAULTED_FN (fun) && DECL_DECLARED_CONSTEXPR_P (fun));
+}
+
+// forked from gcc/cp/decl.cc check_for_uninitialized_const_var
+
+/* Issue an error message if DECL is an uninitialized const variable.
+ CONSTEXPR_CONTEXT_P is true when the function is called in a constexpr
+ context from potential_constant_expression. Returns true if all is well,
+ false otherwise. */
+
+bool
+check_for_uninitialized_const_var (tree decl, bool constexpr_context_p,
+ tsubst_flags_t complain)
+{
+ tree type = strip_array_types (TREE_TYPE (decl));
+
+ /* ``Unless explicitly declared extern, a const object does not have
+ external linkage and must be initialized. ($8.4; $12.1)'' ARM
+ 7.1.6 */
+ if (VAR_P (decl) && !TYPE_REF_P (type) && (RS_TYPE_CONST_P (type))
+ && !DECL_NONTRIVIALLY_INITIALIZED_P (decl))
+ {
+ tree field = default_init_uninitialized_part (type);
+ if (!field)
+ return true;
+
+ bool show_notes = true;
+
+ if (!constexpr_context_p)
+ {
+ if (RS_TYPE_CONST_P (type))
+ {
+ if (complain & tf_error)
+ show_notes = permerror (DECL_SOURCE_LOCATION (decl),
+ "uninitialized %<const %D%>", decl);
+ }
+ else
+ {
+ if (!is_instantiation_of_constexpr (current_function_decl)
+ && (complain & tf_error))
+ error_at (DECL_SOURCE_LOCATION (decl),
+ "uninitialized variable %qD in %<constexpr%> "
+ "function",
+ decl);
+ else
+ show_notes = false;
+ }
+ }
+ else if (complain & tf_error)
+ error_at (DECL_SOURCE_LOCATION (decl),
+ "uninitialized variable %qD in %<constexpr%> context", decl);
+
+ if (show_notes && CLASS_TYPE_P (type) && (complain & tf_error))
+ {
+ // tree defaulted_ctor;
+
+ // inform (DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (type)),
+ // "%q#T has no user-provided default constructor", type);
+ // defaulted_ctor = in_class_defaulted_default_constructor (type);
+ // if (defaulted_ctor)
+ // inform (DECL_SOURCE_LOCATION (defaulted_ctor),
+ // "constructor is not user-provided because it is "
+ // "explicitly defaulted in the class body");
+ // inform (DECL_SOURCE_LOCATION (field),
+ // "and the implicitly-defined constructor does not "
+ // "initialize %q#D",
+ // field);
+ }
+
+ return false;
+ }
+
+ return true;
+}
+
+// forked from gcc/cp/tree.cc cv_unqualified
+
+/* Return TYPE with const and volatile removed. */
+
+tree
+cv_unqualified (tree type)
+{
+ int quals;
+
+ if (type == error_mark_node)
+ return type;
+
+ quals = rs_type_quals (type);
+ quals &= ~(TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE);
+ return rs_build_qualified_type (type, quals);
+}
+
+/* The C and C++ parsers both use vectors to hold function arguments.
+ For efficiency, we keep a cache of unused vectors. This is the
+ cache. */
+
+typedef vec<tree, va_gc> *tree_gc_vec;
+static GTY ((deletable)) vec<tree_gc_vec, va_gc> *tree_vector_cache;
+
+// forked from gcc/c-family/c-common.c make_tree_vector
+
+/* Return a new vector from the cache. If the cache is empty,
+ allocate a new vector. These vectors are GC'ed, so it is OK if the
+   pointer is not released. */
+
+vec<tree, va_gc> *
+make_tree_vector (void)
+{
+ if (tree_vector_cache && !tree_vector_cache->is_empty ())
+ return tree_vector_cache->pop ();
+ else
+ {
+ /* Passing 0 to vec::alloc returns NULL, and our callers require
+ that we always return a non-NULL value. The vector code uses
+ 4 when growing a NULL vector, so we do too. */
+ vec<tree, va_gc> *v;
+ vec_alloc (v, 4);
+ return v;
+ }
+}
+
+// forked from gcc/c-family/c-common.c release_tree_vector
+
+/* Release a vector of trees back to the cache. */
+
+void
+release_tree_vector (vec<tree, va_gc> *vec)
+{
+ if (vec != NULL)
+ {
+ if (vec->allocated () >= 16)
+ /* Don't cache vecs that have expanded more than once. On a p64
+	   target, vecs double in alloc size with each power of 2 elements, e.g.
+ at 16 elements the alloc increases from 128 to 256 bytes. */
+ vec_free (vec);
+ else
+ {
+ vec->truncate (0);
+ vec_safe_push (tree_vector_cache, vec);
+ }
+ }
+}
+
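+// Typical usage pattern for the cache (sketch; `arg` stands for some tree
+// already in hand):
+//
+//   vec<tree, va_gc> *args = make_tree_vector ();
+//   vec_safe_push (args, arg);
+//   /* ... consume ARGS ... */
+//   release_tree_vector (args);
+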
+// forked from gcc/cp/cvt.cc instantiation_dependent_expression_p
+
+/* As above, but also check value-dependence of the expression as a whole. */
+
+bool instantiation_dependent_expression_p (tree) { return false; }
+
+// forked from gcc/cp/cvt.cc cp_get_callee
+
+/* If CALL is a call, return the callee; otherwise null. */
+
+tree
+cp_get_callee (tree call)
+{
+ if (call == NULL_TREE)
+ return call;
+ else if (TREE_CODE (call) == CALL_EXPR)
+ return CALL_EXPR_FN (call);
+ return NULL_TREE;
+}
+
+// forked from gcc/cp/typeck.cc build_nop
+
+/* Return a NOP_EXPR converting EXPR to TYPE. */
+
+tree
+build_nop (tree type, tree expr)
+{
+ if (type == error_mark_node || error_operand_p (expr))
+ return expr;
+ return build1_loc (EXPR_LOCATION (expr), NOP_EXPR, type, expr);
+}
+
+// forked from gcc/cp/tree.cc scalarish_type_p
+
+/* Returns 1 iff type T is something we want to treat as a scalar type for
+ the purpose of deciding whether it is trivial/POD/standard-layout. */
+
+bool
+scalarish_type_p (const_tree t)
+{
+ if (t == error_mark_node)
+ return 1;
+
+ return (SCALAR_TYPE_P (t) || VECTOR_TYPE_P (t));
+}
+
+// forked from gcc/cp/tree.cc type_has_nontrivial_copy_init
+
+/* Returns true iff copying an object of type T (including via move
+ constructor) is non-trivial. That is, T has no non-trivial copy
+ constructors and no non-trivial move constructors, and not all copy/move
+ constructors are deleted. This function implements the ABI notion of
+ non-trivial copy, which has diverged from the one in the standard. */
+
+bool type_has_nontrivial_copy_init (const_tree) { return false; }
+
+// forked from gcc/cp/tree.cc build_local_temp
+
+/* Return an undeclared local temporary of type TYPE for use in building a
+ TARGET_EXPR. */
+
+tree
+build_local_temp (tree type)
+{
+ tree slot = build_decl (input_location, VAR_DECL, NULL_TREE, type);
+ DECL_ARTIFICIAL (slot) = 1;
+ DECL_IGNORED_P (slot) = 1;
+ DECL_CONTEXT (slot) = current_function_decl;
+ layout_decl (slot, 0);
+ return slot;
+}
+
+// forked from gcc/cp/lambda.cc is_normal_capture_proxy
+
+/* Returns true iff DECL is a capture proxy for a normal capture
+ (i.e. without explicit initializer). */
+
+bool is_normal_capture_proxy (tree) { return false; }
+
+// forked from gcc/cp/c-common.cc reject_gcc_builtin
+
+/* For an EXPR of a FUNCTION_TYPE that references a GCC built-in function
+   with no library fallback, or for an ADDR_EXPR whose operand is such an
+   expression, issue an error pointing to the location LOC.
+ Returns true when the expression has been diagnosed and false
+ otherwise. */
+
+bool
+reject_gcc_builtin (const_tree expr, location_t loc /* = UNKNOWN_LOCATION */)
+{
+ if (TREE_CODE (expr) == ADDR_EXPR)
+ expr = TREE_OPERAND (expr, 0);
+
+ STRIP_ANY_LOCATION_WRAPPER (expr);
+
+ if (TREE_TYPE (expr) && TREE_CODE (TREE_TYPE (expr)) == FUNCTION_TYPE
+ && TREE_CODE (expr) == FUNCTION_DECL
+ /* The intersection of DECL_BUILT_IN and DECL_IS_UNDECLARED_BUILTIN avoids
+ false positives for user-declared built-ins such as abs or
+ strlen, and for C++ operators new and delete.
+ The c_decl_implicit() test avoids false positives for implicitly
+ declared built-ins with library fallbacks (such as abs). */
+ && fndecl_built_in_p (expr) && DECL_IS_UNDECLARED_BUILTIN (expr)
+ && !DECL_ASSEMBLER_NAME_SET_P (expr))
+ {
+ if (loc == UNKNOWN_LOCATION)
+ loc = EXPR_LOC_OR_LOC (expr, input_location);
+
+ /* Reject arguments that are built-in functions with
+ no library fallback. */
+ error_at (loc, "built-in function %qE must be directly called", expr);
+
+ return true;
+ }
+
+ return false;
+}
+
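+// For instance (sketch), taking the address of a built-in that has no
+// library fallback is diagnosed here:
+//
+//   void (*p) (void) = __builtin_trap;
+//   /* error: built-in function '__builtin_trap' must be directly called */
+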
+// forked from gcc/cp/typeck.cc is_bitfield_expr_with_lowered_type
+
+/* If EXP is a reference to a bit-field, and the type of EXP does not
+ match the declared type of the bit-field, return the declared type
+ of the bit-field. Otherwise, return NULL_TREE. */
+
+tree
+is_bitfield_expr_with_lowered_type (const_tree exp)
+{
+ switch (TREE_CODE (exp))
+ {
+ case COND_EXPR:
+ if (!is_bitfield_expr_with_lowered_type (TREE_OPERAND (exp, 1)
+ ? TREE_OPERAND (exp, 1)
+ : TREE_OPERAND (exp, 0)))
+ return NULL_TREE;
+ return is_bitfield_expr_with_lowered_type (TREE_OPERAND (exp, 2));
+
+ case COMPOUND_EXPR:
+ return is_bitfield_expr_with_lowered_type (TREE_OPERAND (exp, 1));
+
+ case MODIFY_EXPR:
+ case SAVE_EXPR:
+ case UNARY_PLUS_EXPR:
+ case PREDECREMENT_EXPR:
+ case PREINCREMENT_EXPR:
+ case POSTDECREMENT_EXPR:
+ case POSTINCREMENT_EXPR:
+ case NEGATE_EXPR:
+ case NON_LVALUE_EXPR:
+ case BIT_NOT_EXPR:
+ return is_bitfield_expr_with_lowered_type (TREE_OPERAND (exp, 0));
+
+ case COMPONENT_REF: {
+ tree field;
+
+ field = TREE_OPERAND (exp, 1);
+ if (TREE_CODE (field) != FIELD_DECL || !DECL_BIT_FIELD_TYPE (field))
+ return NULL_TREE;
+ if (same_type_ignoring_top_level_qualifiers_p (
+ TREE_TYPE (exp), DECL_BIT_FIELD_TYPE (field)))
+ return NULL_TREE;
+ return DECL_BIT_FIELD_TYPE (field);
+ }
+
+ case VAR_DECL:
+ if (DECL_HAS_VALUE_EXPR_P (exp))
+ return is_bitfield_expr_with_lowered_type (
+ DECL_VALUE_EXPR (CONST_CAST_TREE (exp)));
+ return NULL_TREE;
+
+ case VIEW_CONVERT_EXPR:
+ if (location_wrapper_p (exp))
+ return is_bitfield_expr_with_lowered_type (TREE_OPERAND (exp, 0));
+ else
+ return NULL_TREE;
+
+ default:
+ return NULL_TREE;
+ }
+}
+
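+// An example of the COMPONENT_REF case (sketch): given
+//
+//   struct S { int b : 3; } s;
+//
+// the reference `s.b` carries the lowered 3-bit integral type, while its
+// DECL_BIT_FIELD_TYPE is plain int, so the function returns int for it.
+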
+// forked from gcc/cp/semantics.cc maybe_undo_parenthesized_ref
+
+/* If T is an id-expression obfuscated by force_paren_expr, undo the
+ obfuscation and return the underlying id-expression. Otherwise
+ return T. */
+
+tree
+maybe_undo_parenthesized_ref (tree t)
+{
+ if ((TREE_CODE (t) == PAREN_EXPR || TREE_CODE (t) == VIEW_CONVERT_EXPR)
+ && REF_PARENTHESIZED_P (t))
+ t = TREE_OPERAND (t, 0);
+
+ return t;
+}
+
+// forked from gcc/c-family/c-common.cc fold_offsetof
+
+/* Fold an offsetof-like expression. EXPR is a nested sequence of component
+ references with an INDIRECT_REF of a constant at the bottom; much like the
+ traditional rendering of offsetof as a macro. TYPE is the desired type of
+ the whole expression. Return the folded result. */
+
+tree
+fold_offsetof (tree expr, tree type, enum tree_code ctx)
+{
+ tree base, off, t;
+ tree_code code = TREE_CODE (expr);
+ switch (code)
+ {
+ case ERROR_MARK:
+ return expr;
+
+ case VAR_DECL:
+ error ("cannot apply %<offsetof%> to static data member %qD", expr);
+ return error_mark_node;
+
+ case CALL_EXPR:
+ case TARGET_EXPR:
+ error ("cannot apply %<offsetof%> when %<operator[]%> is overloaded");
+ return error_mark_node;
+
+ case NOP_EXPR:
+ case INDIRECT_REF:
+ if (!TREE_CONSTANT (TREE_OPERAND (expr, 0)))
+ {
+ error ("cannot apply %<offsetof%> to a non constant address");
+ return error_mark_node;
+ }
+ return convert (type, TREE_OPERAND (expr, 0));
+
+ case COMPONENT_REF:
+ base = fold_offsetof (TREE_OPERAND (expr, 0), type, code);
+ if (base == error_mark_node)
+ return base;
+
+ t = TREE_OPERAND (expr, 1);
+ if (DECL_C_BIT_FIELD (t))
+ {
+ error ("attempt to take address of bit-field structure "
+ "member %qD",
+ t);
+ return error_mark_node;
+ }
+ off = size_binop_loc (input_location, PLUS_EXPR, DECL_FIELD_OFFSET (t),
+ size_int (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (t))
+ / BITS_PER_UNIT));
+ break;
+
+ case ARRAY_REF:
+ base = fold_offsetof (TREE_OPERAND (expr, 0), type, code);
+ if (base == error_mark_node)
+ return base;
+
+ t = TREE_OPERAND (expr, 1);
+ STRIP_ANY_LOCATION_WRAPPER (t);
+
+ /* Check if the offset goes beyond the upper bound of the array. */
+ if (TREE_CODE (t) == INTEGER_CST && tree_int_cst_sgn (t) >= 0)
+ {
+ tree upbound = array_ref_up_bound (expr);
+ if (upbound != NULL_TREE && TREE_CODE (upbound) == INTEGER_CST
+ && !tree_int_cst_equal (upbound,
+ TYPE_MAX_VALUE (TREE_TYPE (upbound))))
+ {
+ if (ctx != ARRAY_REF && ctx != COMPONENT_REF)
+ upbound = size_binop (PLUS_EXPR, upbound,
+ build_int_cst (TREE_TYPE (upbound), 1));
+ if (tree_int_cst_lt (upbound, t))
+ {
+ tree v;
+
+ for (v = TREE_OPERAND (expr, 0);
+ TREE_CODE (v) == COMPONENT_REF; v = TREE_OPERAND (v, 0))
+ if (TREE_CODE (TREE_TYPE (TREE_OPERAND (v, 0)))
+ == RECORD_TYPE)
+ {
+ tree fld_chain = DECL_CHAIN (TREE_OPERAND (v, 1));
+ for (; fld_chain; fld_chain = DECL_CHAIN (fld_chain))
+ if (TREE_CODE (fld_chain) == FIELD_DECL)
+ break;
+
+ if (fld_chain)
+ break;
+ }
+ /* Don't warn if the array might be considered a poor
+ man's flexible array member with a very permissive
+ definition thereof. */
+ if (TREE_CODE (v) == ARRAY_REF
+ || TREE_CODE (v) == COMPONENT_REF)
+ warning (OPT_Warray_bounds_,
+ "index %E denotes an offset "
+ "greater than size of %qT",
+ t, TREE_TYPE (TREE_OPERAND (expr, 0)));
+ }
+ }
+ }
+
+ t = convert (sizetype, t);
+ off = size_binop (MULT_EXPR, TYPE_SIZE_UNIT (TREE_TYPE (expr)), t);
+ break;
+
+ case COMPOUND_EXPR:
+ /* Handle static members of volatile structs. */
+ t = TREE_OPERAND (expr, 1);
+ gcc_checking_assert (VAR_P (get_base_address (t)));
+ return fold_offsetof (t, type);
+
+ default:
+ gcc_unreachable ();
+ }
+
+ if (!POINTER_TYPE_P (type))
+ return size_binop (PLUS_EXPR, base, convert (type, off));
+ return fold_build_pointer_plus (base, off);
+}
+
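+// A sketch of what gets folded: for the traditional macro expansion
+//
+//   offsetof (struct S, f)  =>  ((size_t) &((struct S *) 0)->f)
+//
+// EXPR is a COMPONENT_REF over an INDIRECT_REF of the constant 0; the
+// COMPONENT_REF case above adds DECL_FIELD_OFFSET plus the byte part of
+// DECL_FIELD_BIT_OFFSET, yielding a constant of the requested TYPE.
+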
+// forked from gcc/cp/tree.cc char_type_p
+
+/* Returns nonzero if TYPE is a character type, including wchar_t. */
+
+int
+char_type_p (tree type)
+{
+ return (same_type_p (type, char_type_node)
+ || same_type_p (type, unsigned_char_type_node)
+ || same_type_p (type, signed_char_type_node)
+ || same_type_p (type, char8_type_node)
+ || same_type_p (type, char16_type_node)
+ || same_type_p (type, char32_type_node)
+ || same_type_p (type, wchar_type_node));
+}
+
+// forked from gcc/cp/pt.cc resolve_nondeduced_context
+
+/* Core DR 115: In contexts where deduction is done and fails, or in
+ contexts where deduction is not done, if a template argument list is
+ specified and it, along with any default template arguments, identifies
+ a single function template specialization, then the template-id is an
+ lvalue for the function template specialization. */
+
+tree
+resolve_nondeduced_context (tree orig_expr, tsubst_flags_t)
+{
+ return orig_expr;
+}
+
+// forked from gcc/cp/pt.cc instantiate_non_dependent_or_null
+
+/* Like instantiate_non_dependent_expr, but return NULL_TREE rather than
+ an uninstantiated expression. */
+
+tree
+instantiate_non_dependent_or_null (tree expr)
+{
+ if (expr == NULL_TREE)
+ return NULL_TREE;
+
+ return expr;
+}
+
+// forked from gcc/cp/pt.cc resolve_nondeduced_context_or_error
+
+/* As above, but error out if the expression remains overloaded. */
+
+tree
+resolve_nondeduced_context_or_error (tree exp, tsubst_flags_t complain)
+{
+ exp = resolve_nondeduced_context (exp, complain);
+ if (type_unknown_p (exp))
+ {
+ if (complain & tf_error)
+ cxx_incomplete_type_error (exp, TREE_TYPE (exp));
+ return error_mark_node;
+ }
+ return exp;
+}
+
+// forked from gcc/cp/tree.cc really_overloaded_fn
+
+/* Returns true iff X is an expression for an overloaded function
+ whose type cannot be known without performing overload
+ resolution. */
+
+bool
+really_overloaded_fn (tree x)
+{
+ return is_overloaded_fn (x) == 2;
+}
+
+// forked from gcc/cp/typeck.cc invalid_nonstatic_memfn_p
+
+/* EXPR is being used in a context that is not a function call.
+ Enforce:
+
+ [expr.ref]
+
+ The expression can be used only as the left-hand operand of a
+ member function call.
+
+ [expr.mptr.operator]
+
+ If the result of .* or ->* is a function, then that result can be
+ used only as the operand for the function call operator ().
+
+ by issuing an error message if appropriate. Returns true iff EXPR
+ violates these rules. */
+
+bool
+invalid_nonstatic_memfn_p (location_t loc, tree expr, tsubst_flags_t complain)
+{
+ if (expr == NULL_TREE)
+ return false;
+ /* Don't enforce this in MS mode. */
+ if (flag_ms_extensions)
+ return false;
+ if (is_overloaded_fn (expr) && !really_overloaded_fn (expr))
+ expr = get_first_fn (expr);
+ if (DECL_NONSTATIC_MEMBER_FUNCTION_P (expr))
+ {
+ if (complain & tf_error)
+ {
+ if (DECL_P (expr))
+ {
+ error_at (loc, "invalid use of non-static member function %qD",
+ expr);
+ inform (DECL_SOURCE_LOCATION (expr), "declared here");
+ }
+ else
+ error_at (loc,
+ "invalid use of non-static member function of "
+ "type %qT",
+ TREE_TYPE (expr));
+ }
+ return true;
+ }
+ return false;
+}
+
+// forked from gcc/cp/call.cc strip_top_quals
+
+tree
+strip_top_quals (tree t)
+{
+ if (TREE_CODE (t) == ARRAY_TYPE)
+ return t;
+ return rs_build_qualified_type (t, 0);
+}
+
+// forked from gcc/cp/typeck2.cc cxx_incomplete_type_inform
+
+/* Print an inform about the declaration of the incomplete type TYPE. */
+
+// void
+// cxx_incomplete_type_inform (const_tree type)
+// {
+// if (!TYPE_MAIN_DECL (type))
+// return;
+
+// location_t loc = DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (type));
+// tree ptype = strip_top_quals (CONST_CAST_TREE (type));
+
+// if (current_class_type && TYPE_BEING_DEFINED (current_class_type)
+// && same_type_p (ptype, current_class_type))
+// inform (loc,
+// "definition of %q#T is not complete until "
+// "the closing brace",
+// ptype);
+// else
+// inform (loc, "forward declaration of %q#T", ptype);
+// }
+
+// forked from gcc/cp/typeck2.cc cxx_incomplete_type_diagnostic
+
+/* Print an error message for invalid use of an incomplete type.
+ VALUE is the expression that was used (or 0 if that isn't known)
+ and TYPE is the type that was invalid. DIAG_KIND indicates the
+ type of diagnostic (see diagnostic.def). */
+
+void
+cxx_incomplete_type_diagnostic (location_t loc, const_tree value,
+ const_tree type, diagnostic_t diag_kind)
+{
+ // bool is_decl = false, complained = false;
+
+ gcc_assert (diag_kind == DK_WARNING || diag_kind == DK_PEDWARN
+ || diag_kind == DK_ERROR);
+
+ /* Avoid duplicate error message. */
+ if (TREE_CODE (type) == ERROR_MARK)
+ return;
+
+ if (value)
+ {
+ STRIP_ANY_LOCATION_WRAPPER (value);
+
+ if (VAR_P (value) || TREE_CODE (value) == PARM_DECL
+ || TREE_CODE (value) == FIELD_DECL)
+ {
+ // complained = emit_diagnostic (diag_kind, DECL_SOURCE_LOCATION
+ // (value),
+ // 0, "%qD has incomplete type", value);
+ // is_decl = true;
+ }
+ }
+retry:
+ /* We must print an error message. Be clever about what it says. */
+
+ switch (TREE_CODE (type))
+ {
+ // case RECORD_TYPE:
+ // case UNION_TYPE:
+ // case ENUMERAL_TYPE:
+ // if (!is_decl)
+ // complained
+ // = emit_diagnostic (diag_kind, loc, 0,
+ // "invalid use of incomplete type %q#T", type);
+ // if (complained)
+ // cxx_incomplete_type_inform (type);
+ // break;
+
+ case VOID_TYPE:
+ emit_diagnostic (diag_kind, loc, 0, "invalid use of %qT", type);
+ break;
+
+ case ARRAY_TYPE:
+ if (TYPE_DOMAIN (type))
+ {
+ type = TREE_TYPE (type);
+ goto retry;
+ }
+ emit_diagnostic (diag_kind, loc, 0,
+ "invalid use of array with unspecified bounds");
+ break;
+
+ case OFFSET_TYPE:
+ bad_member : {
+ tree member = TREE_OPERAND (value, 1);
+ if (is_overloaded_fn (member))
+ member = get_first_fn (member);
+
+ if (DECL_FUNCTION_MEMBER_P (member) && !flag_ms_extensions)
+ {
+ gcc_rich_location richloc (loc);
+ /* If "member" has no arguments (other than "this"), then
+ add a fix-it hint. */
+ if (type_num_arguments (TREE_TYPE (member)) == 1)
+ richloc.add_fixit_insert_after ("()");
+ emit_diagnostic (diag_kind, &richloc, 0,
+ "invalid use of member function %qD "
+ "(did you forget the %<()%> ?)",
+ member);
+ }
+ else
+ emit_diagnostic (diag_kind, loc, 0,
+ "invalid use of member %qD "
+ "(did you forget the %<&%> ?)",
+ member);
+ }
+ break;
+
+ case LANG_TYPE:
+ if (type == init_list_type_node)
+ {
+ emit_diagnostic (diag_kind, loc, 0,
+ "invalid use of brace-enclosed initializer list");
+ break;
+ }
+ gcc_assert (type == unknown_type_node);
+ if (value && TREE_CODE (value) == COMPONENT_REF)
+ goto bad_member;
+ else if (value && TREE_CODE (value) == ADDR_EXPR)
+ emit_diagnostic (diag_kind, loc, 0,
+ "address of overloaded function with no contextual "
+ "type information");
+ else if (value && TREE_CODE (value) == OVERLOAD)
+ emit_diagnostic (
+ diag_kind, loc, 0,
+ "overloaded function with no contextual type information");
+ else
+ emit_diagnostic (
+ diag_kind, loc, 0,
+ "insufficient contextual information to determine type");
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+}
+
+// forked from gcc/cp/decl2.cc decl_constant_var_p
+
+/* Nonzero for a VAR_DECL whose value can be used in a constant expression.
+
+ [expr.const]
+
+ An integral constant-expression can only involve ... const
+ variables of integral or enumeration types initialized with
+ constant expressions ...
+
+ C++0x also allows constexpr variables and temporaries initialized
+ with constant expressions. We handle the former here, but the latter
+ are just folded away in cxx_eval_constant_expression.
+
+ The standard does not require that the expression be non-volatile.
+ G++ implements the proposed correction in DR 457. */
+
+bool
+decl_constant_var_p (tree decl)
+{
+ if (!decl_maybe_constant_var_p (decl))
+ return false;
+
+ return DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl);
+}
+
+// forked from gcc/cp/decl.cc undeduced_auto_decl
+
+/* Returns true iff DECL is a variable or function declared with an auto type
+ that has not yet been deduced to a real type. */
+
+bool undeduced_auto_decl (tree) { return false; }
+
+// forked from gcc/cp/decl.cc require_deduced_type
+
+/* Complain if DECL has an undeduced return type. */
+
+bool require_deduced_type (tree, tsubst_flags_t) { return true; }
+
+/* Return the location of a tree passed to %+ formats. */
+
+location_t
+location_of (tree t)
+{
+ if (TYPE_P (t))
+ {
+ t = TYPE_MAIN_DECL (t);
+ if (t == NULL_TREE)
+ return input_location;
+ }
+ else if (TREE_CODE (t) == OVERLOAD)
+ t = OVL_FIRST (t);
+
+ if (DECL_P (t))
+ return DECL_SOURCE_LOCATION (t);
+
+ return EXPR_LOCATION (t);
+}
+
+/* For element type ELT_TYPE, return the appropriate type of the heap object
+ containing such element(s). COOKIE_SIZE is NULL or the size of cookie
+ in bytes. FULL_SIZE is NULL if it is unknown how big the heap allocation
+ will be, otherwise size of the heap object. If COOKIE_SIZE is NULL,
+ return array type ELT_TYPE[FULL_SIZE / sizeof(ELT_TYPE)], otherwise return
+ struct { size_t[COOKIE_SIZE/sizeof(size_t)]; ELT_TYPE[N]; }
+ where N is nothing (flexible array member) if FULL_SIZE is NULL, otherwise
+ it is computed such that the size of the struct fits into FULL_SIZE. */
+
+tree
+build_new_constexpr_heap_type (tree elt_type, tree cookie_size, tree full_size)
+{
+ gcc_assert (cookie_size == NULL_TREE || tree_fits_uhwi_p (cookie_size));
+ gcc_assert (full_size == NULL_TREE || tree_fits_uhwi_p (full_size));
+ unsigned HOST_WIDE_INT csz = cookie_size ? tree_to_uhwi (cookie_size) : 0;
+ tree itype2 = NULL_TREE;
+ if (full_size)
+ {
+ unsigned HOST_WIDE_INT fsz = tree_to_uhwi (full_size);
+ gcc_assert (fsz >= csz);
+ fsz -= csz;
+ fsz /= int_size_in_bytes (elt_type);
+ itype2 = build_index_type (size_int (fsz - 1));
+ if (!cookie_size)
+ return build_cplus_array_type (elt_type, itype2);
+ }
+ else
+ gcc_assert (cookie_size);
+ csz /= int_size_in_bytes (sizetype);
+ tree itype1 = build_index_type (size_int (csz - 1));
+ tree atype1 = build_cplus_array_type (sizetype, itype1);
+ tree atype2 = build_cplus_array_type (elt_type, itype2);
+ tree rtype = cxx_make_type (RECORD_TYPE);
+ TYPE_NAME (rtype) = heap_identifier;
+ tree fld1 = build_decl (UNKNOWN_LOCATION, FIELD_DECL, NULL_TREE, atype1);
+ tree fld2 = build_decl (UNKNOWN_LOCATION, FIELD_DECL, NULL_TREE, atype2);
+ DECL_FIELD_CONTEXT (fld1) = rtype;
+ DECL_FIELD_CONTEXT (fld2) = rtype;
+ DECL_ARTIFICIAL (fld1) = true;
+ DECL_ARTIFICIAL (fld2) = true;
+ TYPE_FIELDS (rtype) = fld1;
+ DECL_CHAIN (fld1) = fld2;
+ layout_type (rtype);
+ return rtype;
+}
+
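+// A worked example (assuming an LP64 target with 4-byte int and 8-byte
+// size_t): COOKIE_SIZE = 8 and FULL_SIZE = 40 leave 32 bytes for the
+// elements, so the result is laid out like
+//
+//   struct heap { size_t cookie[1]; int elts[8]; };
+//
+// (the field names here are illustrative; the actual FIELD_DECLs are
+// unnamed).
+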
+// forked from gcc/cp/class.cc field_poverlapping_p
+
+/* Return true iff FIELD_DECL DECL is potentially overlapping. */
+
+static bool
+field_poverlapping_p (tree decl)
+{
+ return lookup_attribute ("no_unique_address", DECL_ATTRIBUTES (decl));
+}
+
+// forked from gcc/cp/class.cc is_empty_field
+
+/* Return true iff DECL is an empty field, either for an empty base or a
+ [[no_unique_address]] data member. */
+
+bool
+is_empty_field (tree decl)
+{
+ if (!decl || TREE_CODE (decl) != FIELD_DECL)
+ return false;
+
+ bool r = (is_empty_class (TREE_TYPE (decl)) && (field_poverlapping_p (decl)));
+
+ /* Empty fields should have size zero. */
+ gcc_checking_assert (!r || integer_zerop (DECL_SIZE (decl)));
+
+ return r;
+}
+
+// forked from gcc/cp/call.cc in_immediate_context
+
+/* Return true if in an immediate function context, or an unevaluated operand,
+ or a subexpression of an immediate invocation. */
+
+bool
+in_immediate_context ()
+{
+ return false;
+}
+
+// forked from gcc/cp/cvt.cc cp_get_fndecl_from_callee
+
+/* FN is the callee of a CALL_EXPR or AGGR_INIT_EXPR; return the FUNCTION_DECL
+ if we can. */
+
+tree
+rs_get_fndecl_from_callee (tree fn, bool fold /* = true */)
+{
+ if (fn == NULL_TREE)
+ return fn;
+ if (TREE_CODE (fn) == FUNCTION_DECL)
+ return fn;
+ tree type = TREE_TYPE (fn);
+ if (type == NULL_TREE || !INDIRECT_TYPE_P (type))
+ return NULL_TREE;
+ if (fold)
+ fn = Compile::maybe_constant_init (fn);
+ STRIP_NOPS (fn);
+ if (TREE_CODE (fn) == ADDR_EXPR || TREE_CODE (fn) == FDESC_EXPR)
+ fn = TREE_OPERAND (fn, 0);
+ if (TREE_CODE (fn) == FUNCTION_DECL)
+ return fn;
+ return NULL_TREE;
+}
+
+// forked from gcc/cp/cvt.cc cp_get_callee_fndecl_nofold
+tree
+rs_get_callee_fndecl_nofold (tree call)
+{
+ return rs_get_fndecl_from_callee (cp_get_callee (call), false);
+}
+
+// forked from gcc/cp/init.cc is_class_type
+
+/* Check whether TYPE is a user-defined class type. If OR_ELSE is
+   nonzero, give an error message when it is not. */
+
+int
+is_class_type (tree type, int or_else)
+{
+ if (type == error_mark_node)
+ return 0;
+
+ if (!CLASS_TYPE_P (type))
+ {
+ if (or_else)
+ error ("%qT is not a class type", type);
+ return 0;
+ }
+ return 1;
+}
+
+// forked from gcc/cp/decl.cc lookup_enumerator
+
+/* Look for an enumerator with the given NAME within the enumeration
+ type ENUMTYPE. This routine is used primarily for qualified name
+ lookup into an enumerator in C++0x, e.g.,
+
+ enum class Color { Red, Green, Blue };
+
+ Color color = Color::Red;
+
+ Returns the value corresponding to the enumerator, or
+ NULL_TREE if no such enumerator was found. */
+tree
+lookup_enumerator (tree enumtype, tree name)
+{
+ tree e;
+ gcc_assert (enumtype && TREE_CODE (enumtype) == ENUMERAL_TYPE);
+
+ e = purpose_member (name, TYPE_VALUES (enumtype));
+ return e ? TREE_VALUE (e) : NULL_TREE;
+}
+
+// forked from gcc/cp/init.cc constant_value_1
+// commented out mark_used
+
+/* If DECL is a scalar enumeration constant or variable with a
+ constant initializer, return the initializer (or, its initializers,
+ recursively); otherwise, return DECL. If STRICT_P, the
+ initializer is only returned if DECL is a
+ constant-expression. If RETURN_AGGREGATE_CST_OK_P, it is ok to
+ return an aggregate constant. If UNSHARE_P, return an unshared
+ copy of the initializer. */
+
+static tree
+constant_value_1 (tree decl, bool strict_p, bool return_aggregate_cst_ok_p,
+ bool unshare_p)
+{
+ while (TREE_CODE (decl) == CONST_DECL || decl_constant_var_p (decl)
+ || (!strict_p && VAR_P (decl)
+ && RS_TYPE_CONST_NON_VOLATILE_P (TREE_TYPE (decl))))
+ {
+ tree init;
+ /* If DECL is a static data member in a template
+ specialization, we must instantiate it here. The
+ initializer for the static data member is not processed
+ until needed; we need it now. */
+ // mark_used (decl, tf_none);
+ init = DECL_INITIAL (decl);
+ if (init == error_mark_node)
+ {
+ if (TREE_CODE (decl) == CONST_DECL
+ || DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl))
+ /* Treat the error as a constant to avoid cascading errors on
+ excessively recursive template instantiation (c++/9335). */
+ return init;
+ else
+ return decl;
+ }
+
+ /* Instantiate a non-dependent initializer for user variables. We
+ mustn't do this for the temporary for an array compound literal;
+       trying to instantiate the initializer will keep creating new
+ temporaries until we crash. Probably it's not useful to do it for
+ other artificial variables, either. */
+ if (!DECL_ARTIFICIAL (decl))
+ init = instantiate_non_dependent_or_null (init);
+ if (!init || !TREE_TYPE (init) || !TREE_CONSTANT (init)
+ || (!return_aggregate_cst_ok_p
+ /* Unless RETURN_AGGREGATE_CST_OK_P is true, do not
+ return an aggregate constant (of which string
+ literals are a special case), as we do not want
+ to make inadvertent copies of such entities, and
+ we must be sure that their addresses are the
+ same everywhere. */
+ && (TREE_CODE (init) == CONSTRUCTOR
+ || TREE_CODE (init) == STRING_CST)))
+ break;
+ /* Don't return a CONSTRUCTOR for a variable with partial run-time
+ initialization, since it doesn't represent the entire value.
+ Similarly for VECTOR_CSTs created by cp_folding those
+ CONSTRUCTORs. */
+ if ((TREE_CODE (init) == CONSTRUCTOR || TREE_CODE (init) == VECTOR_CST)
+ && !DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl))
+ break;
+ /* If the variable has a dynamic initializer, don't use its
+ DECL_INITIAL which doesn't reflect the real value. */
+ if (VAR_P (decl) && TREE_STATIC (decl)
+ && !DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl)
+ && DECL_NONTRIVIALLY_INITIALIZED_P (decl))
+ break;
+ decl = init;
+ }
+ return unshare_p ? unshare_expr (decl) : decl;
+}
+
+// forked from gcc/cp/init.cc decl_constant_value
+
+/* A more relaxed version of decl_really_constant_value, used by the
+ common C/C++ code. */
+
+tree
+decl_constant_value (tree decl, bool unshare_p)
+{
+ return constant_value_1 (decl, /*strict_p=*/false,
+ /*return_aggregate_cst_ok_p=*/true,
+ /*unshare_p=*/unshare_p);
+}
+
+// Below is forked from gcc/cp/init.cc decl_constant_value
+
+tree
+decl_constant_value (tree decl)
+{
+ return decl_constant_value (decl, /*unshare_p=*/true);
+}
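+
+/* Usage sketch (illustrative): for a declaration `const int n = 42;'
+   whose VAR_DECL is N_DECL (a hypothetical tree),
+
+     tree v = decl_constant_value (n_decl);
+
+   folds to the INTEGER_CST 42, while a variable without a suitable
+   constant initializer is returned unchanged.  */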
+
+// Below is forked from gcc/cp/cp-gimplify.cc
+
+/* Type for source_location_table hash_set. */
+struct GTY ((for_user)) source_location_table_entry
+{
+ location_t loc;
+ unsigned uid;
+ tree var;
+};
+
+/* Traits class for function start hash maps below. */
+
+struct source_location_table_entry_hash
+ : ggc_remove<source_location_table_entry>
+{
+ typedef source_location_table_entry value_type;
+ typedef source_location_table_entry compare_type;
+
+ static hashval_t hash (const source_location_table_entry &ref)
+ {
+ inchash::hash hstate (0);
+ hstate.add_int (ref.loc);
+ hstate.add_int (ref.uid);
+ return hstate.end ();
+ }
+
+ static bool equal (const source_location_table_entry &ref1,
+ const source_location_table_entry &ref2)
+ {
+ return ref1.loc == ref2.loc && ref1.uid == ref2.uid;
+ }
+
+ static void mark_deleted (source_location_table_entry &ref)
+ {
+ ref.loc = UNKNOWN_LOCATION;
+ ref.uid = -1U;
+ ref.var = NULL_TREE;
+ }
+
+ static const bool empty_zero_p = true;
+
+ static void mark_empty (source_location_table_entry &ref)
+ {
+ ref.loc = UNKNOWN_LOCATION;
+ ref.uid = 0;
+ ref.var = NULL_TREE;
+ }
+
+ static bool is_deleted (const source_location_table_entry &ref)
+ {
+ return (ref.loc == UNKNOWN_LOCATION && ref.uid == -1U
+ && ref.var == NULL_TREE);
+ }
+
+ static bool is_empty (const source_location_table_entry &ref)
+ {
+ return (ref.loc == UNKNOWN_LOCATION && ref.uid == 0
+ && ref.var == NULL_TREE);
+ }
+
+ static void pch_nx (source_location_table_entry &p)
+ {
+ extern void gt_pch_nx (source_location_table_entry &);
+ gt_pch_nx (p);
+ }
+
+ static void pch_nx (source_location_table_entry &p, gt_pointer_operator op,
+ void *cookie)
+ {
+ extern void gt_pch_nx (source_location_table_entry *, gt_pointer_operator,
+ void *);
+ gt_pch_nx (&p, op, cookie);
+ }
+};
+
+static GTY (())
+ hash_table<source_location_table_entry_hash> *source_location_table;
+static GTY (()) unsigned int source_location_id;
+
+// Above is forked from gcc/cp/cp-gimplify.cc
+
+// forked from gcc/cp/tree.cc lvalue_kind
+
+/* If REF is an lvalue, returns the kind of lvalue that REF is.
+ Otherwise, returns clk_none. */
+
+cp_lvalue_kind
+lvalue_kind (const_tree ref)
+{
+ cp_lvalue_kind op1_lvalue_kind = clk_none;
+ cp_lvalue_kind op2_lvalue_kind = clk_none;
+
+ /* Expressions of reference type are sometimes wrapped in
+ INDIRECT_REFs. INDIRECT_REFs are just internal compiler
+ representation, not part of the language, so we have to look
+ through them. */
+ if (REFERENCE_REF_P (ref))
+ return lvalue_kind (TREE_OPERAND (ref, 0));
+
+ if (TREE_TYPE (ref) && TYPE_REF_P (TREE_TYPE (ref)))
+ {
+ /* unnamed rvalue references are rvalues */
+ if (TYPE_REF_IS_RVALUE (TREE_TYPE (ref)) && TREE_CODE (ref) != PARM_DECL
+ && !VAR_P (ref)
+ && TREE_CODE (ref) != COMPONENT_REF
+ /* Functions are always lvalues. */
+ && TREE_CODE (TREE_TYPE (TREE_TYPE (ref))) != FUNCTION_TYPE)
+ {
+ op1_lvalue_kind = clk_rvalueref;
+ if (implicit_rvalue_p (ref))
+ op1_lvalue_kind |= clk_implicit_rval;
+ return op1_lvalue_kind;
+ }
+
+ /* lvalue references and named rvalue references are lvalues. */
+ return clk_ordinary;
+ }
+
+ if (ref == current_class_ptr)
+ return clk_none;
+
+ /* Expressions with cv void type are prvalues. */
+ if (TREE_TYPE (ref) && VOID_TYPE_P (TREE_TYPE (ref)))
+ return clk_none;
+
+ switch (TREE_CODE (ref))
+ {
+ case SAVE_EXPR:
+ return clk_none;
+
+ /* preincrements and predecrements are valid lvals, provided
+ what they refer to are valid lvals. */
+ case PREINCREMENT_EXPR:
+ case PREDECREMENT_EXPR:
+ case TRY_CATCH_EXPR:
+ case REALPART_EXPR:
+ case IMAGPART_EXPR:
+ case VIEW_CONVERT_EXPR:
+ return lvalue_kind (TREE_OPERAND (ref, 0));
+
+ case ARRAY_REF: {
+ tree op1 = TREE_OPERAND (ref, 0);
+ if (TREE_CODE (TREE_TYPE (op1)) == ARRAY_TYPE)
+ {
+ op1_lvalue_kind = lvalue_kind (op1);
+ if (op1_lvalue_kind == clk_class)
+ /* in the case of an array operand, the result is an lvalue if
+ that operand is an lvalue and an xvalue otherwise */
+ op1_lvalue_kind = clk_rvalueref;
+ return op1_lvalue_kind;
+ }
+ else
+ return clk_ordinary;
+ }
+
+ case MEMBER_REF:
+ case DOTSTAR_EXPR:
+ if (TREE_CODE (ref) == MEMBER_REF)
+ op1_lvalue_kind = clk_ordinary;
+ else
+ op1_lvalue_kind = lvalue_kind (TREE_OPERAND (ref, 0));
+ if (TYPE_PTRMEMFUNC_P (TREE_TYPE (TREE_OPERAND (ref, 1))))
+ op1_lvalue_kind = clk_none;
+ else if (op1_lvalue_kind == clk_class)
+ /* The result of a .* expression whose second operand is a pointer to a
+ data member is an lvalue if the first operand is an lvalue and an
+ xvalue otherwise. */
+ op1_lvalue_kind = clk_rvalueref;
+ return op1_lvalue_kind;
+
+ case COMPONENT_REF:
+ op1_lvalue_kind = lvalue_kind (TREE_OPERAND (ref, 0));
+ if (op1_lvalue_kind == clk_class)
+ /* If E1 is an lvalue, then E1.E2 is an lvalue;
+ otherwise E1.E2 is an xvalue. */
+ op1_lvalue_kind = clk_rvalueref;
+
+ /* Look at the member designator. */
+ if (!op1_lvalue_kind)
+ ;
+ else if (is_overloaded_fn (TREE_OPERAND (ref, 1)))
+ /* The "field" can be a FUNCTION_DECL or an OVERLOAD in some
+ situations. If we're seeing a COMPONENT_REF, it's a non-static
+ member, so it isn't an lvalue. */
+ op1_lvalue_kind = clk_none;
+ else if (TREE_CODE (TREE_OPERAND (ref, 1)) != FIELD_DECL)
+ /* This can be IDENTIFIER_NODE in a template. */;
+ else if (DECL_C_BIT_FIELD (TREE_OPERAND (ref, 1)))
+ {
+ /* Clear the ordinary bit. If this object was a class
+ rvalue we want to preserve that information. */
+ op1_lvalue_kind &= ~clk_ordinary;
+ /* The lvalue is for a bitfield. */
+ op1_lvalue_kind |= clk_bitfield;
+ }
+ else if (DECL_PACKED (TREE_OPERAND (ref, 1)))
+ op1_lvalue_kind |= clk_packed;
+
+ return op1_lvalue_kind;
+
+ case STRING_CST:
+ case COMPOUND_LITERAL_EXPR:
+ return clk_ordinary;
+
+ case CONST_DECL:
+ /* CONST_DECL without TREE_STATIC are enumeration values and
+ thus not lvalues. With TREE_STATIC they are used by ObjC++
+ in objc_build_string_object and need to be considered as
+ lvalues. */
+ if (!TREE_STATIC (ref))
+ return clk_none;
+ /* FALLTHRU */
+ case VAR_DECL:
+ if (VAR_P (ref) && DECL_HAS_VALUE_EXPR_P (ref))
+ return lvalue_kind (DECL_VALUE_EXPR (CONST_CAST_TREE (ref)));
+
+ if (TREE_READONLY (ref) && !TREE_STATIC (ref) && DECL_LANG_SPECIFIC (ref)
+ && DECL_IN_AGGR_P (ref))
+ return clk_none;
+ /* FALLTHRU */
+ case INDIRECT_REF:
+ case ARROW_EXPR:
+ case PARM_DECL:
+ case RESULT_DECL:
+ case PLACEHOLDER_EXPR:
+ return clk_ordinary;
+
+ case MAX_EXPR:
+ case MIN_EXPR:
+      /* Disallow <? and >? as lvalues if either argument has side effects.  */
+ if (TREE_SIDE_EFFECTS (TREE_OPERAND (ref, 0))
+ || TREE_SIDE_EFFECTS (TREE_OPERAND (ref, 1)))
+ return clk_none;
+ op1_lvalue_kind = lvalue_kind (TREE_OPERAND (ref, 0));
+ op2_lvalue_kind = lvalue_kind (TREE_OPERAND (ref, 1));
+ break;
+
+ case COND_EXPR: {
+ tree op1 = TREE_OPERAND (ref, 1);
+ if (!op1)
+ op1 = TREE_OPERAND (ref, 0);
+ tree op2 = TREE_OPERAND (ref, 2);
+ op1_lvalue_kind = lvalue_kind (op1);
+ op2_lvalue_kind = lvalue_kind (op2);
+ if (!op1_lvalue_kind != !op2_lvalue_kind)
+ {
+ /* The second or the third operand (but not both) is a
+ throw-expression; the result is of the type
+ and value category of the other. */
+ if (op1_lvalue_kind && TREE_CODE (op2) == THROW_EXPR)
+ op2_lvalue_kind = op1_lvalue_kind;
+ else if (op2_lvalue_kind && TREE_CODE (op1) == THROW_EXPR)
+ op1_lvalue_kind = op2_lvalue_kind;
+ }
+ }
+ break;
+
+ case MODIFY_EXPR:
+ case TYPEID_EXPR:
+ return clk_ordinary;
+
+ case COMPOUND_EXPR:
+ return lvalue_kind (TREE_OPERAND (ref, 1));
+
+ case TARGET_EXPR:
+ return clk_class;
+
+ case VA_ARG_EXPR:
+ return (CLASS_TYPE_P (TREE_TYPE (ref)) ? clk_class : clk_none);
+
+ case CALL_EXPR:
+ /* We can see calls outside of TARGET_EXPR in templates. */
+ if (CLASS_TYPE_P (TREE_TYPE (ref)))
+ return clk_class;
+ return clk_none;
+
+ case FUNCTION_DECL:
+ /* All functions (except non-static-member functions) are
+ lvalues. */
+ return (DECL_NONSTATIC_MEMBER_FUNCTION_P (ref) ? clk_none : clk_ordinary);
+
+ case NON_DEPENDENT_EXPR:
+ case PAREN_EXPR:
+ return lvalue_kind (TREE_OPERAND (ref, 0));
+
+ case TEMPLATE_PARM_INDEX:
+ if (CLASS_TYPE_P (TREE_TYPE (ref)))
+ /* A template parameter object is an lvalue. */
+ return clk_ordinary;
+ return clk_none;
+
+ default:
+ if (!TREE_TYPE (ref))
+ return clk_none;
+ if (CLASS_TYPE_P (TREE_TYPE (ref))
+ || TREE_CODE (TREE_TYPE (ref)) == ARRAY_TYPE)
+ return clk_class;
+ return clk_none;
+ }
+
+ /* If one operand is not an lvalue at all, then this expression is
+ not an lvalue. */
+ if (!op1_lvalue_kind || !op2_lvalue_kind)
+ return clk_none;
+
+ /* Otherwise, it's an lvalue, and it has all the odd properties
+ contributed by either operand. */
+ op1_lvalue_kind = op1_lvalue_kind | op2_lvalue_kind;
+ /* It's not an ordinary lvalue if it involves any other kind. */
+ if ((op1_lvalue_kind & ~clk_ordinary) != clk_none)
+ op1_lvalue_kind &= ~clk_ordinary;
+ /* It can't be both a pseudo-lvalue and a non-addressable lvalue.
+ A COND_EXPR of those should be wrapped in a TARGET_EXPR. */
+ if ((op1_lvalue_kind & (clk_rvalueref | clk_class))
+ && (op1_lvalue_kind & (clk_bitfield | clk_packed)))
+ op1_lvalue_kind = clk_none;
+ return op1_lvalue_kind;
+}
+
+// forked from gcc/cp/tree.cc glvalue_p
+
+/* This differs from lvalue_p in that xvalues are included. */
+
+bool
+glvalue_p (const_tree ref)
+{
+ cp_lvalue_kind kind = lvalue_kind (ref);
+ if (kind & clk_class)
+ return false;
+ else
+ return (kind != clk_none);
+}
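+
+/* Informal examples (illustrative, in C++ terms):
+
+     int i;         // as an expression: clk_ordinary  -> glvalue_p true
+     std::move (i)  //                   clk_rvalueref -> glvalue_p true
+     42             //                   clk_none      -> glvalue_p false
+     A ()           // class prvalue:    clk_class     -> glvalue_p false  */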
+
+// forked from gcc/cp/init.cc cv_qualified_p
+
+/* Returns nonzero if TYPE is const or volatile. */
+
+bool
+cv_qualified_p (const_tree type)
+{
+ int quals = rs_type_quals (type);
+ return (quals & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)) != 0;
+}
+
+// forked from gcc/cp/tree.cc rvalue
+
+/* EXPR is being used in an rvalue context. Return a version of EXPR
+ that is marked as an rvalue. */
+
+tree
+rvalue (tree expr)
+{
+ tree type;
+
+ if (error_operand_p (expr))
+ return expr;
+
+ expr = mark_rvalue_use (expr);
+
+ /* [basic.lval]
+
+ Non-class rvalues always have cv-unqualified types. */
+ type = TREE_TYPE (expr);
+ if (!CLASS_TYPE_P (type) && cv_qualified_p (type))
+ type = cv_unqualified (type);
+
+ /* We need to do this for rvalue refs as well to get the right answer
+ from decltype; see c++/36628. */
+ if (glvalue_p (expr))
+ {
+ /* But don't use this function for class lvalues; use move (to treat an
+ lvalue as an xvalue) or force_rvalue (to make a prvalue copy). */
+ gcc_checking_assert (!CLASS_TYPE_P (type));
+ expr = build1 (NON_LVALUE_EXPR, type, expr);
+ }
+ else if (type != TREE_TYPE (expr))
+ expr = build_nop (type, expr);
+
+ return expr;
+}
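+
+/* For instance (illustrative): with `const int c = 0;', using C in an
+   rvalue context wraps it in a NON_LVALUE_EXPR whose type is the
+   cv-unqualified int, since non-class rvalues always have
+   cv-unqualified types ([basic.lval]).  */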
+
+// forked from gcc/cp/tree.cc bitfield_p
+
+/* True if REF is a bit-field. */
+
+bool
+bitfield_p (const_tree ref)
+{
+ return (lvalue_kind (ref) & clk_bitfield);
+}
+
+// forked from gcc/cp/typeck.cc cxx_mark_addressable
+
+/* Mark EXP saying that we need to be able to take the
+ address of it; it should not be allocated in a register.
+ Value is true if successful. ARRAY_REF_P is true if this
+ is for ARRAY_REF construction - in that case we don't want
+ to look through VIEW_CONVERT_EXPR from VECTOR_TYPE to ARRAY_TYPE,
+ it is fine to use ARRAY_REFs for vector subscripts on vector
+ register variables.
+
+ C++: we do not allow `current_class_ptr' to be addressable. */
+
+bool
+cxx_mark_addressable (tree exp, bool array_ref_p)
+{
+ tree x = exp;
+
+ while (1)
+ switch (TREE_CODE (x))
+ {
+ case VIEW_CONVERT_EXPR:
+ if (array_ref_p && TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE
+ && VECTOR_TYPE_P (TREE_TYPE (TREE_OPERAND (x, 0))))
+ return true;
+ x = TREE_OPERAND (x, 0);
+ break;
+
+ case COMPONENT_REF:
+ if (bitfield_p (x))
+ error ("attempt to take address of bit-field");
+ /* FALLTHRU */
+ case ADDR_EXPR:
+ case ARRAY_REF:
+ case REALPART_EXPR:
+ case IMAGPART_EXPR:
+ x = TREE_OPERAND (x, 0);
+ break;
+
+ case PARM_DECL:
+ if (x == current_class_ptr)
+ {
+ error ("cannot take the address of %<this%>, which is an rvalue "
+ "expression");
+ TREE_ADDRESSABLE (x) = 1; /* so compiler doesn't die later. */
+ return true;
+ }
+ /* Fall through. */
+
+ case VAR_DECL:
+ /* Caller should not be trying to mark initialized
+ constant fields addressable. */
+ gcc_assert (DECL_LANG_SPECIFIC (x) == 0 || DECL_IN_AGGR_P (x) == 0
+ || TREE_STATIC (x) || DECL_EXTERNAL (x));
+ /* Fall through. */
+
+ case RESULT_DECL:
+ if (DECL_REGISTER (x) && !TREE_ADDRESSABLE (x) && !DECL_ARTIFICIAL (x))
+ {
+ if (VAR_P (x) && DECL_HARD_REGISTER (x))
+ {
+ error ("address of explicit register variable %qD requested",
+ x);
+ return false;
+ }
+ else if (extra_warnings)
+ warning (
+ OPT_Wextra,
+ "address requested for %qD, which is declared %<register%>", x);
+ }
+ TREE_ADDRESSABLE (x) = 1;
+ return true;
+
+ case CONST_DECL:
+ case FUNCTION_DECL:
+ TREE_ADDRESSABLE (x) = 1;
+ return true;
+
+ case CONSTRUCTOR:
+ TREE_ADDRESSABLE (x) = 1;
+ return true;
+
+ case TARGET_EXPR:
+ TREE_ADDRESSABLE (x) = 1;
+ cxx_mark_addressable (TREE_OPERAND (x, 0));
+ return true;
+
+ default:
+ return true;
+ }
+}
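+
+/* Sketch (illustrative): for a variable declared `register int r;',
+   taking its address merely warns under -Wextra and then sets
+   TREE_ADDRESSABLE, whereas a variable bound to an explicit hard
+   register (e.g. `register int r asm ("r0");') is rejected with an
+   error and the function returns false.  */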
+
+// forked from gcc/cp/typeck.cc build_address
+
+/* Returns the address of T. This function will fold away
+ ADDR_EXPR of INDIRECT_REF. This is only for low-level usage;
+ most places should use cp_build_addr_expr instead. */
+
+tree
+build_address (tree t)
+{
+ if (error_operand_p (t) || !cxx_mark_addressable (t))
+ return error_mark_node;
+ gcc_checking_assert (TREE_CODE (t) != CONSTRUCTOR);
+ t = build_fold_addr_expr_loc (EXPR_LOCATION (t), t);
+ if (TREE_CODE (t) != ADDR_EXPR)
+ t = rvalue (t);
+ return t;
+}
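+
+/* Minimal sketch (illustrative): for a VAR_DECL V of type T,
+
+     tree addr = build_address (v);
+
+   marks V addressable and yields an ADDR_EXPR of type T*; for an
+   INDIRECT_REF such as `*p', the ADDR_EXPR folds away and P itself is
+   returned as an rvalue.  */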
+
+// forked from gcc/cp/cp-gimplify.cc fold_builtin_source_location
+
+/* Fold __builtin_source_location () call. LOC is the location
+ of the call. */
+
+tree
+fold_builtin_source_location (location_t loc)
+{
+ // if (source_location_impl == NULL_TREE)
+ // {
+ // auto_diagnostic_group d;
+ // source_location_impl = get_source_location_impl_type (loc);
+ // if (source_location_impl == error_mark_node)
+ // inform (loc, "evaluating %qs", "__builtin_source_location");
+ // }
+ if (source_location_impl == error_mark_node)
+ return build_zero_cst (const_ptr_type_node);
+ if (source_location_table == NULL)
+ source_location_table
+ = hash_table<source_location_table_entry_hash>::create_ggc (64);
+ const line_map_ordinary *map;
+ source_location_table_entry entry;
+ entry.loc = linemap_resolve_location (line_table, loc,
+ LRK_MACRO_EXPANSION_POINT, &map);
+ entry.uid = current_function_decl ? DECL_UID (current_function_decl) : -1;
+ entry.var = error_mark_node;
+ source_location_table_entry *entryp
+ = source_location_table->find_slot (entry, INSERT);
+ tree var;
+ if (entryp->var)
+ var = entryp->var;
+ else
+ {
+ char tmp_name[32];
+ ASM_GENERATE_INTERNAL_LABEL (tmp_name, "Lsrc_loc", source_location_id++);
+ var = build_decl (loc, VAR_DECL, get_identifier (tmp_name),
+ source_location_impl);
+ TREE_STATIC (var) = 1;
+ TREE_PUBLIC (var) = 0;
+ DECL_ARTIFICIAL (var) = 1;
+ DECL_IGNORED_P (var) = 1;
+ DECL_EXTERNAL (var) = 0;
+ DECL_DECLARED_CONSTEXPR_P (var) = 1;
+ DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (var) = 1;
+ layout_decl (var, 0);
+
+ vec<constructor_elt, va_gc> *v = NULL;
+ vec_alloc (v, 4);
+ for (tree field = TYPE_FIELDS (source_location_impl);
+ (field = next_initializable_field (field)) != NULL_TREE;
+ field = DECL_CHAIN (field))
+ {
+ const char *n = IDENTIFIER_POINTER (DECL_NAME (field));
+ tree val = NULL_TREE;
+ if (strcmp (n, "_M_file_name") == 0)
+ {
+ if (const char *fname = LOCATION_FILE (loc))
+ {
+ fname = remap_macro_filename (fname);
+ val = build_string_literal (strlen (fname) + 1, fname);
+ }
+ else
+ val = build_string_literal (1, "");
+ }
+ else if (strcmp (n, "_M_function_name") == 0)
+ {
+	      const char *name = "todo: add function name here";
+
+ // if (current_function_decl)
+ // name = cxx_printable_name (current_function_decl, 2);
+
+ val = build_string_literal (strlen (name) + 1, name);
+ }
+ else if (strcmp (n, "_M_line") == 0)
+ val = build_int_cst (TREE_TYPE (field), LOCATION_LINE (loc));
+ else if (strcmp (n, "_M_column") == 0)
+ val = build_int_cst (TREE_TYPE (field), LOCATION_COLUMN (loc));
+ else
+ gcc_unreachable ();
+ CONSTRUCTOR_APPEND_ELT (v, field, val);
+ }
+
+ tree ctor = build_constructor (source_location_impl, v);
+ TREE_CONSTANT (ctor) = 1;
+ TREE_STATIC (ctor) = 1;
+ DECL_INITIAL (var) = ctor;
+ varpool_node::finalize_decl (var);
+ *entryp = entry;
+ entryp->var = var;
+ }
+
+ return build_fold_addr_expr_with_type_loc (loc, var, const_ptr_type_node);
+}
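+
+/* Sketch of the result (illustrative): each distinct expansion point
+   gets one static variable of the implementation record type, roughly
+
+     static const impl Lsrc_loc0
+       = { "file.rs", "todo: add function name here", 3, 14 };
+
+   (impl and Lsrc_loc0 are hypothetical names; "Lsrc_loc" is the real
+   label prefix), and the call folds to (const void *) &Lsrc_loc0.
+   Repeated calls at the same resolved location within the same
+   function reuse the cached variable via source_location_table.  */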
+
+// forked from gcc/c-family/c-common.cc braced_list_to_string
+
+/* Attempt to convert a braced array initializer list CTOR for array
+ TYPE into a STRING_CST for convenience and efficiency. Return
+ the converted string on success or the original ctor on failure. */
+
+static tree
+braced_list_to_string (tree type, tree ctor, bool member)
+{
+ /* Ignore non-members with unknown size like arrays with unspecified
+ bound. */
+ tree typesize = TYPE_SIZE_UNIT (type);
+ if (!member && !tree_fits_uhwi_p (typesize))
+ return ctor;
+
+  /* If the target char size differs from the host char size, we'd risk
+     losing data and getting object sizes wrong by converting to
+     host chars.  */
+ if (TYPE_PRECISION (char_type_node) != CHAR_BIT)
+ return ctor;
+
+ /* If the array has an explicit bound, use it to constrain the size
+ of the string. If it doesn't, be sure to create a string that's
+ as long as implied by the index of the last zero specified via
+ a designator, as in:
+ const char a[] = { [7] = 0 }; */
+ unsigned HOST_WIDE_INT maxelts;
+ if (typesize)
+ {
+ maxelts = tree_to_uhwi (typesize);
+ maxelts /= tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (type)));
+ }
+ else
+ maxelts = HOST_WIDE_INT_M1U;
+
+ /* Avoid converting initializers for zero-length arrays (but do
+ create them for flexible array members). */
+ if (!maxelts)
+ return ctor;
+
+ unsigned HOST_WIDE_INT nelts = CONSTRUCTOR_NELTS (ctor);
+
+ auto_vec<char> str;
+ str.reserve (nelts + 1);
+
+ unsigned HOST_WIDE_INT i;
+ tree index, value;
+
+ FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (ctor), i, index, value)
+ {
+ unsigned HOST_WIDE_INT idx = i;
+ if (index)
+ {
+ if (!tree_fits_uhwi_p (index))
+ return ctor;
+ idx = tree_to_uhwi (index);
+ }
+
+ /* auto_vec is limited to UINT_MAX elements. */
+ if (idx > UINT_MAX)
+ return ctor;
+
+ /* Avoid non-constant initializers. */
+ if (!tree_fits_shwi_p (value))
+ return ctor;
+
+ /* Skip over embedded nuls except the last one (initializer
+ elements are in ascending order of indices). */
+ HOST_WIDE_INT val = tree_to_shwi (value);
+ if (!val && i + 1 < nelts)
+ continue;
+
+ if (idx < str.length ())
+ return ctor;
+
+ /* Bail if the CTOR has a block of more than 256 embedded nuls
+ due to implicitly initialized elements. */
+ unsigned nchars = (idx - str.length ()) + 1;
+ if (nchars > 256)
+ return ctor;
+
+ if (nchars > 1)
+ {
+ str.reserve (idx);
+ str.quick_grow_cleared (idx);
+ }
+
+ if (idx >= maxelts)
+ return ctor;
+
+ str.safe_insert (idx, val);
+ }
+
+ /* Append a nul string termination. */
+ if (maxelts != HOST_WIDE_INT_M1U && str.length () < maxelts)
+ str.safe_push (0);
+
+ /* Build a STRING_CST with the same type as the array. */
+ tree res = build_string (str.length (), str.begin ());
+ TREE_TYPE (res) = type;
+ return res;
+}
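+
+/* Example of the conversion (illustrative):
+
+     const char a[] = { 'h', 'i', 0 };   // CONSTRUCTOR
+
+   becomes a STRING_CST of the same array type, whereas
+
+     const char b[] = { [300] = 0 };     // > 256 implicit embedded nuls
+
+   is left as the original CONSTRUCTOR.  */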
+
+// forked from gcc/c-family/c-common.cc braced_lists_to_strings
+
+/* Implementation of the two-argument braced_lists_to_strings with
+   the same arguments plus MEMBER, which is set for struct members
+   to allow initializers for flexible array members.  */
+
+static tree
+braced_lists_to_strings (tree type, tree ctor, bool member)
+{
+ if (TREE_CODE (ctor) != CONSTRUCTOR)
+ return ctor;
+
+ tree_code code = TREE_CODE (type);
+
+ tree ttp;
+ if (code == ARRAY_TYPE)
+ ttp = TREE_TYPE (type);
+ else if (code == RECORD_TYPE)
+ {
+ ttp = TREE_TYPE (ctor);
+ if (TREE_CODE (ttp) == ARRAY_TYPE)
+ {
+ type = ttp;
+ ttp = TREE_TYPE (ttp);
+ }
+ }
+ else
+ return ctor;
+
+ if ((TREE_CODE (ttp) == ARRAY_TYPE || TREE_CODE (ttp) == INTEGER_TYPE)
+ && TYPE_STRING_FLAG (ttp))
+ return braced_list_to_string (type, ctor, member);
+
+ code = TREE_CODE (ttp);
+ if (code == ARRAY_TYPE || RECORD_OR_UNION_TYPE_P (ttp))
+ {
+ bool rec = RECORD_OR_UNION_TYPE_P (ttp);
+
+ /* Handle array of arrays or struct member initializers. */
+ tree val;
+ unsigned HOST_WIDE_INT idx;
+ FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (ctor), idx, val)
+ {
+ val = braced_lists_to_strings (ttp, val, rec);
+ CONSTRUCTOR_ELT (ctor, idx)->value = val;
+ }
+ }
+
+ return ctor;
+}
+
+// forked from gcc/c-family/c-common.cc braced_lists_to_strings
+
+/* Attempt to convert a CTOR containing braced array initializer lists
+ for array TYPE into one containing STRING_CSTs, for convenience and
+ efficiency. Recurse for arrays of arrays and member initializers.
+ Return the converted CTOR or STRING_CST on success or the original
+ CTOR otherwise. */
+
+tree
+braced_lists_to_strings (tree type, tree ctor)
+{
+ return braced_lists_to_strings (type, ctor, false);
+}
+
+/*---------------------------------------------------------------------------
+ Constraint satisfaction
+---------------------------------------------------------------------------*/
+
+// forked from gcc/cp/constraint.cc satisfying_constraint
+
+/* True if we are currently satisfying a constraint.  */
+
+static bool satisfying_constraint;
+
+// forked from gcc/cp/constraint.cc failed_type_completions
+
+/* A vector of incomplete types (and of declarations with undeduced return
+ type), appended to by note_failed_type_completion_for_satisfaction. The
+ satisfaction caches use this in order to keep track of "potentially unstable"
+ satisfaction results.
+
+ Since references to entries in this vector are stored only in the
+ GC-deletable sat_cache, it's safe to make this deletable as well. */
+
+static GTY ((deletable)) vec<tree, va_gc> *failed_type_completions;
+
+// forked from gcc/cp/constraint.cc note_failed_type_completion_for_satisfaction
+
+/* Called whenever a type completion (or return type deduction) failure occurs
+ that definitely affects the meaning of the program, by e.g. inducing
+ substitution failure. */
+
+void
+note_failed_type_completion_for_satisfaction (tree t)
+{
+ if (satisfying_constraint)
+ {
+ gcc_checking_assert ((TYPE_P (t) && !COMPLETE_TYPE_P (t))
+ || (DECL_P (t) && undeduced_auto_decl (t)));
+ vec_safe_push (failed_type_completions, t);
+ }
+}
+
+// forked from gcc/cp/typeck.cc complete_type
+
+/* Try to complete TYPE, if it is incomplete. For example, if TYPE is
+ a template instantiation, do the instantiation. Returns TYPE,
+ whether or not it could be completed, unless something goes
+ horribly wrong, in which case the error_mark_node is returned. */
+
+tree
+complete_type (tree type)
+{
+ if (type == NULL_TREE)
+ /* Rather than crash, we return something sure to cause an error
+ at some point. */
+ return error_mark_node;
+
+ if (type == error_mark_node || COMPLETE_TYPE_P (type))
+ ;
+ else if (TREE_CODE (type) == ARRAY_TYPE)
+ {
+ tree t = complete_type (TREE_TYPE (type));
+ unsigned int needs_constructing, has_nontrivial_dtor;
+ if (COMPLETE_TYPE_P (t))
+ layout_type (type);
+ needs_constructing = TYPE_NEEDS_CONSTRUCTING (TYPE_MAIN_VARIANT (t));
+ has_nontrivial_dtor
+ = TYPE_HAS_NONTRIVIAL_DESTRUCTOR (TYPE_MAIN_VARIANT (t));
+ for (t = TYPE_MAIN_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
+ {
+ TYPE_NEEDS_CONSTRUCTING (t) = needs_constructing;
+ TYPE_HAS_NONTRIVIAL_DESTRUCTOR (t) = has_nontrivial_dtor;
+ }
+ }
+
+ return type;
+}
+
+// forked from gcc/cp/typeck.cc complete_type_or_maybe_complain
+
+/* Like complete_type, but issue an error if the TYPE cannot be completed.
+ VALUE is used for informative diagnostics.
+ Returns NULL_TREE if the type cannot be made complete. */
+
+tree
+complete_type_or_maybe_complain (tree type, tree value, tsubst_flags_t complain)
+{
+ type = complete_type (type);
+ if (type == error_mark_node)
+ /* We already issued an error. */
+ return NULL_TREE;
+ else if (!COMPLETE_TYPE_P (type))
+ {
+ if (complain & tf_error)
+ cxx_incomplete_type_diagnostic (value, type, DK_ERROR);
+ note_failed_type_completion_for_satisfaction (type);
+ return NULL_TREE;
+ }
+ else
+ return type;
+}
+
+// forked from gcc/cp/typeck.cc complete_type_or_else
+
+tree
+complete_type_or_else (tree type, tree value)
+{
+ return complete_type_or_maybe_complain (type, value, tf_warning_or_error);
+}
+
+// forked from gcc/cp/tree.cc std_layout_type_p
+
+/* Returns true iff T is a standard-layout type, as defined in
+ [basic.types]. */
+
+bool
+std_layout_type_p (const_tree t)
+{
+ t = strip_array_types (CONST_CAST_TREE (t));
+
+ if (CLASS_TYPE_P (t))
+ return !CLASSTYPE_NON_STD_LAYOUT (t);
+ else
+ return scalarish_type_p (t);
+}
+
+// forked from /gcc/cp/semantics.cc first_nonstatic_data_member_p
+
+/* Helper function for fold_builtin_is_pointer_inverconvertible_with_class,
+   return true if MEMBERTYPE is the type of the first non-static data member
+   of TYPE or, for unions, of any member.  */
+static bool
+first_nonstatic_data_member_p (tree type, tree membertype)
+{
+ for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
+ {
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+ if (DECL_FIELD_IS_BASE (field) && is_empty_field (field))
+ continue;
+ if (DECL_FIELD_IS_BASE (field))
+ return first_nonstatic_data_member_p (TREE_TYPE (field), membertype);
+ if (ANON_AGGR_TYPE_P (TREE_TYPE (field)))
+ {
+ if ((TREE_CODE (TREE_TYPE (field)) == UNION_TYPE
+ || std_layout_type_p (TREE_TYPE (field)))
+ && first_nonstatic_data_member_p (TREE_TYPE (field), membertype))
+ return true;
+ }
+ else if (same_type_ignoring_top_level_qualifiers_p (TREE_TYPE (field),
+ membertype))
+ return true;
+ if (TREE_CODE (type) != UNION_TYPE)
+ return false;
+ }
+ return false;
+}
+
+// forked from gcc/cp/semantics.cc
+// fold_builtin_is_pointer_inverconvertible_with_class
+
+/* Fold __builtin_is_pointer_interconvertible_with_class call. */
+
+tree
+fold_builtin_is_pointer_inverconvertible_with_class (location_t loc, int nargs,
+ tree *args)
+{
+ /* Unless users call the builtin directly, the following 3 checks should be
+ ensured from std::is_pointer_interconvertible_with_class function
+ template. */
+ if (nargs != 1)
+ {
+ error_at (loc, "%<__builtin_is_pointer_interconvertible_with_class%> "
+ "needs a single argument");
+ return boolean_false_node;
+ }
+ tree arg = args[0];
+ if (error_operand_p (arg))
+ return boolean_false_node;
+ if (!TYPE_PTRMEM_P (TREE_TYPE (arg)))
+ {
+ error_at (loc, "%<__builtin_is_pointer_interconvertible_with_class%> "
+ "argument is not pointer to member");
+ return boolean_false_node;
+ }
+
+ if (!TYPE_PTRDATAMEM_P (TREE_TYPE (arg)))
+ return boolean_false_node;
+
+ tree membertype = TREE_TYPE (TREE_TYPE (arg));
+ tree basetype = TYPE_OFFSET_BASETYPE (TREE_TYPE (arg));
+ if (!complete_type_or_else (basetype, NULL_TREE))
+ return boolean_false_node;
+
+ if (TREE_CODE (basetype) != UNION_TYPE && !std_layout_type_p (basetype))
+ return boolean_false_node;
+
+ if (!first_nonstatic_data_member_p (basetype, membertype))
+ return boolean_false_node;
+
+ if (integer_nonzerop (arg))
+ return boolean_false_node;
+ if (integer_zerop (arg))
+ return boolean_true_node;
+
+ return fold_build2 (EQ_EXPR, boolean_type_node, arg,
+ build_zero_cst (TREE_TYPE (arg)));
+}
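+
+/* Sketch (illustrative): for a standard-layout
+   `struct S { int a; long b; };',
+
+     __builtin_is_pointer_interconvertible_with_class (&S::a)  // true
+     __builtin_is_pointer_interconvertible_with_class (&S::b)  // false
+
+   since only the first non-static data member is
+   pointer-interconvertible with the enclosing object.  */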
+
+// forked from gcc/c-family/c-common.cc registered_builtin_types
+
+/* Used for communication between c_common_type_for_mode and
+ c_register_builtin_type. */
+tree registered_builtin_types;
+
+/* Return a data type that has machine mode MODE.
+ If the mode is an integer,
+ then UNSIGNEDP selects between signed and unsigned types.
+ If the mode is a fixed-point mode,
+ then UNSIGNEDP selects between saturating and nonsaturating types. */
+
+// forked from gcc/c-family/c-common.cc c_common_type_for_mode
+
+tree
+c_common_type_for_mode (machine_mode mode, int unsignedp)
+{
+ tree t;
+ int i;
+
+ if (mode == TYPE_MODE (integer_type_node))
+ return unsignedp ? unsigned_type_node : integer_type_node;
+
+ if (mode == TYPE_MODE (signed_char_type_node))
+ return unsignedp ? unsigned_char_type_node : signed_char_type_node;
+
+ if (mode == TYPE_MODE (short_integer_type_node))
+ return unsignedp ? short_unsigned_type_node : short_integer_type_node;
+
+ if (mode == TYPE_MODE (long_integer_type_node))
+ return unsignedp ? long_unsigned_type_node : long_integer_type_node;
+
+ if (mode == TYPE_MODE (long_long_integer_type_node))
+ return unsignedp ? long_long_unsigned_type_node
+ : long_long_integer_type_node;
+
+ for (i = 0; i < NUM_INT_N_ENTS; i++)
+ if (int_n_enabled_p[i] && mode == int_n_data[i].m)
+ return (unsignedp ? int_n_trees[i].unsigned_type
+ : int_n_trees[i].signed_type);
+
+ if (mode == QImode)
+ return unsignedp ? unsigned_intQI_type_node : intQI_type_node;
+
+ if (mode == HImode)
+ return unsignedp ? unsigned_intHI_type_node : intHI_type_node;
+
+ if (mode == SImode)
+ return unsignedp ? unsigned_intSI_type_node : intSI_type_node;
+
+ if (mode == DImode)
+ return unsignedp ? unsigned_intDI_type_node : intDI_type_node;
+
+#if HOST_BITS_PER_WIDE_INT >= 64
+ if (mode == TYPE_MODE (intTI_type_node))
+ return unsignedp ? unsigned_intTI_type_node : intTI_type_node;
+#endif
+
+ if (mode == TYPE_MODE (float_type_node))
+ return float_type_node;
+
+ if (mode == TYPE_MODE (double_type_node))
+ return double_type_node;
+
+ if (mode == TYPE_MODE (long_double_type_node))
+ return long_double_type_node;
+
+ for (i = 0; i < NUM_FLOATN_NX_TYPES; i++)
+ if (FLOATN_NX_TYPE_NODE (i) != NULL_TREE
+ && mode == TYPE_MODE (FLOATN_NX_TYPE_NODE (i)))
+ return FLOATN_NX_TYPE_NODE (i);
+
+ if (mode == TYPE_MODE (void_type_node))
+ return void_type_node;
+
+ if (mode == TYPE_MODE (build_pointer_type (char_type_node))
+ || mode == TYPE_MODE (build_pointer_type (integer_type_node)))
+ {
+ unsigned int precision
+ = GET_MODE_PRECISION (as_a<scalar_int_mode> (mode));
+ return (unsignedp ? make_unsigned_type (precision)
+ : make_signed_type (precision));
+ }
+
+ if (COMPLEX_MODE_P (mode))
+ {
+ machine_mode inner_mode;
+ tree inner_type;
+
+ if (mode == TYPE_MODE (complex_float_type_node))
+ return complex_float_type_node;
+ if (mode == TYPE_MODE (complex_double_type_node))
+ return complex_double_type_node;
+ if (mode == TYPE_MODE (complex_long_double_type_node))
+ return complex_long_double_type_node;
+
+ for (i = 0; i < NUM_FLOATN_NX_TYPES; i++)
+ if (COMPLEX_FLOATN_NX_TYPE_NODE (i) != NULL_TREE
+ && mode == TYPE_MODE (COMPLEX_FLOATN_NX_TYPE_NODE (i)))
+ return COMPLEX_FLOATN_NX_TYPE_NODE (i);
+
+ if (mode == TYPE_MODE (complex_integer_type_node) && !unsignedp)
+ return complex_integer_type_node;
+
+ inner_mode = GET_MODE_INNER (mode);
+ inner_type = c_common_type_for_mode (inner_mode, unsignedp);
+ if (inner_type != NULL_TREE)
+ return build_complex_type (inner_type);
+ }
+ else if (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL
+ && valid_vector_subparts_p (GET_MODE_NUNITS (mode)))
+ {
+ unsigned int elem_bits
+ = vector_element_size (GET_MODE_BITSIZE (mode), GET_MODE_NUNITS (mode));
+ tree bool_type = build_nonstandard_boolean_type (elem_bits);
+ return build_vector_type_for_mode (bool_type, mode);
+ }
+ else if (VECTOR_MODE_P (mode)
+ && valid_vector_subparts_p (GET_MODE_NUNITS (mode)))
+ {
+ machine_mode inner_mode = GET_MODE_INNER (mode);
+ tree inner_type = c_common_type_for_mode (inner_mode, unsignedp);
+ if (inner_type != NULL_TREE)
+ return build_vector_type_for_mode (inner_type, mode);
+ }
+
+ if (dfloat32_type_node != NULL_TREE && mode == TYPE_MODE (dfloat32_type_node))
+ return dfloat32_type_node;
+ if (dfloat64_type_node != NULL_TREE && mode == TYPE_MODE (dfloat64_type_node))
+ return dfloat64_type_node;
+ if (dfloat128_type_node != NULL_TREE
+ && mode == TYPE_MODE (dfloat128_type_node))
+ return dfloat128_type_node;
+
+ if (ALL_SCALAR_FIXED_POINT_MODE_P (mode))
+ {
+ if (mode == TYPE_MODE (short_fract_type_node))
+ return unsignedp ? sat_short_fract_type_node : short_fract_type_node;
+ if (mode == TYPE_MODE (fract_type_node))
+ return unsignedp ? sat_fract_type_node : fract_type_node;
+ if (mode == TYPE_MODE (long_fract_type_node))
+ return unsignedp ? sat_long_fract_type_node : long_fract_type_node;
+ if (mode == TYPE_MODE (long_long_fract_type_node))
+ return unsignedp ? sat_long_long_fract_type_node
+ : long_long_fract_type_node;
+
+ if (mode == TYPE_MODE (unsigned_short_fract_type_node))
+ return unsignedp ? sat_unsigned_short_fract_type_node
+ : unsigned_short_fract_type_node;
+ if (mode == TYPE_MODE (unsigned_fract_type_node))
+ return unsignedp ? sat_unsigned_fract_type_node
+ : unsigned_fract_type_node;
+ if (mode == TYPE_MODE (unsigned_long_fract_type_node))
+ return unsignedp ? sat_unsigned_long_fract_type_node
+ : unsigned_long_fract_type_node;
+ if (mode == TYPE_MODE (unsigned_long_long_fract_type_node))
+ return unsignedp ? sat_unsigned_long_long_fract_type_node
+ : unsigned_long_long_fract_type_node;
+
+ if (mode == TYPE_MODE (short_accum_type_node))
+ return unsignedp ? sat_short_accum_type_node : short_accum_type_node;
+ if (mode == TYPE_MODE (accum_type_node))
+ return unsignedp ? sat_accum_type_node : accum_type_node;
+ if (mode == TYPE_MODE (long_accum_type_node))
+ return unsignedp ? sat_long_accum_type_node : long_accum_type_node;
+ if (mode == TYPE_MODE (long_long_accum_type_node))
+ return unsignedp ? sat_long_long_accum_type_node
+ : long_long_accum_type_node;
+
+ if (mode == TYPE_MODE (unsigned_short_accum_type_node))
+ return unsignedp ? sat_unsigned_short_accum_type_node
+ : unsigned_short_accum_type_node;
+ if (mode == TYPE_MODE (unsigned_accum_type_node))
+ return unsignedp ? sat_unsigned_accum_type_node
+ : unsigned_accum_type_node;
+ if (mode == TYPE_MODE (unsigned_long_accum_type_node))
+ return unsignedp ? sat_unsigned_long_accum_type_node
+ : unsigned_long_accum_type_node;
+ if (mode == TYPE_MODE (unsigned_long_long_accum_type_node))
+ return unsignedp ? sat_unsigned_long_long_accum_type_node
+ : unsigned_long_long_accum_type_node;
+
+ if (mode == QQmode)
+ return unsignedp ? sat_qq_type_node : qq_type_node;
+ if (mode == HQmode)
+ return unsignedp ? sat_hq_type_node : hq_type_node;
+ if (mode == SQmode)
+ return unsignedp ? sat_sq_type_node : sq_type_node;
+ if (mode == DQmode)
+ return unsignedp ? sat_dq_type_node : dq_type_node;
+ if (mode == TQmode)
+ return unsignedp ? sat_tq_type_node : tq_type_node;
+
+ if (mode == UQQmode)
+ return unsignedp ? sat_uqq_type_node : uqq_type_node;
+ if (mode == UHQmode)
+ return unsignedp ? sat_uhq_type_node : uhq_type_node;
+ if (mode == USQmode)
+ return unsignedp ? sat_usq_type_node : usq_type_node;
+ if (mode == UDQmode)
+ return unsignedp ? sat_udq_type_node : udq_type_node;
+ if (mode == UTQmode)
+ return unsignedp ? sat_utq_type_node : utq_type_node;
+
+ if (mode == HAmode)
+ return unsignedp ? sat_ha_type_node : ha_type_node;
+ if (mode == SAmode)
+ return unsignedp ? sat_sa_type_node : sa_type_node;
+ if (mode == DAmode)
+ return unsignedp ? sat_da_type_node : da_type_node;
+ if (mode == TAmode)
+ return unsignedp ? sat_ta_type_node : ta_type_node;
+
+ if (mode == UHAmode)
+ return unsignedp ? sat_uha_type_node : uha_type_node;
+ if (mode == USAmode)
+ return unsignedp ? sat_usa_type_node : usa_type_node;
+ if (mode == UDAmode)
+ return unsignedp ? sat_uda_type_node : uda_type_node;
+ if (mode == UTAmode)
+ return unsignedp ? sat_uta_type_node : uta_type_node;
+ }
+
+ for (t = registered_builtin_types; t; t = TREE_CHAIN (t))
+ {
+ tree type = TREE_VALUE (t);
+ if (TYPE_MODE (type) == mode
+ && VECTOR_TYPE_P (type) == VECTOR_MODE_P (mode)
+ && !!unsignedp == !!TYPE_UNSIGNED (type))
+ return type;
+ }
+ return NULL_TREE;
+}
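+
+/* Usage sketch (illustrative): on a target where
+   TYPE_MODE (integer_type_node) == SImode,
+
+     c_common_type_for_mode (SImode, 0)  // -> integer_type_node
+     c_common_type_for_mode (SImode, 1)  // -> unsigned_type_node
+
+   and a mode with no matching built-in or registered type yields
+   NULL_TREE.  */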
+
+// forked from gcc/cp/semantics.cc finish_underlying_type
+
+/* Implement the __underlying_type keyword: Return the underlying
+ type of TYPE, suitable for use as a type-specifier. */
+
+tree
+finish_underlying_type (tree type)
+{
+ tree underlying_type;
+
+ if (!complete_type_or_else (type, NULL_TREE))
+ return error_mark_node;
+
+ if (TREE_CODE (type) != ENUMERAL_TYPE)
+ {
+ error ("%qT is not an enumeration type", type);
+ return error_mark_node;
+ }
+
+ underlying_type = ENUM_UNDERLYING_TYPE (type);
+
+ /* Fixup necessary in this case because ENUM_UNDERLYING_TYPE
+ includes TYPE_MIN_VALUE and TYPE_MAX_VALUE information.
+ See finish_enum_value_list for details. */
+ if (!ENUM_FIXED_UNDERLYING_TYPE_P (type))
+ underlying_type = c_common_type_for_mode (TYPE_MODE (underlying_type),
+ TYPE_UNSIGNED (underlying_type));
+
+ return underlying_type;
+}
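+
+/* E.g. (illustrative): for `enum E { A = 1000 };' without a fixed
+   underlying type, the result is the plain integer type matching E's
+   mode and signedness, not E's TYPE_MIN_VALUE/TYPE_MAX_VALUE-adjusted
+   variant.  */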
+
+// forked from gcc/cp/typeck.cc layout_compatible_type_p
+
+/* Return true if TYPE1 and TYPE2 are layout-compatible types. */
+
+bool
+layout_compatible_type_p (tree type1, tree type2)
+{
+ if (type1 == error_mark_node || type2 == error_mark_node)
+ return false;
+ if (type1 == type2)
+ return true;
+ if (TREE_CODE (type1) != TREE_CODE (type2))
+ return false;
+
+ type1 = rs_build_qualified_type (type1, TYPE_UNQUALIFIED);
+ type2 = rs_build_qualified_type (type2, TYPE_UNQUALIFIED);
+
+ if (TREE_CODE (type1) == ENUMERAL_TYPE)
+ return (TYPE_ALIGN (type1) == TYPE_ALIGN (type2)
+ && tree_int_cst_equal (TYPE_SIZE (type1), TYPE_SIZE (type2))
+ && same_type_p (finish_underlying_type (type1),
+ finish_underlying_type (type2)));
+
+ if (CLASS_TYPE_P (type1) && std_layout_type_p (type1)
+ && std_layout_type_p (type2) && TYPE_ALIGN (type1) == TYPE_ALIGN (type2)
+ && tree_int_cst_equal (TYPE_SIZE (type1), TYPE_SIZE (type2)))
+ {
+ tree field1 = TYPE_FIELDS (type1);
+ tree field2 = TYPE_FIELDS (type2);
+ if (TREE_CODE (type1) == RECORD_TYPE)
+ {
+ while (1)
+ {
+ if (!next_common_initial_seqence (field1, field2))
+ return false;
+ if (field1 == NULL_TREE)
+ return true;
+ field1 = DECL_CHAIN (field1);
+ field2 = DECL_CHAIN (field2);
+ }
+ }
+ /* Otherwise both types must be union types.
+ The standard says:
+ "Two standard-layout unions are layout-compatible if they have
+ the same number of non-static data members and corresponding
+ non-static data members (in any order) have layout-compatible
+ types."
+ but the code anticipates that bitfield vs. non-bitfield,
+ different bitfield widths or presence/absence of
+ [[no_unique_address]] should be checked as well. */
+ auto_vec<tree, 16> vec;
+ unsigned int count = 0;
+ for (; field1; field1 = DECL_CHAIN (field1))
+ if (TREE_CODE (field1) == FIELD_DECL)
+ count++;
+ for (; field2; field2 = DECL_CHAIN (field2))
+ if (TREE_CODE (field2) == FIELD_DECL)
+ vec.safe_push (field2);
+ /* Discussions on core lean towards treating multiple union fields
+ of the same type as the same field, so this might need changing
+ in the future. */
+ if (count != vec.length ())
+ return false;
+ for (field1 = TYPE_FIELDS (type1); field1; field1 = DECL_CHAIN (field1))
+ {
+ if (TREE_CODE (field1) != FIELD_DECL)
+ continue;
+ unsigned int j;
+ tree t1 = DECL_BIT_FIELD_TYPE (field1);
+ if (t1 == NULL_TREE)
+ t1 = TREE_TYPE (field1);
+ FOR_EACH_VEC_ELT (vec, j, field2)
+ {
+ tree t2 = DECL_BIT_FIELD_TYPE (field2);
+ if (t2 == NULL_TREE)
+ t2 = TREE_TYPE (field2);
+ if (DECL_BIT_FIELD_TYPE (field1))
+ {
+ if (!DECL_BIT_FIELD_TYPE (field2))
+ continue;
+ if (TYPE_PRECISION (TREE_TYPE (field1))
+ != TYPE_PRECISION (TREE_TYPE (field2)))
+ continue;
+ }
+ else if (DECL_BIT_FIELD_TYPE (field2))
+ continue;
+ if (!layout_compatible_type_p (t1, t2))
+ continue;
+ if ((!lookup_attribute ("no_unique_address",
+ DECL_ATTRIBUTES (field1)))
+ != !lookup_attribute ("no_unique_address",
+ DECL_ATTRIBUTES (field2)))
+ continue;
+ break;
+ }
+ if (j == vec.length ())
+ return false;
+ vec.unordered_remove (j);
+ }
+ return true;
+ }
+
+ return same_type_p (type1, type2);
+}
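+
+/* Illustrative example: the standard-layout structs
+
+     struct A { int x; short y; };
+     struct B { const int x; short z; };
+
+   are layout-compatible (member names and top-level cv-qualifiers do
+   not matter), while adding a member to only one of them breaks the
+   common initial sequence and the predicate returns false.  */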
+
+// forked from gcc/cp/semantics.cc is_corresponding_member_union
+
+/* Helper function for is_corresponding_member_aggr. Return true if
+ MEMBERTYPE pointer-to-data-member ARG can be found in anonymous
+ union or structure BASETYPE. */
+
+static bool
+is_corresponding_member_union (tree basetype, tree membertype, tree arg)
+{
+ for (tree field = TYPE_FIELDS (basetype); field; field = DECL_CHAIN (field))
+ if (TREE_CODE (field) != FIELD_DECL || DECL_BIT_FIELD_TYPE (field))
+ continue;
+ else if (same_type_ignoring_top_level_qualifiers_p (TREE_TYPE (field),
+ membertype))
+ {
+ if (TREE_CODE (arg) != INTEGER_CST
+ || tree_int_cst_equal (arg, byte_position (field)))
+ return true;
+ }
+ else if (ANON_AGGR_TYPE_P (TREE_TYPE (field)))
+ {
+ tree narg = arg;
+ if (TREE_CODE (basetype) != UNION_TYPE
+ && TREE_CODE (narg) == INTEGER_CST)
+ narg = size_binop (MINUS_EXPR, arg, byte_position (field));
+ if (is_corresponding_member_union (TREE_TYPE (field), membertype, narg))
+ return true;
+ }
+ return false;
+}
+
+// forked from gcc/cp/typeck.cc next_common_initial_seqence
+
+/* Helper function for layout_compatible_type_p and
+ is_corresponding_member_aggr. Advance to next members (NULL if
+ no further ones) and return true if those members are still part of
+ the common initial sequence. */
+
+bool
+next_common_initial_seqence (tree &memb1, tree &memb2)
+{
+ while (memb1)
+ {
+ if (TREE_CODE (memb1) != FIELD_DECL
+ || (DECL_FIELD_IS_BASE (memb1) && is_empty_field (memb1)))
+ {
+ memb1 = DECL_CHAIN (memb1);
+ continue;
+ }
+ if (DECL_FIELD_IS_BASE (memb1))
+ {
+ memb1 = TYPE_FIELDS (TREE_TYPE (memb1));
+ continue;
+ }
+ break;
+ }
+ while (memb2)
+ {
+ if (TREE_CODE (memb2) != FIELD_DECL
+ || (DECL_FIELD_IS_BASE (memb2) && is_empty_field (memb2)))
+ {
+ memb2 = DECL_CHAIN (memb2);
+ continue;
+ }
+ if (DECL_FIELD_IS_BASE (memb2))
+ {
+ memb2 = TYPE_FIELDS (TREE_TYPE (memb2));
+ continue;
+ }
+ break;
+ }
+ if (memb1 == NULL_TREE && memb2 == NULL_TREE)
+ return true;
+ if (memb1 == NULL_TREE || memb2 == NULL_TREE)
+ return false;
+ if (DECL_BIT_FIELD_TYPE (memb1))
+ {
+ if (!DECL_BIT_FIELD_TYPE (memb2))
+ return false;
+ if (!layout_compatible_type_p (DECL_BIT_FIELD_TYPE (memb1),
+ DECL_BIT_FIELD_TYPE (memb2)))
+ return false;
+ if (TYPE_PRECISION (TREE_TYPE (memb1))
+ != TYPE_PRECISION (TREE_TYPE (memb2)))
+ return false;
+ }
+ else if (DECL_BIT_FIELD_TYPE (memb2))
+ return false;
+ else if (!layout_compatible_type_p (TREE_TYPE (memb1), TREE_TYPE (memb2)))
+ return false;
+ if ((!lookup_attribute ("no_unique_address", DECL_ATTRIBUTES (memb1)))
+ != !lookup_attribute ("no_unique_address", DECL_ATTRIBUTES (memb2)))
+ return false;
+ if (!tree_int_cst_equal (bit_position (memb1), bit_position (memb2)))
+ return false;
+ return true;
+}
+
+// forked from gcc/cp/semantics.cc is_corresponding_member_aggr
+
+/* Helper function for fold_builtin_is_corresponding_member call.
+ Return boolean_false_node if MEMBERTYPE1 BASETYPE1::*ARG1 and
+ MEMBERTYPE2 BASETYPE2::*ARG2 aren't corresponding members,
+ boolean_true_node if they are corresponding members, or for
+ non-constant ARG2 the highest member offset for corresponding
+ members. */
+
+static tree
+is_corresponding_member_aggr (location_t loc, tree basetype1, tree membertype1,
+ tree arg1, tree basetype2, tree membertype2,
+ tree arg2)
+{
+ tree field1 = TYPE_FIELDS (basetype1);
+ tree field2 = TYPE_FIELDS (basetype2);
+ tree ret = boolean_false_node;
+ while (1)
+ {
+ bool r = next_common_initial_seqence (field1, field2);
+ if (field1 == NULL_TREE || field2 == NULL_TREE)
+ break;
+ if (r
+ && same_type_ignoring_top_level_qualifiers_p (TREE_TYPE (field1),
+ membertype1)
+ && same_type_ignoring_top_level_qualifiers_p (TREE_TYPE (field2),
+ membertype2))
+ {
+ tree pos = byte_position (field1);
+ if (TREE_CODE (arg1) == INTEGER_CST && tree_int_cst_equal (arg1, pos))
+ {
+ if (TREE_CODE (arg2) == INTEGER_CST)
+ return boolean_true_node;
+ return pos;
+ }
+ else if (TREE_CODE (arg1) != INTEGER_CST)
+ ret = pos;
+ }
+ else if (ANON_AGGR_TYPE_P (TREE_TYPE (field1))
+ && ANON_AGGR_TYPE_P (TREE_TYPE (field2)))
+ {
+ if ((!lookup_attribute ("no_unique_address",
+ DECL_ATTRIBUTES (field1)))
+ != !lookup_attribute ("no_unique_address",
+ DECL_ATTRIBUTES (field2)))
+ break;
+ if (!tree_int_cst_equal (bit_position (field1),
+ bit_position (field2)))
+ break;
+ bool overlap = true;
+ tree pos = byte_position (field1);
+ if (TREE_CODE (arg1) == INTEGER_CST)
+ {
+ tree off1 = fold_convert (sizetype, arg1);
+ tree sz1 = TYPE_SIZE_UNIT (TREE_TYPE (field1));
+ if (tree_int_cst_lt (off1, pos)
+ || tree_int_cst_le (size_binop (PLUS_EXPR, pos, sz1), off1))
+ overlap = false;
+ }
+ if (TREE_CODE (arg2) == INTEGER_CST)
+ {
+ tree off2 = fold_convert (sizetype, arg2);
+ tree sz2 = TYPE_SIZE_UNIT (TREE_TYPE (field2));
+ if (tree_int_cst_lt (off2, pos)
+ || tree_int_cst_le (size_binop (PLUS_EXPR, pos, sz2), off2))
+ overlap = false;
+ }
+ if (overlap && NON_UNION_CLASS_TYPE_P (TREE_TYPE (field1))
+ && NON_UNION_CLASS_TYPE_P (TREE_TYPE (field2)))
+ {
+ tree narg1 = arg1;
+ if (TREE_CODE (arg1) == INTEGER_CST)
+ narg1
+ = size_binop (MINUS_EXPR, fold_convert (sizetype, arg1), pos);
+ tree narg2 = arg2;
+ if (TREE_CODE (arg2) == INTEGER_CST)
+ narg2
+ = size_binop (MINUS_EXPR, fold_convert (sizetype, arg2), pos);
+ tree t1 = TREE_TYPE (field1);
+ tree t2 = TREE_TYPE (field2);
+ tree nret
+ = is_corresponding_member_aggr (loc, t1, membertype1, narg1, t2,
+ membertype2, narg2);
+ if (nret != boolean_false_node)
+ {
+ if (nret == boolean_true_node)
+ return nret;
+ if (TREE_CODE (arg1) == INTEGER_CST)
+ return size_binop (PLUS_EXPR, nret, pos);
+ ret = size_binop (PLUS_EXPR, nret, pos);
+ }
+ }
+ else if (overlap && TREE_CODE (TREE_TYPE (field1)) == UNION_TYPE
+ && TREE_CODE (TREE_TYPE (field2)) == UNION_TYPE)
+ {
+ tree narg1 = arg1;
+ if (TREE_CODE (arg1) == INTEGER_CST)
+ narg1
+ = size_binop (MINUS_EXPR, fold_convert (sizetype, arg1), pos);
+ tree narg2 = arg2;
+ if (TREE_CODE (arg2) == INTEGER_CST)
+ narg2
+ = size_binop (MINUS_EXPR, fold_convert (sizetype, arg2), pos);
+ if (is_corresponding_member_union (TREE_TYPE (field1),
+ membertype1, narg1)
+ && is_corresponding_member_union (TREE_TYPE (field2),
+ membertype2, narg2))
+ {
+ sorry_at (loc, "%<__builtin_is_corresponding_member%> "
+ "not well defined for anonymous unions");
+ return boolean_false_node;
+ }
+ }
+ }
+ if (!r)
+ break;
+ field1 = DECL_CHAIN (field1);
+ field2 = DECL_CHAIN (field2);
+ }
+ return ret;
+}
+
+// forked from gcc/cp/call.cc null_member_pointer_value_p
+
+/* Returns true iff T is a null member pointer value (4.11). */
+
+bool
+null_member_pointer_value_p (tree t)
+{
+ tree type = TREE_TYPE (t);
+ if (!type)
+ return false;
+ else if (TYPE_PTRMEMFUNC_P (type))
+ return (TREE_CODE (t) == CONSTRUCTOR && CONSTRUCTOR_NELTS (t)
+ && integer_zerop (CONSTRUCTOR_ELT (t, 0)->value));
+ else if (TYPE_PTRDATAMEM_P (type))
+ return integer_all_onesp (t);
+ else
+ return false;
+}
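+
+/* E.g. (illustrative): a null pointer-to-data-member is represented as
+   the all-ones offset, so `(int A::*) nullptr' satisfies this
+   predicate, while `&A::a' for a member at offset 0 does not.  */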
+
+// forked from gcc/cp/semantics.cc fold_builtin_is_corresponding_member
+
+/* Fold __builtin_is_corresponding_member call. */
+
+tree
+fold_builtin_is_corresponding_member (location_t loc, int nargs, tree *args)
+{
+ /* Unless users call the builtin directly, the following 3 checks should be
+ ensured from std::is_corresponding_member function template. */
+ if (nargs != 2)
+ {
+ error_at (loc, "%<__builtin_is_corresponding_member%> "
+ "needs two arguments");
+ return boolean_false_node;
+ }
+ tree arg1 = args[0];
+ tree arg2 = args[1];
+ if (error_operand_p (arg1) || error_operand_p (arg2))
+ return boolean_false_node;
+ if (!TYPE_PTRMEM_P (TREE_TYPE (arg1)) || !TYPE_PTRMEM_P (TREE_TYPE (arg2)))
+ {
+ error_at (loc, "%<__builtin_is_corresponding_member%> "
+ "argument is not pointer to member");
+ return boolean_false_node;
+ }
+
+ if (!TYPE_PTRDATAMEM_P (TREE_TYPE (arg1))
+ || !TYPE_PTRDATAMEM_P (TREE_TYPE (arg2)))
+ return boolean_false_node;
+
+ tree membertype1 = TREE_TYPE (TREE_TYPE (arg1));
+ tree basetype1 = TYPE_OFFSET_BASETYPE (TREE_TYPE (arg1));
+ if (!complete_type_or_else (basetype1, NULL_TREE))
+ return boolean_false_node;
+
+ tree membertype2 = TREE_TYPE (TREE_TYPE (arg2));
+ tree basetype2 = TYPE_OFFSET_BASETYPE (TREE_TYPE (arg2));
+ if (!complete_type_or_else (basetype2, NULL_TREE))
+ return boolean_false_node;
+
+ if (!NON_UNION_CLASS_TYPE_P (basetype1) || !NON_UNION_CLASS_TYPE_P (basetype2)
+ || !std_layout_type_p (basetype1) || !std_layout_type_p (basetype2))
+ return boolean_false_node;
+
+ /* If the member types aren't layout compatible, then they
+ can't be corresponding members. */
+ if (!layout_compatible_type_p (membertype1, membertype2))
+ return boolean_false_node;
+
+ if (null_member_pointer_value_p (arg1) || null_member_pointer_value_p (arg2))
+ return boolean_false_node;
+
+ if (TREE_CODE (arg1) == INTEGER_CST && TREE_CODE (arg2) == INTEGER_CST
+ && !tree_int_cst_equal (arg1, arg2))
+ return boolean_false_node;
+
+ if (TREE_CODE (arg2) == INTEGER_CST && TREE_CODE (arg1) != INTEGER_CST)
+ {
+ std::swap (arg1, arg2);
+ std::swap (membertype1, membertype2);
+ std::swap (basetype1, basetype2);
+ }
+
+ tree ret = is_corresponding_member_aggr (loc, basetype1, membertype1, arg1,
+ basetype2, membertype2, arg2);
+ if (TREE_TYPE (ret) == boolean_type_node)
+ return ret;
+ /* If both arg1 and arg2 are INTEGER_CSTs, is_corresponding_member_aggr
+ already returns boolean_{true,false}_node whether those particular
+ members are corresponding members or not. Otherwise, if only
+ one of them is INTEGER_CST (canonicalized to first being INTEGER_CST
+ above), it returns boolean_false_node if it is certainly not a
+ corresponding member and otherwise we need to do a runtime check that
+ those two OFFSET_TYPE offsets are equal.
+ If neither of the operands is INTEGER_CST, is_corresponding_member_aggr
+ returns the largest offset at which the members would be corresponding
+ members, so perform arg1 <= ret && arg1 == arg2 runtime check. */
+ gcc_assert (TREE_CODE (arg2) != INTEGER_CST);
+ if (TREE_CODE (arg1) == INTEGER_CST)
+ return fold_build2 (EQ_EXPR, boolean_type_node, arg1,
+ fold_convert (TREE_TYPE (arg1), arg2));
+ ret = fold_build2 (LE_EXPR, boolean_type_node,
+ fold_convert (pointer_sized_int_node, arg1),
+ fold_convert (pointer_sized_int_node, ret));
+ return fold_build2 (TRUTH_AND_EXPR, boolean_type_node, ret,
+ fold_build2 (EQ_EXPR, boolean_type_node, arg1,
+ fold_convert (TREE_TYPE (arg1), arg2)));
+}
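+
+/* Worked sketch (illustrative): given
+
+     struct A { int a; int b; };
+     struct B { int x; int y; };
+
+   __builtin_is_corresponding_member (&A::a, &B::x) folds to true and
+   __builtin_is_corresponding_member (&A::a, &B::y) to false, both at
+   compile time because the pointer-to-member constants are
+   INTEGER_CSTs.  */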
+
+// forked from gcc/cp/tree.cc lvalue_type
+
+/* The type of ARG when used as an lvalue. */
+
+tree
+lvalue_type (tree arg)
+{
+ tree type = TREE_TYPE (arg);
+ return type;
+}
+
+// forked from gcc/c-family/c-warn.cc lvalue_error
+
+/* Print an error message for an invalid lvalue. USE says
+ how the lvalue is being used and so selects the error message. LOC
+ is the location for the error. */
+
+void
+lvalue_error (location_t loc, enum lvalue_use use)
+{
+ switch (use)
+ {
+ case lv_assign:
+ error_at (loc, "lvalue required as left operand of assignment");
+ break;
+ case lv_increment:
+ error_at (loc, "lvalue required as increment operand");
+ break;
+ case lv_decrement:
+ error_at (loc, "lvalue required as decrement operand");
+ break;
+ case lv_addressof:
+ error_at (loc, "lvalue required as unary %<&%> operand");
+ break;
+ case lv_asm:
+ error_at (loc, "lvalue required in %<asm%> statement");
+ break;
+ default:
+ gcc_unreachable ();
+ }
+}
+
+// forked from gcc/cp/cp-gimplify.cc cp_fold_maybe_rvalue
+
+/* Fold expression X which is used as an rvalue if RVAL is true. */
+
+tree
+cp_fold_maybe_rvalue (tree x, bool rval)
+{
+ while (true)
+ {
+ x = fold (x);
+ if (rval)
+ x = mark_rvalue_use (x);
+ if (rval && DECL_P (x) && !TYPE_REF_P (TREE_TYPE (x)))
+ {
+ tree v = decl_constant_value (x);
+ if (v != x && v != error_mark_node)
+ {
+ x = v;
+ continue;
+ }
+ }
+ break;
+ }
+ return x;
+}
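+
+/* Sketch (illustrative): folding the rvalue use of C where
+   `const int c = 3;' substitutes the INTEGER_CST 3 via
+   decl_constant_value and loops until no further constant
+   substitution or fold applies.  */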
+
+// forked from gcc/cp/cp-gimplify.cc cp_fold_rvalue
+
+/* Fold expression X which is used as an rvalue. */
+
+tree
+cp_fold_rvalue (tree x)
+{
+ return cp_fold_maybe_rvalue (x, true);
+}
+
+/* Returns true iff class T has a constexpr destructor or has an
+ implicitly declared destructor that we can't tell if it's constexpr
+ without forcing a lazy declaration (which might cause undesired
+ instantiations). */
+
+static bool
+type_maybe_constexpr_destructor (tree t)
+{
+ /* Until C++20, only trivial destruction is constexpr. */
+ if (TYPE_HAS_TRIVIAL_DESTRUCTOR (t))
+ return true;
+
+ if (CLASS_TYPE_P (t) && CLASSTYPE_LAZY_DESTRUCTOR (t))
+ /* Assume it's constexpr. */
+ return true;
+ tree fn = CLASSTYPE_DESTRUCTOR (t);
+ return (fn && Compile::maybe_constexpr_fn (fn));
+}
+
+/* T is a non-literal type used in a context which requires a constant
+ expression. Explain why it isn't literal. */
+
+void
+explain_non_literal_class (tree t)
+{
+ static hash_set<tree> *diagnosed;
+
+ if (!CLASS_TYPE_P (t))
+ return;
+ t = TYPE_MAIN_VARIANT (t);
+
+ if (diagnosed == NULL)
+ diagnosed = new hash_set<tree>;
+ if (diagnosed->add (t))
+ /* Already explained. */
+ return;
+
+ auto_diagnostic_group d;
+ inform (UNKNOWN_LOCATION, "%q+T is not literal because:", t);
+ if (LAMBDA_TYPE_P (t))
+ inform (UNKNOWN_LOCATION,
+ " %qT is a closure type, which is only literal in "
+ "C++17 and later",
+ t);
+ else if (TYPE_HAS_NONTRIVIAL_DESTRUCTOR (t)
+ && !type_maybe_constexpr_destructor (t))
+ inform (UNKNOWN_LOCATION, " %q+T does not have %<constexpr%> destructor",
+ t);
+ else if (TYPE_HAS_NONTRIVIAL_DESTRUCTOR (t))
+ inform (UNKNOWN_LOCATION, " %q+T has a non-trivial destructor", t);
+ else if (CLASSTYPE_NON_AGGREGATE (t) && !TYPE_HAS_TRIVIAL_DFLT (t)
+ && !LAMBDA_TYPE_P (t) && !TYPE_HAS_CONSTEXPR_CTOR (t))
+ {
+ inform (UNKNOWN_LOCATION,
+ " %q+T is not an aggregate, does not have a trivial "
+ "default constructor, and has no %<constexpr%> constructor that "
+ "is not a copy or move constructor",
+ t);
+ if (type_has_non_user_provided_default_constructor (t))
+ /* Note that we can't simply call locate_ctor because when the
+ constructor is deleted it just returns NULL_TREE. */
+ for (ovl_iterator iter (CLASSTYPE_CONSTRUCTORS (t)); iter; ++iter)
+ {
+ tree fn = *iter;
+ tree parms = TYPE_ARG_TYPES (TREE_TYPE (fn));
+
+ parms = skip_artificial_parms_for (fn, parms);
+
+ if (sufficient_parms_p (parms))
+ {
+ Compile::explain_invalid_constexpr_fn (fn);
+ break;
+ }
+ }
+ }
+ else
+ {
+ tree binfo, base_binfo, field;
+ int i;
+ for (binfo = TYPE_BINFO (t), i = 0;
+ BINFO_BASE_ITERATE (binfo, i, base_binfo); i++)
+ {
+ tree basetype = TREE_TYPE (base_binfo);
+ if (!CLASSTYPE_LITERAL_P (basetype))
+ {
+ inform (UNKNOWN_LOCATION,
+ " base class %qT of %q+T is non-literal", basetype, t);
+ explain_non_literal_class (basetype);
+ return;
+ }
+ }
+ for (field = TYPE_FIELDS (t); field; field = TREE_CHAIN (field))
+ {
+ tree ftype;
+ if (TREE_CODE (field) != FIELD_DECL)
+ continue;
+ ftype = TREE_TYPE (field);
+ if (!Compile::literal_type_p (ftype))
+ {
+ inform (DECL_SOURCE_LOCATION (field),
+ " non-static data member %qD has non-literal type",
+ field);
+ if (CLASS_TYPE_P (ftype))
+ explain_non_literal_class (ftype);
+ }
+ if (RS_TYPE_VOLATILE_P (ftype))
+ inform (DECL_SOURCE_LOCATION (field),
+ " non-static data member %qD has volatile type", field);
+ }
+ }
+}
+
+// forked from gcc/cp/call.cc reference_related_p
+
+/* Returns nonzero if T1 is reference-related to T2. */
+
+bool
+reference_related_p (tree t1, tree t2)
+{
+ if (t1 == error_mark_node || t2 == error_mark_node)
+ return false;
+
+ t1 = TYPE_MAIN_VARIANT (t1);
+ t2 = TYPE_MAIN_VARIANT (t2);
+
+ /* [dcl.init.ref]
+
+ Given types "cv1 T1" and "cv2 T2," "cv1 T1" is reference-related
+ to "cv2 T2" if T1 is similar to T2, or T1 is a base class of T2. */
+ return (similar_type_p (t1, t2)
+ /*|| (CLASS_TYPE_P (t1) && CLASS_TYPE_P (t2)
+ && DERIVED_FROM_P (t1, t2))*/);
+}
+
+// forked from gcc/cp/typeck2.cc ordinary_char_type_p
+
+/* True iff TYPE is a C++20 "ordinary" character type. */
+
+bool
+ordinary_char_type_p (tree type)
+{
+ type = TYPE_MAIN_VARIANT (type);
+ return (type == char_type_node || type == signed_char_type_node
+ || type == unsigned_char_type_node);
+}
+
+// forked from gcc/cp/typeck2.cc array_string_literal_compatible_p
+
+/* True iff the string literal INIT has a type suitable for initializing array
+ TYPE. */
+
+bool
+array_string_literal_compatible_p (tree type, tree init)
+{
+ tree to_char_type = TYPE_MAIN_VARIANT (TREE_TYPE (type));
+ tree from_char_type = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (init)));
+
+ if (to_char_type == from_char_type)
+ return true;
+ /* The array element type does not match the initializing string
+ literal element type; this is only allowed when both types are
+ ordinary character type. There are no string literals of
+ signed or unsigned char type in the language, but we can get
+ them internally from converting braced-init-lists to
+ STRING_CST. */
+ if (ordinary_char_type_p (to_char_type)
+ && ordinary_char_type_p (from_char_type))
+ return true;
+ return false;
+}
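+
+/* Concretely: initializing a `char' array from the literal "hi" matches
+   exactly; an `unsigned char' array may be initialized from a STRING_CST
+   whose element type is plain `char' (such constants arise internally from
+   braced-init-lists), since both are ordinary character types; but a
+   `char16_t' array cannot be initialized from a plain `char' literal.  */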
+
+} // namespace Rust
diff --git a/gcc/rust/backend/rust-tree.h b/gcc/rust/backend/rust-tree.h
new file mode 100644
index 0000000..284fd87
--- /dev/null
+++ b/gcc/rust/backend/rust-tree.h
@@ -0,0 +1,3391 @@
+// Copyright (C) 2020-2023 Free Software Foundation, Inc.
+
+// This file is part of GCC.
+
+// GCC is free software; you can redistribute it and/or modify it under
+// the terms of the GNU General Public License as published by the Free
+// Software Foundation; either version 3, or (at your option) any later
+// version.
+
+// GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or
+// FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+// for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with GCC; see the file COPYING3. If not see
+// <http://www.gnu.org/licenses/>.
+
+#ifndef RUST_TREE
+#define RUST_TREE
+
+#include "rust-system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "cpplib.h"
+#include "splay-tree.h"
+
+/* Returns true if NODE is a pointer. */
+#define TYPE_PTR_P(NODE) (TREE_CODE (NODE) == POINTER_TYPE)
+
+/* Returns true if NODE is a reference. */
+#define TYPE_REF_P(NODE) (TREE_CODE (NODE) == REFERENCE_TYPE)
+
+/* Returns true if NODE is a pointer or a reference. */
+#define INDIRECT_TYPE_P(NODE) (TYPE_PTR_P (NODE) || TYPE_REF_P (NODE))
+
+/* [basic.fundamental]
+
+ Types bool, char, wchar_t, and the signed and unsigned integer types
+ are collectively called integral types.
+
+ Note that INTEGRAL_TYPE_P, as defined in tree.h, allows enumeration
+ types as well, which is incorrect in C++. Keep these checks in
+ ascending code order. */
+#define RS_INTEGRAL_TYPE_P(TYPE) \
+ (TREE_CODE (TYPE) == BOOLEAN_TYPE || TREE_CODE (TYPE) == INTEGER_TYPE)
+
+/* [basic.fundamental]
+
+ Integral and floating types are collectively called arithmetic
+ types.
+
+ As a GNU extension, we also accept complex types.
+
+ Keep these checks in ascending code order. */
+#define ARITHMETIC_TYPE_P(TYPE) \
+ (RS_INTEGRAL_TYPE_P (TYPE) || TREE_CODE (TYPE) == REAL_TYPE \
+ || TREE_CODE (TYPE) == COMPLEX_TYPE)
+
+/* True iff TYPE is cv decltype(nullptr). */
+#define NULLPTR_TYPE_P(TYPE) (TREE_CODE (TYPE) == NULLPTR_TYPE)
+
+/* [basic.types]
+
+ Arithmetic types, enumeration types, pointer types,
+ pointer-to-member types, and std::nullptr_t are collectively called
+ scalar types.
+
+ Keep these checks in ascending code order. */
+#define SCALAR_TYPE_P(TYPE) \
+ (TREE_CODE (TYPE) == ENUMERAL_TYPE || ARITHMETIC_TYPE_P (TYPE) \
+ || TYPE_PTR_P (TYPE) || NULLPTR_TYPE_P (TYPE))
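+
+/* A minimal usage sketch (hypothetical caller), showing how the predicates
+   above nest:
+
+     tree t = TREE_TYPE (expr);
+     if (RS_INTEGRAL_TYPE_P (t))
+       ;  // integer case; enumerations are deliberately excluded
+     else if (ARITHMETIC_TYPE_P (t))
+       ;  // floating-point or complex
+     else if (SCALAR_TYPE_P (t))
+       ;  // remaining scalars: enums, pointers, nullptr_t
+*/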
+
+/* True if NODE is an implicit INDIRECT_REF from convert_from_reference. */
+#define REFERENCE_REF_P(NODE) \
+ (INDIRECT_REF_P (NODE) && TREE_TYPE (TREE_OPERAND (NODE, 0)) \
+ && TYPE_REF_P (TREE_TYPE (TREE_OPERAND ((NODE), 0))))
+
+// Helper to distinguish RECORD_TYPEs that represent slices from ordinary
+// records.
+#define SLICE_FLAG TREE_LANG_FLAG_0
+#define SLICE_TYPE_P(TYPE) \
+ (TREE_CODE (TYPE) == RECORD_TYPE && TREE_LANG_FLAG_0 (TYPE))
+
+// Analogous flag marking RECORD_TYPEs that represent closure (lambda-like)
+// types.
+#define RS_CLOSURE_FLAG TREE_LANG_FLAG_1
+#define RS_CLOSURE_TYPE_P(TYPE) \
+ (TREE_CODE (TYPE) == RECORD_TYPE && TREE_LANG_FLAG_1 (TYPE))
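+
+/* A sketch of the intended use (hypothetical code): the backend tags the
+   RECORD_TYPE it builds for a slice so later passes can tell it apart from
+   an ordinary struct:
+
+     tree rec = make_node (RECORD_TYPE);
+     SLICE_FLAG (rec) = 1;   // now SLICE_TYPE_P (rec) holds
+
+   RS_CLOSURE_FLAG works the same way for closure types.  */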
+
+/* Returns true if NODE is a pointer to member function type. */
+#define TYPE_PTRMEMFUNC_P(NODE) \
+ (TREE_CODE (NODE) == RECORD_TYPE && TYPE_PTRMEMFUNC_FLAG (NODE))
+
+#define TYPE_PTRMEMFUNC_FLAG(NODE) (TYPE_LANG_FLAG_2 (RECORD_TYPE_CHECK (NODE)))
+
+/* True if NODE is a compound-literal, i.e., a brace-enclosed
+ initializer cast to a particular type. This is mostly only set during
+ template parsing; once the initializer has been digested into an actual
+ value of the type, the expression is represented by a TARGET_EXPR. */
+#define COMPOUND_LITERAL_P(NODE) \
+ (TREE_CODE (NODE) == CONSTRUCTOR && TREE_HAS_CONSTRUCTOR (NODE))
+
+/* When appearing in an INDIRECT_REF, it means that the tree structure
+ underneath is actually a call to a constructor. This is needed
+ when the constructor must initialize local storage (which can
+ be automatically destroyed), rather than allowing it to allocate
+ space from the heap.
+
+ When appearing in a SAVE_EXPR, it means that underneath
+ is a call to a constructor.
+
+ When appearing in a CONSTRUCTOR, the expression is an unconverted
+ compound literal.
+
+ When appearing in a FIELD_DECL, it means that this field
+ has been duly initialized in its constructor. */
+#define TREE_HAS_CONSTRUCTOR(NODE) (TREE_LANG_FLAG_4 (NODE))
+
+/* Nonzero if T is a class type. Zero for template type parameters,
+ typename types, and so forth. */
+#define CLASS_TYPE_P(T) \
+ (RECORD_OR_UNION_CODE_P (TREE_CODE (T)) && TYPE_LANG_FLAG_5 (T))
+
+/* [class.virtual]
+
+ A class that declares or inherits a virtual function is called a
+ polymorphic class. */
+#define TYPE_POLYMORPHIC_P(NODE) (TREE_LANG_FLAG_2 (NODE))
+
+/* Nonzero if this class has a virtual function table pointer. */
+#define TYPE_CONTAINS_VPTR_P(NODE) \
+ (TYPE_POLYMORPHIC_P (NODE) || CLASSTYPE_VBASECLASSES (NODE))
+
+/* A vector of BINFOs for the direct and indirect virtual base classes
+ that this type uses in a post-order depth-first left-to-right
+ order. (In other words, these bases appear in the order that they
+ should be initialized.) */
+#define CLASSTYPE_VBASECLASSES(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->vbases)
+
+/* We used to have a variant type for lang_type. Keep the name of the
+ checking accessor for the sole survivor. */
+#define LANG_TYPE_CLASS_CHECK(NODE) (TYPE_LANG_SPECIFIC (NODE))
+
+/* Keep these checks in ascending code order. */
+#define RECORD_OR_UNION_CODE_P(T) ((T) == RECORD_TYPE || (T) == UNION_TYPE)
+#define OVERLOAD_TYPE_P(T) (CLASS_TYPE_P (T) || TREE_CODE (T) == ENUMERAL_TYPE)
+
+/* Nonzero if this class is "empty" in the sense of the C++ ABI. */
+#define CLASSTYPE_EMPTY_P(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->empty_p)
+
+/* True if DECL is declared 'constexpr'. */
+#define DECL_DECLARED_CONSTEXPR_P(DECL) \
+ DECL_LANG_FLAG_8 (VAR_OR_FUNCTION_DECL_CHECK (DECL))
+
+#define VAR_OR_FUNCTION_DECL_CHECK(NODE) \
+ TREE_CHECK2 (NODE, VAR_DECL, FUNCTION_DECL)
+
+// forked from gcc/cp/c-common.h c_tree_index
+
+/* Standard named or nameless data types of the C compiler. */
+
+enum c_tree_index
+{
+ CTI_CHAR8_TYPE,
+ CTI_CHAR16_TYPE,
+ CTI_CHAR32_TYPE,
+ CTI_WCHAR_TYPE,
+ CTI_UNDERLYING_WCHAR_TYPE,
+ CTI_WINT_TYPE,
+ CTI_SIGNED_SIZE_TYPE, /* For format checking only. */
+ CTI_UNSIGNED_PTRDIFF_TYPE, /* For format checking only. */
+ CTI_INTMAX_TYPE,
+ CTI_UINTMAX_TYPE,
+ CTI_WIDEST_INT_LIT_TYPE,
+ CTI_WIDEST_UINT_LIT_TYPE,
+
+ /* Types for <stdint.h>, that may not be defined on all
+ targets. */
+ CTI_SIG_ATOMIC_TYPE,
+ CTI_INT8_TYPE,
+ CTI_INT16_TYPE,
+ CTI_INT32_TYPE,
+ CTI_INT64_TYPE,
+ CTI_UINT8_TYPE,
+ CTI_UINT16_TYPE,
+ CTI_UINT32_TYPE,
+ CTI_UINT64_TYPE,
+ CTI_INT_LEAST8_TYPE,
+ CTI_INT_LEAST16_TYPE,
+ CTI_INT_LEAST32_TYPE,
+ CTI_INT_LEAST64_TYPE,
+ CTI_UINT_LEAST8_TYPE,
+ CTI_UINT_LEAST16_TYPE,
+ CTI_UINT_LEAST32_TYPE,
+ CTI_UINT_LEAST64_TYPE,
+ CTI_INT_FAST8_TYPE,
+ CTI_INT_FAST16_TYPE,
+ CTI_INT_FAST32_TYPE,
+ CTI_INT_FAST64_TYPE,
+ CTI_UINT_FAST8_TYPE,
+ CTI_UINT_FAST16_TYPE,
+ CTI_UINT_FAST32_TYPE,
+ CTI_UINT_FAST64_TYPE,
+ CTI_INTPTR_TYPE,
+ CTI_UINTPTR_TYPE,
+
+ CTI_CHAR_ARRAY_TYPE,
+ CTI_CHAR8_ARRAY_TYPE,
+ CTI_CHAR16_ARRAY_TYPE,
+ CTI_CHAR32_ARRAY_TYPE,
+ CTI_WCHAR_ARRAY_TYPE,
+ CTI_STRING_TYPE,
+ CTI_CONST_STRING_TYPE,
+
+ /* Type for boolean expressions (bool in C++, int in C). */
+ CTI_TRUTHVALUE_TYPE,
+ CTI_TRUTHVALUE_TRUE,
+ CTI_TRUTHVALUE_FALSE,
+
+ CTI_DEFAULT_FUNCTION_TYPE,
+
+ CTI_NULL,
+
+ /* These are not types, but we have to look them up all the time. */
+ CTI_FUNCTION_NAME_DECL,
+ CTI_PRETTY_FUNCTION_NAME_DECL,
+ CTI_C99_FUNCTION_NAME_DECL,
+
+ CTI_MODULE_HWM,
+ /* Below here entities change during compilation. */
+
+ CTI_SAVED_FUNCTION_NAME_DECLS,
+
+ CTI_MAX
+};
+
+// forked from gcc/c-family/c-common.h c_tree_index
+
+extern GTY (()) tree c_global_trees[CTI_MAX];
+
+// forked from gcc/cp/cp-tree.h cp_tree_index
+
+enum cp_tree_index
+{
+ CPTI_WCHAR_DECL,
+ CPTI_VTABLE_ENTRY_TYPE,
+ CPTI_DELTA_TYPE,
+ CPTI_VTABLE_INDEX_TYPE,
+ CPTI_CLEANUP_TYPE,
+ CPTI_VTT_PARM_TYPE,
+
+ CPTI_CLASS_TYPE,
+ CPTI_UNKNOWN_TYPE,
+ CPTI_INIT_LIST_TYPE,
+ CPTI_EXPLICIT_VOID_LIST,
+ CPTI_VTBL_TYPE,
+ CPTI_VTBL_PTR_TYPE,
+ CPTI_GLOBAL,
+ CPTI_ABORT_FNDECL,
+ CPTI_AGGR_TAG,
+ CPTI_CONV_OP_MARKER,
+
+ CPTI_CTOR_IDENTIFIER,
+ CPTI_COMPLETE_CTOR_IDENTIFIER,
+ CPTI_BASE_CTOR_IDENTIFIER,
+ CPTI_DTOR_IDENTIFIER,
+ CPTI_COMPLETE_DTOR_IDENTIFIER,
+ CPTI_BASE_DTOR_IDENTIFIER,
+ CPTI_DELETING_DTOR_IDENTIFIER,
+ CPTI_CONV_OP_IDENTIFIER,
+ CPTI_DELTA_IDENTIFIER,
+ CPTI_IN_CHARGE_IDENTIFIER,
+ CPTI_VTT_PARM_IDENTIFIER,
+ CPTI_AS_BASE_IDENTIFIER,
+ CPTI_THIS_IDENTIFIER,
+ CPTI_PFN_IDENTIFIER,
+ CPTI_VPTR_IDENTIFIER,
+ CPTI_GLOBAL_IDENTIFIER,
+ CPTI_ANON_IDENTIFIER,
+ CPTI_AUTO_IDENTIFIER,
+ CPTI_DECLTYPE_AUTO_IDENTIFIER,
+ CPTI_INIT_LIST_IDENTIFIER,
+ CPTI_FOR_RANGE__IDENTIFIER,
+ CPTI_FOR_BEGIN__IDENTIFIER,
+ CPTI_FOR_END__IDENTIFIER,
+ CPTI_FOR_RANGE_IDENTIFIER,
+ CPTI_FOR_BEGIN_IDENTIFIER,
+ CPTI_FOR_END_IDENTIFIER,
+ CPTI_ABI_TAG_IDENTIFIER,
+ CPTI_ALIGNED_IDENTIFIER,
+ CPTI_BEGIN_IDENTIFIER,
+ CPTI_END_IDENTIFIER,
+ CPTI_GET_IDENTIFIER,
+ CPTI_GNU_IDENTIFIER,
+ CPTI_TUPLE_ELEMENT_IDENTIFIER,
+ CPTI_TUPLE_SIZE_IDENTIFIER,
+ CPTI_TYPE_IDENTIFIER,
+ CPTI_VALUE_IDENTIFIER,
+ CPTI_FUN_IDENTIFIER,
+ CPTI_CLOSURE_IDENTIFIER,
+ CPTI_HEAP_UNINIT_IDENTIFIER,
+ CPTI_HEAP_IDENTIFIER,
+ CPTI_HEAP_DELETED_IDENTIFIER,
+ CPTI_HEAP_VEC_UNINIT_IDENTIFIER,
+ CPTI_HEAP_VEC_IDENTIFIER,
+ CPTI_OMP_IDENTIFIER,
+
+ CPTI_LANG_NAME_C,
+ CPTI_LANG_NAME_CPLUSPLUS,
+
+ CPTI_EMPTY_EXCEPT_SPEC,
+ CPTI_NOEXCEPT_TRUE_SPEC,
+ CPTI_NOEXCEPT_FALSE_SPEC,
+ CPTI_NOEXCEPT_DEFERRED_SPEC,
+
+ CPTI_NULLPTR,
+ CPTI_NULLPTR_TYPE,
+
+ CPTI_ANY_TARG,
+
+ CPTI_MODULE_HWM,
+ /* Nodes after here change during compilation, or should not be in
+ the module's global tree table. Such nodes must be locatable
+ via name lookup or type-construction, as those are the only
+ cross-TU matching capabilities remaining. */
+
+ /* We must find these via the global namespace. */
+ CPTI_STD,
+ CPTI_ABI,
+
+ /* These are created at init time, but the library/headers provide
+ definitions. */
+ CPTI_ALIGN_TYPE,
+ CPTI_TERMINATE_FN,
+ CPTI_CALL_UNEXPECTED_FN,
+
+ /* These are lazily inited. */
+ CPTI_CONST_TYPE_INFO_TYPE,
+ CPTI_GET_EXCEPTION_PTR_FN,
+ CPTI_BEGIN_CATCH_FN,
+ CPTI_END_CATCH_FN,
+ CPTI_ALLOCATE_EXCEPTION_FN,
+ CPTI_FREE_EXCEPTION_FN,
+ CPTI_THROW_FN,
+ CPTI_RETHROW_FN,
+ CPTI_ATEXIT_FN_PTR_TYPE,
+ CPTI_ATEXIT,
+ CPTI_DSO_HANDLE,
+ CPTI_DCAST,
+
+ CPTI_SOURCE_LOCATION_IMPL,
+
+ CPTI_FALLBACK_DFLOAT32_TYPE,
+ CPTI_FALLBACK_DFLOAT64_TYPE,
+ CPTI_FALLBACK_DFLOAT128_TYPE,
+
+ CPTI_MAX
+};
+
+// forked from gcc/cp/cp-tree.h cp_global_trees
+
+extern GTY (()) tree cp_global_trees[CPTI_MAX];
+
+#define wchar_decl_node cp_global_trees[CPTI_WCHAR_DECL]
+#define vtable_entry_type cp_global_trees[CPTI_VTABLE_ENTRY_TYPE]
+/* The type used to represent an offset by which to adjust the `this'
+ pointer in pointer-to-member types. */
+#define delta_type_node cp_global_trees[CPTI_DELTA_TYPE]
+/* The type used to represent an index into the vtable. */
+#define vtable_index_type cp_global_trees[CPTI_VTABLE_INDEX_TYPE]
+
+#define class_type_node cp_global_trees[CPTI_CLASS_TYPE]
+#define unknown_type_node cp_global_trees[CPTI_UNKNOWN_TYPE]
+#define init_list_type_node cp_global_trees[CPTI_INIT_LIST_TYPE]
+#define explicit_void_list_node cp_global_trees[CPTI_EXPLICIT_VOID_LIST]
+#define vtbl_type_node cp_global_trees[CPTI_VTBL_TYPE]
+#define vtbl_ptr_type_node cp_global_trees[CPTI_VTBL_PTR_TYPE]
+#define std_node cp_global_trees[CPTI_STD]
+#define abi_node cp_global_trees[CPTI_ABI]
+#define global_namespace cp_global_trees[CPTI_GLOBAL]
+#define const_type_info_type_node cp_global_trees[CPTI_CONST_TYPE_INFO_TYPE]
+#define conv_op_marker cp_global_trees[CPTI_CONV_OP_MARKER]
+#define abort_fndecl cp_global_trees[CPTI_ABORT_FNDECL]
+#define current_aggr cp_global_trees[CPTI_AGGR_TAG]
+#define nullptr_node cp_global_trees[CPTI_NULLPTR]
+#define nullptr_type_node cp_global_trees[CPTI_NULLPTR_TYPE]
+/* std::align_val_t */
+#define align_type_node cp_global_trees[CPTI_ALIGN_TYPE]
+
+#define char8_type_node c_global_trees[CTI_CHAR8_TYPE]
+#define char16_type_node c_global_trees[CTI_CHAR16_TYPE]
+#define char32_type_node c_global_trees[CTI_CHAR32_TYPE]
+#define wchar_type_node c_global_trees[CTI_WCHAR_TYPE]
+#define underlying_wchar_type_node c_global_trees[CTI_UNDERLYING_WCHAR_TYPE]
+#define wint_type_node c_global_trees[CTI_WINT_TYPE]
+#define signed_size_type_node c_global_trees[CTI_SIGNED_SIZE_TYPE]
+#define unsigned_ptrdiff_type_node c_global_trees[CTI_UNSIGNED_PTRDIFF_TYPE]
+#define intmax_type_node c_global_trees[CTI_INTMAX_TYPE]
+#define uintmax_type_node c_global_trees[CTI_UINTMAX_TYPE]
+#define widest_integer_literal_type_node c_global_trees[CTI_WIDEST_INT_LIT_TYPE]
+#define widest_unsigned_literal_type_node \
+ c_global_trees[CTI_WIDEST_UINT_LIT_TYPE]
+
+#define sig_atomic_type_node c_global_trees[CTI_SIG_ATOMIC_TYPE]
+#define int8_type_node c_global_trees[CTI_INT8_TYPE]
+#define int16_type_node c_global_trees[CTI_INT16_TYPE]
+#define int32_type_node c_global_trees[CTI_INT32_TYPE]
+#define int64_type_node c_global_trees[CTI_INT64_TYPE]
+#define uint8_type_node c_global_trees[CTI_UINT8_TYPE]
+#define c_uint16_type_node c_global_trees[CTI_UINT16_TYPE]
+#define c_uint32_type_node c_global_trees[CTI_UINT32_TYPE]
+#define c_uint64_type_node c_global_trees[CTI_UINT64_TYPE]
+#define int_least8_type_node c_global_trees[CTI_INT_LEAST8_TYPE]
+#define int_least16_type_node c_global_trees[CTI_INT_LEAST16_TYPE]
+#define int_least32_type_node c_global_trees[CTI_INT_LEAST32_TYPE]
+#define int_least64_type_node c_global_trees[CTI_INT_LEAST64_TYPE]
+#define uint_least8_type_node c_global_trees[CTI_UINT_LEAST8_TYPE]
+#define uint_least16_type_node c_global_trees[CTI_UINT_LEAST16_TYPE]
+#define uint_least32_type_node c_global_trees[CTI_UINT_LEAST32_TYPE]
+#define uint_least64_type_node c_global_trees[CTI_UINT_LEAST64_TYPE]
+#define int_fast8_type_node c_global_trees[CTI_INT_FAST8_TYPE]
+#define int_fast16_type_node c_global_trees[CTI_INT_FAST16_TYPE]
+#define int_fast32_type_node c_global_trees[CTI_INT_FAST32_TYPE]
+#define int_fast64_type_node c_global_trees[CTI_INT_FAST64_TYPE]
+#define uint_fast8_type_node c_global_trees[CTI_UINT_FAST8_TYPE]
+#define uint_fast16_type_node c_global_trees[CTI_UINT_FAST16_TYPE]
+#define uint_fast32_type_node c_global_trees[CTI_UINT_FAST32_TYPE]
+#define uint_fast64_type_node c_global_trees[CTI_UINT_FAST64_TYPE]
+#define intptr_type_node c_global_trees[CTI_INTPTR_TYPE]
+#define uintptr_type_node c_global_trees[CTI_UINTPTR_TYPE]
+
+#define truthvalue_type_node c_global_trees[CTI_TRUTHVALUE_TYPE]
+#define truthvalue_true_node c_global_trees[CTI_TRUTHVALUE_TRUE]
+#define truthvalue_false_node c_global_trees[CTI_TRUTHVALUE_FALSE]
+
+#define char_array_type_node c_global_trees[CTI_CHAR_ARRAY_TYPE]
+#define char8_array_type_node c_global_trees[CTI_CHAR8_ARRAY_TYPE]
+#define char16_array_type_node c_global_trees[CTI_CHAR16_ARRAY_TYPE]
+#define char32_array_type_node c_global_trees[CTI_CHAR32_ARRAY_TYPE]
+#define wchar_array_type_node c_global_trees[CTI_WCHAR_ARRAY_TYPE]
+#define string_type_node c_global_trees[CTI_STRING_TYPE]
+#define const_string_type_node c_global_trees[CTI_CONST_STRING_TYPE]
+
+#define default_function_type c_global_trees[CTI_DEFAULT_FUNCTION_TYPE]
+
+#define function_name_decl_node c_global_trees[CTI_FUNCTION_NAME_DECL]
+#define pretty_function_name_decl_node \
+ c_global_trees[CTI_PRETTY_FUNCTION_NAME_DECL]
+#define c99_function_name_decl_node c_global_trees[CTI_C99_FUNCTION_NAME_DECL]
+#define saved_function_name_decls c_global_trees[CTI_SAVED_FUNCTION_NAME_DECLS]
+
+/* The node for C++ `__null'. */
+#define null_node c_global_trees[CTI_NULL]
+
+/* We cache these tree nodes so as to call get_identifier less frequently.
+ For identifiers for functions, including special member functions such
+ as ctors and assignment operators, the nodes can be used (among other
+ things) to iterate over their overloads defined by/for a type. For
+ example:
+
+ tree ovlid = assign_op_identifier;
+ tree overloads = get_class_binding (type, ovlid);
+ for (ovl_iterator it (overloads); it; ++it) { ... }
+
+ iterates over the set of implicitly and explicitly defined overloads
+ of the assignment operator for type (including the copy and move
+ assignment operators, whether deleted or not). */
+
+/* The name of a constructor that takes an in-charge parameter to
+ decide whether or not to construct virtual base classes. */
+#define ctor_identifier cp_global_trees[CPTI_CTOR_IDENTIFIER]
+/* The name of a constructor that constructs virtual base classes. */
+#define complete_ctor_identifier cp_global_trees[CPTI_COMPLETE_CTOR_IDENTIFIER]
+/* The name of a constructor that does not construct virtual base classes. */
+#define base_ctor_identifier cp_global_trees[CPTI_BASE_CTOR_IDENTIFIER]
+/* The name of a destructor that takes an in-charge parameter to
+ decide whether or not to destroy virtual base classes and whether
+ or not to delete the object. */
+#define dtor_identifier cp_global_trees[CPTI_DTOR_IDENTIFIER]
+/* The name of a destructor that destroys virtual base classes. */
+#define complete_dtor_identifier cp_global_trees[CPTI_COMPLETE_DTOR_IDENTIFIER]
+/* The name of a destructor that does not destroy virtual base
+ classes. */
+#define base_dtor_identifier cp_global_trees[CPTI_BASE_DTOR_IDENTIFIER]
+/* The name of a destructor that destroys virtual base classes, and
+ then deletes the entire object. */
+#define deleting_dtor_identifier cp_global_trees[CPTI_DELETING_DTOR_IDENTIFIER]
+
+/* The name used for conversion operators -- but note that actual
+ conversion functions use special identifiers outside the identifier
+ table. */
+#define conv_op_identifier cp_global_trees[CPTI_CONV_OP_IDENTIFIER]
+
+#define delta_identifier cp_global_trees[CPTI_DELTA_IDENTIFIER]
+#define in_charge_identifier cp_global_trees[CPTI_IN_CHARGE_IDENTIFIER]
+/* The name of the parameter that contains a pointer to the VTT to use
+ for this subobject constructor or destructor. */
+#define vtt_parm_identifier cp_global_trees[CPTI_VTT_PARM_IDENTIFIER]
+#define as_base_identifier cp_global_trees[CPTI_AS_BASE_IDENTIFIER]
+#define this_identifier cp_global_trees[CPTI_THIS_IDENTIFIER]
+#define pfn_identifier cp_global_trees[CPTI_PFN_IDENTIFIER]
+#define vptr_identifier cp_global_trees[CPTI_VPTR_IDENTIFIER]
+/* The name of the ::, std & anon namespaces. */
+#define global_identifier cp_global_trees[CPTI_GLOBAL_IDENTIFIER]
+#define anon_identifier cp_global_trees[CPTI_ANON_IDENTIFIER]
+/* auto and declspec(auto) identifiers. */
+#define auto_identifier cp_global_trees[CPTI_AUTO_IDENTIFIER]
+#define decltype_auto_identifier cp_global_trees[CPTI_DECLTYPE_AUTO_IDENTIFIER]
+#define init_list_identifier cp_global_trees[CPTI_INIT_LIST_IDENTIFIER]
+#define for_range__identifier cp_global_trees[CPTI_FOR_RANGE__IDENTIFIER]
+#define for_begin__identifier cp_global_trees[CPTI_FOR_BEGIN__IDENTIFIER]
+#define for_end__identifier cp_global_trees[CPTI_FOR_END__IDENTIFIER]
+#define for_range_identifier cp_global_trees[CPTI_FOR_RANGE_IDENTIFIER]
+#define for_begin_identifier cp_global_trees[CPTI_FOR_BEGIN_IDENTIFIER]
+#define for_end_identifier cp_global_trees[CPTI_FOR_END_IDENTIFIER]
+#define abi_tag_identifier cp_global_trees[CPTI_ABI_TAG_IDENTIFIER]
+#define aligned_identifier cp_global_trees[CPTI_ALIGNED_IDENTIFIER]
+#define begin_identifier cp_global_trees[CPTI_BEGIN_IDENTIFIER]
+#define end_identifier cp_global_trees[CPTI_END_IDENTIFIER]
+#define get__identifier cp_global_trees[CPTI_GET_IDENTIFIER]
+#define gnu_identifier cp_global_trees[CPTI_GNU_IDENTIFIER]
+#define tuple_element_identifier cp_global_trees[CPTI_TUPLE_ELEMENT_IDENTIFIER]
+#define tuple_size_identifier cp_global_trees[CPTI_TUPLE_SIZE_IDENTIFIER]
+#define type_identifier cp_global_trees[CPTI_TYPE_IDENTIFIER]
+#define value_identifier cp_global_trees[CPTI_VALUE_IDENTIFIER]
+#define fun_identifier cp_global_trees[CPTI_FUN_IDENTIFIER]
+#define closure_identifier cp_global_trees[CPTI_CLOSURE_IDENTIFIER]
+#define heap_uninit_identifier cp_global_trees[CPTI_HEAP_UNINIT_IDENTIFIER]
+#define heap_identifier cp_global_trees[CPTI_HEAP_IDENTIFIER]
+#define heap_deleted_identifier cp_global_trees[CPTI_HEAP_DELETED_IDENTIFIER]
+#define heap_vec_uninit_identifier \
+ cp_global_trees[CPTI_HEAP_VEC_UNINIT_IDENTIFIER]
+#define heap_vec_identifier cp_global_trees[CPTI_HEAP_VEC_IDENTIFIER]
+#define omp_identifier cp_global_trees[CPTI_OMP_IDENTIFIER]
+#define lang_name_c cp_global_trees[CPTI_LANG_NAME_C]
+#define lang_name_cplusplus cp_global_trees[CPTI_LANG_NAME_CPLUSPLUS]
+
+/* Exception specifiers used for throw(), noexcept(true),
+ noexcept(false) and deferred noexcept. We rely on these being
+ uncloned. */
+#define empty_except_spec cp_global_trees[CPTI_EMPTY_EXCEPT_SPEC]
+#define noexcept_true_spec cp_global_trees[CPTI_NOEXCEPT_TRUE_SPEC]
+#define noexcept_false_spec cp_global_trees[CPTI_NOEXCEPT_FALSE_SPEC]
+#define noexcept_deferred_spec cp_global_trees[CPTI_NOEXCEPT_DEFERRED_SPEC]
+
+/* Exception handling function declarations. */
+#define terminate_fn cp_global_trees[CPTI_TERMINATE_FN]
+#define call_unexpected_fn cp_global_trees[CPTI_CALL_UNEXPECTED_FN]
+#define get_exception_ptr_fn cp_global_trees[CPTI_GET_EXCEPTION_PTR_FN]
+#define begin_catch_fn cp_global_trees[CPTI_BEGIN_CATCH_FN]
+#define end_catch_fn cp_global_trees[CPTI_END_CATCH_FN]
+#define allocate_exception_fn cp_global_trees[CPTI_ALLOCATE_EXCEPTION_FN]
+#define free_exception_fn cp_global_trees[CPTI_FREE_EXCEPTION_FN]
+#define throw_fn cp_global_trees[CPTI_THROW_FN]
+#define rethrow_fn cp_global_trees[CPTI_RETHROW_FN]
+
+/* The type of the function-pointer argument to "__cxa_atexit" (or
+ "std::atexit", if "__cxa_atexit" is not being used). */
+#define atexit_fn_ptr_type_node cp_global_trees[CPTI_ATEXIT_FN_PTR_TYPE]
+
+/* A pointer to `std::atexit'. */
+#define atexit_node cp_global_trees[CPTI_ATEXIT]
+
+/* A pointer to `__dso_handle'. */
+#define dso_handle_node cp_global_trees[CPTI_DSO_HANDLE]
+
+/* The declaration of the dynamic_cast runtime. */
+#define dynamic_cast_node cp_global_trees[CPTI_DCAST]
+
+/* The type of a destructor. */
+#define cleanup_type cp_global_trees[CPTI_CLEANUP_TYPE]
+
+/* The type of the vtt parameter passed to subobject constructors and
+ destructors. */
+#define vtt_parm_type cp_global_trees[CPTI_VTT_PARM_TYPE]
+
+/* A node which matches any template argument. */
+#define any_targ_node cp_global_trees[CPTI_ANY_TARG]
+
+/* std::source_location::__impl class. */
+#define source_location_impl cp_global_trees[CPTI_SOURCE_LOCATION_IMPL]
+
+/* These two accessors should only be used by OVL manipulators.
+ Other users should use iterators and convenience functions. */
+#define OVL_FUNCTION(NODE) \
+ (((struct tree_overload *) OVERLOAD_CHECK (NODE))->function)
+#define OVL_CHAIN(NODE) \
+ (((struct tree_overload *) OVERLOAD_CHECK (NODE))->common.chain)
+
+/* If set, this or a subsequent overload contains decls that need deduping. */
+#define OVL_DEDUP_P(NODE) TREE_LANG_FLAG_0 (OVERLOAD_CHECK (NODE))
+/* If set, this was imported in a using declaration. */
+#define OVL_USING_P(NODE) TREE_LANG_FLAG_1 (OVERLOAD_CHECK (NODE))
+/* If set, this overload is a hidden decl. */
+#define OVL_HIDDEN_P(NODE) TREE_LANG_FLAG_2 (OVERLOAD_CHECK (NODE))
+/* If set, this overload contains a nested overload. */
+#define OVL_NESTED_P(NODE) TREE_LANG_FLAG_3 (OVERLOAD_CHECK (NODE))
+/* If set, this overload was constructed during lookup. */
+#define OVL_LOOKUP_P(NODE) TREE_LANG_FLAG_4 (OVERLOAD_CHECK (NODE))
+/* If set, this OVL_USING_P overload is exported. */
+#define OVL_EXPORT_P(NODE) TREE_LANG_FLAG_5 (OVERLOAD_CHECK (NODE))
+
+/* The first decl of an overload. */
+#define OVL_FIRST(NODE) ovl_first (NODE)
+/* The name of the overload set. */
+#define OVL_NAME(NODE) DECL_NAME (OVL_FIRST (NODE))
+
+/* Whether this is a set of overloaded functions. TEMPLATE_DECLS are
+ always wrapped in an OVERLOAD, so we don't need to check them
+ here. */
+#define OVL_P(NODE) \
+ (TREE_CODE (NODE) == FUNCTION_DECL || TREE_CODE (NODE) == OVERLOAD)
+/* Whether this is a single member overload. */
+#define OVL_SINGLE_P(NODE) (TREE_CODE (NODE) != OVERLOAD || !OVL_CHAIN (NODE))
+
+/* Nonzero means that this type has an X() constructor. */
+#define TYPE_HAS_DEFAULT_CONSTRUCTOR(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->has_default_ctor)
+
+/* Nonzero means that NODE (a class type) has a default constructor --
+ but that it has not yet been declared. */
+#define CLASSTYPE_LAZY_DEFAULT_CTOR(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->lazy_default_ctor)
+
+/* A FUNCTION_DECL or OVERLOAD for the constructors for NODE. These
+ are the constructors that take an in-charge parameter. */
+#define CLASSTYPE_CONSTRUCTORS(NODE) \
+ (get_class_binding_direct (NODE, ctor_identifier))
+
+/* In a TREE_LIST in an attribute list, indicates that the attribute
+ must be applied at instantiation time. */
+#define ATTR_IS_DEPENDENT(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
+
+/* In a TREE_LIST in the argument of attribute abi_tag, indicates that the tag
+ was inherited from a template parameter, not explicitly indicated. */
+#define ABI_TAG_IMPLICIT(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
+
+/* In a TREE_LIST for a parameter-declaration-list, indicates that all the
+ parameters in the list have declarators enclosed in (). */
+#define PARENTHESIZED_LIST_P(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
+
+/* Nonzero if this is a using decl for a dependent scope.  */
+#define DECL_DEPENDENT_P(NODE) DECL_LANG_FLAG_0 (USING_DECL_CHECK (NODE))
+
+/* The scope named in a using decl. */
+#define USING_DECL_SCOPE(NODE) DECL_RESULT_FLD (USING_DECL_CHECK (NODE))
+
+/* The decls named by a using decl. */
+#define USING_DECL_DECLS(NODE) DECL_INITIAL (USING_DECL_CHECK (NODE))
+
+/* Nonzero if the using decl refers to a dependent type.  */
+#define USING_DECL_TYPENAME_P(NODE) DECL_LANG_FLAG_1 (USING_DECL_CHECK (NODE))
+
+/* True if member using decl NODE refers to a non-inherited NODE. */
+#define USING_DECL_UNRELATED_P(NODE) DECL_LANG_FLAG_2 (USING_DECL_CHECK (NODE))
+
+/* Nonzero if NODE declares a function. */
+#define DECL_DECLARES_FUNCTION_P(NODE) (TREE_CODE (NODE) == FUNCTION_DECL)
+
+/* Nonzero for a NODE which declares a type. */
+#define DECL_DECLARES_TYPE_P(NODE) (TREE_CODE (NODE) == TYPE_DECL)
+
+/* Kind bits. */
+#define IDENTIFIER_KIND_BIT_0(NODE) \
+ TREE_LANG_FLAG_0 (IDENTIFIER_NODE_CHECK (NODE))
+#define IDENTIFIER_KIND_BIT_1(NODE) \
+ TREE_LANG_FLAG_1 (IDENTIFIER_NODE_CHECK (NODE))
+#define IDENTIFIER_KIND_BIT_2(NODE) \
+ TREE_LANG_FLAG_2 (IDENTIFIER_NODE_CHECK (NODE))
+
+/* Used by various search routines. */
+#define IDENTIFIER_MARKED(NODE) TREE_LANG_FLAG_4 (IDENTIFIER_NODE_CHECK (NODE))
+
+/* Nonzero if this identifier is used as a virtual function name somewhere
+ (optimizes searches). */
+#define IDENTIFIER_VIRTUAL_P(NODE) \
+ TREE_LANG_FLAG_5 (IDENTIFIER_NODE_CHECK (NODE))
+
+/* True if this identifier is a reserved word. C_RID_CODE (node) is
+ then the RID_* value of the keyword. Value 1. */
+#define IDENTIFIER_KEYWORD_P(NODE) \
+ ((!IDENTIFIER_KIND_BIT_2 (NODE)) & (!IDENTIFIER_KIND_BIT_1 (NODE)) \
+ & IDENTIFIER_KIND_BIT_0 (NODE))
+
+/* True if this identifier is the name of a constructor or
+ destructor. Value 2 or 3. */
+#define IDENTIFIER_CDTOR_P(NODE) \
+ ((!IDENTIFIER_KIND_BIT_2 (NODE)) & IDENTIFIER_KIND_BIT_1 (NODE))
+
+/* True if this identifier is the name of a constructor. Value 2. */
+#define IDENTIFIER_CTOR_P(NODE) \
+ (IDENTIFIER_CDTOR_P (NODE) & (!IDENTIFIER_KIND_BIT_0 (NODE)))
+
+/* True if this identifier is the name of a destructor. Value 3. */
+#define IDENTIFIER_DTOR_P(NODE) \
+ (IDENTIFIER_CDTOR_P (NODE) & IDENTIFIER_KIND_BIT_0 (NODE))
+
+/* True if this identifier is for any operator name (including
+ conversions). Value 4, 5, 6 or 7. */
+#define IDENTIFIER_ANY_OP_P(NODE) (IDENTIFIER_KIND_BIT_2 (NODE))
+
+/* True if this identifier is for an overloaded operator. Values 4, 5. */
+#define IDENTIFIER_OVL_OP_P(NODE) \
+ (IDENTIFIER_ANY_OP_P (NODE) & (!IDENTIFIER_KIND_BIT_1 (NODE)))
+
+/* True if this identifier is for any assignment.  Value 5.  */
+#define IDENTIFIER_ASSIGN_OP_P(NODE) \
+ (IDENTIFIER_OVL_OP_P (NODE) & IDENTIFIER_KIND_BIT_0 (NODE))
+
+/* True if this identifier is the name of a type-conversion
+   operator.  Value 6.  */
+#define IDENTIFIER_CONV_OP_P(NODE) \
+ (IDENTIFIER_ANY_OP_P (NODE) & IDENTIFIER_KIND_BIT_1 (NODE) \
+ & (!IDENTIFIER_KIND_BIT_0 (NODE)))
+
+/* True if this identifier is a new or delete operator. */
+#define IDENTIFIER_NEWDEL_OP_P(NODE) \
+ (IDENTIFIER_OVL_OP_P (NODE) \
+ && IDENTIFIER_OVL_OP_FLAGS (NODE) & OVL_OP_FLAG_ALLOC)
+
+/* True if this identifier is a new operator. */
+#define IDENTIFIER_NEW_OP_P(NODE) \
+ (IDENTIFIER_OVL_OP_P (NODE) \
+ && (IDENTIFIER_OVL_OP_FLAGS (NODE) \
+ & (OVL_OP_FLAG_ALLOC | OVL_OP_FLAG_DELETE)) \
+ == OVL_OP_FLAG_ALLOC)
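+
+/* Taken together, the three kind bits above encode (value = bit2 bit1 bit0):
+   0 ordinary identifier, 1 keyword, 2 constructor, 3 destructor,
+   4 non-assignment operator, 5 assignment operator, 6 conversion operator.
+   The remaining combination, 7, is not distinguished by these predicates.  */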
+
+/* Nonzero if the class NODE has multiple paths to the same (virtual)
+ base object. */
+#define CLASSTYPE_DIAMOND_SHAPED_P(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->diamond_shaped)
+
+/* Nonzero if the class NODE has multiple instances of the same base
+ type. */
+#define CLASSTYPE_REPEATED_BASE_P(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->repeated_base)
+
+/* The member function with which the vtable will be emitted:
+ the first noninline non-pure-virtual member function. NULL_TREE
+   if there is no key function or if this is a class template.  */
+#define CLASSTYPE_KEY_METHOD(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->key_method)
+
+/* Vector of members. During definition, it is unordered and only
+ member functions are present. After completion it is sorted and
+ contains both member functions and non-functions. STAT_HACK is
+   involved to preserve the one-slot-per-name invariant.  */
+#define CLASSTYPE_MEMBER_VEC(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->members)
+
+/* For class templates, this is a TREE_LIST of all member data,
+ functions, types, and friends in the order of declaration.
+ The TREE_PURPOSE of each TREE_LIST is NULL_TREE for a friend,
+ and the RECORD_TYPE for the class template otherwise. */
+#define CLASSTYPE_DECL_LIST(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->decl_list)
+
+/* A FUNCTION_DECL for the destructor for NODE. This is the
+ destructors that take an in-charge parameter. If
+ CLASSTYPE_LAZY_DESTRUCTOR is true, then this entry will be NULL
+ until the destructor is created with lazily_declare_fn. */
+#define CLASSTYPE_DESTRUCTOR(NODE) \
+ (get_class_binding_direct (NODE, dtor_identifier))
+
+/* Nonzero if NODE has a primary base class, i.e., a base class with
+ which it shares the virtual function table pointer. */
+#define CLASSTYPE_HAS_PRIMARY_BASE_P(NODE) \
+ (CLASSTYPE_PRIMARY_BINFO (NODE) != NULL_TREE)
+
+/* If non-NULL, this is the binfo for the primary base class, i.e.,
+ the base class which contains the virtual function table pointer
+ for this class. */
+#define CLASSTYPE_PRIMARY_BINFO(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->primary_base)
+
+/* The type corresponding to NODE when NODE is used as a base class,
+ i.e., NODE without virtual base classes or tail padding. */
+#define CLASSTYPE_AS_BASE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->as_base)
+
+/* Nonzero if NODE is a user-defined conversion operator. */
+#define DECL_CONV_FN_P(NODE) IDENTIFIER_CONV_OP_P (DECL_NAME (NODE))
+
+/* The type to which conversion operator FN converts.  */
+#define DECL_CONV_FN_TYPE(FN) \
+ TREE_TYPE ((gcc_checking_assert (DECL_CONV_FN_P (FN)), DECL_NAME (FN)))
+
+/* Returns nonzero iff TYPE1 and TYPE2 are the same type, in the usual
+ sense of `same'. */
+#define same_type_p(TYPE1, TYPE2) comptypes ((TYPE1), (TYPE2), COMPARE_STRICT)
+
+/* Nonzero if T is a type that could resolve to any kind of concrete type
+ at instantiation time. */
+#define WILDCARD_TYPE_P(T) \
+ (TREE_CODE (T) == TEMPLATE_TYPE_PARM || TREE_CODE (T) == TYPENAME_TYPE \
+ || TREE_CODE (T) == TYPEOF_TYPE \
+ || TREE_CODE (T) == BOUND_TEMPLATE_TEMPLATE_PARM \
+ || TREE_CODE (T) == DECLTYPE_TYPE \
+ || TREE_CODE (T) == DEPENDENT_OPERATOR_TYPE)
+
+/* Nonzero if T is a class (or struct or union) type. Also nonzero
+ for template type parameters, typename types, and instantiated
+ template template parameters. Keep these checks in ascending code
+ order. */
+#define MAYBE_CLASS_TYPE_P(T) (WILDCARD_TYPE_P (T) || CLASS_TYPE_P (T))
+
+/* 1 iff FUNCTION_TYPE or METHOD_TYPE has a ref-qualifier (either & or &&). */
+#define FUNCTION_REF_QUALIFIED(NODE) \
+ TREE_LANG_FLAG_4 (FUNC_OR_METHOD_CHECK (NODE))
+
+/* 1 iff FUNCTION_TYPE or METHOD_TYPE has &&-ref-qualifier. */
+#define FUNCTION_RVALUE_QUALIFIED(NODE) \
+ TREE_LANG_FLAG_5 (FUNC_OR_METHOD_CHECK (NODE))
+
+/* Get the POINTER_TYPE to the METHOD_TYPE associated with this
+ pointer to member function. TYPE_PTRMEMFUNC_P _must_ be true,
+ before using this macro. */
+#define TYPE_PTRMEMFUNC_FN_TYPE(NODE) \
+ (rs_build_qualified_type (TREE_TYPE (TYPE_FIELDS (NODE)), \
+ rs_type_quals (NODE)))
+
+/* As above, but can be used in places that want an lvalue at the expense
+ of not necessarily having the correct cv-qualifiers. */
+#define TYPE_PTRMEMFUNC_FN_TYPE_RAW(NODE) (TREE_TYPE (TYPE_FIELDS (NODE)))
+
+/* True if this type is dependent. This predicate is only valid if
+ TYPE_DEPENDENT_P_VALID is true. */
+#define TYPE_DEPENDENT_P(NODE) TYPE_LANG_FLAG_0 (NODE)
+
+/* True if dependent_type_p has been called for this type, with the
+ result that TYPE_DEPENDENT_P is valid. */
+#define TYPE_DEPENDENT_P_VALID(NODE) TYPE_LANG_FLAG_6 (NODE)
+
+/* Nonzero for _TYPE node means that this type does not have a trivial
+ destructor. Therefore, destroying an object of this type will
+ involve a call to a destructor. This can apply to objects of
+ ARRAY_TYPE if the type of the elements needs a destructor. */
+#define TYPE_HAS_NONTRIVIAL_DESTRUCTOR(NODE) (TYPE_LANG_FLAG_4 (NODE))
+
+/* For FUNCTION_TYPE or METHOD_TYPE, a list of the exceptions that
+ this type can raise. Each TREE_VALUE is a _TYPE. The TREE_VALUE
+ will be NULL_TREE to indicate a throw specification of `()', or
+ no exceptions allowed. For a noexcept specification, TREE_VALUE
+ is NULL_TREE and TREE_PURPOSE is the constant-expression. For
+ a deferred noexcept-specification, TREE_PURPOSE is a DEFERRED_NOEXCEPT
+ (for templates) or an OVERLOAD list of functions (for implicitly
+ declared functions). */
+#define TYPE_RAISES_EXCEPTIONS(NODE) \
+ TYPE_LANG_SLOT_1 (FUNC_OR_METHOD_CHECK (NODE))
+
+/* Identifiers map directly to block or class-scope bindings.
+ Namespace-scope bindings are held in hash tables on the respective
+ namespaces. The identifier bindings are the innermost active
+ binding, from whence you can get the decl and/or implicit-typedef
+ of an elaborated type. When not bound to a local entity the
+ values are NULL. */
+#define IDENTIFIER_BINDING(NODE) (LANG_IDENTIFIER_CAST (NODE)->bindings)
+
+#define LANG_IDENTIFIER_CAST(NODE) \
+ ((struct lang_identifier *) IDENTIFIER_NODE_CHECK (NODE))
+
+/* IF_STMT accessors. These give access to the condition of the if
+ statement, the then block of the if statement, and the else block
+ of the if statement if it exists. */
+#define IF_COND(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 0)
+#define THEN_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 1)
+#define ELSE_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 2)
+#define IF_SCOPE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 3)
+#define IF_STMT_CONSTEXPR_P(NODE) TREE_LANG_FLAG_0 (IF_STMT_CHECK (NODE))
+#define IF_STMT_CONSTEVAL_P(NODE) TREE_LANG_FLAG_2 (IF_STMT_CHECK (NODE))
+
+/* The expression in question for a DECLTYPE_TYPE. */
+#define DECLTYPE_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (DECLTYPE_TYPE_CHECK (NODE)))
+
+#define SET_CLASSTYPE_INTERFACE_UNKNOWN_X(NODE, X) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = !!(X))
+
+/* Nonzero if this class is included from a header file which employs
+ `#pragma interface', and it is not included in its implementation file. */
+#define CLASSTYPE_INTERFACE_ONLY(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->interface_only)
+
+#define TYPE_NAME_STRING(NODE) (IDENTIFIER_POINTER (TYPE_IDENTIFIER (NODE)))
+#define TYPE_NAME_LENGTH(NODE) (IDENTIFIER_LENGTH (TYPE_IDENTIFIER (NODE)))
+
+/* Whether a PARM_DECL represents a local parameter in a
+ requires-expression. */
+#define CONSTRAINT_VAR_P(NODE) DECL_LANG_FLAG_2 (TREE_CHECK (NODE, PARM_DECL))
+
+/* In a CALL_EXPR appearing in a template, true if Koenig lookup
+ should be performed at instantiation time. */
+#define KOENIG_LOOKUP_P(NODE) TREE_LANG_FLAG_0 (CALL_EXPR_CHECK (NODE))
+
+/* The index of a user-declared parameter in its function, starting at 1.
+ All artificial parameters will have index 0. */
+#define DECL_PARM_INDEX(NODE) (LANG_DECL_PARM_CHECK (NODE)->index)
+
+/* The level of a user-declared parameter in its function, starting at 1.
+ A parameter of the function will have level 1; a parameter of the first
+ nested function declarator (i.e. t in void f (void (*p)(T t))) will have
+ level 2. */
+#define DECL_PARM_LEVEL(NODE) (LANG_DECL_PARM_CHECK (NODE)->level)
+
+/* These flags are used by the conversion code.
+ CONV_IMPLICIT : Perform implicit conversions (standard and user-defined).
+ CONV_STATIC : Perform the explicit conversions for static_cast.
+ CONV_CONST : Perform the explicit conversions for const_cast.
+ CONV_REINTERPRET: Perform the explicit conversions for reinterpret_cast.
+ CONV_PRIVATE : Perform upcasts to private bases.
+ CONV_FORCE_TEMP : Require a new temporary when converting to the same
+ aggregate type. */
+
+#define CONV_IMPLICIT 1
+#define CONV_STATIC 2
+#define CONV_CONST 4
+#define CONV_REINTERPRET 8
+#define CONV_PRIVATE 16
+#define CONV_FORCE_TEMP 32
+#define CONV_FOLD 64
+#define CONV_OLD_CONVERT \
+ (CONV_IMPLICIT | CONV_STATIC | CONV_CONST | CONV_REINTERPRET)
+#define CONV_C_CAST \
+ (CONV_IMPLICIT | CONV_STATIC | CONV_CONST | CONV_REINTERPRET | CONV_PRIVATE \
+ | CONV_FORCE_TEMP)
+#define CONV_BACKEND_CONVERT (CONV_OLD_CONVERT | CONV_FOLD)
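+
+/* A hypothetical call site, to show how the bits combine: a C-style cast
+   would request the permissive set,
+
+     expr = do_conversion (type, expr, CONV_C_CAST);   // illustrative name
+
+   while an implicit conversion would pass CONV_IMPLICIT alone.  */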
+
+/* Used by build_expr_type_conversion to indicate which types are
+ acceptable as arguments to the expression under consideration. */
+
+#define WANT_INT 1 /* integer types, including bool */
+#define WANT_FLOAT 2 /* floating point types */
+#define WANT_ENUM 4 /* enumerated types */
+#define WANT_POINTER 8 /* pointer types */
+#define WANT_NULL 16 /* null pointer constant */
+#define WANT_VECTOR_OR_COMPLEX 32 /* vector or complex types */
+#define WANT_ARITH (WANT_INT | WANT_FLOAT | WANT_VECTOR_OR_COMPLEX)
+
+/* Used with comptypes, and related functions, to guide type
+ comparison. */
+
+#define COMPARE_STRICT \
+ 0 /* Just check if the types are the \
+ same. */
+#define COMPARE_BASE \
+ 1 /* Check to see if the second type is \
+ derived from the first. */
+#define COMPARE_DERIVED \
+ 2 /* Like COMPARE_BASE, but in \
+ reverse. */
+#define COMPARE_REDECLARATION \
+ 4 /* The comparison is being done when \
+ another declaration of an existing \
+ entity is seen. */
+#define COMPARE_STRUCTURAL \
+ 8 /* The comparison is intended to be \
+ structural. The actual comparison \
+ will be identical to \
+ COMPARE_STRICT. */
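+
+/* For instance, the same_type_p macro defined earlier in this header
+   expands to comptypes (TYPE1, TYPE2, COMPARE_STRICT): a plain same-type
+   test with none of the base/derived relaxations above.  */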
+
+/* Used with start function. */
+#define SF_DEFAULT 0 /* No flags. */
+#define SF_PRE_PARSED \
+ 1 /* The function declaration has \
+ already been parsed. */
+#define SF_INCLASS_INLINE \
+ 2 /* The function is an inline, defined \
+ in the class body. */
+
+/* Used with start_decl's initialized parameter. */
+#define SD_UNINITIALIZED 0
+#define SD_INITIALIZED 1
+/* Like SD_INITIALIZED, but also mark the new decl as DECL_DECOMPOSITION_P. */
+#define SD_DECOMPOSITION 2
+#define SD_DEFAULTED 3
+#define SD_DELETED 4
+
+/* Returns true if NODE is a pointer-to-data-member. */
+#define TYPE_PTRDATAMEM_P(NODE) (TREE_CODE (NODE) == OFFSET_TYPE)
+
+/* Nonzero if this type is const-qualified. */
+#define RS_TYPE_CONST_P(NODE) ((rs_type_quals (NODE) & TYPE_QUAL_CONST) != 0)
+
+/* The _DECL for this _TYPE. */
+#define TYPE_MAIN_DECL(NODE) (TYPE_STUB_DECL (TYPE_MAIN_VARIANT (NODE)))
+
+/* Nonzero for a VAR_DECL iff an explicit initializer was provided
+ or a non-trivial constructor is called. */
+#define DECL_NONTRIVIALLY_INITIALIZED_P(NODE) \
+ (TREE_LANG_FLAG_6 (VAR_DECL_CHECK (NODE)))
+
+/* Nonzero if DECL was declared with '= default' (maybe implicitly). */
+#define DECL_DEFAULTED_FN(DECL) (LANG_DECL_FN_CHECK (DECL)->defaulted_p)
+
+/* Nonzero for a class type means that the class type has a
+ user-declared constructor. */
+#define TYPE_HAS_USER_CONSTRUCTOR(NODE) (TYPE_LANG_FLAG_1 (NODE))
+
+/* Nonzero if the DECL was initialized in the class definition itself,
+ rather than outside the class. This is used for both static member
+ VAR_DECLS, and FUNCTION_DECLS that are defined in the class. */
+#define DECL_INITIALIZED_IN_CLASS_P(DECL) \
+ (DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \
+ ->u.base.initialized_in_class)
+
+/* Nonzero if DECL is explicitly defaulted in the class body. */
+#define DECL_DEFAULTED_IN_CLASS_P(DECL) \
+ (DECL_DEFAULTED_FN (DECL) && DECL_INITIALIZED_IN_CLASS_P (DECL))
+
+/* Nonzero for FUNCTION_DECL means that this decl is a non-static
+ member function. */
+#define DECL_NONSTATIC_MEMBER_FUNCTION_P(NODE) \
+ (TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE)
+
+/* For FUNCTION_DECLs: nonzero means that this function is a
+ constructor or a destructor with an extra in-charge parameter to
+ control whether or not virtual bases are constructed. */
+#define DECL_HAS_IN_CHARGE_PARM_P(NODE) \
+ (LANG_DECL_FN_CHECK (NODE)->has_in_charge_parm_p)
+
+/* Nonzero if the VTT parm has been added to NODE. */
+#define DECL_HAS_VTT_PARM_P(NODE) (LANG_DECL_FN_CHECK (NODE)->has_vtt_parm_p)
+
+/* Given a FUNCTION_DECL, returns the first TREE_LIST out of TYPE_ARG_TYPES
+ which refers to a user-written parameter. */
+#define FUNCTION_FIRST_USER_PARMTYPE(NODE) \
+ skip_artificial_parms_for ((NODE), TYPE_ARG_TYPES (TREE_TYPE (NODE)))
+
+/* Similarly, but for DECL_ARGUMENTS. */
+#define FUNCTION_FIRST_USER_PARM(NODE) \
+ skip_artificial_parms_for ((NODE), DECL_ARGUMENTS (NODE))
+
+/* For FUNCTION_DECLs and TEMPLATE_DECLs: nonzero means that this function
+ is a constructor. */
+#define DECL_CONSTRUCTOR_P(NODE) DECL_CXX_CONSTRUCTOR_P (NODE)
+
+/* Nonzero if DECL was declared with '= delete'. */
+#define DECL_DELETED_FN(DECL) \
+ (LANG_DECL_FN_CHECK (DECL)->min.base.threadprivate_or_deleted_p)
+
+/* True if NODE is a brace-enclosed initializer. */
+#define BRACE_ENCLOSED_INITIALIZER_P(NODE) \
+ (TREE_CODE (NODE) == CONSTRUCTOR && TREE_TYPE (NODE) == init_list_type_node)
+
+/* True if FNDECL is an immediate function. */
+#define DECL_IMMEDIATE_FUNCTION_P(NODE) \
+ (DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (NODE)) \
+ ? LANG_DECL_FN_CHECK (NODE)->immediate_fn_p \
+ : false)
+#define SET_DECL_IMMEDIATE_FUNCTION_P(NODE) \
+ (retrofit_lang_decl (FUNCTION_DECL_CHECK (NODE)), \
+ LANG_DECL_FN_CHECK (NODE)->immediate_fn_p = true)
+
+/* True if this CONSTRUCTOR should not be used as a variable initializer
+ because it was loaded from a constexpr variable with mutable fields. */
+#define CONSTRUCTOR_MUTABLE_POISON(NODE) \
+ (TREE_LANG_FLAG_2 (CONSTRUCTOR_CHECK (NODE)))
+
+/* For a pointer-to-member constant `X::Y' this is the _DECL for
+ `Y'. */
+#define PTRMEM_CST_MEMBER(NODE) \
+ (((ptrmem_cst_t) PTRMEM_CST_CHECK (NODE))->member)
+
+/* Indicates whether a COMPONENT_REF or a SCOPE_REF has been parenthesized, an
+ INDIRECT_REF comes from parenthesizing a _DECL, or a PAREN_EXPR identifies a
+ parenthesized initializer relevant for decltype(auto). Currently only set
+ some of the time in C++14 mode. */
+
+#define REF_PARENTHESIZED_P(NODE) \
+ TREE_LANG_FLAG_2 (TREE_CHECK5 ((NODE), COMPONENT_REF, INDIRECT_REF, \
+ SCOPE_REF, VIEW_CONVERT_EXPR, PAREN_EXPR))
+
+/* Returns true if NODE is a pointer-to-member. */
+#define TYPE_PTRMEM_P(NODE) \
+ (TYPE_PTRDATAMEM_P (NODE) || TYPE_PTRMEMFUNC_P (NODE))
+
+/* Returns true if NODE is a pointer or a pointer-to-member. */
+#define TYPE_PTR_OR_PTRMEM_P(NODE) (TYPE_PTR_P (NODE) || TYPE_PTRMEM_P (NODE))
+
+/* Nonzero if NODE is an artificial VAR_DECL for a C++17 structured binding
+ declaration or one of VAR_DECLs for the user identifiers in it. */
+#define DECL_DECOMPOSITION_P(NODE) \
+ (VAR_P (NODE) && DECL_LANG_SPECIFIC (NODE) \
+ ? DECL_LANG_SPECIFIC (NODE)->u.base.selector == lds_decomp \
+ : false)
+
+/* The underlying artificial VAR_DECL for structured binding. */
+#define DECL_DECOMP_BASE(NODE) (LANG_DECL_DECOMP_CHECK (NODE)->base)
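+
+/* For example, for `auto [x, y] = point;' the front end creates an
+   artificial VAR_DECL for the whole object plus VAR_DECLs for `x' and `y';
+   DECL_DECOMPOSITION_P holds for all of them, and the named ones reach the
+   artificial one through DECL_DECOMP_BASE.  */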
+
+/* Nonzero if either DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P or
+ DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P is true of NODE. */
+#define DECL_MAYBE_IN_CHARGE_CDTOR_P(NODE) \
+ (DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P (NODE) \
+ || DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P (NODE))
+
+/* Nonzero if NODE (a FUNCTION_DECL) is a destructor, but not the
+   specialized in-charge destructor, in-charge deleting destructor,
+ or the base destructor. */
+#define DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P(NODE) \
+ (DECL_NAME (NODE) == dtor_identifier)
+
+/* Nonzero if NODE (a _DECL) is a cloned constructor or
+ destructor. */
+#define DECL_CLONED_FUNCTION_P(NODE) \
+ (DECL_NAME (NODE) && IDENTIFIER_CDTOR_P (DECL_NAME (NODE)) \
+ && !DECL_MAYBE_IN_CHARGE_CDTOR_P (NODE))
+
+/* If DECL_CLONED_FUNCTION_P holds, this is the function that was
+ cloned. */
+#define DECL_CLONED_FUNCTION(NODE) \
+ (DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (NODE))->u.fn.u5.cloned_function)
+
+/* Nonzero if NODE (a FUNCTION_DECL) is a constructor, but not either the
+ specialized in-charge constructor or the specialized not-in-charge
+ constructor. */
+#define DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P(NODE) \
+ (DECL_NAME (NODE) == ctor_identifier)
+
+/* The current C++-specific per-function global variables. */
+
+#define cp_function_chain (cfun->language)
+
+/* In a constructor or destructor, the point at which all derived-class
+   construction or destruction has been done; i.e., just before a
+   constructor returns, or before any base-class destruction is done
+   in a destructor.  */
+
+#define cdtor_label cp_function_chain->x_cdtor_label
+
+/* When we're processing a member function, current_class_ptr is the
+ PARM_DECL for the `this' pointer. The current_class_ref is an
+ expression for `*this'. */
+
+#define current_class_ptr \
+ (*(cfun && cp_function_chain ? &cp_function_chain->x_current_class_ptr \
+ : &scope_chain->x_current_class_ptr))
+#define current_class_ref \
+ (*(cfun && cp_function_chain ? &cp_function_chain->x_current_class_ref \
+ : &scope_chain->x_current_class_ref))
+
+/* The EH_SPEC_BLOCK for the exception-specifiers for the current
+ function, if any. */
+
+#define current_eh_spec_block cp_function_chain->x_eh_spec_block
+
+/* The `__in_chrg' parameter for the current function. Only used for
+ constructors and destructors. */
+
+#define current_in_charge_parm cp_function_chain->x_in_charge_parm
+
+/* The `__vtt_parm' parameter for the current function. Only used for
+ constructors and destructors. */
+
+#define current_vtt_parm cp_function_chain->x_vtt_parm
+
+/* A boolean flag to control whether we need to clean up the return value if a
+   local destructor throws.  Only used in functions that return by value a
+   class with a destructor.  Constructors and destructors never do, so this
+   can share the field used for current_vtt_parm.  */
+
+#define current_retval_sentinel current_vtt_parm
+
+/* Set to 0 at beginning of a function definition, set to 1 if
+ a return statement that specifies a return value is seen. */
+
+#define current_function_returns_value cp_function_chain->returns_value
+
+/* Set to 0 at beginning of a function definition, set to 1 if
+ a return statement with no argument is seen. */
+
+#define current_function_returns_null cp_function_chain->returns_null
+
+/* Set to 0 at beginning of a function definition, set to 1 if
+ a call to a noreturn function is seen. */
+
+#define current_function_returns_abnormally \
+ cp_function_chain->returns_abnormally
+
+/* Set to 0 at beginning of a function definition, set to 1 if we see an
+ obvious infinite loop. This can have false positives and false
+ negatives, so it should only be used as a heuristic. */
+
+#define current_function_infinite_loop cp_function_chain->infinite_loop
+
+/* Nonzero if we are processing a base initializer. Zero elsewhere. */
+#define in_base_initializer cp_function_chain->x_in_base_initializer
+
+#define in_function_try_handler cp_function_chain->x_in_function_try_handler
+
+/* Expression always returned from function, or error_mark_node
+ otherwise, for use by the automatic named return value optimization. */
+
+#define current_function_return_value (cp_function_chain->x_return_value)
+
+#define current_class_type scope_chain->class_type
+
+#define in_discarded_stmt scope_chain->discarded_stmt
+#define in_consteval_if_p scope_chain->consteval_if_p
+
+/* Nonzero means that this type is being defined. I.e., the left brace
+ starting the definition of this type has been seen. */
+#define TYPE_BEING_DEFINED(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->being_defined)
+
+/* Nonzero for FUNCTION_DECL means that this decl is a static
+ member function. */
+#define DECL_STATIC_FUNCTION_P(NODE) \
+ (LANG_DECL_FN_CHECK (NODE)->static_function)
+
+/* Nonzero for FUNCTION_DECL means that this decl is a member function
+ (static or non-static). */
+#define DECL_FUNCTION_MEMBER_P(NODE) \
+ (DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) || DECL_STATIC_FUNCTION_P (NODE))
+
+/* Nonzero if NODE is the target for genericization of 'return' stmts
+ in constructors/destructors of targetm.cxx.cdtor_returns_this targets. */
+#define LABEL_DECL_CDTOR(NODE) DECL_LANG_FLAG_2 (LABEL_DECL_CHECK (NODE))
+
+/* Nonzero if this NOP_EXPR is a reinterpret_cast. Such conversions
+ are not constexprs. Other NOP_EXPRs are. */
+#define REINTERPRET_CAST_P(NODE) TREE_LANG_FLAG_0 (NOP_EXPR_CHECK (NODE))
+
+/* Returns true if NODE is an object type:
+
+ [basic.types]
+
+ An object type is a (possibly cv-qualified) type that is not a
+ function type, not a reference type, and not a void type.
+
+ Keep these checks in ascending order, for speed. */
+#define TYPE_OBJ_P(NODE) \
+ (!TYPE_REF_P (NODE) && !VOID_TYPE_P (NODE) && !FUNC_OR_METHOD_TYPE_P (NODE))
+
+/* Returns true if NODE is a pointer to an object. Keep these checks
+ in ascending tree code order. */
+#define TYPE_PTROB_P(NODE) (TYPE_PTR_P (NODE) && TYPE_OBJ_P (TREE_TYPE (NODE)))
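+
+/* So `int *' satisfies TYPE_PTROB_P, while `void *' and a pointer to
+   function do not: void and function types are not object types.  */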
+
+/* True if this CONSTRUCTOR contains PLACEHOLDER_EXPRs referencing the
+ CONSTRUCTOR's type not nested inside another CONSTRUCTOR marked with
+ CONSTRUCTOR_PLACEHOLDER_BOUNDARY. */
+#define CONSTRUCTOR_PLACEHOLDER_BOUNDARY(NODE) \
+ (TREE_LANG_FLAG_5 (CONSTRUCTOR_CHECK (NODE)))
+
+#define AGGR_INIT_EXPR_SLOT(NODE) TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 2)
+
+/* True if this TARGET_EXPR expresses direct-initialization of an object
+ to be named later. */
+#define TARGET_EXPR_DIRECT_INIT_P(NODE) \
+ TREE_LANG_FLAG_2 (TARGET_EXPR_CHECK (NODE))
+
+/* Nonzero if DECL is a declaration of __builtin_constant_p. */
+#define DECL_IS_BUILTIN_CONSTANT_P(NODE) \
+ (TREE_CODE (NODE) == FUNCTION_DECL \
+ && DECL_BUILT_IN_CLASS (NODE) == BUILT_IN_NORMAL \
+ && DECL_FUNCTION_CODE (NODE) == BUILT_IN_CONSTANT_P)
+
+/* True iff this represents an lvalue being treated as an rvalue during return
+ or throw as per [class.copy.elision]. */
+#define IMPLICIT_RVALUE_P(NODE) \
+ TREE_LANG_FLAG_3 (TREE_CHECK2 ((NODE), NON_LVALUE_EXPR, STATIC_CAST_EXPR))
+
+/* Nonzero for _DECL means that this decl appears in (or will appear
+ in) as a member in a RECORD_TYPE or UNION_TYPE node. It is also for
+ detecting circularity in case members are multiply defined. In the
+ case of a VAR_DECL, it means that no definition has been seen, even
+ if an initializer has been. */
+#define DECL_IN_AGGR_P(NODE) (DECL_LANG_FLAG_3 (NODE))
+
+/* Nonzero means that this class type is a non-standard-layout class. */
+#define CLASSTYPE_NON_STD_LAYOUT(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->non_std_layout)
+
+/* Nonzero for FIELD_DECL node means that this field is a base class
+ of the parent object, as opposed to a member field. */
+#define DECL_FIELD_IS_BASE(NODE) DECL_LANG_FLAG_6 (FIELD_DECL_CHECK (NODE))
+
+/* Nonzero if TYPE is an anonymous union type. */
+#define ANON_UNION_TYPE_P(NODE) \
+ (TREE_CODE (NODE) == UNION_TYPE && ANON_AGGR_TYPE_P (NODE))
+
+/* For an ANON_AGGR_TYPE_P the single FIELD_DECL it is used with. */
+#define ANON_AGGR_TYPE_FIELD(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->typeinfo_var)
+
+/* Nonzero if TYPE is an anonymous union or struct type. We have to use a
+ flag for this because "A union for which objects or pointers are
+ declared is not an anonymous union" [class.union]. */
+#define ANON_AGGR_TYPE_P(NODE) \
+ (CLASS_TYPE_P (NODE) && LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr)
+#define SET_ANON_AGGR_TYPE_P(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr = 1)
+
+/* Nonzero if T is a class type but not a union. */
+#define NON_UNION_CLASS_TYPE_P(T) \
+ (TREE_CODE (T) == RECORD_TYPE && TYPE_LANG_FLAG_5 (T))
+
+/* Determines whether an ENUMERAL_TYPE has an explicit
+ underlying type. */
+#define ENUM_FIXED_UNDERLYING_TYPE_P(NODE) (TYPE_LANG_FLAG_5 (NODE))
+
+/* Returns the underlying type of the given enumeration type. The
+ underlying type is determined in different ways, depending on the
+ properties of the enum:
+
+ - In C++0x, the underlying type can be explicitly specified, e.g.,
+
+ enum E1 : char { ... } // underlying type is char
+
+ - In a C++0x scoped enumeration, the underlying type is int
+     unless otherwise specified:
+
+ enum class E2 { ... } // underlying type is int
+
+ - Otherwise, the underlying type is determined based on the
+ values of the enumerators. In this case, the
+ ENUM_UNDERLYING_TYPE will not be set until after the definition
+ of the enumeration is completed by finish_enum. */
+#define ENUM_UNDERLYING_TYPE(TYPE) TREE_TYPE (ENUMERAL_TYPE_CHECK (TYPE))
+
+/* Nonzero if this type is volatile-qualified. */
+#define RS_TYPE_VOLATILE_P(NODE) \
+ ((rs_type_quals (NODE) & TYPE_QUAL_VOLATILE) != 0)
+
+/* Nonzero means that this type is either complete or being defined, so we
+ can do lookup in it. */
+#define COMPLETE_OR_OPEN_TYPE_P(NODE) \
+ (COMPLETE_TYPE_P (NODE) || (CLASS_TYPE_P (NODE) && TYPE_BEING_DEFINED (NODE)))
+
+/* Indicates when overload resolution may resolve to a pointer to
+ member function. [expr.unary.op]/3 */
+#define PTRMEM_OK_P(NODE) \
+ TREE_LANG_FLAG_0 (TREE_CHECK3 ((NODE), ADDR_EXPR, OFFSET_REF, SCOPE_REF))
+
+/* Returns nonzero iff NODE is a declaration for the global function
+ `main'. */
+#define DECL_MAIN_P(NODE) \
+ (DECL_NAME (NODE) != NULL_TREE && MAIN_NAME_P (DECL_NAME (NODE)) \
+ && flag_hosted)
+
+/* Nonzero if the variable was declared to be thread-local.
+ We need a special C++ version of this test because the middle-end
+ DECL_THREAD_LOCAL_P uses the symtab, so we can't use it for
+ templates. */
+#define RS_DECL_THREAD_LOCAL_P(NODE) (TREE_LANG_FLAG_0 (VAR_DECL_CHECK (NODE)))
+
+#define COND_EXPR_IS_VEC_DELETE(NODE) TREE_LANG_FLAG_0 (COND_EXPR_CHECK (NODE))
+
+/* RANGE_FOR_STMT accessors. These give access to the declarator,
+ expression, body, and scope of the statement, respectively. */
+#define RANGE_FOR_DECL(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 0)
+#define RANGE_FOR_EXPR(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 1)
+#define RANGE_FOR_BODY(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 2)
+#define RANGE_FOR_SCOPE(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 3)
+#define RANGE_FOR_UNROLL(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 4)
+#define RANGE_FOR_INIT_STMT(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 5)
+#define RANGE_FOR_IVDEP(NODE) TREE_LANG_FLAG_6 (RANGE_FOR_STMT_CHECK (NODE))
+
+#define CP_DECL_CONTEXT(NODE) \
+ (!DECL_FILE_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : global_namespace)
+#define CP_TYPE_CONTEXT(NODE) \
+ (!TYPE_FILE_SCOPE_P (NODE) ? TYPE_CONTEXT (NODE) : global_namespace)
+#define FROB_CONTEXT(NODE) \
+ ((NODE) == global_namespace ? DECL_CONTEXT (NODE) : (NODE))
+
+/* Nonzero if NODE is the std namespace. */
+#define DECL_NAMESPACE_STD_P(NODE) ((NODE) == std_node)
+
+/* Whether the namespace is an inline namespace. */
+#define DECL_NAMESPACE_INLINE_P(NODE) \
+ TREE_LANG_FLAG_0 (NAMESPACE_DECL_CHECK (NODE))
+
+/* Based off of TYPE_UNNAMED_P. */
+#define LAMBDA_TYPE_P(NODE) \
+ (TREE_CODE (NODE) == RECORD_TYPE && TYPE_LINKAGE_IDENTIFIER (NODE) \
+ && IDENTIFIER_LAMBDA_P (TYPE_LINKAGE_IDENTIFIER (NODE)))
+
+/* Macros to make error reporting functions' lives easier. */
+#define TYPE_LINKAGE_IDENTIFIER(NODE) \
+ (TYPE_IDENTIFIER (TYPE_MAIN_VARIANT (NODE)))
+
+/* Identifiers used for lambda types are almost anonymous. Use this
+ spare flag to distinguish them (they also have the anonymous flag). */
+#define IDENTIFIER_LAMBDA_P(NODE) \
+ (IDENTIFIER_NODE_CHECK (NODE)->base.protected_flag)
+
+/* If NODE, a FUNCTION_DECL, is a C++11 inheriting constructor, then this
+ is the constructor it inherits from. */
+#define DECL_INHERITED_CTOR(NODE) \
+ (DECL_DECLARES_FUNCTION_P (NODE) && DECL_CONSTRUCTOR_P (NODE) \
+ ? LANG_DECL_FN_CHECK (NODE)->context \
+ : NULL_TREE)
+
+/* True if the class type TYPE is a literal type. */
+#define CLASSTYPE_LITERAL_P(TYPE) (LANG_TYPE_CLASS_CHECK (TYPE)->is_literal)
+
+/* Nonzero if NODE (a FUNCTION_DECL or TEMPLATE_DECL)
+ is a destructor. */
+#define DECL_DESTRUCTOR_P(NODE) DECL_CXX_DESTRUCTOR_P (NODE)
+
+/* Nonzero if TYPE has a trivial destructor. From [class.dtor]:
+
+ A destructor is trivial if it is an implicitly declared
+ destructor and if:
+
+ - all of the direct base classes of its class have trivial
+ destructors,
+
+ - for all of the non-static data members of its class that are
+ of class type (or array thereof), each such class has a
+ trivial destructor. */
+#define TYPE_HAS_TRIVIAL_DESTRUCTOR(NODE) \
+ (!TYPE_HAS_NONTRIVIAL_DESTRUCTOR (NODE))
+
+/* Nonzero means that NODE (a class type) has a destructor -- but that
+ it has not yet been declared. */
+#define CLASSTYPE_LAZY_DESTRUCTOR(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->lazy_destructor)
+
+/* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a complete
+ object. */
+#define DECL_COMPLETE_CONSTRUCTOR_P(NODE) \
+ (DECL_NAME (NODE) == complete_ctor_identifier)
+
+/* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a base
+ object. */
+#define DECL_BASE_CONSTRUCTOR_P(NODE) (DECL_NAME (NODE) == base_ctor_identifier)
+
+/* Nonzero if NODE (a FUNCTION_DECL) is a constructor, but not either the
+ specialized in-charge constructor or the specialized not-in-charge
+ constructor. */
+#define DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P(NODE) \
+ (DECL_NAME (NODE) == ctor_identifier)
+
+/* Nonzero if NODE (a FUNCTION_DECL) is a copy constructor. */
+#define DECL_COPY_CONSTRUCTOR_P(NODE) \
+ (DECL_CONSTRUCTOR_P (NODE) && copy_fn_p (NODE) > 0)
+
+/* Nonzero if NODE (a FUNCTION_DECL) is a move constructor. */
+#define DECL_MOVE_CONSTRUCTOR_P(NODE) \
+ (DECL_CONSTRUCTOR_P (NODE) && move_fn_p (NODE))
+
+/* Nonzero if NODE (a FUNCTION_DECL) is a destructor, but not the
+ specialized in-charge destructor, in-charge deleting destructor,
+ or the base destructor. */
+#define DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P(NODE) \
+ (DECL_NAME (NODE) == dtor_identifier)
+
+/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
+ object. */
+#define DECL_COMPLETE_DESTRUCTOR_P(NODE) \
+ (DECL_NAME (NODE) == complete_dtor_identifier)
+
+/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a base
+ object. */
+#define DECL_BASE_DESTRUCTOR_P(NODE) (DECL_NAME (NODE) == base_dtor_identifier)
+
+/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
+ object that deletes the object after it has been destroyed. */
+#define DECL_DELETING_DESTRUCTOR_P(NODE) \
+ (DECL_NAME (NODE) == deleting_dtor_identifier)
+
+/* Nonzero if either DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P or
+ DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P is true of NODE. */
+#define DECL_MAYBE_IN_CHARGE_CDTOR_P(NODE) \
+ (DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P (NODE) \
+ || DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P (NODE))
+
+/* Nonzero if NODE (a _DECL) is a cloned constructor or
+ destructor. */
+#define DECL_CLONED_FUNCTION_P(NODE) \
+ (DECL_NAME (NODE) && IDENTIFIER_CDTOR_P (DECL_NAME (NODE)) \
+ && !DECL_MAYBE_IN_CHARGE_CDTOR_P (NODE))
+
+/* If DECL_CLONED_FUNCTION_P holds, this is the function that was
+ cloned. */
+#define DECL_CLONED_FUNCTION(NODE) \
+ (DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (NODE))->u.fn.u5.cloned_function)
+
+/* Nonzero means that an object of this type cannot be initialized using
+ an initializer list. */
+#define CLASSTYPE_NON_AGGREGATE(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->non_aggregate)
+#define TYPE_NON_AGGREGATE_CLASS(NODE) \
+ (CLASS_TYPE_P (NODE) && CLASSTYPE_NON_AGGREGATE (NODE))
+
+/* Nonzero for class type means that the default constructor is trivial. */
+#define TYPE_HAS_TRIVIAL_DFLT(NODE) \
+ (TYPE_HAS_DEFAULT_CONSTRUCTOR (NODE) && !TYPE_HAS_COMPLEX_DFLT (NODE))
+
+/* Nonzero if this class has a constexpr constructor other than a copy/move
+ constructor. Note that a class can have constexpr constructors for
+ static initialization even if it isn't a literal class. */
+#define TYPE_HAS_CONSTEXPR_CTOR(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->has_constexpr_ctor)
+
+/* Nonzero if there is no trivial default constructor for this class. */
+#define TYPE_HAS_COMPLEX_DFLT(NODE) \
+ (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_dflt)
+
+/* [dcl.init.aggr]
+
+ An aggregate is an array or a class with no user-provided
+ constructors, no brace-or-equal-initializers for non-static data
+ members, no private or protected non-static data members, no
+ base classes, and no virtual functions.
+
+ As an extension, we also treat vectors as aggregates. Keep these
+ checks in ascending code order. */
+#define CP_AGGREGATE_TYPE_P(TYPE) \
+ (gnu_vector_type_p (TYPE) || TREE_CODE (TYPE) == ARRAY_TYPE \
+ || (CLASS_TYPE_P (TYPE) && COMPLETE_TYPE_P (TYPE) \
+ && !CLASSTYPE_NON_AGGREGATE (TYPE)))
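+
+/* Editorial illustration (not from the original sources): under the
+ definition above, a plain `struct S { int a; int b; };' satisfies
+ CP_AGGREGATE_TYPE_P, while a hypothetical `struct T { T (); };' does
+ not, because its user-provided constructor marks the class with
+ CLASSTYPE_NON_AGGREGATE. */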
+
+/* Nonzero for a FIELD_DECL means that this member object type
+ is mutable. */
+#define DECL_MUTABLE_P(NODE) (DECL_LANG_FLAG_0 (FIELD_DECL_CHECK (NODE)))
+
+#if defined ENABLE_TREE_CHECKING
+
+#define LANG_DECL_MIN_CHECK(NODE) \
+ __extension__({ \
+ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
+ if (!LANG_DECL_HAS_MIN (NODE)) \
+ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
+ &lt->u.min; \
+ })
+
+/* We want to be able to check DECL_CONSTRUCTOR_P and such on a function
+ template, not just on a FUNCTION_DECL. So when looking for things in
+ lang_decl_fn, look down through a TEMPLATE_DECL into its result. */
+#define LANG_DECL_FN_CHECK(NODE) \
+ __extension__({ \
+ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
+ if (!DECL_DECLARES_FUNCTION_P (NODE) || lt->u.base.selector != lds_fn) \
+ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
+ &lt->u.fn; \
+ })
+
+#define LANG_DECL_NS_CHECK(NODE) \
+ __extension__({ \
+ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
+ if (TREE_CODE (NODE) != NAMESPACE_DECL || lt->u.base.selector != lds_ns) \
+ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
+ &lt->u.ns; \
+ })
+
+#define LANG_DECL_PARM_CHECK(NODE) \
+ __extension__({ \
+ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
+ if (TREE_CODE (NODE) != PARM_DECL || lt->u.base.selector != lds_parm) \
+ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
+ &lt->u.parm; \
+ })
+
+#define LANG_DECL_DECOMP_CHECK(NODE) \
+ __extension__({ \
+ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
+ if (!VAR_P (NODE) || lt->u.base.selector != lds_decomp) \
+ lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
+ &lt->u.decomp; \
+ })
+
+#else
+
+#define LANG_DECL_MIN_CHECK(NODE) (&DECL_LANG_SPECIFIC (NODE)->u.min)
+
+#define LANG_DECL_FN_CHECK(NODE) (&DECL_LANG_SPECIFIC (NODE)->u.fn)
+
+#define LANG_DECL_NS_CHECK(NODE) (&DECL_LANG_SPECIFIC (NODE)->u.ns)
+
+#define LANG_DECL_PARM_CHECK(NODE) (&DECL_LANG_SPECIFIC (NODE)->u.parm)
+
+#define LANG_DECL_DECOMP_CHECK(NODE) (&DECL_LANG_SPECIFIC (NODE)->u.decomp)
+
+#endif /* ENABLE_TREE_CHECKING */
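+
+/* Usage sketch (editorial, not part of the original sources): fields of
+ lang_decl_fn are reached through the checked accessor, e.g.
+
+ if (LANG_DECL_FN_CHECK (fndecl)->thunk_p)
+ ...
+
+ With ENABLE_TREE_CHECKING defined, passing a decl that does not
+ declare a function (or whose lang_decl selector is not lds_fn) aborts
+ via lang_check_failed; otherwise the field is accessed unchecked. */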
+
+// Below macros are copied from gcc/c-family/c-common.h
+
+/* In a FIELD_DECL, nonzero if the decl was originally a bitfield. */
+#define DECL_C_BIT_FIELD(NODE) (DECL_LANG_FLAG_4 (FIELD_DECL_CHECK (NODE)) == 1)
+#define SET_DECL_C_BIT_FIELD(NODE) \
+ (DECL_LANG_FLAG_4 (FIELD_DECL_CHECK (NODE)) = 1)
+#define CLEAR_DECL_C_BIT_FIELD(NODE) \
+ (DECL_LANG_FLAG_4 (FIELD_DECL_CHECK (NODE)) = 0)
+
+/* True if the decl was an unnamed bitfield. */
+#define DECL_UNNAMED_BIT_FIELD(NODE) \
+ (DECL_C_BIT_FIELD (NODE) && !DECL_NAME (NODE))
+
+/* 1 iff NODE is function-local. */
+#define DECL_FUNCTION_SCOPE_P(NODE) \
+ (DECL_CONTEXT (NODE) && TREE_CODE (DECL_CONTEXT (NODE)) == FUNCTION_DECL)
+
+/* Nonzero if this type is const-qualified, but not
+ volatile-qualified. Other qualifiers are ignored. This macro is
+ used to test whether or not it is OK to bind an rvalue to a
+ reference. */
+#define RS_TYPE_CONST_NON_VOLATILE_P(NODE) \
+ ((rs_type_quals (NODE) & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)) \
+ == TYPE_QUAL_CONST)
+
+/* Returns true if TYPE is an integral or enumeration type. Keep
+ these checks in ascending code order. */
+#define INTEGRAL_OR_ENUMERATION_TYPE_P(TYPE) \
+ (TREE_CODE (TYPE) == ENUMERAL_TYPE || RS_INTEGRAL_TYPE_P (TYPE))
+
+/* Nonzero for a VAR_DECL that was initialized with a
+ constant-expression. */
+#define DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P(NODE) \
+ (TREE_LANG_FLAG_2 (VAR_DECL_CHECK (NODE)))
+
+/* WHILE_STMT accessors. These give access to the condition of the
+ while statement and the body of the while statement, respectively. */
+#define WHILE_COND(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 0)
+#define WHILE_BODY(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 1)
+
+/* FOR_STMT accessors. These give access to the init statement,
+ condition, update expression, and body of the for statement,
+ respectively. */
+#define FOR_INIT_STMT(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 0)
+#define FOR_COND(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 1)
+#define FOR_EXPR(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 2)
+#define FOR_BODY(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 3)
+#define FOR_SCOPE(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 4)
+
+#define SWITCH_STMT_COND(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 0)
+#define SWITCH_STMT_BODY(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 1)
+#define SWITCH_STMT_TYPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 2)
+#define SWITCH_STMT_SCOPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 3)
+
+/* Nonzero if NODE is the target for genericization of 'break' stmts. */
+#define LABEL_DECL_BREAK(NODE) DECL_LANG_FLAG_0 (LABEL_DECL_CHECK (NODE))
+
+/* Nonzero if NODE is the target for genericization of 'continue' stmts. */
+#define LABEL_DECL_CONTINUE(NODE) DECL_LANG_FLAG_1 (LABEL_DECL_CHECK (NODE))
+
+// Above macros are copied from gcc/c-family/c-common.h
+
+// Below macros are copied from gcc/cp/name-lookup.h
+
+/* Lookup walker marking. */
+#define LOOKUP_SEEN_P(NODE) TREE_VISITED (NODE)
+#define LOOKUP_FOUND_P(NODE) \
+ TREE_LANG_FLAG_4 (TREE_CHECK4 (NODE, RECORD_TYPE, UNION_TYPE, ENUMERAL_TYPE, \
+ NAMESPACE_DECL))
+
+// Above macros are copied from gcc/cp/name-lookup.h
+
+// Below macros are copied from gcc/cp/name-lookup.cc
+
+/* Create an overload suitable for recording an artificial TYPE_DECL
+ and another decl. We use this mechanism to implement the struct
+ stat hack. */
+
+#define STAT_HACK_P(N) ((N) && TREE_CODE (N) == OVERLOAD && OVL_LOOKUP_P (N))
+#define STAT_TYPE_VISIBLE_P(N) TREE_USED (OVERLOAD_CHECK (N))
+#define STAT_TYPE(N) TREE_TYPE (N)
+#define STAT_DECL(N) OVL_FUNCTION (N)
+#define STAT_VISIBLE(N) OVL_CHAIN (N)
+#define MAYBE_STAT_DECL(N) (STAT_HACK_P (N) ? STAT_DECL (N) : N)
+#define MAYBE_STAT_TYPE(N) (STAT_HACK_P (N) ? STAT_TYPE (N) : NULL_TREE)
+
+/* When a STAT_HACK_P is true, OVL_USING_P and OVL_EXPORT_P are valid
+ and apply to the hacked type. */
+
+/* For regular (maybe) overloaded functions, we have OVL_HIDDEN_P.
+ But we also need to indicate hiddenness on implicit type decls
+ (injected friend classes), and (coming soon) decls injected from
+ block-scope externs. It is too awkward to press the existing
+ overload marking for that. If we have a hidden non-function, we
+ always create a STAT_HACK, and use these two markers as needed. */
+#define STAT_TYPE_HIDDEN_P(N) OVL_HIDDEN_P (N)
+#define STAT_DECL_HIDDEN_P(N) OVL_DEDUP_P (N)
+
+/* The binding level currently in effect. */
+
+#define current_binding_level \
+ (*(cfun && cp_function_chain && cp_function_chain->bindings \
+ ? &cp_function_chain->bindings \
+ : &scope_chain->bindings))
+
+// Above macros are copied from gcc/cp/name-lookup.cc
+
+/* The various kinds of special functions. If you add to this list,
+ you should update special_function_p as well. */
+enum special_function_kind
+{
+ sfk_none = 0, /* Not a special function. This enumerator
+ must have value zero; see
+ special_function_p. */
+ /* The following are ordered, for use by member synthesis fns. */
+ sfk_destructor, /* A destructor. */
+ sfk_constructor, /* A constructor. */
+ sfk_inheriting_constructor, /* An inheriting constructor */
+ sfk_copy_constructor, /* A copy constructor. */
+ sfk_move_constructor, /* A move constructor. */
+ sfk_copy_assignment, /* A copy assignment operator. */
+ sfk_move_assignment, /* A move assignment operator. */
+ /* The following are unordered. */
+ sfk_complete_destructor, /* A destructor for complete objects. */
+ sfk_base_destructor, /* A destructor for base subobjects. */
+ sfk_deleting_destructor, /* A destructor for complete objects that
+ deletes the object after it has been
+ destroyed. */
+ sfk_conversion, /* A conversion operator. */
+ sfk_deduction_guide, /* A class template deduction guide. */
+ sfk_comparison, /* A comparison operator (e.g. ==, <, <=>). */
+ sfk_virtual_destructor /* Used by member synthesis fns. */
+};
+
+/* Places where an lvalue, or modifiable lvalue, may be required.
+ Used to select diagnostic messages in lvalue_error and
+ readonly_error. */
+enum lvalue_use
+{
+ lv_assign,
+ lv_increment,
+ lv_decrement,
+ lv_addressof,
+ lv_asm
+};
+
+/* A class for recording information about access failures (e.g. private
+ fields), so that we can potentially supply a fix-it hint about
+ an accessor (from a context in which the constness of the object
+ is known). */
+
+class access_failure_info
+{
+public:
+ access_failure_info ()
+ : m_was_inaccessible (false), m_basetype_path (NULL_TREE),
+ m_decl (NULL_TREE), m_diag_decl (NULL_TREE)
+ {}
+
+ void record_access_failure (tree basetype_path, tree decl, tree diag_decl);
+
+ bool was_inaccessible_p () const { return m_was_inaccessible; }
+ tree get_decl () const { return m_decl; }
+ tree get_diag_decl () const { return m_diag_decl; }
+ tree get_any_accessor (bool const_p) const;
+ void maybe_suggest_accessor (bool const_p) const;
+ static void add_fixit_hint (rich_location *richloc, tree accessor);
+
+private:
+ bool m_was_inaccessible;
+ tree m_basetype_path;
+ tree m_decl;
+ tree m_diag_decl;
+};
+
+/* The various kinds of access check during parsing. */
+enum deferring_kind
+{
+ dk_no_deferred = 0, /* Check access immediately */
+ dk_deferred = 1, /* Deferred check */
+ dk_no_check = 2 /* No access check */
+};
+
+/* The representation of a deferred access check. */
+
+struct GTY (()) deferred_access_check
+{
+ /* The base class in which the declaration is referenced. */
+ tree binfo;
+ /* The declaration whose access must be checked. */
+ tree decl;
+ /* The declaration that should be used in the error message. */
+ tree diag_decl;
+ /* The location of this access. */
+ location_t loc;
+};
+
+struct GTY (()) tree_template_info
+{
+ struct tree_base base;
+ tree tmpl;
+ tree args;
+ vec<deferred_access_check, va_gc> *deferred_access_checks;
+};
+
+/* The various kinds of lvalues we distinguish. */
+enum cp_lvalue_kind_flags
+{
+ clk_none = 0, /* Things that are not an lvalue. */
+ clk_ordinary = 1, /* An ordinary lvalue. */
+ clk_rvalueref = 2, /* An xvalue (rvalue formed using an rvalue reference) */
+ clk_class = 4, /* A prvalue of class or array type. */
+ clk_bitfield = 8, /* An lvalue for a bit-field. */
+ clk_packed = 16, /* An lvalue for a packed field. */
+ clk_implicit_rval = 1 << 5 /* An lvalue being treated as an xvalue. */
+};
+
+/* This type is used for parameters and variables which hold
+ combinations of the flags in enum cp_lvalue_kind_flags. */
+typedef int cp_lvalue_kind;
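+
+/* Usage sketch (editorial): lvalue_kind (declared later in this header)
+ returns a mask of the flags above, so callers test individual bits:
+
+ cp_lvalue_kind kind = lvalue_kind (expr);
+ if (kind & clk_bitfield)
+ ... expr is an lvalue referring to a bit-field ...
+*/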
+
+// forked from gcc/cp/name-lookup.h scope_kind
+
+/* The kinds of scopes we recognize. */
+enum scope_kind
+{
+ sk_block = 0, /* An ordinary block scope. This enumerator must
+ have the value zero because "cp_binding_level"
+ is initialized by using "memset" to set the
+ contents to zero, and the default scope kind
+ is "sk_block". */
+ sk_cleanup, /* A (pseudo-)scope for cleanup. It is pseudo
+ in that it is transparent to name lookup
+ activities. */
+ sk_try, /* A try-block. */
+ sk_catch, /* A catch-block. */
+ sk_for, /* The scope of the variable declared in a
+ init-statement. */
+ sk_cond, /* The scope of the variable declared in the condition
+ of an if or switch statement. */
+ sk_function_parms, /* The scope containing function parameters. */
+ sk_class, /* The scope containing the members of a class. */
+ sk_scoped_enum, /* The scope containing the enumerators of a C++11
+ scoped enumeration. */
+ sk_namespace, /* The scope containing the members of a
+ namespace, including the global scope. */
+ sk_template_parms, /* A scope for template parameters. */
+ sk_template_spec, /* Like sk_template_parms, but for an explicit
+ specialization. Since, by definition, an
+ explicit specialization is introduced by
+ "template <>", this scope is always empty. */
+ sk_transaction, /* A synchronized or atomic statement. */
+ sk_omp /* An OpenMP structured block. */
+};
+
+// forked from gcc/cp/cp-tree.h cp_built_in_function
+
+/* BUILT_IN_FRONTEND function codes. */
+enum cp_built_in_function
+{
+ CP_BUILT_IN_IS_CONSTANT_EVALUATED,
+ CP_BUILT_IN_INTEGER_PACK,
+ CP_BUILT_IN_IS_CORRESPONDING_MEMBER,
+ CP_BUILT_IN_IS_POINTER_INTERCONVERTIBLE_WITH_CLASS,
+ CP_BUILT_IN_SOURCE_LOCATION,
+ CP_BUILT_IN_LAST
+};
+
+// forked from gcc/cp/cp-tree.h warning_sentinel
+
+/* RAII sentinel to disable certain warnings during template substitution
+ and elsewhere. */
+
+class warning_sentinel
+{
+public:
+ int &flag;
+ int val;
+ warning_sentinel (int &flag, bool suppress = true) : flag (flag), val (flag)
+ {
+ if (suppress)
+ flag = 0;
+ }
+ ~warning_sentinel () { flag = val; }
+};
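+
+/* Usage sketch (editorial): suppress a warning flag for one lexical
+ scope; `warn_foo' is a hypothetical int-valued warning flag:
+
+ {
+ warning_sentinel ws (warn_foo); // saves the flag and zeroes it
+ ... code that must not emit the warning ...
+ } // destructor restores the saved value
+*/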
+
+// forked from gcc/cp/cp-tree.h uid_sensitive_constexpr_evaluation_checker
+
+/* Used to determine whether uid_sensitive_constexpr_evaluation_p was
+ called and returned true, indicating that we've restricted constexpr
+ evaluation in order to avoid UID generation. We use this to control
+ updates to the fold_cache and cv_cache. */
+
+struct uid_sensitive_constexpr_evaluation_checker
+{
+ const unsigned saved_counter;
+ uid_sensitive_constexpr_evaluation_checker ();
+ bool evaluation_restricted_p () const;
+};
+
+// forked from gcc/cp/cp-tree.h iloc_sentinel
+
+/* RAII sentinel to temporarily override input_location. This will not set
+ input_location to UNKNOWN_LOCATION or BUILTINS_LOCATION. */
+
+class iloc_sentinel
+{
+ location_t saved_loc;
+
+public:
+ iloc_sentinel (location_t loc) : saved_loc (input_location)
+ {
+ if (loc >= RESERVED_LOCATION_COUNT)
+ input_location = loc;
+ }
+ ~iloc_sentinel () { input_location = saved_loc; }
+};
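+
+/* Usage sketch (editorial): temporarily attribute diagnostics to LOC:
+
+ {
+ iloc_sentinel ils (loc); // overrides input_location unless LOC
+ // is a reserved location
+ ... emit diagnostics ...
+ } // input_location is restored here
+*/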
+
+// forked from gcc/cp/cp-tree.h ptrmem_cst
+
+struct GTY (()) ptrmem_cst
+{
+ struct tree_common common;
+ tree member;
+ location_t locus;
+};
+typedef struct ptrmem_cst *ptrmem_cst_t;
+
+// forked from gcc/cp/cp-tree.h named_decl_hash
+
+/* Hash traits for declarations. Hashes potential overload sets via
+ DECL_NAME. */
+
+struct named_decl_hash : ggc_remove<tree>
+{
+ typedef tree value_type; /* A DECL or OVERLOAD */
+ typedef tree compare_type; /* An identifier. */
+
+ inline static hashval_t hash (const value_type decl);
+ inline static bool equal (const value_type existing, compare_type candidate);
+
+ static const bool empty_zero_p = true;
+ static inline void mark_empty (value_type &p) { p = NULL_TREE; }
+ static inline bool is_empty (value_type p) { return !p; }
+
+ /* Nothing is deletable. Everything is insertable. */
+ static bool is_deleted (value_type) { return false; }
+ static void mark_deleted (value_type) { gcc_unreachable (); }
+};
+
+// forked from gcc/cp/cp-tree.h lang_decl_selector
+
+/* Discriminator values for lang_decl. */
+
+enum lang_decl_selector
+{
+ lds_min,
+ lds_fn,
+ lds_ns,
+ lds_parm,
+ lds_decomp
+};
+
+// forked from gcc/cp/cp-tree.h lang_decl_base
+
+/* Flags shared by all forms of DECL_LANG_SPECIFIC.
+
+ Some of the flags live here only to make lang_decl_min/fn smaller. Do
+ not make this struct larger than 32 bits. */
+
+struct GTY (()) lang_decl_base
+{
+ ENUM_BITFIELD (lang_decl_selector) selector : 3;
+ unsigned use_template : 2;
+ unsigned not_really_extern : 1; /* var or fn */
+ unsigned initialized_in_class : 1; /* var or fn */
+
+ unsigned threadprivate_or_deleted_p : 1; /* var or fn */
+ /* anticipated_p is no longer used for anticipated_decls (fn, type
+ or template). It is used as DECL_OMP_PRIVATIZED_MEMBER in
+ var. */
+ unsigned anticipated_p : 1;
+ unsigned friend_or_tls : 1; /* var, fn, type or template */
+ unsigned unknown_bound_p : 1; /* var */
+ unsigned odr_used : 1; /* var or fn */
+ unsigned concept_p : 1; /* applies to vars and functions */
+ unsigned var_declared_inline_p : 1; /* var */
+ unsigned dependent_init_p : 1; /* var */
+
+ /* The following apply to VAR, FUNCTION, TYPE, CONCEPT, & NAMESPACE
+ decls. */
+ unsigned module_purview_p : 1; /* in module purview (not GMF) */
+ unsigned module_import_p : 1; /* from an import */
+ unsigned module_entity_p : 1; /* is in the entity array &
+ hash. */
+ /* VAR_DECL or FUNCTION_DECL has attached decls. */
+ unsigned module_attached_p : 1;
+
+ /* 12 spare bits. */
+};
+
+/* True for DECL codes which have template info and access. */
+#define LANG_DECL_HAS_MIN(NODE) \
+ (VAR_OR_FUNCTION_DECL_P (NODE) || TREE_CODE (NODE) == FIELD_DECL \
+ || TREE_CODE (NODE) == CONST_DECL || TREE_CODE (NODE) == TYPE_DECL \
+ || TREE_CODE (NODE) == TEMPLATE_DECL || TREE_CODE (NODE) == USING_DECL \
+ || TREE_CODE (NODE) == CONCEPT_DECL)
+
+// forked from gcc/c-family/c-common.h stmt_tree_s
+
+/* Information about a statement tree. */
+
+struct GTY (()) stmt_tree_s
+{
+ /* A stack of statement lists being collected. */
+ vec<tree, va_gc> *x_cur_stmt_list;
+
+ /* In C++, nonzero if we should treat statements as full
+ expressions. In particular, this variable is nonzero if at the
+ end of a statement we should destroy any temporaries created
+ during that statement, and similarly, at the end of a block, any
+ local variables declared in this block. Normally this variable
+ is nonzero, since those are the normal semantics of C++.
+
+ This flag has no effect in C. */
+ int stmts_are_full_exprs_p;
+};
+
+// forked from gcc/c-family/c-common.h stmt_tree
+
+typedef struct stmt_tree_s *stmt_tree;
+
+// forked from gcc/c-family/c-common.h c_language_function
+
+/* Global state pertinent to the current function. Some C dialects
+ extend this structure with additional fields. */
+
+struct GTY (()) c_language_function
+{
+ /* While we are parsing the function, this contains information
+ about the statement-tree that we are building. */
+ struct stmt_tree_s x_stmt_tree;
+
+ /* Vector of locally defined typedefs, for
+ -Wunused-local-typedefs. */
+ vec<tree, va_gc> *local_typedefs;
+};
+
+// forked from gcc/cp/cp-tree.h omp_declare_target_attr
+
+struct GTY (()) omp_declare_target_attr
+{
+ bool attr_syntax;
+};
+
+// forked from gcc/cp/name-lookup.h cxx_binding
+
+/* Datatype that represents binding established by a declaration between
+ a name and a C++ entity. */
+struct GTY (()) cxx_binding
+{
+ /* Link to chain together various bindings for this name. */
+ cxx_binding *previous;
+ /* The non-type entity this name is bound to. */
+ tree value;
+ /* The type entity this name is bound to. */
+ tree type;
+
+ bool value_is_inherited : 1;
+ bool is_local : 1;
+ bool type_is_hidden : 1;
+};
+
+// forked from gcc/cp/name-lookup.h cxx_saved_binding
+
+/* Datatype used to temporarily save C++ bindings (for implicit
+ instantiation purposes and the like). Implemented in decl.cc. */
+struct GTY (()) cxx_saved_binding
+{
+ /* The name of the current binding. */
+ tree identifier;
+ /* The binding we're saving. */
+ cxx_binding *binding;
+ tree real_type_value;
+};
+
+// forked from gcc/cp/cp-tree.h saved_scope
+
+/* Global state. */
+
+struct GTY (()) saved_scope
+{
+ vec<cxx_saved_binding, va_gc> *old_bindings;
+ tree old_namespace;
+ vec<tree, va_gc> *decl_ns_list;
+ tree class_name;
+ tree class_type;
+ tree access_specifier;
+ tree function_decl;
+ vec<tree, va_gc> *lang_base;
+ tree lang_name;
+ tree template_parms;
+ tree x_saved_tree;
+
+ /* Only used for uses of this in trailing return type. */
+ tree x_current_class_ptr;
+ tree x_current_class_ref;
+
+ int x_processing_template_decl;
+ int x_processing_specialization;
+ int x_processing_constraint;
+ int suppress_location_wrappers;
+ BOOL_BITFIELD x_processing_explicit_instantiation : 1;
+ BOOL_BITFIELD need_pop_function_context : 1;
+
+ /* Nonzero if we are parsing the discarded statement of a constexpr
+ if-statement. */
+ BOOL_BITFIELD discarded_stmt : 1;
+ /* Nonzero if we are parsing or instantiating the compound-statement
+ of consteval if statement. Also set while processing an immediate
+ invocation. */
+ BOOL_BITFIELD consteval_if_p : 1;
+
+ int unevaluated_operand;
+ int inhibit_evaluation_warnings;
+ int noexcept_operand;
+ int ref_temp_count;
+
+ struct stmt_tree_s x_stmt_tree;
+
+ hash_map<tree, tree> *GTY ((skip)) x_local_specializations;
+ vec<omp_declare_target_attr, va_gc> *omp_declare_target_attribute;
+
+ struct saved_scope *prev;
+};
+
+extern GTY (()) struct saved_scope *scope_chain;
+
+// forked from gcc/cp/cp-tree.h named_label_hash
+
+struct named_label_entry; /* Defined in decl.cc. */
+
+struct named_label_hash : ggc_remove<named_label_entry *>
+{
+ typedef named_label_entry *value_type;
+ typedef tree compare_type; /* An identifier. */
+
+ inline static hashval_t hash (value_type);
+ inline static bool equal (const value_type, compare_type);
+
+ static const bool empty_zero_p = true;
+ inline static void mark_empty (value_type &p) { p = NULL; }
+ inline static bool is_empty (value_type p) { return !p; }
+
+ /* Nothing is deletable. Everything is insertable. */
+ inline static bool is_deleted (value_type) { return false; }
+ inline static void mark_deleted (value_type) { gcc_unreachable (); }
+};
+
+// forked from gcc/cp/cp-tree.h
+
+/* Global state pertinent to the current function. */
+
+struct GTY (()) language_function
+{
+ struct c_language_function base;
+
+ tree x_cdtor_label;
+ tree x_current_class_ptr;
+ tree x_current_class_ref;
+ tree x_eh_spec_block;
+ tree x_in_charge_parm;
+ tree x_vtt_parm;
+ tree x_return_value;
+
+ BOOL_BITFIELD returns_value : 1;
+ BOOL_BITFIELD returns_null : 1;
+ BOOL_BITFIELD returns_abnormally : 1;
+ BOOL_BITFIELD infinite_loop : 1;
+ BOOL_BITFIELD x_in_function_try_handler : 1;
+ BOOL_BITFIELD x_in_base_initializer : 1;
+
+ /* True if this function can throw an exception. */
+ BOOL_BITFIELD can_throw : 1;
+
+ BOOL_BITFIELD invalid_constexpr : 1;
+ BOOL_BITFIELD throwing_cleanup : 1;
+
+ hash_table<named_label_hash> *x_named_labels;
+
+ /* Tracking possibly infinite loops. This is a vec<tree> only because
+ vec<bool> doesn't work with gtype. */
+ vec<tree, va_gc> *infinite_loops;
+};
+
+// forked from gcc/c-family/c-common.h ref_operator
+
+/* The various names of operators that appear in error messages. */
+enum ref_operator
+{
+ /* NULL */
+ RO_NULL,
+ /* array indexing */
+ RO_ARRAY_INDEXING,
+ /* unary * */
+ RO_UNARY_STAR,
+ /* -> */
+ RO_ARROW,
+ /* implicit conversion */
+ RO_IMPLICIT_CONVERSION,
+ /* ->* */
+ RO_ARROW_STAR
+};
+
+// forked from gcc/cp/cp-tree.h lang_decl_min
+
+/* DECL_LANG_SPECIFIC for the above codes. */
+
+struct GTY (()) lang_decl_min
+{
+ struct lang_decl_base base; /* 32-bits. */
+
+ /* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is
+ THUNK_ALIAS.
+ In a FUNCTION_DECL for which DECL_THUNK_P does not hold,
+ VAR_DECL, TYPE_DECL, or TEMPLATE_DECL, this is
+ DECL_TEMPLATE_INFO. */
+ tree template_info;
+
+ /* In a DECL_THUNK_P FUNCTION_DECL, this is THUNK_VIRTUAL_OFFSET.
+ In a lambda-capture proxy VAR_DECL, this is DECL_CAPTURED_VARIABLE.
+ In a function-scope TREE_STATIC VAR_DECL or IMPLICIT_TYPEDEF_P TYPE_DECL,
+ this is DECL_DISCRIMINATOR.
+ In a DECL_LOCAL_DECL_P decl, this is the namespace decl it aliases.
+ Otherwise, in a class-scope DECL, this is DECL_ACCESS. */
+ tree access;
+};
+
+// forked from gcc/cp/cp-tree.h lang_decl_fn
+
+/* Additional DECL_LANG_SPECIFIC information for functions. */
+
+struct GTY (()) lang_decl_fn
+{
+ struct lang_decl_min min;
+
+ /* In an overloaded operator, this is the compressed operator code. */
+ unsigned ovl_op_code : 6;
+ unsigned global_ctor_p : 1;
+ unsigned global_dtor_p : 1;
+
+ unsigned static_function : 1;
+ unsigned pure_virtual : 1;
+ unsigned defaulted_p : 1;
+ unsigned has_in_charge_parm_p : 1;
+ unsigned has_vtt_parm_p : 1;
+ unsigned pending_inline_p : 1;
+ unsigned nonconverting : 1;
+ unsigned thunk_p : 1;
+
+ unsigned this_thunk_p : 1;
+ unsigned omp_declare_reduction_p : 1;
+ unsigned has_dependent_explicit_spec_p : 1;
+ unsigned immediate_fn_p : 1;
+ unsigned maybe_deleted : 1;
+ unsigned coroutine_p : 1;
+ unsigned implicit_constexpr : 1;
+
+ unsigned spare : 9;
+
+ /* 32-bits padding on 64-bit host. */
+
+ /* For a non-thunk function decl, this is a tree list of
+ friendly classes. For a thunk function decl, it is the
+ thunked to function decl. */
+ tree befriending_classes;
+
+ /* For a virtual FUNCTION_DECL for which
+ DECL_THIS_THUNK_P does not hold, this is DECL_THUNKS. Both
+ this pointer and result pointer adjusting thunks are
+ chained here. This pointer thunks to return pointer thunks
+ will be chained on the return pointer thunk.
+ For a DECL_CONSTRUCTOR_P FUNCTION_DECL, this is the base from
+ which we inherit. Otherwise, it is the class in which a
+ (namespace-scope) friend is defined (if any). */
+ tree context;
+
+ union lang_decl_u5
+ {
+ /* In a non-thunk FUNCTION_DECL, this is DECL_CLONED_FUNCTION. */
+ tree GTY ((tag ("0"))) cloned_function;
+
+ /* In a FUNCTION_DECL for which THUNK_P holds this is the
+ THUNK_FIXED_OFFSET. */
+ HOST_WIDE_INT GTY ((tag ("1"))) fixed_offset;
+ } GTY ((desc ("%1.thunk_p"))) u5;
+
+ union lang_decl_u3
+ {
+ struct cp_token_cache *GTY ((tag ("1"))) pending_inline_info;
+ tree GTY ((tag ("0"))) saved_auto_return_type;
+ } GTY ((desc ("%1.pending_inline_p"))) u;
+};
+
+// forked from gcc/cp/cp-tree.h lang_decl_ns
+
+/* DECL_LANG_SPECIFIC for namespaces. */
+
+struct GTY (()) lang_decl_ns
+{
+ struct lang_decl_base base; /* 32 bits. */
+
+ /* Inline children. Needs to be va_gc, because of PCH. */
+ vec<tree, va_gc> *inlinees;
+
+ /* Hash table of bound decls. It'd be nice to have this inline, but
+ as the hash_map has a dtor, we can't then put this struct into a
+ union (until moving to c++11). */
+ hash_table<named_decl_hash> *bindings;
+};
+
+// forked from gcc/cp/cp-tree.h lang_decl_parm
+
+/* DECL_LANG_SPECIFIC for parameters. */
+
+struct GTY (()) lang_decl_parm
+{
+ struct lang_decl_base base; /* 32 bits. */
+ int level;
+ int index;
+};
+
+// forked from gcc/cp/cp-tree.h lang_decl_decomp
+
+/* Additional DECL_LANG_SPECIFIC information for structured bindings. */
+
+struct GTY (()) lang_decl_decomp
+{
+ struct lang_decl_min min;
+ /* The artificial underlying "e" variable of the structured binding
+ variable. */
+ tree base;
+};
+
+// forked from gcc/cp/cp-tree.h lang_decl
+
+/* DECL_LANG_SPECIFIC for all types. It would be nice to just make this a
+ union rather than a struct containing a union as its only field, but
+ tree.h declares it as a struct. */
+
+struct GTY (()) lang_decl
+{
+ union GTY ((desc ("%h.base.selector"))) lang_decl_u
+ {
+ /* Nothing of only the base type exists. */
+ struct lang_decl_base GTY ((default)) base;
+ struct lang_decl_min GTY ((tag ("lds_min"))) min;
+ struct lang_decl_fn GTY ((tag ("lds_fn"))) fn;
+ struct lang_decl_ns GTY ((tag ("lds_ns"))) ns;
+ struct lang_decl_parm GTY ((tag ("lds_parm"))) parm;
+ struct lang_decl_decomp GTY ((tag ("lds_decomp"))) decomp;
+ } u;
+};
+
+// forked from gcc/c-family/c-common.h c_fileinfo
+
+/* Information recorded about each file examined during compilation. */
+
+struct c_fileinfo
+{
+ int time; /* Time spent in the file. */
+
+ /* Flags used only by C++.
+ INTERFACE_ONLY nonzero means that we are in an "interface" section
+ of the compiler. INTERFACE_UNKNOWN nonzero means we cannot trust
+ the value of INTERFACE_ONLY. If INTERFACE_UNKNOWN is zero and
+ INTERFACE_ONLY is zero, it means that we are responsible for
+ exporting definitions that others might need. */
+ short interface_only;
+ short interface_unknown;
+};
+
+// forked from gcc/c-family/c-common.h c_common_identifier
+
+/* Identifier part common to the C front ends. Inherits from
+ tree_identifier, despite appearances. */
+struct GTY (()) c_common_identifier
+{
+ struct tree_common common;
+ struct cpp_hashnode node; // from cpplib.h
+};
+
+// forked from gcc/cp/cp-tree.h lang_identifier
+
+/* Language-dependent contents of an identifier. */
+
+struct GTY (()) lang_identifier
+{
+ struct c_common_identifier c_common;
+ cxx_binding *bindings;
+};
+
+// forked from gcc/cp/cp-tree.h tree_overload
+
+/* OVL_HIDDEN_P nodes come before other nodes. */
+
+struct GTY (()) tree_overload
+{
+ struct tree_common common;
+ tree function;
+};
+
+// forked from gcc/cp/cp-tree.h ovl_iterator
+
+class ovl_iterator
+{
+ tree ovl;
+ const bool allow_inner; /* Only used when checking. */
+
+public:
+ explicit ovl_iterator (tree o, bool allow = false)
+ : ovl (o), allow_inner (allow)
+ {}
+
+public:
+ operator bool () const { return ovl; }
+ ovl_iterator &operator++ ()
+ {
+ ovl = TREE_CODE (ovl) != OVERLOAD ? NULL_TREE : OVL_CHAIN (ovl);
+ return *this;
+ }
+ tree operator* () const
+ {
+ tree fn = TREE_CODE (ovl) != OVERLOAD ? ovl : OVL_FUNCTION (ovl);
+
+ /* Check this is not an unexpected 2-dimensional overload. */
+ gcc_checking_assert (allow_inner || TREE_CODE (fn) != OVERLOAD);
+
+ return fn;
+ }
+ bool operator== (const ovl_iterator &o) const { return ovl == o.ovl; }
+ tree get_using () const
+ {
+ gcc_checking_assert (using_p ());
+ return ovl;
+ }
+
+public:
+ /* Whether this overload was introduced by a using decl. */
+ bool using_p () const
+ {
+ return (TREE_CODE (ovl) == USING_DECL
+ || (TREE_CODE (ovl) == OVERLOAD && OVL_USING_P (ovl)));
+ }
+ /* Whether this using is being exported. */
+ bool exporting_p () const { return OVL_EXPORT_P (get_using ()); }
+
+ bool hidden_p () const
+ {
+ return TREE_CODE (ovl) == OVERLOAD && OVL_HIDDEN_P (ovl);
+ }
+
+public:
+ tree remove_node (tree head) { return remove_node (head, ovl); }
+ tree reveal_node (tree head) { return reveal_node (head, ovl); }
+
+protected:
+ /* If we have a nested overload, point at the inner overload and
+ return the next link on the outer one. */
+ tree maybe_push ()
+ {
+ tree r = NULL_TREE;
+
+ if (ovl && TREE_CODE (ovl) == OVERLOAD && OVL_NESTED_P (ovl))
+ {
+ r = OVL_CHAIN (ovl);
+ ovl = OVL_FUNCTION (ovl);
+ }
+ return r;
+ }
+ /* Restore an outer nested overload. */
+ void pop (tree outer)
+ {
+ gcc_checking_assert (!ovl);
+ ovl = outer;
+ }
+
+private:
+ /* We make these static functions to avoid the address of the
+ iterator escaping the local context. */
+ static tree remove_node (tree head, tree node);
+ static tree reveal_node (tree ovl, tree node);
+};
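+
+/* Usage sketch (editorial): walk a flat overload set OVL:
+
+ for (ovl_iterator iter (ovl); iter; ++iter)
+ {
+ tree fn = *iter;
+ ...
+ }
+*/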
+
+// forked from gcc/cp/cp-tree.h lkp_iterator
+
+/* Iterator over a (potentially) 2 dimensional overload, which is
+ produced by name lookup. */
+
+class lkp_iterator : public ovl_iterator
+{
+ typedef ovl_iterator parent;
+
+ tree outer;
+
+public:
+ explicit lkp_iterator (tree o) : parent (o, true), outer (maybe_push ()) {}
+
+public:
+ lkp_iterator &operator++ ()
+ {
+ bool repush = !outer;
+
+ if (!parent::operator++ () && !repush)
+ {
+ pop (outer);
+ repush = true;
+ }
+
+ if (repush)
+ outer = maybe_push ();
+
+ return *this;
+ }
+};
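+
+/* Usage sketch (editorial): lkp_iterator is used like ovl_iterator, but
+ also descends into the nested overloads that name lookup can produce:
+
+ for (lkp_iterator iter (lookup_result); iter; ++iter)
+ {
+ tree fn = *iter;
+ ...
+ }
+*/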
+
+// forked from gcc/cp/cp-tree.h tree_pair_s
+
+struct GTY (()) tree_pair_s
+{
+ tree purpose;
+ tree value;
+};
+
+// forked from gcc/cp/cp-tree.h tree_pair_p
+
+typedef tree_pair_s *tree_pair_p;
+
+// forked from gcc/cp/cp-tree.h lang_type
+
+/* This structure provides additional information above and beyond
+ what is provided in the ordinary tree_type. In the past, we used it
+ for the types of class types, template parameters types, typename
+ types, and so forth. However, there can be many (tens to hundreds
+ of thousands) of template parameter types in a compilation, and
+ there's no need for this additional information in that case.
+ Therefore, we now use this data structure only for class types.
+
+ In the past, it was thought that there would be relatively few
+ class types. However, in the presence of heavy use of templates,
+ many (i.e., thousands) of classes can easily be generated.
+ Therefore, we should endeavor to keep the size of this structure to
+ a minimum. */
+struct GTY (()) lang_type
+{
+ unsigned char align;
+
+ unsigned has_type_conversion : 1;
+ unsigned has_copy_ctor : 1;
+ unsigned has_default_ctor : 1;
+ unsigned const_needs_init : 1;
+ unsigned ref_needs_init : 1;
+ unsigned has_const_copy_assign : 1;
+ unsigned use_template : 2;
+
+ unsigned has_mutable : 1;
+ unsigned com_interface : 1;
+ unsigned non_pod_class : 1;
+ unsigned nearly_empty_p : 1;
+ unsigned user_align : 1;
+ unsigned has_copy_assign : 1;
+ unsigned has_new : 1;
+ unsigned has_array_new : 1;
+
+ unsigned gets_delete : 2;
+ unsigned interface_only : 1;
+ unsigned interface_unknown : 1;
+ unsigned contains_empty_class_p : 1;
+ unsigned anon_aggr : 1;
+ unsigned non_zero_init : 1;
+ unsigned empty_p : 1;
+ /* 32 bits allocated. */
+
+ unsigned vec_new_uses_cookie : 1;
+ unsigned declared_class : 1;
+ unsigned diamond_shaped : 1;
+ unsigned repeated_base : 1;
+ unsigned being_defined : 1;
+ unsigned debug_requested : 1;
+ unsigned fields_readonly : 1;
+ unsigned ptrmemfunc_flag : 1;
+
+ unsigned lazy_default_ctor : 1;
+ unsigned lazy_copy_ctor : 1;
+ unsigned lazy_copy_assign : 1;
+ unsigned lazy_destructor : 1;
+ unsigned has_const_copy_ctor : 1;
+ unsigned has_complex_copy_ctor : 1;
+ unsigned has_complex_copy_assign : 1;
+ unsigned non_aggregate : 1;
+
+ unsigned has_complex_dflt : 1;
+ unsigned has_list_ctor : 1;
+ unsigned non_std_layout : 1;
+ unsigned is_literal : 1;
+ unsigned lazy_move_ctor : 1;
+ unsigned lazy_move_assign : 1;
+ unsigned has_complex_move_ctor : 1;
+ unsigned has_complex_move_assign : 1;
+
+ unsigned has_constexpr_ctor : 1;
+ unsigned unique_obj_representations : 1;
+ unsigned unique_obj_representations_set : 1;
+ bool erroneous : 1;
+ bool non_pod_aggregate : 1;
+
+ /* When adding a flag here, consider whether or not it ought to
+ apply to a template instance if it applies to the template. If
+ so, make sure to copy it in instantiate_class_template! */
+
+ /* There are some bits left to fill out a 32-bit word. Keep track
+ of this by updating the size of this bitfield whenever you add or
+ remove a flag. */
+ unsigned dummy : 3;
+
+ tree primary_base;
+ vec<tree_pair_s, va_gc> *vcall_indices;
+ tree vtables;
+ tree typeinfo_var;
+ vec<tree, va_gc> *vbases;
+ tree as_base;
+ vec<tree, va_gc> *pure_virtuals;
+ tree friend_classes;
+ vec<tree, va_gc> *GTY ((reorder ("resort_type_member_vec"))) members;
+ tree key_method;
+ tree decl_list;
+ tree befriending_classes;
+ /* In a RECORD_TYPE, information specific to Objective-C++, such
+ as a list of adopted protocols or a pointer to a corresponding
+ @interface. See objc/objc-act.h for details. */
+ tree objc_info;
+ /* FIXME reuse another field? */
+ tree lambda_expr;
+};
+
+namespace Rust {
+
+// forked from gcc/cp/cp-tree.h cp_ref_qualifier
+
+enum rs_ref_qualifier
+{
+ REF_QUAL_NONE = 0,
+ REF_QUAL_LVALUE = 1,
+ REF_QUAL_RVALUE = 2
+};
+
+// forked from gcc/cp/cp-tree.h tsubst_flags
+
+/* Bitmask flags to control type substitution. */
+enum tsubst_flags
+{
+ tf_none = 0, /* nothing special */
+ tf_error = 1 << 0, /* give error messages */
+ tf_warning = 1 << 1, /* give warnings too */
+ tf_ignore_bad_quals = 1 << 2, /* ignore bad cvr qualifiers */
+ tf_keep_type_decl = 1 << 3, /* retain typedef type decls
+ (make_typename_type use) */
+ tf_ptrmem_ok = 1 << 4, /* pointers to member ok (internal
+ instantiate_type use) */
+ tf_user = 1 << 5, /* found template must be a user template
+ (lookup_template_class use) */
+ tf_conv = 1 << 6, /* We are determining what kind of
+ conversion might be permissible,
+ not actually performing the
+ conversion. */
+ tf_decltype = 1 << 7, /* We are the operand of decltype.
+ Used to implement the special rules
+ for calls in decltype (5.2.2/11). */
+ tf_partial = 1 << 8, /* Doing initial explicit argument
+ substitution in fn_type_unification. */
+ tf_fndecl_type = 1 << 9, /* Substituting the type of a function
+ declaration. */
+ tf_no_cleanup = 1 << 10, /* Do not build a cleanup
+ (build_target_expr and friends) */
+ tf_norm = 1 << 11, /* Build diagnostic information during
+ constraint normalization. */
+ /* Convenient substitution flags combinations. */
+ tf_warning_or_error = tf_warning | tf_error
+};
+
+// forked from gcc/cp/cp-tree.h cp_identifier_kind
+
+/* Kinds of identifiers. Values are carefully chosen. */
+enum cp_identifier_kind
+{
+ cik_normal = 0, /* Not a special identifier. */
+ cik_keyword = 1, /* A keyword. */
+ cik_ctor = 2, /* Constructor (in-chg, complete or base). */
+ cik_dtor = 3, /* Destructor (in-chg, deleting, complete or
+ base). */
+ cik_simple_op = 4, /* Non-assignment operator name. */
+ cik_assign_op = 5, /* An assignment operator name. */
+ cik_conv_op = 6, /* Conversion operator name. */
+ cik_reserved_for_udlit = 7, /* Not yet in use */
+ cik_max
+};
+
+// forked from gcc/cp/cp-tree.h tag_types
+
+/* An enumeration of the kind of tags that C++ accepts. */
+enum tag_types
+{
+ none_type = 0, /* Not a tag type. */
+ record_type, /* "struct" types. */
+ class_type, /* "class" types. */
+ union_type, /* "union" types. */
+ enum_type, /* "enum" types. */
+ typename_type, /* "typename" types. */
+ scope_type /* namespace or tagged type name followed by :: */
+};
+
+// forked from gcc/cp/cp-tree.h tsubst_flags_t
+
+/* This type is used for parameters and variables which hold
+ combinations of the flags in enum tsubst_flags. */
+typedef int tsubst_flags_t;
+
+// forked from gcc/cp/cvt.cc convert_to_void
+//
+// When an expression is used in a void context, its value is discarded and
+// no lvalue-rvalue and similar conversions happen [expr.static.cast/4,
+// stmt.expr/1, expr.comma/1]. This permits dereferencing an incomplete type
+// in a void context. The C++ standard does not define what an `access' to an
+// object is, but there is reason to believe that it is the lvalue to rvalue
+// conversion -- if it were not, `*&*p = 1' would violate [expr]/4 in that it
+// accesses `*p' not to calculate the value to be stored. But, dcl.type.cv/8
+// indicates that volatile semantics should be the same between C and C++
+// wherever possible. C leaves it implementation-defined as to what
+// constitutes an access to a volatile. So, we interpret `*vp' as a read of
+// the volatile object `vp' points to, unless that is an incomplete type. For
+// volatile references we do not do this interpretation, because that would
+// make it impossible to ignore the reference return value from functions. We
+// issue warnings in the confusing cases.
+//
+// The IMPLICIT is ICV_CAST when the user is explicitly converting an
+// expression to void via a cast. If an expression is being implicitly
+// converted, IMPLICIT indicates the context of the implicit conversion.
+
+/* Possible cases of implicit or explicit bad conversions to void. */
+enum impl_conv_void
+{
+ ICV_CAST, /* (explicit) conversion to void */
+ ICV_SECOND_OF_COND, /* second operand of conditional expression */
+ ICV_THIRD_OF_COND, /* third operand of conditional expression */
+ ICV_RIGHT_OF_COMMA, /* right operand of comma operator */
+ ICV_LEFT_OF_COMMA, /* left operand of comma operator */
+ ICV_STATEMENT, /* statement */
+ ICV_THIRD_IN_FOR /* for increment expression */
+};
+
+/* BUILT_IN_FRONTEND function codes. */
+enum rs_built_in_function
+{
+ RS_BUILT_IN_IS_CONSTANT_EVALUATED,
+ RS_BUILT_IN_INTEGER_PACK,
+ RS_BUILT_IN_IS_CORRESPONDING_MEMBER,
+ RS_BUILT_IN_IS_POINTER_INTERCONVERTIBLE_WITH_CLASS,
+ RS_BUILT_IN_SOURCE_LOCATION,
+ RS_BUILT_IN_LAST
+};
+
+// forked from gcc/cp/cp-tree.h compare_bounds_t
+
+/* in typeck.cc */
+/* Says how we should behave when comparing two arrays one of which
+ has unknown bounds. */
+enum compare_bounds_t
+{
+ bounds_none,
+ bounds_either,
+ bounds_first
+};
+
+extern tree
+convert_to_void (tree expr, impl_conv_void implicit);
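+
+/* Usage sketch (editorial): an expression statement discards its value,
+ so a caller would convert it with the ICV_STATEMENT context:
+
+ expr = convert_to_void (expr, ICV_STATEMENT);
+
+ which also drives the diagnostics described above (for example via
+ maybe_warn_nodiscard, declared below). */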
+
+// The lvalue-to-rvalue conversion (7.1) is applied if and only if the
+// expression is a glvalue of volatile-qualified type and it is one of the
+// following:
+// * ( expression ), where expression is one of these expressions,
+// * id-expression (8.1.4),
+// * subscripting (8.2.1),
+// * class member access (8.2.5),
+// * indirection (8.3.1),
+// * pointer-to-member operation (8.5),
+// * conditional expression (8.16) where both the second and the third
+// operands are one of these expressions, or
+// * comma expression (8.19) where the right operand is one of these
+// expressions.
+extern tree
+mark_discarded_use (tree expr);
+
+// Mark EXP as read, not just set, for set but not used -Wunused warning
+// purposes.
+extern void
+mark_exp_read (tree exp);
+
+// We've seen an actual use of EXPR. Possibly replace an outer variable
+// reference inside with its constant value or a lambda capture.
+extern tree
+mark_use (tree expr, bool rvalue_p, bool read_p, location_t loc,
+ bool reject_builtin);
+
+// Called whenever the expression EXPR is used in an rvalue context.
+// When REJECT_BUILTIN is true the expression is checked to make sure
+// it doesn't make it possible to obtain the address of a GCC built-in
+// function with no library fallback (or any of its bits, such as in
+// a conversion to bool).
+extern tree
+mark_rvalue_use (tree, location_t = UNKNOWN_LOCATION,
+ bool reject_builtin = true);
+
+// Called whenever an expression is used in an lvalue context.
+extern tree
+mark_lvalue_use (tree expr);
+
+// As above, but don't consider this use a read.
+extern tree
+mark_lvalue_use_nonread (tree expr);
+
+// We are using a reference VAL for its value. Bash that reference all the way
+// down to its lowest form.
+extern tree
+convert_from_reference (tree val);
+
+// Subroutine of convert_to_void. Warn if we're discarding something with
+// attribute [[nodiscard]].
+extern void
+maybe_warn_nodiscard (tree expr, impl_conv_void implicit);
+
+extern location_t
+expr_loc_or_loc (const_tree t, location_t or_loc);
+
+extern location_t
+expr_loc_or_input_loc (const_tree t);
+
+// FN is the callee of a CALL_EXPR or AGGR_INIT_EXPR; return the FUNCTION_DECL
+// if we can.
+extern tree
+get_fndecl_from_callee (tree fn);
+
+// FIXME some helpers from HIRCompileBase could probably be moved here over time
+
+// Return an expression for the address of BASE[INDEX], used in offset intrinsic
+extern tree
+pointer_offset_expression (tree base_tree, tree index_tree, location_t locus);
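+
+/* Editorial note: a minimal sketch of what such a helper computes,
+ assuming the usual GCC tree builders (the actual definition lives in
+ the corresponding .cc file):
+
+ tree elt_size = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (base_tree)));
+ tree offset = fold_build2_loc (locus, MULT_EXPR, sizetype,
+ fold_convert (sizetype, index_tree),
+ fold_convert (sizetype, elt_size));
+ return fold_build2_loc (locus, POINTER_PLUS_EXPR,
+ TREE_TYPE (base_tree), base_tree, offset);
+*/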
+
+/* A tree node, together with a location, so that we can track locations
+ (and ranges) during parsing.
+
+ The location is redundant for node kinds that have locations,
+ but not all node kinds do (e.g. constants, and references to
+ params, locals, etc), so we stash a copy here. */
+
+extern location_t rs_expr_location (const_tree);
+
+extern int
+is_empty_class (tree type);
+
+extern tree array_type_nelts_top (tree);
+
+extern bool
+is_really_empty_class (tree, bool);
+
+extern bool builtin_valid_in_constant_expr_p (const_tree);
+
+extern bool maybe_constexpr_fn (tree);
+
+extern bool var_in_maybe_constexpr_fn (tree);
+
+extern int
+rs_type_quals (const_tree type);
+
+inline bool type_unknown_p (const_tree);
+
+extern bool decl_maybe_constant_var_p (tree);
+
+extern void
+init_modules ();
+
+extern bool var_in_constexpr_fn (tree);
+
+inline tree ovl_first (tree) ATTRIBUTE_PURE;
+
+extern tree
+lookup_add (tree fns, tree lookup);
+
+extern tree
+ovl_make (tree fn, tree next = NULL_TREE);
+
+extern int is_overloaded_fn (tree) ATTRIBUTE_PURE;
+
+extern bool maybe_add_lang_type_raw (tree);
+
+extern rs_ref_qualifier type_memfn_rqual (const_tree);
+
+extern bool builtin_pack_fn_p (tree);
+
+extern tree make_conv_op_name (tree);
+
+extern int type_memfn_quals (const_tree);
+
+struct c_fileinfo *
+get_fileinfo (const char *);
+
+extern tree
+cxx_make_type (enum tree_code CXX_MEM_STAT_INFO);
+
+extern tree
+build_cplus_array_type (tree, tree, int is_dep = -1);
+
+extern bool is_byte_access_type (tree);
+
+extern bool
+comptypes (tree, tree, int);
+
+extern tree canonical_eh_spec (tree);
+
+extern int cp_tree_operand_length (const_tree);
+
+extern bool rs_tree_equal (tree, tree);
+
+extern bool compparms (const_tree, const_tree);
+
+extern tree
+rs_build_qualified_type_real (tree, int, tsubst_flags_t);
+#define rs_build_qualified_type(TYPE, QUALS) \
+ rs_build_qualified_type_real ((TYPE), (QUALS), tf_warning_or_error)
+extern bool cv_qualified_p (const_tree);
+
+extern bool similar_type_p (tree, tree);
+
+extern bool
+vector_targets_convertible_p (const_tree t1, const_tree t2);
+
+extern bool same_type_ignoring_top_level_qualifiers_p (tree, tree);
+
+extern bool comp_ptr_ttypes_const (tree, tree, compare_bounds_t);
+
+extern tree
+get_class_binding_direct (tree, tree, bool want_type = false);
+
+extern tree skip_artificial_parms_for (const_tree, tree);
+
+extern void
+lang_check_failed (const char *, int,
+ const char *) ATTRIBUTE_NORETURN ATTRIBUTE_COLD;
+
+extern tree default_init_uninitialized_part (tree);
+
+extern bool type_has_non_user_provided_default_constructor (tree);
+
+extern bool default_ctor_p (const_tree);
+
+extern bool user_provided_p (tree);
+
+extern bool sufficient_parms_p (const_tree);
+
+extern tree next_initializable_field (tree);
+
+extern tree in_class_defaulted_default_constructor (tree);
+
+extern bool is_instantiation_of_constexpr (tree);
+
+extern bool
+check_for_uninitialized_const_var (tree, bool, tsubst_flags_t);
+
+extern bool reduced_constant_expression_p (tree);
+
+extern tree cv_unqualified (tree);
+
+extern tree cp_get_callee (tree);
+extern tree rs_get_callee_fndecl_nofold (tree);
+
+extern bool is_nondependent_static_init_expression (tree);
+
+extern tree build_nop (tree, tree);
+
+extern bool scalarish_type_p (const_tree);
+
+extern tree is_bitfield_expr_with_lowered_type (const_tree);
+
+extern tree convert_bitfield_to_declared_type (tree);
+
+extern tree
+cp_fold_maybe_rvalue (tree, bool);
+
+extern tree maybe_undo_parenthesized_ref (tree);
+
+extern tree
+fold_offsetof (tree, tree = size_type_node, tree_code ctx = ERROR_MARK);
+
+extern tree cp_truthvalue_conversion (tree, tsubst_flags_t);
+
+extern tree
+fold_non_dependent_expr (tree, tsubst_flags_t = tf_warning_or_error,
+ bool = false, tree = NULL_TREE);
+
+extern int char_type_p (tree);
+
+extern bool instantiation_dependent_expression_p (tree);
+
+extern bool type_has_nontrivial_copy_init (const_tree);
+
+extern tree build_local_temp (tree);
+
+extern bool is_normal_capture_proxy (tree);
+
+extern bool reject_gcc_builtin (const_tree, location_t = UNKNOWN_LOCATION);
+
+extern tree resolve_nondeduced_context (tree, tsubst_flags_t);
+
+extern void cxx_incomplete_type_diagnostic (location_t, const_tree, const_tree,
+ diagnostic_t);
+
+extern void cxx_incomplete_type_error (location_t, const_tree, const_tree);
+
+extern bool invalid_nonstatic_memfn_p (location_t, tree, tsubst_flags_t);
+
+extern bool really_overloaded_fn (tree) ATTRIBUTE_PURE;
+
+extern tree resolve_nondeduced_context_or_error (tree, tsubst_flags_t);
+
+extern tree instantiate_non_dependent_or_null (tree);
+
+extern void cxx_incomplete_type_inform (const_tree);
+
+extern tree strip_top_quals (tree);
+
+extern bool undeduced_auto_decl (tree);
+
+extern bool require_deduced_type (tree, tsubst_flags_t = tf_warning_or_error);
+
+extern bool decl_constant_var_p (tree);
+
+extern tree build_new_constexpr_heap_type (tree, tree, tree);
+
+extern bool is_empty_field (tree);
+
+extern bool
+in_immediate_context ();
+
+extern tree cp_get_callee_fndecl_nofold (tree);
+
+extern bool
+cxx_mark_addressable (tree, bool = false);
+
+extern tree fold_builtin_source_location (location_t);
+
+extern tree build_address (tree);
+
+extern bool bitfield_p (const_tree);
+
+extern tree rvalue (tree);
+
+extern bool glvalue_p (const_tree);
+
+extern cp_lvalue_kind lvalue_kind (const_tree);
+
+extern tree
+decl_constant_value (tree, bool);
+
+extern tree lookup_enumerator (tree, tree);
+
+extern int
+is_class_type (tree, int);
+
+extern tree braced_lists_to_strings (tree, tree);
+
+extern tree
+fold_builtin_is_pointer_inverconvertible_with_class (location_t, int, tree *);
+
+extern bool layout_compatible_type_p (tree, tree);
+
+extern tree finish_underlying_type (tree);
+
+extern tree
+c_common_type_for_mode (machine_mode, int);
+
+extern bool std_layout_type_p (const_tree);
+
+extern tree complete_type (tree);
+
+extern tree complete_type_or_else (tree, tree);
+
+extern void note_failed_type_completion_for_satisfaction (tree);
+
+extern tree complete_type_or_maybe_complain (tree, tree, tsubst_flags_t);
+
+extern bool
+next_common_initial_seqence (tree &, tree &);
+
+extern bool null_member_pointer_value_p (tree);
+
+extern tree
+fold_builtin_is_corresponding_member (location_t, int, tree *);
+
+extern tree cp_fold_rvalue (tree);
+
+extern tree
+maybe_constant_value (tree, tree = NULL_TREE, bool = false);
+
+extern tree lvalue_type (tree);
+
+extern void lvalue_error (location_t, enum lvalue_use);
+
+extern tree get_first_fn (tree) ATTRIBUTE_PURE;
+
+extern void explain_non_literal_class (tree);
+
+extern bool reference_related_p (tree, tree);
+
+extern bool ordinary_char_type_p (tree);
+
+extern bool array_string_literal_compatible_p (tree, tree);
+
+// forked from gcc/cp/cp-tree.h
+
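+/* Strictness levels for comparing exception specifications; used with
+   comp_except_specs.  */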
+enum
+{
+ ce_derived,
+ ce_type,
+ ce_normal,
+ ce_exact
+};
+
+extern tree
+rs_build_qualified_type_real (tree, int, tsubst_flags_t);
+#define rs_build_qualified_type(TYPE, QUALS) \
+ rs_build_qualified_type_real ((TYPE), (QUALS), tf_warning_or_error)
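+
+// Usage sketch (illustrative; `type' is a hypothetical tree): build the
+// const-qualified variant of a type with the default complaint flags:
+//
+//   tree ctype = rs_build_qualified_type (type, TYPE_QUAL_CONST);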
+
+extern tree
+rs_walk_subtrees (tree *, int *, walk_tree_fn, void *, hash_set<tree> *);
+#define rs_walk_tree(tp, func, data, pset) \
+ walk_tree_1 (tp, func, data, pset, rs_walk_subtrees)
+#define rs_walk_tree_without_duplicates(tp, func, data) \
+ walk_tree_without_duplicates_1 (tp, func, data, rs_walk_subtrees)
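+
+// Usage sketch (illustrative): count CALL_EXPRs in an expression tree.
+// `count_calls' and `expr' are hypothetical; the callback has the standard
+// walk_tree_fn signature and returns NULL_TREE to keep walking.
+//
+//   static tree
+//   count_calls (tree *tp, int *walk_subtrees, void *data)
+//   {
+//     if (TREE_CODE (*tp) == CALL_EXPR)
+//       ++*static_cast<int *> (data);
+//     return NULL_TREE;
+//   }
+//
+//   int n = 0;
+//   rs_walk_tree_without_duplicates (&expr, count_calls, &n);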
+
+// forked from gcc/cp/cp-tree.h cp_expr_loc_or_loc
+
+inline location_t
+rs_expr_loc_or_loc (const_tree t, location_t or_loc)
+{
+ location_t loc = rs_expr_location (t);
+ if (loc == UNKNOWN_LOCATION)
+ loc = or_loc;
+ return loc;
+}
+
+// forked from gcc/cp/cp-tree.h cp_expr_loc_or_input_loc
+
+inline location_t
+rs_expr_loc_or_input_loc (const_tree t)
+{
+ return rs_expr_loc_or_loc (t, input_location);
+}
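+
+// Usage sketch (illustrative; `expr' is hypothetical): prefer the
+// expression's own location in a diagnostic, falling back to the current
+// input_location:
+//
+//   error_at (rs_expr_loc_or_input_loc (expr), "invalid use of %qE", expr);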
+
+// forked from gcc/cp/cp-tree.h type_unknown_p
+
+inline bool
+type_unknown_p (const_tree expr)
+{
+ return TREE_TYPE (expr) == unknown_type_node;
+}
+
+// forked from gcc/cp/cp-tree.h ovl_first
+
+/* Inline bodies. */
+
+inline tree
+ovl_first (tree node)
+{
+ while (TREE_CODE (node) == OVERLOAD)
+ node = OVL_FUNCTION (node);
+ return node;
+}
+
+// forked from gcc/cp/cp-tree.h type_of_this_parm
+
+/* Return the type of the `this' parameter of FNTYPE. */
+
+inline tree
+type_of_this_parm (const_tree fntype)
+{
+ function_args_iterator iter;
+ gcc_assert (TREE_CODE (fntype) == METHOD_TYPE);
+ function_args_iter_init (&iter, fntype);
+ return function_args_iter_cond (&iter);
+}
+
+// forked from gcc/cp/cp-tree.h class_of_this_parm
+
+/* Return the class of the `this' parameter of FNTYPE. */
+
+inline tree
+class_of_this_parm (const_tree fntype)
+{
+ return TREE_TYPE (type_of_this_parm (fntype));
+}
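+
+// Usage sketch (illustrative; `fndecl' is a hypothetical FUNCTION_DECL
+// whose TREE_TYPE is a METHOD_TYPE):
+//
+//   tree klass = class_of_this_parm (TREE_TYPE (fndecl));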
+
+// forked from gcc/cp/cp-tree.h identifier_p
+
+/* Return a typed pointer version of T if it designates a
+ C++ front-end identifier. */
+inline lang_identifier *
+identifier_p (tree t)
+{
+ if (TREE_CODE (t) == IDENTIFIER_NODE)
+ return (lang_identifier *) t;
+ return NULL;
+}
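+
+// Usage sketch (illustrative; `name' is hypothetical): the typed result
+// doubles as the test, so callers can branch and use it directly:
+//
+//   if (lang_identifier *id = identifier_p (name))
+//     { /* ... use `id' ... */ }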
+
+// forked from gcc/c-family/c-common.h gnu_vector_type_p
+
+/* Return true if TYPE is a vector type that should be subject to the GNU
+ vector extensions (as opposed to a vector type that is used only for
+ the purposes of defining target-specific built-in functions). */
+
+inline bool
+gnu_vector_type_p (const_tree type)
+{
+ return TREE_CODE (type) == VECTOR_TYPE && !TYPE_INDIVISIBLE_P (type);
+}
+
+extern vec<tree, va_gc> *
+make_tree_vector (void);
+
+extern void
+release_tree_vector (vec<tree, va_gc> *);
+
+/* Simplified unique_ptr clone to release a tree vec on exit. */
+
+class releasing_vec
+{
+public:
+ typedef vec<tree, va_gc> vec_t;
+
+ releasing_vec (vec_t *v) : v (v) {}
+ releasing_vec () : v (make_tree_vector ()) {}
+
+  /* Copy ops are deliberately declared but not defined;
+     copies must always be elided.  */
+ releasing_vec (const releasing_vec &);
+ releasing_vec &operator= (const releasing_vec &);
+
+ vec_t &operator* () const { return *v; }
+ vec_t *operator-> () const { return v; }
+ vec_t *get () const { return v; }
+ operator vec_t * () const { return v; }
+ vec_t **operator& () { return &v; }
+
+ /* Breaks pointer/value consistency for convenience. This takes ptrdiff_t
+ rather than unsigned to avoid ambiguity with the built-in operator[]
+ (bootstrap/91828). */
+ tree &operator[] (ptrdiff_t i) const { return (*v)[i]; }
+
+ tree *begin () { return ::begin (v); }
+ tree *end () { return ::end (v); }
+
+ void release ()
+ {
+ release_tree_vector (v);
+ v = NULL;
+ }
+
+ ~releasing_vec () { release_tree_vector (v); }
+
+private:
+ vec_t *v;
+};
+
+inline tree *
+vec_safe_push (releasing_vec &r, const tree &t CXX_MEM_STAT_INFO)
+{
+ return vec_safe_push (*&r, t PASS_MEM_STAT);
+}
+
+inline bool
+vec_safe_reserve (releasing_vec &r, unsigned n,
+ bool e = false CXX_MEM_STAT_INFO)
+{
+ return vec_safe_reserve (*&r, n, e PASS_MEM_STAT);
+}
+
+inline unsigned
+vec_safe_length (releasing_vec &r)
+{
+  return r->length ();
+}
+
+inline void
+vec_safe_splice (releasing_vec &r, vec<tree, va_gc> *p CXX_MEM_STAT_INFO)
+{
+ vec_safe_splice (*&r, p PASS_MEM_STAT);
+}
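+
+// Usage sketch (illustrative; `arg0' and `arg1' are hypothetical trees):
+// collect call arguments in a releasing_vec so the vector is handed back
+// to the cache automatically when the scope exits:
+//
+//   releasing_vec args;
+//   vec_safe_push (args, arg0);
+//   vec_safe_push (args, arg1);
+//   /* Pass `args' wherever a vec<tree, va_gc> * is expected; no manual
+//      release_tree_vector call is needed.  */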
+
+inline bool
+null_node_p (const_tree expr)
+{
+ STRIP_ANY_LOCATION_WRAPPER (expr);
+ return expr == null_node;
+}
+
+inline void
+cxx_incomplete_type_diagnostic (const_tree value, const_tree type,
+ diagnostic_t diag_kind)
+{
+ cxx_incomplete_type_diagnostic (rs_expr_loc_or_input_loc (value), value, type,
+ diag_kind);
+}
+
+inline void
+cxx_incomplete_type_error (const_tree value, const_tree type)
+{
+ cxx_incomplete_type_diagnostic (value, type, DK_ERROR);
+}
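+
+// Usage sketch (illustrative; `value' and `type' are hypothetical): reject
+// a use of a value whose type is still incomplete:
+//
+//   if (!COMPLETE_TYPE_P (type))
+//     cxx_incomplete_type_error (value, type);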
+
+extern location_t
+location_of (tree t);
+
+/* Helpers for IMPLICIT_RVALUE_P to look through automatic dereference. */
+
+inline bool
+implicit_rvalue_p (const_tree t)
+{
+ if (REFERENCE_REF_P (t))
+ t = TREE_OPERAND (t, 0);
+ return ((TREE_CODE (t) == NON_LVALUE_EXPR) && IMPLICIT_RVALUE_P (t));
+}
+
+inline tree
+set_implicit_rvalue_p (tree ot)
+{
+ tree t = ot;
+ if (REFERENCE_REF_P (t))
+ t = TREE_OPERAND (t, 0);
+ IMPLICIT_RVALUE_P (t) = 1;
+ return ot;
+}
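+
+// Usage sketch (illustrative; `expr' is hypothetical): wrap an operand in
+// NON_LVALUE_EXPR and mark it as an implicit rvalue, e.g. when treating a
+// returned local as move-eligible:
+//
+//   tree wrapped = build1 (NON_LVALUE_EXPR, TREE_TYPE (expr), expr);
+//   expr = set_implicit_rvalue_p (wrapped);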
+
+namespace Compile {
+extern tree
+maybe_constant_init (tree, tree = NULL_TREE, bool = false);
+
+extern void
+explain_invalid_constexpr_fn (tree fun);
+
+extern bool potential_constant_expression (tree);
+
+extern bool
+literal_type_p (tree t);
+
+extern bool
+maybe_constexpr_fn (tree t);
+
+extern tree
+fold_non_dependent_init (tree, tsubst_flags_t = tf_warning_or_error,
+ bool = false, tree = NULL_TREE);
+} // namespace Compile
+
+} // namespace Rust
+
+#endif // RUST_TREE