aboutsummaryrefslogtreecommitdiff
path: root/gcc
diff options
context:
space:
mode:
Diffstat (limited to 'gcc')
-rw-r--r--gcc/Makefile.in1
-rw-r--r--gcc/ada/gcc-interface/trans.cc2
-rw-r--r--gcc/builtins.def3
-rw-r--r--gcc/c-family/c-attribs.cc22
-rw-r--r--gcc/calls.cc3
-rw-r--r--gcc/common.opt35
-rw-r--r--gcc/cp/decl.cc3
-rw-r--r--gcc/cp/except.cc8
-rw-r--r--gcc/doc/extend.texi11
-rw-r--r--gcc/doc/invoke.texi93
-rw-r--r--gcc/flag-types.h10
-rw-r--r--gcc/gimple-harden-control-flow.cc1488
-rw-r--r--gcc/gimple.cc6
-rw-r--r--gcc/gimple.h23
-rw-r--r--gcc/params.opt8
-rw-r--r--gcc/passes.def1
-rw-r--r--gcc/testsuite/c-c++-common/harden-cfr-noret-never-O0.c12
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-abrt-always.c11
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-abrt-never.c11
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-abrt-no-xthrow.c11
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-abrt-nothrow.c11
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-abrt.c19
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-always.c13
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-bret-always.c13
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-bret-never.c13
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-bret-no-xthrow.c14
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-bret-noopt.c12
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-bret-noret.c12
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-bret-nothrow.c13
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-bret-retcl.c12
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-bret.c17
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-never.c13
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-no-xthrow.c13
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-noret-never.c18
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-noret-noexcept.c16
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-noret-nothrow.c13
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-noret.c38
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-notail.c8
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-nothrow.c13
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-returning.c35
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-skip-leaf.c10
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr-tail.c52
-rw-r--r--gcc/testsuite/c-c++-common/torture/harden-cfr.c84
-rw-r--r--gcc/testsuite/g++.dg/harden-cfr-throw-always-O0.C13
-rw-r--r--gcc/testsuite/g++.dg/harden-cfr-throw-returning-O0.C12
-rw-r--r--gcc/testsuite/g++.dg/harden-cfr-throw-returning-enabled-O0.C11
-rw-r--r--gcc/testsuite/g++.dg/torture/harden-cfr-noret-always-no-nothrow.C16
-rw-r--r--gcc/testsuite/g++.dg/torture/harden-cfr-noret-never-no-nothrow.C18
-rw-r--r--gcc/testsuite/g++.dg/torture/harden-cfr-noret-no-nothrow.C23
-rw-r--r--gcc/testsuite/g++.dg/torture/harden-cfr-throw-always.C13
-rw-r--r--gcc/testsuite/g++.dg/torture/harden-cfr-throw-never.C12
-rw-r--r--gcc/testsuite/g++.dg/torture/harden-cfr-throw-no-xthrow-expected.C16
-rw-r--r--gcc/testsuite/g++.dg/torture/harden-cfr-throw-no-xthrow.C12
-rw-r--r--gcc/testsuite/g++.dg/torture/harden-cfr-throw-nocleanup.C11
-rw-r--r--gcc/testsuite/g++.dg/torture/harden-cfr-throw-nothrow.C11
-rw-r--r--gcc/testsuite/g++.dg/torture/harden-cfr-throw-returning.C31
-rw-r--r--gcc/testsuite/g++.dg/torture/harden-cfr-throw.C73
-rw-r--r--gcc/testsuite/gcc.dg/torture/harden-cfr-noret-no-nothrow.c15
-rw-r--r--gcc/testsuite/gcc.dg/torture/harden-cfr-tail-ub.c40
-rw-r--r--gcc/testsuite/gnat.dg/hardcfr.adb76
-rw-r--r--gcc/tree-core.h3
-rw-r--r--gcc/tree-pass.h2
-rw-r--r--gcc/tree.cc9
63 files changed, 2635 insertions, 6 deletions
diff --git a/gcc/Makefile.in b/gcc/Makefile.in
index 747f749..a25a1e3 100644
--- a/gcc/Makefile.in
+++ b/gcc/Makefile.in
@@ -1461,6 +1461,7 @@ OBJS = \
gimple-iterator.o \
gimple-fold.o \
gimple-harden-conditionals.o \
+ gimple-harden-control-flow.o \
gimple-laddress.o \
gimple-loop-interchange.o \
gimple-loop-jam.o \
diff --git a/gcc/ada/gcc-interface/trans.cc b/gcc/ada/gcc-interface/trans.cc
index e99fbb4..89f0a07 100644
--- a/gcc/ada/gcc-interface/trans.cc
+++ b/gcc/ada/gcc-interface/trans.cc
@@ -519,6 +519,7 @@ gigi (Node_Id gnat_root,
ftype, NULL_TREE,
is_default, true, true, true, false, false, NULL,
Empty);
+ set_call_expr_flags (reraise_zcx_decl, ECF_NORETURN | ECF_XTHROW);
/* Dummy objects to materialize "others" and "all others" in the exception
tables. These are exported by a-exexpr-gcc.adb, so see this unit for
@@ -721,6 +722,7 @@ build_raise_check (int check, enum exception_info_kind kind)
= create_subprog_decl (get_identifier (Name_Buffer), NULL_TREE, ftype,
NULL_TREE, is_default, true, true, true, false,
false, NULL, Empty);
+ set_call_expr_flags (result, ECF_NORETURN | ECF_XTHROW);
return result;
}
diff --git a/gcc/builtins.def b/gcc/builtins.def
index 5953266..eb6f4ec 100644
--- a/gcc/builtins.def
+++ b/gcc/builtins.def
@@ -1179,6 +1179,9 @@ DEF_GCC_BUILTIN (BUILT_IN_FILE, "FILE", BT_FN_CONST_STRING, ATTR_NOTHROW_LEAF_LI
DEF_GCC_BUILTIN (BUILT_IN_FUNCTION, "FUNCTION", BT_FN_CONST_STRING, ATTR_NOTHROW_LEAF_LIST)
DEF_GCC_BUILTIN (BUILT_IN_LINE, "LINE", BT_FN_INT, ATTR_NOTHROW_LEAF_LIST)
+/* Control Flow Redundancy hardening out-of-line checker. */
+DEF_BUILTIN_STUB (BUILT_IN___HARDCFR_CHECK, "__builtin___hardcfr_check")
+
/* Synchronization Primitives. */
#include "sync-builtins.def"
diff --git a/gcc/c-family/c-attribs.cc b/gcc/c-family/c-attribs.cc
index dca7548..abf44d5 100644
--- a/gcc/c-family/c-attribs.cc
+++ b/gcc/c-family/c-attribs.cc
@@ -136,6 +136,7 @@ static tree handle_vector_mask_attribute (tree *, tree, tree, int,
static tree handle_nonnull_attribute (tree *, tree, tree, int, bool *);
static tree handle_nonstring_attribute (tree *, tree, tree, int, bool *);
static tree handle_nothrow_attribute (tree *, tree, tree, int, bool *);
+static tree handle_expected_throw_attribute (tree *, tree, tree, int, bool *);
static tree handle_cleanup_attribute (tree *, tree, tree, int, bool *);
static tree handle_warn_unused_result_attribute (tree *, tree, tree, int,
bool *);
@@ -437,6 +438,8 @@ const struct attribute_spec c_common_attribute_table[] =
handle_nonstring_attribute, NULL },
{ "nothrow", 0, 0, true, false, false, false,
handle_nothrow_attribute, NULL },
+ { "expected_throw", 0, 0, true, false, false, false,
+ handle_expected_throw_attribute, NULL },
{ "may_alias", 0, 0, false, true, false, false, NULL, NULL },
{ "cleanup", 1, 1, true, false, false, false,
handle_cleanup_attribute, NULL },
@@ -5459,6 +5462,25 @@ handle_nothrow_attribute (tree *node, tree name, tree ARG_UNUSED (args),
return NULL_TREE;
}
+/* Handle an "expected_throw" attribute; arguments as in
+ struct attribute_spec.handler. */
+
+static tree
+handle_expected_throw_attribute (tree *node, tree name, tree ARG_UNUSED (args),
+ int ARG_UNUSED (flags), bool *no_add_attrs)
+{
+ if (TREE_CODE (*node) == FUNCTION_DECL)
+ /* No flag to set here. */;
+ /* ??? TODO: Support types. */
+ else
+ {
+ warning (OPT_Wattributes, "%qE attribute ignored", name);
+ *no_add_attrs = true;
+ }
+
+ return NULL_TREE;
+}
+
/* Handle a "cleanup" attribute; arguments as in
struct attribute_spec.handler. */
diff --git a/gcc/calls.cc b/gcc/calls.cc
index e9e6951..9edb583 100644
--- a/gcc/calls.cc
+++ b/gcc/calls.cc
@@ -848,6 +848,9 @@ flags_from_decl_or_type (const_tree exp)
flags |= ECF_TM_PURE;
}
+ if (lookup_attribute ("expected_throw", DECL_ATTRIBUTES (exp)))
+ flags |= ECF_XTHROW;
+
flags = special_function_p (exp, flags);
}
else if (TYPE_P (exp))
diff --git a/gcc/common.opt b/gcc/common.opt
index b103b8d..ce34075 100644
--- a/gcc/common.opt
+++ b/gcc/common.opt
@@ -1831,6 +1831,41 @@ fharden-conditional-branches
Common Var(flag_harden_conditional_branches) Optimization
Harden conditional branches by checking reversed conditions.
+fharden-control-flow-redundancy
+Common Var(flag_harden_control_flow_redundancy) Optimization
+Harden control flow by recording and checking execution paths.
+
+fhardcfr-skip-leaf
+Common Var(flag_harden_control_flow_redundancy_skip_leaf) Optimization
+Disable CFR in leaf functions.
+
+fhardcfr-check-returning-calls
+Common Var(flag_harden_control_flow_redundancy_check_returning_calls) Init(-1) Optimization
+Check CFR execution paths also before calls followed by returns of their results.
+
+fhardcfr-check-exceptions
+Common Var(flag_harden_control_flow_redundancy_check_exceptions) Init(-1) Optimization
+Check CFR execution paths also when exiting a function through an exception.
+
+fhardcfr-check-noreturn-calls=
+Common Joined RejectNegative Enum(hardcfr_check_noreturn_calls) Var(flag_harden_control_flow_redundancy_check_noreturn) Init(HCFRNR_UNSPECIFIED) Optimization
+-fhardcfr-check-noreturn-calls=[always|no-xthrow|nothrow|never] Check CFR execution paths also before calling noreturn functions.
+
+Enum
+Name(hardcfr_check_noreturn_calls) Type(enum hardcfr_noret) UnknownError(unknown hardcfr noreturn checking level %qs)
+
+EnumValue
+Enum(hardcfr_check_noreturn_calls) String(never) Value(HCFRNR_NEVER)
+
+EnumValue
+Enum(hardcfr_check_noreturn_calls) String(nothrow) Value(HCFRNR_NOTHROW)
+
+EnumValue
+Enum(hardcfr_check_noreturn_calls) String(no-xthrow) Value(HCFRNR_NO_XTHROW)
+
+EnumValue
+Enum(hardcfr_check_noreturn_calls) String(always) Value(HCFRNR_ALWAYS)
+
; Nonzero means ignore `#ident' directives. 0 means handle them.
; Generate position-independent code for executables if possible
; On SVR4 targets, it also controls whether or not to emit a
diff --git a/gcc/cp/decl.cc b/gcc/cp/decl.cc
index ce4c89d..16af59d 100644
--- a/gcc/cp/decl.cc
+++ b/gcc/cp/decl.cc
@@ -5281,7 +5281,8 @@ push_cp_library_fn (enum tree_code operator_code, tree type,
tree
push_throw_library_fn (tree name, tree type)
{
- tree fn = push_library_fn (name, type, NULL_TREE, ECF_NORETURN | ECF_COLD);
+ tree fn = push_library_fn (name, type, NULL_TREE,
+ ECF_NORETURN | ECF_XTHROW | ECF_COLD);
return fn;
}
diff --git a/gcc/cp/except.cc b/gcc/cp/except.cc
index 6c0f081..e32efb3 100644
--- a/gcc/cp/except.cc
+++ b/gcc/cp/except.cc
@@ -657,12 +657,13 @@ build_throw (location_t loc, tree exp)
tree args[3] = {ptr_type_node, ptr_type_node, cleanup_type};
throw_fn = declare_library_fn_1 ("__cxa_throw",
- ECF_NORETURN | ECF_COLD,
+ ECF_NORETURN | ECF_XTHROW | ECF_COLD,
void_type_node, 3, args);
if (flag_tm && throw_fn != error_mark_node)
{
tree itm_fn = declare_library_fn_1 ("_ITM_cxa_throw",
- ECF_NORETURN | ECF_COLD,
+ ECF_NORETURN | ECF_XTHROW
+ | ECF_COLD,
void_type_node, 3, args);
if (itm_fn != error_mark_node)
{
@@ -797,7 +798,8 @@ build_throw (location_t loc, tree exp)
if (!rethrow_fn)
{
rethrow_fn = declare_library_fn_1 ("__cxa_rethrow",
- ECF_NORETURN | ECF_COLD,
+ ECF_NORETURN | ECF_XTHROW
+ | ECF_COLD,
void_type_node, 0, NULL);
if (flag_tm && rethrow_fn != error_mark_node)
apply_tm_attr (rethrow_fn, get_identifier ("transaction_pure"));
diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi
index 93f014a..bf941e6 100644
--- a/gcc/doc/extend.texi
+++ b/gcc/doc/extend.texi
@@ -3055,6 +3055,17 @@ when using these attributes the problem is diagnosed
earlier and with exact location of the call even in presence of inline
functions or when not emitting debugging information.
+@cindex @code{expected_throw} function attribute
+@item expected_throw
+This attribute, attached to a function, tells the compiler the function
+is more likely to raise or propagate an exception than to return, loop
+forever, or terminate the program.
+
+This hint is mostly ignored by the compiler. The only effect is when
+it's applied to @code{noreturn} functions and
+@samp{-fharden-control-flow-redundancy} is enabled, and
+@samp{-fhardcfr-check-noreturn-calls=no-xthrow} is in effect.
+
@cindex @code{externally_visible} function attribute
@item externally_visible
This attribute, attached to a global variable or function, nullifies
diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
index 16c4584..aebe919 100644
--- a/gcc/doc/invoke.texi
+++ b/gcc/doc/invoke.texi
@@ -642,6 +642,9 @@ Objective-C and Objective-C++ Dialects}.
-fsanitize-undefined-trap-on-error -fbounds-check
-fcf-protection=@r{[}full@r{|}branch@r{|}return@r{|}none@r{|}check@r{]}
-fharden-compares -fharden-conditional-branches
+-fharden-control-flow-redundancy -fhardcfr-skip-leaf
+-fhardcfr-check-exceptions -fhardcfr-check-returning-calls
+-fhardcfr-check-noreturn-calls=@r{[}always@r{|}no-xthrow@r{|}nothrow@r{|}never@r{]}
-fstack-protector -fstack-protector-all -fstack-protector-strong
-fstack-protector-explicit -fstack-check
-fstack-limit-register=@var{reg} -fstack-limit-symbol=@var{sym}
@@ -15964,6 +15967,16 @@ A value of zero can be used to lift
the bound. A variable whose value is unknown at compilation time and
defined outside a SCoP is a parameter of the SCoP.
+@item hardcfr-max-blocks
+Disable @option{-fharden-control-flow-redundancy} for functions with a
+larger number of blocks than the specified value. Zero removes any
+limit.
+
+@item hardcfr-max-inline-blocks
+Force @option{-fharden-control-flow-redundancy} to use out-of-line
+checking for functions with a larger number of basic blocks than the
+specified value.
+
@item loop-block-tile-size
Loop blocking or strip mining transforms, enabled with
@option{-floop-block} or @option{-floop-strip-mine}, strip mine each
@@ -17448,6 +17461,86 @@ condition, and to call @code{__builtin_trap} if the result is
unexpected. Use with @samp{-fharden-compares} to cover all
conditionals.
+@opindex fharden-control-flow-redundancy
+@item -fharden-control-flow-redundancy
+Emit extra code to set booleans when entering basic blocks, and to
+verify and trap, at function exits, when the booleans do not form an
+execution path that is compatible with the control flow graph.
+
+Verification takes place before returns, before mandatory tail calls
+(see below) and, optionally, before escaping exceptions with
+@option{-fhardcfr-check-exceptions}, before returning calls with
+@option{-fhardcfr-check-returning-calls}, and before noreturn calls with
+@option{-fhardcfr-check-noreturn-calls}. Tuning options
+@option{--param hardcfr-max-blocks} and @option{--param
+hardcfr-max-inline-blocks} are available.
+
+Tail call optimization takes place too late to affect control flow
+redundancy, but calls annotated as mandatory tail calls by language
+front-ends, and any calls marked early enough as potential tail calls
+would also have verification issued before the call, but these
+possibilities are merely theoretical, as these conditions can only be
+met when using custom compiler plugins.
+
+@opindex fhardcfr-skip-leaf
+@item -fhardcfr-skip-leaf
+Disable @option{-fharden-control-flow-redundancy} in leaf functions.
+
+@opindex fhardcfr-check-exceptions
+@opindex fno-hardcfr-check-exceptions
+@item -fhardcfr-check-exceptions
+When @option{-fharden-control-flow-redundancy} is active, check the
+recorded execution path against the control flow graph at exception
+escape points, as if the function body was wrapped with a cleanup
+handler that performed the check and reraised. This option is enabled
+by default; use @option{-fno-hardcfr-check-exceptions} to disable it.
+
+@opindex fhardcfr-check-returning-calls
+@opindex fno-hardcfr-check-returning-calls
+@item -fhardcfr-check-returning-calls
+When @option{-fharden-control-flow-redundancy} is active, check the
+recorded execution path against the control flow graph before any
+function call immediately followed by a return of its result, if any, so
+as to not prevent tail-call optimization, whether or not it is
+ultimately optimized to a tail call.
+
+This option is enabled by default whenever sibling call optimizations
+are enabled (see @option{-foptimize-sibling-calls}), but it can be
+enabled (or disabled, using its negated form) explicitly, regardless of
+the optimizations.
+
+@opindex fhardcfr-check-noreturn-calls
+@item -fhardcfr-check-noreturn-calls=@r{[}always@r{|}no-xthrow@r{|}nothrow@r{|}never@r{]}
+When @option{-fharden-control-flow-redundancy} is active, check the
+recorded execution path against the control flow graph before
+@code{noreturn} calls, either all of them (@option{always}), those that
+aren't expected to return control to the caller through an exception
+(@option{no-xthrow}, the default), those that may not return control to
+the caller through an exception either (@option{nothrow}), or none of
+them (@option{never}).
+
+Checking before a @code{noreturn} function that may return control to
+the caller through an exception may cause checking to be performed more
+than once, if the exception is caught in the caller, whether by a
+handler or a cleanup. When @option{-fhardcfr-check-exceptions} is also
+enabled, the compiler will avoid associating a @code{noreturn} call with
+the implicitly-added cleanup handler, since it would be redundant with
+the check performed before the call, but other handlers or cleanups in
+the function, if activated, will modify the recorded execution path and
+check it again when another checkpoint is hit. The checkpoint may even
+be another @code{noreturn} call, so checking may end up performed
+multiple times.
+
+Various optimizers may cause calls to be marked as @code{noreturn}
+and/or @code{nothrow}, even in the absence of the corresponding
+attributes, which may affect the placement of checks before calls, as
+well as the addition of implicit cleanup handlers for them. This
+unpredictability, and the fact that raising and reraising exceptions
+frequently amounts to implicitly calling @code{noreturn} functions, have
+made @option{no-xthrow} the default setting for this option: it excludes
+from the @code{noreturn} treatment only internal functions used to
+(re)raise exceptions, that are not affected by these optimizations.
+
@opindex fstack-protector
@item -fstack-protector
Emit extra code to check for buffer overflows, such as stack smashing
diff --git a/gcc/flag-types.h b/gcc/flag-types.h
index 7466c11..c1852cd 100644
--- a/gcc/flag-types.h
+++ b/gcc/flag-types.h
@@ -157,6 +157,16 @@ enum stack_reuse_level
SR_ALL
};
+/* Control Flow Redundancy hardening options for noreturn calls. */
+enum hardcfr_noret
+{
+ HCFRNR_NEVER,
+ HCFRNR_NOTHROW,
+ HCFRNR_NO_XTHROW,
+ HCFRNR_UNSPECIFIED,
+ HCFRNR_ALWAYS,
+};
+
/* The live patching level. */
enum live_patching_level
{
diff --git a/gcc/gimple-harden-control-flow.cc b/gcc/gimple-harden-control-flow.cc
new file mode 100644
index 0000000..5c28fd0
--- /dev/null
+++ b/gcc/gimple-harden-control-flow.cc
@@ -0,0 +1,1488 @@
+/* Control flow redundancy hardening.
+ Copyright (C) 2022 Free Software Foundation, Inc.
+ Contributed by Alexandre Oliva <oliva@adacore.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#define INCLUDE_ALGORITHM /* find */
+#include "system.h"
+#include "coretypes.h"
+#include "backend.h"
+#include "tree.h"
+#include "fold-const.h"
+#include "gimple.h"
+#include "gimplify.h"
+#include "tree-pass.h"
+#include "ssa.h"
+#include "gimple-iterator.h"
+#include "gimple-pretty-print.h"
+#include "tree-cfg.h"
+#include "tree-cfgcleanup.h"
+#include "tree-eh.h"
+#include "except.h"
+#include "sbitmap.h"
+#include "basic-block.h"
+#include "cfghooks.h"
+#include "cfgloop.h"
+#include "cgraph.h"
+#include "alias.h"
+#include "varasm.h"
+#include "output.h"
+#include "langhooks.h"
+#include "diagnostic.h"
+#include "intl.h"
+
+namespace {
+
+/* This pass introduces verification, at function exits, that booleans
+ set in each basic block during function execution reflect the
+ control flow graph: for each visited block, check that at least one
+ predecessor and at least one successor were also visited. This
+ sort of hardening may detect various kinds of attacks. */
+
+/* Define a pass to harden code through control flow redundancy. */
+
+const pass_data pass_data_harden_control_flow_redundancy = {
+ GIMPLE_PASS,
+ "hardcfr",
+ OPTGROUP_NONE,
+ TV_NONE,
+ PROP_cfg | PROP_ssa, // properties_required
+ 0, // properties_provided
+ 0, // properties_destroyed
+ TODO_cleanup_cfg, // properties_start
+ 0, // properties_finish
+};
+
+class pass_harden_control_flow_redundancy : public gimple_opt_pass
+{
+public:
+ pass_harden_control_flow_redundancy (gcc::context *ctxt)
+ : gimple_opt_pass (pass_data_harden_control_flow_redundancy, ctxt)
+ {}
+ opt_pass *clone () { return new pass_harden_control_flow_redundancy (m_ctxt); }
+ virtual bool gate (function *fun) {
+ /* Return quickly if the pass is disabled, without checking any of
+ the conditions that might give rise to warnings that would only
+ be appropriate if hardening was requested. */
+ if (!flag_harden_control_flow_redundancy)
+ return false;
+
+ /* Functions that return more than once, like setjmp and vfork
+ (that also gets this flag set), will start recording a path
+ after the first return, and then may take another path when
+ they return again. The unterminated path may then be flagged
+ as an error. ??? We could save the visited array before the
+ call and restore it if it returns again. */
+ if (fun->calls_setjmp)
+ {
+ warning_at (DECL_SOURCE_LOCATION (fun->decl), 0,
+ "%qD calls %<setjmp%> or similar,"
+ " %<-fharden-control-flow-redundancy%> is not supported",
+ fun->decl);
+ return false;
+ }
+
+ /* Some targets bypass the abnormal dispatcher block in nonlocal
+ gotos, and then we'd miss its visited bit. It might be doable
+ to make it work uniformly, but this feature is not used often
+ enough to make it worthwhile. */
+ if (fun->has_nonlocal_label)
+ {
+ warning_at (DECL_SOURCE_LOCATION (fun->decl), 0,
+ "%qD receives nonlocal gotos,"
+ " %<-fharden-control-flow-redundancy%> is not supported",
+ fun->decl);
+ return false;
+ }
+
+ if (fun->cfg && param_hardcfr_max_blocks > 0
+ && (n_basic_blocks_for_fn (fun) - NUM_FIXED_BLOCKS
+ > param_hardcfr_max_blocks))
+ {
+ warning_at (DECL_SOURCE_LOCATION (fun->decl), 0,
+ "%qD has more than %u blocks, the requested"
+ " maximum for %<-fharden-control-flow-redundancy%>",
+ fun->decl, param_hardcfr_max_blocks);
+ return false;
+ }
+
+ return true;
+ }
+ virtual unsigned int execute (function *);
+};
+
+}
+
+/* Return TRUE iff CFR checks should be inserted before returning
+ calls. */
+
+static bool
+check_returning_calls_p ()
+{
+ return
+ flag_harden_control_flow_redundancy_check_returning_calls > 0
+ || (flag_harden_control_flow_redundancy_check_returning_calls < 0
+ /* Gates pass_tail_calls. */
+ && flag_optimize_sibling_calls
+ /* Gates pass_all_optimizations. */
+ && optimize >= 1 && !optimize_debug);
+}
+
+/* Scan BB from the end, updating *RETPTR if given as return stmts and
+ copies are found. Return a call or a stmt that cannot appear after
+ a tail call, or NULL if the top of the block is reached without
+ finding any. */
+
+static gimple *
+hardcfr_scan_block (basic_block bb, tree **retptr)
+{
+ gimple_stmt_iterator gsi;
+ for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
+ {
+ gimple *stmt = gsi_stmt (gsi);
+
+ /* Ignore labels, returns, nops, clobbers and debug stmts. */
+ if (gimple_code (stmt) == GIMPLE_LABEL
+ || gimple_code (stmt) == GIMPLE_NOP
+ || gimple_code (stmt) == GIMPLE_PREDICT
+ || gimple_clobber_p (stmt)
+ || is_gimple_debug (stmt))
+ continue;
+
+ if (gimple_code (stmt) == GIMPLE_RETURN)
+ {
+ greturn *gret = as_a <greturn *> (stmt);
+ if (retptr)
+ {
+ gcc_checking_assert (!*retptr);
+ *retptr = gimple_return_retval_ptr (gret);
+ }
+ continue;
+ }
+
+ /* Check for a call. */
+ if (is_gimple_call (stmt))
+ return stmt;
+
+ /* Allow simple copies to the return value, updating the return
+ value to be found in earlier assignments. */
+ if (retptr && *retptr && gimple_assign_single_p (stmt)
+ && **retptr == gimple_assign_lhs (stmt))
+ {
+ *retptr = gimple_assign_rhs1_ptr (stmt);
+ continue;
+ }
+
+ return stmt;
+ }
+
+ /* Any other kind of stmt will prevent a tail call. */
+ return NULL;
+}
+
+/* Return TRUE iff CALL is to be preceded by a CFR checkpoint, i.e.,
+ if it's a returning call (one whose result is ultimately returned
+ without intervening non-copy statements) and we're checking
+ returning calls, a __builtin_return call (noreturn with a path to
+ the exit block), a must-tail call, or a tail call. */
+
+static bool
+returning_call_p (gcall *call)
+{
+ if (!(gimple_call_noreturn_p (call)
+ || gimple_call_must_tail_p (call)
+ || gimple_call_tail_p (call)
+ || check_returning_calls_p ()))
+ return false;
+
+ /* Quickly check that there's a path to exit compatible with a
+ returning call. Detect infinite loops by limiting the path
+ length to the basic block count, and by looking for duplicate
+ blocks before allocating more memory for the path, for amortized
+ O(n). */
+ auto_vec<basic_block, 10> path;
+ for (basic_block bb = gimple_bb (call);
+ bb != EXIT_BLOCK_PTR_FOR_FN (cfun);
+ bb = single_succ (bb))
+ if (!single_succ_p (bb)
+ || (single_succ_edge (bb)->flags & EDGE_EH) != 0
+ || n_basic_blocks_for_fn (cfun) - path.length () <= NUM_FIXED_BLOCKS
+ || (path.length () == path.allocated ()
+ && std::find (path.begin (), path.end (), bb) != path.end ()))
+ return false;
+ else
+ path.safe_push (bb);
+
+ /* Check the stmts in the blocks and trace the return value. */
+ tree *retptr = NULL;
+ for (;;)
+ {
+ gcc_checking_assert (!path.is_empty ());
+ basic_block bb = path.pop ();
+ gimple *stop = hardcfr_scan_block (bb, &retptr);
+ if (stop)
+ {
+ if (stop != call)
+ return false;
+ gcc_checking_assert (path.is_empty ());
+ break;
+ }
+
+ gphi *retphi = NULL;
+ if (retptr && *retptr && TREE_CODE (*retptr) == SSA_NAME
+ && !SSA_NAME_IS_DEFAULT_DEF (*retptr)
+ && SSA_NAME_DEF_STMT (*retptr)
+ && is_a <gphi *> (SSA_NAME_DEF_STMT (*retptr))
+ && gimple_bb (SSA_NAME_DEF_STMT (*retptr)) == bb)
+ {
+ retphi = as_a <gphi *> (SSA_NAME_DEF_STMT (*retptr));
+ gcc_checking_assert (gimple_phi_result (retphi) == *retptr);
+ }
+ else
+ continue;
+
+ gcc_checking_assert (!path.is_empty ());
+ edge e = single_succ_edge (path.last ());
+ int i = EDGE_COUNT (bb->preds);
+ while (i--)
+ if (EDGE_PRED (bb, i) == e)
+ break;
+ gcc_checking_assert (i >= 0);
+ retptr = gimple_phi_arg_def_ptr (retphi, i);
+ }
+
+ return (gimple_call_noreturn_p (call)
+ || gimple_call_must_tail_p (call)
+ || gimple_call_tail_p (call)
+ || (gimple_call_lhs (call) == (retptr ? *retptr : NULL)
+ && check_returning_calls_p ()));
+}
+
+typedef auto_vec<edge, 10> chk_edges_t;
+
+/* Declare for mutual recursion. */
+static bool hardcfr_sibcall_search_preds (basic_block bb,
+ chk_edges_t &chk_edges,
+ int &count_chkcall,
+ auto_sbitmap &chkcall_blocks,
+ int &count_postchk,
+ auto_sbitmap &postchk_blocks,
+ tree *retptr);
+
+/* Search backwards from the end of BB for a mandatory or potential
+ sibcall. Schedule the block to be handled sort-of like noreturn if
+ so. Recurse to preds, with updated RETPTR, if the block only
+ contains stmts that may follow such a call, scheduling checking at
+ edges and marking blocks as post-check as needed. Return true iff,
+ at the end of the block, a check will have already been
+ performed. */
+
+static bool
+hardcfr_sibcall_search_block (basic_block bb,
+ chk_edges_t &chk_edges,
+ int &count_chkcall,
+ auto_sbitmap &chkcall_blocks,
+ int &count_postchk,
+ auto_sbitmap &postchk_blocks,
+ tree *retptr)
+{
+ /* Conditionals and internal exceptions rule out tail calls. */
+ if (!single_succ_p (bb)
+ || (single_succ_edge (bb)->flags & EDGE_EH) != 0)
+ return false;
+
+ gimple *stmt = hardcfr_scan_block (bb, &retptr);
+ if (!stmt)
+ return hardcfr_sibcall_search_preds (bb, chk_edges,
+ count_chkcall, chkcall_blocks,
+ count_postchk, postchk_blocks,
+ retptr);
+
+ if (!is_a <gcall *> (stmt))
+ return false;
+
+ /* Avoid disrupting mandatory or early-marked tail calls,
+ inserting the check before them. This works for
+ must-tail calls, but tail calling as an optimization is
+ detected too late for us.
+
+ Also check for noreturn calls here. Noreturn calls won't
+ normally have edges to exit, so they won't be found here,
+ but __builtin_return does, and we must check before
+ it, so handle it like a tail call. */
+ gcall *call = as_a <gcall *> (stmt);
+ if (!(gimple_call_noreturn_p (call)
+ || gimple_call_must_tail_p (call)
+ || gimple_call_tail_p (call)
+ || (gimple_call_lhs (call) == (retptr ? *retptr : NULL)
+ && check_returning_calls_p ())))
+ return false;
+
+ gcc_checking_assert (returning_call_p (call));
+
+ /* We found a call that is to be preceded by checking. */
+ if (bitmap_set_bit (chkcall_blocks, bb->index))
+ ++count_chkcall;
+ else
+ gcc_unreachable ();
+ return true;
+}
+
+
+/* Search preds of BB for a mandatory or potential sibcall or
+ returning call, and arrange for the blocks containing them to have
+ a check inserted before the call, like noreturn calls. If any
+ preds are found to perform checking, schedule checks at the edges
+ of those that don't, and mark BB as postcheck. */
+
+static bool
+hardcfr_sibcall_search_preds (basic_block bb,
+ chk_edges_t &chk_edges,
+ int &count_chkcall,
+ auto_sbitmap &chkcall_blocks,
+ int &count_postchk,
+ auto_sbitmap &postchk_blocks,
+ tree *retptr)
+{
+ /* For the exit block, we wish to force a check at every
+ predecessor, so pretend we've already found a pred that had
+ checking, so that we schedule checking at every one of its pred
+ edges. */
+ bool first = bb->index >= NUM_FIXED_BLOCKS;
+ bool postchecked = true;
+
+ gphi *retphi = NULL;
+ if (retptr && *retptr && TREE_CODE (*retptr) == SSA_NAME
+ && !SSA_NAME_IS_DEFAULT_DEF (*retptr)
+ && SSA_NAME_DEF_STMT (*retptr)
+ && is_a <gphi *> (SSA_NAME_DEF_STMT (*retptr))
+ && gimple_bb (SSA_NAME_DEF_STMT (*retptr)) == bb)
+ {
+ retphi = as_a <gphi *> (SSA_NAME_DEF_STMT (*retptr));
+ gcc_checking_assert (gimple_phi_result (retphi) == *retptr);
+ }
+
+ for (int i = EDGE_COUNT (bb->preds); i--; first = false)
+ {
+ edge e = EDGE_PRED (bb, i);
+
+ bool checked
+ = hardcfr_sibcall_search_block (e->src, chk_edges,
+ count_chkcall, chkcall_blocks,
+ count_postchk, postchk_blocks,
+ !retphi ? retptr
+ : gimple_phi_arg_def_ptr (retphi, i));
+
+ if (first)
+ {
+ postchecked = checked;
+ continue;
+ }
+
+ /* When we first find a checked block, force a check at every
+ other incoming edge we've already visited, and those we
+ visit afterwards that don't have their own check, so that
+ when we reach BB, the check has already been performed. */
+ if (!postchecked && checked)
+ {
+ for (int j = EDGE_COUNT (bb->preds); --j > i; )
+ chk_edges.safe_push (EDGE_PRED (bb, j));
+ postchecked = true;
+ }
+ if (postchecked && !checked)
+ chk_edges.safe_push (EDGE_PRED (bb, i));
+ }
+
+ if (postchecked && bb->index >= NUM_FIXED_BLOCKS)
+ {
+ if (bitmap_set_bit (postchk_blocks, bb->index))
+ count_postchk++;
+ else
+ gcc_unreachable ();
+ }
+
+ return postchecked;
+}
+
+
+/* Helper class for the control-flow-redundancy pass: lay out the
+   per-function runtime VISITED bitmap (one bit per ordinary basic
+   block), emit the GIMPLE that sets each block's bit on entry, and
+   build either inline checking code (ckseq + ckfail/ckpart/ckinv/ckblk)
+   or the static CFG representation (rtcfg) consumed by the
+   out-of-line __hardcfr_check verifier.  */
+class rt_bb_visited
+{
+  /* Use a sufficiently wide unsigned type to hold basic block numbers.  */
+  typedef size_t blknum;
+
+  /* Record the original block count of the function.  */
+  blknum nblocks;
+  /* Record the number of bits per VWORD (short for VISITED WORD), an
+     efficient mode to set and test bits for blocks we visited, and to
+     encode the CFG in case out-of-line verification is used.  */
+  unsigned vword_bits;
+
+  /* Hold the unsigned integral VWORD type.  */
+  tree vword_type;
+  /* Hold a pointer-to-VWORD type.  */
+  tree vword_ptr;
+
+  /* Hold a growing sequence used to check, inline or out-of-line,
+     that VISITED encodes an expected execution path.  */
+  gimple_seq ckseq;
+  /* If nonNULL, hold a growing representation of the CFG for
+     out-of-line testing.  */
+  tree rtcfg;
+
+  /* Hold the declaration of an array of VWORDs, used as an array of
+     NBLOCKS-2 bits.  */
+  tree visited;
+
+  /* If performing inline checking, hold declarations of boolean
+     variables used for inline checking.  CKBLK holds the result of
+     testing whether the VISITED bit corresponding to a predecessor or
+     successor is set, CKINV inverts that bit, CKPART gets cleared if
+     a block was not visited or if CKINV for any of its predecessors
+     or successors is set, and CKFAIL gets set if CKPART remains set
+     at the end of a block's predecessors or successors list.  */
+  tree ckfail, ckpart, ckinv, ckblk;
+
+  /* Convert a block index N to a block vindex, the index used to
+     identify it in the VISITED array.  Check that it's in range:
+     neither ENTRY nor EXIT, but maybe one-past-the-end, to compute
+     the visited array length.  */
+  blknum num2idx (blknum n) {
+    gcc_checking_assert (n >= NUM_FIXED_BLOCKS && n <= nblocks);
+    return (n - NUM_FIXED_BLOCKS);
+  }
+  /* Return the block vindex for BB, that must not be ENTRY or
+     EXIT.  */
+  blknum bb2idx (basic_block bb) {
+    gcc_checking_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
+                         && bb != EXIT_BLOCK_PTR_FOR_FN (cfun));
+    gcc_checking_assert (blknum (bb->index) < nblocks);
+    return num2idx (bb->index);
+  }
+
+  /* Compute the type to be used for the VISITED array.  */
+  tree vtype ()
+  {
+    blknum n = num2idx (nblocks);
+    return build_array_type_nelts (vword_type,
+                                   (n + vword_bits - 1) / vword_bits);
+  }
+
+  /* Compute and return the index into VISITED for block BB.  If BITP
+     is non-NULL, also compute and store the bit mask corresponding to
+     block BB in *BITP, so that (visited[index] & mask) tells whether
+     BB was visited.  */
+  tree vwordidx (basic_block bb, tree *bitp = NULL)
+  {
+    blknum idx = bb2idx (bb);
+    if (bitp)
+      {
+        unsigned bit = idx % vword_bits;
+        /* We don't need to adjust shifts to follow native bit
+           endianness here, all of our uses of the CFG and visited
+           bitmaps, whether at compile or runtime, are shifted bits on
+           full words.  This adjustment here would require a
+           corresponding adjustment at runtime, which would be nothing
+           but undesirable overhead for us.  */
+        if (0 /* && BITS_BIG_ENDIAN */)
+          bit = vword_bits - bit - 1;
+        wide_int wbit = wi::set_bit_in_zero (bit, vword_bits);
+        *bitp = wide_int_to_tree (vword_type, wbit);
+      }
+    return build_int_cst (vword_ptr, idx / vword_bits);
+  }
+
+  /* Return an expr that accesses the visited element that holds
+     information about BB.  If BITP is non-NULL, set it to the mask to
+     tell which bit in that expr refers to BB.  */
+  tree vword (basic_block bb, tree *bitp = NULL)
+  {
+    return build2 (MEM_REF, vword_type,
+                   build1 (ADDR_EXPR, vword_ptr, visited),
+                   int_const_binop (MULT_EXPR, vwordidx (bb, bitp),
+                                    fold_convert (vword_ptr,
+                                                  TYPE_SIZE_UNIT
+                                                  (vword_type))));
+  }
+
+  /* Return an expr that evaluates to true iff BB was marked as
+     VISITED.  Add any gimple stmts to SEQP.  */
+  tree vindex (basic_block bb, gimple_seq *seqp)
+  {
+    /* ENTRY and EXIT have no VISITED bit; they are trivially
+       "visited" whenever the function runs.  */
+    if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
+        || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
+      return boolean_true_node;
+
+    tree bit, setme = vword (bb, &bit);
+    tree temp = create_tmp_var (vword_type, ".cfrtemp");
+
+    gassign *vload = gimple_build_assign (temp, setme);
+    gimple_seq_add_stmt (seqp, vload);
+
+    gassign *vmask = gimple_build_assign (temp, BIT_AND_EXPR, temp, bit);
+    gimple_seq_add_stmt (seqp, vmask);
+
+    return build2 (NE_EXPR, boolean_type_node,
+                   temp, build_int_cst (vword_type, 0));
+  }
+
+  /* Set the bit corresponding to BB in VISITED.  Add to SEQ any
+     required gimple stmts, and return SEQ, possibly modified.  */
+  gimple_seq vset (basic_block bb, gimple_seq seq = NULL)
+  {
+    tree bit, setme = vword (bb, &bit);
+    tree temp = create_tmp_var (vword_type, ".cfrtemp");
+
+    gassign *vload = gimple_build_assign (temp, setme);
+    gimple_seq_add_stmt (&seq, vload);
+
+    gassign *vbitset = gimple_build_assign (temp, BIT_IOR_EXPR, temp, bit);
+    gimple_seq_add_stmt (&seq, vbitset);
+
+    gassign *vstore = gimple_build_assign (unshare_expr (setme), temp);
+    gimple_seq_add_stmt (&seq, vstore);
+
+    /* Prevent stores into visited from being deferred, forcing
+       subsequent bitsets to reload the word rather than reusing
+       values already in register.  The purpose is threefold: make the
+       bitset get to memory in this block, so that control flow
+       attacks in functions called in this block don't easily bypass
+       the bitset; prevent the bitset word from being retained in a
+       register across blocks, which could, in an attack scenario,
+       make a later block set more than one bit; and prevent hoisting
+       or sinking loads or stores of bitset words out of loops or even
+       throughout functions, which could significantly weaken the
+       verification.  This is equivalent to making the bitsetting
+       volatile within the function body, but without changing its
+       type; making the bitset volatile would make inline checking far
+       less optimizable for no reason.  */
+    vec<tree, va_gc> *inputs = NULL;
+    vec<tree, va_gc> *outputs = NULL;
+    vec_safe_push (outputs,
+                   build_tree_list
+                   (build_tree_list
+                    (NULL_TREE, build_string (2, "=m")),
+                    visited));
+    vec_safe_push (inputs,
+                   build_tree_list
+                   (build_tree_list
+                    (NULL_TREE, build_string (1, "m")),
+                    visited));
+    gasm *stabilize = gimple_build_asm_vec ("", inputs, outputs,
+                                            NULL, NULL);
+    gimple_seq_add_stmt (&seq, stabilize);
+
+    return seq;
+  }
+
+public:
+  /* Prepare to add control flow redundancy testing to CFUN.  */
+  rt_bb_visited (int checkpoints)
+    : nblocks (n_basic_blocks_for_fn (cfun)),
+      vword_type (NULL), ckseq (NULL), rtcfg (NULL)
+  {
+    /* If we've already added a declaration for the builtin checker,
+       extract vword_type and vword_bits from its declaration.  */
+    if (tree checkfn = builtin_decl_explicit (BUILT_IN___HARDCFR_CHECK))
+      {
+        tree check_arg_list = TYPE_ARG_TYPES (TREE_TYPE (checkfn));
+        tree vword_const_ptr_type = TREE_VALUE (TREE_CHAIN (check_arg_list));
+        vword_type = TYPE_MAIN_VARIANT (TREE_TYPE (vword_const_ptr_type));
+        vword_bits = tree_to_shwi (TYPE_SIZE (vword_type));
+      }
+    /* Otherwise, select vword_bits, vword_type et al, and use it to
+       declare the builtin checker.  */
+    else
+      {
+        /* This setting needs to be kept in sync with libgcc/hardcfr.c.
+           We aim for at least 28 bits, which enables us to refer to as
+           many as 28 << 28 blocks in a function's CFG.  That's way over
+           4G blocks.  */
+        machine_mode VWORDmode;
+        if (BITS_PER_UNIT >= 28)
+          {
+            VWORDmode = QImode;
+            vword_bits = BITS_PER_UNIT;
+          }
+        else if (BITS_PER_UNIT >= 14)
+          {
+            VWORDmode = HImode;
+            vword_bits = 2 * BITS_PER_UNIT;
+          }
+        else
+          {
+            VWORDmode = SImode;
+            vword_bits = 4 * BITS_PER_UNIT;
+          }
+
+        vword_type = lang_hooks.types.type_for_mode (VWORDmode, 1);
+        gcc_checking_assert (vword_bits == tree_to_shwi (TYPE_SIZE
+                                                         (vword_type)));
+
+        /* Give the VISITED word type its own alias set, so stores to
+           it don't interfere with other memory accesses.  */
+        vword_type = build_variant_type_copy (vword_type);
+        TYPE_ALIAS_SET (vword_type) = new_alias_set ();
+
+        tree vword_const = build_qualified_type (vword_type, TYPE_QUAL_CONST);
+        tree vword_const_ptr = build_pointer_type (vword_const);
+        tree type = build_function_type_list (void_type_node, sizetype,
+                                              vword_const_ptr, vword_const_ptr,
+                                              NULL_TREE);
+        tree decl = add_builtin_function_ext_scope
+          ("__builtin___hardcfr_check",
+           type, BUILT_IN___HARDCFR_CHECK, BUILT_IN_NORMAL,
+           "__hardcfr_check", NULL_TREE);
+        TREE_NOTHROW (decl) = true;
+        set_builtin_decl (BUILT_IN___HARDCFR_CHECK, decl, true);
+      }
+
+    /* The checker uses a qualified pointer, so we can't reuse it,
+       so build a new one.  */
+    vword_ptr = build_pointer_type (vword_type);
+
+    tree visited_type = vtype ();
+    visited = create_tmp_var (visited_type, ".cfrvisited");
+
+    /* Large functions, or functions with multiple checkpoints, use
+       the out-of-line checker; in that case only rtcfg is needed.  */
+    if (nblocks - NUM_FIXED_BLOCKS > blknum (param_hardcfr_max_inline_blocks)
+        || checkpoints > 1)
+      {
+        /* Make sure vword_bits is wide enough for the representation
+           of nblocks in rtcfg.  Compare with vword_bits << vword_bits,
+           but avoiding overflows, shifting nblocks right instead.  If
+           vword_bits is wider than HOST_WIDE_INT, assume it fits, so
+           as to avoid undefined shifts.  */
+        gcc_assert (HOST_BITS_PER_WIDE_INT <= vword_bits
+                    || (((unsigned HOST_WIDE_INT)(num2idx (nblocks))
+                         >> vword_bits) < vword_bits));
+
+        /* Build a terminator for the constructor list.  */
+        rtcfg = build_tree_list (NULL_TREE, NULL_TREE);
+        return;
+      }
+
+    /* Inline checking: set up the four boolean helpers (see the
+       member comments above) and initialize CKFAIL to false.  */
+    ckfail = create_tmp_var (boolean_type_node, ".cfrfail");
+    ckpart = create_tmp_var (boolean_type_node, ".cfrpart");
+    ckinv = create_tmp_var (boolean_type_node, ".cfrinv");
+    ckblk = create_tmp_var (boolean_type_node, ".cfrblk");
+
+    gassign *ckfail_init = gimple_build_assign (ckfail, boolean_false_node);
+    gimple_seq_add_stmt (&ckseq, ckfail_init);
+  }
+
+  /* Insert SEQ before a resx or a call in INSBB.  */
+  void insert_exit_check_in_block (gimple_seq seq, basic_block insbb)
+  {
+    gimple_stmt_iterator gsi = gsi_last_bb (insbb);
+
+    /* Walk backwards to the last resx or call in the block; the
+       check is inserted right before it.  */
+    while (!gsi_end_p (gsi))
+      if (is_a <gresx *> (gsi_stmt (gsi))
+          || is_a <gcall *> (gsi_stmt (gsi)))
+        break;
+      else
+        gsi_prev (&gsi);
+
+    gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
+  }
+
+  /* Insert SEQ on E.  */
+  void insert_exit_check_on_edge (gimple_seq seq, edge e)
+  {
+    gsi_insert_seq_on_edge_immediate (e, seq);
+  }
+
+  /* Add checking code to CHK_EDGES and CHKCALL_BLOCKS, and
+     initialization code on the entry edge.  Before this point, the
+     CFG has been undisturbed, and all the needed data has been
+     collected and safely stowed.  */
+  void check (chk_edges_t &chk_edges,
+              int count_chkcall, auto_sbitmap const &chkcall_blocks)
+  {
+    /* If we're using out-of-line checking, create and statically
+       initialize the CFG checking representation, generate the
+       checker call for the checking sequence, and insert it in all
+       exit edges, if there's more than one.  If there's only one, we
+       use the same logic as the inline case to insert the check
+       sequence.  */
+    if (rtcfg)
+      {
+        /* Unreverse the list, and drop the tail node turned into head.  */
+        rtcfg = TREE_CHAIN (nreverse (rtcfg));
+
+        /* Turn the indices stored in TREE_PURPOSE into separate
+           nodes.  It was useful to keep them together to enable
+           combination of masks and for clear separation of
+           terminators while constructing it, but now we have to turn
+           it into a sequence of words.  */
+        for (tree node = rtcfg; node; node = TREE_CHAIN (node))
+          {
+            tree wordidx = TREE_PURPOSE (node);
+            if (!wordidx)
+              continue;
+
+            TREE_PURPOSE (node) = NULL_TREE;
+            TREE_CHAIN (node) = tree_cons (NULL_TREE,
+                                           fold_convert (vword_type, wordidx),
+                                           TREE_CHAIN (node));
+          }
+
+        /* Build the static initializer for the array with the CFG
+           representation for out-of-line checking.  */
+        tree init = build_constructor_from_list (NULL_TREE, rtcfg);
+        TREE_TYPE (init) = build_array_type_nelts (vword_type,
+                                                   CONSTRUCTOR_NELTS (init));
+        char buf[32];
+        ASM_GENERATE_INTERNAL_LABEL (buf, "Lhardcfg",
+                                     current_function_funcdef_no);
+        rtcfg = build_decl (UNKNOWN_LOCATION, VAR_DECL,
+                            get_identifier (buf),
+                            TREE_TYPE (init));
+        TREE_READONLY (rtcfg) = 1;
+        TREE_STATIC (rtcfg) = 1;
+        TREE_ADDRESSABLE (rtcfg) = 1;
+        TREE_USED (rtcfg) = 1;
+        DECL_ARTIFICIAL (rtcfg) = 1;
+        DECL_IGNORED_P (rtcfg) = 1;
+        DECL_INITIAL (rtcfg) = init;
+        make_decl_rtl (rtcfg);
+        varpool_node::finalize_decl (rtcfg);
+
+        /* Add the checker call to ckseq.  */
+        gcall *call_chk = gimple_build_call (builtin_decl_explicit
+                                             (BUILT_IN___HARDCFR_CHECK), 3,
+                                             build_int_cst (sizetype,
+                                                            num2idx (nblocks)),
+                                             build1 (ADDR_EXPR, vword_ptr,
+                                                     visited),
+                                             build1 (ADDR_EXPR, vword_ptr,
+                                                     rtcfg));
+        gimple_seq_add_stmt (&ckseq, call_chk);
+
+        gimple *clobber = gimple_build_assign (visited,
+                                               build_clobber
+                                               (TREE_TYPE (visited)));
+        gimple_seq_add_stmt (&ckseq, clobber);
+
+        /* If we have multiple exit edges, insert (copies of)
+           ckseq in all of them.  */
+        for (int i = chk_edges.length (); i--; )
+          {
+            gimple_seq seq = ckseq;
+            /* Copy the sequence, unless we're dealing with the
+               last edge (we're counting down to zero).  */
+            if (i || count_chkcall)
+              seq = gimple_seq_copy (seq);
+
+            edge e = chk_edges[i];
+
+            if (dump_file)
+              {
+                if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
+                  fprintf (dump_file,
+                           "Inserting out-of-line check in"
+                           " block %i's edge to exit.\n",
+                           e->src->index);
+                else
+                  fprintf (dump_file,
+                           "Inserting out-of-line check in"
+                           " block %i's edge to postcheck block %i.\n",
+                           e->src->index, e->dest->index);
+              }
+
+            insert_exit_check_on_edge (seq, e);
+
+            gcc_checking_assert (!bitmap_bit_p (chkcall_blocks, e->src->index));
+          }
+
+        sbitmap_iterator it;
+        unsigned i;
+        EXECUTE_IF_SET_IN_BITMAP (chkcall_blocks, 0, i, it)
+          {
+            basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
+
+            gimple_seq seq = ckseq;
+            gcc_checking_assert (count_chkcall > 0);
+            if (--count_chkcall)
+              seq = gimple_seq_copy (seq);
+
+            if (dump_file)
+              fprintf (dump_file,
+                       "Inserting out-of-line check before stmt in block %i.\n",
+                       bb->index);
+
+            insert_exit_check_in_block (seq, bb);
+          }
+
+        gcc_checking_assert (count_chkcall == 0);
+      }
+    else
+      {
+        /* Inline checking requires a single exit edge.  */
+        gimple *last = gimple_build_assign (visited,
+                                            build_clobber
+                                            (TREE_TYPE (visited)));
+        gimple_seq_add_stmt (&ckseq, last);
+
+        if (!count_chkcall)
+          {
+            edge e = single_pred_edge (EXIT_BLOCK_PTR_FOR_FN (cfun));
+
+            /* NOTE(review): the two dump messages below look
+               cross-wired -- the E->dest == EXIT case prints the
+               "out-of-line ... postcheck block" wording, while the
+               other prints "inline ... edge to exit".  Dump-only, no
+               behavioral impact; confirm against upstream history
+               before changing.  */
+            if (dump_file)
+              {
+                if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
+                  fprintf (dump_file,
+                           "Inserting out-of-line check in"
+                           " block %i's edge to postcheck block %i.\n",
+                           e->src->index, e->dest->index);
+                else
+                  fprintf (dump_file,
+                           "Inserting inline check in"
+                           " block %i's edge to exit.\n",
+                           e->src->index);
+              }
+
+            insert_exit_check_on_edge (ckseq, e);
+          }
+        else
+          {
+            gcc_checking_assert (count_chkcall == 1);
+
+            sbitmap_iterator it;
+            unsigned i;
+            EXECUTE_IF_SET_IN_BITMAP (chkcall_blocks, 0, i, it)
+              {
+                basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
+
+                gimple_seq seq = ckseq;
+                gcc_checking_assert (count_chkcall > 0);
+                if (--count_chkcall)
+                  seq = gimple_seq_copy (seq);
+
+                if (dump_file)
+                  fprintf (dump_file,
+                           "Inserting inline check before stmt in block %i.\n",
+                           bb->index);
+
+                insert_exit_check_in_block (seq, bb);
+              }
+
+            gcc_checking_assert (count_chkcall == 0);
+          }
+
+        /* The inserted ckseq computes CKFAIL at LAST.  Now we have to
+           conditionally trap on it.  */
+        basic_block insbb = gimple_bb (last);
+
+        /* Create a block with the unconditional trap.  */
+        basic_block trp = create_empty_bb (insbb);
+        gimple_stmt_iterator gsit = gsi_after_labels (trp);
+
+        gcall *trap = gimple_build_call (builtin_decl_explicit
+                                         (BUILT_IN_TRAP), 0);
+        gsi_insert_before (&gsit, trap, GSI_SAME_STMT);
+
+        if (BB_PARTITION (insbb))
+          BB_SET_PARTITION (trp, BB_COLD_PARTITION);
+
+        if (current_loops)
+          add_bb_to_loop (trp, current_loops->tree_root);
+
+        /* Insert a conditional branch to the trap block.  If the
+           conditional wouldn't be the last stmt, split the block.  */
+        gimple_stmt_iterator gsi = gsi_for_stmt (last);
+        if (!gsi_one_before_end_p (gsi))
+          split_block (gsi_bb (gsi), gsi_stmt (gsi));
+
+        gcond *cond = gimple_build_cond (NE_EXPR, ckfail,
+                                         fold_convert (TREE_TYPE (ckfail),
+                                                       boolean_false_node),
+                                         NULL, NULL);
+        gsi_insert_after (&gsi, cond, GSI_SAME_STMT);
+
+        /* Adjust the edges.  */
+        single_succ_edge (gsi_bb (gsi))->flags &= ~EDGE_FALLTHRU;
+        single_succ_edge (gsi_bb (gsi))->flags |= EDGE_FALSE_VALUE;
+        single_succ_edge (gsi_bb (gsi))->probability
+          = profile_probability::always ();
+        edge e = make_edge (gsi_bb (gsi), trp, EDGE_TRUE_VALUE);
+        e->probability = profile_probability::never ();
+        gcc_checking_assert (e->dest == trp);
+        gcc_checking_assert (!e->dest->count.initialized_p ());
+        e->dest->count = e->count ();
+
+        /* Set the trap's dominator after splitting.  */
+        if (dom_info_available_p (CDI_DOMINATORS))
+          set_immediate_dominator (CDI_DOMINATORS, trp, gimple_bb (last));
+      }
+
+    /* Insert initializers for visited at the entry.  Do this after
+       other insertions, to avoid messing with block numbers.  */
+    gimple_seq iseq = NULL;
+
+    gcall *vinit = gimple_build_call (builtin_decl_explicit
+                                      (BUILT_IN_MEMSET), 3,
+                                      build1 (ADDR_EXPR,
+                                              build_pointer_type
+                                              (TREE_TYPE (visited)),
+                                              visited),
+                                      integer_zero_node,
+                                      TYPE_SIZE_UNIT (TREE_TYPE (visited)));
+    gimple_seq_add_stmt (&iseq, vinit);
+
+    gsi_insert_seq_on_edge_immediate (single_succ_edge
+                                      (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
+                                      iseq);
+  }
+
+  /* Push onto RTCFG a (mask, index) pair to test for IBB when BB is
+     visited.  XSELF is to be the ENTRY or EXIT block (depending on
+     whether we're looking at preds or succs), to be remapped to BB
+     because we can't represent them, and there's no point in testing
+     them anyway.  Return true if no further blocks need to be visited
+     in the list, because we've already encountered a
+     self-reference.  */
+  bool
+  push_rtcfg_pair (basic_block ibb, basic_block bb,
+                   basic_block xself)
+  {
+    /* We don't have a bit to test for the entry and exit
+       blocks, but it is always visited, so we test for the
+       block itself, which gets us the right result and
+       enables the self-test optimization below.  */
+    if (ibb == xself)
+      ibb = bb;
+
+    tree mask, idx = vwordidx (ibb, &mask);
+    /* Combine masks with the same idx, but not if we're going
+       to optimize for self-test.  */
+    if (ibb != bb && TREE_PURPOSE (rtcfg)
+        && tree_int_cst_equal (idx, TREE_PURPOSE (rtcfg)))
+      TREE_VALUE (rtcfg) = int_const_binop (BIT_IOR_EXPR, mask,
+                                            TREE_VALUE (rtcfg));
+    else
+      rtcfg = tree_cons (idx, mask, rtcfg);
+
+    /* For self-tests (i.e., tests that the block itself was
+       also visited), testing anything else is pointless,
+       because it's a tautology, so just drop other edges.  */
+    if (ibb == bb)
+      {
+        while (TREE_PURPOSE (TREE_CHAIN (rtcfg)))
+          TREE_CHAIN (rtcfg) = TREE_CHAIN (TREE_CHAIN (rtcfg));
+        return true;
+      }
+
+    return false;
+  }
+
+  /* Add to CKSEQ stmts to clear CKPART if OBB is visited.  */
+  void
+  build_block_check (basic_block obb)
+  {
+    tree vobb = fold_convert (TREE_TYPE (ckblk),
+                              vindex (obb, &ckseq));
+    gassign *blkrunp = gimple_build_assign (ckblk, vobb);
+    gimple_seq_add_stmt (&ckseq, blkrunp);
+
+    gassign *blknotrunp = gimple_build_assign (ckinv,
+                                               EQ_EXPR,
+                                               ckblk,
+                                               fold_convert
+                                               (TREE_TYPE (ckblk),
+                                                boolean_false_node));
+    gimple_seq_add_stmt (&ckseq, blknotrunp);
+
+    gassign *andblk = gimple_build_assign (ckpart,
+                                           BIT_AND_EXPR,
+                                           ckpart, ckinv);
+    gimple_seq_add_stmt (&ckseq, andblk);
+  }
+
+  /* Add to BB code to set its bit in VISITED, and add to RTCFG or
+     CKSEQ the data or code needed to check BB's predecessors and
+     successors.  If CHECKPOINT, assume the block is a checkpoint,
+     whether or not it has an edge to EXIT.  If POSTCHECK, assume the
+     block post-dominates checkpoints and therefore no bitmap setting
+     or checks are to be performed in or for it.  Do NOT change the
+     CFG.  */
+  void visit (basic_block bb, bool checkpoint, bool postcheck)
+  {
+    /* Set the bit in VISITED when entering the block.  */
+    gimple_stmt_iterator gsi = gsi_after_labels (bb);
+    if (!postcheck)
+      gsi_insert_seq_before (&gsi, vset (bb), GSI_SAME_STMT);
+
+    if (rtcfg)
+      {
+        if (!postcheck)
+          {
+            /* Build a list of (index, mask) terminated by (NULL, 0).
+               Consolidate masks with the same index when they're
+               adjacent.  First, predecessors.  Count backwards, because
+               we're going to reverse the list.  The order shouldn't
+               matter, but let's not make it surprising.  */
+            for (int i = EDGE_COUNT (bb->preds); i--; )
+              if (push_rtcfg_pair (EDGE_PRED (bb, i)->src, bb,
+                                   ENTRY_BLOCK_PTR_FOR_FN (cfun)))
+                break;
+          }
+        rtcfg = tree_cons (NULL_TREE, build_int_cst (vword_type, 0), rtcfg);
+
+        if (!postcheck)
+          {
+            /* Then, successors.  */
+            if (!checkpoint
+                || !push_rtcfg_pair (EXIT_BLOCK_PTR_FOR_FN (cfun),
+                                     bb, EXIT_BLOCK_PTR_FOR_FN (cfun)))
+              for (int i = EDGE_COUNT (bb->succs); i--; )
+                if (push_rtcfg_pair (EDGE_SUCC (bb, i)->dest, bb,
+                                     EXIT_BLOCK_PTR_FOR_FN (cfun)))
+                  break;
+          }
+        rtcfg = tree_cons (NULL_TREE, build_int_cst (vword_type, 0), rtcfg);
+      }
+    else if (!postcheck)
+      {
+        /* Schedule test to fail if the block was reached but somehow none
+           of its predecessors were.  */
+        tree bit = fold_convert (TREE_TYPE (ckpart), vindex (bb, &ckseq));
+        gassign *blkrunp = gimple_build_assign (ckpart, bit);
+        gimple_seq_add_stmt (&ckseq, blkrunp);
+
+        for (int i = 0, e = EDGE_COUNT (bb->preds); i < e; i++)
+          build_block_check (EDGE_PRED (bb, i)->src);
+        gimple *orfailp = gimple_build_assign (ckfail, BIT_IOR_EXPR,
+                                               ckfail, ckpart);
+        gimple_seq_add_stmt (&ckseq, orfailp);
+
+        /* Likewise for successors.  */
+        gassign *blkruns = gimple_build_assign (ckpart, unshare_expr (bit));
+        gimple_seq_add_stmt (&ckseq, blkruns);
+
+        if (checkpoint)
+          build_block_check (EXIT_BLOCK_PTR_FOR_FN (cfun));
+        for (int i = 0, e = EDGE_COUNT (bb->succs); i < e; i++)
+          build_block_check (EDGE_SUCC (bb, i)->dest);
+
+        gimple *orfails = gimple_build_assign (ckfail, BIT_IOR_EXPR,
+                                               ckfail, ckpart);
+        gimple_seq_add_stmt (&ckseq, orfails);
+      }
+  }
+};
+
+/* Avoid checking before noreturn calls that are known (expected,
+   really) to finish by throwing an exception, rather than by ending
+   the program or looping forever.  Such functions have to be
+   annotated, with an attribute (expected_throw) or flag (ECF_XTHROW),
+   so that exception-raising functions, such as C++'s __cxa_throw,
+   __cxa_rethrow, and Ada's gnat_rcheck_*, gnat_reraise*,
+   ada.exception.raise_exception*, and the language-independent
+   unwinders could be detected here and handled differently from other
+   noreturn functions.  */
+static bool
+always_throwing_noreturn_call_p (gimple *stmt)
+{
+  /* A resx always (re)raises an exception; any other non-call stmt
+     does not qualify.  */
+  if (!is_a <gcall *> (stmt))
+    return is_a <gresx *> (stmt);
+
+  /* A call qualifies only if it is both noreturn and carries the
+     expected_throw annotation (GF_CALL_XTHROW / ECF_XTHROW).  */
+  gcall *call = as_a <gcall *> (stmt);
+  return (gimple_call_noreturn_p (call)
+          && gimple_call_expected_throw_p (call));
+}
+
+/* Control flow redundancy hardening: record the execution path, and
+   verify at exit that an expected path was taken.  */
+
+unsigned int
+pass_harden_control_flow_redundancy::execute (function *fun)
+{
+  /* Derive the per-kind checking policies from the command-line
+     flags; each later policy also requires the earlier ones.  */
+  bool const check_at_escaping_exceptions
+    = (flag_exceptions
+       && flag_harden_control_flow_redundancy_check_exceptions);
+  bool const check_before_noreturn_calls
+    = flag_harden_control_flow_redundancy_check_noreturn > HCFRNR_NEVER;
+  bool const check_before_nothrow_noreturn_calls
+    = (check_before_noreturn_calls
+       && flag_harden_control_flow_redundancy_check_noreturn >= HCFRNR_NOTHROW);
+  bool const check_before_throwing_noreturn_calls
+    = (flag_exceptions
+       && check_before_noreturn_calls
+       && flag_harden_control_flow_redundancy_check_noreturn > HCFRNR_NOTHROW);
+  bool const check_before_always_throwing_noreturn_calls
+    = (flag_exceptions
+       && check_before_noreturn_calls
+       && flag_harden_control_flow_redundancy_check_noreturn >= HCFRNR_ALWAYS);
+  basic_block bb;
+  basic_block bb_eh_cleanup = NULL;
+
+  /* Optionally skip functions that make no calls at all.  */
+  if (flag_harden_control_flow_redundancy_skip_leaf)
+    {
+      bool found_calls_p = false;
+
+      FOR_EACH_BB_FN (bb, fun)
+	{
+	  for (gimple_stmt_iterator gsi = gsi_last_bb (bb);
+	       !gsi_end_p (gsi); gsi_prev (&gsi))
+	    if (is_a <gcall *> (gsi_stmt (gsi)))
+	      {
+		found_calls_p = true;
+		break;
+	      }
+	  if (found_calls_p)
+	    break;
+	}
+
+      if (!found_calls_p)
+	{
+	  if (dump_file)
+	    fprintf (dump_file,
+		     "Disabling CFR for leaf function, as requested\n");
+
+	  return 0;
+	}
+    }
+
+  if (check_at_escaping_exceptions)
+    {
+      int lp_eh_cleanup = -1;
+
+      /* Record the preexisting blocks, to avoid visiting newly-created
+	 blocks.  */
+      auto_sbitmap to_visit (last_basic_block_for_fn (fun));
+      bitmap_clear (to_visit);
+
+      FOR_EACH_BB_FN (bb, fun)
+	bitmap_set_bit (to_visit, bb->index);
+
+      /* Scan the blocks for stmts with escaping exceptions, that
+	 wouldn't be denoted in the CFG, and associate them with an
+	 empty cleanup handler around the whole function.  Walk
+	 backwards, so that even when we split the block, [NOTE
+	 (review): this sentence is unfinished in the original;
+	 presumably: the stmts already scanned stay behind the
+	 iterator and are not revisited -- confirm upstream.]  */
+      sbitmap_iterator it;
+      unsigned i;
+      EXECUTE_IF_SET_IN_BITMAP (to_visit, 0, i, it)
+	{
+	  bb = BASIC_BLOCK_FOR_FN (fun, i);
+
+	  for (gimple_stmt_iterator gsi = gsi_last_bb (bb);
+	       !gsi_end_p (gsi); gsi_prev (&gsi))
+	    {
+	      gimple *stmt = gsi_stmt (gsi);
+	      if (!stmt_could_throw_p (fun, stmt))
+		continue;
+
+	      /* If it must not throw, or if it already has a handler,
+		 we need not worry about it.  */
+	      if (lookup_stmt_eh_lp (stmt) != 0)
+		continue;
+
+	      /* Don't split blocks at, nor add EH edges to, tail
+		 calls, we will add verification before the call
+		 anyway.  */
+	      if (is_a <gcall *> (stmt)
+		  && (gimple_call_must_tail_p (as_a <gcall *> (stmt))
+		      || gimple_call_tail_p (as_a <gcall *> (stmt))
+		      || returning_call_p (as_a <gcall *> (stmt))))
+		continue;
+
+	      if (!gsi_one_before_end_p (gsi))
+		split_block (bb, stmt);
+	      /* A resx or noreturn call needs not be associated with
+		 the cleanup handler if we're going to add checking
+		 before it.  We only test cases that didn't require
+		 block splitting because noreturn calls would always
+		 be at the end of blocks, and we test for zero
+		 successors because if there is an edge, it's not
+		 noreturn, as any EH edges would have already been
+		 caught by the lookup_stmt_eh_lp test above.  */
+	      else if (check_before_noreturn_calls
+		       && EDGE_COUNT (bb->succs) == 0
+		       && (is_a <gresx *> (stmt)
+			   ? check_before_always_throwing_noreturn_calls
+			   : (!is_a <gcall *> (stmt)
+			      || !gimple_call_noreturn_p (stmt))
+			   ? (gcc_unreachable (), false)
+			   : (!flag_exceptions
+			      || gimple_call_nothrow_p (as_a <gcall *> (stmt)))
+			   ? check_before_nothrow_noreturn_calls
+			   : always_throwing_noreturn_call_p (stmt)
+			   ? check_before_always_throwing_noreturn_calls
+			   : check_before_throwing_noreturn_calls))
+		{
+		  if (dump_file)
+		    {
+		      fprintf (dump_file,
+			       "Bypassing cleanup for noreturn stmt"
+			       " in block %i:\n",
+			       bb->index);
+		      print_gimple_stmt (dump_file, stmt, 0);
+		    }
+		  continue;
+		}
+
+	      /* Create the function-wide empty EH cleanup block
+		 lazily, on the first stmt that needs it.  */
+	      if (!bb_eh_cleanup)
+		{
+		  bb_eh_cleanup = create_empty_bb (bb);
+		  if (dom_info_available_p (CDI_DOMINATORS))
+		    set_immediate_dominator (CDI_DOMINATORS, bb_eh_cleanup, bb);
+		  if (current_loops)
+		    add_bb_to_loop (bb_eh_cleanup, current_loops->tree_root);
+
+		  /* Make the new block an EH cleanup for the call.  */
+		  eh_region new_r = gen_eh_region_cleanup (NULL);
+		  eh_landing_pad lp = gen_eh_landing_pad (new_r);
+		  tree label = gimple_block_label (bb_eh_cleanup);
+		  lp->post_landing_pad = label;
+		  EH_LANDING_PAD_NR (label) = lp_eh_cleanup = lp->index;
+
+		  /* Just propagate the exception.
+		     We will later insert the verifier call.  */
+		  gimple_stmt_iterator ehgsi;
+		  ehgsi = gsi_after_labels (bb_eh_cleanup);
+		  gresx *resx = gimple_build_resx (new_r->index);
+		  gsi_insert_before (&ehgsi, resx, GSI_SAME_STMT);
+
+		  if (dump_file)
+		    fprintf (dump_file,
+			     "Created cleanup block %i:\n",
+			     bb_eh_cleanup->index);
+		}
+	      /* Otherwise keep the cleanup block's dominator up to
+		 date as more throwing blocks are associated with it.  */
+	      else if (dom_info_available_p (CDI_DOMINATORS))
+		{
+		  basic_block immdom;
+		  immdom = get_immediate_dominator (CDI_DOMINATORS,
+						    bb_eh_cleanup);
+		  if (!dominated_by_p (CDI_DOMINATORS, bb, immdom))
+		    {
+		      immdom = nearest_common_dominator (CDI_DOMINATORS,
+							 immdom, bb);
+		      set_immediate_dominator (CDI_DOMINATORS,
+					       bb_eh_cleanup, immdom);
+		    }
+		}
+
+	      if (dump_file)
+		{
+		  fprintf (dump_file,
+			   "Associated cleanup block with stmt in block %i:\n",
+			   bb->index);
+		  print_gimple_stmt (dump_file, stmt, 0);
+		}
+
+	      add_stmt_to_eh_lp (stmt, lp_eh_cleanup);
+	      /* Finally, wire the EH cleanup block into the CFG.  */
+	      edge neeh = make_eh_edges (stmt);
+	      neeh->probability = profile_probability::never ();
+	      gcc_checking_assert (neeh->dest == bb_eh_cleanup);
+	      if (neeh->dest->count.initialized_p ())
+		neeh->dest->count += neeh->count ();
+	      else
+		neeh->dest->count = neeh->count ();
+	    }
+	}
+
+      if (bb_eh_cleanup)
+	{
+	  /* A cfg_cleanup after bb_eh_cleanup makes for a more compact
+	     rtcfg, and it avoids bb numbering differences when we split
+	     blocks because of trailing debug insns only.  */
+	  cleanup_tree_cfg ();
+	  gcc_checking_assert (EDGE_COUNT (bb_eh_cleanup->succs) == 0);
+	}
+    }
+
+  /* These record blocks with calls that are to be preceded by
+     checkpoints, such as noreturn calls (if so chosen), must-tail
+     calls, potential early-marked tail calls, and returning calls (if
+     so chosen).  */
+  int count_chkcall = 0;
+  auto_sbitmap chkcall_blocks (last_basic_block_for_fn (fun));
+  bitmap_clear (chkcall_blocks);
+
+  /* We wish to add verification at blocks without successors, such as
+     noreturn calls (raising or not) and the reraise at the cleanup
+     block, but not other reraises: they will go through the cleanup
+     block.  */
+  if (check_before_noreturn_calls)
+    FOR_EACH_BB_FN (bb, fun)
+      {
+	gimple_stmt_iterator gsi = gsi_last_bb (bb);
+	if (gsi_end_p (gsi))
+	  continue;
+	gimple *stmt = gsi_stmt (gsi);
+
+	if (EDGE_COUNT (bb->succs) == 0)
+	  {
+	    /* A stmt at the end of a block without any successors is
+	       either a resx or a noreturn call without a local
+	       handler.  Check that it's one of the desired
+	       checkpoints.  */
+	    if (flag_exceptions && is_a <gresx *> (stmt)
+		? (check_before_always_throwing_noreturn_calls
+		   || bb == bb_eh_cleanup)
+		: (!is_a <gcall *> (stmt)
+		   || !gimple_call_noreturn_p (stmt))
+		? (stmt_can_make_abnormal_goto (stmt)
+		   /* ??? Check before indirect nonlocal goto, or
+		      calls thereof?  */
+		   ? false
+		   /* Catch cases in which successors would be
+		      expected.  */
+		   : (gcc_unreachable (), false))
+		: (!flag_exceptions
+		   || gimple_call_nothrow_p (as_a <gcall *> (stmt)))
+		? check_before_nothrow_noreturn_calls
+		: always_throwing_noreturn_call_p (stmt)
+		? check_before_always_throwing_noreturn_calls
+		: check_before_throwing_noreturn_calls)
+	      {
+		if (dump_file)
+		  {
+		    fprintf (dump_file,
+			     "Scheduling check before stmt"
+			     " in succ-less block %i:\n",
+			     bb->index);
+		    print_gimple_stmt (dump_file, stmt, 0);
+		  }
+
+		if (bitmap_set_bit (chkcall_blocks, bb->index))
+		  count_chkcall++;
+		else
+		  gcc_unreachable ();
+	      }
+	    continue;
+	  }
+
+	/* If there are no exceptions, it would seem like any noreturn
+	   call must have zero successor edges, but __builtin_return
+	   gets successor edges.  We don't want to handle it here, it
+	   will be dealt with in sibcall_search_preds.  Otherwise,
+	   check for blocks without non-EH successors, but skip those
+	   with resx stmts and edges (i.e., those other than that in
+	   bb_eh_cleanup), since those will go through bb_eh_cleanup,
+	   that will have been counted as noreturn above because it
+	   has no successors.  */
+	gcc_checking_assert (bb != bb_eh_cleanup
+			     || !check_at_escaping_exceptions);
+	if (flag_exceptions && is_a <gresx *> (stmt)
+	    ? check_before_always_throwing_noreturn_calls
+	    : (!is_a <gcall *> (stmt)
+	       || !gimple_call_noreturn_p (stmt))
+	    ? false
+	    : (!flag_exceptions
+	       || gimple_call_nothrow_p (as_a <gcall *> (stmt)))
+	    ? false /* rather than check_before_nothrow_noreturn_calls */
+	    : always_throwing_noreturn_call_p (stmt)
+	    ? check_before_always_throwing_noreturn_calls
+	    : check_before_throwing_noreturn_calls)
+	  {
+	    gcc_checking_assert (single_succ_p (bb)
+				 && (single_succ_edge (bb)->flags & EDGE_EH));
+
+	    if (dump_file)
+	      {
+		fprintf (dump_file,
+			 "Scheduling check before stmt"
+			 " in EH-succ block %i:\n",
+			 bb->index);
+		print_gimple_stmt (dump_file, stmt, 0);
+	      }
+
+	    if (bitmap_set_bit (chkcall_blocks, bb->index))
+	      count_chkcall++;
+	    else
+	      gcc_unreachable ();
+	  }
+      }
+  /* Even without noreturn checking, the cleanup block's reraise must
+     be preceded by a check.  */
+  else if (bb_eh_cleanup)
+    {
+      if (bitmap_set_bit (chkcall_blocks, bb_eh_cleanup->index))
+	count_chkcall++;
+      else
+	gcc_unreachable ();
+    }
+
+  gcc_checking_assert (!bb_eh_cleanup
+		       || bitmap_bit_p (chkcall_blocks, bb_eh_cleanup->index));
+
+  /* If we don't have edges to exit nor noreturn calls (including the
+     cleanup reraise), then we may skip instrumentation: that would
+     amount to a function that ends with an infinite loop.  */
+  if (!count_chkcall
+      && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (fun)->preds) == 0)
+    {
+      if (dump_file)
+	fprintf (dump_file,
+		 "Disabling CFR, no exit paths to check\n");
+
+      return 0;
+    }
+
+  /* Search for must-tail calls, early-marked potential tail calls,
+     and, if requested, returning calls.  As we introduce early
+     checks, [NOTE(review): this sentence is unfinished in the
+     original; presumably: the edges and blocks to be checked are
+     accumulated in CHK_EDGES / POSTCHK_BLOCKS -- confirm upstream.]  */
+  int count_postchk = 0;
+  auto_sbitmap postchk_blocks (last_basic_block_for_fn (fun));
+  bitmap_clear (postchk_blocks);
+  chk_edges_t chk_edges;
+  hardcfr_sibcall_search_preds (EXIT_BLOCK_PTR_FOR_FN (fun), chk_edges,
+				count_chkcall, chkcall_blocks,
+				count_postchk, postchk_blocks,
+				NULL);
+
+  rt_bb_visited vstd (chk_edges.length () + count_chkcall);
+
+  auto_sbitmap combined_blocks (last_basic_block_for_fn (fun));
+  bitmap_copy (combined_blocks, chkcall_blocks);
+  int i;
+  edge *e;
+  FOR_EACH_VEC_ELT (chk_edges, i, e)
+    if (!bitmap_set_bit (combined_blocks, (*e)->src->index))
+      /* There may be multiple chk_edges with the same src block;
+	 guard against overlaps with chkcall_blocks only.  */
+      gcc_assert (!bitmap_bit_p (chkcall_blocks, (*e)->src->index));
+
+  /* Visit blocks in index order, because building rtcfg depends on
+     that.  Blocks must be compact, which the cleanup_cfg requirement
+     ensures.  This would also enable FOR_EACH_BB_FN to be used to
+     iterate in index order, but bb_eh_cleanup block splits and
+     insertions changes that.  */
+  gcc_checking_assert (n_basic_blocks_for_fn (fun)
+		       == last_basic_block_for_fn (fun));
+  for (int i = NUM_FIXED_BLOCKS; i < n_basic_blocks_for_fn (fun); i++)
+    {
+      bb = BASIC_BLOCK_FOR_FN (fun, i);
+      gcc_checking_assert (bb->index == i);
+      vstd.visit (bb, bitmap_bit_p (combined_blocks, i),
+		  bitmap_bit_p (postchk_blocks, i));
+    }
+
+  vstd.check (chk_edges, count_chkcall, chkcall_blocks);
+
+  return
+    TODO_update_ssa
+    | TODO_cleanup_cfg
+    | TODO_verify_il;
+}
+
+/* Instantiate a hardcfr pass object for the pass manager in
+   context CTXT; ownership passes to the caller.  */
+
+gimple_opt_pass *
+make_pass_harden_control_flow_redundancy (gcc::context *ctxt)
+{
+  return new pass_harden_control_flow_redundancy (ctxt);
+}
diff --git a/gcc/gimple.cc b/gcc/gimple.cc
index 46f2878..7924d90 100644
--- a/gcc/gimple.cc
+++ b/gcc/gimple.cc
@@ -399,6 +399,10 @@ gimple_build_call_from_tree (tree t, tree fnptrtype)
gimple_call_set_from_thunk (call, CALL_FROM_THUNK_P (t));
gimple_call_set_va_arg_pack (call, CALL_EXPR_VA_ARG_PACK (t));
gimple_call_set_nothrow (call, TREE_NOTHROW (t));
+ if (fndecl)
+ gimple_call_set_expected_throw (call,
+ flags_from_decl_or_type (fndecl)
+ & ECF_XTHROW);
gimple_call_set_by_descriptor (call, CALL_EXPR_BY_DESCRIPTOR (t));
copy_warning (call, t);
@@ -1550,6 +1554,8 @@ gimple_call_flags (const gimple *stmt)
if (stmt->subcode & GF_CALL_NOTHROW)
flags |= ECF_NOTHROW;
+ if (stmt->subcode & GF_CALL_XTHROW)
+ flags |= ECF_XTHROW;
if (stmt->subcode & GF_CALL_BY_DESCRIPTOR)
flags |= ECF_BY_DESCRIPTOR;
diff --git a/gcc/gimple.h b/gcc/gimple.h
index 2d0ac10..1b0cd4b 100644
--- a/gcc/gimple.h
+++ b/gcc/gimple.h
@@ -150,6 +150,7 @@ enum gf_mask {
GF_CALL_BY_DESCRIPTOR = 1 << 10,
GF_CALL_NOCF_CHECK = 1 << 11,
GF_CALL_FROM_NEW_OR_DELETE = 1 << 12,
+ GF_CALL_XTHROW = 1 << 13,
GF_OMP_PARALLEL_COMBINED = 1 << 0,
GF_OMP_TASK_TASKLOOP = 1 << 0,
GF_OMP_TASK_TASKWAIT = 1 << 1,
@@ -3561,6 +3562,28 @@ gimple_call_nothrow_p (gcall *s)
return (gimple_call_flags (s) & ECF_NOTHROW) != 0;
}
+/* If EXPECTED_THROW_P is true, GIMPLE_CALL S is a call that is known
+ to be more likely to throw than to run forever, terminate the
+ program or return by other means. */
+
+static inline void
+gimple_call_set_expected_throw (gcall *s, bool expected_throw_p)
+{
+ if (expected_throw_p)
+ s->subcode |= GF_CALL_XTHROW;
+ else
+ s->subcode &= ~GF_CALL_XTHROW;
+}
+
+/* Return true if S is a call that is more likely to end by
+ propagating an exception than by other means. */
+
+static inline bool
+gimple_call_expected_throw_p (gcall *s)
+{
+ return (gimple_call_flags (s) & ECF_XTHROW) != 0;
+}
+
/* If FOR_VAR is true, GIMPLE_CALL S is a call to builtin_alloca that
is known to be emitted for VLA objects. Those are wrapped by
stack_save/stack_restore calls and hence can't lead to unbounded
diff --git a/gcc/params.opt b/gcc/params.opt
index fffa8b1..f1202ab 100644
--- a/gcc/params.opt
+++ b/gcc/params.opt
@@ -174,6 +174,14 @@ Maximum number of arrays per SCoP.
Common Joined UInteger Var(param_graphite_max_nb_scop_params) Init(10) Param Optimization
Maximum number of parameters in a SCoP.
+-param=hardcfr-max-blocks=
+Common Joined UInteger Var(param_hardcfr_max_blocks) Init(0) Param Optimization
+Maximum number of blocks for -fharden-control-flow-redundancy.
+
+-param=hardcfr-max-inline-blocks=
+Common Joined UInteger Var(param_hardcfr_max_inline_blocks) Init(16) Param Optimization
+Maximum number of blocks for in-line -fharden-control-flow-redundancy.
+
-param=hash-table-verification-limit=
Common Joined UInteger Var(param_hash_table_verification_limit) Init(10) Param
The number of elements for which hash table verification is done for each searched element.
diff --git a/gcc/passes.def b/gcc/passes.def
index df7965d..1e1950b 100644
--- a/gcc/passes.def
+++ b/gcc/passes.def
@@ -193,6 +193,7 @@ along with GCC; see the file COPYING3. If not see
NEXT_PASS (pass_omp_device_lower);
NEXT_PASS (pass_omp_target_link);
NEXT_PASS (pass_adjust_alignment);
+ NEXT_PASS (pass_harden_control_flow_redundancy);
NEXT_PASS (pass_all_optimizations);
PUSH_INSERT_PASSES_WITHIN (pass_all_optimizations)
NEXT_PASS (pass_remove_cgraph_callee_edges);
diff --git a/gcc/testsuite/c-c++-common/harden-cfr-noret-never-O0.c b/gcc/testsuite/c-c++-common/harden-cfr-noret-never-O0.c
new file mode 100644
index 0000000..a6992eb
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/harden-cfr-noret-never-O0.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -O0 -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that we don't insert checking before noreturn calls. -O0 is tested
+ separately because h is not found to be noreturn without optimization. */
+
+#include "torture/harden-cfr-noret.c"
+
+/* No out-of-line checks. */
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 0 "hardcfr" } } */
+/* Only one inline check at the end of f and of h2. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "hardcfr" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt-always.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt-always.c
new file mode 100644
index 0000000..26c0f27
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt-always.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=always -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check the noreturn handling of a builtin call with always. */
+
+#include "harden-cfr-abrt.c"
+
+/* Out-of-line checking, before both builtin_abort and return in f. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */
+/* Inline checking before builtin_abort in g. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt-never.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt-never.c
new file mode 100644
index 0000000..a9eca98
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt-never.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check the noreturn handling of a builtin call with never. */
+
+#include "harden-cfr-abrt.c"
+
+/* No out-of-line checking. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 0 "hardcfr" } } */
+/* Inline checking only before return in f. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt-no-xthrow.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt-no-xthrow.c
new file mode 100644
index 0000000..eb7589f
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt-no-xthrow.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=no-xthrow -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check the noreturn handling of a builtin call with no-xthrow. */
+
+#include "harden-cfr-abrt.c"
+
+/* Out-of-line checking, before both builtin_abort and return in f. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */
+/* Inline checking before builtin_abort in g. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt-nothrow.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt-nothrow.c
new file mode 100644
index 0000000..24363bd
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt-nothrow.c
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=nothrow -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check the noreturn handling of a builtin call with =nothrow. */
+
+#include "harden-cfr-abrt.c"
+
+/* Out-of-line checking, before both builtin_abort and return in f. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */
+/* Inline checking before builtin_abort in g. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt.c
new file mode 100644
index 0000000..1ed7273
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-abrt.c
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check the noreturn handling of a builtin call. */
+
+int f(int i) {
+ if (!i)
+ __builtin_abort ();
+ return i;
+}
+
+int g() {
+ __builtin_abort ();
+}
+
+/* Out-of-line checking, before both builtin_abort and return in f. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */
+/* Inline checking before builtin_abort in g. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-always.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-always.c
new file mode 100644
index 0000000..6e0767a
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-always.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=always -fdump-tree-hardcfr --param hardcfr-max-blocks=9 --param hardcfr-max-inline-blocks=5 -ffat-lto-objects -w" } */
+
+/* Check the instrumentation and the parameters with checking before
+ all noreturn calls. */
+
+#include "harden-cfr.c"
+
+/* Inlined checking thus trap for f. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
+/* Out-of-line checking for g (param), and before both noreturn calls in main. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 3 "hardcfr" } } */
+/* No checking for h (too many blocks). */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-always.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-always.c
new file mode 100644
index 0000000..779896c
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-always.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=always -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that, even enabling all checks before noreturn calls (leaving
+ returning calls enabled), we get checks before __builtin_return without
+ duplication (__builtin_return is both noreturn and a returning call). */
+
+#include "harden-cfr-bret.c"
+
+/* Out-of-line checking, before both builtin_return and return in f. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */
+/* Inline checking before builtin_return in g. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-never.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-never.c
new file mode 100644
index 0000000..49ce17f
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-never.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that, even disabling checks before noreturn calls (leaving
+ returning calls enabled), we get checks before __builtin_return without
+ duplication (__builtin_return is both noreturn and a returning call). */
+
+#include "harden-cfr-bret.c"
+
+/* Out-of-line checking, before both builtin_return and return in f. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */
+/* Inline checking before builtin_return in g. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-no-xthrow.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-no-xthrow.c
new file mode 100644
index 0000000..78e5bf4
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-no-xthrow.c
@@ -0,0 +1,14 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=no-xthrow -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that, even enabling checks before no-xthrow-throwing noreturn calls
+ (leaving returning calls enabled), we get checks before __builtin_return
+ without duplication (__builtin_return is both noreturn and a returning
+ call). */
+
+#include "harden-cfr-bret.c"
+
+/* Out-of-line checking, before both builtin_return and return in f. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */
+/* Inline checking before builtin_return in g. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-noopt.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-noopt.c
new file mode 100644
index 0000000..1512614
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-noopt.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fno-hardcfr-check-returning-calls -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that, even disabling checks before both noreturn and returning
+ calls, we still get checks before __builtin_return. */
+
+#include "harden-cfr-bret.c"
+
+/* Out-of-line checking, before both builtin_return and return in f. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */
+/* Inline checking before builtin_return in g. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-noret.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-noret.c
new file mode 100644
index 0000000..fd95bb7
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-noret.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fno-hardcfr-check-returning-calls -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that, even disabling checks before returning calls (leaving noreturn
+ calls enabled), we still get checks before __builtin_return. */
+
+#include "harden-cfr-bret.c"
+
+/* Out-of-line checking, before both builtin_return and return in f. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */
+/* Inline checking before builtin_return in g. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-nothrow.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-nothrow.c
new file mode 100644
index 0000000..c5c3612
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-nothrow.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=nothrow -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that, even enabling checks before nothrow noreturn calls (leaving
+ returning calls enabled), we get checks before __builtin_return without
+ duplication (__builtin_return is both noreturn and a returning call). */
+
+#include "harden-cfr-bret.c"
+
+/* Out-of-line checking, before both builtin_return and return in f. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */
+/* Inline checking before builtin_return in g. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-retcl.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-retcl.c
new file mode 100644
index 0000000..137dfbb
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret-retcl.c
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that, even disabling checks before noreturn calls (leaving returning
+ calls enabled), we still get checks before __builtin_return. */
+
+#include "harden-cfr-bret.c"
+
+/* Out-of-line checking, before both builtin_return and return in f. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */
+/* Inline checking before builtin_return in g. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-bret.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret.c
new file mode 100644
index 0000000..b459ff6
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-bret.c
@@ -0,0 +1,17 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+int f(int i) {
+ if (i)
+ __builtin_return (&i);
+ return i;
+}
+
+int g(int i) {
+ __builtin_return (&i);
+}
+
+/* Out-of-line checking, before both builtin_return and return in f. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */
+/* Inline checking before builtin_return in g. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-never.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-never.c
new file mode 100644
index 0000000..7fe0bb4
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-never.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fdump-tree-hardcfr --param hardcfr-max-blocks=9 --param hardcfr-max-inline-blocks=5 -ffat-lto-objects -w" } */
+
+/* Check the instrumentation and the parameters without checking before
+ noreturn calls. */
+
+#include "harden-cfr.c"
+
+/* Inlined checking thus trap for f. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
+/* Out-of-line checking for g (param). */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 1 "hardcfr" } } */
+/* No checking for h (too many blocks) or main (no edges to exit block). */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-no-xthrow.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-no-xthrow.c
new file mode 100644
index 0000000..56ed9d5
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-no-xthrow.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=no-xthrow -fdump-tree-hardcfr --param hardcfr-max-blocks=9 --param hardcfr-max-inline-blocks=5 -ffat-lto-objects -w" } */
+
+/* Check the instrumentation and the parameters with checking before
+ all noreturn calls that aren't expected to throw. */
+
+#include "harden-cfr.c"
+
+/* Inlined checking thus trap for f. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
+/* Out-of-line checking for g (param), and before both noreturn calls in main. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 3 "hardcfr" } } */
+/* No checking for h (too many blocks). */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-noret-never.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-noret-never.c
new file mode 100644
index 0000000..8bd2d13
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-noret-never.c
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that we don't insert checking before noreturn calls. -O0 is tested
+ separately because h is not found to be noreturn without optimization, which
+ affects codegen for h2, so h2 is omitted here at -O0. */
+
+#if !__OPTIMIZE__
+# define OMIT_H2
+#endif
+
+#include "harden-cfr-noret.c"
+
+
+/* No out-of-line checks. */
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 0 "hardcfr" } } */
+/* Only one inline check at the end of f. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-noret-noexcept.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-noret-noexcept.c
new file mode 100644
index 0000000..a804a6c
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-noret-noexcept.c
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=nothrow -fno-exceptions -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that -fno-exceptions makes for implicit nothrow in noreturn
+ handling. */
+
+#define ATTR_NOTHROW_OPT
+
+#include "harden-cfr-noret.c"
+
+/* One out-of-line check before the noreturn call in f, and another at the end
+ of f. */
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 2 "hardcfr" } } */
+/* One inline check in h, before the noreturn call, and another in h2, before
+ or after the call, depending on noreturn detection. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "hardcfr" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-noret-nothrow.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-noret-nothrow.c
new file mode 100644
index 0000000..f390cfd
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-noret-nothrow.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=nothrow -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that we insert checking before nothrow noreturn calls. */
+
+#include "harden-cfr-noret.c"
+
+/* One out-of-line check before the noreturn call in f, and another at the end
+ of f. */
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 2 "hardcfr" } } */
+/* One inline check in h, before the noreturn call, and another in h2, before
+ or after the call, depending on noreturn detection. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "hardcfr" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-noret.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-noret.c
new file mode 100644
index 0000000..fdd8031
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-noret.c
@@ -0,0 +1,38 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=always -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that we insert checking before all noreturn calls. */
+
+#ifndef ATTR_NOTHROW_OPT /* Overridden in harden-cfr-noret-noexcept. */
+#define ATTR_NOTHROW_OPT __attribute__ ((__nothrow__))
+#endif
+
+extern void __attribute__ ((__noreturn__)) ATTR_NOTHROW_OPT g (void);
+
+void f(int i) {
+ if (i)
+ /* Out-of-line checks here... */
+ g ();
+ /* ... and here. */
+}
+
+void __attribute__ ((__noinline__, __noclone__))
+h(void) {
+ /* Inline check here. */
+ g ();
+}
+
+#ifndef OMIT_H2 /* from harden-cfr-noret-never. */
+void h2(void) {
+ /* Inline check either here, whether because of noreturn or tail call... */
+ h ();
+ /* ... or here, if not optimizing. */
+}
+#endif
+
+/* One out-of-line check before the noreturn call in f, and another at the end
+ of f. */
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 2 "hardcfr" } } */
+/* One inline check in h, before the noreturn call, and another in h2, before
+ or after the call, depending on noreturn detection. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "hardcfr" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-notail.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-notail.c
new file mode 100644
index 0000000..6d11487
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-notail.c
@@ -0,0 +1,8 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fno-hardcfr-check-exceptions -fno-hardcfr-check-returning-calls -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+#include "harden-cfr-tail.c"
+
+/* Inline checking after the calls, disabling tail calling. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 5 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "Inserting inline check before stmt" 0 "hardcfr" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-nothrow.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-nothrow.c
new file mode 100644
index 0000000..da54fc0
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-nothrow.c
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=nothrow -fdump-tree-hardcfr --param hardcfr-max-blocks=9 --param hardcfr-max-inline-blocks=5 -ffat-lto-objects -w" } */
+
+/* Check the instrumentation and the parameters without checking before
+ nothrow noreturn calls. */
+
+#include "harden-cfr.c"
+
+/* Inlined checking thus trap for f. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
+/* Out-of-line checking for g (param), and before both noreturn calls in main. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 3 "hardcfr" } } */
+/* No checking for h (too many blocks). */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-returning.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-returning.c
new file mode 100644
index 0000000..550b02c
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-returning.c
@@ -0,0 +1,35 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-returning-calls -fno-exceptions -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that we insert checks before returning calls and alternate paths, even
+ at -O0, because of the explicit command-line flag. */
+
+void g (void);
+void g2 (void);
+void g3 (void);
+
+void f (int i) {
+ if (!i)
+ /* Out-of-line checks here... */
+ g ();
+ else if (i > 0)
+ /* here... */
+ g2 ();
+ /* else */
+ /* and in the implicit else here. */
+}
+
+void f2 (int i) {
+ if (!i)
+ /* Out-of-line check here... */
+ g ();
+ else if (i > 0)
+ /* here... */
+ g2 ();
+ else
+ /* and here. */
+ g3 ();
+}
+
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 6 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 0 "hardcfr" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-skip-leaf.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-skip-leaf.c
new file mode 100644
index 0000000..85ecaa0
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-skip-leaf.c
@@ -0,0 +1,10 @@
+/* { dg-do run } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-skip-leaf -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Test skipping instrumentation of leaf functions. */
+
+#include "harden-cfr.c"
+
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 0 "hardcfr" } } */
+/* Only main isn't leaf. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 2 "hardcfr" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr-tail.c b/gcc/testsuite/c-c++-common/torture/harden-cfr-tail.c
new file mode 100644
index 0000000..d5467ea
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr-tail.c
@@ -0,0 +1,52 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-returning-calls -fno-hardcfr-check-exceptions -fdump-tree-hardcfr -ffat-lto-objects -Wno-return-type" } */
+
+/* Check that we insert CFR checking so as to not disrupt tail calls.
+ Mandatory tail calls are not available in C, and optimizing calls as tail
+ calls only takes place after hardcfr, so we insert checking before calls
+ followed by copies and return stmts with the same return value, that might
+ (or might not) end up optimized to tail calls. */
+
+extern int g (int i);
+
+int f1(int i) {
+ /* Inline check before the returning call. */
+ return g (i);
+}
+
+extern void g2 (int i);
+
+void f2(int i) {
+ /* Inline check before the returning call, that ignores the returned value,
+ matching the value-less return. */
+ g2 (i);
+ return;
+}
+
+void f3(int i) {
+ /* Inline check before the returning call. */
+ g (i);
+}
+
+void f4(int i) {
+ if (i)
+ /* Out-of-line check before the returning call. */
+ return g2 (i);
+ /* Out-of-line check before implicit return. */
+}
+
+int f5(int i) {
+ /* Not regarded as a returning call, returning value other than callee's
+ returned value. */
+ g (i);
+ /* Inline check after the non-returning call. */
+ return i;
+}
+
+/* Out-of-line checks in f4, before returning calls and before return. */
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 2 "hardcfr" } } */
+/* Inline checking in all other functions. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 4 "hardcfr" } } */
+/* Check before tail-call in all but f5, but f4 is out-of-line. */
+/* { dg-final { scan-tree-dump-times "Inserting inline check before stmt" 3 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "Inserting out-of-line check before stmt" 1 "hardcfr" } } */
diff --git a/gcc/testsuite/c-c++-common/torture/harden-cfr.c b/gcc/testsuite/c-c++-common/torture/harden-cfr.c
new file mode 100644
index 0000000..73824c6
--- /dev/null
+++ b/gcc/testsuite/c-c++-common/torture/harden-cfr.c
@@ -0,0 +1,84 @@
+/* { dg-do run } */
+/* { dg-options "-fharden-control-flow-redundancy -fdump-tree-hardcfr --param hardcfr-max-blocks=9 --param hardcfr-max-inline-blocks=5 -ffat-lto-objects" } */
+
+/* Check the instrumentation and the parameters. */
+
+int
+f (int i, int j)
+{
+ if (i < j)
+ return 2 * i;
+ else
+ return 3 * j;
+}
+
+int
+g (unsigned i, int j)
+{
+ switch (i)
+ {
+ case 0:
+ return j * 2;
+
+ case 1:
+ return j * 3;
+
+ case 2:
+ return j * 5;
+
+ default:
+ return j * 7;
+ }
+}
+
+int
+h (unsigned i, int j) /* { dg-warning "has more than 9 blocks, the requested maximum" } */
+{
+ switch (i)
+ {
+ case 0:
+ return j * 2;
+
+ case 1:
+ return j * 3;
+
+ case 2:
+ return j * 5;
+
+ case 3:
+ return j * 7;
+
+ case 4:
+ return j * 11;
+
+ case 5:
+ return j * 13;
+
+ case 6:
+ return j * 17;
+
+ case 7:
+ return j * 19;
+
+ default:
+ return j * 23;
+ }
+}
+
+int
+main (int argc, char *argv[])
+{
+ if (f (1, 2) != 2 || g (2, 5) != 25 || h (4, 3) != 33
+ || argc < 0)
+ __builtin_abort ();
+ /* Call exit, instead of returning, to avoid an edge to the exit block and
+ thus implicitly disable hardening of main, when checking before noreturn
+ calls is disabled. */
+ __builtin_exit (0);
+}
+
+/* Inlined checking thus trap for f. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 1 "hardcfr" } } */
+/* Out-of-line checking for g (param), and before both noreturn calls in main. */
+/* { dg-final { scan-tree-dump-times "__hardcfr_check" 3 "hardcfr" } } */
+/* No checking for h (too many blocks). */
diff --git a/gcc/testsuite/g++.dg/harden-cfr-throw-always-O0.C b/gcc/testsuite/g++.dg/harden-cfr-throw-always-O0.C
new file mode 100644
index 0000000..e3c109b
--- /dev/null
+++ b/gcc/testsuite/g++.dg/harden-cfr-throw-always-O0.C
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=always -fdump-tree-hardcfr -ffat-lto-objects -O0" } */
+
+/* Check that we insert cleanups for checking around the bodies of
+ maybe-throwing functions, and also checking before noreturn
+ calls. h2 and h2b get an extra resx without ehcleanup. */
+
+#define NO_OPTIMIZE
+
+#include "torture/harden-cfr-throw.C"
+
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 16 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "builtin_trap" 1 "hardcfr" } } */
diff --git a/gcc/testsuite/g++.dg/harden-cfr-throw-returning-O0.C b/gcc/testsuite/g++.dg/harden-cfr-throw-returning-O0.C
new file mode 100644
index 0000000..207bdb7
--- /dev/null
+++ b/gcc/testsuite/g++.dg/harden-cfr-throw-returning-O0.C
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -foptimize-sibling-calls -fdump-tree-hardcfr -O0" } */
+
+/* -fhardcfr-check-returning-calls gets implicitly disabled because,
+ at -O0, -foptimize-sibling-calls has no effect. */
+
+#define NO_OPTIMIZE
+
+#include "torture/harden-cfr-throw.C"
+
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 12 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "builtin_trap" 1 "hardcfr" } } */
diff --git a/gcc/testsuite/g++.dg/harden-cfr-throw-returning-enabled-O0.C b/gcc/testsuite/g++.dg/harden-cfr-throw-returning-enabled-O0.C
new file mode 100644
index 0000000..b2df689
--- /dev/null
+++ b/gcc/testsuite/g++.dg/harden-cfr-throw-returning-enabled-O0.C
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-returning-calls -fdump-tree-hardcfr -O0" } */
+
+/* Explicitly enable -fhardcfr-check-returning-calls at -O0. */
+
+#include "torture/harden-cfr-throw.C"
+
+/* Same expectations as those in torture/harden-cfr-throw-returning.C. */
+
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 10 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "builtin_trap" 2 "hardcfr" } } */
diff --git a/gcc/testsuite/g++.dg/torture/harden-cfr-noret-always-no-nothrow.C b/gcc/testsuite/g++.dg/torture/harden-cfr-noret-always-no-nothrow.C
new file mode 100644
index 0000000..0d35920
--- /dev/null
+++ b/gcc/testsuite/g++.dg/torture/harden-cfr-noret-always-no-nothrow.C
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=always -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that C++ does NOT make for implicit nothrow in noreturn
+ handling. */
+
+#include "harden-cfr-noret-no-nothrow.C"
+
+/* All 3 noreturn calls. */
+/* { dg-final { scan-tree-dump-times "Bypassing cleanup" 3 "hardcfr" } } */
+/* Out-of-line checks in f. */
+/* { dg-final { scan-tree-dump-times "Inserting out-of-line check in block \[0-9]*'s edge to exit" 1 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 2 "hardcfr" } } */
+/* Inline checks in h and h2. */
+/* { dg-final { scan-tree-dump-times "Inserting inline check before stmt" 2 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "hardcfr" } } */
diff --git a/gcc/testsuite/g++.dg/torture/harden-cfr-noret-never-no-nothrow.C b/gcc/testsuite/g++.dg/torture/harden-cfr-noret-never-no-nothrow.C
new file mode 100644
index 0000000..b7d247f
--- /dev/null
+++ b/gcc/testsuite/g++.dg/torture/harden-cfr-noret-never-no-nothrow.C
@@ -0,0 +1,18 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that C++ does NOT make for implicit nothrow in noreturn
+ handling. Expected results for =never and =nothrow are the same,
+ since the functions are not nothrow. */
+
+#include "harden-cfr-noret-no-nothrow.C"
+
+/* All 3 noreturn calls. */
+/* { dg-final { scan-tree-dump-times "Associated cleanup" 3 "hardcfr" } } */
+/* Out-of-line checks in f. */
+/* { dg-final { scan-tree-dump-times "Inserting out-of-line check in block \[0-9]*'s edge to exit" 1 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "Inserting out-of-line check before stmt" 1 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 2 "hardcfr" } } */
+/* Inline checks in h and h2. */
+/* { dg-final { scan-tree-dump-times "Inserting inline check before stmt" 2 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "hardcfr" } } */
diff --git a/gcc/testsuite/g++.dg/torture/harden-cfr-noret-no-nothrow.C b/gcc/testsuite/g++.dg/torture/harden-cfr-noret-no-nothrow.C
new file mode 100644
index 0000000..62c58cf
--- /dev/null
+++ b/gcc/testsuite/g++.dg/torture/harden-cfr-noret-no-nothrow.C
@@ -0,0 +1,23 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=nothrow -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that C++ does NOT make for implicit nothrow in noreturn
+ handling. */
+
+#define ATTR_NOTHROW_OPT
+
+#if ! __OPTIMIZE__
+void __attribute__ ((__noreturn__)) h (void);
+#endif
+
+#include "../../c-c++-common/torture/harden-cfr-noret.c"
+
+/* All 3 noreturn calls. */
+/* { dg-final { scan-tree-dump-times "Associated cleanup" 3 "hardcfr" } } */
+/* Out-of-line checks in f. */
+/* { dg-final { scan-tree-dump-times "Inserting out-of-line check in block \[0-9]*'s edge to exit" 1 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "Inserting out-of-line check before stmt" 1 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 2 "hardcfr" } } */
+/* Inline checks in h and h2. */
+/* { dg-final { scan-tree-dump-times "Inserting inline check before stmt" 2 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "hardcfr" } } */
diff --git a/gcc/testsuite/g++.dg/torture/harden-cfr-throw-always.C b/gcc/testsuite/g++.dg/torture/harden-cfr-throw-always.C
new file mode 100644
index 0000000..4d303e7
--- /dev/null
+++ b/gcc/testsuite/g++.dg/torture/harden-cfr-throw-always.C
@@ -0,0 +1,13 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fno-hardcfr-check-returning-calls -fhardcfr-check-noreturn-calls=always -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that we insert cleanups for checking around the bodies of
+ maybe-throwing functions, and also checking before noreturn
+ calls. */
+
+#include "harden-cfr-throw.C"
+
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 14 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "builtin_trap" 1 "hardcfr" } } */
+/* h, h2, h2b, and h4. */
+/* { dg-final { scan-tree-dump-times "Bypassing" 4 "hardcfr" } } */
diff --git a/gcc/testsuite/g++.dg/torture/harden-cfr-throw-never.C b/gcc/testsuite/g++.dg/torture/harden-cfr-throw-never.C
new file mode 100644
index 0000000..81c1b1a
--- /dev/null
+++ b/gcc/testsuite/g++.dg/torture/harden-cfr-throw-never.C
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fno-hardcfr-check-returning-calls -fhardcfr-check-noreturn-calls=never -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that we insert cleanups for checking around the bodies of
+ maybe-throwing functions, without checking before noreturn
+ calls. */
+
+#include "harden-cfr-throw.C"
+
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 12 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "builtin_trap" 1 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "Bypassing" 0 "hardcfr" } } */
diff --git a/gcc/testsuite/g++.dg/torture/harden-cfr-throw-no-xthrow-expected.C b/gcc/testsuite/g++.dg/torture/harden-cfr-throw-no-xthrow-expected.C
new file mode 100644
index 0000000..de37b2a
--- /dev/null
+++ b/gcc/testsuite/g++.dg/torture/harden-cfr-throw-no-xthrow-expected.C
@@ -0,0 +1,16 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fno-hardcfr-check-returning-calls -fhardcfr-check-noreturn-calls=no-xthrow -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that we insert cleanups for checking around the bodies of
+ maybe-throwing functions, and also checking before noreturn
+ calls. */
+
+extern void __attribute__ ((__noreturn__, __expected_throw__)) g (void);
+extern void __attribute__ ((__noreturn__, __expected_throw__)) g2 (void);
+
+#include "harden-cfr-throw.C"
+
+/* In f and h3, there are checkpoints at return and exception escape. */
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 4 "hardcfr" } } */
+/* Other functions get a single cleanup checkpoint. */
+/* { dg-final { scan-tree-dump-times "builtin_trap" 5 "hardcfr" } } */
diff --git a/gcc/testsuite/g++.dg/torture/harden-cfr-throw-no-xthrow.C b/gcc/testsuite/g++.dg/torture/harden-cfr-throw-no-xthrow.C
new file mode 100644
index 0000000..720498b
--- /dev/null
+++ b/gcc/testsuite/g++.dg/torture/harden-cfr-throw-no-xthrow.C
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fno-hardcfr-check-returning-calls -fhardcfr-check-noreturn-calls=no-xthrow -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that we insert cleanups for checking around the bodies of
+ maybe-throwing functions, and also checking before noreturn
+ calls. */
+
+#include "harden-cfr-throw.C"
+
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 12 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "builtin_trap" 1 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "Bypassing" 0 "hardcfr" } } */
diff --git a/gcc/testsuite/g++.dg/torture/harden-cfr-throw-nocleanup.C b/gcc/testsuite/g++.dg/torture/harden-cfr-throw-nocleanup.C
new file mode 100644
index 0000000..9f35936
--- /dev/null
+++ b/gcc/testsuite/g++.dg/torture/harden-cfr-throw-nocleanup.C
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -fno-hardcfr-check-exceptions -fno-hardcfr-check-returning-calls -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that we do not insert cleanups for checking around the bodies
+ of maybe-throwing functions. h4 doesn't get any checks, because we
+ don't have noreturn checking enabled. */
+
+#include "harden-cfr-throw.C"
+
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 0 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "builtin_trap" 6 "hardcfr" } } */
diff --git a/gcc/testsuite/g++.dg/torture/harden-cfr-throw-nothrow.C b/gcc/testsuite/g++.dg/torture/harden-cfr-throw-nothrow.C
new file mode 100644
index 0000000..e1c2e8d
--- /dev/null
+++ b/gcc/testsuite/g++.dg/torture/harden-cfr-throw-nothrow.C
@@ -0,0 +1,11 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fno-hardcfr-check-returning-calls -fhardcfr-check-noreturn-calls=nothrow -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that we insert cleanups for checking around the bodies of
+ maybe-throwing functions, without checking before noreturn
+ calls. */
+
+#include "harden-cfr-throw.C"
+
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 12 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "builtin_trap" 1 "hardcfr" } } */
diff --git a/gcc/testsuite/g++.dg/torture/harden-cfr-throw-returning.C b/gcc/testsuite/g++.dg/torture/harden-cfr-throw-returning.C
new file mode 100644
index 0000000..37e4551
--- /dev/null
+++ b/gcc/testsuite/g++.dg/torture/harden-cfr-throw-returning.C
@@ -0,0 +1,31 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=never -foptimize-sibling-calls -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that we insert cleanups for checking around the bodies of
+ maybe-throwing functions. These results depend on checking before
+ returning calls, which is only enabled when sibcall optimizations
+ are enabled, so change the optimization mode to -O1 for f and f2,
+ so that -foptimize-sibling-calls can take effect and enable
+ -fhardcfr-check-returning-calls, so that we get the same results.
+ There is a separate test for -O0. */
+
+#if ! __OPTIMIZE__
+void __attribute__ ((__optimize__ (1, "-foptimize-sibling-calls"))) f(int i);
+void __attribute__ ((__optimize__ (1, "-foptimize-sibling-calls"))) f2(int i);
+void __attribute__ ((__optimize__ (1, "-foptimize-sibling-calls"))) h3(void);
+#endif
+
+#include "harden-cfr-throw.C"
+
+/* f gets out-of-line checks before the unwrapped tail call and in the
+ else edge. */
+/* f2 gets out-of-line checks before both unwrapped tail calls. */
+/* h gets out-of-line checks before the implicit return and in the
+ cleanup block. */
+/* h2 and h2b get out-of-line checks before the cleanup returning
+ call, and in the cleanup block. */
+/* h3 gets an inline check before the __cxa_end_catch returning call. */
+/* h4 gets an inline check in the cleanup block. */
+
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 10 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "builtin_trap" 2 "hardcfr" } } */
diff --git a/gcc/testsuite/g++.dg/torture/harden-cfr-throw.C b/gcc/testsuite/g++.dg/torture/harden-cfr-throw.C
new file mode 100644
index 0000000..8e46b90
--- /dev/null
+++ b/gcc/testsuite/g++.dg/torture/harden-cfr-throw.C
@@ -0,0 +1,73 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fno-hardcfr-check-returning-calls -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+#if ! __OPTIMIZE__ && ! defined NO_OPTIMIZE
+/* Without optimization, functions with cleanups end up with an extra
+ resx that is not optimized out, so arrange to optimize them. */
+void __attribute__ ((__optimize__ (1))) h2(void);
+void __attribute__ ((__optimize__ (1))) h2b(void);
+#endif
+
+/* Check that we insert cleanups for checking around the bodies of
+ maybe-throwing functions. */
+
+extern void g (void);
+extern void g2 (void);
+
+void f(int i) {
+ if (i)
+ g ();
+ /* Out-of-line checks here, and in the implicit handler. */
+}
+
+void f2(int i) {
+ if (i)
+ g ();
+ else
+ g2 ();
+ /* Out-of-line checks here, and in the implicit handler. */
+}
+
+void h(void) {
+ try {
+ g ();
+ } catch (...) {
+ throw;
+ }
+ /* Out-of-line checks here, and in the implicit handler. */
+}
+
+struct needs_cleanup {
+ ~needs_cleanup();
+};
+
+void h2(void) {
+ needs_cleanup y; /* No check in the cleanup handler. */
+ g();
+ /* Out-of-line checks here, and in the implicit handler. */
+}
+
+extern void __attribute__ ((__nothrow__)) another_cleanup (void*);
+
+void h2b(void) {
+ int x __attribute__ ((cleanup (another_cleanup)));
+ g();
+ /* Out-of-line checks here, and in the implicit handler. */
+}
+
+void h3(void) {
+ try {
+ throw 1;
+ } catch (...) {
+ }
+ /* Out-of-line checks here, and in the implicit handler. */
+}
+
+void h4(void) {
+ throw 1;
+ /* Inline check in the cleanup around the __cxa_throw noreturn call. */
+}
+
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 12 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "builtin_trap" 1 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "Bypassing" 0 "hardcfr" } } */
diff --git a/gcc/testsuite/gcc.dg/torture/harden-cfr-noret-no-nothrow.c b/gcc/testsuite/gcc.dg/torture/harden-cfr-noret-no-nothrow.c
new file mode 100644
index 0000000..8e4ee1f
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/harden-cfr-noret-no-nothrow.c
@@ -0,0 +1,15 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-noreturn-calls=nothrow -fdump-tree-hardcfr -ffat-lto-objects" } */
+
+/* Check that C makes for implicit nothrow in noreturn handling. */
+
+#define ATTR_NOTHROW_OPT
+
+#include "../../c-c++-common/torture/harden-cfr-noret.c"
+
+/* One out-of-line check before the noreturn call in f, and another at the end
+ of f. */
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 2 "hardcfr" } } */
+/* One inline check in h, before the noreturn call, and another in h2, before
+ or after the call, depending on noreturn detection. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "hardcfr" } } */
diff --git a/gcc/testsuite/gcc.dg/torture/harden-cfr-tail-ub.c b/gcc/testsuite/gcc.dg/torture/harden-cfr-tail-ub.c
new file mode 100644
index 0000000..634d98f
--- /dev/null
+++ b/gcc/testsuite/gcc.dg/torture/harden-cfr-tail-ub.c
@@ -0,0 +1,40 @@
+/* { dg-do compile } */
+/* { dg-options "-fharden-control-flow-redundancy -fhardcfr-check-returning-calls -fno-hardcfr-check-exceptions -fdump-tree-hardcfr -ffat-lto-objects -Wno-return-type" } */
+
+/* In C only, check some additional cases (comparing with
+ c-c++-common/torture/harden-cfr-tail.c) of falling off the end of non-void
+ function. C++ would issue an unreachable call in these cases. */
+
+extern int g (int i);
+
+int f1(int i) {
+ /* Inline check before the returning call, that doesn't return anything. */
+ g (i);
+ /* Implicit return without value, despite the return type; this combination
+ enables tail-calling of g, and is recognized as a returning call. */
+}
+
+extern void g2 (int i);
+
+int f2(int i) {
+ /* Inline check before the returning call, that disregards its return
+ value. */
+ g2 (i);
+ /* Implicit return without value, despite the return type; this combination
+ enables tail-calling of g2, and is recognized as a returning call. */
+}
+
+int f3(int i) {
+ if (i)
+ /* Out-of-line check before the returning call. */
+ return g (i);
+ /* Out-of-line check before implicit return. */
+}
+
+/* Out-of-line checks in f3, before returning calls and before return. */
+/* { dg-final { scan-tree-dump-times "hardcfr_check" 2 "hardcfr" } } */
+/* Inline checking in all other functions. */
+/* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "hardcfr" } } */
+/* Check before tail-call in all functions, but f3 is out-of-line. */
+/* { dg-final { scan-tree-dump-times "Inserting inline check before stmt" 2 "hardcfr" } } */
+/* { dg-final { scan-tree-dump-times "Inserting out-of-line check before stmt" 1 "hardcfr" } } */
diff --git a/gcc/testsuite/gnat.dg/hardcfr.adb b/gcc/testsuite/gnat.dg/hardcfr.adb
new file mode 100644
index 0000000..abe1605
--- /dev/null
+++ b/gcc/testsuite/gnat.dg/hardcfr.adb
@@ -0,0 +1,76 @@
+-- { dg-do run }
+-- { dg-options "-fharden-control-flow-redundancy -fno-hardcfr-check-exceptions -fdump-tree-hardcfr --param=hardcfr-max-blocks=22 --param=hardcfr-max-inline-blocks=12 -O0" }
+
+procedure HardCFR is
+ function F (I, J : Integer) return Integer is
+ begin
+ if (I < J) then
+ return 2 * I;
+ else
+ return 3 * J;
+ end if;
+ end F;
+
+ function G (I : Natural; J : Integer) return Integer is
+ begin
+ case I is
+ when 0 =>
+ return J * 2;
+
+ when 1 =>
+ return J * 3;
+
+ when 2 =>
+ return J * 5;
+
+ when others =>
+ return J * 7;
+ end case;
+ end G;
+
+ function H (I : Natural; -- { dg-warning "has more than 22 blocks, the requested maximum" }
+ J : Integer)
+ return Integer is
+ begin
+ case I is
+ when 0 =>
+ return J * 2;
+
+ when 1 =>
+ return J * 3;
+
+ when 2 =>
+ return J * 5;
+
+ when 3 =>
+ return J * 7;
+
+ when 4 =>
+ return J * 11;
+
+ when 5 =>
+ return J * 13;
+
+ when 6 =>
+ return J * 17;
+
+ when 7 =>
+ return J * 19;
+
+ when others =>
+ return J * 23;
+ end case;
+ end H;
+begin
+ if (F (1, 2) /= 2 or else F (3, 2) /= 6
+ or else G (2, 5) /= 25 or else H (4, 3) /= 33)
+ then
+ raise Program_Error;
+ end if;
+end HardCFR;
+
+-- HardCFR and HardCFR.F:
+-- { dg-final { scan-tree-dump-times ".builtin_trap" 2 "hardcfr" } }
+
+-- This is __builtin___hardcfr_check in HardCFR.G:
+-- { dg-final { scan-tree-dump-times ".builtin " 1 "hardcfr" } }
diff --git a/gcc/tree-core.h b/gcc/tree-core.h
index 77417db..2c89b65 100644
--- a/gcc/tree-core.h
+++ b/gcc/tree-core.h
@@ -95,6 +95,9 @@ struct die_struct;
/* Nonzero if this is a cold function. */
#define ECF_COLD (1 << 15)
+/* Nonzero if this is a function expected to end with an exception. */
+#define ECF_XTHROW (1 << 16)
+
/* Call argument flags. */
/* Nonzero if the argument is not used by the function. */
diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
index 79a5f33..09e6ada 100644
--- a/gcc/tree-pass.h
+++ b/gcc/tree-pass.h
@@ -657,6 +657,8 @@ extern gimple_opt_pass *make_pass_gimple_isel (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_harden_compares (gcc::context *ctxt);
extern gimple_opt_pass *make_pass_harden_conditional_branches (gcc::context
*ctxt);
+extern gimple_opt_pass *make_pass_harden_control_flow_redundancy (gcc::context
+ *ctxt);
/* Current optimization pass. */
extern opt_pass *current_pass;
diff --git a/gcc/tree.cc b/gcc/tree.cc
index 69369c6..f7bfd9e 100644
--- a/gcc/tree.cc
+++ b/gcc/tree.cc
@@ -9748,6 +9748,10 @@ set_call_expr_flags (tree decl, int flags)
DECL_ATTRIBUTES (decl));
if ((flags & ECF_TM_PURE) && flag_tm)
apply_tm_attr (decl, get_identifier ("transaction_pure"));
+ if ((flags & ECF_XTHROW))
+ DECL_ATTRIBUTES (decl)
+ = tree_cons (get_identifier ("expected_throw"),
+ NULL, DECL_ATTRIBUTES (decl));
/* Looping const or pure is implied by noreturn.
There is currently no way to declare looping const or looping pure alone. */
gcc_assert (!(flags & ECF_LOOPING_CONST_OR_PURE)
@@ -9960,7 +9964,8 @@ build_common_builtin_nodes (void)
ftype = build_function_type_list (void_type_node, NULL_TREE);
local_define_builtin ("__builtin_cxa_end_cleanup", ftype,
BUILT_IN_CXA_END_CLEANUP,
- "__cxa_end_cleanup", ECF_NORETURN | ECF_LEAF);
+ "__cxa_end_cleanup",
+ ECF_NORETURN | ECF_XTHROW | ECF_LEAF);
}
ftype = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);
@@ -9969,7 +9974,7 @@ build_common_builtin_nodes (void)
((targetm_common.except_unwind_info (&global_options)
== UI_SJLJ)
? "_Unwind_SjLj_Resume" : "_Unwind_Resume"),
- ECF_NORETURN);
+ ECF_NORETURN | ECF_XTHROW);
if (builtin_decl_explicit (BUILT_IN_RETURN_ADDRESS) == NULL_TREE)
{