author     Ian Lance Taylor <iant@golang.org>  2021-09-13 10:37:49 -0700
committer  Ian Lance Taylor <iant@golang.org>  2021-09-13 10:37:49 -0700
commit     e252b51ccde010cbd2a146485d8045103cd99533 (patch)
tree       e060f101cdc32bf5e520de8e5275db9d4236b74c /gcc/analyzer
parent     f10c7c4596dda99d2ee872c995ae4aeda65adbdf (diff)
parent     104c05c5284b7822d770ee51a7d91946c7e56d50 (diff)
Merge from trunk revision 104c05c5284b7822d770ee51a7d91946c7e56d50.
Diffstat (limited to 'gcc/analyzer')
-rw-r--r--  gcc/analyzer/ChangeLog                     | 1610
-rw-r--r--  gcc/analyzer/analysis-plan.cc              |    4
-rw-r--r--  gcc/analyzer/analyzer.cc                   |  162
-rw-r--r--  gcc/analyzer/analyzer.h                    |   85
-rw-r--r--  gcc/analyzer/analyzer.opt                  |    8
-rw-r--r--  gcc/analyzer/call-info.cc                  |  162
-rw-r--r--  gcc/analyzer/call-info.h                   |   83
-rw-r--r--  gcc/analyzer/call-string.cc                |  151
-rw-r--r--  gcc/analyzer/call-string.h                 |   52
-rw-r--r--  gcc/analyzer/checker-path.cc               |   48
-rw-r--r--  gcc/analyzer/checker-path.h                |   28
-rw-r--r--  gcc/analyzer/complexity.cc                 |   16
-rw-r--r--  gcc/analyzer/complexity.h                  |    1
-rw-r--r--  gcc/analyzer/constraint-manager.cc         | 1385
-rw-r--r--  gcc/analyzer/constraint-manager.h          |  192
-rw-r--r--  gcc/analyzer/diagnostic-manager.cc         |  218
-rw-r--r--  gcc/analyzer/diagnostic-manager.h          |    6
-rw-r--r--  gcc/analyzer/engine.cc                     |  970
-rw-r--r--  gcc/analyzer/exploded-graph.h              |  178
-rw-r--r--  gcc/analyzer/feasible-graph.cc             |   12
-rw-r--r--  gcc/analyzer/feasible-graph.h              |    7
-rw-r--r--  gcc/analyzer/pending-diagnostic.h          |   13
-rw-r--r--  gcc/analyzer/program-point.cc              |   39
-rw-r--r--  gcc/analyzer/program-point.h               |    3
-rw-r--r--  gcc/analyzer/program-state.cc              |  271
-rw-r--r--  gcc/analyzer/program-state.h               |   32
-rw-r--r--  gcc/analyzer/region-model-asm.cc           |  303
-rw-r--r--  gcc/analyzer/region-model-impl-calls.cc    |  362
-rw-r--r--  gcc/analyzer/region-model-manager.cc       |  482
-rw-r--r--  gcc/analyzer/region-model-reachability.cc  |   19
-rw-r--r--  gcc/analyzer/region-model-reachability.h   |    8
-rw-r--r--  gcc/analyzer/region-model.cc               | 1327
-rw-r--r--  gcc/analyzer/region-model.h                |  347
-rw-r--r--  gcc/analyzer/region.cc                     |  378
-rw-r--r--  gcc/analyzer/region.h                      |  146
-rw-r--r--  gcc/analyzer/sm-file.cc                    |   67
-rw-r--r--  gcc/analyzer/sm-malloc.cc                  |  250
-rw-r--r--  gcc/analyzer/sm-pattern-test.cc            |   24
-rw-r--r--  gcc/analyzer/sm-sensitive.cc               |   26
-rw-r--r--  gcc/analyzer/sm-signal.cc                  |   43
-rw-r--r--  gcc/analyzer/sm-taint.cc                   |   12
-rw-r--r--  gcc/analyzer/sm.cc                         |   14
-rw-r--r--  gcc/analyzer/sm.h                          |   39
-rw-r--r--  gcc/analyzer/state-purge.cc                |  142
-rw-r--r--  gcc/analyzer/state-purge.h                 |    4
-rw-r--r--  gcc/analyzer/store.cc                      | 1152
-rw-r--r--  gcc/analyzer/store.h                       |  325
-rw-r--r--  gcc/analyzer/supergraph.cc                 |  210
-rw-r--r--  gcc/analyzer/supergraph.h                  |   38
-rw-r--r--  gcc/analyzer/svalue.cc                     |  635
-rw-r--r--  gcc/analyzer/svalue.h                      |  448
51 files changed, 10928 insertions, 1609 deletions
diff --git a/gcc/analyzer/ChangeLog b/gcc/analyzer/ChangeLog
index 58b0fbb..03ba64f 100644
--- a/gcc/analyzer/ChangeLog
+++ b/gcc/analyzer/ChangeLog
@@ -1,3 +1,1613 @@
+2021-09-08 David Malcolm <dmalcolm@redhat.com>
+
+ PR analyzer/102225
+ * analyzer.h (compat_types_p): New decl.
+ * constraint-manager.cc
+ (constraint_manager::get_or_add_equiv_class): Guard against NULL
+ type when checking for pointer types.
+ * region-model-impl-calls.cc (region_model::impl_call_realloc):
+ Guard against NULL lhs type/region. Guard against the size value
+ not being of a compatible type for dynamic extents.
+ * region-model.cc (compat_types_p): Make non-static.
+
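For illustration (a sketch, not part of the patch): a realloc call whose
return value is discarded has no lhs on the call statement, which is the
kind of situation the new NULL guards must tolerate:

  #include <stdlib.h>

  void test (void *p, size_t n)
  {
    /* No lhs here, so impl_call_realloc must not assume an lhs
       type/region exists (PR analyzer/102225).  */
    realloc (p, n);
  }
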
+2021-08-30 David Malcolm <dmalcolm@redhat.com>
+
+ PR analyzer/99260
+ * analyzer.h (class custom_edge_info): New class, adapted from
+ exploded_edge::custom_info_t. Make member functions const.
+ Make update_model return bool, converting edge param from
+ reference to a pointer, and adding a ctxt param.
+ (class path_context): New class.
+ * call-info.cc: New file.
+ * call-info.h: New file.
+ * engine.cc: Include "analyzer/call-info.h" and <memory>.
+ (impl_region_model_context::impl_region_model_context): Update for
+ new m_path_ctxt field.
+ (impl_region_model_context::bifurcate): New.
+ (impl_region_model_context::terminate_path): New.
+ (impl_region_model_context::get_malloc_map): New.
+ (impl_sm_context::impl_sm_context): Update for new m_path_ctxt
+ field.
+ (impl_sm_context::get_fndecl_for_call): Likewise.
+ (impl_sm_context::set_next_state): Likewise.
+ (impl_sm_context::warn): Likewise.
+ (impl_sm_context::is_zero_assignment): Likewise.
+ (impl_sm_context::get_path_context): New.
+ (impl_sm_context::m_path_ctxt): New.
+ (impl_region_model_context::on_condition): Update for new
+ path_ctxt param. Handle m_enode_for_diag being NULL.
+ (impl_region_model_context::on_phi): Update for new path_ctxt
+ param.
+ (exploded_node::on_stmt): Add path_ctxt param, updating ctor calls
+ to use it as necessary. Use it to bail out after sm-handling,
+ if needed.
+ (exploded_node::detect_leaks): Update for new path_ctxt param.
+ (dynamic_call_info_t::update_model): Update for conversion of
+ exploded_edge::custom_info_t to custom_edge_info.
+ (dynamic_call_info_t::add_events_to_path): Likewise.
+ (rewind_info_t::update_model): Likewise.
+ (rewind_info_t::add_events_to_path): Likewise.
+ (exploded_edge::exploded_edge): Likewise.
+ (exploded_graph::add_edge): Likewise.
+ (exploded_graph::maybe_process_run_of_before_supernode_enodes):
+ Update for new path_ctxt param.
+ (class impl_path_context): New.
+ (exploded_graph::process_node): Update for new path_ctxt param.
+ Create an impl_path_context and pass it to exploded_node::on_stmt.
+ Use it to terminate iterating stmts if terminate_path is called
+ on it. After processing a run of stmts, query path_ctxt to
+ potentially terminate the analysis path, and/or to "bifurcate" the
+ analysis into multiple additional paths.
+ (feasibility_state::maybe_update_for_edge): Update for new
+ update_model ctxt param.
+ * exploded-graph.h
+ (impl_region_model_context::impl_region_model_context): Add
+ path_ctxt param.
+ (impl_region_model_context::bifurcate): New.
+	(impl_region_model_context::terminate_path): New.
+ (impl_region_model_context::get_ext_state): New.
+ (impl_region_model_context::get_malloc_map): New.
+ (impl_region_model_context::m_path_ctxt): New field.
+ (exploded_node::on_stmt): Add path_ctxt param.
+ (class exploded_edge::custom_info_t): Move to analyzer.h, renaming
+ to custom_edge_info, and making the changes as noted in analyzer.h
+ above.
+ (exploded_edge::exploded_edge): Update for these changes to
+ exploded_edge::custom_info_t.
+ (exploded_edge::m_custom_info): Likewise.
+ (class dynamic_call_info_t): Likewise.
+ (class rewind_info_t): Likewise.
+ (exploded_graph::add_edge): Likewise.
+ * program-state.cc (program_state::on_edge): Update for new
+ path_ctxt param.
+ (program_state::push_call): Likewise.
+ (program_state::returning_call): Likewise.
+ (program_state::prune_for_point): Likewise.
+ * region-model-impl-calls.cc: Include "analyzer/call-info.h".
+ (call_details::get_fndecl_for_call): New.
+ (region_model::impl_call_realloc): Reimplement.
+ * region-model.cc (region_model::on_call_pre): Move call to
+ impl_call_realloc to...
+ (region_model::on_call_post): ...here. Consolidate creation
+ of call_details instance.
+ (noop_region_model_context::bifurcate): New.
+ (noop_region_model_context::terminate_path): New.
+ * region-model.h (call_details::get_call_stmt): New.
+ (call_details::get_fndecl_for_call): New.
+ (region_model::on_realloc_with_move): New.
+ (region_model_context::bifurcate): New.
+ (region_model_context::terminate_path): New.
+ (region_model_context::get_ext_state): New.
+ (region_model_context::get_malloc_map): New.
+ (noop_region_model_context::bifurcate): New.
+ (noop_region_model_context::terminate_path): New.
+ (noop_region_model_context::get_ext_state): New.
+ (noop_region_model_context::get_malloc_map): New.
+ * sm-malloc.cc: Include "analyzer/program-state.h".
+ (malloc_state_machine::on_realloc_call): Reimplement.
+ (malloc_state_machine::on_realloc_with_move): New.
+ (region_model::on_realloc_with_move): New.
+ * sm-signal.cc (class signal_delivery_edge_info_t): Update for
+ conversion from exploded_edge::custom_info_t to custom_edge_info.
+ * sm.h (sm_context::get_path_context): New.
+ * svalue.cc (svalue::maybe_get_constant): Call
+ unwrap_any_unmergeable.
+
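To make the new bifurcation machinery concrete, here is a hedged sketch
(not from the commit) of the C code it is aimed at: a single realloc
call splits the analysis into a failure outcome and success outcomes,
with on_realloc_with_move covering the case where the buffer moves:

  #include <stdlib.h>

  void *test (void *p, size_t n)
  {
    void *q = realloc (p, n);
    if (!q)
      return p;  /* failure path: p is still allocated */
    /* Success paths: either the region grew in place (q == p) or it
       moved and p was freed; each is explored as a separate path.  */
    return q;
  }
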
+2021-08-25 Ankur Saini <arsenic@sourceware.org>
+
+ PR analyzer/101980
+ * engine.cc (exploded_graph::maybe_create_dynamic_call): Don't create
+ calls if max recursion limit is reached.
+
+2021-08-23 David Malcolm <dmalcolm@redhat.com>
+
+ * analyzer.h (struct rejected_constraint): Convert to...
+ (class rejected_constraint): ...this.
+ (class bounded_ranges): New forward decl.
+ (class bounded_ranges_manager): New forward decl.
+ * constraint-manager.cc: Include "analyzer/analyzer-logging.h" and
+ "tree-pretty-print.h".
+ (can_plus_one_p): New.
+ (plus_one): New.
+ (can_minus_one_p): New.
+ (minus_one): New.
+ (bounded_range::bounded_range): New.
+ (dump_cst): New.
+ (bounded_range::dump_to_pp): New.
+ (bounded_range::dump): New.
+ (bounded_range::to_json): New.
+ (bounded_range::set_json_attr): New.
+ (bounded_range::contains_p): New.
+ (bounded_range::intersects_p): New.
+ (bounded_range::operator==): New.
+ (bounded_range::cmp): New.
+ (bounded_ranges::bounded_ranges): New.
+ (bounded_ranges::bounded_ranges): New.
+ (bounded_ranges::bounded_ranges): New.
+ (bounded_ranges::canonicalize): New.
+ (bounded_ranges::validate): New.
+ (bounded_ranges::operator==): New.
+ (bounded_ranges::dump_to_pp): New.
+ (bounded_ranges::dump): New.
+ (bounded_ranges::to_json): New.
+ (bounded_ranges::eval_condition): New.
+ (bounded_ranges::contain_p): New.
+ (bounded_ranges::cmp): New.
+ (bounded_ranges_manager::~bounded_ranges_manager): New.
+ (bounded_ranges_manager::get_or_create_empty): New.
+ (bounded_ranges_manager::get_or_create_point): New.
+ (bounded_ranges_manager::get_or_create_range): New.
+ (bounded_ranges_manager::get_or_create_union): New.
+ (bounded_ranges_manager::get_or_create_intersection): New.
+ (bounded_ranges_manager::get_or_create_inverse): New.
+ (bounded_ranges_manager::consolidate): New.
+ (bounded_ranges_manager::get_or_create_ranges_for_switch): New.
+ (bounded_ranges_manager::create_ranges_for_switch): New.
+ (bounded_ranges_manager::make_case_label_ranges): New.
+ (bounded_ranges_manager::log_stats): New.
+ (bounded_ranges_constraint::print): New.
+ (bounded_ranges_constraint::to_json): New.
+ (bounded_ranges_constraint::operator==): New.
+ (bounded_ranges_constraint::add_to_hash): New.
+ (constraint_manager::constraint_manager): Update for new field
+ m_bounded_ranges_constraints.
+ (constraint_manager::operator=): Likewise.
+ (constraint_manager::hash): Likewise.
+ (constraint_manager::operator==): Likewise.
+ (constraint_manager::print): Likewise.
+ (constraint_manager::dump_to_pp): Likewise.
+ (constraint_manager::to_json): Likewise.
+ (constraint_manager::add_unknown_constraint): Update the lhs_ec_id
+ if necessary in existing constraints when combining equivalence
+ classes. Add similar code for handling
+ m_bounded_ranges_constraints.
+ (constraint_manager::add_constraint_internal): Add comment.
+ (constraint_manager::add_bounded_ranges): New.
+ (constraint_manager::eval_condition): Use new field
+ m_bounded_ranges_constraints.
+ (constraint_manager::purge): Update bounded_ranges_constraint
+ instances.
+ (constraint_manager::canonicalize): Update for new field.
+ (merger_fact_visitor::on_ranges): New.
+ (constraint_manager::for_each_fact): Use new field
+ m_bounded_ranges_constraints.
+	(constraint_manager::validate): Fix off-by-one error that was needed
+	due to the bug fixed above in add_unknown_constraint.  Validate the EC IDs
+ in m_bounded_ranges_constraints.
+ (constraint_manager::get_range_manager): New.
+ (selftest::assert_dump_bounded_range_eq): New.
+ (ASSERT_DUMP_BOUNDED_RANGE_EQ): New.
+ (selftest::test_bounded_range): New.
+ (selftest::assert_dump_bounded_ranges_eq): New.
+ (ASSERT_DUMP_BOUNDED_RANGES_EQ): New.
+ (selftest::test_bounded_ranges): New.
+ (selftest::run_constraint_manager_tests): Call the new selftests.
+ * constraint-manager.h (struct bounded_range): New.
+ (struct bounded_ranges): New.
+ (template <> struct default_hash_traits<bounded_ranges::key_t>): New.
+ (class bounded_ranges_manager): New.
+ (fact_visitor::on_ranges): New pure virtual function.
+ (class bounded_ranges_constraint): New.
+ (constraint_manager::add_bounded_ranges): New decl.
+ (constraint_manager::get_range_manager): New decl.
+ (constraint_manager::m_bounded_ranges_constraints): New field.
+ * diagnostic-manager.cc (epath_finder::process_worklist_item):
+ Transfer ownership of rc to add_feasibility_problem.
+ * engine.cc (feasibility_problem::dump_to_pp): Use get_model.
+ * feasible-graph.cc (infeasible_node::dump_dot): Update for
+ conversion of m_rc to a pointer.
+ (feasible_graph::add_feasibility_problem): Pass RC by pointer and
+ take ownership.
+ * feasible-graph.h (infeasible_node::infeasible_node): Pass RC by
+ pointer and take ownership.
+ (infeasible_node::~infeasible_node): New.
+ (infeasible_node::m_rc): Convert to a pointer.
+ (feasible_graph::add_feasibility_problem): Pass RC by pointer and
+ take ownership.
+ * region-model-manager.cc: Include
+ "analyzer/constraint-manager.h".
+	(region_model_manager::region_model_manager): Initialize new
+ field m_range_mgr.
+ (region_model_manager::~region_model_manager): Delete it.
+ (region_model_manager::log_stats): Call log_stats on it.
+ * region-model.cc (region_model::add_constraint): Use new subclass
+ rejected_op_constraint.
+ (region_model::apply_constraints_for_gswitch): Reimplement using
+ bounded_ranges_manager.
+ (rejected_constraint::dump_to_pp): Convert to...
+ (rejected_op_constraint::dump_to_pp): ...this.
+ (rejected_ranges_constraint::dump_to_pp): New.
+ * region-model.h (struct purge_stats): Add field
+ m_num_bounded_ranges_constraints.
+ (region_model_manager::get_range_manager): New.
+ (region_model_manager::m_range_mgr): New.
+ (region_model::get_range_manager): New.
+ (struct rejected_constraint): Split into...
+ (class rejected_constraint):...this new abstract base class,
+ and...
+ (class rejected_op_constraint): ...this new concrete subclass.
+ (class rejected_ranges_constraint): New.
+ * supergraph.cc: Include "tree-cfg.h".
+ (supergraph::supergraph): Drop idx param from add_cfg_edge.
+ (supergraph::add_cfg_edge): Drop idx param.
+ (switch_cfg_superedge::switch_cfg_superedge): Move here from
+ header. Populate m_case_labels with all cases which go to DST.
+ (switch_cfg_superedge::dump_label_to_pp): Reimplement to use
+ m_case_labels.
+ (switch_cfg_superedge::get_case_label): Delete.
+	* supergraph.h (supergraph::add_cfg_edge): Drop "idx" param.
+ (switch_cfg_superedge::switch_cfg_superedge): Drop idx param and
+ move implementation to supergraph.cc.
+ (switch_cfg_superedge::get_case_label): Delete.
+ (switch_cfg_superedge::get_case_labels): New.
+ (switch_cfg_superedge::m_idx): Delete.
+ (switch_cfg_superedge::m_case_labels): New field.
+
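At the user level, bounded_ranges lets apply_constraints_for_gswitch
record value ranges on each outgoing switch edge.  An assumed example,
using GCC's case-range extension:

  void test (int x)
  {
    switch (x)
      {
      case 0 ... 9:
        /* On this edge the analyzer can record x in [0, 9].  */
        break;
      case 10 ... 19:
        /* Here x is in [10, 19].  */
        break;
      default:
        /* Here x lies outside both ranges (the inverse of their union).  */
        break;
      }
  }
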
+2021-08-23 David Malcolm <dmalcolm@redhat.com>
+
+ PR analyzer/101875
+ * sm-file.cc (file_diagnostic::describe_state_change): Handle
+ change.m_expr being NULL.
+
+2021-08-23 David Malcolm <dmalcolm@redhat.com>
+
+ PR analyzer/101837
+ * analyzer.cc (maybe_reconstruct_from_def_stmt): Bail if fn is
+ NULL, and assert that it's non-NULL before passing it to
+ build_call_array_loc.
+
+2021-08-23 David Malcolm <dmalcolm@redhat.com>
+
+ PR analyzer/101962
+ * region-model.cc (region_model::eval_condition_without_cm):
+ Refactor comparison against zero, adding a check for
+ POINTER_PLUS_EXPR of non-NULL.
+
+2021-08-23 David Malcolm <dmalcolm@redhat.com>
+
+ * store.cc (bit_range::intersects_p): New overload.
+ (bit_range::operator-): New.
+ (binding_cluster::maybe_get_compound_binding): Handle the partial
+ overlap case.
+ (selftest::test_bit_range_intersects_p): Add test coverage for
+ new overload of bit_range::intersects_p.
+ * store.h (bit_range::intersects_p): New overload.
+ (bit_range::operator-): New.
+
+2021-08-23 Ankur Saini <arsenic@sourceware.org>
+
+ PR analyzer/102020
+ * diagnostic-manager.cc
+ (diagnostic_manager::prune_for_sm_diagnostic)<case EK_CALL_EDGE>: Fix typo.
+
+2021-08-21 Ankur Saini <arsenic@sourceware.org>
+
+ PR analyzer/101980
+ * diagnostic-manager.cc
+ (diagnostic_manager::prune_for_sm_diagnostic)<case EK_CALL_EDGE>: Use
+	caller_model only when the supergraph_edge doesn't exist.
+ (diagnostic_manager::prune_for_sm_diagnostic)<case EK_RETURN_EDGE>:
+ Likewise.
+ * engine.cc (exploded_graph::create_dynamic_call): Rename to...
+ (exploded_graph::maybe_create_dynamic_call): ...this, return call
+ creation status.
+ (exploded_graph::process_node): Handle calls which were not dynamically
+ discovered.
+ * exploded-graph.h (exploded_graph::create_dynamic_call): Rename to...
+ (exploded_graph::maybe_create_dynamic_call): ...this.
+ * region-model.cc (region_model::update_for_gcall): New param, use it
+ to push call to frame.
+ (region_model::update_for_call_superedge): Pass callee function to
+ update_for_gcall.
+ * region-model.h (region_model::update_for_gcall): New param.
+
+2021-08-18 Ankur Saini <arsenic@sourceware.org>
+
+ PR analyzer/97114
+ * region-model.cc (region_model::get_rvalue_1): Add case for
+ OBJ_TYPE_REF.
+
+2021-08-18 Ankur Saini <arsenic@sourceware.org>
+
+ PR analyzer/100546
+ * analysis-plan.cc (analysis_plan::use_summary_p): Don't use call
+	summaries if there is no callgraph edge.
+	* checker-path.cc (call_event::call_event): Handle call events that
+	are not represented by a supergraph call edge.
+ (return_event::return_event): Likewise.
+ (call_event::get_desc): Work with new call_event structure.
+	(return_event::get_desc): Likewise.
+ * checker-path.h (call_event::m_src_snode): New field.
+ (call_event::m_dest_snode): New field.
+ (return_event::m_src_snode): New field.
+ (return_event::m_dest_snode): New field.
+ * diagnostic-manager.cc
+ (diagnostic_manager::prune_for_sm_diagnostic)<case EK_CALL_EDGE>:
+ Refactor to work with edges without callgraph edge.
+ (diagnostic_manager::prune_for_sm_diagnostic)<case EK_RETURN_EDGE>:
+ Likewise.
+ * engine.cc (dynamic_call_info_t::update_model): New function.
+ (dynamic_call_info_t::add_events_to_path): New function.
+ (exploded_graph::create_dynamic_call): New function.
+ (exploded_graph::process_node): Work with dynamically discovered calls.
+ * exploded-graph.h (class dynamic_call_info_t): New class.
+ (exploded_graph::create_dynamic_call): New decl.
+ * program-point.cc (program_point::push_to_call_stack): New function.
+ (program_point::pop_from_call_stack): New function.
+ * program-point.h (program_point::push_to_call_stack): New decl.
+ (program_point::pop_from_call_stack): New decl.
+ * program-state.cc (program_state::push_call): New function.
+ (program_state::returning_call): New function.
+ * program-state.h (program_state::push_call): New decl.
+ (program_state::returning_call): New decl.
+	* region-model.cc (region_model::update_for_gcall): New function.
+ (region_model::update_for_return_gcall): New function.
+	(region_model::update_for_call_superedge): Get the underlying gcall and
+ update for gcall.
+ (region_model::update_for_return_superedge): Likewise.
+ * region-model.h (region_model::update_for_gcall): New decl.
+ (region_model::update_for_return_gcall): New decl.
+ * state-purge.cc (state_purge_per_ssa_name::process_point): Update to
+ work with calls without underlying cgraph edge.
+	* supergraph.cc (supergraph::supergraph): Split snodes at every callsite.
+	* supergraph.h (supernode::get_returning_call): New accessor.
+
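A sketch (not from the commit) of a call with no underlying cgraph edge
that this infrastructure lets the analyzer follow, discovering the
callee dynamically and pushing a frame for it:

  static void callee (int *out)
  {
    *out = 42;
  }

  void test (void)
  {
    void (*fp) (int *) = callee;
    int i;
    fp (&i);  /* no callgraph edge; target found via the region model */
  }
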
+2021-08-04 David Malcolm <dmalcolm@redhat.com>
+
+ PR analyzer/101570
+ * analyzer.cc (maybe_reconstruct_from_def_stmt): Add GIMPLE_ASM
+ case.
+ * analyzer.h (class asm_output_svalue): New forward decl.
+ (class reachable_regions): New forward decl.
+ * complexity.cc (complexity::from_vec_svalue): New.
+ * complexity.h (complexity::from_vec_svalue): New decl.
+ * engine.cc (feasibility_state::maybe_update_for_edge): Handle
+ asm stmts by calling on_asm_stmt.
+ * region-model-asm.cc: New file.
+ * region-model-manager.cc
+ (region_model_manager::maybe_fold_asm_output_svalue): New.
+ (region_model_manager::get_or_create_asm_output_svalue): New.
+ (region_model_manager::log_stats): Log m_asm_output_values_map.
+ * region-model.cc (region_model::on_stmt_pre): Handle GIMPLE_ASM.
+ * region-model.h (visitor::visit_asm_output_svalue): New.
+ (region_model_manager::get_or_create_asm_output_svalue): New decl.
+ (region_model_manager::maybe_fold_asm_output_svalue): New decl.
+ (region_model_manager::asm_output_values_map_t): New typedef.
+ (region_model_manager::m_asm_output_values_map): New field.
+ (region_model::on_asm_stmt): New.
+ * store.cc (binding_cluster::on_asm): New.
+ * store.h (binding_cluster::on_asm): New decl.
+ * svalue.cc (svalue::cmp_ptr): Handle SK_ASM_OUTPUT.
+ (asm_output_svalue::dump_to_pp): New.
+ (asm_output_svalue::dump_input): New.
+ (asm_output_svalue::input_idx_to_asm_idx): New.
+ (asm_output_svalue::accept): New.
+ * svalue.h (enum svalue_kind): Add SK_ASM_OUTPUT.
+ (svalue::dyn_cast_asm_output_svalue): New.
+ (class asm_output_svalue): New.
+ (is_a_helper <const asm_output_svalue *>::test): New.
+ (struct default_hash_traits<asm_output_svalue::key_t>): New.
+
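An assumed (x86-flavored) example of the inline asm now modeled: each
output operand is bound to an asm_output_svalue derived from the
inputs, instead of the statement conservatively invalidating all state:

  int test (int x)
  {
    int y;
    asm ("mov %1, %0" : "=r" (y) : "r" (x));
    /* y is now an asm_output_svalue over x, not an unknown escape.  */
    return y;
  }
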
+2021-08-03 Jakub Jelinek <jakub@redhat.com>
+
+ PR analyzer/101721
+ * sm-malloc.cc (known_allocator_p): Only check DECL_FUNCTION_CODE on
+ BUILT_IN_NORMAL builtins.
+
+2021-07-29 Ankur Saini <arsenic@sourceware.org>
+
+ * call-string.cc (call_string::element_t::operator==): New operator.
+	(call_string::element_t::operator!=): New operator.
+ (call_string::element_t::get_caller_function): New function.
+ (call_string::element_t::get_callee_function): New function.
+	(call_string::call_string): Refactor to initialise m_elements.
+ (call_string::operator=): Refactor to work with m_elements.
+ (call_string::operator==): Likewise.
+ (call_string::to_json): Likewise.
+ (call_string::hash): Refactor to hash e.m_caller.
+ (call_string::push_call): Refactor to work with m_elements.
+ (call_string::push_call): New overload to push call via supernodes.
+ (call_string::pop): Refactor to work with m_elements.
+ (call_string::calc_recursion_depth): Likewise.
+ (call_string::cmp): Likewise.
+ (call_string::validate): Likewise.
+ (call_string::operator[]): Likewise.
+ * call-string.h (class supernode): New forward decl.
+ (struct call_string::element_t): New struct.
+ (call_string::call_string): Refactor to initialise m_elements.
+	(call_string::empty_p): Refactor to work with m_elements.
+ (call_string::get_callee_node): New decl.
+ (call_string::get_caller_node): New decl.
+ (m_elements): Replaces m_return_edges.
+ * program-point.cc (program_point::get_function_at_depth): Refactor to
+ work with new call-string format.
+ (program_point::validate): Likewise.
+ (program_point::on_edge): Likewise.
+
+2021-07-28 David Malcolm <dmalcolm@redhat.com>
+
+ * region-model.cc (region_model::on_call_pre): Treat
+ IFN_UBSAN_BOUNDS, BUILT_IN_STACK_SAVE, and BUILT_IN_STACK_RESTORE
+ as no-ops, rather than handling them as unknown functions.
+
+2021-07-28 David Malcolm <dmalcolm@redhat.com>
+
+ * region-model-impl-calls.cc (region_model::impl_call_alloca):
+ Drop redundant return value.
+ (region_model::impl_call_builtin_expect): Likewise.
+ (region_model::impl_call_calloc): Likewise.
+ (region_model::impl_call_malloc): Likewise.
+ (region_model::impl_call_memset): Likewise.
+ (region_model::impl_call_operator_new): Likewise.
+ (region_model::impl_call_operator_delete): Likewise.
+ (region_model::impl_call_strlen): Likewise.
+ * region-model.cc (region_model::on_call_pre): Fix return value of
+ known functions that don't have unknown side-effects.
+ * region-model.h (region_model::impl_call_alloca): Drop redundant
+ return value.
+ (region_model::impl_call_builtin_expect): Likewise.
+ (region_model::impl_call_calloc): Likewise.
+ (region_model::impl_call_malloc): Likewise.
+ (region_model::impl_call_memset): Likewise.
+ (region_model::impl_call_strlen): Likewise.
+ (region_model::impl_call_operator_new): Likewise.
+ (region_model::impl_call_operator_delete): Likewise.
+
+2021-07-28 Siddhesh Poyarekar <siddhesh@gotplt.org>
+
+ * analyzer.cc (is_named_call_p, is_std_named_call_p): Make
+ first argument a const_tree.
+	* analyzer.h (is_named_call_p, is_std_named_call_p): Likewise.
+ * sm-malloc.cc (known_allocator_p): New function.
+ (malloc_state_machine::on_stmt): Use it.
+
+2021-07-28 Siddhesh Poyarekar <siddhesh@gotplt.org>
+
+ * sm-malloc.cc
+ (malloc_state_machine::get_or_create_deallocator): Recognize
+ __builtin_free.
+
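A minimal assumed test: with __builtin_free recognized as a
deallocator, the malloc state machine's diagnostics apply to it as
well:

  void test (void *p)
  {
    __builtin_free (p);
    __builtin_free (p);  /* now diagnosable as a double-free */
  }
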
+2021-07-26 David Malcolm <dmalcolm@redhat.com>
+
+ * region-model.cc (region_model::on_call_pre): Always set conjured
+ LHS, not just for SSA names.
+
+2021-07-23 David Malcolm <dmalcolm@redhat.com>
+
+ * diagnostic-manager.cc
+ (class auto_disable_complexity_checks): New.
+ (epath_finder::explore_feasible_paths): Use it to disable
+ complexity checks whilst processing the worklist.
+ * region-model-manager.cc
+ (region_model_manager::region_model_manager): Initialize
+ m_check_complexity.
+ (region_model_manager::reject_if_too_complex): Bail if
+ m_check_complexity is false.
+ * region-model.h
+ (region_model_manager::enable_complexity_check): New.
+ (region_model_manager::disable_complexity_check): New.
+ (region_model_manager::m_check_complexity): New.
+
+2021-07-21 David Malcolm <dmalcolm@redhat.com>
+
+ PR analyzer/101547
+ * sm-file.cc (file_leak::emit): Handle m_arg being NULL.
+ (file_leak::describe_final_event): Handle ev.m_expr being NULL.
+
+2021-07-21 David Malcolm <dmalcolm@redhat.com>
+
+ PR analyzer/101522
+ * store.cc (binding_cluster::purge_state_involving): Don't change
+ m_map whilst iterating through it.
+
+2021-07-21 David Malcolm <dmalcolm@redhat.com>
+
+ * region-model.cc (region_model::handle_phi): Add "old_state"
+ param and use it.
+ (region_model::update_for_phis): Update so that all of the phi
+ stmts are effectively handled simultaneously, rather than in
+ order.
+ * region-model.h (region_model::handle_phi): Add "old_state"
+ param.
+ * state-purge.cc (self_referential_phi_p): Replace with...
+ (name_used_by_phis_p): ...this new function.
+ (state_purge_per_ssa_name::process_point): Update to use the
+ above, so that all phi stmts at a basic block are effectively
+ considered simultaneously, and only consider the phi arguments for
+ the pertinent in-edge.
+ * supergraph.cc (cfg_superedge::get_phi_arg_idx): New.
+ (cfg_superedge::get_phi_arg): Use the above.
+ * supergraph.h (cfg_superedge::get_phi_arg_idx): New decl.
+
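Phi nodes at a block head have parallel-copy semantics, so processing
them one at a time can wrongly consume an already-updated value.  An
assumed example where this matters, swapping two values in a loop:

  void test (int n)
  {
    int a = 0, b = 1;
    while (n-- > 0)
      {
        /* At the loop's back edge the phis for a and b must both read
           the previous iteration's values; handling them "in order"
           could compute the new b from the already-updated a.  */
        int t = a;
        a = b;
        b = t;
      }
  }
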
+2021-07-21 David Malcolm <dmalcolm@redhat.com>
+
+ * state-purge.cc (state_purge_annotator::add_node_annotations):
+ Rather than erroneously always using the NULL in-edge, determine
+ each relevant in-edge, and print the appropriate data for each
+ in-edge. Use print_needed to print the data as comma-separated
+ lists of SSA names.
+ (print_vec_of_names): Add "within_table" param and use it.
+ (state_purge_annotator::add_stmt_annotations): Factor out
+ collation and printing code into...
+ (state_purge_annotator::print_needed): ...this new function.
+ * state-purge.h (state_purge_annotator::print_needed): New decl.
+
+2021-07-21 David Malcolm <dmalcolm@redhat.com>
+
+ * program-point.cc (function_point::print): Show src BB index at
+ BEFORE_SUPERNODE.
+
+2021-07-21 David Malcolm <dmalcolm@redhat.com>
+
+ * svalue.cc (infix_p): New.
+ (binop_svalue::dump_to_pp): Use it to print MIN_EXPR and MAX_EXPR
+ in prefix form, rather than infix.
+
+2021-07-19 David Malcolm <dmalcolm@redhat.com>
+
+ PR analyzer/101503
+ * constraint-manager.cc (constraint_manager::add_constraint): Use
+ can_have_associated_state_p rather than testing for unknown.
+ (constraint_manager::get_or_add_equiv_class): Likewise.
+ * program-state.cc (sm_state_map::set_state): Likewise.
+ (sm_state_map::impl_set_state): Add assertion.
+ * region-model-manager.cc
+ (region_model_manager::maybe_fold_unaryop): Handle poisoned
+ values.
+ (region_model_manager::maybe_fold_binop): Move handling of unknown
+ values...
+ (region_model_manager::get_or_create_binop): ...to here, and
+ generalize to use can_have_associated_state_p.
+ (region_model_manager::maybe_fold_sub_svalue): Use
+ can_have_associated_state_p rather than testing for unknown.
+ (region_model_manager::maybe_fold_repeated_svalue): Use unknown
+ when the size or repeated value is "unknown"/"poisoned".
+ * region-model.cc (region_model::purge_state_involving): Reject
+ attempts to purge unknown/poisoned svalues, as these svalues
+ should not have state associated with them.
+ * svalue.cc (sub_svalue::sub_svalue): Assert that we're building
+ on top of an svalue with can_have_associated_state_p.
+ (repeated_svalue::repeated_svalue): Likewise.
+ (bits_within_svalue::bits_within_svalue): Likewise.
+ * svalue.h (svalue::can_have_associated_state_p): New.
+ (unknown_svalue::can_have_associated_state_p): New.
+ (poisoned_svalue::can_have_associated_state_p): New.
+ (unaryop_svalue::unaryop_svalue): Assert that we're building on
+ top of an svalue with can_have_associated_state_p.
+ (binop_svalue::binop_svalue): Likewise.
+ (widening_svalue::widening_svalue): Likewise.
+
+2021-07-16 David Malcolm <dmalcolm@redhat.com>
+
+ * analyzer.h (enum access_direction): New.
+ * engine.cc (exploded_node::on_longjmp): Update for new param of
+ get_store_value.
+ * program-state.cc (program_state::prune_for_point): Likewise.
+ * region-model-impl-calls.cc (region_model::impl_call_memcpy):
+ Replace call to check_for_writable_region with call to
+ check_region_for_write.
+ (region_model::impl_call_memset): Likewise.
+ (region_model::impl_call_strcpy): Likewise.
+ * region-model-reachability.cc (reachable_regions::add): Update
+ for new param of get_store_value.
+ * region-model.cc (region_model::get_rvalue_1): Likewise, also for
+ get_rvalue_for_bits.
+ (region_model::get_store_value): Add ctxt param and use it to call
+ check_region_for_read.
+ (region_model::get_rvalue_for_bits): Add ctxt param and use it to
+ call get_store_value.
+ (region_model::check_region_access): New.
+ (region_model::check_region_for_write): New.
+ (region_model::check_region_for_read): New.
+ (region_model::set_value): Update comment. Replace call to
+ check_for_writable_region with call to check_region_for_write.
+ * region-model.h (region_model::get_rvalue_for_bits): Add ctxt
+ param.
+ (region_model::get_store_value): Add ctxt param.
+ (region_model::check_region_access): New decl.
+ (region_model::check_region_for_write): New decl.
+ (region_model::check_region_for_read): New decl.
+ * region.cc (region_model::copy_region): Update call to
+ get_store_value.
+ * svalue.cc (initial_svalue::implicitly_live_p): Likewise.
+
+2021-07-16 David Malcolm <dmalcolm@redhat.com>
+
+ * engine.cc (exploded_node::on_stmt_pre): Handle
+ __analyzer_dump_state.
+ * program-state.cc (extrinsic_state::get_sm_idx_by_name): New.
+ (program_state::impl_call_analyzer_dump_state): New.
+ * program-state.h (extrinsic_state::get_sm_idx_by_name): New decl.
+ (program_state::impl_call_analyzer_dump_state): New decl.
+ * region-model-impl-calls.cc
+ (call_details::get_arg_string_literal): New.
+ * region-model.h (call_details::get_arg_string_literal): New decl.
+
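Usage illustration (the declaration is an assumption; the testsuite
gets it from analyzer-decls.h): __analyzer_dump_state takes a string
literal naming a state machine plus an expression, and reports that
expression's sm-state:

  #include <stdlib.h>

  extern void __analyzer_dump_state (const char *name, ...); /* assumed decl */

  void test (void)
  {
    void *p = malloc (16);
    __analyzer_dump_state ("malloc", p);  /* e.g. state: "unchecked" */
    free (p);
  }
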
+2021-07-16 David Malcolm <dmalcolm@redhat.com>
+
+ * program-state.cc (program_state::detect_leaks): Simplify using
+ svalue::maybe_get_region.
+ * region-model-impl-calls.cc (region_model::impl_call_fgets): Likewise.
+ (region_model::impl_call_fread): Likewise.
+ (region_model::impl_call_free): Likewise.
+ (region_model::impl_call_operator_delete): Likewise.
+ * region-model.cc (selftest::test_stack_frames): Likewise.
+ (selftest::test_state_merging): Likewise.
+ * svalue.cc (svalue::maybe_get_region): New.
+ * svalue.h (svalue::maybe_get_region): New decl.
+
+2021-07-15 David Malcolm <dmalcolm@redhat.com>
+
+ * svalue.h (is_a_helper <placeholder_svalue *>::test): Make
+ param and template param const.
+ (is_a_helper <widening_svalue *>::test): Likewise.
+ (is_a_helper <compound_svalue *>::test): Likewise.
+ (is_a_helper <conjured_svalue *>::test): Likewise.
+
+2021-07-15 David Malcolm <dmalcolm@redhat.com>
+
+ PR analyzer/95006
+ PR analyzer/94713
+ PR analyzer/94714
+ * analyzer.cc (maybe_reconstruct_from_def_stmt): Split out
+ GIMPLE_ASSIGN case into...
+ (get_diagnostic_tree_for_gassign_1): New.
+ (get_diagnostic_tree_for_gassign): New.
+ * analyzer.h (get_diagnostic_tree_for_gassign): New decl.
+ * analyzer.opt (Wanalyzer-write-to-string-literal): New.
+ * constraint-manager.cc (class svalue_purger): New.
+ (constraint_manager::purge_state_involving): New.
+ * constraint-manager.h
+ (constraint_manager::purge_state_involving): New.
+ * diagnostic-manager.cc (saved_diagnostic::supercedes_p): New.
+ (dedupe_winners::handle_interactions): New.
+ (diagnostic_manager::emit_saved_diagnostics): Call it.
+ * diagnostic-manager.h (saved_diagnostic::supercedes_p): New decl.
+ * engine.cc (impl_region_model_context::warn): Convert return type
+ to bool. Return false if the diagnostic isn't saved.
+ (impl_region_model_context::purge_state_involving): New.
+ (impl_sm_context::get_state): Use NULL ctxt when querying old
+ rvalue.
+ (impl_sm_context::set_next_state): Use new sval when querying old
+ state.
+ (class dump_path_diagnostic): Move to region-model.cc
+ (exploded_node::on_stmt): Move to on_stmt_pre and on_stmt_post.
+ Remove call to purge_state_involving.
+ (exploded_node::on_stmt_pre): New, based on the above. Move most
+ of it to region_model::on_stmt_pre.
+ (exploded_node::on_stmt_post): Likewise, moving to
+ region_model::on_stmt_post.
+ (class stale_jmp_buf): Fix parent class to use curiously recurring
+ template pattern.
+ (feasibility_state::maybe_update_for_edge): Call on_call_pre and
+ on_call_post on gcalls.
+ * exploded-graph.h (impl_region_model_context::warn): Return bool.
+ (impl_region_model_context::purge_state_involving): New decl.
+ (exploded_node::on_stmt_pre): New decl.
+ (exploded_node::on_stmt_post): New decl.
+ * pending-diagnostic.h (pending_diagnostic::use_of_uninit_p): New.
+ (pending_diagnostic::supercedes_p): New.
+ * program-state.cc (sm_state_map::get_state): Inherit state for
+ conjured_svalue as well as initial_svalue.
+ (sm_state_map::purge_state_involving): Also support SK_CONJURED.
+ * region-model-impl-calls.cc (call_details::get_uncertainty):
+ Handle m_ctxt being NULL.
+ (call_details::get_or_create_conjured_svalue): New.
+ (region_model::impl_call_fgets): New.
+ (region_model::impl_call_fread): New.
+ * region-model-manager.cc
+ (region_model_manager::get_or_create_initial_value): Return an
+ uninitialized poisoned value for regions that can't have initial
+ values.
+ * region-model-reachability.cc
+ (reachable_regions::mark_escaped_clusters): Handle ctxt being
+ NULL.
+ * region-model.cc (region_to_value_map::purge_state_involving): New.
+ (poisoned_value_diagnostic::use_of_uninit_p): New.
+ (poisoned_value_diagnostic::emit): Handle POISON_KIND_UNINIT.
+ (poisoned_value_diagnostic::describe_final_event): Likewise.
+ (region_model::check_for_poison): New.
+ (region_model::on_assignment): Call it.
+ (class dump_path_diagnostic): Move here from engine.cc.
+ (region_model::on_stmt_pre): New, based on exploded_node::on_stmt.
+ (region_model::on_call_pre): Move the setting of the LHS to a
+ conjured svalue to before the checks for specific functions.
+ Handle "fgets", "fgets_unlocked", and "fread".
+ (region_model::purge_state_involving): New.
+ (region_model::handle_unrecognized_call): Handle ctxt being NULL.
+ (region_model::get_rvalue): Call check_for_poison.
+ (selftest::test_stack_frames): Use NULL for context when getting
+ uninitialized rvalue.
+ (selftest::test_alloca): Likewise.
+ * region-model.h (region_to_value_map::purge_state_involving): New
+ decl.
+ (call_details::get_or_create_conjured_svalue): New decl.
+ (region_model::on_stmt_pre): New decl.
+ (region_model::purge_state_involving): New decl.
+ (region_model::impl_call_fgets): New decl.
+ (region_model::impl_call_fread): New decl.
+ (region_model::check_for_poison): New decl.
+ (region_model_context::warn): Return bool.
+ (region_model_context::purge_state_involving): New.
+ (noop_region_model_context::warn): Return bool.
+ (noop_region_model_context::purge_state_involving): New.
+	(test_region_model_context::warn): Return bool.
+ * region.cc (region::get_memory_space): New.
+ (region::can_have_initial_svalue_p): New.
+ (region::involves_p): New.
+ * region.h (enum memory_space): New.
+ (region::get_memory_space): New decl.
+ (region::can_have_initial_svalue_p): New decl.
+ (region::involves_p): New decl.
+ * sm-malloc.cc (use_after_free::supercedes_p): New.
+ * store.cc (binding_cluster::purge_state_involving): New.
+ (store::purge_state_involving): New.
+ * store.h (class symbolic_binding): New forward decl.
+ (binding_key::dyn_cast_symbolic_binding): New.
+ (symbolic_binding::dyn_cast_symbolic_binding): New.
+ (binding_cluster::purge_state_involving): New.
+ (store::purge_state_involving): New.
+ * svalue.cc (svalue::can_merge_p): Reject attempts to merge
+ poisoned svalues with other svalues, so that we identify
+ paths in which a variable is conditionally uninitialized.
+ (involvement_visitor::visit_conjured_svalue): New.
+ (svalue::involves_p): Also handle SK_CONJURED.
+ (poison_kind_to_str): Handle POISON_KIND_UNINIT.
+ (poisoned_svalue::maybe_fold_bits_within): New.
+ * svalue.h (enum poison_kind): Add POISON_KIND_UNINIT.
+ (poisoned_svalue::maybe_fold_bits_within): New decl.
+
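Two assumed test cases for the diagnostics this entry enables: a
conditionally-uninitialized read (POISON_KIND_UNINIT, kept on its own
path because poisoned svalues no longer merge with others), and the new
-Wanalyzer-write-to-string-literal option:

  int test_uninit (int flag)
  {
    int x;
    if (flag)
      x = 1;
    return x;  /* uninitialized on the !flag path */
  }

  void test_write_to_literal (void)
  {
    char *s = "text";
    s[0] = 'x';  /* -Wanalyzer-write-to-string-literal */
  }
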
+2021-07-15 David Malcolm <dmalcolm@redhat.com>
+
+ * analyzer.opt (fdump-analyzer-exploded-paths): New.
+ * diagnostic-manager.cc
+ (diagnostic_manager::emit_saved_diagnostic): Implement it.
+ * engine.cc (exploded_path::dump_to_pp): Add ext_state param and
+ use it to dump states if non-NULL.
+ (exploded_path::dump): Likewise.
+ (exploded_path::dump_to_file): New.
+ * exploded-graph.h (exploded_path::dump_to_pp): Add ext_state
+ param.
+ (exploded_path::dump): Likewise.
+ (exploded_path::dump): Likewise.
+ (exploded_path::dump_to_file): New.
+
+2021-07-15 David Malcolm <dmalcolm@redhat.com>
+
+ * analyzer.cc (fixup_tree_for_diagnostic_1): Use DECL_DEBUG_EXPR
+ if it's available.
+ * engine.cc (readability): Likewise.
+
+2021-07-15 David Malcolm <dmalcolm@redhat.com>
+
+ * state-purge.cc (self_referential_phi_p): New.
+ (state_purge_per_ssa_name::process_point): Don't purge an SSA name
+ at its def-stmt if the def-stmt is self-referential.
+
+2021-07-07 David Malcolm <dmalcolm@redhat.com>
+
+ * diagnostic-manager.cc (null_assignment_sm_context::get_state):
+ New overload.
+ (null_assignment_sm_context::set_next_state): New overload.
+ (null_assignment_sm_context::get_diagnostic_tree): New.
+ * engine.cc (impl_sm_context::get_state): New overload.
+ (impl_sm_context::set_next_state): New overload.
+ (impl_sm_context::get_diagnostic_tree): New overload.
+ (impl_region_model_context::on_condition): Convert params from
+ tree to const svalue *.
+ * exploded-graph.h (impl_region_model_context::on_condition):
+ Likewise.
+ * region-model.cc (region_model::on_call_pre): Move handling of
+ internal calls to before checking for get_fndecl_for_call.
+ (region_model::add_constraints_from_binop): New.
+ (region_model::add_constraint): Split out into a new overload
+ working on const svalue * rather than tree. Call
+ add_constraints_from_binop. Drop call to
+ add_any_constraints_from_ssa_def_stmt.
+ (region_model::add_any_constraints_from_ssa_def_stmt): Delete.
+ (region_model::add_any_constraints_from_gassign): Delete.
+ (region_model::add_any_constraints_from_gcall): Delete.
+ * region-model.h
+ (region_model::add_any_constraints_from_ssa_def_stmt): Delete.
+ (region_model::add_any_constraints_from_gassign): Delete.
+ (region_model::add_any_constraints_from_gcall): Delete.
+ (region_model::add_constraint): Add overload decl.
+ (region_model::add_constraints_from_binop): New decl.
+ (region_model_context::on_condition): Convert params from tree to
+ const svalue *.
+ (noop_region_model_context::on_condition): Likewise.
+ * sm-file.cc (fileptr_state_machine::condition): Likewise.
+ * sm-malloc.cc (malloc_state_machine::on_condition): Likewise.
+ * sm-pattern-test.cc: Include tristate.h, selftest.h,
+ analyzer/call-string.h, analyzer/program-point.h,
+ analyzer/store.h, and analyzer/region-model.h.
+ (pattern_test_state_machine::on_condition): Convert params from tree to
+ const svalue *.
+ * sm-sensitive.cc (sensitive_state_machine::on_condition): Delete.
+ * sm-signal.cc (signal_state_machine::on_condition): Delete.
+ * sm-taint.cc (taint_state_machine::on_condition): Convert params
+ from tree to const svalue *.
+ * sm.cc: Include tristate.h, selftest.h, analyzer/call-string.h,
+ analyzer/program-point.h, analyzer/store.h, and
+ analyzer/region-model.h.
+ (any_pointer_p): Add overload taking const svalue *sval.
+ * sm.h (any_pointer_p): Add overload taking const svalue *sval.
+ (state_machine::on_condition): Convert params from tree to
+ const svalue *. Provide no-op default implementation.
+ (sm_context::get_state): Add overload taking const svalue *sval.
+ (sm_context::set_next_state): Likewise.
+ (sm_context::on_transition): Likewise.
+ (sm_context::get_diagnostic_tree): Likewise.
+ * svalue.cc (svalue::all_zeroes_p): New.
+ (constant_svalue::all_zeroes_p): New.
+ (repeated_svalue::all_zeroes_p): Convert to vfunc.
+ * svalue.h (svalue::all_zeroes_p): New decl.
+ (constant_svalue::all_zeroes_p): New decl.
+ (repeated_svalue::all_zeroes_p): Convert decl to vfunc.
+
+2021-06-30 David Malcolm <dmalcolm@redhat.com>
+
+ PR analyzer/95006
+ * analyzer.h (class repeated_svalue): New forward decl.
+ (class bits_within_svalue): New forward decl.
+ (class sized_region): New forward decl.
+ (get_field_at_bit_offset): New forward decl.
+ * engine.cc (exploded_graph::get_or_create_node): Validate the
+ merged state.
+ (exploded_graph::maybe_process_run_of_before_supernode_enodes):
+ Validate the states at each stage.
+ * program-state.cc (program_state::validate): Validate
+ m_region_model.
+ * region-model-impl-calls.cc (region_model::impl_call_memset):
+ Replace special-case logic for handling constant sizes with
+ a call to fill_region of a sized_region with the given fill value.
+ * region-model-manager.cc (maybe_undo_optimize_bit_field_compare):
+ Drop DK_direct.
+ (region_model_manager::maybe_fold_sub_svalue): Fold element-based
+ subregions of an initial value into initial values of an element.
+ Fold subvalues of repeated svalues.
+ (region_model_manager::maybe_fold_repeated_svalue): New.
+ (region_model_manager::get_or_create_repeated_svalue): New.
+ (get_bit_range_for_field): New.
+ (get_byte_range_for_field): New.
+ (get_field_at_byte_range): New.
+ (region_model_manager::maybe_fold_bits_within_svalue): New.
+ (region_model_manager::get_or_create_bits_within): New.
+ (region_model_manager::get_sized_region): New.
+ (region_model_manager::log_stats): Update for addition of
+ m_repeated_values_map, m_bits_within_values_map, and
+ m_sized_regions.
+ * region-model.cc (region_model::validate): New.
+ (region_model::on_assignment): Drop enum binding_kind.
+ (region_model::get_initial_value_for_global): Likewise.
+ (region_model::get_rvalue_for_bits): Replace body with call to
+ get_or_create_bits_within.
+ (region_model::get_capacity): Handle RK_SIZED.
+ (region_model::set_value): Drop enum binding_kind.
+ (region_model::fill_region): New.
+ (region_model::get_representative_path_var_1): Handle RK_SIZED.
+ * region-model.h (visitor::visit_repeated_svalue): New.
+ (visitor::visit_bits_within_svalue): New.
+ (region_model_manager::get_or_create_repeated_svalue): New decl.
+ (region_model_manager::get_or_create_bits_within): New decl.
+ (region_model_manager::get_sized_region): New decl.
+ (region_model_manager::maybe_fold_repeated_svalue): New decl.
+ (region_model_manager::maybe_fold_bits_within_svalue): New decl.
+ (region_model_manager::repeated_values_map_t): New typedef.
+ (region_model_manager::m_repeated_values_map): New field.
+ (region_model_manager::bits_within_values_map_t): New typedef.
+ (region_model_manager::m_bits_within_values_map): New field.
+ (region_model_manager::m_sized_regions): New field.
+ (region_model::fill_region): New decl.
+ * region.cc (region::get_base_region): Handle RK_SIZED.
+ (region::base_region_p): Likewise.
+ (region::get_byte_size_sval): New.
+ (get_field_at_bit_offset): Make non-static.
+ (region::calc_offset): Move implementation of cases to
+ get_relative_concrete_offset vfunc implementations. Handle
+ RK_SIZED.
+ (region::get_relative_concrete_offset): New.
+ (decl_region::get_svalue_for_initializer): Drop enum binding_kind.
+ (field_region::get_relative_concrete_offset): New, from
+ region::calc_offset.
+ (element_region::get_relative_concrete_offset): Likewise.
+ (offset_region::get_relative_concrete_offset): Likewise.
+ (sized_region::accept): New.
+ (sized_region::dump_to_pp): New.
+ (sized_region::get_byte_size): New.
+ (sized_region::get_bit_size): New.
+ * region.h (enum region_kind): Add RK_SIZED.
+ (region::dyn_cast_sized_region): New.
+ (region::get_byte_size): Make virtual.
+ (region::get_bit_size): Likewise.
+ (region::get_byte_size_sval): New decl.
+ (region::get_relative_concrete_offset): New decl.
+ (field_region::get_relative_concrete_offset): New decl.
+ (element_region::get_relative_concrete_offset): Likewise.
+ (offset_region::get_relative_concrete_offset): Likewise.
+ (class sized_region): New.
+ * store.cc (binding_kind_to_string): Delete.
+ (binding_key::make): Drop enum binding_kind.
+ (binding_key::dump_to_pp): Delete.
+ (binding_key::cmp_ptrs): Drop enum binding_kind.
+ (bit_range::contains_p): New.
+ (byte_range::dump): New.
+ (byte_range::contains_p): New.
+ (byte_range::cmp): New.
+ (concrete_binding::dump_to_pp): Drop enum binding_kind.
+ (concrete_binding::cmp_ptr_ptr): Likewise.
+ (symbolic_binding::dump_to_pp): Likewise.
+ (symbolic_binding::cmp_ptr_ptr): Likewise.
+ (binding_map::apply_ctor_val_to_range): Likewise.
+ (binding_map::apply_ctor_pair_to_child_region): Likewise.
+ (binding_map::get_overlapping_bindings): New.
+ (binding_map::remove_overlapping_bindings): New.
+ (binding_cluster::validate): New.
+ (binding_cluster::bind): Drop enum binding_kind.
+ (binding_cluster::bind_compound_sval): Likewise.
+ (binding_cluster::purge_region): Likewise.
+ (binding_cluster::zero_fill_region): Reimplement in terms of...
+ (binding_cluster::fill_region): New.
+ (binding_cluster::mark_region_as_unknown): Drop enum binding_kind.
+ (binding_cluster::get_binding): Likewise.
+ (binding_cluster::get_binding_recursive): Likewise.
+ (binding_cluster::get_any_binding): Likewise.
+ (binding_cluster::maybe_get_compound_binding): Reimplement.
+ (binding_cluster::get_overlapping_bindings): Delete.
+ (binding_cluster::remove_overlapping_bindings): Reimplement in
+ terms of binding_map::remove_overlapping_bindings.
+ (binding_cluster::can_merge_p): Update for removal of
+ enum binding_kind.
+ (binding_cluster::on_unknown_fncall): Drop enum binding_kind.
+ (binding_cluster::maybe_get_simple_value): Likewise.
+ (store_manager::get_concrete_binding): Likewise.
+ (store_manager::get_symbolic_binding): Likewise.
+ (store::validate): New.
+ (store::set_value): Drop enum binding_kind.
+ (store::zero_fill_region): Reimplement in terms of...
+ (store::fill_region): New.
+ (selftest::test_binding_key_overlap): Drop enum binding_kind.
+ * store.h (enum binding_kind): Delete.
+ (binding_kind_to_string): Delete decl.
+ (binding_key::make): Drop enum binding_kind.
+ (binding_key::dump_to_pp): Make pure virtual.
+ (binding_key::get_kind): Delete.
+ (binding_key::mark_deleted): Delete.
+ (binding_key::mark_empty): Delete.
+ (binding_key::is_deleted): Delete.
+ (binding_key::is_empty): Delete.
+ (binding_key::binding_key): Delete.
+ (binding_key::impl_hash): Delete.
+ (binding_key::impl_eq): Delete.
+ (binding_key::m_kind): Delete.
+ (bit_range::get_last_bit_offset): New.
+ (bit_range::contains_p): New.
+ (byte_range::contains_p): New.
+ (byte_range::operator==): New.
+ (byte_range::get_start_byte_offset): New.
+ (byte_range::get_next_byte_offset): New.
+ (byte_range::get_last_byte_offset): New.
+ (byte_range::as_bit_range): New.
+ (byte_range::cmp): New.
+ (concrete_binding::concrete_binding): Drop enum binding_kind.
+ (concrete_binding::hash): Likewise.
+ (concrete_binding::operator==): Likewise.
+ (concrete_binding::mark_deleted): New.
+ (concrete_binding::mark_empty): New.
+ (concrete_binding::is_deleted): New.
+ (concrete_binding::is_empty): New.
+ (default_hash_traits<ana::concrete_binding>::empty_zero_p): Make false.
+ (symbolic_binding::symbolic_binding): Drop enum binding_kind.
+ (symbolic_binding::hash): Likewise.
+ (symbolic_binding::operator==): Likewise.
+ (symbolic_binding::mark_deleted): New.
+ (symbolic_binding::mark_empty): New.
+ (symbolic_binding::is_deleted): New.
+ (symbolic_binding::is_empty): New.
+ (binding_map::remove_overlapping_bindings): New decl.
+ (binding_map::get_overlapping_bindings): New decl.
+ (binding_cluster::validate): New decl.
+ (binding_cluster::bind): Drop enum binding_kind.
+ (binding_cluster::fill_region): New decl.
+ (binding_cluster::get_binding): Drop enum binding_kind.
+ (binding_cluster::get_binding_recursive): Likewise.
+ (binding_cluster::get_overlapping_bindings): Delete.
+ (store::validate): New decl.
+ (store::set_value): Drop enum binding_kind.
+ (store::fill_region): New decl.
+ (store_manager::get_concrete_binding): Drop enum binding_kind.
+ (store_manager::get_symbolic_binding): Likewise.
+ * svalue.cc (svalue::cmp_ptr): Handle SK_REPEATED and
+ SK_BITS_WITHIN.
+ (svalue::extract_bit_range): New.
+ (svalue::maybe_fold_bits_within): New.
+ (constant_svalue::maybe_fold_bits_within): New.
+ (unknown_svalue::maybe_fold_bits_within): New.
+ (unaryop_svalue::maybe_fold_bits_within): New.
+ (repeated_svalue::repeated_svalue): New.
+ (repeated_svalue::dump_to_pp): New.
+ (repeated_svalue::accept): New.
+ (repeated_svalue::all_zeroes_p): New.
+ (repeated_svalue::maybe_fold_bits_within): New.
+ (bits_within_svalue::bits_within_svalue): New.
+ (bits_within_svalue::dump_to_pp): New.
+ (bits_within_svalue::maybe_fold_bits_within): New.
+ (bits_within_svalue::accept): New.
+ (bits_within_svalue::implicitly_live_p): New.
+ (compound_svalue::maybe_fold_bits_within): New.
+ * svalue.h (enum svalue_kind): Add SK_REPEATED and SK_BITS_WITHIN.
+ (svalue::dyn_cast_repeated_svalue): New.
+ (svalue::dyn_cast_bits_within_svalue): New.
+ (svalue::extract_bit_range): New decl.
+ (svalue::maybe_fold_bits_within): New vfunc decl.
+ (region_svalue::key_t::mark_empty): Use 2 rather than NULL_TREE.
+ (region_svalue::key_t::is_empty): Likewise.
+ (default_hash_traits<region_svalue::key_t>::empty_zero_p): Make false.
+ (constant_svalue::maybe_fold_bits_within): New.
+ (unknown_svalue::maybe_fold_bits_within): New.
+ (poisoned_svalue::key_t::mark_empty): Use 2 rather than NULL_TREE.
+ (poisoned_svalue::key_t::is_empty): Likewise.
+ (default_hash_traits<poisoned_svalue::key_t>::empty_zero_p): Make
+ false.
+ (setjmp_svalue::key_t::mark_empty): Use 2 rather than NULL_TREE.
+ (setjmp_svalue::key_t::is_empty): Likewise.
+ (default_hash_traits<setjmp_svalue::key_t>::empty_zero_p): Make
+ false.
+ (unaryop_svalue::key_t::mark_empty): Use 2 rather than NULL_TREE.
+ (unaryop_svalue::key_t::is_empty): Likewise.
+ (unaryop_svalue::maybe_fold_bits_within): New.
+ (default_hash_traits<unaryop_svalue::key_t>::empty_zero_p): Make
+ false.
+ (binop_svalue::key_t::mark_empty): Use 2 rather than NULL_TREE.
+ (binop_svalue::key_t::is_empty): Likewise.
+ (default_hash_traits<binop_svalue::key_t>::empty_zero_p): Make
+ false.
+ (sub_svalue::key_t::mark_empty): Use 2 rather than NULL_TREE.
+ (sub_svalue::key_t::is_empty): Likewise.
+ (default_hash_traits<sub_svalue::key_t>::empty_zero_p): Make
+ false.
+ (class repeated_svalue): New.
+ (is_a_helper <const repeated_svalue *>::test): New.
+ (struct default_hash_traits<repeated_svalue::key_t>): New.
+ (class bits_within_svalue): New.
+ (is_a_helper <const bits_within_svalue *>::test): New.
+ (struct default_hash_traits<bits_within_svalue::key_t>): New.
+ (widening_svalue::key_t::mark_empty): Use 2 rather than NULL_TREE.
+ (widening_svalue::key_t::is_empty): Likewise.
+ (default_hash_traits<widening_svalue::key_t>::empty_zero_p): Make
+ false.
+ (compound_svalue::key_t::mark_empty): Use 2 rather than NULL_TREE.
+ (compound_svalue::key_t::is_empty): Likewise.
+ (compound_svalue::maybe_fold_bits_within): New.
+ (default_hash_traits<compound_svalue::key_t>::empty_zero_p): Make
+ false.
+
+2021-06-28 David Malcolm <dmalcolm@redhat.com>
+
+ * analyzer.h (byte_offset_t): New typedef.
+ * store.cc (bit_range::dump_to_pp): Dump as a byte range if
+ possible.
+ (bit_range::as_byte_range): New.
+ (byte_range::dump_to_pp): New.
+ * store.h (class byte_range): New forward decl.
+ (struct bit_range): Add comment.
+ (bit_range::as_byte_range): New decl.
+ (struct byte_range): New.
+
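A hedged C sketch of the byte_range idea added here (field and function
names assumed; the real class lives in store.h): a start offset plus a
size, with a simple containment test:

  /* Sketch only; not GCC's actual declarations.  */
  struct byte_range_sketch
  {
    long start_byte_offset;
    long size_in_bytes;
  };

  static int
  byte_range_contains_p (const struct byte_range_sketch *r, long offset)
  {
    return (offset >= r->start_byte_offset
            && offset < r->start_byte_offset + r->size_in_bytes);
  }
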
+2021-06-22 David Malcolm <dmalcolm@redhat.com>
+
+ PR analyzer/101143
+ * region-model.cc (compat_types_p): New function.
+ (region_model::create_region_for_heap_alloc): Convert assertion to
+ an error check.
+ (region_model::create_region_for_alloca): Likewise.
+
+2021-06-18 David Malcolm <dmalcolm@redhat.com>
+
+ * store.cc (binding_cluster::get_any_binding): Make symbolic reads
+ from a cluster with concrete bindings return unknown.
+
+2021-06-18 David Malcolm <dmalcolm@redhat.com>
+
+ * region-model-manager.cc
+ (region_model_manager::get_or_create_int_cst): New.
+ (region_model_manager::maybe_undo_optimize_bit_field_compare): Use
+ it to simplify away a local tree.
+ * region-model.cc (region_model::on_setjmp): Likewise.
+ (region_model::on_longjmp): Likewise.
+ * region-model.h (region_model_manager::get_or_create_int_cst):
+ New decl.
+ * store.cc (binding_cluster::zero_fill_region): Use it to simplify
+ away a local tree.
+
+2021-06-18 David Malcolm <dmalcolm@redhat.com>
+
+ * checker-path.cc (class custom_event): Make abstract to allow for
+ custom vfuncs, splitting existing implementation into...
+ (class precanned_custom_event): New subclass.
+ (custom_event::get_desc): Move to...
+ (precanned_custom_event::get_desc): ...subclass.
+ * checker-path.h (class custom_event): Make abstract to allow for
+ custom vfuncs, splitting existing implementation into...
+ (class precanned_custom_event): New subclass.
+ * diagnostic-manager.cc (diagnostic_manager::add_events_for_eedge):
+ Use precanned_custom_event.
+ * engine.cc
+ (stale_jmp_buf::maybe_add_custom_events_for_superedge): Likewise.
+ * sm-signal.cc (signal_delivery_edge_info_t::add_events_to_path):
+ Likewise.
+
+2021-06-15 David Malcolm <dmalcolm@redhat.com>
+
+ PR analyzer/99212
+ PR analyzer/101082
+ * engine.cc: Include "target.h".
+ (impl_run_checkers): Log BITS_BIG_ENDIAN, BYTES_BIG_ENDIAN, and
+ WORDS_BIG_ENDIAN.
+ * region-model-manager.cc
+ (region_model_manager::maybe_fold_binop): Move support for masking
+ via ARG0 & CST into...
+ (region_model_manager::maybe_undo_optimize_bit_field_compare):
+ ...this new function. Flatten by converting from nested
+ conditionals to a series of early return statements to reject
+ failures. Reject if type is not unsigned_char_type_node.
+ Handle BYTES_BIG_ENDIAN when determining which bits are bound
+ in the binding_map.
+ * region-model.h
+ (region_model_manager::maybe_undo_optimize_bit_field_compare):
+ New decl.
+ * store.cc (bit_range::dump): New function.
+ * store.h (bit_range::dump): New decl.
+
+2021-06-15 David Malcolm <dmalcolm@redhat.com>
+
+ * engine.cc (exploded_node::on_stmt): Handle __analyzer_dump_capacity.
+ (exploded_node::on_stmt): Drop m_sm_changes from on_stmt_flags.
+ (state_change_requires_new_enode_p): New function...
+ (exploded_graph::process_node): Call it, rather than querying
+ flags.m_sm_changes, so that dynamic-extent differences can also
+ trigger the splitting of nodes.
+ * exploded-graph.h (struct on_stmt_flags): Drop field m_sm_changes.
+ * program-state.cc (program_state::detect_leaks): Purge dead
+ heap-allocated regions from dynamic extents.
+ (selftest::test_program_state_1): Fix type of "size_in_bytes".
+ (selftest::test_program_state_merging): Likewise.
+ * region-model-impl-calls.cc
+ (region_model::impl_call_analyzer_dump_capacity): New.
+ (region_model::impl_call_free): Remove dynamic extents from the
+ freed region.
+ * region-model-reachability.h
+ (reachable_regions::begin_mutable_base_regs): New.
+ (reachable_regions::end_mutable_base_regs): New.
+ * region-model.cc: Include "tree-object-size.h".
+ (region_model::region_model): Support new field m_dynamic_extents.
+ (region_model::operator=): Likewise.
+ (region_model::operator==): Likewise.
+ (region_model::dump_to_pp): Dump sizes of dynamic regions.
+ (region_model::handle_unrecognized_call): Purge dynamic extents
+	from any regions that have escaped mutably.
+ (region_model::get_capacity): New function.
+ (region_model::add_constraint): Unset dynamic extents when a
+ heap-allocated region's address is NULL.
+ (region_model::unbind_region_and_descendents): Purge dynamic
+ extents of unbound regions.
+ (region_model::can_merge_with_p): Call
+ m_dynamic_extents.can_merge_with_p.
+ (region_model::create_region_for_heap_alloc): Assert that
+ size_in_bytes's type is compatible with size_type_node. Update
+ for renaming of record_dynamic_extents to set_dynamic_extents.
+ (region_model::create_region_for_alloca): Likewise.
+ (region_model::record_dynamic_extents): Rename to...
+ (region_model::set_dynamic_extents): ...this. Assert that
+ size_in_bytes's type is compatible with size_type_node. Add it
+ to the m_dynamic_extents map.
+ (region_model::get_dynamic_extents): New.
+ (region_model::unset_dynamic_extents): New.
+ (selftest::test_state_merging): Fix type of "size".
+ (selftest::test_malloc_constraints): Likewise.
+ (selftest::test_malloc): Verify dynamic extents.
+ (selftest::test_alloca): Likewise.
+ * region-model.h (region_to_value_map::is_empty): New.
+ (region_model::dynamic_extents_t): New typedef.
+ (region_model::impl_call_analyzer_dump_capacity): New decl.
+ (region_model::get_dynamic_extents): New function.
+ (region_model::get_dynamic_extents): New decl.
+ (region_model::set_dynamic_extents): New decl.
+ (region_model::unset_dynamic_extents): New decl.
+ (region_model::get_capacity): New decl.
+ (region_model::record_dynamic_extents): Rename to set_dynamic_extents.
+ (region_model::m_dynamic_extents): New field.
+
+2021-06-15 David Malcolm <dmalcolm@redhat.com>
+
+ * region-model.cc (region_to_value_map::operator=): New.
+ (region_to_value_map::operator==): New.
+ (region_to_value_map::dump_to_pp): New.
+ (region_to_value_map::dump): New.
+ (region_to_value_map::can_merge_with_p): New.
+ * region-model.h (class region_to_value_map): New class.
+
+2021-06-13 Trevor Saunders <tbsaunde@tbsaunde.org>
+
+	* call-string.cc (call_string::call_string): Use range-based for
+ to iterate over vec<>.
+ (call_string::to_json): Likewise.
+ (call_string::hash): Likewise.
+ (call_string::calc_recursion_depth): Likewise.
+ * checker-path.cc (checker_path::fixup_locations): Likewise.
+ * constraint-manager.cc (equiv_class::equiv_class): Likewise.
+ (equiv_class::to_json): Likewise.
+ (equiv_class::hash): Likewise.
+ (constraint_manager::to_json): Likewise.
+ * engine.cc (impl_region_model_context::on_svalue_leak):
+ Likewise.
+ (on_liveness_change): Likewise.
+ (impl_region_model_context::on_unknown_change): Likewise.
+ * program-state.cc (sm_state_map::set_state): Likewise.
+ * region-model.cc (test_canonicalization_4): Likewise.
+
+2021-06-11 David Malcolm <dmalcolm@redhat.com>
+
+ * engine.cc (worklist::key_t::cmp): Move sort by call_string to
+ before SCC.
+
+2021-06-09 David Malcolm <dmalcolm@redhat.com>
+
+ * region-model.cc (region_model::get_lvalue_1): Make const.
+ (region_model::get_lvalue): Likewise.
+ (region_model::get_rvalue_1): Likewise.
+ (region_model::get_rvalue): Likewise.
+ (region_model::deref_rvalue): Likewise.
+ (region_model::get_rvalue_for_bits): Likewise.
+ * region-model.h (region_model::get_lvalue): Likewise.
+ (region_model::get_rvalue): Likewise.
+ (region_model::deref_rvalue): Likewise.
+ (region_model::get_rvalue_for_bits): Likewise.
+ (region_model::get_lvalue_1): Likewise.
+ (region_model::get_rvalue_1): Likewise.
+
+2021-06-08 David Malcolm <dmalcolm@redhat.com>
+
+ PR analyzer/99212
+ * region-model-manager.cc
+ (region_model_manager::maybe_fold_binop): Add support for folding
+ BIT_AND_EXPR of compound_svalue and a mask constant.
+ * region-model.cc (region_model::get_rvalue_1): Implement
+ BIT_FIELD_REF in terms of...
+ (region_model::get_rvalue_for_bits): New function.
+ * region-model.h (region_model::get_rvalue_for_bits): New decl.
+ * store.cc (bit_range::from_mask): New function.
+ (selftest::test_bit_range_intersects_p): New selftest.
+ (selftest::assert_bit_range_from_mask_eq): New.
+ (ASSERT_BIT_RANGE_FROM_MASK_EQ): New macro.
+ (selftest::assert_no_bit_range_from_mask_eq): New.
+ (ASSERT_NO_BIT_RANGE_FROM_MASK): New macro.
+ (selftest::test_bit_range_from_mask): New selftest.
+ (selftest::analyzer_store_cc_tests): Call the new selftests.
+ * store.h (bit_range::intersects_p): New.
+ (bit_range::from_mask): New decl.
+ (concrete_binding::get_bit_range): New accessor.
+ (store_manager::get_concrete_binding): New overload taking
+ const bit_range &.
+
+2021-06-08 David Malcolm <dmalcolm@redhat.com>
+
+ * analyzer.h (int_size_in_bits): New decl.
+ * region.cc (int_size_in_bits): New function.
+ (region::get_bit_size): Reimplement in terms of the above.
+
+2021-06-08 David Malcolm <dmalcolm@redhat.com>
+
+ * store.cc (concrete_binding::dump_to_pp): Move bulk of
+ implementation to...
+ (bit_range::dump_to_pp): ...this new function.
+ (bit_range::cmp): New.
+ (concrete_binding::overlaps_p): Update for use of bit_range.
+ (concrete_binding::cmp_ptr_ptr): Likewise.
+ * store.h (struct bit_range): New.
+ (class concrete_binding): Replace fields m_start_bit_offset and
+ m_size_in_bits with new field m_bit_range.
+
+2021-06-08 David Malcolm <dmalcolm@redhat.com>
+
+ * svalue.h (conjured_svalue::iterator_t): Delete.
+
+2021-06-03 David Malcolm <dmalcolm@redhat.com>
+
+ * store.h (store::get_direct_binding): Remove unused decl.
+ (store::get_default_binding): Likewise.
+
+2021-06-03 David Malcolm <dmalcolm@redhat.com>
+
+ * svalue.cc (poisoned_svalue::dump_to_pp): Dump type.
+ (compound_svalue::dump_to_pp): Dump any type.
+
+2021-05-18 David Malcolm <dmalcolm@redhat.com>
+
+ PR analyzer/100615
+ * sm-malloc.cc: Include "analyzer/function-set.h".
+ (malloc_state_machine::on_stmt): Call unaffected_by_call_p and
+ bail on the functions it recognizes.
+ (malloc_state_machine::unaffected_by_call_p): New.
+
+2021-05-10 Martin Liska <mliska@suse.cz>
+
+ * sm-file.cc (is_file_using_fn_p): Use startswith
+ function instead of strncmp.
+
+2021-05-10 Martin Liska <mliska@suse.cz>
+
+ * program-state.cc (program_state::operator=): Remove
+ __cplusplus >= 201103.
+ (program_state::program_state): Likewise.
+ * program-state.h: Likewise.
+ * region-model.h (class region_model): Remove dead code.
+
+2021-04-24 David Malcolm <dmalcolm@redhat.com>
+
+ PR analyzer/100244
+ * sm-malloc.cc (free_of_non_heap::describe_state_change):
+ Bulletproof against change.m_expr being NULL.
+
+2021-04-13 David Malcolm <dmalcolm@redhat.com>
+
+ PR analyzer/98599
+ * supergraph.cc (saved_uids::make_uid_unique): New.
+ (saved_uids::restore_uids): New.
+ (supergraph::supergraph): Replace assignments to stmt->uid with
+ calls to m_stmt_uids.make_uid_unique.
+ (supergraph::~supergraph): New.
+ * supergraph.h (class saved_uids): New.
+ (supergraph::~supergraph): New decl.
+ (supergraph::m_stmt_uids): New field.
+
+2021-04-10 David Malcolm <dmalcolm@redhat.com>
+
+ PR analyzer/100011
+ * region-model.cc (region_model::on_assignment): Avoid NULL
+ dereference if ctxt is NULL when assigning from a STRING_CST.
+
+2021-04-08 David Malcolm <dmalcolm@redhat.com>
+
+ PR analyzer/99042
+ PR analyzer/99774
+ * engine.cc
+ (impl_region_model_context::impl_region_model_context): Add
+ uncertainty param and use it to initialize m_uncertainty.
+ (impl_region_model_context::get_uncertainty): New.
+ (impl_sm_context::get_fndecl_for_call): Add NULL for new
+ uncertainty param when constructing impl_region_model_context.
+ (impl_sm_context::get_state): Likewise.
+ (impl_sm_context::set_next_state): Likewise.
+ (impl_sm_context::warn): Likewise.
+ (exploded_node::on_stmt): Add uncertainty param
+ and use it when constructing impl_region_model_context.
+ (exploded_node::on_edge): Add uncertainty param and pass
+ to on_edge call.
+ (exploded_node::detect_leaks): Create uncertainty_t and pass to
+ impl_region_model_context.
+ (exploded_graph::get_or_create_node): Create uncertainty_t and
+ pass to prune_for_point.
+ (maybe_process_run_of_before_supernode_enodes): Create
+ uncertainty_t and pass to impl_region_model_context.
+ (exploded_graph::process_node): Create uncertainty_t instances and
+ pass around as needed.
+ * exploded-graph.h
+ (impl_region_model_context::impl_region_model_context): Add
+ uncertainty param.
+ (impl_region_model_context::get_uncertainty): New decl.
+ (impl_region_model_context::m_uncertainty): New field.
+ (exploded_node::on_stmt): Add uncertainty param.
+ (exploded_node::on_edge): Likewise.
+ * program-state.cc (sm_state_map::on_liveness_change): Get
+ uncertainty from context and use it to unset sm-state from
+ svalues as appropriate.
+ (program_state::on_edge): Add uncertainty param and use it when
+ constructing impl_region_model_context. Fix indentation.
+ (program_state::prune_for_point): Add uncertainty param and use it
+ when constructing impl_region_model_context.
+ (program_state::detect_leaks): Get any uncertainty from ctxt and
+ use it to get maybe-live svalues for dest_state, rather than
+ definitely-live ones; use this when determining which svalues
+ have leaked.
+ (selftest::test_program_state_merging): Create uncertainty_t and
+ pass to impl_region_model_context.
+ * program-state.h (program_state::on_edge): Add uncertainty param.
+ (program_state::prune_for_point): Likewise.
+ * region-model-impl-calls.cc (call_details::get_uncertainty): New.
+ (region_model::impl_call_memcpy): Pass uncertainty to
+ mark_region_as_unknown call.
+ (region_model::impl_call_memset): Likewise.
+ (region_model::impl_call_strcpy): Likewise.
+ * region-model-reachability.cc (reachable_regions::handle_sval):
+ Also add sval to m_mutable_svals.
+ * region-model.cc (region_model::on_assignment): Pass any
+ uncertainty from ctxt to the store::set_value call.
+ (region_model::handle_unrecognized_call): Get any uncertainty from
+ ctxt and use it to record mutable svalues at the unknown call.
+ (region_model::get_reachable_svalues): Add uncertainty param and
+ use it to mark any maybe-bound svalues as being reachable.
+ (region_model::set_value): Pass any uncertainty from ctxt to the
+ store::set_value call.
+ (region_model::mark_region_as_unknown): Add uncertainty param and
+ pass it on to the store::mark_region_as_unknown call.
+ (region_model::update_for_call_summary): Add uncertainty param and
+ pass it on to the region_model::mark_region_as_unknown call.
+ * region-model.h (call_details::get_uncertainty): New decl.
+ (region_model::get_reachable_svalues): Add uncertainty param.
+ (region_model::mark_region_as_unknown): Add uncertainty param.
+ (region_model_context::get_uncertainty): New vfunc.
+ (noop_region_model_context::get_uncertainty): New vfunc
+ implementation.
+ * store.cc (dump_svalue_set): New.
+ (uncertainty_t::dump_to_pp): New.
+ (uncertainty_t::dump): New.
+ (binding_cluster::clobber_region): Pass NULL for uncertainty to
+ remove_overlapping_bindings.
+ (binding_cluster::mark_region_as_unknown): Add uncertainty param
+ and pass it to remove_overlapping_bindings.
+ (binding_cluster::remove_overlapping_bindings): Add uncertainty param.
+ Use it to record any svalues that were in clobbered bindings.
+ (store::set_value): Add uncertainty param. Pass it to
+ binding_cluster::mark_region_as_unknown when handling symbolic
+ regions.
+ (store::mark_region_as_unknown): Add uncertainty param and pass it
+ to binding_cluster::mark_region_as_unknown.
+ (store::remove_overlapping_bindings): Add uncertainty param and
+ pass it to binding_cluster::remove_overlapping_bindings.
+ * store.h (binding_cluster::mark_region_as_unknown): Add
+ uncertainty param.
+ (binding_cluster::remove_overlapping_bindings): Likewise.
+ (store::set_value): Likewise.
+ (store::mark_region_as_unknown): Likewise.
+
+2021-04-05 David Malcolm <dmalcolm@redhat.com>
+
+ PR analyzer/99906
+ * analyzer.cc (maybe_reconstruct_from_def_stmt): Fix NULL
+ dereference on calls with zero arguments.
+ * sm-malloc.cc (malloc_state_machine::on_stmt): When handling
+ __attribute__((nonnull)), only call get_diagnostic_tree if the
+ result will be used.
+
+2021-04-05 David Malcolm <dmalcolm@redhat.com>
+
+ PR analyzer/99886
+ * diagnostic-manager.cc
+ (diagnostic_manager::prune_interproc_events): Use signed integers
+ when subtracting one from path->num_events ().
+ (diagnostic_manager::consolidate_conditions): Likewise. Convert
+ next_idx to a signed int.
+
+2021-04-01 David Malcolm <dmalcolm@redhat.com>
+
+ * diagnostic-manager.cc (diagnostic_manager::add_diagnostic): Make
+ enode param non-constant, and call add_diagnostic on it. Add
+ enode index to log message.
+ (diagnostic_manager::add_diagnostic): Make enode param
+ non-constant.
+ * diagnostic-manager.h (diagnostic_manager::add_diagnostic):
+ Likewise for both decls.
+ * engine.cc
+ (impl_region_model_context::impl_region_model_context): Likewise
+ for enode_for_diag.
+ (impl_sm_context::impl_sm_context): Likewise.
+ (impl_sm_context::m_enode_for_diag): Likewise.
+ (exploded_node::dump_dot): Don't pass the diagnostic manager
+ to dump_saved_diagnostics.
+ (exploded_node::dump_saved_diagnostics): Drop param. Iterate
+ directly through all saved diagnostics for the enode, rather
+ than all saved diagnostics in the diagnostic_manager and
+ filtering.
+ (exploded_node::on_stmt): Make non-const.
+ (exploded_node::on_edge): Likewise.
+ (exploded_node::on_longjmp): Likewise.
+ (exploded_node::detect_leaks): Likewise.
+ (exploded_graph::get_or_create_node): Make enode_for_diag param
+ non-const.
+ (exploded_graph_annotator::print_enode): Iterate
+ directly through all saved diagnostics for the enode, rather
+ than all saved diagnostics in the diagnostic_manager and
+ filtering.
+ * exploded-graph.h
+ (impl_region_model_context::impl_region_model_context): Make
+ enode_for_diag param non-constant.
+ (impl_region_model_context::m_enode_for_diag): Likewise.
+ (exploded_node::dump_saved_diagnostics): Drop param.
+ (exploded_node::on_stmt): Make non-const.
+ (exploded_node::on_edge): Likewise.
+ (exploded_node::on_longjmp): Likewise.
+ (exploded_node::detect_leaks): Likewise.
+ (exploded_node::add_diagnostic): New.
+ (exploded_node::get_num_diagnostics): New.
+ (exploded_node::get_saved_diagnostic): New.
+ (exploded_node::m_saved_diagnostics): New.
+ (exploded_graph::get_or_create_node): Make enode_for_diag param
+ non-constant.
+ * feasible-graph.cc (feasible_node::dump_dot): Drop
+ diagnostic_manager from call to dump_saved_diagnostics.
+ * program-state.cc (program_state::on_edge): Convert enode param
+ to non-const pointer.
+ (program_state::prune_for_point): Likewise for enode_for_diag
+ param.
+ * program-state.h (program_state::on_edge): Convert enode param
+ to non-const pointer.
+ (program_state::prune_for_point): Likewise for enode_for_diag
+ param.
+
+2021-03-31 David Malcolm <dmalcolm@redhat.com>
+
+ PR analyzer/99771
+ * analyzer.cc (maybe_reconstruct_from_def_stmt): New.
+ (fixup_tree_for_diagnostic_1): New.
+ (fixup_tree_for_diagnostic): New.
+ * analyzer.h (fixup_tree_for_diagnostic): New decl.
+ * checker-path.cc (call_event::get_desc): Call
+ fixup_tree_for_diagnostic and use it for the call_with_state call.
+ (warning_event::get_desc): Likewise for the final_event and
+ make_label_text calls.
+ * engine.cc (impl_region_model_context::on_state_leak): Likewise
+ for the on_leak and add_diagnostic calls.
+ * region-model.cc (region_model::get_representative_tree):
+ Likewise for the result.
+
+2021-03-30 David Malcolm <dmalcolm@redhat.com>
+
+ * region.h (region::dump_to_pp): Remove old decl.
+
+2021-03-30 David Malcolm <dmalcolm@redhat.com>
+
+ * sm-file.cc (fileptr_state_machine::on_stmt): Only call
+ get_diagnostic_tree if the result will be used.
+ * sm-malloc.cc (malloc_state_machine::on_stmt): Likewise.
+ (malloc_state_machine::on_deallocator_call): Likewise.
+ (malloc_state_machine::on_realloc_call): Likewise.
+ * sm-sensitive.cc
+ (sensitive_state_machine::warn_for_any_exposure): Likewise.
+ * sm-taint.cc (taint_state_machine::on_stmt): Likewise.
+
+2021-03-25 David Malcolm <dmalcolm@redhat.com>
+
+ PR analyzer/93695
+ PR analyzer/99044
+ PR analyzer/99716
+ * engine.cc (exploded_node::on_stmt): Clear sm-state involving
+ an SSA name at the def-stmt of that SSA name.
+ * program-state.cc (sm_state_map::purge_state_involving): New.
+ * program-state.h (sm_state_map::purge_state_involving): New decl.
+ * region-model.cc (selftest::test_involves_p): New.
+ (selftest::analyzer_region_model_cc_tests): Call it.
+	* svalue.cc (class involvement_visitor): New class.
+ (svalue::involves_p): New.
+ * svalue.h (svalue::involves_p): New decl.
+
+2021-03-19 David Malcolm <dmalcolm@redhat.com>
+
+ PR analyzer/99614
+ * diagnostic-manager.cc (class epath_finder): Add
+ DISABLE_COPY_AND_ASSIGN.
+
2021-03-15 Martin Liska <mliska@suse.cz>
* sm-file.cc (get_file_using_fns): Add missing comma in initializer.
diff --git a/gcc/analyzer/analysis-plan.cc b/gcc/analyzer/analysis-plan.cc
index 7dfc48e..57a6dcb 100644
--- a/gcc/analyzer/analysis-plan.cc
+++ b/gcc/analyzer/analysis-plan.cc
@@ -109,6 +109,10 @@ analysis_plan::use_summary_p (const cgraph_edge *edge) const
if (!flag_analyzer_call_summaries)
return false;
+  /* Don't use call summaries if there is no callgraph edge.  */
+ if (!edge || !edge->callee)
+ return false;
+
/* TODO: don't count callsites each time. */
int num_call_sites = 0;
const cgraph_node *callee = edge->callee;
diff --git a/gcc/analyzer/analyzer.cc b/gcc/analyzer/analyzer.cc
index df8d881..f6e9c9d 100644
--- a/gcc/analyzer/analyzer.cc
+++ b/gcc/analyzer/analyzer.cc
@@ -60,6 +60,160 @@ get_stmt_location (const gimple *stmt, function *fun)
return stmt->location;
}
+static tree
+fixup_tree_for_diagnostic_1 (tree expr, hash_set<tree> *visited);
+
+/* Attempt to generate a tree for the LHS of ASSIGN_STMT.
+ VISITED must be non-NULL; it is used to ensure termination. */
+
+static tree
+get_diagnostic_tree_for_gassign_1 (const gassign *assign_stmt,
+ hash_set<tree> *visited)
+{
+ enum tree_code code = gimple_assign_rhs_code (assign_stmt);
+
+ /* Reverse the effect of extract_ops_from_tree during
+ gimplification. */
+ switch (get_gimple_rhs_class (code))
+ {
+ default:
+ case GIMPLE_INVALID_RHS:
+ gcc_unreachable ();
+ case GIMPLE_TERNARY_RHS:
+ case GIMPLE_BINARY_RHS:
+ case GIMPLE_UNARY_RHS:
+ {
+ tree t = make_node (code);
+ TREE_TYPE (t) = TREE_TYPE (gimple_assign_lhs (assign_stmt));
+ unsigned num_rhs_args = gimple_num_ops (assign_stmt) - 1;
+ for (unsigned i = 0; i < num_rhs_args; i++)
+ {
+ tree op = gimple_op (assign_stmt, i + 1);
+ if (op)
+ {
+ op = fixup_tree_for_diagnostic_1 (op, visited);
+ if (op == NULL_TREE)
+ return NULL_TREE;
+ }
+ TREE_OPERAND (t, i) = op;
+ }
+ return t;
+ }
+ case GIMPLE_SINGLE_RHS:
+ {
+ tree op = gimple_op (assign_stmt, 1);
+ op = fixup_tree_for_diagnostic_1 (op, visited);
+ return op;
+ }
+ }
+}
+
+/* Subroutine of fixup_tree_for_diagnostic_1, called on SSA names.
+   Attempt to reconstruct a tree expression for SSA_NAME
+ based on its def-stmt.
+ SSA_NAME must be non-NULL.
+ VISITED must be non-NULL; it is used to ensure termination.
+
+ Return NULL_TREE if there is a problem. */
+
+static tree
+maybe_reconstruct_from_def_stmt (tree ssa_name,
+ hash_set<tree> *visited)
+{
+ /* Ensure termination. */
+ if (visited->contains (ssa_name))
+ return NULL_TREE;
+ visited->add (ssa_name);
+
+ gimple *def_stmt = SSA_NAME_DEF_STMT (ssa_name);
+
+ switch (gimple_code (def_stmt))
+ {
+ default:
+ gcc_unreachable ();
+ case GIMPLE_ASM:
+ case GIMPLE_NOP:
+ case GIMPLE_PHI:
+ /* Can't handle these. */
+ return NULL_TREE;
+ case GIMPLE_ASSIGN:
+ return get_diagnostic_tree_for_gassign_1
+ (as_a <const gassign *> (def_stmt), visited);
+ case GIMPLE_CALL:
+ {
+ gcall *call_stmt = as_a <gcall *> (def_stmt);
+ tree return_type = gimple_call_return_type (call_stmt);
+ tree fn = fixup_tree_for_diagnostic_1 (gimple_call_fn (call_stmt),
+ visited);
+ if (fn == NULL_TREE)
+ return NULL_TREE;
+ unsigned num_args = gimple_call_num_args (call_stmt);
+ auto_vec<tree> args (num_args);
+ for (unsigned i = 0; i < num_args; i++)
+ {
+ tree arg = gimple_call_arg (call_stmt, i);
+ arg = fixup_tree_for_diagnostic_1 (arg, visited);
+ if (arg == NULL_TREE)
+ return NULL_TREE;
+ args.quick_push (arg);
+ }
+ gcc_assert (fn);
+ return build_call_array_loc (gimple_location (call_stmt),
+ return_type, fn,
+ num_args, args.address ());
+ }
+ break;
+ }
+}
+
+/* Subroutine of fixup_tree_for_diagnostic: attempt to fixup EXPR,
+ which can be NULL.
+ VISITED must be non-NULL; it is used to ensure termination. */
+
+static tree
+fixup_tree_for_diagnostic_1 (tree expr, hash_set<tree> *visited)
+{
+ if (expr
+ && TREE_CODE (expr) == SSA_NAME
+ && (SSA_NAME_VAR (expr) == NULL_TREE
+ || DECL_ARTIFICIAL (SSA_NAME_VAR (expr))))
+ {
+ if (tree var = SSA_NAME_VAR (expr))
+ if (VAR_P (var) && DECL_HAS_DEBUG_EXPR_P (var))
+ return DECL_DEBUG_EXPR (var);
+ if (tree expr2 = maybe_reconstruct_from_def_stmt (expr, visited))
+ return expr2;
+ }
+ return expr;
+}
+
+/* We don't want to print '<unknown>' in our diagnostics (PR analyzer/99771),
+ but sometimes we generate diagnostics involving an ssa name for a
+ temporary.
+
+ Work around this by attempting to reconstruct a tree expression for
+ such temporaries based on their def-stmts.
+
+ Otherwise return EXPR.
+
+ EXPR can be NULL. */
+
+tree
+fixup_tree_for_diagnostic (tree expr)
+{
+ hash_set<tree> visited;
+ return fixup_tree_for_diagnostic_1 (expr, &visited);
+}
+
+/* Attempt to generate a tree for the LHS of ASSIGN_STMT. */
+
+tree
+get_diagnostic_tree_for_gassign (const gassign *assign_stmt)
+{
+ hash_set<tree> visited;
+ return get_diagnostic_tree_for_gassign_1 (assign_stmt, &visited);
+}
+
} // namespace ana
/* Helper function for checkers. Is the CALL to the given function name,
@@ -90,7 +244,7 @@ is_special_named_call_p (const gcall *call, const char *funcname,
Compare with special_function_p in calls.c. */
bool
-is_named_call_p (tree fndecl, const char *funcname)
+is_named_call_p (const_tree fndecl, const char *funcname)
{
gcc_assert (fndecl);
gcc_assert (funcname);
@@ -142,7 +296,7 @@ is_std_function_p (const_tree fndecl)
/* Like is_named_call_p, but look for std::FUNCNAME. */
bool
-is_std_named_call_p (tree fndecl, const char *funcname)
+is_std_named_call_p (const_tree fndecl, const char *funcname)
{
gcc_assert (fndecl);
gcc_assert (funcname);
@@ -164,7 +318,7 @@ is_std_named_call_p (tree fndecl, const char *funcname)
arguments? */
bool
-is_named_call_p (tree fndecl, const char *funcname,
+is_named_call_p (const_tree fndecl, const char *funcname,
const gcall *call, unsigned int num_args)
{
gcc_assert (fndecl);
@@ -182,7 +336,7 @@ is_named_call_p (tree fndecl, const char *funcname,
/* Like is_named_call_p, but check for std::FUNCNAME. */
bool
-is_std_named_call_p (tree fndecl, const char *funcname,
+is_std_named_call_p (const_tree fndecl, const char *funcname,
const gcall *call, unsigned int num_args)
{
gcc_assert (fndecl);
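To make the reconstruction above concrete, consider a hypothetical input (illustrative only; the names and the exact gimple shown are not from this patch): gimplification lowers the malloc argument to an SSA temporary such as "_1 = n_2(D) * 4", so a leak path would otherwise have to describe "_1" or "<unknown>"; walking the def-stmt lets the event text quote something like "n * 4" instead.

    /* Hypothetical testcase, for illustration.  */
    #include <stdlib.h>

    void test (size_t n)
    {
      void *p = malloc (n * sizeof (int));  /* argument becomes a temporary */
    }                                       /* leak of 'p' reported here */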
diff --git a/gcc/analyzer/analyzer.h b/gcc/analyzer/analyzer.h
index f50ac66..3ba4e21 100644
--- a/gcc/analyzer/analyzer.h
+++ b/gcc/analyzer/analyzer.h
@@ -46,11 +46,14 @@ class svalue;
class unaryop_svalue;
class binop_svalue;
class sub_svalue;
+ class repeated_svalue;
+ class bits_within_svalue;
class unmergeable_svalue;
class placeholder_svalue;
class widening_svalue;
class compound_svalue;
class conjured_svalue;
+ class asm_output_svalue;
typedef hash_set<const svalue *> svalue_set;
class region;
class frame_region;
@@ -60,6 +63,7 @@ class region;
class symbolic_region;
class element_region;
class offset_region;
+ class sized_region;
class cast_region;
class field_region;
class string_region;
@@ -71,9 +75,12 @@ class region_model;
class region_model_context;
class impl_region_model_context;
class call_details;
-struct rejected_constraint;
+class rejected_constraint;
class constraint_manager;
class equiv_class;
+class reachable_regions;
+class bounded_ranges;
+class bounded_ranges_manager;
class pending_diagnostic;
class state_change_event;
@@ -108,6 +115,8 @@ extern void dump_quoted_tree (pretty_printer *pp, tree t);
extern void print_quoted_type (pretty_printer *pp, tree t);
extern int readability_comparator (const void *p1, const void *p2);
extern int tree_cmp (const void *p1, const void *p2);
+extern tree fixup_tree_for_diagnostic (tree);
+extern tree get_diagnostic_tree_for_gassign (const gassign *);
/* A tree, extended with stack frame information for locals, so that
we can distinguish between different values of locals within a potentially
@@ -141,8 +150,13 @@ public:
typedef offset_int bit_offset_t;
typedef offset_int bit_size_t;
+typedef offset_int byte_offset_t;
typedef offset_int byte_size_t;
+extern bool int_size_in_bits (const_tree type, bit_size_t *out);
+
+extern tree get_field_at_bit_offset (tree record_type, bit_offset_t bit_offset);
+
/* The location of a region expressesd as an offset relative to a
base region. */
@@ -189,6 +203,8 @@ private:
extern location_t get_stmt_location (const gimple *stmt, function *fun);
+extern bool compat_types_p (tree src_type, tree dst_type);
+
/* Passed by pointer to PLUGIN_ANALYZER_INIT callbacks. */
class plugin_analyzer_init_iface
@@ -198,15 +214,74 @@ public:
virtual logger *get_logger () const = 0;
};
+/* An enum for describing the direction of an access to memory. */
+
+enum access_direction
+{
+ DIR_READ,
+ DIR_WRITE
+};
+
+/* Abstract base class for associating custom data with an
+ exploded_edge, for handling non-standard edges such as
+ rewinding from a longjmp, signal handlers, etc.
+ Also used when "bifurcating" state: splitting the execution
+ path in non-standard ways (e.g. for simulating the various
+ outcomes of "realloc"). */
+
+class custom_edge_info
+{
+public:
+ virtual ~custom_edge_info () {}
+
+ /* Hook for making .dot label more readable. */
+ virtual void print (pretty_printer *pp) const = 0;
+
+ /* Hook for updating MODEL within exploded_path::feasible_p
+ and when handling bifurcation. */
+ virtual bool update_model (region_model *model,
+ const exploded_edge *eedge,
+ region_model_context *ctxt) const = 0;
+
+ virtual void add_events_to_path (checker_path *emission_path,
+ const exploded_edge &eedge) const = 0;
+};
+
+/* Abstract base class for splitting state.
+
+ Most of the state-management code in the analyzer involves
+ modifying state objects in-place, which assumes a single outcome.
+
+ This class provides an escape hatch to allow for multiple outcomes
+ for such updates e.g. for modelling multiple outcomes from function
+ calls, such as the various outcomes of "realloc". */
+
+class path_context
+{
+public:
+ virtual ~path_context () {}
+
+ /* Hook for clients to split state with a non-standard path.
+ Take ownership of INFO. */
+ virtual void bifurcate (custom_edge_info *info) = 0;
+
+ /* Hook for clients to terminate the standard path. */
+ virtual void terminate_path () = 0;
+
+ /* Hook for clients to determine if the standard path has been
+ terminated. */
+ virtual bool terminate_path_p () const = 0;
+};
+
} // namespace ana
extern bool is_special_named_call_p (const gcall *call, const char *funcname,
unsigned int num_args);
-extern bool is_named_call_p (tree fndecl, const char *funcname);
-extern bool is_named_call_p (tree fndecl, const char *funcname,
+extern bool is_named_call_p (const_tree fndecl, const char *funcname);
+extern bool is_named_call_p (const_tree fndecl, const char *funcname,
const gcall *call, unsigned int num_args);
-extern bool is_std_named_call_p (tree fndecl, const char *funcname);
-extern bool is_std_named_call_p (tree fndecl, const char *funcname,
+extern bool is_std_named_call_p (const_tree fndecl, const char *funcname);
+extern bool is_std_named_call_p (const_tree fndecl, const char *funcname,
const gcall *call, unsigned int num_args);
extern bool is_setjmp_call_p (const gcall *call);
extern bool is_longjmp_call_p (const gcall *call);
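As a sketch of how the custom_edge_info and path_context hooks above compose, a hypothetical client modelling a call with a non-standard "failure" outcome might look like the following (illustrative only; how the client obtains the path_context is not shown here):

    /* Hypothetical subclass describing one non-standard outcome.  */
    class failure_info : public custom_edge_info
    {
    public:
      void print (pretty_printer *pp) const FINAL OVERRIDE
      {
        pp_string (pp, "when the call fails");
      }
      bool update_model (region_model *, const exploded_edge *,
                         region_model_context *) const FINAL OVERRIDE
      {
        /* Adjust the model for the failing outcome (e.g. a NULL return).  */
        return true;
      }
      void add_events_to_path (checker_path *, const exploded_edge &)
        const FINAL OVERRIDE
      {
        /* Optionally add custom events describing the failure.  */
      }
    };

    /* A call handler would then split state via the path_context,
       which takes ownership of the instance:
         path_ctxt->bifurcate (new failure_info ());  */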
diff --git a/gcc/analyzer/analyzer.opt b/gcc/analyzer/analyzer.opt
index dd34495..6ddb6e3 100644
--- a/gcc/analyzer/analyzer.opt
+++ b/gcc/analyzer/analyzer.opt
@@ -134,6 +134,10 @@ Wanalyzer-write-to-string-literal
Common Var(warn_analyzer_write_to_string_literal) Init(1) Warning
Warn about code paths which attempt to write to a string literal.
+Wanalyzer-use-of-uninitialized-value
+Common Var(warn_analyzer_use_of_uninitialized_value) Init(1) Warning
+Warn about code paths in which an uninitialized value is used.
+
Wanalyzer-too-complex
Common Var(warn_analyzer_too_complex) Init(0) Warning
Warn if the code is too complicated for the analyzer to fully explore.
@@ -210,6 +214,10 @@ fdump-analyzer-exploded-nodes-3
Common RejectNegative Var(flag_dump_analyzer_exploded_nodes_3)
Dump a textual representation of the exploded graph to SRCFILE.eg-ID.txt.
+fdump-analyzer-exploded-paths
+Common RejectNegative Var(flag_dump_analyzer_exploded_paths)
+Dump a textual representation of each diagnostic's exploded path to SRCFILE.IDX.KIND.epath.txt.
+
fdump-analyzer-feasibility
Common RejectNegative Var(flag_dump_analyzer_feasibility)
Dump various analyzer internals to SRCFILE.*.fg.dot and SRCFILE.*.tg.dot.
diff --git a/gcc/analyzer/call-info.cc b/gcc/analyzer/call-info.cc
new file mode 100644
index 0000000..1d44cb8
--- /dev/null
+++ b/gcc/analyzer/call-info.cc
@@ -0,0 +1,162 @@
+/* Subclasses of custom_edge_info for describing outcomes of function calls.
+ Copyright (C) 2021 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "function.h"
+#include "basic-block.h"
+#include "gimple.h"
+#include "gimple-iterator.h"
+#include "diagnostic-core.h"
+#include "options.h"
+#include "cgraph.h"
+#include "tree-pretty-print.h"
+#include "tristate.h"
+#include "bitmap.h"
+#include "selftest.h"
+#include "function.h"
+#include "json.h"
+#include "analyzer/analyzer.h"
+#include "analyzer/analyzer-logging.h"
+#include "ordered-hash-map.h"
+#include "cfg.h"
+#include "digraph.h"
+#include "analyzer/supergraph.h"
+#include "sbitmap.h"
+#include "analyzer/call-string.h"
+#include "analyzer/program-point.h"
+#include "analyzer/store.h"
+#include "analyzer/region-model.h"
+#include "analyzer/constraint-manager.h"
+#include "diagnostic-event-id.h"
+#include "analyzer/sm.h"
+#include "analyzer/pending-diagnostic.h"
+#include "analyzer/region-model-reachability.h"
+#include "analyzer/analyzer-selftests.h"
+#include "analyzer/program-state.h"
+#include "diagnostic-path.h"
+#include "analyzer/checker-path.h"
+#include "analyzer/diagnostic-manager.h"
+#include "alloc-pool.h"
+#include "fibonacci_heap.h"
+#include "shortest-paths.h"
+#include "analyzer/exploded-graph.h"
+#include "analyzer/call-info.h"
+
+#if ENABLE_ANALYZER
+
+namespace ana {
+
+/* class call_info : public custom_edge_info. */
+
+/* Implementation of custom_edge_info::print vfunc for call_info:
+ use get_desc to get a label_text, and print it to PP. */
+
+void
+call_info::print (pretty_printer *pp) const
+{
+ label_text desc (get_desc (pp_show_color (pp)));
+ pp_string (pp, desc.m_buffer);
+ desc.maybe_free ();
+}
+
+/* Implementation of custom_edge_info::add_events_to_path vfunc for
+ call_info: add a custom_event using call_info::get_desc as its
+ description. */
+
+void
+call_info::add_events_to_path (checker_path *emission_path,
+ const exploded_edge &eedge) const
+{
+ class call_event : public custom_event
+ {
+ public:
+ call_event (location_t loc, tree fndecl, int depth,
+ const call_info *call_info)
+ : custom_event (loc, fndecl, depth),
+ m_call_info (call_info)
+ {}
+
+ label_text get_desc (bool can_colorize) const
+ {
+ return m_call_info->get_desc (can_colorize);
+ }
+
+ private:
+ const call_info *m_call_info;
+ };
+
+ const exploded_node *src_node = eedge.m_src;
+ const program_point &src_point = src_node->get_point ();
+ tree caller_fndecl = src_point.get_fndecl ();
+ const int stack_depth = src_point.get_stack_depth ();
+
+ emission_path->add_event (new call_event (get_call_stmt ()->location,
+ caller_fndecl,
+ stack_depth,
+ this));
+}
+
+/* Recreate a call_details instance from this call_info. */
+
+call_details
+call_info::get_call_details (region_model *model,
+ region_model_context *ctxt) const
+{
+ return call_details (m_call_stmt, model, ctxt);
+}
+
+/* call_info's ctor.
+
+ The call_info instance will outlive the call_details instance;
+ call_details instances are typically created on the stack. */
+
+call_info::call_info (const call_details &cd)
+: m_call_stmt (cd.get_call_stmt ()),
+ m_fndecl (cd.get_fndecl_for_call ())
+{
+ gcc_assert (m_fndecl);
+}
+
+/* class success_call_info : public call_info. */
+
+/* Implementation of call_info::get_desc vfunc for success_call_info. */
+
+label_text
+success_call_info::get_desc (bool can_colorize) const
+{
+ return make_label_text (can_colorize, "when %qE succeeds", get_fndecl ());
+}
+
+/* class failed_call_info : public call_info. */
+
+/* Implementation of call_info::get_desc vfunc for failed_call_info. */
+
+label_text
+failed_call_info::get_desc (bool can_colorize) const
+{
+ return make_label_text (can_colorize, "when %qE fails", get_fndecl ());
+}
+
+} // namespace ana
+
+#endif /* #if ENABLE_ANALYZER */
diff --git a/gcc/analyzer/call-info.h b/gcc/analyzer/call-info.h
new file mode 100644
index 0000000..369d217
--- /dev/null
+++ b/gcc/analyzer/call-info.h
@@ -0,0 +1,83 @@
+/* Subclasses of custom_edge_info for describing outcomes of function calls.
+ Copyright (C) 2021 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_ANALYZER_CALL_INFO_H
+#define GCC_ANALYZER_CALL_INFO_H
+
+namespace ana {
+
+/* Subclass of custom_edge_info for an outcome of a call.
+ This is still abstract; the update_model and get_desc vfuncs must be
+ implemented. */
+
+class call_info : public custom_edge_info
+{
+public:
+ void print (pretty_printer *pp) const FINAL OVERRIDE;
+ void add_events_to_path (checker_path *emission_path,
+ const exploded_edge &eedge) const FINAL OVERRIDE;
+
+ const gcall *get_call_stmt () const { return m_call_stmt; }
+ tree get_fndecl () const { return m_fndecl; }
+
+ virtual label_text get_desc (bool can_colorize) const = 0;
+
+ call_details get_call_details (region_model *model,
+ region_model_context *ctxt) const;
+
+protected:
+ call_info (const call_details &cd);
+
+private:
+ const gcall *m_call_stmt;
+ tree m_fndecl;
+};
+
+/* Subclass of call_info for a "success" outcome of a call,
+ adding a "when `FNDECL' succeeds" message.
+ This is still abstract: the custom_edge_info::update_model vfunc
+ must be implemented. */
+
+class success_call_info : public call_info
+{
+public:
+ label_text get_desc (bool can_colorize) const FINAL OVERRIDE;
+
+protected:
+ success_call_info (const call_details &cd) : call_info (cd) {}
+};
+
+/* Subclass of call_info for a "failure" outcome of a call,
+ adding a "when `FNDECL' fails" message.
+ This is still abstract: the custom_edge_info::update_model vfunc
+ must be implemented. */
+
+class failed_call_info : public call_info
+{
+public:
+ label_text get_desc (bool can_colorize) const FINAL OVERRIDE;
+
+protected:
+ failed_call_info (const call_details &cd) : call_info (cd) {}
+};
+
+} // namespace ana
+
+#endif /* GCC_ANALYZER_CALL_INFO_H */
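A minimal hypothetical use of these subclasses ("dup_success" and its behavior are illustrative, not from this patch): only update_model needs implementing, since call_info supplies print and add_events_to_path, and success_call_info supplies the description:

    class dup_success : public success_call_info
    {
    public:
      dup_success (const call_details &cd) : success_call_info (cd) {}

      bool update_model (region_model *model, const exploded_edge *,
                         region_model_context *ctxt) const FINAL OVERRIDE
      {
        call_details cd (get_call_details (model, ctxt));
        /* e.g. bind a conjured return value to the call's LHS here.  */
        return true;
      }
    };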
diff --git a/gcc/analyzer/call-string.cc b/gcc/analyzer/call-string.cc
index 224b2e2..1e652a0 100644
--- a/gcc/analyzer/call-string.cc
+++ b/gcc/analyzer/call-string.cc
@@ -45,15 +45,42 @@ along with GCC; see the file COPYING3. If not see
/* class call_string. */
+/* struct call_string::element_t. */
+
+/* call_string::element_t's equality operator. */
+
+bool
+call_string::element_t::operator== (const call_string::element_t &other) const
+{
+ return (m_caller == other.m_caller && m_callee == other.m_callee);
+}
+
+/* call_string::element_t's inequality operator. */
+bool
+call_string::element_t::operator!= (const call_string::element_t &other) const
+{
+ return !(*this == other);
+}
+
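+/* Get the caller's function.  */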
+function *
+call_string::element_t::get_caller_function () const
+{
+ return m_caller->get_function ();
+}
+
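+/* Get the callee's function.  */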
+function *
+call_string::element_t::get_callee_function () const
+{
+ return m_callee->get_function ();
+}
+
/* call_string's copy ctor. */
call_string::call_string (const call_string &other)
-: m_return_edges (other.m_return_edges.length ())
+: m_elements (other.m_elements.length ())
{
- const return_superedge *e;
- int i;
- FOR_EACH_VEC_ELT (other.m_return_edges, i, e)
- m_return_edges.quick_push (e);
+ for (const call_string::element_t &e : other.m_elements)
+ m_elements.quick_push (e);
}
/* call_string's assignment operator. */
@@ -62,12 +89,12 @@ call_string&
call_string::operator= (const call_string &other)
{
// would be much simpler if we could rely on vec<> assignment op
- m_return_edges.truncate (0);
- m_return_edges.reserve (other.m_return_edges.length (), true);
- const return_superedge *e;
+ m_elements.truncate (0);
+ m_elements.reserve (other.m_elements.length (), true);
+ call_string::element_t *e;
int i;
- FOR_EACH_VEC_ELT (other.m_return_edges, i, e)
- m_return_edges.quick_push (e);
+ FOR_EACH_VEC_ELT (other.m_elements, i, e)
+ m_elements.quick_push (*e);
return *this;
}
@@ -76,12 +103,12 @@ call_string::operator= (const call_string &other)
bool
call_string::operator== (const call_string &other) const
{
- if (m_return_edges.length () != other.m_return_edges.length ())
+ if (m_elements.length () != other.m_elements.length ())
return false;
- const return_superedge *e;
+ call_string::element_t *e;
int i;
- FOR_EACH_VEC_ELT (m_return_edges, i, e)
- if (e != other.m_return_edges[i])
+ FOR_EACH_VEC_ELT (m_elements, i, e)
+ if (*e != other.m_elements[i])
return false;
return true;
}
@@ -93,15 +120,15 @@ call_string::print (pretty_printer *pp) const
{
pp_string (pp, "[");
- const return_superedge *e;
+ call_string::element_t *e;
int i;
- FOR_EACH_VEC_ELT (m_return_edges, i, e)
+ FOR_EACH_VEC_ELT (m_elements, i, e)
{
if (i > 0)
pp_string (pp, ", ");
pp_printf (pp, "(SN: %i -> SN: %i in %s)",
- e->m_src->m_index, e->m_dest->m_index,
- function_name (e->m_dest->m_fun));
+ e->m_callee->m_index, e->m_caller->m_index,
+ function_name (e->m_caller->m_fun));
}
pp_string (pp, "]");
@@ -111,24 +138,22 @@ call_string::print (pretty_printer *pp) const
[{"src_snode_idx" : int,
"dst_snode_idx" : int,
"funcname" : str},
- ...for each return_superedge in the callstring]. */
+ ...for each element in the callstring]. */
json::value *
call_string::to_json () const
{
json::array *arr = new json::array ();
- const return_superedge *e;
- int i;
- FOR_EACH_VEC_ELT (m_return_edges, i, e)
+ for (const call_string::element_t &e : m_elements)
{
json::object *e_obj = new json::object ();
e_obj->set ("src_snode_idx",
- new json::integer_number (e->m_src->m_index));
+ new json::integer_number (e.m_callee->m_index));
e_obj->set ("dst_snode_idx",
- new json::integer_number (e->m_dest->m_index));
+ new json::integer_number (e.m_caller->m_index));
e_obj->set ("funcname",
- new json::string (function_name (e->m_dest->m_fun)));
+ new json::string (function_name (e.m_caller->m_fun)));
arr->append (e_obj);
}
@@ -141,10 +166,8 @@ hashval_t
call_string::hash () const
{
inchash::hash hstate;
- int i;
- const return_superedge *e;
- FOR_EACH_VEC_ELT (m_return_edges, i, e)
- hstate.add_ptr (e);
+ for (const call_string::element_t &e : m_elements)
+ hstate.add_ptr (e.m_caller);
return hstate.end ();
}
@@ -158,24 +181,36 @@ call_string::push_call (const supergraph &sg,
gcc_assert (call_sedge);
const return_superedge *return_sedge = call_sedge->get_edge_for_return (sg);
gcc_assert (return_sedge);
- m_return_edges.safe_push (return_sedge);
+ call_string::element_t e (return_sedge->m_dest, return_sedge->m_src);
+ m_elements.safe_push (e);
+}
+
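+/* Push a (CALLER, CALLEE) pair onto the end of the call string.  */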
+void
+call_string::push_call (const supernode *caller,
+ const supernode *callee)
+{
+ call_string::element_t e (caller, callee);
+ m_elements.safe_push (e);
+}
+
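+/* Pop the topmost call from the call string, returning its element.  */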
+call_string::element_t
+call_string::pop ()
+{
+  return m_elements.pop ();
}
/* Count the number of times the top-most call site appears in the
stack. */
-
int
call_string::calc_recursion_depth () const
{
- if (m_return_edges.is_empty ())
+ if (m_elements.is_empty ())
return 0;
- const return_superedge *top_return_sedge
- = m_return_edges[m_return_edges.length () - 1];
+ const call_string::element_t top_return_sedge
+ = m_elements[m_elements.length () - 1];
int result = 0;
- const return_superedge *e;
- int i;
- FOR_EACH_VEC_ELT (m_return_edges, i, e)
+ for (const call_string::element_t &e : m_elements)
if (e == top_return_sedge)
++result;
return result;
@@ -209,13 +244,15 @@ call_string::cmp (const call_string &a,
if (i >= len_b)
return -1;
- /* Otherwise, compare the edges. */
- const return_superedge *edge_a = a[i];
- const return_superedge *edge_b = b[i];
- int src_cmp = edge_a->m_src->m_index - edge_b->m_src->m_index;
+ /* Otherwise, compare the node pairs. */
+ const call_string::element_t a_node_pair = a[i];
+ const call_string::element_t b_node_pair = b[i];
+ int src_cmp
+ = a_node_pair.m_callee->m_index - b_node_pair.m_callee->m_index;
if (src_cmp)
return src_cmp;
- int dest_cmp = edge_a->m_dest->m_index - edge_b->m_dest->m_index;
+ int dest_cmp
+ = a_node_pair.m_caller->m_index - b_node_pair.m_caller->m_index;
if (dest_cmp)
return dest_cmp;
i++;
@@ -223,6 +260,26 @@ call_string::cmp (const call_string &a,
}
}
+/* Return a pointer to the callee of the topmost call in the stack,
+   or NULL if the stack is empty.  */
+const supernode *
+call_string::get_callee_node () const
+{
+  if (m_elements.is_empty ())
+ return NULL;
+ return m_elements[m_elements.length () - 1].m_callee;
+}
+
+/* Return a pointer to the caller of the topmost call in the stack,
+   or NULL if the stack is empty.  */
+const supernode *
+call_string::get_caller_node () const
+{
+  if (m_elements.is_empty ())
+ return NULL;
+ return m_elements[m_elements.length () - 1].m_caller;
+}
+
/* Assert that this object is sane. */
void
@@ -234,12 +291,14 @@ call_string::validate () const
#endif
/* Each entry's "caller" should be the "callee" of the previous entry. */
- const return_superedge *e;
+ call_string::element_t *e;
int i;
- FOR_EACH_VEC_ELT (m_return_edges, i, e)
+ FOR_EACH_VEC_ELT (m_elements, i, e)
if (i > 0)
- gcc_assert (e->get_caller_function ()
- == m_return_edges[i - 1]->get_callee_function ());
+ {
+      gcc_assert (e->get_caller_function ()
+		  == m_elements[i - 1].get_callee_function ());
+ }
}
#endif /* #if ENABLE_ANALYZER */
diff --git a/gcc/analyzer/call-string.h b/gcc/analyzer/call-string.h
index 7721571..a1ac60d 100644
--- a/gcc/analyzer/call-string.h
+++ b/gcc/analyzer/call-string.h
@@ -24,22 +24,48 @@ along with GCC; see the file COPYING3. If not see
namespace ana {
class supergraph;
+class supernode;
class call_superedge;
class return_superedge;
+
/* A string of return_superedge pointers, representing a call stack
at a program point.
This is used to ensure that we generate interprocedurally valid paths
i.e. that we return to the same callsite that called us.
- The class actually stores the return edges, rather than the call edges,
- since that's what we need to compare against. */
+   The class stores returning calls (which may be represented by a
+   return superedge), since that is what we need to compare against.  */
class call_string
{
public:
- call_string () : m_return_edges () {}
+ /* A struct representing an element in the call_string.
+
+     Each element represents a pending return from m_callee back to
+     m_caller.  */
+
+ struct element_t
+ {
+ element_t (const supernode *caller, const supernode *callee)
+ : m_caller (caller), m_callee (callee)
+ {
+ }
+
+ bool operator== (const element_t &other) const;
+ bool operator!= (const element_t &other) const;
+
+ /* Accessors */
+ function *get_caller_function () const;
+ function *get_callee_function () const;
+
+ const supernode *m_caller;
+ const supernode *m_callee;
+ };
+
+ call_string () : m_elements () {}
call_string (const call_string &other);
call_string& operator= (const call_string &other);
@@ -51,27 +77,35 @@ public:
hashval_t hash () const;
- bool empty_p () const { return m_return_edges.is_empty (); }
+ bool empty_p () const { return m_elements.is_empty (); }
void push_call (const supergraph &sg,
const call_superedge *sedge);
- const return_superedge *pop () { return m_return_edges.pop (); }
+
+ void push_call (const supernode *src,
+ const supernode *dest);
+
+ element_t pop ();
int calc_recursion_depth () const;
static int cmp (const call_string &a,
const call_string &b);
- unsigned length () const { return m_return_edges.length (); }
- const return_superedge *operator[] (unsigned idx) const
+ /* Accessors */
+
+ const supernode *get_callee_node () const;
+ const supernode *get_caller_node () const;
+ unsigned length () const { return m_elements.length (); }
+ element_t operator[] (unsigned idx) const
{
- return m_return_edges[idx];
+ return m_elements[idx];
}
void validate () const;
private:
- auto_vec<const return_superedge *> m_return_edges;
+ auto_vec<element_t> m_elements;
};
} // namespace ana
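Illustrative push/pop pairing, given hypothetical supernode pointers sn_main, sn_foo and sn_bar for a path main -> foo -> bar: calls push (caller, callee) pairs, and returns must pop them in LIFO order, which is what keeps analyzed paths interprocedurally valid:

    call_string cs;
    cs.push_call (sn_main, sn_foo);  /* entering foo from main */
    cs.push_call (sn_foo, sn_bar);   /* entering bar from foo */
    /* cs.get_caller_node () == sn_foo; cs.get_callee_node () == sn_bar.  */
    call_string::element_t top = cs.pop ();  /* returning from bar to foo */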
diff --git a/gcc/analyzer/checker-path.cc b/gcc/analyzer/checker-path.cc
index e6e3ec1..e132f00 100644
--- a/gcc/analyzer/checker-path.cc
+++ b/gcc/analyzer/checker-path.cc
@@ -162,14 +162,14 @@ debug_event::get_desc (bool) const
return label_text::borrow (m_desc);
}
-/* class custom_event : public checker_event. */
+/* class precanned_custom_event : public custom_event. */
/* Implementation of diagnostic_event::get_desc vfunc for
- custom_event.
+ precanned_custom_event.
Use the saved string as the event's description. */
label_text
-custom_event::get_desc (bool) const
+precanned_custom_event::get_desc (bool) const
{
return label_text::borrow (m_desc);
}
@@ -614,7 +614,11 @@ call_event::call_event (const exploded_edge &eedge,
location_t loc, tree fndecl, int depth)
: superedge_event (EK_CALL_EDGE, eedge, loc, fndecl, depth)
{
- gcc_assert (eedge.m_sedge->m_kind == SUPEREDGE_CALL);
+ if (eedge.m_sedge)
+ gcc_assert (eedge.m_sedge->m_kind == SUPEREDGE_CALL);
+
+ m_src_snode = eedge.m_src->get_supernode ();
+ m_dest_snode = eedge.m_dest->get_supernode ();
}
/* Implementation of diagnostic_event::get_desc vfunc for
@@ -634,12 +638,13 @@ call_event::get_desc (bool can_colorize) const
if (m_critical_state && m_pending_diagnostic)
{
gcc_assert (m_var);
+ tree var = fixup_tree_for_diagnostic (m_var);
label_text custom_desc
= m_pending_diagnostic->describe_call_with_state
(evdesc::call_with_state (can_colorize,
- m_sedge->m_src->m_fun->decl,
- m_sedge->m_dest->m_fun->decl,
- m_var,
+ m_src_snode->m_fun->decl,
+ m_dest_snode->m_fun->decl,
+ var,
m_critical_state));
if (custom_desc.m_buffer)
return custom_desc;
@@ -647,8 +652,8 @@ call_event::get_desc (bool can_colorize) const
return make_label_text (can_colorize,
"calling %qE from %qE",
- m_sedge->m_dest->m_fun->decl,
- m_sedge->m_src->m_fun->decl);
+ m_dest_snode->m_fun->decl,
+ m_src_snode->m_fun->decl);
}
/* Override of checker_event::is_call_p for calls. */
@@ -667,7 +672,11 @@ return_event::return_event (const exploded_edge &eedge,
location_t loc, tree fndecl, int depth)
: superedge_event (EK_RETURN_EDGE, eedge, loc, fndecl, depth)
{
- gcc_assert (eedge.m_sedge->m_kind == SUPEREDGE_RETURN);
+ if (eedge.m_sedge)
+ gcc_assert (eedge.m_sedge->m_kind == SUPEREDGE_RETURN);
+
+ m_src_snode = eedge.m_src->get_supernode ();
+ m_dest_snode = eedge.m_dest->get_supernode ();
}
/* Implementation of diagnostic_event::get_desc vfunc for
@@ -693,16 +702,16 @@ return_event::get_desc (bool can_colorize) const
label_text custom_desc
= m_pending_diagnostic->describe_return_of_state
(evdesc::return_of_state (can_colorize,
- m_sedge->m_dest->m_fun->decl,
- m_sedge->m_src->m_fun->decl,
+ m_dest_snode->m_fun->decl,
+ m_src_snode->m_fun->decl,
m_critical_state));
if (custom_desc.m_buffer)
return custom_desc;
}
return make_label_text (can_colorize,
"returning to %qE from %qE",
- m_sedge->m_dest->m_fun->decl,
- m_sedge->m_src->m_fun->decl);
+ m_dest_snode->m_fun->decl,
+ m_src_snode->m_fun->decl);
}
/* Override of checker_event::is_return_p for returns. */
@@ -880,19 +889,20 @@ warning_event::get_desc (bool can_colorize) const
{
if (m_pending_diagnostic)
{
+ tree var = fixup_tree_for_diagnostic (m_var);
label_text ev_desc
= m_pending_diagnostic->describe_final_event
- (evdesc::final_event (can_colorize, m_var, m_state));
+ (evdesc::final_event (can_colorize, var, m_state));
if (ev_desc.m_buffer)
{
if (m_sm && flag_analyzer_verbose_state_changes)
{
label_text result;
- if (m_var)
+ if (var)
result = make_label_text (can_colorize,
"%s (%qE is in state %qs)",
ev_desc.m_buffer,
- m_var, m_state->get_name ());
+ var, m_state->get_name ());
else
result = make_label_text (can_colorize,
"%s (in global state %qs)",
@@ -999,9 +1009,7 @@ checker_path::add_final_event (const state_machine *sm,
void
checker_path::fixup_locations (pending_diagnostic *pd)
{
- checker_event *e;
- int i;
- FOR_EACH_VEC_ELT (m_events, i, e)
+ for (checker_event *e : m_events)
e->set_location (pd->fixup_location (e->get_location ()));
}
diff --git a/gcc/analyzer/checker-path.h b/gcc/analyzer/checker-path.h
index f76bb94..27634c2 100644
--- a/gcc/analyzer/checker-path.h
+++ b/gcc/analyzer/checker-path.h
@@ -56,6 +56,7 @@ extern const char *event_kind_to_string (enum event_kind ek);
checker_event
debug_event (EK_DEBUG)
custom_event (EK_CUSTOM)
+ precanned_custom_event
statement_event (EK_STMT)
function_entry_event (EK_FUNCTION_ENTRY)
state_change_event (EK_STATE_CHANGE)
@@ -144,19 +145,30 @@ private:
char *m_desc;
};
-/* A concrete event subclass for custom events. These are not filtered,
+/* An abstract event subclass for custom events. These are not filtered,
as they are likely to be pertinent to the diagnostic. */
class custom_event : public checker_event
{
+protected:
+ custom_event (location_t loc, tree fndecl, int depth)
+ : checker_event (EK_CUSTOM, loc, fndecl, depth)
+ {
+ }
+};
+
+/* A concrete custom_event subclass with a precanned message. */
+
+class precanned_custom_event : public custom_event
+{
public:
- custom_event (location_t loc, tree fndecl, int depth,
- const char *desc)
- : checker_event (EK_CUSTOM, loc, fndecl, depth),
+ precanned_custom_event (location_t loc, tree fndecl, int depth,
+ const char *desc)
+ : custom_event (loc, fndecl, depth),
m_desc (xstrdup (desc))
{
}
- ~custom_event ()
+ ~precanned_custom_event ()
{
free (m_desc);
}
@@ -326,6 +338,9 @@ public:
label_text get_desc (bool can_colorize) const FINAL OVERRIDE;
bool is_call_p () const FINAL OVERRIDE;
+
+ const supernode *m_src_snode;
+ const supernode *m_dest_snode;
};
/* A concrete event subclass for an interprocedural return. */
@@ -339,6 +354,9 @@ public:
label_text get_desc (bool can_colorize) const FINAL OVERRIDE;
bool is_return_p () const FINAL OVERRIDE;
+
+ const supernode *m_src_snode;
+ const supernode *m_dest_snode;
};
/* A concrete event subclass for the start of a consolidated run of CFG
diff --git a/gcc/analyzer/complexity.cc b/gcc/analyzer/complexity.cc
index ece4272..ae9f982 100644
--- a/gcc/analyzer/complexity.cc
+++ b/gcc/analyzer/complexity.cc
@@ -90,6 +90,22 @@ complexity::from_pair (const complexity &c1, const complexity &c2)
MAX (c1.m_max_depth, c2.m_max_depth) + 1);
}
+/* Get complexity for a new node that references the svalues in VEC. */
+
+complexity
+complexity::from_vec_svalue (const vec<const svalue *> &vec)
+{
+ unsigned num_nodes = 0;
+ unsigned max_depth = 0;
+ for (auto iter_sval : vec)
+ {
+ const complexity &iter_c = iter_sval->get_complexity ();
+ num_nodes += iter_c.m_num_nodes;
+ max_depth = MAX (max_depth, iter_c.m_max_depth);
+ }
+ return complexity (num_nodes + 1, max_depth + 1);
+}
+
} // namespace ana
#endif /* #if ENABLE_ANALYZER */
diff --git a/gcc/analyzer/complexity.h b/gcc/analyzer/complexity.h
index 459987e..85c0372 100644
--- a/gcc/analyzer/complexity.h
+++ b/gcc/analyzer/complexity.h
@@ -36,6 +36,7 @@ struct complexity
complexity (const region *reg);
complexity (const svalue *sval);
static complexity from_pair (const complexity &c1, const complexity &c);
+ static complexity from_vec_svalue (const vec<const svalue *> &vec);
/* The total number of svalues and regions in the tree of this
entity, including the entity itself. */
diff --git a/gcc/analyzer/constraint-manager.cc b/gcc/analyzer/constraint-manager.cc
index 4dadd20..6df23fb 100644
--- a/gcc/analyzer/constraint-manager.cc
+++ b/gcc/analyzer/constraint-manager.cc
@@ -42,12 +42,14 @@ along with GCC; see the file COPYING3. If not see
#include "sbitmap.h"
#include "bitmap.h"
#include "tristate.h"
+#include "analyzer/analyzer-logging.h"
#include "analyzer/call-string.h"
#include "analyzer/program-point.h"
#include "analyzer/store.h"
#include "analyzer/region-model.h"
#include "analyzer/constraint-manager.h"
#include "analyzer/analyzer-selftests.h"
+#include "tree-pretty-print.h"
#if ENABLE_ANALYZER
@@ -65,6 +67,50 @@ compare_constants (tree lhs_const, enum tree_code op, tree rhs_const)
return tristate (tristate::TS_UNKNOWN);
}
+/* Return true iff CST is below the maximum value for its type. */
+
+static bool
+can_plus_one_p (tree cst)
+{
+ gcc_assert (CONSTANT_CLASS_P (cst));
+ return tree_int_cst_lt (cst, TYPE_MAX_VALUE (TREE_TYPE (cst)));
+}
+
+/* Return (CST + 1). */
+
+static tree
+plus_one (tree cst)
+{
+ gcc_assert (CONSTANT_CLASS_P (cst));
+ gcc_assert (can_plus_one_p (cst));
+ tree result = fold_build2 (PLUS_EXPR, TREE_TYPE (cst),
+ cst, integer_one_node);
+ gcc_assert (CONSTANT_CLASS_P (result));
+ return result;
+}
+
+/* Return true iff CST is above the minimum value for its type. */
+
+static bool
+can_minus_one_p (tree cst)
+{
+ gcc_assert (CONSTANT_CLASS_P (cst));
+ return tree_int_cst_lt (TYPE_MIN_VALUE (TREE_TYPE (cst)), cst);
+}
+
+/* Return (CST - 1). */
+
+static tree
+minus_one (tree cst)
+{
+ gcc_assert (CONSTANT_CLASS_P (cst));
+ gcc_assert (can_minus_one_p (cst));
+ tree result = fold_build2 (MINUS_EXPR, TREE_TYPE (cst),
+ cst, integer_one_node);
+ gcc_assert (CONSTANT_CLASS_P (result));
+ return result;
+}
+
/* struct bound. */
/* Ensure that this bound is closed by converting an open bound to a
@@ -255,6 +301,678 @@ range::above_upper_bound (tree rhs_const) const
m_upper_bound.m_constant).is_true ();
}
+/* struct bounded_range. */
+
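+/* bounded_range's ctor.  LOWER and UPPER may both be NULL_TREE, for
+   creating an uninitialized instance to be written back to later.  */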
+bounded_range::bounded_range (const_tree lower, const_tree upper)
+: m_lower (const_cast<tree> (lower)),
+ m_upper (const_cast<tree> (upper))
+{
+ if (lower && upper)
+ {
+ gcc_assert (TREE_CODE (m_lower) == INTEGER_CST);
+ gcc_assert (TREE_CODE (m_upper) == INTEGER_CST);
+ /* We should have lower <= upper. */
+ gcc_assert (!tree_int_cst_lt (m_upper, m_lower));
+ }
+ else
+ {
+      /* Purely for creating uninitialized on-stack instances
+	 to be written back to.  */
+      gcc_assert (m_lower == NULL_TREE);
+      gcc_assert (m_upper == NULL_TREE);
+ }
+}
+
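+/* Subroutine of bounded_range::dump_to_pp: dump CST to PP, prefixed
+   by its parenthesized type if SHOW_TYPES is true.  */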
+static void
+dump_cst (pretty_printer *pp, tree cst, bool show_types)
+{
+ gcc_assert (cst);
+ if (show_types)
+ {
+ pp_character (pp, '(');
+ dump_generic_node (pp, TREE_TYPE (cst), 0, (dump_flags_t)0, false);
+ pp_character (pp, ')');
+ }
+ dump_generic_node (pp, cst, 0, (dump_flags_t)0, false);
+}
+
+/* Dump this object to PP. */
+
+void
+bounded_range::dump_to_pp (pretty_printer *pp, bool show_types) const
+{
+ if (tree_int_cst_equal (m_lower, m_upper))
+ dump_cst (pp, m_lower, show_types);
+ else
+ {
+ pp_character (pp, '[');
+ dump_cst (pp, m_lower, show_types);
+ pp_string (pp, ", ");
+ dump_cst (pp, m_upper, show_types);
+ pp_character (pp, ']');
+ }
+}
+
+/* Dump this object to stderr. */
+
+void
+bounded_range::dump (bool show_types) const
+{
+ pretty_printer pp;
+ pp_format_decoder (&pp) = default_tree_printer;
+ pp_show_color (&pp) = pp_show_color (global_dc->printer);
+ pp.buffer->stream = stderr;
+ dump_to_pp (&pp, show_types);
+ pp_newline (&pp);
+ pp_flush (&pp);
+}
+
+json::object *
+bounded_range::to_json () const
+{
+ json::object *range_obj = new json::object ();
+ set_json_attr (range_obj, "lower", m_lower);
+ set_json_attr (range_obj, "upper", m_upper);
+ return range_obj;
+}
+
+/* Subroutine of bounded_range::to_json. */
+
+void
+bounded_range::set_json_attr (json::object *obj, const char *name, tree value)
+{
+ pretty_printer pp;
+ pp_format_decoder (&pp) = default_tree_printer;
+ pp_printf (&pp, "%E", value);
+ obj->set (name, new json::string (pp_formatted_text (&pp)));
+}
+
+/* Return true iff CST is within this range. */
+
+bool
+bounded_range::contains_p (tree cst) const
+{
+ /* Reject if below lower bound. */
+ if (tree_int_cst_lt (cst, m_lower))
+ return false;
+  /* Reject if above upper bound. */
+ if (tree_int_cst_lt (m_upper, cst))
+ return false;
+ return true;
+}
+
+/* If this range intersects OTHER, return true, writing
+ the intersection to *OUT if OUT is non-NULL.
+ Return false if they do not intersect. */
+
+bool
+bounded_range::intersects_p (const bounded_range &other,
+ bounded_range *out) const
+{
+ const tree max_lower
+ = (tree_int_cst_le (m_lower, other.m_lower)
+ ? other.m_lower : m_lower);
+ gcc_assert (TREE_CODE (max_lower) == INTEGER_CST);
+ const tree min_upper
+ = (tree_int_cst_le (m_upper, other.m_upper)
+ ? m_upper : other.m_upper);
+ gcc_assert (TREE_CODE (min_upper) == INTEGER_CST);
+
+ if (tree_int_cst_le (max_lower, min_upper))
+ {
+ if (out)
+ *out = bounded_range (max_lower, min_upper);
+ return true;
+ }
+ else
+ return false;
+}
+
+bool
+bounded_range::operator== (const bounded_range &other) const
+{
+ return (tree_int_cst_equal (m_lower, other.m_lower)
+ && tree_int_cst_equal (m_upper, other.m_upper));
+}
+
+int
+bounded_range::cmp (const bounded_range &br1, const bounded_range &br2)
+{
+ if (int cmp_lower = tree_int_cst_compare (br1.m_lower,
+ br2.m_lower))
+ return cmp_lower;
+ return tree_int_cst_compare (br1.m_upper, br2.m_upper);
+}
+
+/* struct bounded_ranges. */
+
+/* Construct a bounded_ranges instance from a single range. */
+
+bounded_ranges::bounded_ranges (const bounded_range &range)
+: m_ranges (1)
+{
+ m_ranges.quick_push (range);
+ canonicalize ();
+ validate ();
+}
+
+/* Construct a bounded_ranges instance from multiple ranges. */
+
+bounded_ranges::bounded_ranges (const vec<bounded_range> &ranges)
+: m_ranges (ranges.length ())
+{
+ m_ranges.safe_splice (ranges);
+ canonicalize ();
+ validate ();
+}
+
+/* Construct a bounded_ranges instance for values of LHS for which
+   (LHS OP RHS_CONST) is true (e.g. "(LHS > 3)"). */
+
+bounded_ranges::bounded_ranges (enum tree_code op, tree rhs_const)
+: m_ranges ()
+{
+ gcc_assert (TREE_CODE (rhs_const) == INTEGER_CST);
+ tree type = TREE_TYPE (rhs_const);
+ switch (op)
+ {
+ default:
+ gcc_unreachable ();
+ case EQ_EXPR:
+ m_ranges.safe_push (bounded_range (rhs_const, rhs_const));
+ break;
+
+ case GE_EXPR:
+ m_ranges.safe_push (bounded_range (rhs_const, TYPE_MAX_VALUE (type)));
+ break;
+
+ case LE_EXPR:
+ m_ranges.safe_push (bounded_range (TYPE_MIN_VALUE (type), rhs_const));
+ break;
+
+ case NE_EXPR:
+ if (tree_int_cst_lt (TYPE_MIN_VALUE (type), rhs_const))
+ m_ranges.safe_push (bounded_range (TYPE_MIN_VALUE (type),
+ minus_one (rhs_const)));
+ if (tree_int_cst_lt (rhs_const, TYPE_MAX_VALUE (type)))
+ m_ranges.safe_push (bounded_range (plus_one (rhs_const),
+ TYPE_MAX_VALUE (type)));
+ break;
+ case GT_EXPR:
+ if (tree_int_cst_lt (rhs_const, TYPE_MAX_VALUE (type)))
+ m_ranges.safe_push (bounded_range (plus_one (rhs_const),
+ TYPE_MAX_VALUE (type)));
+ break;
+ case LT_EXPR:
+ if (tree_int_cst_lt (TYPE_MIN_VALUE (type), rhs_const))
+ m_ranges.safe_push (bounded_range (TYPE_MIN_VALUE (type),
+ minus_one (rhs_const)));
+ break;
+ }
+ canonicalize ();
+ validate ();
+}
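
As an illustrative aside (a sketch, not part of the patch): the ctor above encodes each comparison as at most two closed ranges over [TYPE_MIN, TYPE_MAX]. Below is a minimal standalone C++ model of the same mapping, using plain ints over [0, 255] in place of INTEGER_CST trees; ranges_for_op and range_vec are made-up names.

#include <climits>
#include <cstdio>
#include <utility>
#include <vector>

// One closed range [lo, hi] per pair.
typedef std::vector<std::pair<int, int>> range_vec;

// Map "x OP c" over a type spanning [lo, hi] to a list of closed ranges.
static range_vec
ranges_for_op (char op, int c, int lo = 0, int hi = UCHAR_MAX)
{
  range_vec out;
  switch (op)
    {
    case '=':                     // EQ_EXPR: the single point {c}
      out.push_back ({c, c});
      break;
    case '!':                     // NE_EXPR: up to two ranges around c
      if (lo < c) out.push_back ({lo, c - 1});
      if (c < hi) out.push_back ({c + 1, hi});
      break;
    case '<':                     // LT_EXPR: empty if c is the minimum
      if (lo < c) out.push_back ({lo, c - 1});
      break;
    case '>':                     // GT_EXPR: empty if c is the maximum
      if (c < hi) out.push_back ({c + 1, hi});
      break;
    }
  return out;
}

int main ()
{
  for (auto [l, u] : ranges_for_op ('!', 128))
    std::printf ("[%d, %d] ", l, u);  // prints "[0, 127] [129, 255] "
}
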
+
+/* Subroutine of ctors for fixing up m_ranges.
+ Also, initialize m_hash. */
+
+void
+bounded_ranges::canonicalize ()
+{
+ /* Sort the ranges. */
+ m_ranges.qsort ([](const void *p1, const void *p2) -> int
+ {
+ const bounded_range &br1 = *(const bounded_range *)p1;
+ const bounded_range &br2 = *(const bounded_range *)p2;
+ return bounded_range::cmp (br1, br2);
+ });
+
+ /* Merge ranges that are touching or overlapping. */
+ for (unsigned i = 1; i < m_ranges.length (); )
+ {
+ bounded_range *prev = &m_ranges[i - 1];
+ const bounded_range *next = &m_ranges[i];
+ if (prev->intersects_p (*next, NULL)
+ || (can_plus_one_p (prev->m_upper)
+ && tree_int_cst_equal (plus_one (prev->m_upper),
+ next->m_lower)))
+ {
+	  /* NEXT may reach no further than PREV (e.g. be nested within
+	     it); only extend PREV's upper bound if NEXT goes beyond. */
+	  if (tree_int_cst_lt (prev->m_upper, next->m_upper))
+	    prev->m_upper = next->m_upper;
+ m_ranges.ordered_remove (i);
+ }
+ else
+ i++;
+ }
+
+ /* Initialize m_hash. */
+ inchash::hash hstate (0);
+ for (const auto &iter : m_ranges)
+ {
+ inchash::add_expr (iter.m_lower, hstate);
+ inchash::add_expr (iter.m_upper, hstate);
+ }
+ m_hash = hstate.end ();
+}
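
A standalone model of this sort-and-merge pass (a sketch, not GCC code): plain longs stand in for INTEGER_CST trees, and a fresh vector is built rather than removing elements in place.

#include <algorithm>
#include <vector>

struct rng { long lo, hi; };

// Sort by lower bound, then merge ranges that overlap or touch.
static void
canonicalize (std::vector<rng> &v)
{
  std::sort (v.begin (), v.end (),
             [] (const rng &a, const rng &b) { return a.lo < b.lo; });
  std::vector<rng> merged;
  for (const rng &r : v)
    if (!merged.empty () && r.lo <= merged.back ().hi + 1)
      merged.back ().hi = std::max (merged.back ().hi, r.hi);
    else
      merged.push_back (r);
  v = merged;
}
// e.g. {[5,10], [0,3], [4,4]} canonicalizes to the single range {[0,10]}.
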
+
+/* Assert that this object is valid. */
+
+void
+bounded_ranges::validate () const
+{
+ /* Skip this in a release build. */
+#if !CHECKING_P
+ return;
+#endif
+
+ for (unsigned i = 1; i < m_ranges.length (); i++)
+ {
+ const bounded_range &prev = m_ranges[i - 1];
+ const bounded_range &next = m_ranges[i];
+
+      /* Give up if the bounds somehow have incompatible types. */
+ if (!types_compatible_p (TREE_TYPE (prev.m_upper),
+ TREE_TYPE (next.m_lower)))
+ continue;
+
+ /* Verify sorted. */
+ gcc_assert (tree_int_cst_lt (prev.m_upper, next.m_lower));
+
+ gcc_assert (can_plus_one_p (prev.m_upper));
+      /* Otherwise there's no room for "next". */
+
+ /* Verify no ranges touch each other. */
+ gcc_assert (tree_int_cst_lt (plus_one (prev.m_upper), next.m_lower));
+ }
+}
+
+/* bounded_ranges equality operator. */
+
+bool
+bounded_ranges::operator== (const bounded_ranges &other) const
+{
+ if (m_ranges.length () != other.m_ranges.length ())
+ return false;
+ for (unsigned i = 0; i < m_ranges.length (); i++)
+ {
+ if (m_ranges[i] != other.m_ranges[i])
+ return false;
+ }
+ return true;
+}
+
+/* Dump this object to PP. */
+
+void
+bounded_ranges::dump_to_pp (pretty_printer *pp, bool show_types) const
+{
+ pp_character (pp, '{');
+ for (unsigned i = 0; i < m_ranges.length (); ++i)
+ {
+ if (i > 0)
+ pp_string (pp, ", ");
+ m_ranges[i].dump_to_pp (pp, show_types);
+ }
+ pp_character (pp, '}');
+}
+
+/* Dump this object to stderr. */
+
+DEBUG_FUNCTION void
+bounded_ranges::dump (bool show_types) const
+{
+ pretty_printer pp;
+ pp_format_decoder (&pp) = default_tree_printer;
+ pp_show_color (&pp) = pp_show_color (global_dc->printer);
+ pp.buffer->stream = stderr;
+ dump_to_pp (&pp, show_types);
+ pp_newline (&pp);
+ pp_flush (&pp);
+}
+
+json::value *
+bounded_ranges::to_json () const
+{
+ json::array *arr_obj = new json::array ();
+
+ for (unsigned i = 0; i < m_ranges.length (); ++i)
+ arr_obj->append (m_ranges[i].to_json ());
+
+ return arr_obj;
+}
+
+/* Determine whether (X OP RHS_CONST) is known to be true or false
+ for all X in the ranges expressed by this object. */
+
+tristate
+bounded_ranges::eval_condition (enum tree_code op,
+ tree rhs_const,
+ bounded_ranges_manager *mgr) const
+{
+ /* Convert (X OP RHS_CONST) to a bounded_ranges instance and find
+ the intersection of that with this object. */
+ bounded_ranges other (op, rhs_const);
+ const bounded_ranges *intersection
+ = mgr->get_or_create_intersection (this, &other);
+
+ if (intersection->m_ranges.length () > 0)
+ {
+ /* We can use pointer equality to check for equality,
+ due to instance consolidation. */
+ if (intersection == this)
+ return tristate (tristate::TS_TRUE);
+ else
+ return tristate (tristate::TS_UNKNOWN);
+ }
+ else
+ /* No intersection. */
+ return tristate (tristate::TS_FALSE);
+}
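
The three-way result follows from set algebra: an empty intersection means the condition is false for every possible value; an intersection equal to the full known set means it is true for every value; anything in between is unknown. A standalone sketch over explicit value sets, where the pointer comparison "intersection == this" above plays the role of the set-equality test here:

#include <algorithm>
#include <iterator>
#include <set>

enum class tristate { yes, no, unknown };

// KNOWN: values the variable may take; COND: values satisfying (x OP c).
static tristate
eval_condition (const std::set<int> &known, const std::set<int> &cond)
{
  std::set<int> inter;
  std::set_intersection (known.begin (), known.end (),
                         cond.begin (), cond.end (),
                         std::inserter (inter, inter.begin ()));
  if (inter.empty ())
    return tristate::no;       // no possible value satisfies the condition
  if (inter == known)
    return tristate::yes;      // every possible value satisfies it
  return tristate::unknown;    // some do, some don't
}
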
+
+/* Return true if CST is within any of the ranges. */
+
+bool
+bounded_ranges::contain_p (tree cst) const
+{
+ gcc_assert (TREE_CODE (cst) == INTEGER_CST);
+ for (const auto &iter : m_ranges)
+ {
+ /* TODO: should we optimize this based on sorting? */
+ if (iter.contains_p (cst))
+ return true;
+ }
+ return false;
+}
+
+int
+bounded_ranges::cmp (const bounded_ranges *a, const bounded_ranges *b)
+{
+ if (int cmp_length = ((int)a->m_ranges.length ()
+ - (int)b->m_ranges.length ()))
+ return cmp_length;
+ for (unsigned i = 0; i < a->m_ranges.length (); i++)
+ {
+ if (int cmp_range = bounded_range::cmp (a->m_ranges[i], b->m_ranges[i]))
+ return cmp_range;
+ }
+ /* They are equal. They ought to have been consolidated, so we should
+ have two pointers to the same object. */
+ gcc_assert (a == b);
+ return 0;
+}
+
+/* class bounded_ranges_manager. */
+
+/* bounded_ranges_manager's dtor. */
+
+bounded_ranges_manager::~bounded_ranges_manager ()
+{
+ /* Delete the managed objects. */
+ for (const auto &iter : m_map)
+ delete iter.second;
+}
+
+/* Get the bounded_ranges instance for the empty set, creating it if
+ necessary. */
+
+const bounded_ranges *
+bounded_ranges_manager::get_or_create_empty ()
+{
+ auto_vec<bounded_range> empty_vec;
+
+ return consolidate (new bounded_ranges (empty_vec));
+}
+
+/* Get the bounded_ranges instance for {CST}, creating it if necessary. */
+
+const bounded_ranges *
+bounded_ranges_manager::get_or_create_point (const_tree cst)
+{
+ gcc_assert (TREE_CODE (cst) == INTEGER_CST);
+
+ return get_or_create_range (cst, cst);
+}
+
+/* Get the bounded_ranges instance for {[LOWER_BOUND..UPPER_BOUND]},
+ creating it if necessary. */
+
+const bounded_ranges *
+bounded_ranges_manager::get_or_create_range (const_tree lower_bound,
+ const_tree upper_bound)
+{
+ gcc_assert (TREE_CODE (lower_bound) == INTEGER_CST);
+ gcc_assert (TREE_CODE (upper_bound) == INTEGER_CST);
+
+ return consolidate
+ (new bounded_ranges (bounded_range (lower_bound, upper_bound)));
+}
+
+/* Get the bounded_ranges instance for the union of OTHERS,
+ creating it if necessary. */
+
+const bounded_ranges *
+bounded_ranges_manager::
+get_or_create_union (const vec <const bounded_ranges *> &others)
+{
+ auto_vec<bounded_range> ranges;
+ for (const auto &r : others)
+ ranges.safe_splice (r->m_ranges);
+ return consolidate (new bounded_ranges (ranges));
+}
+
+/* Get the bounded_ranges instance for the intersection of A and B,
+ creating it if necessary. */
+
+const bounded_ranges *
+bounded_ranges_manager::get_or_create_intersection (const bounded_ranges *a,
+ const bounded_ranges *b)
+{
+ auto_vec<bounded_range> ranges;
+ unsigned a_idx = 0;
+ unsigned b_idx = 0;
+ while (a_idx < a->m_ranges.length ()
+ && b_idx < b->m_ranges.length ())
+ {
+ const bounded_range &r_a = a->m_ranges[a_idx];
+ const bounded_range &r_b = b->m_ranges[b_idx];
+
+ bounded_range intersection (NULL_TREE, NULL_TREE);
+ if (r_a.intersects_p (r_b, &intersection))
+ {
+ ranges.safe_push (intersection);
+ }
+      /* Advance past whichever range ends first; it cannot intersect
+	 anything further in the other list.  (Advancing on the lower
+	 bound instead would drop overlaps when one range strictly
+	 contains several ranges of the other list.) */
+      if (tree_int_cst_lt (r_a.m_upper, r_b.m_upper))
+	a_idx++;
+      else
+	b_idx++;
+ }
+
+ return consolidate (new bounded_ranges (ranges));
+}
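
A standalone sketch of the same two-pointer walk over canonical (sorted, disjoint) range lists; it advances past whichever range ends first, since that range cannot overlap anything further.

#include <algorithm>
#include <vector>

struct rng { long lo, hi; };

static std::vector<rng>
intersect (const std::vector<rng> &a, const std::vector<rng> &b)
{
  std::vector<rng> out;
  size_t i = 0, j = 0;
  while (i < a.size () && j < b.size ())
    {
      long lo = std::max (a[i].lo, b[j].lo);
      long hi = std::min (a[i].hi, b[j].hi);
      if (lo <= hi)
        out.push_back ({lo, hi});          // the overlapping part, if any
      // Drop whichever range ends first; it cannot overlap anything else.
      if (a[i].hi < b[j].hi)
        i++;
      else
        j++;
    }
  return out;
}
// e.g. {[0,10], [20,30]} intersected with {[5,25]} gives {[5,10], [20,25]}.
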
+
+/* Get the bounded_ranges instance for the inverse of OTHER relative
+ to TYPE, creating it if necessary.
+ This is for use when handling "default" in switch statements, where
+ OTHER represents all the other cases. */
+
+const bounded_ranges *
+bounded_ranges_manager::get_or_create_inverse (const bounded_ranges *other,
+ tree type)
+{
+ tree min_val = TYPE_MIN_VALUE (type);
+ tree max_val = TYPE_MAX_VALUE (type);
+ if (other->m_ranges.length () == 0)
+ return get_or_create_range (min_val, max_val);
+ auto_vec<bounded_range> ranges;
+ tree first_lb = other->m_ranges[0].m_lower;
+ if (tree_int_cst_lt (min_val, first_lb)
+ && can_minus_one_p (first_lb))
+ ranges.safe_push (bounded_range (min_val,
+ minus_one (first_lb)));
+ for (unsigned i = 1; i < other->m_ranges.length (); i++)
+ {
+ tree prev_ub = other->m_ranges[i - 1].m_upper;
+ tree iter_lb = other->m_ranges[i].m_lower;
+ gcc_assert (tree_int_cst_lt (prev_ub, iter_lb));
+ if (can_plus_one_p (prev_ub) && can_minus_one_p (iter_lb))
+ ranges.safe_push (bounded_range (plus_one (prev_ub),
+ minus_one (iter_lb)));
+ }
+ tree last_ub
+ = other->m_ranges[other->m_ranges.length () - 1].m_upper;
+ if (tree_int_cst_lt (last_ub, max_val)
+ && can_plus_one_p (last_ub))
+ ranges.safe_push (bounded_range (plus_one (last_ub), max_val));
+
+ return consolidate (new bounded_ranges (ranges));
+}
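
A standalone sketch of the complement computation, using plain longs: emit the gap before the first range, the gaps between ranges, and the gap after the last one. The overflow guards above (can_plus_one_p/can_minus_one_p) have no analogue here, since canonical ranges are non-touching and lie within [type_min, type_max].

#include <vector>

struct rng { long lo, hi; };

static std::vector<rng>
invert (const std::vector<rng> &v, long type_min, long type_max)
{
  if (v.empty ())
    return {{type_min, type_max}};
  std::vector<rng> out;
  if (type_min < v.front ().lo)
    out.push_back ({type_min, v.front ().lo - 1});
  for (size_t i = 1; i < v.size (); i++)
    out.push_back ({v[i - 1].hi + 1, v[i].lo - 1});
  if (v.back ().hi < type_max)
    out.push_back ({v.back ().hi + 1, type_max});
  return out;
}
// invert ({[65,90], [97,122]}, 0, 255) yields {[0,64], [91,96], [123,255]},
// i.e. the "default:" values for "case 'A'-'Z': case 'a'-'z':".
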
+
+/* If an object equal to INST is already present, delete INST and
+ return the existing object.
+ Otherwise add INST and return it. */
+
+const bounded_ranges *
+bounded_ranges_manager::consolidate (bounded_ranges *inst)
+{
+ if (bounded_ranges **slot = m_map.get (inst))
+ {
+ delete inst;
+ return *slot;
+ }
+ m_map.put (inst, inst);
+ return inst;
+}
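
The consolidation here is classic hash-consing: keep one canonical object per distinct value, so equality checks elsewhere become pointer comparisons. A small standalone sketch of the idiom with std::string payloads (interner is a made-up name; GCC's version keys the hash_map by the object's own hash and equality):

#include <memory>
#include <string>
#include <unordered_map>

class interner
{
public:
  const std::string *consolidate (std::string s)
  {
    auto it = m_map.find (s);
    if (it != m_map.end ())
      return it->second.get ();       // drop S, reuse the existing object
    auto owned = std::make_unique<std::string> (std::move (s));
    const std::string *result = owned.get ();
    m_map.emplace (*result, std::move (owned));
    return result;
  }
private:
  std::unordered_map<std::string, std::unique_ptr<std::string>> m_map;
};
// interner m; m.consolidate ("x") == m.consolidate ("x") holds by pointer.
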
+
+/* Get the bounded_ranges instance for EDGE of SWITCH_STMT,
+ creating it if necessary, and caching it by edge. */
+
+const bounded_ranges *
+bounded_ranges_manager::
+get_or_create_ranges_for_switch (const switch_cfg_superedge *edge,
+ const gswitch *switch_stmt)
+{
+ /* Look in per-edge cache. */
+  if (const bounded_ranges **slot = m_edge_cache.get (edge))
+ return *slot;
+
+ /* Not yet in cache. */
+ const bounded_ranges *all_cases_ranges
+ = create_ranges_for_switch (*edge, switch_stmt);
+ m_edge_cache.put (edge, all_cases_ranges);
+ return all_cases_ranges;
+}
+
+/* Compute the bounded_ranges instance for EDGE of SWITCH_STMT;
+   subroutine of get_or_create_ranges_for_switch, for edges for which
+   the per-edge cache has not yet been populated. */
+
+const bounded_ranges *
+bounded_ranges_manager::
+create_ranges_for_switch (const switch_cfg_superedge &edge,
+ const gswitch *switch_stmt)
+{
+ /* Get the ranges for each case label. */
+ auto_vec <const bounded_ranges *> case_ranges_vec
+ (gimple_switch_num_labels (switch_stmt));
+
+ for (tree case_label : edge.get_case_labels ())
+ {
+ /* Get the ranges for this case label. */
+ const bounded_ranges *case_ranges
+ = make_case_label_ranges (switch_stmt, case_label);
+ case_ranges_vec.quick_push (case_ranges);
+ }
+
+ /* Combine all the ranges for each case label into a single collection
+ of ranges. */
+ const bounded_ranges *all_cases_ranges
+ = get_or_create_union (case_ranges_vec);
+ return all_cases_ranges;
+}
+
+/* Get the bounded_ranges instance for CASE_LABEL within
+ SWITCH_STMT. */
+
+const bounded_ranges *
+bounded_ranges_manager::
+make_case_label_ranges (const gswitch *switch_stmt,
+ tree case_label)
+{
+ gcc_assert (TREE_CODE (case_label) == CASE_LABEL_EXPR);
+ tree lower_bound = CASE_LOW (case_label);
+ tree upper_bound = CASE_HIGH (case_label);
+ if (lower_bound)
+ {
+ if (upper_bound)
+ /* Range. */
+ return get_or_create_range (lower_bound, upper_bound);
+ else
+ /* Single-value. */
+ return get_or_create_point (lower_bound);
+ }
+ else
+ {
+ /* The default case.
+ Add exclusions based on the other cases. */
+ auto_vec <const bounded_ranges *> other_case_ranges
+ (gimple_switch_num_labels (switch_stmt));
+ for (unsigned other_idx = 1;
+ other_idx < gimple_switch_num_labels (switch_stmt);
+ other_idx++)
+ {
+ tree other_label = gimple_switch_label (switch_stmt,
+ other_idx);
+ const bounded_ranges *other_ranges
+ = make_case_label_ranges (switch_stmt, other_label);
+ other_case_ranges.quick_push (other_ranges);
+ }
+ const bounded_ranges *other_cases_ranges
+ = get_or_create_union (other_case_ranges);
+ tree type = TREE_TYPE (gimple_switch_index (switch_stmt));
+ return get_or_create_inverse (other_cases_ranges, type);
+ }
+}
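
Putting the pieces together for the default case, as a sketch reusing the canonicalize() and invert() helpers sketched earlier (assumed in scope, along with struct rng): the "default:" label receives the complement of the union of all explicit case labels.

static std::vector<rng>
default_ranges_for (std::vector<rng> case_labels,
                    long type_min, long type_max)
{
  canonicalize (case_labels);               // union of all the case labels
  return invert (case_labels, type_min, type_max);
}
// e.g. "case 1 ... 3: case 7:" over unsigned char:
// default_ranges_for ({{1,3}, {7,7}}, 0, 255) == {[0,0], [4,6], [8,255]}.
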
+
+/* Dump the number of objects of each class that were managed by this
+ manager to LOGGER.
+ If SHOW_OBJS is true, also dump the objects themselves. */
+
+void
+bounded_ranges_manager::log_stats (logger *logger, bool show_objs) const
+{
+ LOG_SCOPE (logger);
+ logger->log (" # %s: %li", "ranges", m_map.elements ());
+ if (!show_objs)
+ return;
+
+ auto_vec<const bounded_ranges *> vec_objs (m_map.elements ());
+ for (const auto &iter : m_map)
+ vec_objs.quick_push (iter.second);
+ vec_objs.qsort
+ ([](const void *p1, const void *p2) -> int
+ {
+ const bounded_ranges *br1 = *(const bounded_ranges * const *)p1;
+ const bounded_ranges *br2 = *(const bounded_ranges * const *)p2;
+ return bounded_ranges::cmp (br1, br2);
+ });
+
+ for (const auto &iter : vec_objs)
+ {
+ logger->start_log_line ();
+ pretty_printer *pp = logger->get_printer ();
+ pp_string (pp, " ");
+ iter->dump_to_pp (pp, true);
+ logger->end_log_line ();
+ }
+}
+
/* class equiv_class. */
/* equiv_class's default ctor. */
@@ -270,9 +988,7 @@ equiv_class::equiv_class (const equiv_class &other)
: m_constant (other.m_constant), m_cst_sval (other.m_cst_sval),
m_vars (other.m_vars.length ())
{
- int i;
- const svalue *sval;
- FOR_EACH_VEC_ELT (other.m_vars, i, sval)
+ for (const svalue *sval : other.m_vars)
m_vars.quick_push (sval);
}
@@ -310,9 +1026,7 @@ equiv_class::to_json () const
json::object *ec_obj = new json::object ();
json::array *sval_arr = new json::array ();
- int i;
- const svalue *sval;
- FOR_EACH_VEC_ELT (m_vars, i, sval)
+ for (const svalue *sval : m_vars)
sval_arr->append (sval->to_json ());
ec_obj->set ("svals", sval_arr);
@@ -337,9 +1051,7 @@ equiv_class::hash () const
inchash::hash hstate;
inchash::add_expr (m_constant, hstate);
- int i;
- const svalue *sval;
- FOR_EACH_VEC_ELT (m_vars, i, sval)
+ for (const svalue * sval : m_vars)
hstate.add_ptr (sval);
return hstate.end ();
}
@@ -582,6 +1294,49 @@ constraint::implied_by (const constraint &other,
return false;
}
+/* class bounded_ranges_constraint. */
+
+void
+bounded_ranges_constraint::print (pretty_printer *pp,
+ const constraint_manager &cm) const
+{
+ m_ec_id.print (pp);
+ pp_string (pp, ": ");
+ m_ec_id.get_obj (cm).print (pp);
+ pp_string (pp, ": ");
+ m_ranges->dump_to_pp (pp, true);
+}
+
+json::object *
+bounded_ranges_constraint::to_json () const
+{
+ json::object *con_obj = new json::object ();
+
+ con_obj->set ("ec", new json::integer_number (m_ec_id.as_int ()));
+ con_obj->set ("ranges", m_ranges->to_json ());
+
+ return con_obj;
+}
+
+bool
+bounded_ranges_constraint::
+operator== (const bounded_ranges_constraint &other) const
+{
+ if (m_ec_id != other.m_ec_id)
+ return false;
+
+ /* We can compare by pointer, since the bounded_ranges_manager
+ consolidates instances. */
+ return m_ranges == other.m_ranges;
+}
+
+void
+bounded_ranges_constraint::add_to_hash (inchash::hash *hstate) const
+{
+ hstate->add_int (m_ec_id.m_idx);
+ hstate->merge_hash (m_ranges->get_hash ());
+}
+
/* class equiv_class_id. */
/* Get the underlying equiv_class for this ID from CM. */
@@ -618,6 +1373,7 @@ equiv_class_id::print (pretty_printer *pp) const
constraint_manager::constraint_manager (const constraint_manager &other)
: m_equiv_classes (other.m_equiv_classes.length ()),
m_constraints (other.m_constraints.length ()),
+ m_bounded_ranges_constraints (other.m_bounded_ranges_constraints.length ()),
m_mgr (other.m_mgr)
{
int i;
@@ -627,6 +1383,8 @@ constraint_manager::constraint_manager (const constraint_manager &other)
constraint *c;
FOR_EACH_VEC_ELT (other.m_constraints, i, c)
m_constraints.quick_push (*c);
+ for (const auto &iter : other.m_bounded_ranges_constraints)
+ m_bounded_ranges_constraints.quick_push (iter);
}
/* constraint_manager's assignment operator. */
@@ -636,6 +1394,7 @@ constraint_manager::operator= (const constraint_manager &other)
{
gcc_assert (m_equiv_classes.length () == 0);
gcc_assert (m_constraints.length () == 0);
+ gcc_assert (m_bounded_ranges_constraints.length () == 0);
int i;
equiv_class *ec;
@@ -646,6 +1405,8 @@ constraint_manager::operator= (const constraint_manager &other)
m_constraints.reserve (other.m_constraints.length ());
FOR_EACH_VEC_ELT (other.m_constraints, i, c)
m_constraints.quick_push (*c);
+ for (const auto &iter : other.m_bounded_ranges_constraints)
+ m_bounded_ranges_constraints.quick_push (iter);
return *this;
}
@@ -664,6 +1425,8 @@ constraint_manager::hash () const
hstate.merge_hash (ec->hash ());
FOR_EACH_VEC_ELT (m_constraints, i, c)
hstate.merge_hash (c->hash ());
+ for (const auto &iter : m_bounded_ranges_constraints)
+ iter.add_to_hash (&hstate);
return hstate.end ();
}
@@ -676,6 +1439,9 @@ constraint_manager::operator== (const constraint_manager &other) const
return false;
if (m_constraints.length () != other.m_constraints.length ())
return false;
+ if (m_bounded_ranges_constraints.length ()
+ != other.m_bounded_ranges_constraints.length ())
+ return false;
int i;
equiv_class *ec;
@@ -690,6 +1456,13 @@ constraint_manager::operator== (const constraint_manager &other) const
if (!(*c == other.m_constraints[i]))
return false;
+ for (unsigned i = 0; i < m_bounded_ranges_constraints.length (); i++)
+ {
+ if (m_bounded_ranges_constraints[i]
+ != other.m_bounded_ranges_constraints[i])
+ return false;
+ }
+
return true;
}
@@ -717,6 +1490,18 @@ constraint_manager::print (pretty_printer *pp) const
pp_string (pp, " && ");
c->print (pp, *this);
}
+ if (m_bounded_ranges_constraints.length ())
+ {
+ pp_string (pp, " | ");
+ i = 0;
+ for (const auto &iter : m_bounded_ranges_constraints)
+ {
+ if (i > 0)
+ pp_string (pp, " && ");
+ iter.print (pp, *this);
+ i++;
+ }
+ }
pp_printf (pp, "}");
}
@@ -768,6 +1553,30 @@ constraint_manager::dump_to_pp (pretty_printer *pp, bool multiline) const
}
if (!multiline)
pp_string (pp, "}");
+ if (m_bounded_ranges_constraints.length ())
+ {
+ if (multiline)
+ pp_string (pp, " ");
+ pp_string (pp, "ranges:");
+ if (multiline)
+ pp_newline (pp);
+ else
+ pp_string (pp, "{");
+ i = 0;
+ for (const auto &iter : m_bounded_ranges_constraints)
+ {
+ if (multiline)
+ pp_string (pp, " ");
+ else if (i > 0)
+ pp_string (pp, " && ");
+ iter.print (pp, *this);
+ if (multiline)
+ pp_newline (pp);
+ i++;
+ }
+ if (!multiline)
+ pp_string (pp, "}");
+ }
}
/* Dump a multiline representation of this constraint_manager to FP. */
@@ -811,9 +1620,7 @@ constraint_manager::to_json () const
/* Equivalence classes. */
{
json::array *ec_arr = new json::array ();
- int i;
- equiv_class *ec;
- FOR_EACH_VEC_ELT (m_equiv_classes, i, ec)
+ for (const equiv_class *ec : m_equiv_classes)
ec_arr->append (ec->to_json ());
cm_obj->set ("ecs", ec_arr);
}
@@ -821,13 +1628,19 @@ constraint_manager::to_json () const
/* Constraints. */
{
json::array *con_arr = new json::array ();
- int i;
- constraint *c;
- FOR_EACH_VEC_ELT (m_constraints, i, c)
- con_arr->append (c->to_json ());
+ for (const constraint &c : m_constraints)
+ con_arr->append (c.to_json ());
cm_obj->set ("constraints", con_arr);
}
+ /* m_bounded_ranges_constraints. */
+ {
+ json::array *con_arr = new json::array ();
+ for (const auto &c : m_bounded_ranges_constraints)
+ con_arr->append (c.to_json ());
+ cm_obj->set ("bounded_ranges_constraints", con_arr);
+ }
+
return cm_obj;
}
@@ -843,9 +1656,9 @@ constraint_manager::add_constraint (const svalue *lhs,
lhs = lhs->unwrap_any_unmergeable ();
rhs = rhs->unwrap_any_unmergeable ();
- /* Nothing can be known about unknown values. */
- if (lhs->get_kind () == SK_UNKNOWN
- || rhs->get_kind () == SK_UNKNOWN)
+ /* Nothing can be known about unknown/poisoned values. */
+ if (!lhs->can_have_associated_state_p ()
+ || !rhs->can_have_associated_state_p ())
/* Not a contradiction. */
return true;
@@ -946,6 +1759,8 @@ constraint_manager::add_unknown_constraint (equiv_class_id lhs_ec_id,
if (final_ec != old_ec)
m_equiv_classes[rhs_ec_id.m_idx] = final_ec;
delete old_ec;
+ if (lhs_ec_id == final_ec_id)
+ lhs_ec_id = rhs_ec_id;
/* Update the constraints. */
constraint *c;
@@ -965,6 +1780,14 @@ constraint_manager::add_unknown_constraint (equiv_class_id lhs_ec_id,
if (c->m_rhs == final_ec_id)
c->m_rhs = rhs_ec_id;
}
+ bounded_ranges_constraint *brc;
+ FOR_EACH_VEC_ELT (m_bounded_ranges_constraints, i, brc)
+ {
+ if (brc->m_ec_id == rhs_ec_id)
+ brc->m_ec_id = lhs_ec_id;
+ if (brc->m_ec_id == final_ec_id)
+ brc->m_ec_id = rhs_ec_id;
+ }
/* We may now have self-comparisons due to the merger; these
constraints should be removed. */
@@ -1018,6 +1841,8 @@ constraint_manager::add_constraint_internal (equiv_class_id lhs_id,
/* Add the constraint. */
m_constraints.safe_push (new_c);
+  /* We don't yet update m_bounded_ranges_constraints here. */
+
if (!flag_analyzer_transitivity)
return;
@@ -1151,6 +1976,80 @@ constraint_manager::add_constraint_internal (equiv_class_id lhs_id,
}
}
+/* Attempt to add the constraint that SVAL is within RANGES to this
+ constraint_manager.
+
+ Return true if the constraint was successfully added (or is already
+ known to be true).
+ Return false if the constraint contradicts existing knowledge. */
+
+bool
+constraint_manager::add_bounded_ranges (const svalue *sval,
+ const bounded_ranges *ranges)
+{
+ sval = sval->unwrap_any_unmergeable ();
+
+ /* Nothing can be known about unknown/poisoned values. */
+ if (!sval->can_have_associated_state_p ())
+ /* Not a contradiction. */
+ return true;
+
+ /* If SVAL is a constant, then we can look at RANGES directly. */
+ if (tree cst = sval->maybe_get_constant ())
+ {
+ /* If the ranges contain CST, then it's a successful no-op;
+ otherwise it's a contradiction. */
+ return ranges->contain_p (cst);
+ }
+
+ equiv_class_id ec_id = get_or_add_equiv_class (sval);
+
+ /* If the EC has a constant, it's either true or false. */
+ const equiv_class &ec = ec_id.get_obj (*this);
+ if (tree ec_cst = ec.get_any_constant ())
+ {
+ if (ranges->contain_p (ec_cst))
+ /* We already have SVAL == EC_CST, within RANGES, so
+ we can discard RANGES and succeed. */
+ return true;
+ else
+ /* We already have SVAL == EC_CST, not within RANGES, so
+ we can reject RANGES as a contradiction. */
+ return false;
+ }
+
+  /* We have at most one bounded_ranges_constraint per ec_id;
+     look for an existing one to merge RANGES with, taking a
+     reference since we may update it in place. */
+  for (auto &iter : m_bounded_ranges_constraints)
+ {
+ if (iter.m_ec_id == ec_id)
+ {
+ /* Update with intersection, or fail if empty. */
+ bounded_ranges_manager *mgr = get_range_manager ();
+ const bounded_ranges *intersection
+ = mgr->get_or_create_intersection (iter.m_ranges, ranges);
+ if (intersection->empty_p ())
+ {
+ /* No intersection; fail. */
+ return false;
+ }
+ else
+ {
+ /* Update with intersection; succeed. */
+ iter.m_ranges = intersection;
+ validate ();
+ return true;
+ }
+ }
+ }
+ m_bounded_ranges_constraints.safe_push
+ (bounded_ranges_constraint (ec_id, ranges));
+
+ validate ();
+
+ return true;
+}
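
In miniature, the contract above: each accepted fact narrows the set of possible values, and an empty intersection is a contradiction. A sketch reusing the intersect() helper and struct rng from the earlier sketch (assumed in scope):

static bool
add_fact (std::vector<rng> &known, const std::vector<rng> &fact)
{
  std::vector<rng> narrowed = intersect (known, fact);
  if (narrowed.empty ())
    return false;     // contradiction: no value satisfies both facts
  known = narrowed;   // success: the known set only ever shrinks
  return true;
}
// known = {[0,10]}: add_fact (known, {{5,20}}) narrows it to {[5,10]};
// a subsequent add_fact (known, {{20,30}}) then returns false.
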
+
/* Look for SVAL within the equivalence classes of this constraint_manager;
if found, return true, writing the id to *OUT if OUT is non-NULL,
otherwise return false. */
@@ -1185,14 +2084,15 @@ constraint_manager::get_or_add_equiv_class (const svalue *sval)
{
equiv_class_id result (-1);
- gcc_assert (sval->get_kind () != SK_UNKNOWN);
+ gcc_assert (sval->can_have_associated_state_p ());
/* Convert all NULL pointers to (void *) to avoid state explosions
involving all of the various (foo *)NULL vs (bar *)NULL. */
- if (POINTER_TYPE_P (sval->get_type ()))
- if (tree cst = sval->maybe_get_constant ())
- if (zerop (cst))
- sval = m_mgr->get_or_create_constant_svalue (null_pointer_node);
+ if (sval->get_type ())
+ if (POINTER_TYPE_P (sval->get_type ()))
+ if (tree cst = sval->maybe_get_constant ())
+ if (zerop (cst))
+ sval = m_mgr->get_or_create_constant_svalue (null_pointer_node);
/* Try svalue match. */
if (get_equiv_class_by_svalue (sval, &result))
@@ -1289,6 +2189,8 @@ constraint_manager::eval_condition (equiv_class_id lhs_ec,
}
}
+ /* We don't use m_bounded_ranges_constraints here yet. */
+
return tristate (tristate::TS_UNKNOWN);
}
@@ -1414,6 +2316,12 @@ constraint_manager::eval_condition (equiv_class_id lhs_ec,
}
}
}
+
+ bounded_ranges_manager *mgr = get_range_manager ();
+ for (const auto &iter : m_bounded_ranges_constraints)
+ if (iter.m_ec_id == lhs_ec)
+ return iter.m_ranges->eval_condition (op, rhs_const, mgr);
+
/* Look at existing bounds on LHS_EC. */
range lhs_bounds = get_ec_bounds (lhs_ec);
return lhs_bounds.eval_condition (op, rhs_const);
@@ -1562,6 +2470,29 @@ constraint_manager::purge (const PurgeCriteria &p, purge_stats *stats)
con_idx++;
}
}
+
+ /* Update bounded_ranges_constraint instances. */
+ for (unsigned r_idx = 0;
+ r_idx < m_bounded_ranges_constraints.length (); )
+ {
+ bounded_ranges_constraint *brc
+ = &m_bounded_ranges_constraints[r_idx];
+
+ /* Remove if it refers to the deleted EC. */
+ if (brc->m_ec_id == ec_idx)
+ {
+ m_bounded_ranges_constraints.ordered_remove (r_idx);
+ if (stats)
+ stats->m_num_bounded_ranges_constraints++;
+ }
+ else
+ {
+ /* Renumber any EC ids that refer to ECs that have
+ had their idx changed. */
+ brc->m_ec_id.update_for_removal (ec_idx);
+ r_idx++;
+ }
+ }
}
else
ec_idx++;
@@ -1620,6 +2551,17 @@ constraint_manager::purge (const PurgeCriteria &p, purge_stats *stats)
c->m_lhs.update_for_removal (ec_idx);
c->m_rhs.update_for_removal (ec_idx);
}
+
+ /* Likewise for m_bounded_ranges_constraints. */
+ for (unsigned r_idx = 0;
+ r_idx < m_bounded_ranges_constraints.length ();
+ r_idx++)
+ {
+ bounded_ranges_constraint *brc
+ = &m_bounded_ranges_constraints[r_idx];
+ brc->m_ec_id.update_for_removal (ec_idx);
+ }
+
continue;
}
}
@@ -1663,6 +2605,29 @@ on_liveness_change (const svalue_set &live_svalues,
purge (p, NULL);
}
+class svalue_purger
+{
+public:
+ svalue_purger (const svalue *sval) : m_sval (sval) {}
+
+ bool should_purge_p (const svalue *sval) const
+ {
+ return sval->involves_p (m_sval);
+ }
+
+private:
+ const svalue *m_sval;
+};
+
+/* Purge any state involving SVAL. */
+
+void
+constraint_manager::purge_state_involving (const svalue *sval)
+{
+ svalue_purger p (sval);
+ purge (p, NULL);
+}
+
/* Comparator for use by constraint_manager::canonicalize.
Sort a pair of equiv_class instances, using the representative
svalue as a sort key. */
@@ -1738,6 +2703,9 @@ constraint_manager::canonicalize ()
used_ecs.add (m_equiv_classes[c->m_rhs.as_int ()]);
}
+ for (const auto &iter : m_bounded_ranges_constraints)
+ used_ecs.add (m_equiv_classes[iter.m_ec_id.as_int ()]);
+
/* Purge unused ECs: those that aren't used by constraints and
that effectively have only one svalue (either in m_constant
or in m_vars). */
@@ -1778,6 +2746,9 @@ constraint_manager::canonicalize ()
ec_id_map.update (&c->m_rhs);
}
+ for (auto &iter : m_bounded_ranges_constraints)
+ ec_id_map.update (&iter.m_ec_id);
+
/* Finally, sort the constraints. */
m_constraints.qsort (constraint_cmp);
}
@@ -1822,6 +2793,32 @@ public:
}
}
+ void on_ranges (const svalue *lhs_sval,
+ const bounded_ranges *ranges) FINAL OVERRIDE
+ {
+ for (const auto &iter : m_cm_b->m_bounded_ranges_constraints)
+ {
+ const equiv_class &ec_rhs = iter.m_ec_id.get_obj (*m_cm_b);
+ for (unsigned i = 0; i < ec_rhs.m_vars.length (); i++)
+ {
+ const svalue *rhs_sval = ec_rhs.m_vars[i];
+ if (lhs_sval == rhs_sval)
+ {
+ /* Union of the two ranges. */
+ auto_vec <const bounded_ranges *> pair (2);
+ pair.quick_push (ranges);
+ pair.quick_push (iter.m_ranges);
+ bounded_ranges_manager *ranges_mgr
+ = m_cm_b->get_range_manager ();
+ const bounded_ranges *union_
+ = ranges_mgr->get_or_create_union (pair);
+ bool sat = m_out->add_bounded_ranges (lhs_sval, union_);
+ gcc_assert (sat);
+ }
+ }
+ }
+ }
+
private:
const constraint_manager *m_cm_b;
constraint_manager *m_out;
@@ -1895,6 +2892,16 @@ constraint_manager::for_each_fact (fact_visitor *visitor) const
visitor->on_fact (ec_lhs.m_vars[i], code, ec_rhs.m_vars[j]);
}
}
+
+ for (const auto &iter : m_bounded_ranges_constraints)
+ {
+ const equiv_class &ec_lhs = iter.m_ec_id.get_obj (*this);
+ for (unsigned i = 0; i < ec_lhs.m_vars.length (); i++)
+ {
+ const svalue *lhs_sval = ec_lhs.m_vars[i];
+ visitor->on_ranges (lhs_sval, iter.m_ranges);
+ }
+ }
}
/* Assert that this object is valid. */
@@ -1932,12 +2939,24 @@ constraint_manager::validate () const
FOR_EACH_VEC_ELT (m_constraints, i, c)
{
gcc_assert (!c->m_lhs.null_p ());
- gcc_assert (c->m_lhs.as_int () <= (int)m_equiv_classes.length ());
+ gcc_assert (c->m_lhs.as_int () < (int)m_equiv_classes.length ());
gcc_assert (!c->m_rhs.null_p ());
- gcc_assert (c->m_rhs.as_int () <= (int)m_equiv_classes.length ());
+ gcc_assert (c->m_rhs.as_int () < (int)m_equiv_classes.length ());
+ }
+
+ for (const auto &iter : m_bounded_ranges_constraints)
+ {
+ gcc_assert (!iter.m_ec_id.null_p ());
+ gcc_assert (iter.m_ec_id.as_int () < (int)m_equiv_classes.length ());
}
}
+bounded_ranges_manager *
+constraint_manager::get_range_manager () const
+{
+ return m_mgr->get_range_manager ();
+}
+
#if CHECKING_P
namespace selftest {
@@ -2683,6 +3702,318 @@ test_many_constants ()
}
}
+/* Implementation detail of ASSERT_DUMP_BOUNDED_RANGE_EQ. */
+
+static void
+assert_dump_bounded_range_eq (const location &loc,
+ const bounded_range &range,
+ const char *expected)
+{
+ auto_fix_quotes sentinel;
+ pretty_printer pp;
+ pp_format_decoder (&pp) = default_tree_printer;
+ range.dump_to_pp (&pp, false);
+ ASSERT_STREQ_AT (loc, pp_formatted_text (&pp), expected);
+}
+
+/* Assert that BR.dump (false) is EXPECTED. */
+
+#define ASSERT_DUMP_BOUNDED_RANGE_EQ(BR, EXPECTED) \
+ SELFTEST_BEGIN_STMT \
+ assert_dump_bounded_range_eq ((SELFTEST_LOCATION), (BR), (EXPECTED)); \
+ SELFTEST_END_STMT
+
+/* Verify that bounded_range works as expected. */
+
+static void
+test_bounded_range ()
+{
+ tree u8_0 = build_int_cst (unsigned_char_type_node, 0);
+ tree u8_1 = build_int_cst (unsigned_char_type_node, 1);
+ tree u8_64 = build_int_cst (unsigned_char_type_node, 64);
+ tree u8_128 = build_int_cst (unsigned_char_type_node, 128);
+ tree u8_255 = build_int_cst (unsigned_char_type_node, 255);
+
+ tree s8_0 = build_int_cst (signed_char_type_node, 0);
+ tree s8_1 = build_int_cst (signed_char_type_node, 1);
+ tree s8_2 = build_int_cst (signed_char_type_node, 2);
+
+ bounded_range br_u8_0 (u8_0, u8_0);
+ ASSERT_DUMP_BOUNDED_RANGE_EQ (br_u8_0, "0");
+ ASSERT_TRUE (br_u8_0.contains_p (u8_0));
+ ASSERT_FALSE (br_u8_0.contains_p (u8_1));
+ ASSERT_TRUE (br_u8_0.contains_p (s8_0));
+ ASSERT_FALSE (br_u8_0.contains_p (s8_1));
+
+ bounded_range br_u8_0_1 (u8_0, u8_1);
+ ASSERT_DUMP_BOUNDED_RANGE_EQ (br_u8_0_1, "[0, 1]");
+
+ bounded_range tmp (NULL_TREE, NULL_TREE);
+ ASSERT_TRUE (br_u8_0.intersects_p (br_u8_0_1, &tmp));
+ ASSERT_DUMP_BOUNDED_RANGE_EQ (tmp, "0");
+
+ bounded_range br_u8_64_128 (u8_64, u8_128);
+ ASSERT_DUMP_BOUNDED_RANGE_EQ (br_u8_64_128, "[64, 128]");
+
+ ASSERT_FALSE (br_u8_0.intersects_p (br_u8_64_128, NULL));
+ ASSERT_FALSE (br_u8_64_128.intersects_p (br_u8_0, NULL));
+
+ bounded_range br_u8_128_255 (u8_128, u8_255);
+ ASSERT_DUMP_BOUNDED_RANGE_EQ (br_u8_128_255, "[128, 255]");
+ ASSERT_TRUE (br_u8_128_255.intersects_p (br_u8_64_128, &tmp));
+ ASSERT_DUMP_BOUNDED_RANGE_EQ (tmp, "128");
+
+ bounded_range br_s8_2 (s8_2, s8_2);
+ ASSERT_DUMP_BOUNDED_RANGE_EQ (br_s8_2, "2");
+ bounded_range br_s8_2_u8_255 (s8_2, u8_255);
+ ASSERT_DUMP_BOUNDED_RANGE_EQ (br_s8_2_u8_255, "[2, 255]");
+}
+
+/* Implementation detail of ASSERT_DUMP_BOUNDED_RANGES_EQ. */
+
+static void
+assert_dump_bounded_ranges_eq (const location &loc,
+ const bounded_ranges *ranges,
+ const char *expected)
+{
+ auto_fix_quotes sentinel;
+ pretty_printer pp;
+ pp_format_decoder (&pp) = default_tree_printer;
+ ranges->dump_to_pp (&pp, false);
+ ASSERT_STREQ_AT (loc, pp_formatted_text (&pp), expected);
+}
+
+/* Implementation detail of ASSERT_DUMP_BOUNDED_RANGES_EQ. */
+
+static void
+assert_dump_bounded_ranges_eq (const location &loc,
+ const bounded_ranges &ranges,
+ const char *expected)
+{
+ auto_fix_quotes sentinel;
+ pretty_printer pp;
+ pp_format_decoder (&pp) = default_tree_printer;
+ ranges.dump_to_pp (&pp, false);
+ ASSERT_STREQ_AT (loc, pp_formatted_text (&pp), expected);
+}
+
+/* Assert that BRS.dump (false) is EXPECTED. */
+
+#define ASSERT_DUMP_BOUNDED_RANGES_EQ(BRS, EXPECTED) \
+ SELFTEST_BEGIN_STMT \
+ assert_dump_bounded_ranges_eq ((SELFTEST_LOCATION), (BRS), (EXPECTED)); \
+ SELFTEST_END_STMT
+
+/* Verify that the bounded_ranges class works as expected. */
+
+static void
+test_bounded_ranges ()
+{
+ bounded_ranges_manager mgr;
+
+ tree ch0 = build_int_cst (unsigned_char_type_node, 0);
+ tree ch1 = build_int_cst (unsigned_char_type_node, 1);
+ tree ch2 = build_int_cst (unsigned_char_type_node, 2);
+ tree ch3 = build_int_cst (unsigned_char_type_node, 3);
+ tree ch128 = build_int_cst (unsigned_char_type_node, 128);
+ tree ch129 = build_int_cst (unsigned_char_type_node, 129);
+ tree ch254 = build_int_cst (unsigned_char_type_node, 254);
+ tree ch255 = build_int_cst (unsigned_char_type_node, 255);
+
+ const bounded_ranges *empty = mgr.get_or_create_empty ();
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (empty, "{}");
+
+ const bounded_ranges *point0 = mgr.get_or_create_point (ch0);
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (point0, "{0}");
+
+ const bounded_ranges *point1 = mgr.get_or_create_point (ch1);
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (point1, "{1}");
+
+ const bounded_ranges *point2 = mgr.get_or_create_point (ch2);
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (point2, "{2}");
+
+ const bounded_ranges *range0_128 = mgr.get_or_create_range (ch0, ch128);
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (range0_128, "{[0, 128]}");
+
+ const bounded_ranges *range0_255 = mgr.get_or_create_range (ch0, ch255);
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (range0_255, "{[0, 255]}");
+
+ ASSERT_FALSE (empty->contain_p (ch0));
+ ASSERT_FALSE (empty->contain_p (ch1));
+ ASSERT_FALSE (empty->contain_p (ch255));
+
+ ASSERT_TRUE (point0->contain_p (ch0));
+ ASSERT_FALSE (point0->contain_p (ch1));
+ ASSERT_FALSE (point0->contain_p (ch255));
+
+ ASSERT_FALSE (point1->contain_p (ch0));
+ ASSERT_TRUE (point1->contain_p (ch1));
+ ASSERT_FALSE (point0->contain_p (ch255));
+
+ ASSERT_TRUE (range0_128->contain_p (ch0));
+ ASSERT_TRUE (range0_128->contain_p (ch1));
+ ASSERT_TRUE (range0_128->contain_p (ch128));
+ ASSERT_FALSE (range0_128->contain_p (ch129));
+ ASSERT_FALSE (range0_128->contain_p (ch254));
+ ASSERT_FALSE (range0_128->contain_p (ch255));
+
+ const bounded_ranges *inv0_128
+ = mgr.get_or_create_inverse (range0_128, unsigned_char_type_node);
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (inv0_128, "{[129, 255]}");
+
+ const bounded_ranges *range128_129 = mgr.get_or_create_range (ch128, ch129);
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (range128_129, "{[128, 129]}");
+
+ const bounded_ranges *inv128_129
+ = mgr.get_or_create_inverse (range128_129, unsigned_char_type_node);
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (inv128_129, "{[0, 127], [130, 255]}");
+
+ /* Intersection. */
+ {
+ /* Intersection of disjoint ranges should be empty set. */
+ const bounded_ranges *intersect0_1
+ = mgr.get_or_create_intersection (point0, point1);
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (intersect0_1, "{}");
+ }
+
+ /* Various tests of "union of ranges". */
+ {
+ {
+ /* Touching points should be merged into a range. */
+ auto_vec <const bounded_ranges *> v;
+ v.safe_push (point0);
+ v.safe_push (point1);
+ const bounded_ranges *union_0_and_1 = mgr.get_or_create_union (v);
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (union_0_and_1, "{[0, 1]}");
+ }
+
+ {
+ /* Overlapping and out-of-order. */
+ auto_vec <const bounded_ranges *> v;
+ v.safe_push (inv0_128); // {[129, 255]}
+ v.safe_push (range128_129);
+ const bounded_ranges *union_129_255_and_128_129
+ = mgr.get_or_create_union (v);
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (union_129_255_and_128_129, "{[128, 255]}");
+ }
+
+ {
+ /* Union of R and inverse(R) should be full range of type. */
+ auto_vec <const bounded_ranges *> v;
+ v.safe_push (range128_129);
+ v.safe_push (inv128_129);
+ const bounded_ranges *union_ = mgr.get_or_create_union (v);
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (union_, "{[0, 255]}");
+ }
+
+ /* Union with an endpoint. */
+ {
+ const bounded_ranges *range2_to_255
+ = mgr.get_or_create_range (ch2, ch255);
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (range2_to_255, "{[2, 255]}");
+ auto_vec <const bounded_ranges *> v;
+ v.safe_push (point0);
+ v.safe_push (point2);
+ v.safe_push (range2_to_255);
+ const bounded_ranges *union_ = mgr.get_or_create_union (v);
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (union_, "{0, [2, 255]}");
+ }
+
+ /* Construct from vector of bounded_range. */
+ {
+ auto_vec<bounded_range> v;
+ v.safe_push (bounded_range (ch2, ch2));
+ v.safe_push (bounded_range (ch0, ch0));
+ v.safe_push (bounded_range (ch2, ch255));
+ bounded_ranges br (v);
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (&br, "{0, [2, 255]}");
+ }
+ }
+
+ /* Various tests of "inverse". */
+ {
+ {
+ const bounded_ranges *range_1_to_3 = mgr.get_or_create_range (ch1, ch3);
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (range_1_to_3, "{[1, 3]}");
+ const bounded_ranges *inv
+ = mgr.get_or_create_inverse (range_1_to_3, unsigned_char_type_node);
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (inv, "{0, [4, 255]}");
+ }
+ {
+ const bounded_ranges *range_1_to_255
+ = mgr.get_or_create_range (ch1, ch255);
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (range_1_to_255, "{[1, 255]}");
+ const bounded_ranges *inv
+ = mgr.get_or_create_inverse (range_1_to_255, unsigned_char_type_node);
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (inv, "{0}");
+ }
+ {
+ const bounded_ranges *range_0_to_254
+ = mgr.get_or_create_range (ch0, ch254);
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (range_0_to_254, "{[0, 254]}");
+ const bounded_ranges *inv
+ = mgr.get_or_create_inverse (range_0_to_254, unsigned_char_type_node);
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (inv, "{255}");
+ }
+ }
+
+ /* "case 'a'-'z': case 'A-Z':" vs "default:", for ASCII. */
+ {
+ tree ch65 = build_int_cst (unsigned_char_type_node, 65);
+ tree ch90 = build_int_cst (unsigned_char_type_node, 90);
+
+ tree ch97 = build_int_cst (unsigned_char_type_node, 97);
+ tree ch122 = build_int_cst (unsigned_char_type_node, 122);
+
+ const bounded_ranges *A_to_Z = mgr.get_or_create_range (ch65, ch90);
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (A_to_Z, "{[65, 90]}");
+ const bounded_ranges *a_to_z = mgr.get_or_create_range (ch97, ch122);
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (a_to_z, "{[97, 122]}");
+ auto_vec <const bounded_ranges *> v;
+ v.safe_push (A_to_Z);
+ v.safe_push (a_to_z);
+ const bounded_ranges *label_ranges = mgr.get_or_create_union (v);
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (label_ranges, "{[65, 90], [97, 122]}");
+ const bounded_ranges *default_ranges
+ = mgr.get_or_create_inverse (label_ranges, unsigned_char_type_node);
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (default_ranges,
+ "{[0, 64], [91, 96], [123, 255]}");
+ }
+
+ /* Verify ranges from ops. */
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (bounded_ranges (EQ_EXPR, ch128),
+ "{128}");
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (bounded_ranges (NE_EXPR, ch128),
+ "{[0, 127], [129, 255]}");
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (bounded_ranges (LT_EXPR, ch128),
+ "{[0, 127]}");
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (bounded_ranges (LE_EXPR, ch128),
+ "{[0, 128]}");
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (bounded_ranges (GE_EXPR, ch128),
+ "{[128, 255]}");
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (bounded_ranges (GT_EXPR, ch128),
+ "{[129, 255]}");
+ /* Ops at endpoints of type ranges. */
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (bounded_ranges (LE_EXPR, ch0),
+ "{0}");
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (bounded_ranges (LT_EXPR, ch0),
+ "{}");
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (bounded_ranges (NE_EXPR, ch0),
+ "{[1, 255]}");
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (bounded_ranges (GE_EXPR, ch255),
+ "{255}");
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (bounded_ranges (GT_EXPR, ch255),
+ "{}");
+ ASSERT_DUMP_BOUNDED_RANGES_EQ (bounded_ranges (NE_EXPR, ch255),
+ "{[0, 254]}");
+
+ /* Verify that instances are consolidated by mgr. */
+ ASSERT_EQ (mgr.get_or_create_point (ch0),
+ mgr.get_or_create_point (ch0));
+ ASSERT_NE (mgr.get_or_create_point (ch0),
+ mgr.get_or_create_point (ch1));
+}
+
/* Run the selftests in this file, temporarily overriding
flag_analyzer_transitivity with TRANSITIVITY. */
@@ -2702,6 +4033,8 @@ run_constraint_manager_tests (bool transitivity)
test_constraint_impl ();
test_equality ();
test_many_constants ();
+ test_bounded_range ();
+ test_bounded_ranges ();
flag_analyzer_transitivity = saved_flag_analyzer_transitivity;
}
diff --git a/gcc/analyzer/constraint-manager.h b/gcc/analyzer/constraint-manager.h
index 3173610..0a430ea 100644
--- a/gcc/analyzer/constraint-manager.h
+++ b/gcc/analyzer/constraint-manager.h
@@ -64,6 +64,164 @@ struct range
bound m_upper_bound;
};
+/* A closed range of values with constant integer bounds
+ e.g. [3, 5] for the set {3, 4, 5}. */
+
+struct bounded_range
+{
+ bounded_range (const_tree lower, const_tree upper);
+
+ void dump_to_pp (pretty_printer *pp, bool show_types) const;
+ void dump (bool show_types) const;
+
+ json::object *to_json () const;
+
+ bool contains_p (tree cst) const;
+
+ bool intersects_p (const bounded_range &other,
+ bounded_range *out) const;
+
+ bool operator== (const bounded_range &other) const;
+ bool operator!= (const bounded_range &other) const
+ {
+ return !(*this == other);
+ }
+
+ static int cmp (const bounded_range &a, const bounded_range &b);
+
+ tree m_lower;
+ tree m_upper;
+
+private:
+ static void set_json_attr (json::object *obj, const char *name, tree value);
+};
+
+/* A collection of bounded_range instances, suitable
+ for representing the ranges on a case label within a switch
+ statement. */
+
+struct bounded_ranges
+{
+public:
+ typedef bounded_ranges key_t;
+
+ bounded_ranges (const bounded_range &range);
+ bounded_ranges (const vec<bounded_range> &ranges);
+ bounded_ranges (enum tree_code op, tree rhs_const);
+
+ bool operator== (const bounded_ranges &other) const;
+
+ hashval_t get_hash () const { return m_hash; }
+
+ void dump_to_pp (pretty_printer *pp, bool show_types) const;
+ void dump (bool show_types) const;
+
+ json::value *to_json () const;
+
+ tristate eval_condition (enum tree_code op,
+ tree rhs_const,
+ bounded_ranges_manager *mgr) const;
+
+ bool contain_p (tree cst) const;
+ bool empty_p () const { return m_ranges.length () == 0; }
+
+ static int cmp (const bounded_ranges *a, const bounded_ranges *b);
+
+private:
+ void canonicalize ();
+ void validate () const;
+
+ friend class bounded_ranges_manager;
+
+ auto_vec<bounded_range> m_ranges;
+ hashval_t m_hash;
+};
+
+} // namespace ana
+
+template <> struct default_hash_traits<bounded_ranges::key_t>
+: public member_function_hash_traits<bounded_ranges::key_t>
+{
+ static const bool empty_zero_p = true;
+};
+
+namespace ana {
+
+/* An object to own and consolidate bounded_ranges instances.
+ This also caches the mapping from switch_cfg_superedge
+ bounded_ranges instances, so that get_or_create_ranges_for_switch is
+ memoized. */
+
+class bounded_ranges_manager
+{
+public:
+ ~bounded_ranges_manager ();
+
+ const bounded_ranges *
+ get_or_create_ranges_for_switch (const switch_cfg_superedge *edge,
+ const gswitch *switch_stmt);
+
+ const bounded_ranges *get_or_create_empty ();
+ const bounded_ranges *get_or_create_point (const_tree value);
+ const bounded_ranges *get_or_create_range (const_tree lower_bound,
+ const_tree upper_bound);
+ const bounded_ranges *
+ get_or_create_union (const vec <const bounded_ranges *> &others);
+ const bounded_ranges *
+ get_or_create_intersection (const bounded_ranges *a,
+ const bounded_ranges *b);
+ const bounded_ranges *
+ get_or_create_inverse (const bounded_ranges *other, tree type);
+
+ void log_stats (logger *logger, bool show_objs) const;
+
+private:
+ const bounded_ranges *
+ create_ranges_for_switch (const switch_cfg_superedge &edge,
+ const gswitch *switch_stmt);
+
+ const bounded_ranges *
+ make_case_label_ranges (const gswitch *switch_stmt,
+ tree case_label);
+
+ const bounded_ranges *consolidate (bounded_ranges *);
+
+ struct hash_traits_t : public typed_noop_remove<bounded_ranges *>
+ {
+ typedef bounded_ranges *key_type;
+ typedef bounded_ranges *value_type;
+
+ static inline bool
+ equal (const key_type &k1, const key_type &k2)
+ {
+ return *k1 == *k2;
+ }
+ static inline hashval_t
+ hash (const key_type &k)
+ {
+ return k->get_hash ();
+ }
+ static inline bool is_empty (key_type k) { return k == NULL; }
+ static inline void mark_empty (key_type &k) { k = NULL; }
+ static inline bool is_deleted (key_type k)
+ {
+ return k == reinterpret_cast<key_type> (1);
+ }
+
+ static const bool empty_zero_p = true;
+ };
+ struct traits_t : public simple_hashmap_traits<hash_traits_t,
+ bounded_ranges *>
+ {
+ };
+ typedef hash_map<bounded_ranges *, bounded_ranges *, traits_t> map_t;
+ map_t m_map;
+
+ typedef hash_map<const switch_cfg_superedge *,
+ const bounded_ranges *> edge_cache_t;
+ edge_cache_t m_edge_cache;
+};
+
/* An equivalence class within a constraint manager: a set of
svalues that are known to all be equal to each other,
together with an optional tree constant that they are equal to. */
@@ -190,6 +348,33 @@ class fact_visitor
virtual void on_fact (const svalue *lhs,
enum tree_code,
const svalue *rhs) = 0;
+ virtual void on_ranges (const svalue *lhs,
+ const bounded_ranges *ranges) = 0;
+};
+
+class bounded_ranges_constraint
+{
+public:
+ bounded_ranges_constraint (equiv_class_id ec_id,
+ const bounded_ranges *ranges)
+ : m_ec_id (ec_id), m_ranges (ranges)
+ {
+ }
+
+ void print (pretty_printer *pp, const constraint_manager &cm) const;
+
+ json::object *to_json () const;
+
+ bool operator== (const bounded_ranges_constraint &other) const;
+ bool operator!= (const bounded_ranges_constraint &other) const
+ {
+ return !(*this == other);
+ }
+
+ void add_to_hash (inchash::hash *hstate) const;
+
+ equiv_class_id m_ec_id;
+ const bounded_ranges *m_ranges;
};
/* A collection of equivalence classes and constraints on them.
@@ -248,6 +433,9 @@ public:
enum tree_code op,
equiv_class_id rhs_ec_id);
+ bool add_bounded_ranges (const svalue *sval,
+ const bounded_ranges *ranges);
+
bool get_equiv_class_by_svalue (const svalue *sval,
equiv_class_id *out) const;
equiv_class_id get_or_add_equiv_class (const svalue *sval);
@@ -269,6 +457,7 @@ public:
void on_liveness_change (const svalue_set &live_svalues,
const region_model *model);
+ void purge_state_involving (const svalue *sval);
void canonicalize ();
@@ -280,8 +469,11 @@ public:
void validate () const;
+ bounded_ranges_manager *get_range_manager () const;
+
auto_delete_vec<equiv_class> m_equiv_classes;
auto_vec<constraint> m_constraints;
+ auto_vec<bounded_ranges_constraint> m_bounded_ranges_constraints;
private:
void add_constraint_internal (equiv_class_id lhs_id,
diff --git a/gcc/analyzer/diagnostic-manager.cc b/gcc/analyzer/diagnostic-manager.cc
index 1a3535c..7ffe000 100644
--- a/gcc/analyzer/diagnostic-manager.cc
+++ b/gcc/analyzer/diagnostic-manager.cc
@@ -95,6 +95,8 @@ public:
feasibility_problem **out_problem);
private:
+ DISABLE_COPY_AND_ASSIGN(epath_finder);
+
exploded_path *explore_feasible_paths (const exploded_node *target_enode,
const char *desc, unsigned diag_idx);
bool process_worklist_item (feasible_worklist *worklist,
@@ -290,6 +292,34 @@ private:
const shortest_paths<eg_traits, exploded_path> &m_sep;
};
+/* When we're building the exploded graph we want to simplify
+ overly-complicated symbolic values down to "UNKNOWN" to try to avoid
+ state explosions and unbounded chains of exploration.
+
+ However, when we're building the feasibility graph for a diagnostic
+ (actually a tree), we don't want UNKNOWN values, as conditions on them
+   are also unknown: we don't want a contradiction such as (VAL != 0)
+   followed by (VAL == 0) along the same path.
+
+ Hence this is an RAII class for temporarily disabling complexity-checking
+ in the region_model_manager, for use within
+ epath_finder::explore_feasible_paths. */
+
+class auto_disable_complexity_checks
+{
+public:
+ auto_disable_complexity_checks (region_model_manager *mgr) : m_mgr (mgr)
+ {
+ m_mgr->disable_complexity_check ();
+ }
+ ~auto_disable_complexity_checks ()
+ {
+ m_mgr->enable_complexity_check ();
+ }
+private:
+ region_model_manager *m_mgr;
+};
+
/* Attempt to find the shortest feasible path from the origin to
TARGET_ENODE by iteratively building a feasible_graph, in which
every path to a feasible_node is feasible by construction.
@@ -342,6 +372,8 @@ epath_finder::explore_feasible_paths (const exploded_node *target_enode,
logger *logger = get_logger ();
LOG_SCOPE (logger);
+ region_model_manager *mgr = m_eg.get_engine ()->get_model_manager ();
+
/* Determine the shortest path to TARGET_ENODE from each node in
the exploded graph. */
shortest_paths<eg_traits, exploded_path> sep
@@ -361,8 +393,7 @@ epath_finder::explore_feasible_paths (const exploded_node *target_enode,
/* Populate the worklist with the origin node. */
{
- feasibility_state init_state (m_eg.get_engine ()->get_model_manager (),
- m_eg.get_supergraph ());
+ feasibility_state init_state (mgr, m_eg.get_supergraph ());
feasible_node *origin = fg.add_node (m_eg.get_origin (), init_state, 0);
worklist.add_node (origin);
}
@@ -374,11 +405,15 @@ epath_finder::explore_feasible_paths (const exploded_node *target_enode,
/* Set this if we find a feasible path to TARGET_ENODE. */
exploded_path *best_path = NULL;
- while (process_worklist_item (&worklist, tg, &fg, target_enode, diag_idx,
- &best_path))
- {
- /* Empty; the work is done within process_worklist_item. */
- }
+ {
+ auto_disable_complexity_checks sentinel (mgr);
+
+ while (process_worklist_item (&worklist, tg, &fg, target_enode, diag_idx,
+ &best_path))
+ {
+ /* Empty; the work is done within process_worklist_item. */
+ }
+ }
if (logger)
{
@@ -485,8 +520,7 @@ epath_finder::process_worklist_item (feasible_worklist *worklist,
gcc_assert (rc);
fg->add_feasibility_problem (fnode,
succ_eedge,
- *rc);
- delete rc;
+ rc);
/* Give up if there have been too many infeasible edges. */
if (fg->get_num_infeasible ()
@@ -720,6 +754,18 @@ saved_diagnostic::add_duplicate (saved_diagnostic *other)
m_duplicates.safe_push (other);
}
+/* Return true if this diagnostic supercedes OTHER, in which case OTHER
+   should not be emitted. */
+
+bool
+saved_diagnostic::supercedes_p (const saved_diagnostic &other) const
+{
+ /* They should be at the same stmt. */
+ if (m_stmt != other.m_stmt)
+ return false;
+ return m_d->supercedes_p (*other.m_d);
+}
+
/* State for building a checker_path from a particular exploded_path.
In particular, this precomputes reachability information: the set of
+   source enodes for which a path can be found to the diagnostic enode. */
@@ -789,7 +835,7 @@ diagnostic_manager::diagnostic_manager (logger *logger, engine *eng,
void
diagnostic_manager::add_diagnostic (const state_machine *sm,
- const exploded_node *enode,
+ exploded_node *enode,
const supernode *snode, const gimple *stmt,
stmt_finder *finder,
tree var,
@@ -807,16 +853,17 @@ diagnostic_manager::add_diagnostic (const state_machine *sm,
= new saved_diagnostic (sm, enode, snode, stmt, finder, var, sval,
state, d, m_saved_diagnostics.length ());
m_saved_diagnostics.safe_push (sd);
+ enode->add_diagnostic (sd);
if (get_logger ())
- log ("adding saved diagnostic %i at SN %i: %qs",
+ log ("adding saved diagnostic %i at SN %i to EN %i: %qs",
sd->get_index (),
- snode->m_index, d->get_kind ());
+ snode->m_index, enode->m_index, d->get_kind ());
}
/* Queue pending_diagnostic D at ENODE for later emission. */
void
-diagnostic_manager::add_diagnostic (const exploded_node *enode,
+diagnostic_manager::add_diagnostic (exploded_node *enode,
const supernode *snode, const gimple *stmt,
stmt_finder *finder,
pending_diagnostic *d)
@@ -1018,6 +1065,38 @@ public:
}
}
+ /* Handle interactions between the dedupe winners, so that some
+ diagnostics can supercede others (of different kinds).
+
+     We want use-after-free to supercede use-of-uninitialized-value,
+ so that if we have these at the same stmt, we don't emit
+ a use-of-uninitialized, just the use-after-free. */
+
+ void handle_interactions (diagnostic_manager *dm)
+ {
+ LOG_SCOPE (dm->get_logger ());
+ auto_vec<const dedupe_key *> superceded;
+ for (auto outer : m_map)
+ {
+ const saved_diagnostic *outer_sd = outer.second;
+ for (auto inner : m_map)
+ {
+ const saved_diagnostic *inner_sd = inner.second;
+ if (inner_sd->supercedes_p (*outer_sd))
+ {
+ superceded.safe_push (outer.first);
+ if (dm->get_logger ())
+ dm->log ("sd[%i] \"%s\" superceded by sd[%i] \"%s\"",
+ outer_sd->get_index (), outer_sd->m_d->get_kind (),
+ inner_sd->get_index (), inner_sd->m_d->get_kind ());
+ break;
+ }
+ }
+ }
+ for (auto iter : superceded)
+ m_map.remove (iter);
+ }
+
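
A standalone sketch of the pass above (plain structs in place of dedupe_key/saved_diagnostic; the policy function is a made-up stand-in for pending_diagnostic::supercedes_p):

#include <string>
#include <vector>

struct diag { std::string kind; int stmt_id; };

// Hypothetical policy mirroring the comment above: use-after-free wins
// over use-of-uninitialized-value at the same statement.
static bool
supercedes_p (const diag &winner, const diag &loser)
{
  return winner.stmt_id == loser.stmt_id
         && winner.kind == "use-after-free"
         && loser.kind == "use-of-uninitialized-value";
}

// Keep only the candidates that no other candidate supersedes.
static void
handle_interactions (std::vector<diag> &candidates)
{
  std::vector<diag> kept;
  for (const diag &d : candidates)
    {
      bool superceded = false;
      for (const diag &other : candidates)
        if (supercedes_p (other, d))
          {
            superceded = true;
            break;
          }
      if (!superceded)
        kept.push_back (d);
    }
  candidates = kept;
}
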
/* Emit the simplest diagnostic within each set. */
void emit_best (diagnostic_manager *dm,
@@ -1092,6 +1171,8 @@ diagnostic_manager::emit_saved_diagnostics (const exploded_graph &eg)
FOR_EACH_VEC_ELT (m_saved_diagnostics, i, sd)
best_candidates.add (get_logger (), &pf, sd);
+ best_candidates.handle_interactions (this);
+
/* For each dedupe-key, call emit_saved_diagnostic on the "best"
saved_diagnostic. */
best_candidates.emit_best (this, eg);
@@ -1161,6 +1242,17 @@ diagnostic_manager::emit_saved_diagnostic (const exploded_graph &eg,
inform_n (loc, num_dupes,
"%i duplicate", "%i duplicates",
num_dupes);
+ if (flag_dump_analyzer_exploded_paths)
+ {
+ auto_timevar tv (TV_ANALYZER_DUMP);
+ pretty_printer pp;
+ pp_printf (&pp, "%s.%i.%s.epath.txt",
+ dump_base_name, sd.get_index (), sd.m_d->get_kind ());
+ char *filename = xstrdup (pp_formatted_text (&pp));
+ epath->dump_to_file (filename, eg.get_ext_state ());
+ inform (loc, "exploded path written to %qs", filename);
+ free (filename);
+ }
}
delete pp;
}
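(Illustration, not part of the patch: flag_dump_analyzer_exploded_paths should correspond to the -fdump-analyzer-exploded-paths option, so each saved diagnostic gets its exploded path written to "<dump_base_name>.<index>.<kind>.epath.txt" per the pp_printf above. A minimal way to exercise it, assuming a leak diagnostic:)

  /* leak.c -- compile with:
       gcc -c leak.c -fanalyzer -fdump-analyzer-exploded-paths
     and expect a leak diagnostic plus a note along the lines of
     "exploded path written to 'leak.c.0.malloc_leak.epath.txt'"
     (the exact index and kind string here are illustrative).  */

  #include <stdlib.h>

  void test (void)
  {
    void *p = malloc (16);
  }  /* 'p' leaks here */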
@@ -1374,6 +1466,14 @@ struct null_assignment_sm_context : public sm_context
return current;
}
+ state_machine::state_t get_state (const gimple *stmt ATTRIBUTE_UNUSED,
+ const svalue *sval) FINAL OVERRIDE
+ {
+ const sm_state_map *old_smap = m_old_state->m_checker_states[m_sm_idx];
+ state_machine::state_t current = old_smap->get_state (sval, m_ext_state);
+ return current;
+ }
+
void set_next_state (const gimple *stmt,
tree var,
state_machine::state_t to,
@@ -1398,6 +1498,28 @@ struct null_assignment_sm_context : public sm_context
*m_new_state));
}
+ void set_next_state (const gimple *stmt,
+ const svalue *sval,
+ state_machine::state_t to,
+ tree origin ATTRIBUTE_UNUSED) FINAL OVERRIDE
+ {
+ state_machine::state_t from = get_state (stmt, sval);
+ if (from != m_sm.get_start_state ())
+ return;
+
+ const supernode *supernode = m_point->get_supernode ();
+ int stack_depth = m_point->get_stack_depth ();
+
+ m_emission_path->add_event (new state_change_event (supernode,
+ m_stmt,
+ stack_depth,
+ m_sm,
+ sval,
+ from, to,
+ NULL,
+ *m_new_state));
+ }
+
void warn (const supernode *, const gimple *,
tree, pending_diagnostic *d) FINAL OVERRIDE
{
@@ -1409,6 +1531,11 @@ struct null_assignment_sm_context : public sm_context
return expr;
}
+ tree get_diagnostic_tree (const svalue *sval) FINAL OVERRIDE
+ {
+ return m_new_state->m_region_model->get_representative_tree (sval);
+ }
+
state_machine::state_t get_global_state () const FINAL OVERRIDE
{
return 0;
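(Illustration, not part of the patch: the svalue-based overloads above let null_assignment_sm_context record state-change events for values that may have no convenient tree, so emitted paths can point at where a NULL came from. A sketch of the kind of user code involved:)

  void test (int flag)
  {
    int *p = 0;   /* the null assignment is recorded as a path event,
                     e.g. "'p' is NULL" */
    if (flag)
      *p = 42;    /* "...dereference of NULL 'p'" reported here */
  }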
@@ -1584,7 +1711,7 @@ diagnostic_manager::add_events_for_eedge (const path_builder &pb,
"this path would have been rejected as infeasible"
" at this edge: ");
pb.get_feasibility_problem ()->dump_to_pp (&pp);
- emission_path->add_event (new custom_event
+ emission_path->add_event (new precanned_custom_event
(dst_point.get_location (),
dst_point.get_fndecl (),
dst_stack_depth,
@@ -1965,18 +2092,28 @@ diagnostic_manager::prune_for_sm_diagnostic (checker_path *path,
case EK_CALL_EDGE:
{
call_event *event = (call_event *)base_event;
- const callgraph_superedge& cg_superedge
- = event->get_callgraph_superedge ();
const region_model *callee_model
= event->m_eedge.m_dest->get_state ().m_region_model;
+ const region_model *caller_model
+ = event->m_eedge.m_src->get_state ().m_region_model;
tree callee_var = callee_model->get_representative_tree (sval);
- /* We could just use caller_model->get_representative_tree (sval);
- to get the caller_var, but for now use
- map_expr_from_callee_to_caller so as to only record critical
- state for parms and the like. */
callsite_expr expr;
- tree caller_var
- = cg_superedge.map_expr_from_callee_to_caller (callee_var, &expr);
+
+ tree caller_var;
+ if (event->m_sedge)
+ {
+ const callgraph_superedge& cg_superedge
+ = event->get_callgraph_superedge ();
+ if (cg_superedge.m_cedge)
+ caller_var
+ = cg_superedge.map_expr_from_callee_to_caller (callee_var,
+ &expr);
+ else
+ caller_var = caller_model->get_representative_tree (sval);
+ }
+ else
+ caller_var = caller_model->get_representative_tree (sval);
+
if (caller_var)
{
if (get_logger ())
@@ -1998,15 +2135,28 @@ diagnostic_manager::prune_for_sm_diagnostic (checker_path *path,
if (sval)
{
return_event *event = (return_event *)base_event;
- const callgraph_superedge& cg_superedge
- = event->get_callgraph_superedge ();
- const region_model *caller_model
- = event->m_eedge.m_dest->get_state ().m_region_model;
- tree caller_var = caller_model->get_representative_tree (sval);
+ const region_model *caller_model
+ = event->m_eedge.m_dest->get_state ().m_region_model;
+ tree caller_var = caller_model->get_representative_tree (sval);
+ const region_model *callee_model
+ = event->m_eedge.m_src->get_state ().m_region_model;
callsite_expr expr;
- tree callee_var
- = cg_superedge.map_expr_from_caller_to_callee (caller_var,
- &expr);
+
+ tree callee_var;
+ if (event->m_sedge)
+ {
+ const callgraph_superedge& cg_superedge
+ = event->get_callgraph_superedge ();
+ if (cg_superedge.m_cedge)
+ callee_var
+ = cg_superedge.map_expr_from_caller_to_callee (caller_var,
+ &expr);
+ else
+ callee_var = callee_model->get_representative_tree (sval);
+ }
+ else
+ callee_var = callee_model->get_representative_tree (sval);
+
if (callee_var)
{
if (get_logger ())
@@ -2078,7 +2228,7 @@ diagnostic_manager::prune_interproc_events (checker_path *path) const
do
{
changed = false;
- int idx = path->num_events () - 1;
+ int idx = (signed)path->num_events () - 1;
while (idx >= 0)
{
/* Prune [..., call, function-entry, return, ...] triples. */
@@ -2197,7 +2347,9 @@ diagnostic_manager::consolidate_conditions (checker_path *path) const
if (flag_analyzer_verbose_edges)
return;
- for (unsigned start_idx = 0; start_idx < path->num_events () - 1; start_idx++)
+ for (int start_idx = 0;
+ start_idx < (signed)path->num_events () - 1;
+ start_idx++)
{
if (path->cfg_edge_pair_at_p (start_idx))
{
@@ -2228,7 +2380,7 @@ diagnostic_manager::consolidate_conditions (checker_path *path) const
[start_idx, next_idx)
where all apart from the final event are on the same line,
and all are either TRUE or FALSE edges, matching the initial. */
- unsigned next_idx = start_idx + 2;
+ int next_idx = start_idx + 2;
while (path->cfg_edge_pair_at_p (next_idx)
&& same_line_as_p (start_exp_loc, path, next_idx))
{
diff --git a/gcc/analyzer/diagnostic-manager.h b/gcc/analyzer/diagnostic-manager.h
index 1454977..ad2eb4d 100644
--- a/gcc/analyzer/diagnostic-manager.h
+++ b/gcc/analyzer/diagnostic-manager.h
@@ -58,6 +58,8 @@ public:
unsigned get_index () const { return m_idx; }
+ bool supercedes_p (const saved_diagnostic &other) const;
+
//private:
const state_machine *m_sm;
const exploded_node *m_enode;
@@ -101,7 +103,7 @@ public:
json::object *to_json () const;
void add_diagnostic (const state_machine *sm,
- const exploded_node *enode,
+ exploded_node *enode,
const supernode *snode, const gimple *stmt,
stmt_finder *finder,
tree var,
@@ -109,7 +111,7 @@ public:
state_machine::state_t state,
pending_diagnostic *d);
- void add_diagnostic (const exploded_node *enode,
+ void add_diagnostic (exploded_node *enode,
const supernode *snode, const gimple *stmt,
stmt_finder *finder,
pending_diagnostic *d);
diff --git a/gcc/analyzer/engine.cc b/gcc/analyzer/engine.cc
index 5792c14..24f0931 100644
--- a/gcc/analyzer/engine.cc
+++ b/gcc/analyzer/engine.cc
@@ -62,8 +62,11 @@ along with GCC; see the file COPYING3. If not see
#include "analyzer/checker-path.h"
#include "analyzer/state-purge.h"
#include "analyzer/bar-chart.h"
+#include "analyzer/call-info.h"
#include <zlib.h>
#include "plugin.h"
+#include "target.h"
+#include <memory>
/* For an overview, see gcc/doc/analyzer.texi. */
@@ -75,9 +78,11 @@ namespace ana {
impl_region_model_context::
impl_region_model_context (exploded_graph &eg,
- const exploded_node *enode_for_diag,
+ exploded_node *enode_for_diag,
const program_state *old_state,
program_state *new_state,
+ uncertainty_t *uncertainty,
+ path_context *path_ctxt,
const gimple *stmt,
stmt_finder *stmt_finder)
: m_eg (&eg), m_logger (eg.get_logger ()),
@@ -86,40 +91,58 @@ impl_region_model_context (exploded_graph &eg,
m_new_state (new_state),
m_stmt (stmt),
m_stmt_finder (stmt_finder),
- m_ext_state (eg.get_ext_state ())
+ m_ext_state (eg.get_ext_state ()),
+ m_uncertainty (uncertainty),
+ m_path_ctxt (path_ctxt)
{
}
impl_region_model_context::
impl_region_model_context (program_state *state,
const extrinsic_state &ext_state,
+ uncertainty_t *uncertainty,
logger *logger)
: m_eg (NULL), m_logger (logger), m_enode_for_diag (NULL),
m_old_state (NULL),
m_new_state (state),
m_stmt (NULL),
m_stmt_finder (NULL),
- m_ext_state (ext_state)
+ m_ext_state (ext_state),
+ m_uncertainty (uncertainty),
+ m_path_ctxt (NULL)
{
}
-void
+bool
impl_region_model_context::warn (pending_diagnostic *d)
{
LOG_FUNC (get_logger ());
+ if (m_stmt == NULL && m_stmt_finder == NULL)
+ {
+ if (get_logger ())
+ get_logger ()->log ("rejecting diagnostic: no stmt");
+ delete d;
+ return false;
+ }
if (m_eg)
- m_eg->get_diagnostic_manager ().add_diagnostic
- (m_enode_for_diag, m_enode_for_diag->get_supernode (),
- m_stmt, m_stmt_finder, d);
+ {
+ m_eg->get_diagnostic_manager ().add_diagnostic
+ (m_enode_for_diag, m_enode_for_diag->get_supernode (),
+ m_stmt, m_stmt_finder, d);
+ return true;
+ }
+ else
+ {
+ delete d;
+ return false;
+ }
}
void
impl_region_model_context::on_svalue_leak (const svalue *sval)
{
- int sm_idx;
- sm_state_map *smap;
- FOR_EACH_VEC_ELT (m_new_state->m_checker_states, sm_idx, smap)
+ for (sm_state_map *smap : m_new_state->m_checker_states)
smap->on_svalue_leak (sval, this);
}
@@ -128,9 +151,7 @@ impl_region_model_context::
on_liveness_change (const svalue_set &live_svalues,
const region_model *model)
{
- int sm_idx;
- sm_state_map *smap;
- FOR_EACH_VEC_ELT (m_new_state->m_checker_states, sm_idx, smap)
+ for (sm_state_map *smap : m_new_state->m_checker_states)
smap->on_liveness_change (live_svalues, model, this);
}
@@ -138,9 +159,7 @@ void
impl_region_model_context::on_unknown_change (const svalue *sval,
bool is_mutable)
{
- int sm_idx;
- sm_state_map *smap;
- FOR_EACH_VEC_ELT (m_new_state->m_checker_states, sm_idx, smap)
+ for (sm_state_map *smap : m_new_state->m_checker_states)
smap->on_unknown_change (sval, is_mutable, m_ext_state);
}
@@ -150,6 +169,56 @@ impl_region_model_context::on_escaped_function (tree fndecl)
m_eg->on_escaped_function (fndecl);
}
+uncertainty_t *
+impl_region_model_context::get_uncertainty ()
+{
+ return m_uncertainty;
+}
+
+/* Purge state involving SVAL. The region_model has already been purged,
+ so we only need to purge other state in the program_state:
+ the sm-state. */
+
+void
+impl_region_model_context::purge_state_involving (const svalue *sval)
+{
+ int i;
+ sm_state_map *smap;
+ FOR_EACH_VEC_ELT (m_new_state->m_checker_states, i, smap)
+ smap->purge_state_involving (sval, m_ext_state);
+}
+
+void
+impl_region_model_context::bifurcate (custom_edge_info *info)
+{
+ if (m_path_ctxt)
+ m_path_ctxt->bifurcate (info);
+ else
+ delete info;
+}
+
+void
+impl_region_model_context::terminate_path ()
+{
+ if (m_path_ctxt)
+ return m_path_ctxt->terminate_path ();
+}
+
+bool
+impl_region_model_context::get_malloc_map (sm_state_map **out_smap,
+ const state_machine **out_sm,
+ unsigned *out_sm_idx)
+{
+ unsigned malloc_sm_idx;
+ if (!m_ext_state.get_sm_idx_by_name ("malloc", &malloc_sm_idx))
+ return false;
+
+ *out_smap = m_new_state->m_checker_states[malloc_sm_idx];
+ *out_sm = &m_ext_state.get_sm (malloc_sm_idx);
+ *out_sm_idx = malloc_sm_idx;
+ return true;
+}
+
/* struct setjmp_record. */
int
@@ -199,17 +268,19 @@ public:
impl_sm_context (exploded_graph &eg,
int sm_idx,
const state_machine &sm,
- const exploded_node *enode_for_diag,
+ exploded_node *enode_for_diag,
const program_state *old_state,
program_state *new_state,
const sm_state_map *old_smap,
sm_state_map *new_smap,
+ path_context *path_ctxt,
stmt_finder *stmt_finder = NULL)
: sm_context (sm_idx, sm),
m_logger (eg.get_logger ()),
m_eg (eg), m_enode_for_diag (enode_for_diag),
m_old_state (old_state), m_new_state (new_state),
m_old_smap (old_smap), m_new_smap (new_smap),
+ m_path_ctxt (path_ctxt),
m_stmt_finder (stmt_finder)
{
}
@@ -219,27 +290,36 @@ public:
tree get_fndecl_for_call (const gcall *call) FINAL OVERRIDE
{
impl_region_model_context old_ctxt
- (m_eg, m_enode_for_diag, NULL, NULL/*m_enode->get_state ()*/,
- call);
+ (m_eg, m_enode_for_diag, NULL, NULL, NULL/*m_enode->get_state ()*/,
+ NULL, call);
region_model *model = m_new_state->m_region_model;
return model->get_fndecl_for_call (call, &old_ctxt);
}
- state_machine::state_t get_state (const gimple *stmt,
+ state_machine::state_t get_state (const gimple *stmt ATTRIBUTE_UNUSED,
tree var)
{
logger * const logger = get_logger ();
LOG_FUNC (logger);
- impl_region_model_context old_ctxt
- (m_eg, m_enode_for_diag, NULL, NULL/*m_enode->get_state ()*/,
- stmt);
+ /* Use NULL ctxt on this get_rvalue call to avoid triggering
+ uninitialized value warnings. */
const svalue *var_old_sval
- = m_old_state->m_region_model->get_rvalue (var, &old_ctxt);
+ = m_old_state->m_region_model->get_rvalue (var, NULL);
state_machine::state_t current
= m_old_smap->get_state (var_old_sval, m_eg.get_ext_state ());
return current;
}
+ state_machine::state_t get_state (const gimple *stmt ATTRIBUTE_UNUSED,
+ const svalue *sval)
+ {
+ logger * const logger = get_logger ();
+ LOG_FUNC (logger);
+ state_machine::state_t current
+ = m_old_smap->get_state (sval, m_eg.get_ext_state ());
+ return current;
+ }
+
void set_next_state (const gimple *stmt,
tree var,
@@ -248,22 +328,18 @@ public:
{
logger * const logger = get_logger ();
LOG_FUNC (logger);
- impl_region_model_context old_ctxt
- (m_eg, m_enode_for_diag, NULL, NULL/*m_enode->get_state ()*/,
- stmt);
- const svalue *var_old_sval
- = m_old_state->m_region_model->get_rvalue (var, &old_ctxt);
-
impl_region_model_context new_ctxt (m_eg, m_enode_for_diag,
m_old_state, m_new_state,
+ NULL, NULL,
stmt);
const svalue *var_new_sval
= m_new_state->m_region_model->get_rvalue (var, &new_ctxt);
const svalue *origin_new_sval
= m_new_state->m_region_model->get_rvalue (origin, &new_ctxt);
+ /* We use the new sval here to avoid issues with uninitialized values. */
state_machine::state_t current
- = m_old_smap->get_state (var_old_sval, m_eg.get_ext_state ());
+ = m_old_smap->get_state (var_new_sval, m_eg.get_ext_state ());
if (logger)
logger->log ("%s: state transition of %qE: %s -> %s",
m_sm.get_name (),
@@ -274,13 +350,48 @@ public:
to, origin_new_sval, m_eg.get_ext_state ());
}
+ void set_next_state (const gimple *stmt,
+ const svalue *sval,
+ state_machine::state_t to,
+ tree origin)
+ {
+ logger * const logger = get_logger ();
+ LOG_FUNC (logger);
+ impl_region_model_context old_ctxt
+ (m_eg, m_enode_for_diag, NULL, NULL, NULL/*m_enode->get_state ()*/,
+ NULL, stmt);
+
+ impl_region_model_context new_ctxt (m_eg, m_enode_for_diag,
+ m_old_state, m_new_state,
+ NULL, NULL,
+ stmt);
+ const svalue *origin_new_sval
+ = m_new_state->m_region_model->get_rvalue (origin, &new_ctxt);
+
+ state_machine::state_t current
+ = m_old_smap->get_state (sval, m_eg.get_ext_state ());
+ if (logger)
+ {
+ logger->start_log_line ();
+ logger->log_partial ("%s: state transition of ",
+ m_sm.get_name ());
+ sval->dump_to_pp (logger->get_printer (), true);
+ logger->log_partial (": %s -> %s",
+ current->get_name (),
+ to->get_name ());
+ logger->end_log_line ();
+ }
+ m_new_smap->set_state (m_new_state->m_region_model, sval,
+ to, origin_new_sval, m_eg.get_ext_state ());
+ }
+
void warn (const supernode *snode, const gimple *stmt,
tree var, pending_diagnostic *d) FINAL OVERRIDE
{
LOG_FUNC (get_logger ());
gcc_assert (d); // take ownership
impl_region_model_context old_ctxt
- (m_eg, m_enode_for_diag, m_old_state, m_new_state, NULL);
+ (m_eg, m_enode_for_diag, m_old_state, m_new_state, NULL, NULL, NULL);
const svalue *var_old_sval
= m_old_state->m_region_model->get_rvalue (var, &old_ctxt);
@@ -317,6 +428,11 @@ public:
return expr;
}
+ tree get_diagnostic_tree (const svalue *sval) FINAL OVERRIDE
+ {
+ return m_new_state->m_region_model->get_representative_tree (sval);
+ }
+
state_machine::state_t get_global_state () const FINAL OVERRIDE
{
return m_old_state->m_checker_states[m_sm_idx]->get_global_state ();
@@ -340,7 +456,7 @@ public:
if (!assign_stmt)
return NULL_TREE;
impl_region_model_context old_ctxt
- (m_eg, m_enode_for_diag, m_old_state, m_new_state, stmt);
+ (m_eg, m_enode_for_diag, m_old_state, m_new_state, NULL, NULL, stmt);
if (const svalue *sval
= m_new_state->m_region_model->get_gassign_result (assign_stmt,
&old_ctxt))
@@ -350,13 +466,19 @@ public:
return NULL_TREE;
}
+ path_context *get_path_context () const FINAL OVERRIDE
+ {
+ return m_path_ctxt;
+ }
+
log_user m_logger;
exploded_graph &m_eg;
- const exploded_node *m_enode_for_diag;
+ exploded_node *m_enode_for_diag;
const program_state *m_old_state;
program_state *m_new_state;
const sm_state_map *m_old_smap;
sm_state_map *m_new_smap;
+ path_context *m_path_ctxt;
stmt_finder *m_stmt_finder;
};
@@ -471,9 +593,22 @@ readability (const_tree expr)
case SSA_NAME:
{
if (tree var = SSA_NAME_VAR (expr))
- /* Slightly favor the underlying var over the SSA name to
- avoid having them compare equal. */
- return readability (var) - 1;
+ {
+ if (DECL_ARTIFICIAL (var))
+ {
+ /* If we have an SSA name for an artificial var,
+ only use it if it has a debug expr associated with
+ it that fixup_tree_for_diagnostic can use. */
+ if (VAR_P (var) && DECL_HAS_DEBUG_EXPR_P (var))
+ return readability (DECL_DEBUG_EXPR (var)) - 1;
+ }
+ else
+ {
+ /* Slightly favor the underlying var over the SSA name to
+ avoid having them compare equal. */
+ return readability (var) - 1;
+ }
+ }
/* Avoid printing '<unknown>' for SSA names for temporaries. */
return -1;
}
@@ -634,12 +769,13 @@ impl_region_model_context::on_state_leak (const state_machine &sm,
}
}
- pending_diagnostic *pd = sm.on_leak (leaked_tree);
+ tree leaked_tree_for_diag = fixup_tree_for_diagnostic (leaked_tree);
+ pending_diagnostic *pd = sm.on_leak (leaked_tree_for_diag);
if (pd)
m_eg->get_diagnostic_manager ().add_diagnostic
(&sm, m_enode_for_diag, m_enode_for_diag->get_supernode (),
m_stmt, &stmt_finder,
- leaked_tree, sval, state, pd);
+ leaked_tree_for_diag, sval, state, pd);
}
/* Implementation of region_model_context::on_condition vfunc.
@@ -647,7 +783,9 @@ impl_region_model_context::on_state_leak (const state_machine &sm,
state transitions. */
void
-impl_region_model_context::on_condition (tree lhs, enum tree_code op, tree rhs)
+impl_region_model_context::on_condition (const svalue *lhs,
+ enum tree_code op,
+ const svalue *rhs)
{
int sm_idx;
sm_state_map *smap;
@@ -657,9 +795,13 @@ impl_region_model_context::on_condition (tree lhs, enum tree_code op, tree rhs)
impl_sm_context sm_ctxt (*m_eg, sm_idx, sm, m_enode_for_diag,
m_old_state, m_new_state,
m_old_state->m_checker_states[sm_idx],
- m_new_state->m_checker_states[sm_idx]);
+ m_new_state->m_checker_states[sm_idx],
+ m_path_ctxt);
sm.on_condition (&sm_ctxt,
- m_enode_for_diag->get_supernode (), m_stmt,
+ (m_enode_for_diag
+ ? m_enode_for_diag->get_supernode ()
+ : NULL),
+ m_stmt,
lhs, op, rhs);
}
}
@@ -679,7 +821,8 @@ impl_region_model_context::on_phi (const gphi *phi, tree rhs)
impl_sm_context sm_ctxt (*m_eg, sm_idx, sm, m_enode_for_diag,
m_old_state, m_new_state,
m_old_state->m_checker_states[sm_idx],
- m_new_state->m_checker_states[sm_idx]);
+ m_new_state->m_checker_states[sm_idx],
+ m_path_ctxt);
sm.on_phi (&sm_ctxt, m_enode_for_diag->get_supernode (), phi, rhs);
}
}
@@ -949,7 +1092,7 @@ exploded_node::dump_dot (graphviz_out *gv, const dump_args_t &args) const
dump_processed_stmts (pp);
}
- dump_saved_diagnostics (pp, args.m_eg.get_diagnostic_manager ());
+ dump_saved_diagnostics (pp);
args.dump_extra_info (this, pp);
@@ -987,18 +1130,15 @@ exploded_node::dump_processed_stmts (pretty_printer *pp) const
/* Dump any saved_diagnostics at this enode to PP. */
void
-exploded_node::dump_saved_diagnostics (pretty_printer *pp,
- const diagnostic_manager &dm) const
+exploded_node::dump_saved_diagnostics (pretty_printer *pp) const
{
- for (unsigned i = 0; i < dm.get_num_diagnostics (); i++)
+ unsigned i;
+ const saved_diagnostic *sd;
+ FOR_EACH_VEC_ELT (m_saved_diagnostics, i, sd)
{
- const saved_diagnostic *sd = dm.get_saved_diagnostic (i);
- if (sd->m_enode == this)
- {
- pp_printf (pp, "DIAGNOSTIC: %s (sd: %i)",
- sd->m_d->get_kind (), sd->get_index ());
- pp_newline (pp);
- }
+ pp_printf (pp, "DIAGNOSTIC: %s (sd: %i)",
+ sd->m_d->get_kind (), sd->get_index ());
+ pp_newline (pp);
}
}
@@ -1091,26 +1231,6 @@ fndecl_has_gimple_body_p (tree fndecl)
namespace ana {
-/* A pending_diagnostic subclass for implementing "__analyzer_dump_path". */
-
-class dump_path_diagnostic
- : public pending_diagnostic_subclass<dump_path_diagnostic>
-{
-public:
- bool emit (rich_location *richloc) FINAL OVERRIDE
- {
- inform (richloc, "path");
- return true;
- }
-
- const char *get_kind () const FINAL OVERRIDE { return "dump_path_diagnostic"; }
-
- bool operator== (const dump_path_diagnostic &) const
- {
- return true;
- }
-};
-
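(Illustration, not part of the patch: dump_path_diagnostic is removed from engine.cc here; the handling of "__analyzer_dump_path" presumably moves into the region-model code changed elsewhere in this patch. The builtin itself remains a debugging aid for probing path feasibility:)

  /* __analyzer_dump_path emits a "path" note if the analyzer can reach
     this point along a feasible path; the testsuite uses it to verify
     feasibility.  Declared by hand here for illustration.  */

  void __analyzer_dump_path (void);

  void test (int x)
  {
    if (x > 10 && x < 5)
      __analyzer_dump_path ();  /* infeasible: no "path" note expected */
  }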
/* Modify STATE in place, applying the effects of the stmt at this node's
point. */
@@ -1118,7 +1238,9 @@ exploded_node::on_stmt_flags
exploded_node::on_stmt (exploded_graph &eg,
const supernode *snode,
const gimple *stmt,
- program_state *state) const
+ program_state *state,
+ uncertainty_t *uncertainty,
+ path_context *path_ctxt)
{
logger *logger = eg.get_logger ();
LOG_SCOPE (logger);
@@ -1142,98 +1264,18 @@ exploded_node::on_stmt (exploded_graph &eg,
const program_state old_state (*state);
impl_region_model_context ctxt (eg, this,
- &old_state, state,
- stmt);
+ &old_state, state, uncertainty,
+ path_ctxt, stmt);
bool unknown_side_effects = false;
bool terminate_path = false;
- switch (gimple_code (stmt))
- {
- default:
- /* No-op for now. */
- break;
-
- case GIMPLE_ASSIGN:
- {
- const gassign *assign = as_a <const gassign *> (stmt);
- state->m_region_model->on_assignment (assign, &ctxt);
- }
- break;
-
- case GIMPLE_ASM:
- /* No-op for now. */
- break;
-
- case GIMPLE_CALL:
- {
- /* Track whether we have a gcall to a function that's not recognized by
- anything, for which we don't have a function body, or for which we
- don't know the fndecl. */
- const gcall *call = as_a <const gcall *> (stmt);
-
- /* Debugging/test support. */
- if (is_special_named_call_p (call, "__analyzer_describe", 2))
- state->m_region_model->impl_call_analyzer_describe (call, &ctxt);
- else if (is_special_named_call_p (call, "__analyzer_dump", 0))
- {
- /* Handle the builtin "__analyzer_dump" by dumping state
- to stderr. */
- state->dump (eg.get_ext_state (), true);
- }
- else if (is_special_named_call_p (call, "__analyzer_dump_path", 0))
- {
- /* Handle the builtin "__analyzer_dump_path" by queuing a
- diagnostic at this exploded_node. */
- ctxt.warn (new dump_path_diagnostic ());
- }
- else if (is_special_named_call_p (call, "__analyzer_dump_region_model",
- 0))
- {
- /* Handle the builtin "__analyzer_dump_region_model" by dumping
- the region model's state to stderr. */
- state->m_region_model->dump (false);
- }
- else if (is_special_named_call_p (call, "__analyzer_eval", 1))
- state->m_region_model->impl_call_analyzer_eval (call, &ctxt);
- else if (is_special_named_call_p (call, "__analyzer_break", 0))
- {
- /* Handle the builtin "__analyzer_break" by triggering a
- breakpoint. */
- /* TODO: is there a good cross-platform way to do this? */
- raise (SIGINT);
- }
- else if (is_special_named_call_p (call,
- "__analyzer_dump_exploded_nodes",
- 1))
- {
- /* This is handled elsewhere. */
- }
- else if (is_setjmp_call_p (call))
- state->m_region_model->on_setjmp (call, this, &ctxt);
- else if (is_longjmp_call_p (call))
- {
- on_longjmp (eg, call, state, &ctxt);
- return on_stmt_flags::terminate_path ();
- }
- else
- unknown_side_effects
- = state->m_region_model->on_call_pre (call, &ctxt, &terminate_path);
- }
- break;
-
- case GIMPLE_RETURN:
- {
- const greturn *return_ = as_a <const greturn *> (stmt);
- state->m_region_model->on_return (return_, &ctxt);
- }
- break;
- }
+ on_stmt_pre (eg, stmt, state, &terminate_path,
+ &unknown_side_effects, &ctxt);
if (terminate_path)
return on_stmt_flags::terminate_path ();
- bool any_sm_changes = false;
int sm_idx;
sm_state_map *smap;
FOR_EACH_VEC_ELT (old_state.m_checker_states, sm_idx, smap)
@@ -1243,18 +1285,77 @@ exploded_node::on_stmt (exploded_graph &eg,
= old_state.m_checker_states[sm_idx];
sm_state_map *new_smap = state->m_checker_states[sm_idx];
impl_sm_context sm_ctxt (eg, sm_idx, sm, this, &old_state, state,
- old_smap, new_smap);
+ old_smap, new_smap, path_ctxt);
+
/* Allow the state_machine to handle the stmt. */
if (sm.on_stmt (&sm_ctxt, snode, stmt))
unknown_side_effects = false;
- if (*old_smap != *new_smap)
- any_sm_changes = true;
}
+ if (path_ctxt->terminate_path_p ())
+ return on_stmt_flags::terminate_path ();
+
+ on_stmt_post (stmt, state, unknown_side_effects, &ctxt);
+
+ return on_stmt_flags ();
+}
+
+/* Handle the pre-sm-state part of STMT, modifying STATE in-place.
+ Write true to *OUT_TERMINATE_PATH if the path should be terminated.
+ Write true to *OUT_UNKNOWN_SIDE_EFFECTS if the stmt has unknown
+ side effects. */
+
+void
+exploded_node::on_stmt_pre (exploded_graph &eg,
+ const gimple *stmt,
+ program_state *state,
+ bool *out_terminate_path,
+ bool *out_unknown_side_effects,
+ region_model_context *ctxt)
+{
+ /* Handle special-case calls that require the full program_state. */
if (const gcall *call = dyn_cast <const gcall *> (stmt))
- state->m_region_model->on_call_post (call, unknown_side_effects, &ctxt);
+ {
+ if (is_special_named_call_p (call, "__analyzer_dump", 0))
+ {
+ /* Handle the builtin "__analyzer_dump" by dumping state
+ to stderr. */
+ state->dump (eg.get_ext_state (), true);
+ return;
+ }
+ else if (is_special_named_call_p (call, "__analyzer_dump_state", 2))
+ state->impl_call_analyzer_dump_state (call, eg.get_ext_state (),
+ ctxt);
+ else if (is_setjmp_call_p (call))
+ {
+ state->m_region_model->on_setjmp (call, this, ctxt);
+ return;
+ }
+ else if (is_longjmp_call_p (call))
+ {
+ on_longjmp (eg, call, state, ctxt);
+ *out_terminate_path = true;
+ return;
+ }
+ }
+
+ /* Otherwise, defer to m_region_model. */
+ state->m_region_model->on_stmt_pre (stmt,
+ out_terminate_path,
+ out_unknown_side_effects,
+ ctxt);
+}
- return on_stmt_flags (any_sm_changes);
+/* Handle the post-sm-state part of STMT, modifying STATE in-place. */
+
+void
+exploded_node::on_stmt_post (const gimple *stmt,
+ program_state *state,
+ bool unknown_side_effects,
+ region_model_context *ctxt)
+{
+ if (const gcall *call = dyn_cast <const gcall *> (stmt))
+ state->m_region_model->on_call_post (call, unknown_side_effects, ctxt);
}
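(Illustration, not part of the patch's text: on_stmt_pre keeps the special-cased calls that need the full program_state, including the new "__analyzer_dump_state" handled above; a sketch of its testsuite-style use, with the declaration written by hand and the reported state names indicative only:)

  #include <stdlib.h>

  void __analyzer_dump_state (const char *name, ...);

  void test (void)
  {
    void *p = malloc (16);
    __analyzer_dump_state ("malloc", p);  /* e.g. "unchecked" */
    free (p);
    __analyzer_dump_state ("malloc", p);  /* e.g. "freed" */
  }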
/* Consider the effect of following superedge SUCC from this node.
@@ -1277,14 +1378,15 @@ bool
exploded_node::on_edge (exploded_graph &eg,
const superedge *succ,
program_point *next_point,
- program_state *next_state) const
+ program_state *next_state,
+ uncertainty_t *uncertainty)
{
LOG_FUNC (eg.get_logger ());
if (!next_point->on_edge (eg, succ))
return false;
- if (!next_state->on_edge (eg, *this, succ))
+ if (!next_state->on_edge (eg, this, succ, uncertainty))
return false;
return true;
@@ -1320,7 +1422,7 @@ valid_longjmp_stack_p (const program_point &longjmp_point,
where the enclosing function of the "setjmp" has returned (and thus
the stack frame no longer exists). */
-class stale_jmp_buf : public pending_diagnostic_subclass<dump_path_diagnostic>
+class stale_jmp_buf : public pending_diagnostic_subclass<stale_jmp_buf>
{
public:
stale_jmp_buf (const gcall *setjmp_call, const gcall *longjmp_call,
@@ -1365,7 +1467,7 @@ public:
{
/* Compare with diagnostic_manager::add_events_for_superedge. */
const int src_stack_depth = src_point.get_stack_depth ();
- m_stack_pop_event = new custom_event
+ m_stack_pop_event = new precanned_custom_event
(src_point.get_location (),
src_point.get_fndecl (),
src_stack_depth,
@@ -1409,7 +1511,7 @@ void
exploded_node::on_longjmp (exploded_graph &eg,
const gcall *longjmp_call,
program_state *new_state,
- region_model_context *ctxt) const
+ region_model_context *ctxt)
{
tree buf_ptr = gimple_call_arg (longjmp_call, 0);
gcc_assert (POINTER_TYPE_P (TREE_TYPE (buf_ptr)));
@@ -1419,7 +1521,8 @@ exploded_node::on_longjmp (exploded_graph &eg,
const region *buf = new_region_model->deref_rvalue (buf_ptr_sval, buf_ptr,
ctxt);
- const svalue *buf_content_sval = new_region_model->get_store_value (buf);
+ const svalue *buf_content_sval
+ = new_region_model->get_store_value (buf, ctxt);
const setjmp_svalue *setjmp_sval
= buf_content_sval->dyn_cast_setjmp_svalue ();
if (!setjmp_sval)
@@ -1518,7 +1621,7 @@ exploded_node::on_longjmp (exploded_graph &eg,
leaks. */
void
-exploded_node::detect_leaks (exploded_graph &eg) const
+exploded_node::detect_leaks (exploded_graph &eg)
{
LOG_FUNC_1 (eg.get_logger (), "EN: %i", m_index);
@@ -1540,8 +1643,9 @@ exploded_node::detect_leaks (exploded_graph &eg) const
gcc_assert (new_state.m_region_model);
+ uncertainty_t uncertainty;
impl_region_model_context ctxt (eg, this,
- &old_state, &new_state,
+ &old_state, &new_state, &uncertainty, NULL,
get_stmt ());
const svalue *result = NULL;
new_state.m_region_model->pop_frame (NULL, &result, &ctxt);
@@ -1576,21 +1680,70 @@ exploded_node::dump_succs_and_preds (FILE *outf) const
}
}
-/* class rewind_info_t : public exploded_edge::custom_info_t. */
+/* class dynamic_call_info_t : public custom_edge_info. */
+
+/* Implementation of custom_edge_info::update_model vfunc
+ for dynamic_call_info_t.
+
+ Update the state for dynamically discovered calls. */
+
+bool
+dynamic_call_info_t::update_model (region_model *model,
+ const exploded_edge *eedge,
+ region_model_context *) const
+{
+ gcc_assert (eedge);
+ const program_state &dest_state = eedge->m_dest->get_state ();
+ *model = *dest_state.m_region_model;
+ return true;
+}
+
+/* Implementation of custom_edge_info::add_events_to_path vfunc
+ for dynamic_call_info_t. */
-/* Implementation of exploded_edge::custom_info_t::update_model vfunc
+void
+dynamic_call_info_t::add_events_to_path (checker_path *emission_path,
+ const exploded_edge &eedge) const
+{
+ const exploded_node *src_node = eedge.m_src;
+ const program_point &src_point = src_node->get_point ();
+ const int src_stack_depth = src_point.get_stack_depth ();
+ const exploded_node *dest_node = eedge.m_dest;
+ const program_point &dest_point = dest_node->get_point ();
+ const int dest_stack_depth = dest_point.get_stack_depth ();
+
+ if (m_is_returning_call)
+ emission_path->add_event (new return_event (eedge, (m_dynamic_call
+ ? m_dynamic_call->location
+ : UNKNOWN_LOCATION),
+ dest_point.get_fndecl (),
+ dest_stack_depth));
+ else
+ emission_path->add_event (new call_event (eedge, (m_dynamic_call
+ ? m_dynamic_call->location
+ : UNKNOWN_LOCATION),
+ src_point.get_fndecl (),
+ src_stack_depth));
+}
+
+/* class rewind_info_t : public custom_edge_info. */
+
+/* Implementation of custom_edge_info::update_model vfunc
for rewind_info_t.
Update state for the special-case of a rewind of a longjmp
to a setjmp (which doesn't have a superedge, but does affect
state). */
-void
+bool
rewind_info_t::update_model (region_model *model,
- const exploded_edge &eedge)
+ const exploded_edge *eedge,
+ region_model_context *) const
{
- const program_point &longjmp_point = eedge.m_src->get_point ();
- const program_point &setjmp_point = eedge.m_dest->get_point ();
+ gcc_assert (eedge);
+ const program_point &longjmp_point = eedge->m_src->get_point ();
+ const program_point &setjmp_point = eedge->m_dest->get_point ();
gcc_assert (longjmp_point.get_stack_depth ()
>= setjmp_point.get_stack_depth ());
@@ -1598,14 +1751,15 @@ rewind_info_t::update_model (region_model *model,
model->on_longjmp (get_longjmp_call (),
get_setjmp_call (),
setjmp_point.get_stack_depth (), NULL);
+ return true;
}
-/* Implementation of exploded_edge::custom_info_t::add_events_to_path vfunc
+/* Implementation of custom_edge_info::add_events_to_path vfunc
for rewind_info_t. */
void
rewind_info_t::add_events_to_path (checker_path *emission_path,
- const exploded_edge &eedge)
+ const exploded_edge &eedge) const
{
const exploded_node *src_node = eedge.m_src;
const program_point &src_point = src_node->get_point ();
@@ -1632,7 +1786,7 @@ rewind_info_t::add_events_to_path (checker_path *emission_path,
exploded_edge::exploded_edge (exploded_node *src, exploded_node *dest,
const superedge *sedge,
- custom_info_t *custom_info)
+ custom_edge_info *custom_info)
: dedge<eg_traits> (src, dest), m_sedge (sedge),
m_custom_info (custom_info)
{
@@ -1967,7 +2121,25 @@ worklist::key_t::cmp (const worklist::key_t &ka, const worklist::key_t &kb)
return cmp;
}
- /* First, order by SCC. */
+ /* Sort by callstring, so that nodes with deeper call strings are processed
+ before those with shallower call strings.
+ If we have
+ splitting BB
+ / \
+ / \
+ fn call no fn call
+ \ /
+ \ /
+ join BB
+ then we want the path inside the function call to be fully explored up
+ to the return to the join BB before we explore along the "no fn call" path,
+ so that both enodes at the join BB reach the front of the worklist at
+ the same time and thus have a chance of being merged. */
+ int cs_cmp = call_string::cmp (call_string_a, call_string_b);
+ if (cs_cmp)
+ return cs_cmp;
+
+ /* Order by SCC. */
int scc_id_a = ka.get_scc_id (ka.m_enode);
int scc_id_b = kb.get_scc_id (kb.m_enode);
if (scc_id_a != scc_id_b)
@@ -1996,11 +2168,6 @@ worklist::key_t::cmp (const worklist::key_t &ka, const worklist::key_t &kb)
gcc_assert (snode_a == snode_b);
- /* The points might vary by callstring; try sorting by callstring. */
- int cs_cmp = call_string::cmp (call_string_a, call_string_b);
- if (cs_cmp)
- return cs_cmp;
-
/* Order within supernode via program point. */
int within_snode_cmp
= function_point::cmp_within_supernode (point_a.get_function_point (),
@@ -2137,7 +2304,7 @@ exploded_graph::add_function_entry (function *fun)
exploded_node *
exploded_graph::get_or_create_node (const program_point &point,
const program_state &state,
- const exploded_node *enode_for_diag)
+ exploded_node *enode_for_diag)
{
logger * const logger = get_logger ();
LOG_FUNC (logger);
@@ -2172,8 +2339,9 @@ exploded_graph::get_or_create_node (const program_point &point,
/* Prune state to try to improve the chances of a cache hit,
avoiding generating redundant nodes. */
+ uncertainty_t uncertainty;
program_state pruned_state
- = state.prune_for_point (*this, point, enode_for_diag);
+ = state.prune_for_point (*this, point, enode_for_diag, &uncertainty);
pruned_state.validate (get_ext_state ());
@@ -2230,6 +2398,7 @@ exploded_graph::get_or_create_node (const program_point &point,
if (pruned_state.can_merge_with_p (existing_state, point,
&merged_state))
{
+ merged_state.validate (m_ext_state);
if (logger)
logger->log ("merging new state with that of EN: %i",
existing_enode->m_index);
@@ -2322,7 +2491,7 @@ exploded_graph::get_or_create_node (const program_point &point,
exploded_edge *
exploded_graph::add_edge (exploded_node *src, exploded_node *dest,
const superedge *sedge,
- exploded_edge::custom_info_t *custom_info)
+ custom_edge_info *custom_info)
{
if (get_logger ())
get_logger ()->log ("creating edge EN: %i -> EN: %i",
@@ -2749,17 +2918,21 @@ maybe_process_run_of_before_supernode_enodes (exploded_node *enode)
items.quick_push (it);
const program_state &state = iter_enode->get_state ();
program_state *next_state = &it->m_processed_state;
+ next_state->validate (m_ext_state);
const program_point &iter_point = iter_enode->get_point ();
if (const superedge *iter_sedge = iter_point.get_from_edge ())
{
+ uncertainty_t uncertainty;
impl_region_model_context ctxt (*this, iter_enode,
- &state, next_state, NULL);
+ &state, next_state,
+ &uncertainty, NULL, NULL);
const cfg_superedge *last_cfg_superedge
= iter_sedge->dyn_cast_cfg_superedge ();
if (last_cfg_superedge)
next_state->m_region_model->update_for_phis
(snode, last_cfg_superedge, &ctxt);
}
+ next_state->validate (m_ext_state);
}
/* Attempt to partition the items into a set of merged states.
@@ -2776,10 +2949,12 @@ maybe_process_run_of_before_supernode_enodes (exploded_node *enode)
unsigned iter_merger_idx;
FOR_EACH_VEC_ELT (merged_states, iter_merger_idx, merged_state)
{
+ merged_state->validate (m_ext_state);
program_state merge (m_ext_state);
if (it_state.can_merge_with_p (*merged_state, next_point, &merge))
{
*merged_state = merge;
+ merged_state->validate (m_ext_state);
it->m_merger_idx = iter_merger_idx;
if (logger)
logger->log ("reusing merger state %i for item %i (EN: %i)",
@@ -2878,6 +3053,173 @@ stmt_requires_new_enode_p (const gimple *stmt,
return false;
}
+/* Return true if OLD_STATE and NEW_STATE are sufficiently different that
+ we should split enodes and create an exploded_edge separating them
+ (which makes it easier to identify state changes of interest when
+ constructing checker_paths). */
+
+static bool
+state_change_requires_new_enode_p (const program_state &old_state,
+ const program_state &new_state)
+{
+ /* Changes in dynamic extents signify creations of heap/alloca regions
+ and resizings of heap regions; likely to be of interest in
+ diagnostic paths. */
+ if (old_state.m_region_model->get_dynamic_extents ()
+ != new_state.m_region_model->get_dynamic_extents ())
+ return true;
+
+ /* Changes in sm-state are of interest. */
+ int sm_idx;
+ sm_state_map *smap;
+ FOR_EACH_VEC_ELT (old_state.m_checker_states, sm_idx, smap)
+ {
+ const sm_state_map *old_smap = old_state.m_checker_states[sm_idx];
+ const sm_state_map *new_smap = new_state.m_checker_states[sm_idx];
+ if (*old_smap != *new_smap)
+ return true;
+ }
+
+ return false;
+}
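(Illustration, not part of the patch: the kind of stmt this predicate catches. Allocation changes both the dynamic extents and the sm-malloc state of the new pointer, so a new enode is split off after it even without -fanalyzer-fine-grained:)

  #include <stdlib.h>

  void test (size_t n)
  {
    char *p = malloc (n);  /* dynamic extents and sm-state change, so
                              state_change_requires_new_enode_p is true */
    if (p)
      p[0] = 'x';
    free (p);
  }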
+
+/* Create enodes and eedges for function calls that don't have an
+ underlying call superedge.
+
+ Such a case occurs when GCC's middle end didn't know which function to
+ call, but the analyzer does (with the help of the current state).
+
+ Examples of such calls are dynamically dispatched calls to virtual
+ functions, or calls that happen via a function pointer. */
+
+bool
+exploded_graph::maybe_create_dynamic_call (const gcall *call,
+ tree fn_decl,
+ exploded_node *node,
+ program_state next_state,
+ program_point &next_point,
+ uncertainty_t *uncertainty,
+ logger *logger)
+{
+ LOG_FUNC (logger);
+
+ const program_point *this_point = &node->get_point ();
+ function *fun = DECL_STRUCT_FUNCTION (fn_decl);
+ if (fun)
+ {
+ const supergraph &sg = this->get_supergraph ();
+ supernode *sn_entry = sg.get_node_for_function_entry (fun);
+ supernode *sn_exit = sg.get_node_for_function_exit (fun);
+
+ program_point new_point
+ = program_point::before_supernode (sn_entry,
+ NULL,
+ this_point->get_call_string ());
+
+ new_point.push_to_call_stack (sn_exit,
+ next_point.get_supernode ());
+
+ /* Impose a maximum recursion depth and don't analyze paths
+ that exceed it further.
+ This is something of a blunt workaround, but it only
+ applies to recursion (and mutual recursion), not to
+ general call stacks. */
+ if (new_point.get_call_string ().calc_recursion_depth ()
+ > param_analyzer_max_recursion_depth)
+ {
+ if (logger)
+ logger->log ("rejecting call edge: recursion limit exceeded");
+ return false;
+ }
+
+ next_state.push_call (*this, node, call, uncertainty);
+
+ if (next_state.m_valid)
+ {
+ if (logger)
+ logger->log ("Discovered call to %s [SN: %i -> SN: %i]",
+ function_name (fun),
+ this_point->get_supernode ()->m_index,
+ sn_entry->m_index);
+
+ exploded_node *enode = get_or_create_node (new_point,
+ next_state,
+ node);
+ if (enode)
+ add_edge (node, enode, NULL,
+ new dynamic_call_info_t (call));
+ return true;
+ }
+ }
+ return false;
+}
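(Illustration, not part of the patch: a call with no underlying call superedge that maybe_create_dynamic_call can follow, subject to the recursion limit above, which should correspond to --param analyzer-max-recursion-depth:)

  #include <stdlib.h>

  static void callee (void *p)
  {
    /* no free of 'p' here */
  }

  void test (void)
  {
    void (*fn) (void *) = callee;  /* region model tracks fn == callee */
    void *p = malloc (16);
    fn (p);  /* no cgraph edge, but the call is discovered and the
                analyzer pushes a frame for 'callee' */
  }          /* 'p' leaks, with path events through 'callee' */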
+
+/* Subclass of path_context for use within exploded_graph::process_node,
+ so that we can split states e.g. at "realloc" calls. */
+
+class impl_path_context : public path_context
+{
+public:
+ impl_path_context (const program_state *cur_state)
+ : m_cur_state (cur_state),
+ m_terminate_path (false)
+ {
+ }
+
+ bool bifurcation_p () const
+ {
+ return m_custom_eedge_infos.length () > 0;
+ }
+
+ const program_state &get_state_at_bifurcation () const
+ {
+ gcc_assert (m_state_at_bifurcation);
+ return *m_state_at_bifurcation;
+ }
+
+ void
+ bifurcate (custom_edge_info *info) FINAL OVERRIDE
+ {
+ if (m_state_at_bifurcation)
+ /* Verify that the state at bifurcation is consistent when we
+ split into multiple out-edges. */
+ gcc_assert (*m_state_at_bifurcation == *m_cur_state);
+ else
+ /* Take a copy of the cur_state at the moment when bifurcation
+ happens. */
+ m_state_at_bifurcation
+ = std::unique_ptr<program_state> (new program_state (*m_cur_state));
+
+ /* Take ownership of INFO. */
+ m_custom_eedge_infos.safe_push (info);
+ }
+
+ void terminate_path () FINAL OVERRIDE
+ {
+ m_terminate_path = true;
+ }
+
+ bool terminate_path_p () const FINAL OVERRIDE
+ {
+ return m_terminate_path;
+ }
+
+ const vec<custom_edge_info *> & get_custom_eedge_infos ()
+ {
+ return m_custom_eedge_infos;
+ }
+
+private:
+ const program_state *m_cur_state;
+
+ /* Lazily-created copy of the state before the split. */
+ std::unique_ptr<program_state> m_state_at_bifurcation;
+
+ auto_vec <custom_edge_info *> m_custom_eedge_infos;
+
+ bool m_terminate_path;
+};
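(Illustration, not part of the patch: the kind of bifurcation this class enables, per the "realloc" example mentioned below; one call site yields one successor enode per outcome:)

  #include <stdlib.h>

  void *grow (void *p, size_t n)
  {
    void *q = realloc (p, n);  /* bifurcates into: (1) failure,
                                  (2) success, resized in place,
                                  (3) success, moved to a new buffer */
    if (!q)
      return p;  /* on failure the old buffer remains valid */
    return q;
  }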
+
/* The core of exploded_graph::process_worklist (the main analysis loop),
handling one node in the worklist.
@@ -2927,11 +3269,13 @@ exploded_graph::process_node (exploded_node *node)
case PK_BEFORE_SUPERNODE:
{
program_state next_state (state);
+ uncertainty_t uncertainty;
if (point.get_from_edge ())
{
impl_region_model_context ctxt (*this, node,
- &state, &next_state, NULL);
+ &state, &next_state,
+ &uncertainty, NULL, NULL);
const cfg_superedge *last_cfg_superedge
= point.get_from_edge ()->dyn_cast_cfg_superedge ();
if (last_cfg_superedge)
@@ -2969,6 +3313,10 @@ exploded_graph::process_node (exploded_node *node)
the sm-state-change occurs on an edge where the src enode has
exactly one stmt, the one that caused the change. */
program_state next_state (state);
+
+ impl_path_context path_ctxt (&next_state);
+
+ uncertainty_t uncertainty;
const supernode *snode = point.get_supernode ();
unsigned stmt_idx;
const gimple *prev_stmt = NULL;
@@ -2990,7 +3338,8 @@ exploded_graph::process_node (exploded_node *node)
/* Process the stmt. */
exploded_node::on_stmt_flags flags
- = node->on_stmt (*this, snode, stmt, &next_state);
+ = node->on_stmt (*this, snode, stmt, &next_state, &uncertainty,
+ &path_ctxt);
node->m_num_processed_stmts++;
/* If flags.m_terminate_path, stop analyzing; any nodes/edges
@@ -3001,7 +3350,8 @@ exploded_graph::process_node (exploded_node *node)
if (next_state.m_region_model)
{
impl_region_model_context ctxt (*this, node,
- &old_state, &next_state, stmt);
+ &old_state, &next_state,
+ &uncertainty, NULL, stmt);
program_state::detect_leaks (old_state, next_state, NULL,
get_ext_state (), &ctxt);
}
@@ -3013,9 +3363,13 @@ exploded_graph::process_node (exploded_node *node)
point.get_call_string ())
: program_point::after_supernode (point.get_supernode (),
point.get_call_string ()));
- next_state = next_state.prune_for_point (*this, next_point, node);
+ next_state = next_state.prune_for_point (*this, next_point, node,
+ &uncertainty);
- if (flags.m_sm_changes || flag_analyzer_fine_grained)
+ if (flag_analyzer_fine_grained
+ || state_change_requires_new_enode_p (old_state, next_state)
+ || path_ctxt.bifurcation_p ()
+ || path_ctxt.terminate_path_p ())
{
program_point split_point
= program_point::before_stmt (point.get_supernode (),
@@ -3059,17 +3413,77 @@ exploded_graph::process_node (exploded_node *node)
point.get_call_string ())
: program_point::after_supernode (point.get_supernode (),
point.get_call_string ()));
- exploded_node *next = get_or_create_node (next_point, next_state, node);
- if (next)
- add_edge (node, next, NULL);
+ if (path_ctxt.terminate_path_p ())
+ {
+ if (logger)
+ logger->log ("not adding node: terminating path");
+ }
+ else
+ {
+ exploded_node *next
+ = get_or_create_node (next_point, next_state, node);
+ if (next)
+ add_edge (node, next, NULL);
+ }
+
+ /* If we have custom edge infos, "bifurcate" the state
+ accordingly, potentially creating new state/enode/eedge
+ instances. For example, to handle a "realloc" call, we
+ might split into 3 states, for the "failure",
+ "resizing in place", and "moving to a new buffer" cases. */
+ for (auto edge_info : path_ctxt.get_custom_eedge_infos ())
+ {
+ if (logger)
+ {
+ logger->start_log_line ();
+ logger->log_partial ("bifurcating for edge: ");
+ edge_info->print (logger->get_printer ());
+ logger->end_log_line ();
+ }
+ program_state bifurcated_new_state
+ (path_ctxt.get_state_at_bifurcation ());
+
+ /* Apply edge_info to state. */
+ impl_region_model_context
+ bifurcation_ctxt (*this,
+ NULL, // enode_for_diag
+ &path_ctxt.get_state_at_bifurcation (),
+ &bifurcated_new_state,
+ NULL, // uncertainty_t *uncertainty
+ NULL, // path_context *path_ctxt
+ stmt);
+ if (edge_info->update_model (bifurcated_new_state.m_region_model,
+ NULL, /* no exploded_edge yet. */
+ &bifurcation_ctxt))
+ {
+ exploded_node *next2
+ = get_or_create_node (next_point, bifurcated_new_state, node);
+ if (next2)
+ {
+ /* Take ownership of edge_info. */
+ add_edge (node, next2, NULL, edge_info);
+ }
+ else
+ delete edge_info;
+ }
+ else
+ {
+ if (logger)
+ logger->log ("infeasible state, not adding node");
+ delete edge_info;
+ }
+ }
}
break;
case PK_AFTER_SUPERNODE:
{
+ bool found_a_superedge = false;
+ bool is_an_exit_block = false;
/* If this is an EXIT BB, detect leaks, and potentially
create a function summary. */
if (point.get_supernode ()->return_p ())
{
+ is_an_exit_block = true;
node->detect_leaks (*this);
if (flag_analyzer_call_summaries
&& point.get_call_string ().empty_p ())
@@ -3097,6 +3511,7 @@ exploded_graph::process_node (exploded_node *node)
superedge *succ;
FOR_EACH_VEC_ELT (point.get_supernode ()->m_succs, i, succ)
{
+ found_a_superedge = true;
if (logger)
logger->log ("considering SN: %i -> SN: %i",
succ->m_src->m_index, succ->m_dest->m_index);
@@ -3105,20 +3520,102 @@ exploded_graph::process_node (exploded_node *node)
= program_point::before_supernode (succ->m_dest, succ,
point.get_call_string ());
program_state next_state (state);
-
- if (!node->on_edge (*this, succ, &next_point, &next_state))
+ uncertainty_t uncertainty;
+
+ /* Make use of the current state to try to discover and analyze
+ indirect function calls (calls that don't have an underlying
+ cgraph edge representing the call).
+
+ Some examples of such calls are virtual function calls
+ and calls that happen via a function pointer. */
+ if (succ->m_kind == SUPEREDGE_INTRAPROCEDURAL_CALL
+ && !(succ->get_any_callgraph_edge ()))
+ {
+ const gcall *call
+ = point.get_supernode ()->get_final_call ();
+
+ impl_region_model_context ctxt (*this,
+ node,
+ &state,
+ &next_state,
+ &uncertainty,
+ NULL,
+ point.get_stmt ());
+
+ region_model *model = state.m_region_model;
+ bool call_discovered = false;
+
+ if (tree fn_decl = model->get_fndecl_for_call (call, &ctxt))
+ call_discovered = maybe_create_dynamic_call (call,
+ fn_decl,
+ node,
+ next_state,
+ next_point,
+ &uncertainty,
+ logger);
+ if (!call_discovered)
+ {
+ /* An unknown function or a special function was called
+ at this point; in such a case, don't terminate the
+ analysis of the current function.
+
+ The analyzer handles calls to such functions while
+ analyzing the stmt itself, so the function call
+ must already have been handled by the analyzer. */
+ exploded_node *next
+ = get_or_create_node (next_point,
+ next_state,
+ node);
+ if (next)
+ add_edge (node, next, succ);
+ }
+ }
+
+ if (!node->on_edge (*this, succ, &next_point, &next_state,
+ &uncertainty))
{
if (logger)
logger->log ("skipping impossible edge to SN: %i",
succ->m_dest->m_index);
continue;
}
-
exploded_node *next = get_or_create_node (next_point, next_state,
node);
if (next)
add_edge (node, next, succ);
}
+
+ /* Return from calls that don't have a return superedge.
+ Such a case occurs when GCC's middle end didn't know which function
+ to call but the analyzer did. */
+ if ((is_an_exit_block && !found_a_superedge)
+ && (!point.get_call_string ().empty_p ()))
+ {
+ const call_string cs = point.get_call_string ();
+ program_point next_point
+ = program_point::before_supernode (cs.get_caller_node (),
+ NULL,
+ cs);
+ program_state next_state (state);
+ uncertainty_t uncertainty;
+
+ const gcall *call
+ = next_point.get_supernode ()->get_returning_call ();
+
+ if (call)
+ next_state.returning_call (*this, node, call, &uncertainty);
+
+ if (next_state.m_valid)
+ {
+ next_point.pop_from_call_stack ();
+ exploded_node *enode = get_or_create_node (next_point,
+ next_state,
+ node);
+ if (enode)
+ add_edge (node, enode, NULL,
+ new dynamic_call_info_t (call, true));
+ }
+ }
}
break;
}
@@ -3477,10 +3974,12 @@ exploded_path::feasible_p (logger *logger, feasibility_problem **out,
return true;
}
-/* Dump this path in multiline form to PP. */
+/* Dump this path in multiline form to PP.
+ If EXT_STATE is non-NULL, then show the nodes. */
void
-exploded_path::dump_to_pp (pretty_printer *pp) const
+exploded_path::dump_to_pp (pretty_printer *pp,
+ const extrinsic_state *ext_state) const
{
for (unsigned i = 0; i < m_edges.length (); i++)
{
@@ -3490,28 +3989,48 @@ exploded_path::dump_to_pp (pretty_printer *pp) const
eedge->m_src->m_index,
eedge->m_dest->m_index);
pp_newline (pp);
+
+ if (ext_state)
+ eedge->m_dest->dump_to_pp (pp, *ext_state);
}
}
/* Dump this path in multiline form to FP. */
void
-exploded_path::dump (FILE *fp) const
+exploded_path::dump (FILE *fp, const extrinsic_state *ext_state) const
{
pretty_printer pp;
pp_format_decoder (&pp) = default_tree_printer;
pp_show_color (&pp) = pp_show_color (global_dc->printer);
pp.buffer->stream = fp;
- dump_to_pp (&pp);
+ dump_to_pp (&pp, ext_state);
pp_flush (&pp);
}
/* Dump this path in multiline form to stderr. */
DEBUG_FUNCTION void
-exploded_path::dump () const
+exploded_path::dump (const extrinsic_state *ext_state) const
{
- dump (stderr);
+ dump (stderr, ext_state);
+}
+
+/* Dump this path verbosely to FILENAME. */
+
+void
+exploded_path::dump_to_file (const char *filename,
+ const extrinsic_state &ext_state) const
+{
+ FILE *fp = fopen (filename, "w");
+ if (!fp)
+ return;
+ pretty_printer pp;
+ pp_format_decoder (&pp) = default_tree_printer;
+ pp.buffer->stream = fp;
+ dump_to_pp (&pp, &ext_state);
+ pp_flush (&pp);
+ fclose (fp);
}
/* class feasibility_problem. */
@@ -3526,7 +4045,7 @@ feasibility_problem::dump_to_pp (pretty_printer *pp) const
pp_string (pp, "; rejected constraint: ");
m_rc->dump_to_pp (pp);
pp_string (pp, "; rmodel: ");
- m_rc->m_model.dump_to_pp (pp, true, false);
+ m_rc->get_model ().dump_to_pp (pp, true, false);
}
}
@@ -3588,6 +4107,15 @@ feasibility_state::maybe_update_for_edge (logger *logger,
if (const gassign *assign = dyn_cast <const gassign *> (stmt))
m_model.on_assignment (assign, NULL);
+ else if (const gasm *asm_stmt = dyn_cast <const gasm *> (stmt))
+ m_model.on_asm_stmt (asm_stmt, NULL);
+ else if (const gcall *call = dyn_cast <const gcall *> (stmt))
+ {
+ bool terminate_path;
+ bool unknown_side_effects
+ = m_model.on_call_pre (call, NULL, &terminate_path);
+ m_model.on_call_post (call, unknown_side_effects, NULL);
+ }
else if (const greturn *return_ = dyn_cast <const greturn *> (stmt))
m_model.on_return (return_, NULL);
}
@@ -3629,7 +4157,7 @@ feasibility_state::maybe_update_for_edge (logger *logger,
}
else if (eedge->m_custom_info)
{
- eedge->m_custom_info->update_model (&m_model, *eedge);
+ eedge->m_custom_info->update_model (&m_model, eedge, NULL);
}
}
@@ -4638,16 +5166,13 @@ private:
break;
}
gv->end_tdtr ();
+
/* Dump any saved_diagnostics at this enode. */
- {
- const diagnostic_manager &dm = m_eg.get_diagnostic_manager ();
- for (unsigned i = 0; i < dm.get_num_diagnostics (); i++)
- {
- const saved_diagnostic *sd = dm.get_saved_diagnostic (i);
- if (sd->m_enode == enode)
- print_saved_diagnostic (gv, sd);
- }
- }
+ for (unsigned i = 0; i < enode->get_num_diagnostics (); i++)
+ {
+ const saved_diagnostic *sd = enode->get_saved_diagnostic (i);
+ print_saved_diagnostic (gv, sd);
+ }
pp_printf (pp, "</TABLE>");
pp_printf (pp, "</TD>");
}
@@ -4766,6 +5291,13 @@ impl_run_checkers (logger *logger)
{
LOG_SCOPE (logger);
+ if (logger)
+ {
+ logger->log ("BITS_BIG_ENDIAN: %i", BITS_BIG_ENDIAN ? 1 : 0);
+ logger->log ("BYTES_BIG_ENDIAN: %i", BYTES_BIG_ENDIAN ? 1 : 0);
+ logger->log ("WORDS_BIG_ENDIAN: %i", WORDS_BIG_ENDIAN ? 1 : 0);
+ }
+
/* If using LTO, ensure that the cgraph nodes have function bodies. */
cgraph_node *node;
FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
diff --git a/gcc/analyzer/exploded-graph.h b/gcc/analyzer/exploded-graph.h
index deb739f..b9c1767 100644
--- a/gcc/analyzer/exploded-graph.h
+++ b/gcc/analyzer/exploded-graph.h
@@ -30,21 +30,24 @@ class impl_region_model_context : public region_model_context
{
public:
impl_region_model_context (exploded_graph &eg,
- const exploded_node *enode_for_diag,
+ exploded_node *enode_for_diag,
/* TODO: should we be getting the ECs from the
old state, rather than the new? */
const program_state *old_state,
program_state *new_state,
+ uncertainty_t *uncertainty,
+ path_context *path_ctxt,
const gimple *stmt,
stmt_finder *stmt_finder = NULL);
impl_region_model_context (program_state *state,
const extrinsic_state &ext_state,
+ uncertainty_t *uncertainty,
logger *logger = NULL);
- void warn (pending_diagnostic *d) FINAL OVERRIDE;
+ bool warn (pending_diagnostic *d) FINAL OVERRIDE;
void on_svalue_leak (const svalue *) OVERRIDE;
void on_liveness_change (const svalue_set &live_svalues,
const region_model *model) FINAL OVERRIDE;
@@ -57,7 +60,9 @@ class impl_region_model_context : public region_model_context
const svalue *sval,
state_machine::state_t state);
- void on_condition (tree lhs, enum tree_code op, tree rhs) FINAL OVERRIDE;
+ void on_condition (const svalue *lhs,
+ enum tree_code op,
+ const svalue *rhs) FINAL OVERRIDE;
void on_unknown_change (const svalue *sval, bool is_mutable) FINAL OVERRIDE;
@@ -68,14 +73,30 @@ class impl_region_model_context : public region_model_context
void on_escaped_function (tree fndecl) FINAL OVERRIDE;
+ uncertainty_t *get_uncertainty () FINAL OVERRIDE;
+
+ void purge_state_involving (const svalue *sval) FINAL OVERRIDE;
+
+ void bifurcate (custom_edge_info *info) FINAL OVERRIDE;
+ void terminate_path () FINAL OVERRIDE;
+ const extrinsic_state *get_ext_state () const FINAL OVERRIDE
+ {
+ return &m_ext_state;
+ }
+ bool get_malloc_map (sm_state_map **out_smap,
+ const state_machine **out_sm,
+ unsigned *out_sm_idx) FINAL OVERRIDE;
+
exploded_graph *m_eg;
log_user m_logger;
- const exploded_node *m_enode_for_diag;
+ exploded_node *m_enode_for_diag;
const program_state *m_old_state;
program_state *m_new_state;
const gimple *m_stmt;
stmt_finder *m_stmt_finder;
const extrinsic_state &m_ext_state;
+ uncertainty_t *m_uncertainty;
+ path_context *m_path_ctxt;
};
/* A <program_point, program_state> pair, used internally by
@@ -186,58 +207,59 @@ class exploded_node : public dnode<eg_traits>
void dump (const extrinsic_state &ext_state) const;
void dump_processed_stmts (pretty_printer *pp) const;
- void dump_saved_diagnostics (pretty_printer *pp,
- const diagnostic_manager &dm) const;
+ void dump_saved_diagnostics (pretty_printer *pp) const;
json::object *to_json (const extrinsic_state &ext_state) const;
/* The result of on_stmt. */
struct on_stmt_flags
{
- on_stmt_flags (bool sm_changes)
- : m_sm_changes (sm_changes),
- m_terminate_path (false)
+ on_stmt_flags () : m_terminate_path (false)
{}
static on_stmt_flags terminate_path ()
{
- return on_stmt_flags (true, true);
+ return on_stmt_flags (true);
}
- static on_stmt_flags state_change (bool any_sm_changes)
- {
- return on_stmt_flags (any_sm_changes, false);
- }
-
- /* Did any sm-changes occur handling the stmt. */
- bool m_sm_changes : 1;
-
/* Should we stop analyzing this path (on_stmt may have already
added nodes/edges, e.g. when handling longjmp). */
bool m_terminate_path : 1;
private:
- on_stmt_flags (bool sm_changes,
- bool terminate_path)
- : m_sm_changes (sm_changes),
- m_terminate_path (terminate_path)
+ on_stmt_flags (bool terminate_path)
+ : m_terminate_path (terminate_path)
{}
};
on_stmt_flags on_stmt (exploded_graph &eg,
const supernode *snode,
const gimple *stmt,
- program_state *state) const;
+ program_state *state,
+ uncertainty_t *uncertainty,
+ path_context *path_ctxt);
+ void on_stmt_pre (exploded_graph &eg,
+ const gimple *stmt,
+ program_state *state,
+ bool *out_terminate_path,
+ bool *out_unknown_side_effects,
+ region_model_context *ctxt);
+ void on_stmt_post (const gimple *stmt,
+ program_state *state,
+ bool unknown_side_effects,
+ region_model_context *ctxt);
+
bool on_edge (exploded_graph &eg,
const superedge *succ,
program_point *next_point,
- program_state *next_state) const;
+ program_state *next_state,
+ uncertainty_t *uncertainty);
void on_longjmp (exploded_graph &eg,
const gcall *call,
program_state *new_state,
- region_model_context *ctxt) const;
+ region_model_context *ctxt);
- void detect_leaks (exploded_graph &eg) const;
+ void detect_leaks (exploded_graph &eg);
const program_point &get_point () const { return m_ps.get_point (); }
const supernode *get_supernode () const
@@ -269,6 +291,19 @@ class exploded_node : public dnode<eg_traits>
m_status = status;
}
+ void add_diagnostic (const saved_diagnostic *sd)
+ {
+ m_saved_diagnostics.safe_push (sd);
+ }
+ unsigned get_num_diagnostics () const
+ {
+ return m_saved_diagnostics.length ();
+ }
+ const saved_diagnostic *get_saved_diagnostic (unsigned idx) const
+ {
+ return m_saved_diagnostics[idx];
+ }
+
private:
DISABLE_COPY_AND_ASSIGN (exploded_node);
@@ -278,6 +313,10 @@ private:
enum status m_status;
+ /* The saved_diagnostics at this enode, borrowed from the
+ diagnostic_manager. */
+ auto_vec <const saved_diagnostic *> m_saved_diagnostics;
+
public:
/* The index of this exploded_node. */
const int m_index;
@@ -293,28 +332,9 @@ public:
class exploded_edge : public dedge<eg_traits>
{
public:
- /* Abstract base class for associating custom data with an
- exploded_edge, for handling non-standard edges such as
- rewinding from a longjmp, signal handlers, etc. */
- class custom_info_t
- {
- public:
- virtual ~custom_info_t () {}
-
- /* Hook for making .dot label more readable . */
- virtual void print (pretty_printer *pp) = 0;
-
- /* Hook for updating MODEL within exploded_path::feasible_p. */
- virtual void update_model (region_model *model,
- const exploded_edge &eedge) = 0;
-
- virtual void add_events_to_path (checker_path *emission_path,
- const exploded_edge &eedge) = 0;
- };
-
exploded_edge (exploded_node *src, exploded_node *dest,
const superedge *sedge,
- custom_info_t *custom_info);
+ custom_edge_info *custom_info);
~exploded_edge ();
void dump_dot (graphviz_out *gv, const dump_args_t &args)
const FINAL OVERRIDE;
@@ -330,16 +350,48 @@ class exploded_edge : public dedge<eg_traits>
a signal is delivered to a signal-handler.
Owned by this class. */
- custom_info_t *m_custom_info;
+ custom_edge_info *m_custom_info;
private:
DISABLE_COPY_AND_ASSIGN (exploded_edge);
};
+/* Extra data for an exploded_edge that represents dynamic call info (calls
+ that don't have an underlying superedge representing the call). */
+
+class dynamic_call_info_t : public custom_edge_info
+{
+public:
+ dynamic_call_info_t (const gcall *dynamic_call,
+ const bool is_returning_call = false)
+ : m_dynamic_call (dynamic_call),
+ m_is_returning_call (is_returning_call)
+ {}
+
+ void print (pretty_printer *pp) const FINAL OVERRIDE
+ {
+ if (m_is_returning_call)
+ pp_string (pp, "dynamic_return");
+ else
+ pp_string (pp, "dynamic_call");
+ }
+
+ bool update_model (region_model *model,
+ const exploded_edge *eedge,
+ region_model_context *ctxt) const FINAL OVERRIDE;
+
+ void add_events_to_path (checker_path *emission_path,
+ const exploded_edge &eedge) const FINAL OVERRIDE;
+private:
+ const gcall *m_dynamic_call;
+ const bool m_is_returning_call;
+};
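+
+/* As an illustrative sketch of the kind of code this models (the
+ names below are hypothetical), a call through a function pointer has
+ no call superedge:
+
+ void (*callback) (int) = lookup_handler ();
+ callback (42);
+
+ Entering and later leaving "callback" would then be represented by
+ exploded edges carrying dynamic_call_info_t, the returning edge with
+ m_is_returning_call set. */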
+
+
/* Extra data for an exploded_edge that represents a rewind from a
longjmp to a setjmp (or from a siglongjmp to a sigsetjmp). */
-class rewind_info_t : public exploded_edge::custom_info_t
+class rewind_info_t : public custom_edge_info
{
public:
rewind_info_t (const setjmp_record &setjmp_record,
@@ -348,16 +400,17 @@ public:
m_longjmp_call (longjmp_call)
{}
- void print (pretty_printer *pp) FINAL OVERRIDE
+ void print (pretty_printer *pp) const FINAL OVERRIDE
{
pp_string (pp, "rewind");
}
- void update_model (region_model *model,
- const exploded_edge &eedge) FINAL OVERRIDE;
+ bool update_model (region_model *model,
+ const exploded_edge *eedge,
+ region_model_context *ctxt) const FINAL OVERRIDE;
void add_events_to_path (checker_path *emission_path,
- const exploded_edge &eedge) FINAL OVERRIDE;
+ const exploded_edge &eedge) const FINAL OVERRIDE;
const program_point &get_setjmp_point () const
{
@@ -759,12 +812,20 @@ public:
bool maybe_process_run_of_before_supernode_enodes (exploded_node *node);
void process_node (exploded_node *node);
+ bool maybe_create_dynamic_call (const gcall *call,
+ tree fn_decl,
+ exploded_node *node,
+ program_state next_state,
+ program_point &next_point,
+ uncertainty_t *uncertainty,
+ logger *logger);
+
exploded_node *get_or_create_node (const program_point &point,
const program_state &state,
- const exploded_node *enode_for_diag);
+ exploded_node *enode_for_diag);
exploded_edge *add_edge (exploded_node *src, exploded_node *dest,
const superedge *sedge,
- exploded_edge::custom_info_t *custom = NULL);
+ custom_edge_info *custom = NULL);
per_program_point_data *
get_or_create_per_program_point_data (const program_point &);
@@ -882,9 +943,12 @@ public:
exploded_node *get_final_enode () const;
- void dump_to_pp (pretty_printer *pp) const;
- void dump (FILE *fp) const;
- void dump () const;
+ void dump_to_pp (pretty_printer *pp,
+ const extrinsic_state *ext_state) const;
+ void dump (FILE *fp, const extrinsic_state *ext_state) const;
+ void dump (const extrinsic_state *ext_state = NULL) const;
+ void dump_to_file (const char *filename,
+ const extrinsic_state &ext_state) const;
bool feasible_p (logger *logger, feasibility_problem **out,
engine *eng, const exploded_graph *eg) const;
diff --git a/gcc/analyzer/feasible-graph.cc b/gcc/analyzer/feasible-graph.cc
index bb409d6..3b85896 100644
--- a/gcc/analyzer/feasible-graph.cc
+++ b/gcc/analyzer/feasible-graph.cc
@@ -79,7 +79,7 @@ base_feasible_node::dump_dot_id (pretty_printer *pp) const
void
feasible_node::dump_dot (graphviz_out *gv,
- const dump_args_t &args) const
+ const dump_args_t &) const
{
pretty_printer *pp = gv->get_pp ();
@@ -102,8 +102,7 @@ feasible_node::dump_dot (graphviz_out *gv,
pp_newline (pp);
m_inner_node->dump_processed_stmts (pp);
- m_inner_node->dump_saved_diagnostics
- (pp, args.m_inner_args.m_eg.get_diagnostic_manager ());
+ m_inner_node->dump_saved_diagnostics (pp);
pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/true);
@@ -130,7 +129,7 @@ infeasible_node::dump_dot (graphviz_out *gv,
pp_string (pp, "rejected constraint:");
pp_newline (pp);
- m_rc.dump_to_pp (pp);
+ m_rc->dump_to_pp (pp);
pp_write_text_as_dot_label_to_stream (pp, /*for_record=*/true);
@@ -179,12 +178,13 @@ feasible_graph::add_node (const exploded_node *enode,
}
/* Add an infeasible_node to this graph and an infeasible_edge connecting
- to it from SRC_FNODE, capturing a failure of RC along EEDGE. */
+ to it from SRC_FNODE, capturing a failure of RC along EEDGE.
+ Takes ownership of RC. */
void
feasible_graph::add_feasibility_problem (feasible_node *src_fnode,
const exploded_edge *eedge,
- const rejected_constraint &rc)
+ rejected_constraint *rc)
{
infeasible_node *dst_fnode
= new infeasible_node (eedge->m_dest, m_nodes.length (), rc);
diff --git a/gcc/analyzer/feasible-graph.h b/gcc/analyzer/feasible-graph.h
index 5a580f4..07696fa 100644
--- a/gcc/analyzer/feasible-graph.h
+++ b/gcc/analyzer/feasible-graph.h
@@ -115,17 +115,18 @@ class infeasible_node : public base_feasible_node
{
public:
infeasible_node (const exploded_node *inner_node, unsigned index,
- const rejected_constraint &rc)
+ rejected_constraint *rc)
: base_feasible_node (inner_node, index),
m_rc (rc)
{
}
+ ~infeasible_node () { delete m_rc; }
void dump_dot (graphviz_out *gv,
const dump_args_t &args) const FINAL OVERRIDE;
private:
- rejected_constraint m_rc;
+ rejected_constraint *m_rc;
};
/* Base class of edge within a feasible_graph. */
@@ -192,7 +193,7 @@ class feasible_graph : public digraph <fg_traits>
void add_feasibility_problem (feasible_node *src_fnode,
const exploded_edge *eedge,
- const rejected_constraint &rc);
+ rejected_constraint *rc);
exploded_path *make_epath (feasible_node *fnode) const;
diff --git a/gcc/analyzer/pending-diagnostic.h b/gcc/analyzer/pending-diagnostic.h
index 571fc1b..48e2b3e 100644
--- a/gcc/analyzer/pending-diagnostic.h
+++ b/gcc/analyzer/pending-diagnostic.h
@@ -154,6 +154,9 @@ class pending_diagnostic
/* Hand-coded RTTI: get an ID for the subclass. */
virtual const char *get_kind () const = 0;
+ /* A vfunc for identifying "use of uninitialized value". */
+ virtual bool use_of_uninit_p () const { return false; }
+
/* Compare for equality with OTHER, which might be of a different
subclass. */
@@ -269,6 +272,16 @@ class pending_diagnostic
{
return false;
}
+
+ /* Vfunc for determining that this pending_diagnostic supercedes OTHER,
+ and that OTHER should therefore not be emitted.
+ They have already been tested for being at the same stmt. */
+
+ virtual bool
+ supercedes_p (const pending_diagnostic &other ATTRIBUTE_UNUSED) const
+ {
+ return false;
+ }
};
/* A template to make it easier to make subclasses of pending_diagnostic.
diff --git a/gcc/analyzer/program-point.cc b/gcc/analyzer/program-point.cc
index d8cfc61..25d56af 100644
--- a/gcc/analyzer/program-point.cc
+++ b/gcc/analyzer/program-point.cc
@@ -119,8 +119,15 @@ function_point::print (pretty_printer *pp, const format &f) const
case PK_BEFORE_SUPERNODE:
{
if (m_from_edge)
- pp_printf (pp, "before SN: %i (from SN: %i)",
- m_supernode->m_index, m_from_edge->m_src->m_index);
+ {
+ if (basic_block bb = m_from_edge->m_src->m_bb)
+ pp_printf (pp, "before SN: %i (from SN: %i (bb: %i))",
+ m_supernode->m_index, m_from_edge->m_src->m_index,
+ bb->index);
+ else
+ pp_printf (pp, "before SN: %i (from SN: %i)",
+ m_supernode->m_index, m_from_edge->m_src->m_index);
+ }
else
pp_printf (pp, "before SN: %i (NULL from-edge)",
m_supernode->m_index);
@@ -323,6 +330,24 @@ program_point::to_json () const
return point_obj;
}
+/* Update the callstack to represent a call from caller to callee.
+
+ Generally used to push a custom call to a particular program point
+ where we don't have a superedge representing the call. */
+void
+program_point::push_to_call_stack (const supernode *caller,
+ const supernode *callee)
+{
+ m_call_string.push_call (callee, caller);
+}
+
+/* Pop the topmost call from the current callstack. */
+void
+program_point::pop_from_call_stack ()
+{
+ m_call_string.pop ();
+}
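+
+/* A sketch of the intended pairing when handling a call that lacks a
+ superedge (see exploded_graph::maybe_create_dynamic_call); the
+ variable names are illustrative:
+
+ next_point.push_to_call_stack (caller_snode, callee_snode);
+ ... analysis of the callee ...
+ return_point.pop_from_call_stack (); */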
+
/* Generate a hash value for this program_point. */
hashval_t
@@ -343,7 +368,7 @@ program_point::get_function_at_depth (unsigned depth) const
if (depth == m_call_string.length ())
return m_function_point.get_function ();
else
- return m_call_string[depth]->get_caller_function ();
+ return m_call_string[depth].get_caller_function ();
}
/* Assert that this object is sane. */
@@ -360,7 +385,7 @@ program_point::validate () const
/* The "callee" of the final entry in the callstring should be the
function of the m_function_point. */
if (m_call_string.length () > 0)
- gcc_assert (m_call_string[m_call_string.length () - 1]->get_callee_function ()
+ gcc_assert (m_call_string[m_call_string.length () - 1].get_callee_function ()
== get_function ());
}
@@ -431,8 +456,10 @@ program_point::on_edge (exploded_graph &eg,
logger->log ("rejecting return edge: empty call string");
return false;
}
- const return_superedge *top_of_stack = m_call_string.pop ();
- if (top_of_stack != succ)
+ const call_string::element_t top_of_stack = m_call_string.pop ();
+ call_string::element_t current_call_string_element (succ->m_dest,
+ succ->m_src);
+ if (top_of_stack != current_call_string_element)
{
if (logger)
logger->log ("rejecting return edge: return to wrong callsite");
diff --git a/gcc/analyzer/program-point.h b/gcc/analyzer/program-point.h
index 5f86745..6bae29b 100644
--- a/gcc/analyzer/program-point.h
+++ b/gcc/analyzer/program-point.h
@@ -293,7 +293,8 @@ public:
}
bool on_edge (exploded_graph &eg, const superedge *succ);
-
+ void push_to_call_stack (const supernode *caller, const supernode *callee);
+ void pop_from_call_stack ();
void validate () const;
/* For before_stmt, go to next stmt. */
diff --git a/gcc/analyzer/program-state.cc b/gcc/analyzer/program-state.cc
index e427fff..c1ff0d8 100644
--- a/gcc/analyzer/program-state.cc
+++ b/gcc/analyzer/program-state.cc
@@ -131,6 +131,27 @@ extrinsic_state::get_model_manager () const
return NULL; /* for selftests. */
}
+/* Try to find a state machine named NAME.
+ If found, return true and write its index to *OUT.
+ Otherwise return false. */
+
+bool
+extrinsic_state::get_sm_idx_by_name (const char *name, unsigned *out) const
+{
+ unsigned i;
+ state_machine *sm;
+ FOR_EACH_VEC_ELT (m_checkers, i, sm)
+ if (0 == strcmp (name, sm->get_name ()))
+ {
+ /* Found NAME. */
+ *out = i;
+ return true;
+ }
+
+ /* NAME not found. */
+ return false;
+}
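+
+/* For example, get_sm_idx_by_name ("malloc", &idx) would be expected
+ to find the index of the malloc state machine, assuming it is
+ registered under that name. */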
+
/* struct sm_state_map::entry_t. */
int
@@ -372,21 +393,31 @@ sm_state_map::get_state (const svalue *sval,
INIT_VAL(foo). */
if (m_sm.inherited_state_p ())
if (region_model_manager *mgr = ext_state.get_model_manager ())
- if (const initial_svalue *init_sval = sval->dyn_cast_initial_svalue ())
- {
- const region *reg = init_sval->get_region ();
- /* Try recursing upwards (up to the base region for the cluster). */
- if (!reg->base_region_p ())
- if (const region *parent_reg = reg->get_parent_region ())
- {
- const svalue *parent_init_sval
- = mgr->get_or_create_initial_value (parent_reg);
- state_machine::state_t parent_state
- = get_state (parent_init_sval, ext_state);
- if (parent_state)
- return parent_state;
- }
- }
+ {
+ if (const initial_svalue *init_sval = sval->dyn_cast_initial_svalue ())
+ {
+ const region *reg = init_sval->get_region ();
+ /* Try recursing upwards (up to the base region for the
+ cluster). */
+ if (!reg->base_region_p ())
+ if (const region *parent_reg = reg->get_parent_region ())
+ {
+ const svalue *parent_init_sval
+ = mgr->get_or_create_initial_value (parent_reg);
+ state_machine::state_t parent_state
+ = get_state (parent_init_sval, ext_state);
+ if (parent_state)
+ return parent_state;
+ }
+ }
+ else if (const sub_svalue *sub_sval = sval->dyn_cast_sub_svalue ())
+ {
+ const svalue *parent_sval = sub_sval->get_parent ();
+ if (state_machine::state_t parent_state
+ = get_state (parent_sval, ext_state))
+ return parent_state;
+ }
+ }
return m_sm.get_default_state (sval);
}
@@ -422,8 +453,8 @@ sm_state_map::set_state (region_model *model,
if (model == NULL)
return;
- /* Reject attempts to set state on UNKNOWN. */
- if (sval->get_kind () == SK_UNKNOWN)
+ /* Reject attempts to set state on UNKNOWN/POISONED. */
+ if (!sval->can_have_associated_state_p ())
return;
equiv_class &ec = model->get_constraints ()->get_equiv_class (sval);
@@ -441,10 +472,8 @@ sm_state_map::set_state (const equiv_class &ec,
const svalue *origin,
const extrinsic_state &ext_state)
{
- int i;
- const svalue *sval;
bool any_changed = false;
- FOR_EACH_VEC_ELT (ec.m_vars, i, sval)
+ for (const svalue *sval : ec.m_vars)
any_changed |= impl_set_state (sval, state, origin, ext_state);
return any_changed;
}
@@ -463,6 +492,8 @@ sm_state_map::impl_set_state (const svalue *sval,
if (get_state (sval, ext_state) == state)
return false;
+ gcc_assert (sval->can_have_associated_state_p ());
+
/* Special-case state 0 as the default value. */
if (state == 0)
{
@@ -516,6 +547,7 @@ sm_state_map::on_liveness_change (const svalue_set &live_svalues,
impl_region_model_context *ctxt)
{
svalue_set svals_to_unset;
+ uncertainty_t *uncertainty = ctxt->get_uncertainty ();
auto_vec<const svalue *> leaked_svals (m_map.elements ());
for (map_t::iterator iter = m_map.begin ();
@@ -530,6 +562,9 @@ sm_state_map::on_liveness_change (const svalue_set &live_svalues,
if (!m_sm.can_purge_p (e.m_state))
leaked_svals.quick_push (iter_sval);
}
+ if (uncertainty)
+ if (uncertainty->unknown_sm_state_p (iter_sval))
+ svals_to_unset.add (iter_sval);
}
leaked_svals.qsort (svalue::cmp_ptr_ptr);
@@ -586,6 +621,37 @@ sm_state_map::on_unknown_change (const svalue *sval,
impl_set_state (*iter, (state_machine::state_t)0, NULL, ext_state);
}
+/* Purge state for things involving SVAL.
+ For use when SVAL changes meaning, at the def_stmt on an SSA_NAME. */
+
+void
+sm_state_map::purge_state_involving (const svalue *sval,
+ const extrinsic_state &ext_state)
+{
+ /* Currently svalue::involves_p requires this. */
+ if (!(sval->get_kind () == SK_INITIAL
+ || sval->get_kind () == SK_CONJURED))
+ return;
+
+ svalue_set svals_to_unset;
+
+ for (map_t::iterator iter = m_map.begin ();
+ iter != m_map.end ();
+ ++iter)
+ {
+ const svalue *key = (*iter).first;
+ entry_t e = (*iter).second;
+ if (!m_sm.can_purge_p (e.m_state))
+ continue;
+ if (key->involves_p (sval))
+ svals_to_unset.add (key);
+ }
+
+ for (svalue_set::iterator iter = svals_to_unset.begin ();
+ iter != svals_to_unset.end (); ++iter)
+ impl_set_state (*iter, (state_machine::state_t)0, NULL, ext_state);
+}
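+
+/* For instance, if an SSA name was bound to a conjured value from one
+ call to fgets and the name is then written to again at its def-stmt,
+ sm-state keyed off svalues involving the old conjured value can be
+ purged rather than lingering with a stale meaning (a sketch of the
+ intent; see the fgets/fread handling elsewhere in this patch). */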
+
/* Comparator for imposing an order on sm_state_map instances. */
int
@@ -697,7 +763,6 @@ program_state::operator= (const program_state &other)
return *this;
}
-#if __cplusplus >= 201103
/* Move constructor for program_state (when building with C++11). */
program_state::program_state (program_state &&other)
: m_region_model (other.m_region_model),
@@ -713,7 +778,6 @@ program_state::program_state (program_state &&other)
m_valid = other.m_valid;
}
-#endif
/* program_state's dtor. */
@@ -929,11 +993,12 @@ program_state::get_current_function () const
bool
program_state::on_edge (exploded_graph &eg,
- const exploded_node &enode,
- const superedge *succ)
+ exploded_node *enode,
+ const superedge *succ,
+ uncertainty_t *uncertainty)
{
/* Update state. */
- const program_point &point = enode.get_point ();
+ const program_point &point = enode->get_point ();
const gimple *last_stmt = point.get_supernode ()->get_last_stmt ();
/* For conditionals and switch statements, add the
@@ -945,9 +1010,10 @@ program_state::on_edge (exploded_graph &eg,
sm-state transitions (e.g. transitions due to ptrs becoming known
to be NULL or non-NULL) */
- impl_region_model_context ctxt (eg, &enode,
- &enode.get_state (),
+ impl_region_model_context ctxt (eg, enode,
+ &enode->get_state (),
this,
+ uncertainty, NULL,
last_stmt);
if (!m_region_model->maybe_update_for_edge (*succ,
last_stmt,
@@ -961,13 +1027,59 @@ program_state::on_edge (exploded_graph &eg,
return false;
}
- program_state::detect_leaks (enode.get_state (), *this,
- NULL, eg.get_ext_state (),
- &ctxt);
+ program_state::detect_leaks (enode->get_state (), *this,
+ NULL, eg.get_ext_state (),
+ &ctxt);
return true;
}
+/* Update this program_state to reflect a call to the function
+ represented by CALL_STMT.
+ Currently used only when the call doesn't have a superedge representing
+ the call (e.g. a call via a function pointer). */
+void
+program_state::push_call (exploded_graph &eg,
+ exploded_node *enode,
+ const gcall *call_stmt,
+ uncertainty_t *uncertainty)
+{
+ /* Update state. */
+ const program_point &point = enode->get_point ();
+ const gimple *last_stmt = point.get_supernode ()->get_last_stmt ();
+
+ impl_region_model_context ctxt (eg, enode,
+ &enode->get_state (),
+ this,
+ uncertainty,
+ NULL,
+ last_stmt);
+ m_region_model->update_for_gcall (call_stmt, &ctxt);
+}
+
+/* Update this program_state to reflect a return from the function
+ call represented by CALL_STMT.
+ Currently used only when the call doesn't have a superedge representing
+ the return. */
+void
+program_state::returning_call (exploded_graph &eg,
+ exploded_node *enode,
+ const gcall *call_stmt,
+ uncertainty_t *uncertainty)
+{
+ /* Update state. */
+ const program_point &point = enode->get_point ();
+ const gimple *last_stmt = point.get_supernode ()->get_last_stmt ();
+
+ impl_region_model_context ctxt (eg, enode,
+ &enode->get_state (),
+ this,
+ uncertainty,
+ NULL,
+ last_stmt);
+ m_region_model->update_for_return_gcall (call_stmt, &ctxt);
+}
+
/* Generate a simpler version of THIS, discarding state that's no longer
relevant at POINT.
The idea is that we're more likely to be able to consolidate
@@ -977,7 +1089,8 @@ program_state::on_edge (exploded_graph &eg,
program_state
program_state::prune_for_point (exploded_graph &eg,
const program_point &point,
- const exploded_node *enode_for_diag) const
+ exploded_node *enode_for_diag,
+ uncertainty_t *uncertainty) const
{
logger * const logger = eg.get_logger ();
LOG_SCOPE (logger);
@@ -1017,7 +1130,7 @@ program_state::prune_for_point (exploded_graph &eg,
temporaries keep the value reachable until the frame is
popped. */
const svalue *sval
- = new_state.m_region_model->get_store_value (reg);
+ = new_state.m_region_model->get_store_value (reg, NULL);
if (!new_state.can_purge_p (eg.get_ext_state (), sval)
&& SSA_NAME_VAR (ssa_name))
{
@@ -1041,6 +1154,7 @@ program_state::prune_for_point (exploded_graph &eg,
impl_region_model_context ctxt (eg, enode_for_diag,
this,
&new_state,
+ uncertainty, NULL,
point.get_stmt ());
detect_leaks (*this, new_state, NULL, eg.get_ext_state (), &ctxt);
}
@@ -1108,6 +1222,7 @@ program_state::validate (const extrinsic_state &ext_state) const
#endif
gcc_assert (m_checker_states.length () == ext_state.get_num_checkers ());
+ m_region_model->validate ();
}
static void
@@ -1159,6 +1274,7 @@ program_state::detect_leaks (const program_state &src_state,
{
logger *logger = ext_state.get_logger ();
LOG_SCOPE (logger);
+ const uncertainty_t *uncertainty = ctxt->get_uncertainty ();
if (logger)
{
pretty_printer *pp = logger->get_printer ();
@@ -1177,31 +1293,46 @@ program_state::detect_leaks (const program_state &src_state,
extra_sval->dump_to_pp (pp, true);
logger->end_log_line ();
}
+ if (uncertainty)
+ {
+ logger->start_log_line ();
+ pp_string (pp, "uncertainty: ");
+ uncertainty->dump_to_pp (pp, true);
+ logger->end_log_line ();
+ }
}
- /* Get svalues reachable from each of src_state and dst_state. */
- svalue_set src_svalues;
- svalue_set dest_svalues;
- src_state.m_region_model->get_reachable_svalues (&src_svalues, NULL);
- dest_state.m_region_model->get_reachable_svalues (&dest_svalues, extra_sval);
+ /* Get svalues reachable from each of src_state and dest_state.
+ Get svalues *known* to be reachable in src_state.
+ Pass in uncertainty for dest_state so that we additionally get svalues that
+ *might* still be reachable in dest_state. */
+ svalue_set known_src_svalues;
+ src_state.m_region_model->get_reachable_svalues (&known_src_svalues,
+ NULL, NULL);
+ svalue_set maybe_dest_svalues;
+ dest_state.m_region_model->get_reachable_svalues (&maybe_dest_svalues,
+ extra_sval, uncertainty);
if (logger)
{
- log_set_of_svalues (logger, "src_state reachable svalues:", src_svalues);
- log_set_of_svalues (logger, "dest_state reachable svalues:",
- dest_svalues);
+ log_set_of_svalues (logger, "src_state known reachable svalues:",
+ known_src_svalues);
+ log_set_of_svalues (logger, "dest_state maybe reachable svalues:",
+ maybe_dest_svalues);
}
- auto_vec <const svalue *> dead_svals (src_svalues.elements ());
- for (svalue_set::iterator iter = src_svalues.begin ();
- iter != src_svalues.end (); ++iter)
+ auto_vec <const svalue *> dead_svals (known_src_svalues.elements ());
+ for (svalue_set::iterator iter = known_src_svalues.begin ();
+ iter != known_src_svalues.end (); ++iter)
{
const svalue *sval = (*iter);
/* For each sval reachable from SRC_STATE, determine if it is
- live in DEST_STATE: either explicitly reachable, or implicitly
- live based on the set of explicitly reachable svalues.
- Record those that have ceased to be live. */
- if (!sval->live_p (&dest_svalues, dest_state.m_region_model))
+ live in DEST_STATE: either explicitly reachable, implicitly
+ live based on the set of explicitly reachable svalues,
+ or possibly reachable as recorded in uncertainty.
+ Record those that have ceased to be live, i.e. were known
+ to be live, and are now not known to be even possibly-live. */
+ if (!sval->live_p (&maybe_dest_svalues, dest_state.m_region_model))
dead_svals.quick_push (sval);
}
@@ -1214,11 +1345,46 @@ program_state::detect_leaks (const program_state &src_state,
ctxt->on_svalue_leak (sval);
/* Purge dead svals from sm-state. */
- ctxt->on_liveness_change (dest_svalues, dest_state.m_region_model);
+ ctxt->on_liveness_change (maybe_dest_svalues,
+ dest_state.m_region_model);
/* Purge dead svals from constraints. */
dest_state.m_region_model->get_constraints ()->on_liveness_change
- (dest_svalues, dest_state.m_region_model);
+ (maybe_dest_svalues, dest_state.m_region_model);
+
+ /* Purge dead heap-allocated regions from dynamic extents. */
+ for (const svalue *sval : dead_svals)
+ if (const region *reg = sval->maybe_get_region ())
+ if (reg->get_kind () == RK_HEAP_ALLOCATED)
+ dest_state.m_region_model->unset_dynamic_extents (reg);
+}
+
+/* Handle calls to "__analyzer_dump_state". */
+
+void
+program_state::impl_call_analyzer_dump_state (const gcall *call,
+ const extrinsic_state &ext_state,
+ region_model_context *ctxt)
+{
+ call_details cd (call, m_region_model, ctxt);
+ const char *sm_name = cd.get_arg_string_literal (0);
+ if (!sm_name)
+ {
+ error_at (call->location, "cannot determine state machine");
+ return;
+ }
+ unsigned sm_idx;
+ if (!ext_state.get_sm_idx_by_name (sm_name, &sm_idx))
+ {
+ error_at (call->location, "unrecognized state machine %qs", sm_name);
+ return;
+ }
+ const sm_state_map *smap = m_checker_states[sm_idx];
+
+ const svalue *sval = cd.get_arg_svalue (1);
+
+ state_machine::state_t state = smap->get_state (sval, ext_state);
+ warning_at (call->location, 0, "state: %qs", state->get_name ());
}
#if CHECKING_P
@@ -1375,7 +1541,7 @@ test_program_state_1 ()
program_state s (ext_state);
region_model *model = s.m_region_model;
const svalue *size_in_bytes
- = mgr->get_or_create_unknown_svalue (integer_type_node);
+ = mgr->get_or_create_unknown_svalue (size_type_node);
const region *new_reg = model->create_region_for_heap_alloc (size_in_bytes);
const svalue *ptr_sval = mgr->get_ptr_svalue (ptr_type_node, new_reg);
model->set_value (model->get_lvalue (p, NULL),
@@ -1426,11 +1592,12 @@ test_program_state_merging ()
region_model_manager *mgr = eng.get_model_manager ();
program_state s0 (ext_state);
- impl_region_model_context ctxt (&s0, ext_state);
+ uncertainty_t uncertainty;
+ impl_region_model_context ctxt (&s0, ext_state, &uncertainty);
region_model *model0 = s0.m_region_model;
const svalue *size_in_bytes
- = mgr->get_or_create_unknown_svalue (integer_type_node);
+ = mgr->get_or_create_unknown_svalue (size_type_node);
const region *new_reg = model0->create_region_for_heap_alloc (size_in_bytes);
const svalue *ptr_sval = mgr->get_ptr_svalue (ptr_type_node, new_reg);
model0->set_value (model0->get_lvalue (p, &ctxt),
diff --git a/gcc/analyzer/program-state.h b/gcc/analyzer/program-state.h
index d72945d..eb49006 100644
--- a/gcc/analyzer/program-state.h
+++ b/gcc/analyzer/program-state.h
@@ -58,6 +58,8 @@ public:
engine *get_engine () const { return m_engine; }
region_model_manager *get_model_manager () const;
+ bool get_sm_idx_by_name (const char *name, unsigned *out) const;
+
private:
/* The state machines. */
auto_delete_vec <state_machine> &m_checkers;
@@ -157,6 +159,9 @@ public:
bool is_mutable,
const extrinsic_state &ext_state);
+ void purge_state_involving (const svalue *sval,
+ const extrinsic_state &ext_state);
+
iterator_t begin () const { return m_map.begin (); }
iterator_t end () const { return m_map.end (); }
size_t elements () const { return m_map.elements (); }
@@ -189,11 +194,7 @@ public:
program_state (const extrinsic_state &ext_state);
program_state (const program_state &other);
program_state& operator= (const program_state &other);
-
-#if __cplusplus >= 201103
program_state (program_state &&other);
-#endif
-
~program_state ();
hashval_t hash () const;
@@ -217,13 +218,26 @@ public:
void push_frame (const extrinsic_state &ext_state, function *fun);
function * get_current_function () const;
+ void push_call (exploded_graph &eg,
+ exploded_node *enode,
+ const gcall *call_stmt,
+ uncertainty_t *uncertainty);
+
+ void returning_call (exploded_graph &eg,
+ exploded_node *enode,
+ const gcall *call_stmt,
+ uncertainty_t *uncertainty);
+
+
bool on_edge (exploded_graph &eg,
- const exploded_node &enode,
- const superedge *succ);
+ exploded_node *enode,
+ const superedge *succ,
+ uncertainty_t *uncertainty);
program_state prune_for_point (exploded_graph &eg,
const program_point &point,
- const exploded_node *enode_for_diag) const;
+ exploded_node *enode_for_diag,
+ uncertainty_t *uncertainty) const;
tree get_representative_tree (const svalue *sval) const;
@@ -255,6 +269,10 @@ public:
const extrinsic_state &ext_state,
region_model_context *ctxt);
+ void impl_call_analyzer_dump_state (const gcall *call,
+ const extrinsic_state &ext_state,
+ region_model_context *ctxt);
+
/* TODO: lose the pointer here (const-correctness issues?). */
region_model *m_region_model;
auto_delete_vec<sm_state_map> m_checker_states;
diff --git a/gcc/analyzer/region-model-asm.cc b/gcc/analyzer/region-model-asm.cc
new file mode 100644
index 0000000..3efc3fd
--- /dev/null
+++ b/gcc/analyzer/region-model-asm.cc
@@ -0,0 +1,303 @@
+/* Handling inline asm in the analyzer.
+ Copyright (C) 2021 Free Software Foundation, Inc.
+ Contributed by David Malcolm <dmalcolm@redhat.com>.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tree.h"
+#include "function.h"
+#include "basic-block.h"
+#include "gimple.h"
+#include "gimple-iterator.h"
+#include "diagnostic-core.h"
+#include "pretty-print.h"
+#include "tristate.h"
+#include "selftest.h"
+#include "json.h"
+#include "analyzer/analyzer.h"
+#include "analyzer/analyzer-logging.h"
+#include "options.h"
+#include "analyzer/call-string.h"
+#include "analyzer/program-point.h"
+#include "analyzer/store.h"
+#include "analyzer/region-model.h"
+#include "analyzer/region-model-reachability.h"
+#include "stmt.h"
+
+#if ENABLE_ANALYZER
+
+namespace ana {
+
+/* Minimal asm support for the analyzer.
+
+ The objective of this code is to:
+ - minimize false positives from the analyzer on the Linux kernel
+ (which makes heavy use of inline asm), whilst
+ - avoiding having to "teach" the compiler anything about specific strings
+ in asm statements.
+
+ Specifically, we want to:
+
+ (a) mark asm outputs and certain other regions as having been written to,
+ to avoid false positives from -Wanalyzer-use-of-uninitialized-value.
+
+ (b) identify some of these stmts as "deterministic" so that we can
+ write consistent outputs given consistent inputs, so that we can
+ avoid false positives for paths in which an asm is invoked twice
+ with the same inputs and is expected to emit the same output.
+
+ This file implements heuristics for achieving the above. */
+
+/* Determine if ASM_STMT is deterministic, in the sense of (b) above.
+
+ Consider this x86 function taken from the Linux kernel
+ (arch/x86/include/asm/barrier.h):
+
+ static inline unsigned long array_index_mask_nospec(unsigned long index,
+ unsigned long size)
+ {
+ unsigned long mask;
+
+ asm volatile ("cmp %1,%2; sbb %0,%0;"
+ :"=r" (mask)
+ :"g"(size),"r" (index)
+ :"cc");
+ return mask;
+ }
+
+ The above is a mitigation for Spectre-variant-1 attacks, for clamping
+ an array access to within the range of [0, size] if the CPU speculates
+ past the array bounds.
+
+ However, it is ultimately used to implement wdev_to_wvif:
+
+ static inline struct wfx_vif *
+ wdev_to_wvif(struct wfx_dev *wdev, int vif_id)
+ {
+ vif_id = array_index_nospec(vif_id, ARRAY_SIZE(wdev->vif));
+ if (!wdev->vif[vif_id]) {
+ return NULL;
+ }
+ return (struct wfx_vif *)wdev->vif[vif_id]->drv_priv;
+ }
+
+ which is used by:
+
+ if (wdev_to_wvif(wvif->wdev, 1))
+ return wdev_to_wvif(wvif->wdev, 1)->vif;
+
+ The code has been written to assume that wdev_to_wvif is deterministic,
+ and won't change from returning non-NULL at the "if" clause to
+ returning NULL at the "->vif" dereference.
+
+ By treating the above specific "asm volatile" as deterministic we avoid
+ a false positive from -Wanalyzer-null-dereference. */
+
+static bool
+deterministic_p (const gasm *asm_stmt)
+{
+ /* Assume something volatile with no inputs is querying
+ changeable state, e.g. rdtsc. */
+ if (gimple_asm_ninputs (asm_stmt) == 0
+ && gimple_asm_volatile_p (asm_stmt))
+ return false;
+
+ /* Otherwise assume it's purely a function of its inputs. */
+ return true;
+}
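+
+/* For example, something along the lines of:
+
+ asm volatile ("rdtsc" : "=a" (lo), "=d" (hi));
+
+ is volatile and has no inputs, so it is treated as non-deterministic,
+ whereas the array_index_mask_nospec asm quoted above has inputs and
+ is assumed to be a pure function of them (a heuristic, not a semantic
+ analysis of the asm string). */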
+
+/* Update this model for the asm STMT, using CTXT to report any
+ diagnostics.
+
+ Compare with cfgexpand.c: expand_asm_stmt. */
+
+void
+region_model::on_asm_stmt (const gasm *stmt, region_model_context *ctxt)
+{
+ logger *logger = ctxt ? ctxt->get_logger () : NULL;
+ LOG_SCOPE (logger);
+
+ const unsigned noutputs = gimple_asm_noutputs (stmt);
+ const unsigned ninputs = gimple_asm_ninputs (stmt);
+
+ auto_vec<tree> output_tvec;
+ auto_vec<tree> input_tvec;
+ auto_vec<const char *> constraints;
+
+ /* Copy the gimple vectors into new vectors that we can manipulate. */
+ output_tvec.safe_grow (noutputs, true);
+ input_tvec.safe_grow (ninputs, true);
+ constraints.safe_grow (noutputs + ninputs, true);
+
+ for (unsigned i = 0; i < noutputs; ++i)
+ {
+ tree t = gimple_asm_output_op (stmt, i);
+ output_tvec[i] = TREE_VALUE (t);
+ constraints[i] = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t)));
+ }
+ for (unsigned i = 0; i < ninputs; i++)
+ {
+ tree t = gimple_asm_input_op (stmt, i);
+ input_tvec[i] = TREE_VALUE (t);
+ constraints[i + noutputs]
+ = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t)));
+ }
+
+ /* Determine which regions are reachable from the inputs
+ to this stmt. */
+ reachable_regions reachable_regs (this);
+
+ int num_errors = 0;
+
+ auto_vec<const region *> output_regions (noutputs);
+ for (unsigned i = 0; i < noutputs; ++i)
+ {
+ tree val = output_tvec[i];
+ const char *constraint;
+ bool is_inout;
+ bool allows_reg;
+ bool allows_mem;
+
+ const region *dst_reg = get_lvalue (val, ctxt);
+ output_regions.quick_push (dst_reg);
+ reachable_regs.add (dst_reg, true);
+
+ /* Try to parse the output constraint. If that fails, there's
+ no point in going further. */
+ constraint = constraints[i];
+ if (!parse_output_constraint (&constraint, i, ninputs, noutputs,
+ &allows_mem, &allows_reg, &is_inout))
+ {
+ if (logger)
+ logger->log ("error parsing constraint for output %i: %qs",
+ i, constraint);
+ num_errors++;
+ continue;
+ }
+
+ if (logger)
+ {
+ logger->log ("output %i: %qs %qE"
+ " is_inout: %i allows_reg: %i allows_mem: %i",
+ i, constraint, val,
+ (int)is_inout, (int)allows_reg, (int)allows_mem);
+ logger->start_log_line ();
+ logger->log_partial (" region: ");
+ dst_reg->dump_to_pp (logger->get_printer (), true);
+ logger->end_log_line ();
+ }
+
+ }
+
+ /* Ideally we should combine this with inout_svals to determine the
+ "effective inputs" and use this for the asm_output_svalue. */
+
+ auto_vec<const svalue *> input_svals (ninputs);
+ for (unsigned i = 0; i < ninputs; i++)
+ {
+ tree val = input_tvec[i];
+ const char *constraint = constraints[i + noutputs];
+ bool allows_reg, allows_mem;
+ if (! parse_input_constraint (&constraint, i, ninputs, noutputs, 0,
+ constraints.address (),
+ &allows_mem, &allows_reg))
+ {
+ if (logger)
+ logger->log ("error parsing constraint for input %i: %qs",
+ i, constraint);
+ num_errors++;
+ continue;
+ }
+
+ tree src_expr = input_tvec[i];
+ const svalue *src_sval = get_rvalue (src_expr, ctxt);
+ check_for_poison (src_sval, src_expr, ctxt);
+ input_svals.quick_push (src_sval);
+ reachable_regs.handle_sval (src_sval);
+
+ if (logger)
+ {
+ logger->log ("input %i: %qs %qE"
+ " allows_reg: %i allows_mem: %i",
+ i, constraint, val,
+ (int)allows_reg, (int)allows_mem);
+ logger->start_log_line ();
+ logger->log_partial (" sval: ");
+ src_sval->dump_to_pp (logger->get_printer (), true);
+ logger->end_log_line ();
+ }
+ }
+
+ if (num_errors > 0)
+ gcc_unreachable ();
+
+ if (logger)
+ {
+ logger->log ("reachability: ");
+ reachable_regs.dump_to_pp (logger->get_printer ());
+ logger->end_log_line ();
+ }
+
+ /* Given the regions that were reachable from the inputs we
+ want to clobber them.
+ This is similar to region_model::handle_unrecognized_call,
+ but the unknown call policies seem too aggressive (e.g. purging state
+ from anything that's ever escaped). Instead, clobber any clusters
+ that were reachable in *this* asm stmt, rather than those that
+ escaped, and we don't treat the values as having escaped.
+ We also assume that asm stmts don't affect sm-state. */
+ for (auto iter = reachable_regs.begin_mutable_base_regs ();
+ iter != reachable_regs.end_mutable_base_regs (); ++iter)
+ {
+ const region *base_reg = *iter;
+ if (base_reg->symbolic_for_unknown_ptr_p ())
+ continue;
+
+ binding_cluster *cluster = m_store.get_or_create_cluster (base_reg);
+ cluster->on_asm (stmt, m_mgr->get_store_manager ());
+ }
+
+ /* Update the outputs. */
+ for (unsigned output_idx = 0; output_idx < noutputs; output_idx++)
+ {
+ tree dst_expr = output_tvec[output_idx];
+ const region *dst_reg = output_regions[output_idx];
+
+ const svalue *sval;
+ if (deterministic_p (stmt)
+ && input_svals.length () <= asm_output_svalue::MAX_INPUTS)
+ sval = m_mgr->get_or_create_asm_output_svalue (TREE_TYPE (dst_expr),
+ stmt,
+ output_idx,
+ input_svals);
+ else
+ {
+ sval = m_mgr->get_or_create_conjured_svalue (TREE_TYPE (dst_expr),
+ stmt,
+ dst_reg);
+ purge_state_involving (sval, ctxt);
+ }
+ set_value (dst_reg, sval, ctxt);
+ }
+}
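+
+/* So, for the array_index_mask_nospec asm quoted at the top of this
+ file, "mask" would be bound to an asm_output_svalue over the svalues
+ of "size" and "index"; two executions of that asm with equal inputs
+ therefore yield equal outputs, which is what makes the
+ wdev_to_wvif-style idiom above analyzable without a false positive. */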
+
+} // namespace ana
+
+#endif /* #if ENABLE_ANALYZER */
diff --git a/gcc/analyzer/region-model-impl-calls.cc b/gcc/analyzer/region-model-impl-calls.cc
index f83c12b..ff2ae9c 100644
--- a/gcc/analyzer/region-model-impl-calls.cc
+++ b/gcc/analyzer/region-model-impl-calls.cc
@@ -56,6 +56,7 @@ along with GCC; see the file COPYING3. If not see
#include "analyzer/program-point.h"
#include "analyzer/store.h"
#include "analyzer/region-model.h"
+#include "analyzer/call-info.h"
#include "gimple-pretty-print.h"
#if ENABLE_ANALYZER
@@ -79,6 +80,17 @@ call_details::call_details (const gcall *call, region_model *model,
}
}
+/* Get any uncertainty_t associated with the region_model_context. */
+
+uncertainty_t *
+call_details::get_uncertainty () const
+{
+ if (m_ctxt)
+ return m_ctxt->get_uncertainty ();
+ else
+ return NULL;
+}
+
/* If the callsite has a left-hand-side region, set it to RESULT
and return true.
Otherwise do nothing and return false. */
@@ -129,6 +141,33 @@ call_details::get_arg_svalue (unsigned idx) const
return m_model->get_rvalue (arg, m_ctxt);
}
+/* Attempt to get the string literal for argument IDX, or return NULL
+ otherwise.
+ For use when implementing "__analyzer_*" functions that take
+ string literals. */
+
+const char *
+call_details::get_arg_string_literal (unsigned idx) const
+{
+ const svalue *str_arg = get_arg_svalue (idx);
+ if (const region *pointee = str_arg->maybe_get_region ())
+ if (const string_region *string_reg = pointee->dyn_cast_string_region ())
+ {
+ tree string_cst = string_reg->get_string_cst ();
+ return TREE_STRING_POINTER (string_cst);
+ }
+ return NULL;
+}
+
+/* Attempt to get the fndecl used at this call, if known, or NULL_TREE
+ otherwise. */
+
+tree
+call_details::get_fndecl_for_call () const
+{
+ return m_model->get_fndecl_for_call (m_call, m_ctxt);
+}
+
/* Dump a multiline representation of this call to PP. */
void
@@ -165,11 +204,20 @@ call_details::dump (bool simple) const
pp_flush (&pp);
}
+/* Get a conjured_svalue for this call for REG. */
+
+const svalue *
+call_details::get_or_create_conjured_svalue (const region *reg) const
+{
+ region_model_manager *mgr = m_model->get_manager ();
+ return mgr->get_or_create_conjured_svalue (reg->get_type (), m_call, reg);
+}
+
/* Implementations of specific functions. */
/* Handle the on_call_pre part of "alloca". */
-bool
+void
region_model::impl_call_alloca (const call_details &cd)
{
const svalue *size_sval = cd.get_arg_svalue (0);
@@ -177,7 +225,6 @@ region_model::impl_call_alloca (const call_details &cd)
const svalue *ptr_sval
= m_mgr->get_ptr_svalue (cd.get_lhs_type (), new_reg);
cd.maybe_set_lhs (ptr_sval);
- return true;
}
/* Handle a call to "__analyzer_describe".
@@ -198,6 +245,25 @@ region_model::impl_call_analyzer_describe (const gcall *call,
warning_at (call->location, 0, "svalue: %qs", desc.m_buffer);
}
+/* Handle a call to "__analyzer_dump_capacity".
+
+ Emit a warning describing the capacity of the base region of
+ the region pointed to by the 1st argument.
+ This is for use when debugging, and may be of use in DejaGnu tests. */
+
+void
+region_model::impl_call_analyzer_dump_capacity (const gcall *call,
+ region_model_context *ctxt)
+{
+ tree t_ptr = gimple_call_arg (call, 0);
+ const svalue *sval_ptr = get_rvalue (t_ptr, ctxt);
+ const region *reg = deref_rvalue (sval_ptr, t_ptr, ctxt);
+ const region *base_reg = reg->get_base_region ();
+ const svalue *capacity = get_capacity (base_reg);
+ label_text desc = capacity->get_desc (true);
+ warning_at (call->location, 0, "capacity: %qs", desc.m_buffer);
+}
+
/* Handle a call to "__analyzer_eval" by evaluating the input
and dumping as a dummy warning, so that test cases can use
dg-warning to validate the result (and so unexpected warnings will
@@ -217,18 +283,17 @@ region_model::impl_call_analyzer_eval (const gcall *call,
/* Handle the on_call_pre part of "__builtin_expect" etc. */
-bool
+void
region_model::impl_call_builtin_expect (const call_details &cd)
{
/* __builtin_expect's return value is its initial argument. */
const svalue *sval = cd.get_arg_svalue (0);
cd.maybe_set_lhs (sval);
- return false;
}
/* Handle the on_call_pre part of "calloc". */
-bool
+void
region_model::impl_call_calloc (const call_details &cd)
{
const svalue *nmemb_sval = cd.get_arg_svalue (0);
@@ -245,7 +310,6 @@ region_model::impl_call_calloc (const call_details &cd)
= m_mgr->get_ptr_svalue (cd.get_lhs_type (), new_reg);
cd.maybe_set_lhs (ptr_sval);
}
- return true;
}
/* Handle the on_call_pre part of "error" and "error_at_line" from
@@ -278,6 +342,38 @@ region_model::impl_call_error (const call_details &cd, unsigned min_args,
return true;
}
+/* Handle the on_call_pre part of "fgets" and "fgets_unlocked". */
+
+void
+region_model::impl_call_fgets (const call_details &cd)
+{
+ /* Ideally we would bifurcate state here between the
+ error vs no error cases. */
+ const svalue *ptr_sval = cd.get_arg_svalue (0);
+ if (const region *reg = ptr_sval->maybe_get_region ())
+ {
+ const region *base_reg = reg->get_base_region ();
+ const svalue *new_sval = cd.get_or_create_conjured_svalue (base_reg);
+ purge_state_involving (new_sval, cd.get_ctxt ());
+ set_value (base_reg, new_sval, cd.get_ctxt ());
+ }
+}
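+
+/* For example, in a loop such as:
+
+ while (fgets (buf, sizeof buf, fp))
+ ...
+
+ each call binds the base region of "buf" to a fresh conjured value
+ and purges state involving the previous one, so sm-state is not
+ carried over between iterations (a sketch of the intent; "fread"
+ below is handled analogously). */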
+
+/* Handle the on_call_pre part of "fread". */
+
+void
+region_model::impl_call_fread (const call_details &cd)
+{
+ const svalue *ptr_sval = cd.get_arg_svalue (0);
+ if (const region *reg = ptr_sval->maybe_get_region ())
+ {
+ const region *base_reg = reg->get_base_region ();
+ const svalue *new_sval = cd.get_or_create_conjured_svalue (base_reg);
+ purge_state_involving (new_sval, cd.get_ctxt ());
+ set_value (base_reg, new_sval, cd.get_ctxt ());
+ }
+}
+
/* Handle the on_call_post part of "free", after sm-handling.
If the ptr points to an underlying heap region, delete the region,
@@ -297,19 +393,18 @@ void
region_model::impl_call_free (const call_details &cd)
{
const svalue *ptr_sval = cd.get_arg_svalue (0);
- if (const region_svalue *ptr_to_region_sval
- = ptr_sval->dyn_cast_region_svalue ())
+ if (const region *freed_reg = ptr_sval->maybe_get_region ())
{
/* If the ptr points to an underlying heap region, delete it,
poisoning pointers. */
- const region *freed_reg = ptr_to_region_sval->get_pointee ();
unbind_region_and_descendents (freed_reg, POISON_KIND_FREED);
+ m_dynamic_extents.remove (freed_reg);
}
}
/* Handle the on_call_pre part of "malloc". */
-bool
+void
region_model::impl_call_malloc (const call_details &cd)
{
const svalue *size_sval = cd.get_arg_svalue (0);
@@ -320,7 +415,6 @@ region_model::impl_call_malloc (const call_details &cd)
= m_mgr->get_ptr_svalue (cd.get_lhs_type (), new_reg);
cd.maybe_set_lhs (ptr_sval);
}
- return true;
}
/* Handle the on_call_pre part of "memcpy" and "__builtin_memcpy". */
@@ -343,15 +437,15 @@ region_model::impl_call_memcpy (const call_details &cd)
return;
}
- check_for_writable_region (dest_reg, cd.get_ctxt ());
+ check_region_for_write (dest_reg, cd.get_ctxt ());
/* Otherwise, mark region's contents as unknown. */
- mark_region_as_unknown (dest_reg);
+ mark_region_as_unknown (dest_reg, cd.get_uncertainty ());
}
/* Handle the on_call_pre part of "memset" and "__builtin_memset". */
-bool
+void
region_model::impl_call_memset (const call_details &cd)
{
const svalue *dest_sval = cd.get_arg_svalue (0);
@@ -361,41 +455,19 @@ region_model::impl_call_memset (const call_details &cd)
const region *dest_reg = deref_rvalue (dest_sval, cd.get_arg_tree (0),
cd.get_ctxt ());
- if (tree num_bytes = num_bytes_sval->maybe_get_constant ())
- {
- /* "memset" of zero size is a no-op. */
- if (zerop (num_bytes))
- return true;
+ const svalue *fill_value_u8
+ = m_mgr->get_or_create_cast (unsigned_char_type_node, fill_value_sval);
- /* Set with known amount. */
- byte_size_t reg_size;
- if (dest_reg->get_byte_size (&reg_size))
- {
- /* Check for an exact size match. */
- if (reg_size == wi::to_offset (num_bytes))
- {
- if (tree cst = fill_value_sval->maybe_get_constant ())
- {
- if (zerop (cst))
- {
- zero_fill_region (dest_reg);
- return true;
- }
- }
- }
- }
- }
-
- check_for_writable_region (dest_reg, cd.get_ctxt ());
-
- /* Otherwise, mark region's contents as unknown. */
- mark_region_as_unknown (dest_reg);
- return false;
+ const region *sized_dest_reg = m_mgr->get_sized_region (dest_reg,
+ NULL_TREE,
+ num_bytes_sval);
+ check_region_for_write (sized_dest_reg, cd.get_ctxt ());
+ fill_region (sized_dest_reg, fill_value_u8);
}
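+
+/* Hence for a call such as:
+
+ memset (buf, 0, n);
+
+ the store now binds a sized region of "n" bytes within "buf" to the
+ fill value cast to unsigned char, even when "n" is symbolic, rather
+ than only handling an exact-size zero-fill as before (a restatement
+ of the behavior above, not additional handling). */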
/* Handle the on_call_pre part of "operator new". */
-bool
+void
region_model::impl_call_operator_new (const call_details &cd)
{
const svalue *size_sval = cd.get_arg_svalue (0);
@@ -406,37 +478,201 @@ region_model::impl_call_operator_new (const call_details &cd)
= m_mgr->get_ptr_svalue (cd.get_lhs_type (), new_reg);
cd.maybe_set_lhs (ptr_sval);
}
- return false;
}
/* Handle the on_call_pre part of "operator delete", which comes in
both sized and unsized variants (2 arguments and 1 argument
respectively). */
-bool
+void
region_model::impl_call_operator_delete (const call_details &cd)
{
const svalue *ptr_sval = cd.get_arg_svalue (0);
- if (const region_svalue *ptr_to_region_sval
- = ptr_sval->dyn_cast_region_svalue ())
+ if (const region *freed_reg = ptr_sval->maybe_get_region ())
{
/* If the ptr points to an underlying heap region, delete it,
poisoning pointers. */
- const region *freed_reg = ptr_to_region_sval->get_pointee ();
unbind_region_and_descendents (freed_reg, POISON_KIND_FREED);
}
- return false;
}
-/* Handle the on_call_pre part of "realloc". */
+/* Handle the on_call_post part of "realloc":
+
+ void *realloc(void *ptr, size_t size);
+
+ realloc(3) is awkward, since it has various different outcomes
+ that are best modelled as separate exploded nodes/edges.
+
+ We first check for sm-state, in
+ malloc_state_machine::on_realloc_call, so that we
+ can complain about issues such as realloc of a non-heap
+ pointer, and terminate the path for such cases (and issue
+ the complaints at the call's exploded node).
+
+ Assuming that these checks pass, we split the path here into
+ three special cases (and terminate the "standard" path):
+ (A) failure, returning NULL
+ (B) success, growing the buffer in-place without moving it
+ (C) success, allocating a new buffer, copying the content
+ of the old buffer to it, and freeing the old buffer.
+
+ Each of these has a custom_edge_info subclass, which updates
+ the region_model and sm-state of the destination state. */
void
-region_model::impl_call_realloc (const call_details &)
+region_model::impl_call_realloc (const call_details &cd)
{
- /* Currently we don't support bifurcating state, so there's no good
- way to implement realloc(3).
- For now, malloc_state_machine::on_realloc_call has a minimal
- implementation to suppress false positives. */
+ /* Three custom subclasses of custom_edge_info, for handling the various
+ outcomes of "realloc". */
+
+ /* Concrete custom_edge_info: a realloc call that fails, returning NULL. */
+ class failure : public failed_call_info
+ {
+ public:
+ failure (const call_details &cd)
+ : failed_call_info (cd)
+ {
+ }
+
+ bool update_model (region_model *model,
+ const exploded_edge *,
+ region_model_context *ctxt) const FINAL OVERRIDE
+ {
+ /* Return NULL; everything else is unchanged. */
+ const call_details cd (get_call_details (model, ctxt));
+ if (cd.get_lhs_type ())
+ {
+ const svalue *zero
+ = model->m_mgr->get_or_create_int_cst (cd.get_lhs_type (), 0);
+ model->set_value (cd.get_lhs_region (),
+ zero,
+ cd.get_ctxt ());
+ }
+ return true;
+ }
+ };
+
+ /* Concrete custom_edge_info: a realloc call that succeeds, growing
+ the existing buffer without moving it. */
+ class success_no_move : public call_info
+ {
+ public:
+ success_no_move (const call_details &cd)
+ : call_info (cd)
+ {
+ }
+
+ label_text get_desc (bool can_colorize) const FINAL OVERRIDE
+ {
+ return make_label_text (can_colorize,
+ "when %qE succeeds, without moving buffer",
+ get_fndecl ());
+ }
+
+ bool update_model (region_model *model,
+ const exploded_edge *,
+ region_model_context *ctxt) const FINAL OVERRIDE
+ {
+ /* Update size of buffer and return the ptr unchanged. */
+ const call_details cd (get_call_details (model, ctxt));
+ const svalue *ptr_sval = cd.get_arg_svalue (0);
+ const svalue *size_sval = cd.get_arg_svalue (1);
+ if (const region *buffer_reg = ptr_sval->maybe_get_region ())
+ if (compat_types_p (size_sval->get_type (), size_type_node))
+ model->set_dynamic_extents (buffer_reg, size_sval);
+ if (cd.get_lhs_region ())
+ {
+ model->set_value (cd.get_lhs_region (), ptr_sval, cd.get_ctxt ());
+ const svalue *zero
+ = model->m_mgr->get_or_create_int_cst (cd.get_lhs_type (), 0);
+ return model->add_constraint (ptr_sval, NE_EXPR, zero, cd.get_ctxt ());
+ }
+ else
+ return true;
+ }
+ };
+
+ /* Concrete custom_edge_info: a realloc call that succeeds, freeing
+ the existing buffer and moving the content to a freshly allocated
+ buffer. */
+ class success_with_move : public call_info
+ {
+ public:
+ success_with_move (const call_details &cd)
+ : call_info (cd)
+ {
+ }
+
+ label_text get_desc (bool can_colorize) const FINAL OVERRIDE
+ {
+ return make_label_text (can_colorize,
+ "when %qE succeeds, moving buffer",
+ get_fndecl ());
+ }
+ bool update_model (region_model *model,
+ const exploded_edge *,
+ region_model_context *ctxt) const FINAL OVERRIDE
+ {
+ const call_details cd (get_call_details (model, ctxt));
+ const svalue *old_ptr_sval = cd.get_arg_svalue (0);
+ const svalue *new_size_sval = cd.get_arg_svalue (1);
+
+ /* Create the new region. */
+ const region *new_reg
+ = model->create_region_for_heap_alloc (new_size_sval);
+ const svalue *new_ptr_sval
+ = model->m_mgr->get_ptr_svalue (cd.get_lhs_type (), new_reg);
+ if (cd.get_lhs_type ())
+ cd.maybe_set_lhs (new_ptr_sval);
+
+ if (const region *freed_reg = old_ptr_sval->maybe_get_region ())
+ {
+ /* Copy the data. */
+ const svalue *old_size_sval = model->get_dynamic_extents (freed_reg);
+ if (old_size_sval)
+ {
+ const region *sized_old_reg
+ = model->m_mgr->get_sized_region (freed_reg, NULL,
+ old_size_sval);
+ const svalue *buffer_content_sval
+ = model->get_store_value (sized_old_reg, cd.get_ctxt ());
+ model->set_value (new_reg, buffer_content_sval, cd.get_ctxt ());
+ }
+
+ /* Free the old region, so that pointers to the old buffer become
+ invalid. */
+
+ /* If the ptr points to an underlying heap region, delete it,
+ poisoning pointers. */
+ model->unbind_region_and_descendents (freed_reg, POISON_KIND_FREED);
+ model->m_dynamic_extents.remove (freed_reg);
+ }
+
+ /* Update the sm-state: mark the old_ptr_sval as "freed",
+ and the new_ptr_sval as "nonnull". */
+ model->on_realloc_with_move (cd, old_ptr_sval, new_ptr_sval);
+
+ if (cd.get_lhs_type ())
+ {
+ const svalue *zero
+ = model->m_mgr->get_or_create_int_cst (cd.get_lhs_type (), 0);
+ return model->add_constraint (new_ptr_sval, NE_EXPR, zero,
+ cd.get_ctxt ());
+ }
+ else
+ return true;
+ }
+ };
+
+ /* Body of region_model::impl_call_realloc. */
+
+ if (cd.get_ctxt ())
+ {
+ cd.get_ctxt ()->bifurcate (new failure (cd));
+ cd.get_ctxt ()->bifurcate (new success_no_move (cd));
+ cd.get_ctxt ()->bifurcate (new success_with_move (cd));
+ cd.get_ctxt ()->terminate_path ();
+ }
}
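+
+/* Hence for a caller such as:
+
+ void *q = realloc (p, n);
+
+ the path is split three ways: one successor state in which "q" is
+ NULL, one in which "q" equals "p" with the buffer's dynamic extent
+ updated to "n", and one in which "q" points to a fresh buffer and
+ "p" has been freed (a restatement of the cases modelled above). */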
/* Handle the on_call_pre part of "strcpy" and "__builtin_strcpy_chk". */
@@ -450,16 +686,15 @@ region_model::impl_call_strcpy (const call_details &cd)
cd.maybe_set_lhs (dest_sval);
- check_for_writable_region (dest_reg, cd.get_ctxt ());
+ check_region_for_write (dest_reg, cd.get_ctxt ());
/* For now, just mark region's contents as unknown. */
- mark_region_as_unknown (dest_reg);
+ mark_region_as_unknown (dest_reg, cd.get_uncertainty ());
}
-/* Handle the on_call_pre part of "strlen".
- Return true if the LHS is updated. */
+/* Handle the on_call_pre part of "strlen". */
-bool
+void
region_model::impl_call_strlen (const call_details &cd)
{
region_model_context *ctxt = cd.get_ctxt ();
@@ -478,11 +713,10 @@ region_model::impl_call_strlen (const call_details &cd)
const svalue *result_sval
= m_mgr->get_or_create_constant_svalue (t_cst);
cd.maybe_set_lhs (result_sval);
- return true;
+ return;
}
}
- /* Otherwise an unknown value. */
- return true;
+ /* Otherwise a conjured value. */
}
/* Handle calls to functions referenced by
diff --git a/gcc/analyzer/region-model-manager.cc b/gcc/analyzer/region-model-manager.cc
index dfd2413..1cdec1b 100644
--- a/gcc/analyzer/region-model-manager.cc
+++ b/gcc/analyzer/region-model-manager.cc
@@ -56,6 +56,7 @@ along with GCC; see the file COPYING3. If not see
#include "analyzer/program-point.h"
#include "analyzer/store.h"
#include "analyzer/region-model.h"
+#include "analyzer/constraint-manager.h"
#if ENABLE_ANALYZER
@@ -71,12 +72,14 @@ region_model_manager::region_model_manager ()
m_stack_region (alloc_region_id (), &m_root_region),
m_heap_region (alloc_region_id (), &m_root_region),
m_unknown_NULL (NULL),
+ m_check_complexity (true),
m_max_complexity (0, 0),
m_code_region (alloc_region_id (), &m_root_region),
m_fndecls_map (), m_labels_map (),
m_globals_region (alloc_region_id (), &m_root_region),
m_globals_map (),
- m_store_mgr (this)
+ m_store_mgr (this),
+ m_range_mgr (new bounded_ranges_manager ())
{
}
@@ -141,6 +144,8 @@ region_model_manager::~region_model_manager ()
for (string_map_t::iterator iter = m_string_map.begin ();
iter != m_string_map.end (); ++iter)
delete (*iter).second;
+
+ delete m_range_mgr;
}
/* Return true if C exceeds the complexity limit for svalues. */
@@ -160,6 +165,9 @@ region_model_manager::too_complex_p (const complexity &c) const
bool
region_model_manager::reject_if_too_complex (svalue *sval)
{
+ if (!m_check_complexity)
+ return false;
+
const complexity &c = sval->get_complexity ();
if (!too_complex_p (c))
{
@@ -210,6 +218,17 @@ region_model_manager::get_or_create_constant_svalue (tree cst_expr)
return cst_sval;
}
+/* Return the svalue * for a constant_svalue for the INTEGER_CST
+ for VAL of type TYPE, creating it if necessary. */
+
+const svalue *
+region_model_manager::get_or_create_int_cst (tree type, poly_int64 val)
+{
+ gcc_assert (type);
+ tree tree_cst = build_int_cst (type, val);
+ return get_or_create_constant_svalue (tree_cst);
+}
+
/* Return the svalue * for a unknown_svalue for TYPE (which can be NULL),
creating it if necessary.
The unknown_svalue instances are reused, based on pointer equality
@@ -241,6 +260,10 @@ region_model_manager::get_or_create_unknown_svalue (tree type)
const svalue *
region_model_manager::get_or_create_initial_value (const region *reg)
{
+ if (!reg->can_have_initial_svalue_p ())
+ return get_or_create_poisoned_svalue (POISON_KIND_UNINIT,
+ reg->get_type ());
+
/* The initial value of a cast is a cast of the initial value. */
if (const cast_region *cast_reg = reg->dyn_cast_cast_region ())
{
@@ -325,6 +348,13 @@ region_model_manager::maybe_fold_unaryop (tree type, enum tree_code op,
/* Ops on "unknown" are also unknown. */
if (arg->get_kind () == SK_UNKNOWN)
return get_or_create_unknown_svalue (type);
+ /* Likewise for "poisoned". */
+ else if (const poisoned_svalue *poisoned_sval
+ = arg->dyn_cast_poisoned_svalue ())
+ return get_or_create_poisoned_svalue (poisoned_sval->get_poison_kind (),
+ type);
+
+ gcc_assert (arg->can_have_associated_state_p ());
switch (op)
{
@@ -431,6 +461,59 @@ region_model_manager::get_or_create_cast (tree type, const svalue *arg)
return get_or_create_unaryop (type, op, arg);
}
+/* Subroutine of region_model_manager::maybe_fold_binop for handling
+ (TYPE)(COMPOUND_SVAL BIT_AND_EXPR CST) that may have been generated by
+ optimize_bit_field_compare, where CST is from ARG1.
+
+ Support masking out bits from a compound_svalue for comparing a bitfield
+ against a value, as generated by optimize_bit_field_compare for
+ BITFIELD == VALUE.
+
+ If COMPOUND_SVAL has a value for the appropriate bits, return it,
+ shifted accordingly.
+ Otherwise return NULL. */
+
+const svalue *
+region_model_manager::
+maybe_undo_optimize_bit_field_compare (tree type,
+ const compound_svalue *compound_sval,
+ tree cst,
+ const svalue *arg1)
+{
+ if (type != unsigned_char_type_node)
+ return NULL;
+
+ const binding_map &map = compound_sval->get_map ();
+ unsigned HOST_WIDE_INT mask = TREE_INT_CST_LOW (cst);
+ /* If "mask" is a contiguous range of set bits, see if the
+ compound_sval has a value for those bits. */
+ bit_range bits (0, 0);
+ if (!bit_range::from_mask (mask, &bits))
+ return NULL;
+
+ bit_range bound_bits (bits);
+ if (BYTES_BIG_ENDIAN)
+ bound_bits = bit_range (BITS_PER_UNIT - bits.get_next_bit_offset (),
+ bits.m_size_in_bits);
+ const concrete_binding *conc
+ = get_store_manager ()->get_concrete_binding (bound_bits);
+ const svalue *sval = map.get (conc);
+ if (!sval)
+ return NULL;
+
+ /* We have a value;
+ shift it by the correct number of bits. */
+ const svalue *lhs = get_or_create_cast (type, sval);
+ HOST_WIDE_INT bit_offset = bits.get_start_bit_offset ().to_shwi ();
+ const svalue *shift_sval = get_or_create_int_cst (type, bit_offset);
+ const svalue *shifted_sval = get_or_create_binop (type, LSHIFT_EXPR,
+ lhs, shift_sval);
+ /* Reapply the mask (needed for negative
+ signed bitfields). */
+ return get_or_create_binop (type, BIT_AND_EXPR,
+ shifted_sval, arg1);
+}
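
As a sketch of the source pattern this folding targets (a hypothetical test case, not part of the patch; names are illustrative):

    /* With -fanalyzer, comparing a bitfield against a constant can be
       rewritten by optimize_bit_field_compare into a masked compare on
       an unsigned char load, roughly
       "(BIT_FIELD_REF <f, 8, 0> & mask) == cst"; the folding above
       recovers the bound bits from the compound_svalue so the analyzer
       can still evaluate the comparison.  */
    struct flags { unsigned a : 3; unsigned b : 4; };
    int test (struct flags f) { return f.b == 5; }
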
+
/* Subroutine of region_model_manager::get_or_create_binop.
Attempt to fold the inputs and return a simpler svalue *.
Otherwise, return NULL. */
@@ -480,9 +563,19 @@ region_model_manager::maybe_fold_binop (tree type, enum tree_code op,
break;
case BIT_AND_EXPR:
if (cst1)
- if (zerop (cst1) && INTEGRAL_TYPE_P (type))
- /* "(ARG0 & 0)" -> "0". */
- return get_or_create_constant_svalue (build_int_cst (type, 0));
+ {
+ if (zerop (cst1) && INTEGRAL_TYPE_P (type))
+ /* "(ARG0 & 0)" -> "0". */
+ return get_or_create_constant_svalue (build_int_cst (type, 0));
+
+ if (const compound_svalue *compound_sval
+ = arg0->dyn_cast_compound_svalue ())
+ if (const svalue *sval
+ = maybe_undo_optimize_bit_field_compare (type,
+ compound_sval,
+ cst1, arg1))
+ return sval;
+ }
break;
case TRUTH_ANDIF_EXPR:
case TRUTH_AND_EXPR:
@@ -537,12 +630,6 @@ region_model_manager::maybe_fold_binop (tree type, enum tree_code op,
get_or_create_binop (size_type_node, op,
binop->get_arg1 (), arg1));
- /* Ops on "unknown" are also unknown (unless we can use one of the
- identities above). */
- if (arg0->get_kind () == SK_UNKNOWN
- || arg1->get_kind () == SK_UNKNOWN)
- return get_or_create_unknown_svalue (type);
-
/* etc. */
return NULL;
@@ -563,6 +650,12 @@ region_model_manager::get_or_create_binop (tree type, enum tree_code op,
if (const svalue *folded = maybe_fold_binop (type, op, arg0, arg1))
return folded;
+ /* Ops on "unknown"/"poisoned" are unknown (unless we were able to fold
+ it via an identity in maybe_fold_binop). */
+ if (!arg0->can_have_associated_state_p ()
+ || !arg1->can_have_associated_state_p ())
+ return get_or_create_unknown_svalue (type);
+
binop_svalue::key_t key (type, op, arg0, arg1);
if (binop_svalue **slot = m_binop_values_map.get (key))
return *slot;
@@ -580,8 +673,8 @@ region_model_manager::maybe_fold_sub_svalue (tree type,
const svalue *parent_svalue,
const region *subregion)
{
- /* Subvalues of "unknown" are unknown. */
- if (parent_svalue->get_kind () == SK_UNKNOWN)
+ /* Subvalues of "unknown"/"poisoned" are unknown. */
+ if (!parent_svalue->can_have_associated_state_p ())
return get_or_create_unknown_svalue (type);
/* If we have a subregion of a zero-fill, it's zero. */
@@ -612,13 +705,13 @@ region_model_manager::maybe_fold_sub_svalue (tree type,
return get_or_create_cast (type, char_sval);
}
- /* SUB(INIT(r)).FIELD -> INIT(r.FIELD)
- i.e.
- Subvalue(InitialValue(R1), FieldRegion(R2, F))
- -> InitialValue(FieldRegion(R1, F)). */
if (const initial_svalue *init_sval
- = parent_svalue->dyn_cast_initial_svalue ())
+ = parent_svalue->dyn_cast_initial_svalue ())
{
+ /* SUB(INIT(r)).FIELD -> INIT(r.FIELD)
+ i.e.
+ Subvalue(InitialValue(R1), FieldRegion(R2, F))
+ -> InitialValue(FieldRegion(R1, F)). */
if (const field_region *field_reg = subregion->dyn_cast_field_region ())
{
const region *field_reg_new
@@ -626,8 +719,24 @@ region_model_manager::maybe_fold_sub_svalue (tree type,
field_reg->get_field ());
return get_or_create_initial_value (field_reg_new);
}
+ /* SUB(INIT(r))[ELEMENT] -> INIT(r[ELEMENT])
+ i.e.
+ Subvalue(InitialValue(R1), ElementRegion(R2, IDX))
+ -> InitialValue(ElementRegion(R1, IDX)). */
+ if (const element_region *element_reg = subregion->dyn_cast_element_region ())
+ {
+ const region *element_reg_new
+ = get_element_region (init_sval->get_region (),
+ element_reg->get_type (),
+ element_reg->get_index ());
+ return get_or_create_initial_value (element_reg_new);
+ }
}
+ if (const repeated_svalue *repeated_sval
+ = parent_svalue->dyn_cast_repeated_svalue ())
+ return get_or_create_cast (type, repeated_sval->get_inner_svalue ());
+
return NULL;
}
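
A sketch of what the new element-region fold above enables (hypothetical example):

    /* Reading one element of a pointed-to array now stays symbolic
       per element, roughly
	 Subvalue(InitialValue(*p), ElementRegion(vals, 0))
	 -> InitialValue(ElementRegion((*p).vals, 0)),
       rather than collapsing to "unknown".  */
    struct s { int vals[8]; };
    int first (const struct s *p) { return p->vals[0]; }
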
@@ -653,6 +762,260 @@ region_model_manager::get_or_create_sub_svalue (tree type,
return sub_sval;
}
+/* Subroutine of region_model_manager::get_or_create_repeated_svalue.
+ Return a folded svalue, or NULL. */
+
+const svalue *
+region_model_manager::maybe_fold_repeated_svalue (tree type,
+ const svalue *outer_size,
+ const svalue *inner_svalue)
+{
+ /* Repeated "unknown"/"poisoned" is unknown. */
+ if (!outer_size->can_have_associated_state_p ()
+ || !inner_svalue->can_have_associated_state_p ())
+ return get_or_create_unknown_svalue (type);
+
+ /* If INNER_SVALUE is the same size as OUTER_SIZE,
+ simply turn it into a cast. */
+ if (tree cst_outer_num_bytes = outer_size->maybe_get_constant ())
+ {
+ HOST_WIDE_INT num_bytes_inner_svalue
+ = int_size_in_bytes (inner_svalue->get_type ());
+ if (num_bytes_inner_svalue != -1)
+ if (num_bytes_inner_svalue
+ == (HOST_WIDE_INT)tree_to_uhwi (cst_outer_num_bytes))
+ {
+ if (type)
+ return get_or_create_cast (type, inner_svalue);
+ else
+ return inner_svalue;
+ }
+ }
+
+ /* Handle zero-fill of a specific type. */
+ if (tree cst = inner_svalue->maybe_get_constant ())
+ if (zerop (cst) && type)
+ return get_or_create_cast (type, inner_svalue);
+
+ return NULL;
+}
+
+/* Return the svalue * of type TYPE in which INNER_SVALUE is repeated
+ enough times to be of size OUTER_SIZE, creating it if necessary.
+ e.g. for filling buffers with a constant value. */
+
+const svalue *
+region_model_manager::get_or_create_repeated_svalue (tree type,
+ const svalue *outer_size,
+ const svalue *inner_svalue)
+{
+ if (const svalue *folded
+ = maybe_fold_repeated_svalue (type, outer_size, inner_svalue))
+ return folded;
+
+ repeated_svalue::key_t key (type, outer_size, inner_svalue);
+ if (repeated_svalue **slot = m_repeated_values_map.get (key))
+ return *slot;
+ repeated_svalue *repeated_sval
+ = new repeated_svalue (type, outer_size, inner_svalue);
+ RETURN_UNKNOWN_IF_TOO_COMPLEX (repeated_sval);
+ m_repeated_values_map.put (key, repeated_sval);
+ return repeated_sval;
+}
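
A plausible illustration of where repeated svalues arise (assuming memset-style fills are modeled via fill_region, as elsewhere in this merge):

    #include <string.h>

    void init (void)
    {
      char buf[64];
      /* Modeled as the fill byte repeated to the buffer's size; since
	 the fill value is zero and covers the whole object,
	 maybe_fold_repeated_svalue reduces it to a cast of zero.  */
      memset (buf, 0, sizeof (buf));
    }
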
+
+/* Attempt to get the bit_range for FIELD within a RECORD_TYPE.
+ Return true and write the result to OUT if successful.
+ Return false otherwise. */
+
+static bool
+get_bit_range_for_field (tree field, bit_range *out)
+{
+ bit_size_t bit_size;
+ if (!int_size_in_bits (TREE_TYPE (field), &bit_size))
+ return false;
+ int field_bit_offset = int_bit_position (field);
+ *out = bit_range (field_bit_offset, bit_size);
+ return true;
+}
+
+/* Attempt to get the byte_range for FIELD within a RECORD_TYPE.
+ Return true and write the result to OUT if successful.
+ Return false otherwise. */
+
+static bool
+get_byte_range_for_field (tree field, byte_range *out)
+{
+ bit_range field_bits (0, 0);
+ if (!get_bit_range_for_field (field, &field_bits))
+ return false;
+ return field_bits.as_byte_range (out);
+}
+
+/* Attempt to determine if there is a specific field within RECORD_TYPE
+ at BYTES. If so, return it, and write the location of BYTES relative
+ to the field to *OUT_RANGE_WITHIN_FIELD.
+ Otherwise, return NULL_TREE.
+ For example, given:
+ struct foo { uint32 a; uint32 b; };
+ and
+ bytes = {bytes 6-7} (of foo)
+ we have bytes 2-3 of field b. */
+
+static tree
+get_field_at_byte_range (tree record_type, const byte_range &bytes,
+ byte_range *out_range_within_field)
+{
+ bit_offset_t bit_offset = bytes.m_start_byte_offset * BITS_PER_UNIT;
+
+ tree field = get_field_at_bit_offset (record_type, bit_offset);
+ if (!field)
+ return NULL_TREE;
+
+ byte_range field_bytes (0,0);
+ if (!get_byte_range_for_field (field, &field_bytes))
+ return NULL_TREE;
+
+ /* Is BYTES fully within field_bytes? */
+ byte_range bytes_within_field (0,0);
+ if (!field_bytes.contains_p (bytes, &bytes_within_field))
+ return NULL_TREE;
+
+ *out_range_within_field = bytes_within_field;
+ return field;
+}
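
The arithmetic in the comment above, spelled out (assuming a 4-byte uint32):

    #include <stdint.h>

    struct foo { uint32_t a; uint32_t b; };
    /* b occupies bytes 4-7 of foo, so bytes 6-7 of foo fall at
       bytes 6-4 == 2 through 7-4 == 3 within b, i.e. bytes 2-3
       of field b.  */
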
+
+/* Subroutine of region_model_manager::get_or_create_bits_within.
+ Return a folded svalue, or NULL. */
+
+const svalue *
+region_model_manager::maybe_fold_bits_within_svalue (tree type,
+ const bit_range &bits,
+ const svalue *inner_svalue)
+{
+ tree inner_type = inner_svalue->get_type ();
+ /* Fold:
+ BITS_WITHIN ((0, sizeof (VAL), VAL))
+ to:
+ CAST(TYPE, VAL). */
+ if (bits.m_start_bit_offset == 0 && inner_type)
+ {
+ bit_size_t inner_type_size;
+ if (int_size_in_bits (inner_type, &inner_type_size))
+ if (inner_type_size == bits.m_size_in_bits)
+ {
+ if (type)
+ return get_or_create_cast (type, inner_svalue);
+ else
+ return inner_svalue;
+ }
+ }
+
+ /* Kind-specific folding. */
+ if (const svalue *sval
+ = inner_svalue->maybe_fold_bits_within (type, bits, this))
+ return sval;
+
+ byte_range bytes (0,0);
+ if (bits.as_byte_range (&bytes) && inner_type)
+ switch (TREE_CODE (inner_type))
+ {
+ default:
+ break;
+ case ARRAY_TYPE:
+ {
+ /* Fold:
+ BITS_WITHIN (range, KIND(REG))
+ to:
+ BITS_WITHIN (range - offsetof(ELEMENT), KIND(REG.ELEMENT))
+ if the range is a byte-range fully within one ELEMENT. */
+ tree element_type = TREE_TYPE (inner_type);
+ HOST_WIDE_INT element_byte_size
+ = int_size_in_bytes (element_type);
+ if (element_byte_size > 0)
+ {
+ HOST_WIDE_INT start_idx
+ = (bytes.get_start_byte_offset ().to_shwi ()
+ / element_byte_size);
+ HOST_WIDE_INT last_idx
+ = (bytes.get_last_byte_offset ().to_shwi ()
+ / element_byte_size);
+ if (start_idx == last_idx)
+ {
+ if (const initial_svalue *initial_sval
+ = inner_svalue->dyn_cast_initial_svalue ())
+ {
+ bit_offset_t start_of_element
+ = start_idx * element_byte_size * BITS_PER_UNIT;
+ bit_range bits_within_element
+ (bits.m_start_bit_offset - start_of_element,
+ bits.m_size_in_bits);
+ const svalue *idx_sval
+ = get_or_create_int_cst (integer_type_node, start_idx);
+ const region *element_reg =
+ get_element_region (initial_sval->get_region (),
+ element_type, idx_sval);
+ const svalue *element_reg_sval
+ = get_or_create_initial_value (element_reg);
+ return get_or_create_bits_within (type,
+ bits_within_element,
+ element_reg_sval);
+ }
+ }
+ }
+ }
+ break;
+ case RECORD_TYPE:
+ {
+ /* Fold:
+ BITS_WITHIN (range, KIND(REG))
+ to:
+ BITS_WITHIN (range - offsetof(FIELD), KIND(REG.FIELD))
+ if the range is fully within FIELD. */
+ byte_range bytes_within_field (0, 0);
+ if (tree field = get_field_at_byte_range (inner_type, bytes,
+ &bytes_within_field))
+ {
+ if (const initial_svalue *initial_sval
+ = inner_svalue->dyn_cast_initial_svalue ())
+ {
+ const region *field_reg =
+ get_field_region (initial_sval->get_region (), field);
+ const svalue *initial_reg_sval
+ = get_or_create_initial_value (field_reg);
+ return get_or_create_bits_within
+ (type,
+ bytes_within_field.as_bit_range (),
+ initial_reg_sval);
+ }
+ }
+ }
+ break;
+ }
+ return NULL;
+}
+
+/* Return the svalue * of type TYPE for extracting BITS from INNER_SVALUE,
+ creating it if necessary. */
+
+const svalue *
+region_model_manager::get_or_create_bits_within (tree type,
+ const bit_range &bits,
+ const svalue *inner_svalue)
+{
+ if (const svalue *folded
+ = maybe_fold_bits_within_svalue (type, bits, inner_svalue))
+ return folded;
+
+ bits_within_svalue::key_t key (type, bits, inner_svalue);
+ if (bits_within_svalue **slot = m_bits_within_values_map.get (key))
+ return *slot;
+ bits_within_svalue *bits_within_sval
+ = new bits_within_svalue (type, bits, inner_svalue);
+ RETURN_UNKNOWN_IF_TOO_COMPLEX (bits_within_sval);
+ m_bits_within_values_map.put (key, bits_within_sval);
+ return bits_within_sval;
+}
+
/* Return the svalue * that decorates ARG as being unmergeable,
creating it if necessary. */
@@ -728,6 +1091,51 @@ region_model_manager::get_or_create_conjured_svalue (tree type,
return conjured_sval;
}
+/* Subroutine of region_model_manager::get_or_create_asm_output_svalue.
+ Return a folded svalue, or NULL. */
+
+const svalue *
+region_model_manager::
+maybe_fold_asm_output_svalue (tree type,
+ const vec<const svalue *> &inputs)
+{
+ /* Unknown inputs should lead to unknown results. */
+ for (const auto &iter : inputs)
+ if (iter->get_kind () == SK_UNKNOWN)
+ return get_or_create_unknown_svalue (type);
+
+ return NULL;
+}
+
+/* Return the svalue * of type TYPE for OUTPUT_IDX of the deterministic
+ asm stmt ASM_STMT, given INPUTS as inputs. */
+
+const svalue *
+region_model_manager::
+get_or_create_asm_output_svalue (tree type,
+ const gasm *asm_stmt,
+ unsigned output_idx,
+ const vec<const svalue *> &inputs)
+{
+ gcc_assert (inputs.length () <= asm_output_svalue::MAX_INPUTS);
+
+ if (const svalue *folded
+ = maybe_fold_asm_output_svalue (type, inputs))
+ return folded;
+
+ const char *asm_string = gimple_asm_string (asm_stmt);
+ const unsigned noutputs = gimple_asm_noutputs (asm_stmt);
+
+ asm_output_svalue::key_t key (type, asm_string, output_idx, inputs);
+ if (asm_output_svalue **slot = m_asm_output_values_map.get (key))
+ return *slot;
+ asm_output_svalue *asm_output_sval
+ = new asm_output_svalue (type, asm_string, output_idx, noutputs, inputs);
+ RETURN_UNKNOWN_IF_TOO_COMPLEX (asm_output_sval);
+ m_asm_output_values_map.put (key, asm_output_sval);
+ return asm_output_sval;
+}
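
A sketch of the kind of deterministic asm these svalues model (hypothetical x86 example; the actual handling is in region-model-asm.cc in this merge):

    static inline int add_one (int x)
    {
      int y;
      /* The same asm string, output index and inputs key to the same
	 asm_output_svalue, so repeated calls with equal inputs compare
	 equal in the model.  */
      __asm__ ("addl $1, %0" : "=r" (y) : "0" (x));
      return y;
    }
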
+
/* Given STRING_CST (a STRING_CST) and BYTE_OFFSET_CST (a constant),
attempt to get the character at that offset, returning either
the svalue for the character constant, or NULL if unsuccessful. */
@@ -892,6 +1300,38 @@ region_model_manager::get_offset_region (const region *parent,
return offset_reg;
}
+/* Return the region that describes accessing the subregion of type
+ TYPE of size BYTE_SIZE_SVAL within PARENT, creating it if necessary. */
+
+const region *
+region_model_manager::get_sized_region (const region *parent,
+ tree type,
+ const svalue *byte_size_sval)
+{
+ if (byte_size_sval->get_type () != size_type_node)
+ byte_size_sval = get_or_create_cast (size_type_node, byte_size_sval);
+
+ /* If PARENT is already that size, return it. */
+ const svalue *parent_byte_size_sval = parent->get_byte_size_sval (this);
+ if (tree parent_size_cst = parent_byte_size_sval->maybe_get_constant ())
+ if (tree size_cst = byte_size_sval->maybe_get_constant ())
+ {
+ tree comparison
+ = fold_binary (EQ_EXPR, boolean_type_node, parent_size_cst, size_cst);
+ if (comparison == boolean_true_node)
+ return parent;
+ }
+
+ sized_region::key_t key (parent, type, byte_size_sval);
+ if (sized_region *reg = m_sized_regions.get (key))
+ return reg;
+
+ sized_region *sized_reg
+ = new sized_region (alloc_region_id (), parent, type, byte_size_sval);
+ m_sized_regions.put (key, sized_reg);
+ return sized_reg;
+}
+
/* Return the region that describes accessing PARENT_REGION as if
it were of type TYPE, creating it if necessary. */
@@ -1106,11 +1546,17 @@ region_model_manager::log_stats (logger *logger, bool show_objs) const
log_uniq_map (logger, show_objs, "unaryop_svalue", m_unaryop_values_map);
log_uniq_map (logger, show_objs, "binop_svalue", m_binop_values_map);
log_uniq_map (logger, show_objs, "sub_svalue", m_sub_values_map);
+ log_uniq_map (logger, show_objs, "repeated_svalue", m_repeated_values_map);
+ log_uniq_map (logger, show_objs, "bits_within_svalue",
+ m_bits_within_values_map);
log_uniq_map (logger, show_objs, "unmergeable_svalue",
m_unmergeable_values_map);
log_uniq_map (logger, show_objs, "widening_svalue", m_widening_values_map);
log_uniq_map (logger, show_objs, "compound_svalue", m_compound_values_map);
log_uniq_map (logger, show_objs, "conjured_svalue", m_conjured_values_map);
+ log_uniq_map (logger, show_objs, "asm_output_svalue",
+ m_asm_output_values_map);
+
logger->log ("max accepted svalue num_nodes: %i",
m_max_complexity.m_num_nodes);
logger->log ("max accepted svalue max_depth: %i",
@@ -1124,6 +1570,7 @@ region_model_manager::log_stats (logger *logger, bool show_objs) const
log_uniq_map (logger, show_objs, "field_region", m_field_regions);
log_uniq_map (logger, show_objs, "element_region", m_element_regions);
log_uniq_map (logger, show_objs, "offset_region", m_offset_regions);
+ log_uniq_map (logger, show_objs, "sized_region", m_sized_regions);
log_uniq_map (logger, show_objs, "cast_region", m_cast_regions);
log_uniq_map (logger, show_objs, "frame_region", m_frame_regions);
log_uniq_map (logger, show_objs, "symbolic_region", m_symbolic_regions);
@@ -1131,6 +1578,7 @@ region_model_manager::log_stats (logger *logger, bool show_objs) const
logger->log (" # managed dynamic regions: %i",
m_managed_dynamic_regions.length ());
m_store_mgr.log_stats (logger, show_objs);
+ m_range_mgr->log_stats (logger, show_objs);
}
/* Dump the number of objects of each class that were managed by this
diff --git a/gcc/analyzer/region-model-reachability.cc b/gcc/analyzer/region-model-reachability.cc
index 087185b..b5ae787 100644
--- a/gcc/analyzer/region-model-reachability.cc
+++ b/gcc/analyzer/region-model-reachability.cc
@@ -154,7 +154,7 @@ reachable_regions::add (const region *reg, bool is_mutable)
if (binding_cluster *bind_cluster = m_store->get_cluster (base_reg))
bind_cluster->for_each_value (handle_sval_cb, this);
else
- handle_sval (m_model->get_store_value (reg));
+ handle_sval (m_model->get_store_value (reg, NULL));
}
void
@@ -170,6 +170,7 @@ void
reachable_regions::handle_sval (const svalue *sval)
{
m_reachable_svals.add (sval);
+ m_mutable_svals.add (sval);
if (const region_svalue *ptr = sval->dyn_cast_region_svalue ())
{
const region *pointee = ptr->get_pointee ();
@@ -266,7 +267,6 @@ reachable_regions::handle_parm (const svalue *sval, tree param_type)
void
reachable_regions::mark_escaped_clusters (region_model_context *ctxt)
{
- gcc_assert (ctxt);
auto_vec<const function_region *> escaped_fn_regs
(m_mutable_base_regs.elements ());
for (hash_set<const region *>::iterator iter = m_mutable_base_regs.begin ();
@@ -280,12 +280,15 @@ reachable_regions::mark_escaped_clusters (region_model_context *ctxt)
if (const function_region *fn_reg = base_reg->dyn_cast_function_region ())
escaped_fn_regs.quick_push (fn_reg);
}
- /* Sort to ensure deterministic results. */
- escaped_fn_regs.qsort (region::cmp_ptr_ptr);
- unsigned i;
- const function_region *fn_reg;
- FOR_EACH_VEC_ELT (escaped_fn_regs, i, fn_reg)
- ctxt->on_escaped_function (fn_reg->get_fndecl ());
+ if (ctxt)
+ {
+ /* Sort to ensure deterministic results. */
+ escaped_fn_regs.qsort (region::cmp_ptr_ptr);
+ unsigned i;
+ const function_region *fn_reg;
+ FOR_EACH_VEC_ELT (escaped_fn_regs, i, fn_reg)
+ ctxt->on_escaped_function (fn_reg->get_fndecl ());
+ }
}
/* Dump SET to PP, sorting it to avoid churn when comparing dumps. */
diff --git a/gcc/analyzer/region-model-reachability.h b/gcc/analyzer/region-model-reachability.h
index c6a21e9..57daf72 100644
--- a/gcc/analyzer/region-model-reachability.h
+++ b/gcc/analyzer/region-model-reachability.h
@@ -89,6 +89,14 @@ public:
{
return m_mutable_svals.end ();
}
+ hash_set<const region *>::iterator begin_mutable_base_regs ()
+ {
+ return m_mutable_base_regs.begin ();
+ }
+ hash_set<const region *>::iterator end_mutable_base_regs ()
+ {
+ return m_mutable_base_regs.end ();
+ }
void dump_to_pp (pretty_printer *pp) const;
diff --git a/gcc/analyzer/region-model.cc b/gcc/analyzer/region-model.cc
index 96ed549..a14d107 100644
--- a/gcc/analyzer/region-model.cc
+++ b/gcc/analyzer/region-model.cc
@@ -66,6 +66,7 @@ along with GCC; see the file COPYING3. If not see
#include "analyzer/analyzer-selftests.h"
#include "stor-layout.h"
#include "attribs.h"
+#include "tree-object-size.h"
#if ENABLE_ANALYZER
@@ -110,12 +111,140 @@ print_quoted_type (pretty_printer *pp, tree t)
pp_end_quote (pp, pp_show_color (pp));
}
+/* class region_to_value_map. */
+
+/* Assignment operator for region_to_value_map. */
+
+region_to_value_map &
+region_to_value_map::operator= (const region_to_value_map &other)
+{
+ m_hash_map.empty ();
+ for (auto iter : other.m_hash_map)
+ {
+ const region *reg = iter.first;
+ const svalue *sval = iter.second;
+ m_hash_map.put (reg, sval);
+ }
+ return *this;
+}
+
+/* Equality operator for region_to_value_map. */
+
+bool
+region_to_value_map::operator== (const region_to_value_map &other) const
+{
+ if (m_hash_map.elements () != other.m_hash_map.elements ())
+ return false;
+
+ for (auto iter : *this)
+ {
+ const region *reg = iter.first;
+ const svalue *sval = iter.second;
+ const svalue * const *other_slot = other.get (reg);
+ if (other_slot == NULL)
+ return false;
+ if (sval != *other_slot)
+ return false;
+ }
+
+ return true;
+}
+
+/* Dump this object to PP. */
+
+void
+region_to_value_map::dump_to_pp (pretty_printer *pp, bool simple,
+ bool multiline) const
+{
+ auto_vec<const region *> regs;
+ for (iterator iter = begin (); iter != end (); ++iter)
+ regs.safe_push ((*iter).first);
+ regs.qsort (region::cmp_ptr_ptr);
+ if (multiline)
+ pp_newline (pp);
+ else
+ pp_string (pp, " {");
+ unsigned i;
+ const region *reg;
+ FOR_EACH_VEC_ELT (regs, i, reg)
+ {
+ if (multiline)
+ pp_string (pp, " ");
+ else if (i > 0)
+ pp_string (pp, ", ");
+ reg->dump_to_pp (pp, simple);
+ pp_string (pp, ": ");
+ const svalue *sval = *get (reg);
+ sval->dump_to_pp (pp, true);
+ if (multiline)
+ pp_newline (pp);
+ }
+ if (!multiline)
+ pp_string (pp, "}");
+}
+
+/* Dump this object to stderr. */
+
+DEBUG_FUNCTION void
+region_to_value_map::dump (bool simple) const
+{
+ pretty_printer pp;
+ pp_format_decoder (&pp) = default_tree_printer;
+ pp_show_color (&pp) = pp_show_color (global_dc->printer);
+ pp.buffer->stream = stderr;
+ dump_to_pp (&pp, simple, true);
+ pp_newline (&pp);
+ pp_flush (&pp);
+}
+
+
+/* Attempt to merge THIS with OTHER, writing the result
+ to OUT.
+
+ For now, write (region, value) mappings that are in common between THIS
+ and OTHER to OUT, effectively taking the intersection, rather than
+ rejecting differences. */
+
+bool
+region_to_value_map::can_merge_with_p (const region_to_value_map &other,
+ region_to_value_map *out) const
+{
+ for (auto iter : *this)
+ {
+ const region *iter_reg = iter.first;
+ const svalue *iter_sval = iter.second;
+ const svalue * const * other_slot = other.get (iter_reg);
+ if (other_slot)
+ if (iter_sval == *other_slot)
+ out->put (iter_reg, iter_sval);
+ }
+ return true;
+}
+
+/* Purge any state involving SVAL. */
+
+void
+region_to_value_map::purge_state_involving (const svalue *sval)
+{
+ auto_vec<const region *> to_purge;
+ for (auto iter : *this)
+ {
+ const region *iter_reg = iter.first;
+ const svalue *iter_sval = iter.second;
+ if (iter_reg->involves_p (sval) || iter_sval->involves_p (sval))
+ to_purge.safe_push (iter_reg);
+ }
+ for (auto iter : to_purge)
+ m_hash_map.remove (iter);
+}
+
/* class region_model. */
/* Ctor for region_model: construct an "empty" model. */
region_model::region_model (region_model_manager *mgr)
-: m_mgr (mgr), m_store (), m_current_frame (NULL)
+: m_mgr (mgr), m_store (), m_current_frame (NULL),
+ m_dynamic_extents ()
{
m_constraints = new constraint_manager (mgr);
}
@@ -125,7 +254,8 @@ region_model::region_model (region_model_manager *mgr)
region_model::region_model (const region_model &other)
: m_mgr (other.m_mgr), m_store (other.m_store),
m_constraints (new constraint_manager (*other.m_constraints)),
- m_current_frame (other.m_current_frame)
+ m_current_frame (other.m_current_frame),
+ m_dynamic_extents (other.m_dynamic_extents)
{
}
@@ -151,6 +281,8 @@ region_model::operator= (const region_model &other)
m_current_frame = other.m_current_frame;
+ m_dynamic_extents = other.m_dynamic_extents;
+
return *this;
}
@@ -175,6 +307,9 @@ region_model::operator== (const region_model &other) const
if (m_current_frame != other.m_current_frame)
return false;
+ if (m_dynamic_extents != other.m_dynamic_extents)
+ return false;
+
gcc_checking_assert (hash () == other.hash ());
return true;
@@ -236,6 +371,13 @@ region_model::dump_to_pp (pretty_printer *pp, bool simple,
m_constraints->dump_to_pp (pp, multiline);
if (!multiline)
pp_string (pp, "}");
+
+ /* Dump sizes of dynamic regions, if any are known. */
+ if (!m_dynamic_extents.is_empty ())
+ {
+ pp_string (pp, "dynamic_extents:");
+ m_dynamic_extents.dump_to_pp (pp, simple, multiline);
+ }
}
/* Dump a representation of this model to FILE. */
@@ -268,6 +410,14 @@ region_model::debug () const
dump (true);
}
+/* Assert that this object is valid. */
+
+void
+region_model::validate () const
+{
+ m_store.validate ();
+}
+
/* Canonicalize the store and constraints, to maximize the chance of
equality between region_model instances. */
@@ -309,6 +459,11 @@ public:
const char *get_kind () const FINAL OVERRIDE { return "poisoned_value_diagnostic"; }
+ bool use_of_uninit_p () const FINAL OVERRIDE
+ {
+ return m_pkind == POISON_KIND_UNINIT;
+ }
+
bool operator== (const poisoned_value_diagnostic &other) const
{
return m_expr == other.m_expr;
@@ -320,6 +475,16 @@ public:
{
default:
gcc_unreachable ();
+ case POISON_KIND_UNINIT:
+ {
+ diagnostic_metadata m;
+ m.add_cwe (457); /* "CWE-457: Use of Uninitialized Variable". */
+ return warning_meta (rich_loc, m,
+ OPT_Wanalyzer_use_of_uninitialized_value,
+ "use of uninitialized value %qE",
+ m_expr);
+ }
+ break;
case POISON_KIND_FREED:
{
diagnostic_metadata m;
@@ -349,6 +514,9 @@ public:
{
default:
gcc_unreachable ();
+ case POISON_KIND_UNINIT:
+ return ev.formatted_print ("use of uninitialized value %qE here",
+ m_expr);
case POISON_KIND_FREED:
return ev.formatted_print ("use after %<free%> of %qE here",
m_expr);
@@ -649,6 +817,41 @@ region_model::get_gassign_result (const gassign *assign,
}
}
+/* Check for SVAL being poisoned, adding a warning to CTXT.
+ Return SVAL, or, if a warning is added, another value, to avoid
+ repeatedly complaining about the same poisoned value in followup code. */
+
+const svalue *
+region_model::check_for_poison (const svalue *sval,
+ tree expr,
+ region_model_context *ctxt) const
+{
+ if (!ctxt)
+ return sval;
+
+ if (const poisoned_svalue *poisoned_sval = sval->dyn_cast_poisoned_svalue ())
+ {
+ /* If we have an SSA name for a temporary, we don't want to print
+ '<unknown>'.
+ Poisoned values are shared by type, and so we can't reconstruct
+ the tree other than via the def stmts, using
+ fixup_tree_for_diagnostic. */
+ tree diag_arg = fixup_tree_for_diagnostic (expr);
+ enum poison_kind pkind = poisoned_sval->get_poison_kind ();
+ if (ctxt->warn (new poisoned_value_diagnostic (diag_arg, pkind)))
+ {
+ /* We only want to report use of a poisoned value at the first
+ place it gets used; return an unknown value to avoid generating
+ a chain of followup warnings. */
+ sval = m_mgr->get_or_create_unknown_svalue (sval->get_type ());
+ }
+
+ return sval;
+ }
+
+ return sval;
+}
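
A minimal reproducer for the new uninitialized-value diagnostic (a sketch; the wording comes from the POISON_KIND_UNINIT case above):

    int test_uninit (void)
    {
      int i;      /* never written to */
      return i;   /* -fanalyzer: "use of uninitialized value 'i'" */
    }
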
+
/* Update this model for the ASSIGN stmt, using CTXT to report any
diagnostics. */
@@ -665,6 +868,8 @@ region_model::on_assignment (const gassign *assign, region_model_context *ctxt)
for some SVALUE. */
if (const svalue *sval = get_gassign_result (assign, ctxt))
{
+ tree expr = get_diagnostic_tree_for_gassign (assign);
+ check_for_poison (sval, expr, ctxt);
set_value (lhs_reg, sval, ctxt);
return;
}
@@ -722,11 +927,115 @@ region_model::on_assignment (const gassign *assign, region_model_context *ctxt)
case STRING_CST:
{
/* e.g. "struct s2 x = {{'A', 'B', 'C', 'D'}};". */
- /* Add a default binding, rather than a direct one, so that array
- access will "inherit" the individual chars. */
const svalue *rhs_sval = get_rvalue (rhs1, ctxt);
m_store.set_value (m_mgr->get_store_manager(), lhs_reg, rhs_sval,
- BK_default);
+ ctxt ? ctxt->get_uncertainty () : NULL);
+ }
+ break;
+ }
+}
+
+/* A pending_diagnostic subclass for implementing "__analyzer_dump_path". */
+
+class dump_path_diagnostic
+ : public pending_diagnostic_subclass<dump_path_diagnostic>
+{
+public:
+ bool emit (rich_location *richloc) FINAL OVERRIDE
+ {
+ inform (richloc, "path");
+ return true;
+ }
+
+ const char *get_kind () const FINAL OVERRIDE { return "dump_path_diagnostic"; }
+
+ bool operator== (const dump_path_diagnostic &) const
+ {
+ return true;
+ }
+};
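
Usage sketch for the builtin this diagnostic implements (test-suite style; the declaration is an assumption):

    extern void __analyzer_dump_path (void);

    void test (void *p)
    {
      if (p)
	__analyzer_dump_path ();  /* queues the "path" note if this
				     point is reachable */
    }
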
+
+/* Handle the pre-sm-state part of STMT, modifying this object in-place.
+ Write true to *OUT_TERMINATE_PATH if the path should be terminated.
+ Write true to *OUT_UNKNOWN_SIDE_EFFECTS if the stmt has unknown
+ side effects. */
+
+void
+region_model::on_stmt_pre (const gimple *stmt,
+ bool *out_terminate_path,
+ bool *out_unknown_side_effects,
+ region_model_context *ctxt)
+{
+ switch (gimple_code (stmt))
+ {
+ default:
+ /* No-op for now. */
+ break;
+
+ case GIMPLE_ASSIGN:
+ {
+ const gassign *assign = as_a <const gassign *> (stmt);
+ on_assignment (assign, ctxt);
+ }
+ break;
+
+ case GIMPLE_ASM:
+ {
+ const gasm *asm_stmt = as_a <const gasm *> (stmt);
+ on_asm_stmt (asm_stmt, ctxt);
+ }
+ break;
+
+ case GIMPLE_CALL:
+ {
+ /* Track whether we have a gcall to a function that's not recognized by
+ anything, for which we don't have a function body, or for which we
+ don't know the fndecl. */
+ const gcall *call = as_a <const gcall *> (stmt);
+
+ /* Debugging/test support. */
+ if (is_special_named_call_p (call, "__analyzer_describe", 2))
+ impl_call_analyzer_describe (call, ctxt);
+ else if (is_special_named_call_p (call, "__analyzer_dump_capacity", 1))
+ impl_call_analyzer_dump_capacity (call, ctxt);
+ else if (is_special_named_call_p (call, "__analyzer_dump_path", 0))
+ {
+ /* Handle the builtin "__analyzer_dump_path" by queuing a
+ diagnostic at this exploded_node. */
+ ctxt->warn (new dump_path_diagnostic ());
+ }
+ else if (is_special_named_call_p (call, "__analyzer_dump_region_model",
+ 0))
+ {
+ /* Handle the builtin "__analyzer_dump_region_model" by dumping
+ the region model's state to stderr. */
+ dump (false);
+ }
+ else if (is_special_named_call_p (call, "__analyzer_eval", 1))
+ impl_call_analyzer_eval (call, ctxt);
+ else if (is_special_named_call_p (call, "__analyzer_break", 0))
+ {
+ /* Handle the builtin "__analyzer_break" by triggering a
+ breakpoint. */
+ /* TODO: is there a good cross-platform way to do this? */
+ raise (SIGINT);
+ }
+ else if (is_special_named_call_p (call,
+ "__analyzer_dump_exploded_nodes",
+ 1))
+ {
+ /* This is handled elsewhere. */
+ }
+ else
+ *out_unknown_side_effects = on_call_pre (call, ctxt,
+ out_terminate_path);
+ }
+ break;
+
+ case GIMPLE_RETURN:
+ {
+ const greturn *return_ = as_a <const greturn *> (stmt);
+ on_return (return_, ctxt);
}
break;
}
@@ -750,12 +1059,39 @@ bool
region_model::on_call_pre (const gcall *call, region_model_context *ctxt,
bool *out_terminate_path)
{
+ call_details cd (call, this, ctxt);
+
bool unknown_side_effects = false;
- if (tree callee_fndecl = get_fndecl_for_call (call, ctxt))
+ /* Some of the cases below update the lhs of the call based on the
+ return value, but not all. Provide a default value, which may
+ get overwritten below. */
+ if (tree lhs = gimple_call_lhs (call))
{
- call_details cd (call, this, ctxt);
+ const region *lhs_region = get_lvalue (lhs, ctxt);
+ const svalue *sval
+ = m_mgr->get_or_create_conjured_svalue (TREE_TYPE (lhs), call,
+ lhs_region);
+ purge_state_involving (sval, ctxt);
+ set_value (lhs_region, sval, ctxt);
+ }
+ if (gimple_call_internal_p (call))
+ {
+ switch (gimple_call_internal_fn (call))
+ {
+ default:
+ break;
+ case IFN_BUILTIN_EXPECT:
+ impl_call_builtin_expect (cd);
+ return false;
+ case IFN_UBSAN_BOUNDS:
+ return false;
+ }
+ }
+
+ if (tree callee_fndecl = get_fndecl_for_call (call, ctxt))
+ {
/* The various impl_call_* member functions are implemented
in region-model-impl-calls.cc.
Having them split out into separate functions makes it easier
@@ -771,17 +1107,21 @@ region_model::on_call_pre (const gcall *call, region_model_context *ctxt,
break;
case BUILT_IN_ALLOCA:
case BUILT_IN_ALLOCA_WITH_ALIGN:
- return impl_call_alloca (cd);
+ impl_call_alloca (cd);
+ return false;
case BUILT_IN_CALLOC:
- return impl_call_calloc (cd);
+ impl_call_calloc (cd);
+ return false;
case BUILT_IN_EXPECT:
case BUILT_IN_EXPECT_WITH_PROBABILITY:
- return impl_call_builtin_expect (cd);
+ impl_call_builtin_expect (cd);
+ return false;
case BUILT_IN_FREE:
/* Handle in "on_call_post". */
break;
case BUILT_IN_MALLOC:
- return impl_call_malloc (cd);
+ impl_call_malloc (cd);
+ return false;
case BUILT_IN_MEMCPY:
case BUILT_IN_MEMCPY_CHK:
impl_call_memcpy (cd);
@@ -792,16 +1132,18 @@ region_model::on_call_pre (const gcall *call, region_model_context *ctxt,
return false;
break;
case BUILT_IN_REALLOC:
- impl_call_realloc (cd);
return false;
case BUILT_IN_STRCPY:
case BUILT_IN_STRCPY_CHK:
impl_call_strcpy (cd);
return false;
case BUILT_IN_STRLEN:
- if (impl_call_strlen (cd))
- return false;
- break;
+ impl_call_strlen (cd);
+ return false;
+
+ case BUILT_IN_STACK_SAVE:
+ case BUILT_IN_STACK_RESTORE:
+ return false;
/* Stdio builtins. */
case BUILT_IN_FPRINTF:
@@ -827,22 +1169,21 @@ region_model::on_call_pre (const gcall *call, region_model_context *ctxt,
on the return value. */
break;
}
- else if (gimple_call_internal_p (call))
- switch (gimple_call_internal_fn (call))
- {
- default:
- if (!DECL_PURE_P (callee_fndecl))
- unknown_side_effects = true;
- break;
- case IFN_BUILTIN_EXPECT:
- return impl_call_builtin_expect (cd);
- }
else if (is_named_call_p (callee_fndecl, "malloc", call, 1))
- return impl_call_malloc (cd);
+ {
+ impl_call_malloc (cd);
+ return false;
+ }
else if (is_named_call_p (callee_fndecl, "calloc", call, 2))
- return impl_call_calloc (cd);
+ {
+ impl_call_calloc (cd);
+ return false;
+ }
else if (is_named_call_p (callee_fndecl, "alloca", call, 1))
- return impl_call_alloca (cd);
+ {
+ impl_call_alloca (cd);
+ return false;
+ }
else if (is_named_call_p (callee_fndecl, "realloc", call, 2))
{
impl_call_realloc (cd);
@@ -862,6 +1203,17 @@ region_model::on_call_pre (const gcall *call, region_model_context *ctxt,
else
unknown_side_effects = true;
}
+ else if (is_named_call_p (callee_fndecl, "fgets", call, 3)
+ || is_named_call_p (callee_fndecl, "fgets_unlocked", call, 3))
+ {
+ impl_call_fgets (cd);
+ return false;
+ }
+ else if (is_named_call_p (callee_fndecl, "fread", call, 4))
+ {
+ impl_call_fread (cd);
+ return false;
+ }
else if (is_named_call_p (callee_fndecl, "getchar", call, 0))
{
/* No side-effects (tracking stream state is out-of-scope
@@ -876,13 +1228,19 @@ region_model::on_call_pre (const gcall *call, region_model_context *ctxt,
else if (is_named_call_p (callee_fndecl, "strlen", call, 1)
&& POINTER_TYPE_P (cd.get_arg_type (0)))
{
- if (impl_call_strlen (cd))
- return false;
+ impl_call_strlen (cd);
+ return false;
}
else if (is_named_call_p (callee_fndecl, "operator new", call, 1))
- return impl_call_operator_new (cd);
+ {
+ impl_call_operator_new (cd);
+ return false;
+ }
else if (is_named_call_p (callee_fndecl, "operator new []", call, 1))
- return impl_call_operator_new (cd);
+ {
+ impl_call_operator_new (cd);
+ return false;
+ }
else if (is_named_call_p (callee_fndecl, "operator delete", call, 1)
|| is_named_call_p (callee_fndecl, "operator delete", call, 2)
|| is_named_call_p (callee_fndecl, "operator delete []", call, 1))
@@ -897,19 +1255,6 @@ region_model::on_call_pre (const gcall *call, region_model_context *ctxt,
else
unknown_side_effects = true;
- /* Some of the above cases update the lhs of the call based on the
- return value. If we get here, it hasn't been done yet, so do that
- now. */
- if (tree lhs = gimple_call_lhs (call))
- {
- const region *lhs_region = get_lvalue (lhs, ctxt);
- if (TREE_CODE (lhs) == SSA_NAME)
- {
- const svalue *sval = m_mgr->get_or_create_initial_value (lhs_region);
- set_value (lhs_region, sval, ctxt);
- }
- }
-
return unknown_side_effects;
}
@@ -930,9 +1275,9 @@ region_model::on_call_post (const gcall *call,
{
if (tree callee_fndecl = get_fndecl_for_call (call, ctxt))
{
+ call_details cd (call, this, ctxt);
if (is_named_call_p (callee_fndecl, "free", call, 1))
{
- call_details cd (call, this, ctxt);
impl_call_free (cd);
return;
}
@@ -940,7 +1285,6 @@ region_model::on_call_post (const gcall *call,
|| is_named_call_p (callee_fndecl, "operator delete", call, 2)
|| is_named_call_p (callee_fndecl, "operator delete []", call, 1))
{
- call_details cd (call, this, ctxt);
impl_call_operator_delete (cd);
return;
}
@@ -948,16 +1292,59 @@ region_model::on_call_post (const gcall *call,
__attribute__((malloc(FOO)))? */
if (lookup_attribute ("*dealloc", DECL_ATTRIBUTES (callee_fndecl)))
{
- call_details cd (call, this, ctxt);
impl_deallocation_call (cd);
return;
}
+ if (fndecl_built_in_p (callee_fndecl, BUILT_IN_NORMAL)
+ && gimple_builtin_call_types_compatible_p (call, callee_fndecl))
+ switch (DECL_UNCHECKED_FUNCTION_CODE (callee_fndecl))
+ {
+ default:
+ break;
+ case BUILT_IN_REALLOC:
+ impl_call_realloc (cd);
+ return;
+ }
}
if (unknown_side_effects)
handle_unrecognized_call (call, ctxt);
}
+/* Purge state involving SVAL from this region_model, using CTXT
+ (if non-NULL) to purge other state in a program_state.
+
+ For example, if we're at the def-stmt of an SSA name, then we need to
+ purge any state for svalues that involve that SSA name. This avoids
+ false positives in loops, since a symbolic value referring to the
+ SSA name will be referring to the previous value of that SSA name.
+
+ For example, in:
+ while ((e = hashmap_iter_next(&iter))) {
+ struct oid2strbuf *e_strbuf = (struct oid2strbuf *)e;
+ free (e_strbuf->value);
+ }
+ at the def-stmt of e_8:
+ e_8 = hashmap_iter_next (&iter);
+ we should purge the "freed" state of:
+ INIT_VAL(CAST_REG(‘struct oid2strbuf’, (*INIT_VAL(e_8))).value)
+ which is the "e_strbuf->value" value from the previous iteration,
+ or we will erroneously report a double-free - the "e_8" within it
+ refers to the previous value. */
+
+void
+region_model::purge_state_involving (const svalue *sval,
+ region_model_context *ctxt)
+{
+ if (!sval->can_have_associated_state_p ())
+ return;
+ m_store.purge_state_involving (sval, m_mgr);
+ m_constraints->purge_state_involving (sval);
+ m_dynamic_extents.purge_state_involving (sval);
+ if (ctxt)
+ ctxt->purge_state_involving (sval);
+}
+
/* Handle a call CALL to a function with unknown behavior.
Traverse the regions in this model, determining what regions are
@@ -1003,6 +1390,8 @@ region_model::handle_unrecognized_call (const gcall *call,
}
}
+ uncertainty_t *uncertainty = ctxt ? ctxt->get_uncertainty () : NULL;
+
/* Purge sm-state for the svalues that were reachable,
both in non-mutable and mutable form. */
for (svalue_set::iterator iter
@@ -1010,14 +1399,18 @@ region_model::handle_unrecognized_call (const gcall *call,
iter != reachable_regs.end_reachable_svals (); ++iter)
{
const svalue *sval = (*iter);
- ctxt->on_unknown_change (sval, false);
+ if (ctxt)
+ ctxt->on_unknown_change (sval, false);
}
for (svalue_set::iterator iter
= reachable_regs.begin_mutable_svals ();
iter != reachable_regs.end_mutable_svals (); ++iter)
{
const svalue *sval = (*iter);
- ctxt->on_unknown_change (sval, true);
+ if (ctxt)
+ ctxt->on_unknown_change (sval, true);
+ if (uncertainty)
+ uncertainty->on_mutable_sval_at_unknown_call (sval);
}
/* Mark any clusters that have escaped. */
@@ -1026,6 +1419,17 @@ region_model::handle_unrecognized_call (const gcall *call,
/* Update bindings for all clusters that have escaped, whether above,
or previously. */
m_store.on_unknown_fncall (call, m_mgr->get_store_manager ());
+
+ /* Purge dynamic extents from any regions that have escaped mutably:
+ realloc could have been called on them. */
+ for (hash_set<const region *>::iterator
+ iter = reachable_regs.begin_mutable_base_regs ();
+ iter != reachable_regs.end_mutable_base_regs ();
+ ++iter)
+ {
+ const region *base_reg = (*iter);
+ unset_dynamic_extents (base_reg);
+ }
}
/* Traverse the regions in this model, determining what regions are
@@ -1035,11 +1439,15 @@ region_model::handle_unrecognized_call (const gcall *call,
for reachability (for handling return values from functions when
analyzing return of the only function on the stack).
+ If UNCERTAINTY is non-NULL, treat any svalues that were recorded
+ within it as being maybe-bound as additional "roots" for reachability.
+
Find svalues that haven't leaked. */
void
region_model::get_reachable_svalues (svalue_set *out,
- const svalue *extra_sval)
+ const svalue *extra_sval,
+ const uncertainty_t *uncertainty)
{
reachable_regions reachable_regs (this);
@@ -1051,6 +1459,12 @@ region_model::get_reachable_svalues (svalue_set *out,
if (extra_sval)
reachable_regs.handle_sval (extra_sval);
+ if (uncertainty)
+ for (uncertainty_t::iterator iter
+ = uncertainty->begin_maybe_bound_svals ();
+ iter != uncertainty->end_maybe_bound_svals (); ++iter)
+ reachable_regs.handle_sval (*iter);
+
/* Get regions for locals that have explicitly bound values. */
for (store::cluster_map_t::iterator iter = m_store.begin ();
iter != m_store.end (); ++iter)
@@ -1109,8 +1523,8 @@ region_model::on_setjmp (const gcall *call, const exploded_node *enode,
/* Direct calls to setjmp return 0. */
if (tree lhs = gimple_call_lhs (call))
{
- tree zero = build_int_cst (TREE_TYPE (lhs), 0);
- const svalue *new_sval = m_mgr->get_or_create_constant_svalue (zero);
+ const svalue *new_sval
+ = m_mgr->get_or_create_int_cst (TREE_TYPE (lhs), 0);
const region *lhs_reg = get_lvalue (lhs, ctxt);
set_value (lhs_reg, new_sval, ctxt);
}
@@ -1141,15 +1555,14 @@ region_model::on_longjmp (const gcall *longjmp_call, const gcall *setjmp_call,
if (tree lhs = gimple_call_lhs (setjmp_call))
{
/* Passing 0 as the val to longjmp leads to setjmp returning 1. */
- tree t_zero = build_int_cst (TREE_TYPE (fake_retval), 0);
- const svalue *zero_sval = m_mgr->get_or_create_constant_svalue (t_zero);
+ const svalue *zero_sval
+ = m_mgr->get_or_create_int_cst (TREE_TYPE (fake_retval), 0);
tristate eq_zero = eval_condition (fake_retval_sval, EQ_EXPR, zero_sval);
/* If we have 0, use 1. */
if (eq_zero.is_true ())
{
- tree t_one = build_int_cst (TREE_TYPE (fake_retval), 1);
const svalue *one_sval
- = m_mgr->get_or_create_constant_svalue (t_one);
+ = m_mgr->get_or_create_int_cst (TREE_TYPE (fake_retval), 1);
fake_retval_sval = one_sval;
}
else
@@ -1172,11 +1585,14 @@ region_model::on_longjmp (const gcall *longjmp_call, const gcall *setjmp_call,
/* Update this region_model for a phi stmt of the form
LHS = PHI <...RHS...>.
- where RHS is for the appropriate edge. */
+ where RHS is for the appropriate edge.
+ Get state from OLD_STATE so that all of the phi stmts for a basic block
+ are effectively handled simultaneously. */
void
region_model::handle_phi (const gphi *phi,
tree lhs, tree rhs,
+ const region_model &old_state,
region_model_context *ctxt)
{
/* For now, don't bother tracking the .MEM SSA names. */
@@ -1185,9 +1601,10 @@ region_model::handle_phi (const gphi *phi,
if (VAR_DECL_IS_VIRTUAL_OPERAND (var))
return;
- const svalue *rhs_sval = get_rvalue (rhs, ctxt);
+ const svalue *src_sval = old_state.get_rvalue (rhs, ctxt);
+ const region *dst_reg = old_state.get_lvalue (lhs, ctxt);
- set_value (get_lvalue (lhs, ctxt), rhs_sval, ctxt);
+ set_value (dst_reg, src_sval, ctxt);
if (ctxt)
ctxt->on_phi (phi, rhs);
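
A hypothetical illustration of why handle_phi must read from OLD_STATE: the phis at a loop header are notionally parallel, as in a swap, where each phi's RHS refers to the other variable's previous value:

    void test (int x, int y, int n)
    {
      for (int i = 0; i < n; i++)
	{
	  /* At the loop head this becomes phis for x and y whose RHSes
	     come from the previous iteration; evaluating them against
	     the in-progress state would let the freshly written x leak
	     into y's phi.  */
	  int tmp = x;
	  x = y;
	  y = tmp;
	}
    }
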
@@ -1199,7 +1616,7 @@ region_model::handle_phi (const gphi *phi,
emitting any diagnostics to CTXT. */
const region *
-region_model::get_lvalue_1 (path_var pv, region_model_context *ctxt)
+region_model::get_lvalue_1 (path_var pv, region_model_context *ctxt) const
{
tree expr = pv.m_tree;
@@ -1294,11 +1711,22 @@ assert_compat_types (tree src_type, tree dst_type)
}
}
+/* Return true if SRC_TYPE can be converted to DST_TYPE as a no-op. */
+
+bool
+compat_types_p (tree src_type, tree dst_type)
+{
+ if (src_type && dst_type && !VOID_TYPE_P (dst_type))
+ if (!(useless_type_conversion_p (src_type, dst_type)))
+ return false;
+ return true;
+}
+
/* Get the region for PV within this region_model,
emitting any diagnostics to CTXT. */
const region *
-region_model::get_lvalue (path_var pv, region_model_context *ctxt)
+region_model::get_lvalue (path_var pv, region_model_context *ctxt) const
{
if (pv.m_tree == NULL_TREE)
return NULL;
@@ -1312,7 +1740,7 @@ region_model::get_lvalue (path_var pv, region_model_context *ctxt)
recent stack frame if it's a local). */
const region *
-region_model::get_lvalue (tree expr, region_model_context *ctxt)
+region_model::get_lvalue (tree expr, region_model_context *ctxt) const
{
return get_lvalue (path_var (expr, get_stack_depth () - 1), ctxt);
}
@@ -1323,7 +1751,7 @@ region_model::get_lvalue (tree expr, region_model_context *ctxt)
emitting any diagnostics to CTXT. */
const svalue *
-region_model::get_rvalue_1 (path_var pv, region_model_context *ctxt)
+region_model::get_rvalue_1 (path_var pv, region_model_context *ctxt) const
{
gcc_assert (pv.m_tree);
@@ -1343,7 +1771,18 @@ region_model::get_rvalue_1 (path_var pv, region_model_context *ctxt)
break;
case BIT_FIELD_REF:
- return m_mgr->get_or_create_unknown_svalue (TREE_TYPE (pv.m_tree));
+ {
+ tree expr = pv.m_tree;
+ tree op0 = TREE_OPERAND (expr, 0);
+ const region *reg = get_lvalue (op0, ctxt);
+ tree num_bits = TREE_OPERAND (expr, 1);
+ tree first_bit_offset = TREE_OPERAND (expr, 2);
+ gcc_assert (TREE_CODE (num_bits) == INTEGER_CST);
+ gcc_assert (TREE_CODE (first_bit_offset) == INTEGER_CST);
+ bit_range bits (TREE_INT_CST_LOW (first_bit_offset),
+ TREE_INT_CST_LOW (num_bits));
+ return get_rvalue_for_bits (TREE_TYPE (expr), reg, bits, ctxt);
+ }
case SSA_NAME:
case VAR_DECL:
@@ -1352,7 +1791,7 @@ region_model::get_rvalue_1 (path_var pv, region_model_context *ctxt)
case ARRAY_REF:
{
const region *reg = get_lvalue (pv, ctxt);
- return get_store_value (reg);
+ return get_store_value (reg, ctxt);
}
case REALPART_EXPR:
@@ -1407,7 +1846,12 @@ region_model::get_rvalue_1 (path_var pv, region_model_context *ctxt)
case MEM_REF:
{
const region *ref_reg = get_lvalue (pv, ctxt);
- return get_store_value (ref_reg);
+ return get_store_value (ref_reg, ctxt);
+ }
+ case OBJ_TYPE_REF:
+ {
+ tree expr = OBJ_TYPE_REF_EXPR (pv.m_tree);
+ return get_rvalue (expr, ctxt);
}
}
}
@@ -1416,7 +1860,7 @@ region_model::get_rvalue_1 (path_var pv, region_model_context *ctxt)
emitting any diagnostics to CTXT. */
const svalue *
-region_model::get_rvalue (path_var pv, region_model_context *ctxt)
+region_model::get_rvalue (path_var pv, region_model_context *ctxt) const
{
if (pv.m_tree == NULL_TREE)
return NULL;
@@ -1425,6 +1869,8 @@ region_model::get_rvalue (path_var pv, region_model_context *ctxt)
assert_compat_types (result_sval->get_type (), TREE_TYPE (pv.m_tree));
+ result_sval = check_for_poison (result_sval, pv.m_tree, ctxt);
+
return result_sval;
}
@@ -1432,7 +1878,7 @@ region_model::get_rvalue (path_var pv, region_model_context *ctxt)
recent stack frame if it's a local). */
const svalue *
-region_model::get_rvalue (tree expr, region_model_context *ctxt)
+region_model::get_rvalue (tree expr, region_model_context *ctxt) const
{
return get_rvalue (path_var (expr, get_stack_depth () - 1), ctxt);
}
@@ -1491,8 +1937,7 @@ region_model::get_initial_value_for_global (const region *reg) const
{
/* Get the value for REG within base_reg_init. */
binding_cluster c (base_reg);
- c.bind (m_mgr->get_store_manager (), base_reg, base_reg_init,
- BK_direct);
+ c.bind (m_mgr->get_store_manager (), base_reg, base_reg_init);
const svalue *sval
= c.get_any_binding (m_mgr->get_store_manager (), reg);
if (sval)
@@ -1511,11 +1956,15 @@ region_model::get_initial_value_for_global (const region *reg) const
}
/* Get a value for REG, looking it up in the store, or otherwise falling
- back to "initial" or "unknown" values. */
+ back to "initial" or "unknown" values.
+ Use CTXT to report any warnings associated with reading from REG. */
const svalue *
-region_model::get_store_value (const region *reg) const
+region_model::get_store_value (const region *reg,
+ region_model_context *ctxt) const
{
+ check_region_for_read (reg, ctxt);
+
/* Special-case: handle var_decls in the constant pool. */
if (const decl_region *decl_reg = reg->dyn_cast_decl_region ())
if (const svalue *sval = decl_reg->maybe_get_constant_value (m_mgr))
@@ -1599,7 +2048,7 @@ region_model::region_exists_p (const region *reg) const
const region *
region_model::deref_rvalue (const svalue *ptr_sval, tree ptr_tree,
- region_model_context *ctxt)
+ region_model_context *ctxt) const
{
gcc_assert (ptr_sval);
gcc_assert (POINTER_TYPE_P (ptr_sval->get_type ()));
@@ -1672,6 +2121,22 @@ region_model::deref_rvalue (const svalue *ptr_sval, tree ptr_tree,
return m_mgr->get_symbolic_region (ptr_sval);
}
+/* Attempt to get BITS within any value of REG, as TYPE.
+ In particular, extract values from compound_svalues for the case
+ where there's a concrete binding at BITS.
+ Return an unknown svalue if we can't handle the given case.
+ Use CTXT to report any warnings associated with reading from REG. */
+
+const svalue *
+region_model::get_rvalue_for_bits (tree type,
+ const region *reg,
+ const bit_range &bits,
+ region_model_context *ctxt) const
+{
+ const svalue *sval = get_store_value (reg, ctxt);
+ return m_mgr->get_or_create_bits_within (type, bits, sval);
+}
+
/* A subclass of pending_diagnostic for complaining about writes to
constant regions of memory. */
@@ -1785,8 +2250,91 @@ region_model::check_for_writable_region (const region* dest_reg,
}
}
+/* Get the capacity of REG in bytes. */
+
+const svalue *
+region_model::get_capacity (const region *reg) const
+{
+ switch (reg->get_kind ())
+ {
+ default:
+ break;
+ case RK_DECL:
+ {
+ const decl_region *decl_reg = as_a <const decl_region *> (reg);
+ tree decl = decl_reg->get_decl ();
+ if (TREE_CODE (decl) == SSA_NAME)
+ {
+ tree type = TREE_TYPE (decl);
+ tree size = TYPE_SIZE (type);
+ return get_rvalue (size, NULL);
+ }
+ else
+ {
+ tree size = decl_init_size (decl, false);
+ if (size)
+ return get_rvalue (size, NULL);
+ }
+ }
+ break;
+ case RK_SIZED:
+ /* Look through sized regions to get at the capacity
+ of the underlying regions. */
+ return get_capacity (reg->get_parent_region ());
+ }
+
+ if (const svalue *recorded = get_dynamic_extents (reg))
+ return recorded;
+
+ return m_mgr->get_or_create_unknown_svalue (sizetype);
+}
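
Usage sketch for __analyzer_dump_capacity (dispatched in on_stmt_pre above), showing what get_capacity yields for a decl; the declaration and the exact dump format are assumptions:

    extern void __analyzer_dump_capacity (const void *);

    void test (void)
    {
      char buf[16];
      __analyzer_dump_capacity (buf);  /* expect a capacity of 16 bytes */
    }
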
+
+/* If CTXT is non-NULL, use it to warn about any problems accessing REG,
+ using DIR to determine if this access is a read or write. */
+
+void
+region_model::check_region_access (const region *reg,
+ enum access_direction dir,
+ region_model_context *ctxt) const
+{
+ /* Fail gracefully if CTXT is NULL. */
+ if (!ctxt)
+ return;
+
+ switch (dir)
+ {
+ default:
+ gcc_unreachable ();
+ case DIR_READ:
+ /* Currently a no-op. */
+ break;
+ case DIR_WRITE:
+ check_for_writable_region (reg, ctxt);
+ break;
+ }
+}
+
+/* If CTXT is non-NULL, use it to warn about any problems writing to REG. */
+
+void
+region_model::check_region_for_write (const region *dest_reg,
+ region_model_context *ctxt) const
+{
+ check_region_access (dest_reg, DIR_WRITE, ctxt);
+}
+
+/* If CTXT is non-NULL, use it to warn about any problems reading from REG. */
+
+void
+region_model::check_region_for_read (const region *src_reg,
+ region_model_context *ctxt) const
+{
+ check_region_access (src_reg, DIR_READ, ctxt);
+}
+
/* Set the value of the region given by LHS_REG to the value given
- by RHS_SVAL. */
+ by RHS_SVAL.
+ Use CTXT to report any warnings associated with writing to LHS_REG. */
void
region_model::set_value (const region *lhs_reg, const svalue *rhs_sval,
@@ -1795,10 +2343,10 @@ region_model::set_value (const region *lhs_reg, const svalue *rhs_sval,
gcc_assert (lhs_reg);
gcc_assert (rhs_sval);
- check_for_writable_region (lhs_reg, ctxt);
+ check_region_for_write (lhs_reg, ctxt);
m_store.set_value (m_mgr->get_store_manager(), lhs_reg, rhs_sval,
- BK_direct);
+ ctxt ? ctxt->get_uncertainty () : NULL);
}
/* Set the value of the region given by LHS to the value given by RHS. */
@@ -1829,6 +2377,14 @@ region_model::purge_region (const region *reg)
m_store.purge_region (m_mgr->get_store_manager(), reg);
}
+/* Fill REG with SVAL. */
+
+void
+region_model::fill_region (const region *reg, const svalue *sval)
+{
+ m_store.fill_region (m_mgr->get_store_manager(), reg, sval);
+}
+
/* Zero-fill REG. */
void
@@ -1840,9 +2396,11 @@ region_model::zero_fill_region (const region *reg)
/* Mark REG as having unknown content. */
void
-region_model::mark_region_as_unknown (const region *reg)
+region_model::mark_region_as_unknown (const region *reg,
+ uncertainty_t *uncertainty)
{
- m_store.mark_region_as_unknown (m_mgr->get_store_manager(), reg);
+ m_store.mark_region_as_unknown (m_mgr->get_store_manager(), reg,
+ uncertainty);
}
/* Determine what is known about the condition "LHS_SVAL OP RHS_SVAL" within
@@ -1937,34 +2495,51 @@ region_model::eval_condition_without_cm (const svalue *lhs,
if (const constant_svalue *cst_rhs = rhs->dyn_cast_constant_svalue ())
return constant_svalue::eval_condition (cst_lhs, op, cst_rhs);
- /* Handle comparison of a region_svalue against zero. */
-
- if (const region_svalue *ptr = lhs->dyn_cast_region_svalue ())
- if (const constant_svalue *cst_rhs = rhs->dyn_cast_constant_svalue ())
- if (zerop (cst_rhs->get_constant ()))
- {
- /* A region_svalue is a non-NULL pointer, except in certain
- special cases (see the comment for region::non_null_p. */
- const region *pointee = ptr->get_pointee ();
- if (pointee->non_null_p ())
- {
- switch (op)
- {
- default:
- gcc_unreachable ();
-
- case EQ_EXPR:
- case GE_EXPR:
- case LE_EXPR:
- return tristate::TS_FALSE;
-
- case NE_EXPR:
- case GT_EXPR:
- case LT_EXPR:
- return tristate::TS_TRUE;
- }
- }
- }
+ /* Handle comparison against zero. */
+ if (const constant_svalue *cst_rhs = rhs->dyn_cast_constant_svalue ())
+ if (zerop (cst_rhs->get_constant ()))
+ {
+ if (const region_svalue *ptr = lhs->dyn_cast_region_svalue ())
+ {
+ /* A region_svalue is a non-NULL pointer, except in certain
+ special cases (see the comment for region::non_null_p). */
+ const region *pointee = ptr->get_pointee ();
+ if (pointee->non_null_p ())
+ {
+ switch (op)
+ {
+ default:
+ gcc_unreachable ();
+
+ case EQ_EXPR:
+ case GE_EXPR:
+ case LE_EXPR:
+ return tristate::TS_FALSE;
+
+ case NE_EXPR:
+ case GT_EXPR:
+ case LT_EXPR:
+ return tristate::TS_TRUE;
+ }
+ }
+ }
+ else if (const binop_svalue *binop = lhs->dyn_cast_binop_svalue ())
+ {
+ /* Treat offsets from a non-NULL pointer as being non-NULL. This
+ isn't strictly true, in that eventually ptr++ will wrap
+ around and be NULL, but it won't occur in practice and thus
+ can be used to suppress effectively-false positives that we
+ shouldn't warn about. */
+ if (binop->get_op () == POINTER_PLUS_EXPR)
+ {
+ tristate lhs_ts
+ = eval_condition_without_cm (binop->get_arg0 (),
+ op, rhs);
+ if (lhs_ts.is_known ())
+ return lhs_ts;
+ }
+ }
+ }
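
A sketch of the effectively-false positive the POINTER_PLUS_EXPR case suppresses:

    void test (char *p)
    {
      if (p)
	{
	  char *q = p + 4;   /* offset from a known-non-NULL pointer */
	  if (q)             /* evaluates to true via the new case,
				rather than splitting off a spurious
				q == NULL path */
	    *q = 'x';
	}
    }
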
/* Handle rejection of equality for comparisons of the initial values of
"external" values (such as params) with the address of locals. */
@@ -2012,6 +2587,123 @@ region_model::compare_initial_and_pointer (const initial_svalue *init,
return tristate::TS_UNKNOWN;
}
+/* Handle various constraints of the form:
+ LHS: ((bool)INNER_LHS INNER_OP INNER_RHS)
+ OP : == or !=
+ RHS: zero
+ and (with a cast):
+ LHS: CAST([long]int, ((bool)INNER_LHS INNER_OP INNER_RHS))
+ OP : == or !=
+ RHS: zero
+ by adding constraints for INNER_LHS INNER_OP INNER_RHS.
+
+ Return true if this function can fully handle the constraint; if
+ so, add the implied constraint(s) and write true to *OUT if they
+ are consistent with existing constraints, or write false to *OUT
+ if they contradict existing constraints.
+
+ Return false for cases that this function doesn't know how to handle.
+
+ For example, if we're checking a stored conditional, we'll have
+ something like:
+ LHS: CAST(long int, (&HEAP_ALLOCATED_REGION(8)!=(int *)0B))
+ OP : NE_EXPR
+ RHS: zero
+ which this function can turn into an add_constraint of:
+ (&HEAP_ALLOCATED_REGION(8) != (int *)0B)
+
+ Similarly, optimized && and || conditionals lead to e.g.
+ if (p && q)
+ becoming gimple like this:
+ _1 = p_6 == 0B;
+ _2 = q_8 == 0B;
+ _3 = _1 | _2;
+ On the "_3 is false" branch we can have constraints of the form:
+ ((&HEAP_ALLOCATED_REGION(8)!=(int *)0B)
+ | (&HEAP_ALLOCATED_REGION(10)!=(int *)0B))
+ == 0
+ which implies that both _1 and _2 are false,
+ which this function can turn into a pair of add_constraints of
+ (&HEAP_ALLOCATED_REGION(8)!=(int *)0B)
+ and:
+ (&HEAP_ALLOCATED_REGION(10)!=(int *)0B). */
+
+bool
+region_model::add_constraints_from_binop (const svalue *outer_lhs,
+ enum tree_code outer_op,
+ const svalue *outer_rhs,
+ bool *out,
+ region_model_context *ctxt)
+{
+ while (const svalue *cast = outer_lhs->maybe_undo_cast ())
+ outer_lhs = cast;
+ const binop_svalue *binop_sval = outer_lhs->dyn_cast_binop_svalue ();
+ if (!binop_sval)
+ return false;
+ if (!outer_rhs->all_zeroes_p ())
+ return false;
+
+ const svalue *inner_lhs = binop_sval->get_arg0 ();
+ enum tree_code inner_op = binop_sval->get_op ();
+ const svalue *inner_rhs = binop_sval->get_arg1 ();
+
+ if (outer_op != NE_EXPR && outer_op != EQ_EXPR)
+ return false;
+
+ /* We have either
+ - "OUTER_LHS != false" (i.e. OUTER is true), or
+ - "OUTER_LHS == false" (i.e. OUTER is false). */
+ bool is_true = outer_op == NE_EXPR;
+
+ switch (inner_op)
+ {
+ default:
+ return false;
+
+ case EQ_EXPR:
+ case NE_EXPR:
+ {
+ /* ...and "(inner_lhs OP inner_rhs) == 0"
+ then (inner_lhs OP inner_rhs) must have the same
+ logical value as LHS. */
+ if (!is_true)
+ inner_op = invert_tree_comparison (inner_op, false /* honor_nans */);
+ *out = add_constraint (inner_lhs, inner_op, inner_rhs, ctxt);
+ return true;
+ }
+ break;
+
+ case BIT_AND_EXPR:
+ if (is_true)
+ {
+ /* ...and "(inner_lhs & inner_rhs) != 0"
+ then both inner_lhs and inner_rhs must be true. */
+ const svalue *false_sval
+ = m_mgr->get_or_create_constant_svalue (boolean_false_node);
+ bool sat1 = add_constraint (inner_lhs, NE_EXPR, false_sval, ctxt);
+ bool sat2 = add_constraint (inner_rhs, NE_EXPR, false_sval, ctxt);
+ *out = sat1 && sat2;
+ return true;
+ }
+ return false;
+
+ case BIT_IOR_EXPR:
+ if (!is_true)
+ {
+ /* ...and "(inner_lhs | inner_rhs) == 0"
+ i.e. "(inner_lhs | inner_rhs)" is false
+ then both inner_lhs and inner_rhs must be false. */
+ const svalue *false_sval
+ = m_mgr->get_or_create_constant_svalue (boolean_false_node);
+ bool sat1 = add_constraint (inner_lhs, EQ_EXPR, false_sval, ctxt);
+ bool sat2 = add_constraint (inner_rhs, EQ_EXPR, false_sval, ctxt);
+ *out = sat1 && sat2;
+ return true;
+ }
+ return false;
+ }
+}
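As a concrete illustration of the BIT_IOR_EXPR case, here is a hedged sketch
of C source whose gimple (when optimized) stores the disjunction of two null
tests in a boolean temporary; on the false branch, add_constraints_from_binop
splits it back into the two pointer constraints:

    void test (int *p, int *q)
    {
      if (p && q)   /* gimple: _1 = p == 0B; _2 = q == 0B; _3 = _1 | _2;
                       the "_3 == 0" edge is handled by the BIT_IOR_EXPR
                       case above, yielding p != 0B and q != 0B.  */
        *p = *q;
    }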
+
/* Attempt to add the constraint "LHS OP RHS" to this region_model.
If it is consistent with existing constraints, add it, and return true.
Return false if it contradicts existing constraints.
@@ -2029,7 +2721,21 @@ region_model::add_constraint (tree lhs, enum tree_code op, tree rhs,
const svalue *lhs_sval = get_rvalue (lhs, ctxt);
const svalue *rhs_sval = get_rvalue (rhs, ctxt);
- tristate t_cond = eval_condition (lhs_sval, op, rhs_sval);
+ return add_constraint (lhs_sval, op, rhs_sval, ctxt);
+}
+
+/* Attempt to add the constraint "LHS OP RHS" to this region_model.
+ If it is consistent with existing constraints, add it, and return true.
+ Return false if it contradicts existing constraints.
+ Use CTXT for reporting any diagnostics associated with the accesses. */
+
+bool
+region_model::add_constraint (const svalue *lhs,
+ enum tree_code op,
+ const svalue *rhs,
+ region_model_context *ctxt)
+{
+ tristate t_cond = eval_condition (lhs, op, rhs);
/* If we already have the condition, do nothing. */
if (t_cond.is_true ())
@@ -2040,10 +2746,12 @@ region_model::add_constraint (tree lhs, enum tree_code op, tree rhs,
if (t_cond.is_false ())
return false;
- /* Store the constraint. */
- m_constraints->add_constraint (lhs_sval, op, rhs_sval);
+ bool out;
+ if (add_constraints_from_binop (lhs, op, rhs, &out, ctxt))
+ return out;
- add_any_constraints_from_ssa_def_stmt (lhs, op, rhs, ctxt);
+ /* Store the constraint. */
+ m_constraints->add_constraint (lhs, op, rhs);
/* Notify the context, if any. This exists so that the state machines
in a program_state can be notified about the condition, and so can
@@ -2052,6 +2760,13 @@ region_model::add_constraint (tree lhs, enum tree_code op, tree rhs,
if (ctxt)
ctxt->on_condition (lhs, op, rhs);
+ /* If we have &REGION == NULL, then drop dynamic extents for REGION (for
+ the case where REGION is heap-allocated and thus could be NULL). */
+ if (tree rhs_cst = rhs->maybe_get_constant ())
+ if (op == EQ_EXPR && zerop (rhs_cst))
+ if (const region_svalue *region_sval = lhs->dyn_cast_region_svalue ())
+ unset_dynamic_extents (region_sval->get_pointee ());
+
return true;
}
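The unset_dynamic_extents call above matters on allocator failure paths: once
a pointer is constrained to equal NULL, the capacity recorded for its pointee
no longer applies on that path.  A hedged sketch of analyzed C:

    #include <stdlib.h>

    int *make_buf (size_t n)
    {
      int *p = (int *) malloc (n);  /* p's region gets dynamic extent "n" */
      if (p == NULL)                /* EQ_EXPR against zero: the extent is
                                       dropped on this path */
        return NULL;
      return p;                     /* extent still recorded here */
    }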
@@ -2065,141 +2780,10 @@ region_model::add_constraint (tree lhs, enum tree_code op, tree rhs,
{
bool sat = add_constraint (lhs, op, rhs, ctxt);
if (!sat && out)
- *out = new rejected_constraint (*this, lhs, op, rhs);
+ *out = new rejected_op_constraint (*this, lhs, op, rhs);
return sat;
}
-/* Subroutine of region_model::add_constraint for handling optimized
- && and || conditionals.
-
- If we have an SSA_NAME for a boolean compared against 0,
- look at anything implied by the def stmt and call add_constraint
- for it (which could recurse).
-
- For example, if we have
- _1 = p_6 == 0B;
- _2 = p_8 == 0B
- _3 = _1 | _2
- and add the constraint
- (_3 == 0),
- then the def stmt for _3 implies that _1 and _2 are both false,
- and hence we can add the constraints:
- p_6 != 0B
- p_8 != 0B. */
-
-void
-region_model::add_any_constraints_from_ssa_def_stmt (tree lhs,
- enum tree_code op,
- tree rhs,
- region_model_context *ctxt)
-{
- if (TREE_CODE (lhs) != SSA_NAME)
- return;
-
- if (!zerop (rhs))
- return;
-
- if (op != NE_EXPR && op != EQ_EXPR)
- return;
-
- gimple *def_stmt = SSA_NAME_DEF_STMT (lhs);
- if (const gassign *assign = dyn_cast<gassign *> (def_stmt))
- add_any_constraints_from_gassign (op, rhs, assign, ctxt);
- else if (gcall *call = dyn_cast<gcall *> (def_stmt))
- add_any_constraints_from_gcall (op, rhs, call, ctxt);
-}
-
-/* Add any constraints for an SSA_NAME defined by ASSIGN
- where the result OP RHS. */
-
-void
-region_model::add_any_constraints_from_gassign (enum tree_code op,
- tree rhs,
- const gassign *assign,
- region_model_context *ctxt)
-{
- /* We have either
- - "LHS != false" (i.e. LHS is true), or
- - "LHS == false" (i.e. LHS is false). */
- bool is_true = op == NE_EXPR;
-
- enum tree_code rhs_code = gimple_assign_rhs_code (assign);
-
- switch (rhs_code)
- {
- default:
- break;
-
- case NOP_EXPR:
- case VIEW_CONVERT_EXPR:
- {
- add_constraint (gimple_assign_rhs1 (assign), op, rhs, ctxt);
- }
- break;
-
- case BIT_AND_EXPR:
- {
- if (is_true)
- {
- /* ...and "LHS == (rhs1 & rhs2) i.e. "(rhs1 & rhs2)" is true
- then both rhs1 and rhs2 must be true. */
- tree rhs1 = gimple_assign_rhs1 (assign);
- tree rhs2 = gimple_assign_rhs2 (assign);
- add_constraint (rhs1, NE_EXPR, boolean_false_node, ctxt);
- add_constraint (rhs2, NE_EXPR, boolean_false_node, ctxt);
- }
- }
- break;
-
- case BIT_IOR_EXPR:
- {
- if (!is_true)
- {
- /* ...and "LHS == (rhs1 | rhs2)
- i.e. "(rhs1 | rhs2)" is false
- then both rhs1 and rhs2 must be false. */
- tree rhs1 = gimple_assign_rhs1 (assign);
- tree rhs2 = gimple_assign_rhs2 (assign);
- add_constraint (rhs1, EQ_EXPR, boolean_false_node, ctxt);
- add_constraint (rhs2, EQ_EXPR, boolean_false_node, ctxt);
- }
- }
- break;
-
- case EQ_EXPR:
- case NE_EXPR:
- {
- /* ...and "LHS == (rhs1 OP rhs2)"
- then rhs1 OP rhs2 must have the same logical value as LHS. */
- tree rhs1 = gimple_assign_rhs1 (assign);
- tree rhs2 = gimple_assign_rhs2 (assign);
- if (!is_true)
- rhs_code
- = invert_tree_comparison (rhs_code, false /* honor_nans */);
- add_constraint (rhs1, rhs_code, rhs2, ctxt);
- }
- break;
- }
-}
-
-/* Add any constraints for an SSA_NAME defined by CALL
- where the result OP RHS. */
-
-void
-region_model::add_any_constraints_from_gcall (enum tree_code op,
- tree rhs,
- const gcall *call,
- region_model_context *ctxt)
-{
- if (gimple_call_builtin_p (call, BUILT_IN_EXPECT)
- || gimple_call_builtin_p (call, BUILT_IN_EXPECT_WITH_PROBABILITY)
- || gimple_call_internal_p (call, IFN_BUILTIN_EXPECT))
- {
- /* __builtin_expect's return value is its initial argument. */
- add_constraint (gimple_call_arg (call, 0), op, rhs, ctxt);
- }
-}
-
/* Determine what is known about the condition "LHS OP RHS" within
this model.
Use CTXT for reporting any diagnostics associated with the accesses. */
@@ -2325,9 +2909,9 @@ region_model::get_representative_tree (const svalue *sval) const
/* Strip off any top-level cast. */
if (expr && TREE_CODE (expr) == NOP_EXPR)
- return TREE_OPERAND (expr, 0);
+ expr = TREE_OPERAND (expr, 0);
- return expr;
+ return fixup_tree_for_diagnostic (expr);
}
/* Implementation of region_model::get_representative_path_var.
@@ -2445,6 +3029,9 @@ region_model::get_representative_path_var_1 (const region *reg,
parent_pv.m_stack_depth);
}
+ case RK_SIZED:
+ return path_var (NULL_TREE, 0);
+
case RK_CAST:
{
path_var parent_pv
@@ -2507,6 +3094,10 @@ region_model::update_for_phis (const supernode *snode,
{
gcc_assert (last_cfg_superedge);
+ /* Copy this state and pass it to handle_phi so that all of the phi stmts
+ are effectively handled simultaneously. */
+ const region_model old_state (*this);
+
for (gphi_iterator gpi = const_cast<supernode *>(snode)->start_phis ();
!gsi_end_p (gpi); gsi_next (&gpi))
{
@@ -2515,8 +3106,8 @@ region_model::update_for_phis (const supernode *snode,
tree src = last_cfg_superedge->get_phi_arg (phi);
tree lhs = gimple_phi_result (phi);
- /* Update next_state based on phi. */
- handle_phi (phi, lhs, src, ctxt);
+ /* Update next_state based on phi and old_state. */
+ handle_phi (phi, lhs, src, old_state, ctxt);
}
}
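Snapshotting the model before iterating the phis makes each phi read from the
pre-edge state, which matters when phis feed each other.  A hedged sketch of a
loop whose header has (roughly) mutually-referencing phis:

    int swap_loop (int n)
    {
      int a = 0, b = 1;
      while (n-- > 0)
        {           /* header has roughly: a = PHI <0, b'>, b = PHI <1, a'>;
                       reading b' and a' from old_state keeps the two phi
                       assignments simultaneous rather than sequential.  */
          int t = a;
          a = b;
          b = t;
        }
      return a + b;
    }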
@@ -2610,12 +3201,12 @@ region_model::maybe_update_for_edge (const superedge &edge,
caller's frame. */
void
-region_model::update_for_call_superedge (const call_superedge &call_edge,
- region_model_context *ctxt)
+region_model::update_for_gcall (const gcall *call_stmt,
+ region_model_context *ctxt,
+ function *callee)
{
/* Build a vec of argument svalues, using the current top
frame for resolving tree expressions. */
- const gcall *call_stmt = call_edge.get_call_stmt ();
auto_vec<const svalue *> arg_svals (gimple_call_num_args (call_stmt));
for (unsigned i = 0; i < gimple_call_num_args (call_stmt); i++)
@@ -2624,33 +3215,62 @@ region_model::update_for_call_superedge (const call_superedge &call_edge,
arg_svals.quick_push (get_rvalue (arg, ctxt));
}
- push_frame (call_edge.get_callee_function (), &arg_svals, ctxt);
+  if (!callee)
+ {
+ /* Get the function * from the gcall. */
+      tree fn_decl = get_fndecl_for_call (call_stmt, ctxt);
+ callee = DECL_STRUCT_FUNCTION (fn_decl);
+ }
+
+ push_frame (callee, &arg_svals, ctxt);
}
/* Pop the top-most frame_region from the stack, and copy the return
region's values (if any) into the region for the lvalue of the LHS of
the call (if any). */
+
void
-region_model::update_for_return_superedge (const return_superedge &return_edge,
- region_model_context *ctxt)
+region_model::update_for_return_gcall (const gcall *call_stmt,
+ region_model_context *ctxt)
{
/* Get the region for the result of the call, within the caller frame. */
const region *result_dst_reg = NULL;
- const gcall *call_stmt = return_edge.get_call_stmt ();
tree lhs = gimple_call_lhs (call_stmt);
if (lhs)
{
/* Normally we access the top-level frame, which is:
- path_var (expr, get_stack_depth () - 1)
- whereas here we need the caller frame, hence "- 2" here. */
+ path_var (expr, get_stack_depth () - 1)
+ whereas here we need the caller frame, hence "- 2" here. */
gcc_assert (get_stack_depth () >= 2);
result_dst_reg = get_lvalue (path_var (lhs, get_stack_depth () - 2),
- ctxt);
+ ctxt);
}
pop_frame (result_dst_reg, NULL, ctxt);
}
+/* Extract calling information from the superedge and update the model for the
+   call.  */
+
+void
+region_model::update_for_call_superedge (const call_superedge &call_edge,
+ region_model_context *ctxt)
+{
+ const gcall *call_stmt = call_edge.get_call_stmt ();
+ update_for_gcall (call_stmt, ctxt, call_edge.get_callee_function ());
+}
+
+/* Extract calling information from the return superedge and update the model
+   for the returning call.  */
+
+void
+region_model::update_for_return_superedge (const return_superedge &return_edge,
+ region_model_context *ctxt)
+{
+ const gcall *call_stmt = return_edge.get_call_stmt ();
+ update_for_return_gcall (call_stmt, ctxt);
+}
+
/* Update this region_model with a summary of the effect of calling
and returning from CG_SEDGE.
@@ -2666,7 +3286,8 @@ region_model::update_for_call_summary (const callgraph_superedge &cg_sedge,
const gcall *call_stmt = cg_sedge.get_call_stmt ();
tree lhs = gimple_call_lhs (call_stmt);
if (lhs)
- mark_region_as_unknown (get_lvalue (lhs, ctxt));
+ mark_region_as_unknown (get_lvalue (lhs, ctxt),
+ ctxt ? ctxt->get_uncertainty () : NULL);
// TODO: actually implement some kind of summary here
}
@@ -2715,56 +3336,15 @@ region_model::apply_constraints_for_gswitch (const switch_cfg_superedge &edge,
region_model_context *ctxt,
rejected_constraint **out)
{
+ bounded_ranges_manager *ranges_mgr = get_range_manager ();
+ const bounded_ranges *all_cases_ranges
+ = ranges_mgr->get_or_create_ranges_for_switch (&edge, switch_stmt);
tree index = gimple_switch_index (switch_stmt);
- tree case_label = edge.get_case_label ();
- gcc_assert (TREE_CODE (case_label) == CASE_LABEL_EXPR);
- tree lower_bound = CASE_LOW (case_label);
- tree upper_bound = CASE_HIGH (case_label);
- if (lower_bound)
- {
- if (upper_bound)
- {
- /* Range. */
- if (!add_constraint (index, GE_EXPR, lower_bound, ctxt, out))
- return false;
- return add_constraint (index, LE_EXPR, upper_bound, ctxt, out);
- }
- else
- /* Single-value. */
- return add_constraint (index, EQ_EXPR, lower_bound, ctxt, out);
- }
- else
- {
- /* The default case.
- Add exclusions based on the other cases. */
- for (unsigned other_idx = 1;
- other_idx < gimple_switch_num_labels (switch_stmt);
- other_idx++)
- {
- tree other_label = gimple_switch_label (switch_stmt,
- other_idx);
- tree other_lower_bound = CASE_LOW (other_label);
- tree other_upper_bound = CASE_HIGH (other_label);
- gcc_assert (other_lower_bound);
- if (other_upper_bound)
- {
- /* Exclude this range-valued case.
- For now, we just exclude the boundary values.
- TODO: exclude the values within the region. */
- if (!add_constraint (index, NE_EXPR, other_lower_bound,
- ctxt, out))
- return false;
- if (!add_constraint (index, NE_EXPR, other_upper_bound,
- ctxt, out))
- return false;
- }
- else
- /* Exclude this single-valued case. */
- if (!add_constraint (index, NE_EXPR, other_lower_bound, ctxt, out))
- return false;
- }
- return true;
- }
+ const svalue *index_sval = get_rvalue (index, ctxt);
+ bool sat = m_constraints->add_bounded_ranges (index_sval, all_cases_ranges);
+ if (!sat && out)
+ *out = new rejected_ranges_constraint (*this, index, all_cases_ranges);
+ return sat;
}
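The rewritten gswitch handling delegates to a bounded_ranges_manager, which
computes one ranges object per outgoing CFG edge (the default edge gets the
complement of all case ranges), instead of building per-label constraints.  A
sketch of a switch it now models exactly, where the old code only excluded the
endpoints of range-valued cases on the default edge (uses GCC's case-range
extension):

    int classify (int i)
    {
      switch (i)
        {
        case 1 ... 5:  /* this edge: i constrained to [1, 5] */
          return 1;
        case 42:       /* this edge: i constrained to [42, 42] */
          return 2;
        default:       /* this edge: i in the complement of [1,5] and [42,42] */
          return 0;
        }
    }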
/* Apply any constraints due to an exception being thrown at LAST_STMT.
@@ -2956,7 +3536,8 @@ region_model::get_frame_at_index (int index) const
/* Unbind svalues for any regions in REG and below.
Find any pointers to such regions; convert them to
- poisoned values of kind PKIND. */
+ poisoned values of kind PKIND.
+ Also purge any dynamic extents. */
void
region_model::unbind_region_and_descendents (const region *reg,
@@ -2977,6 +3558,15 @@ region_model::unbind_region_and_descendents (const region *reg,
/* Find any pointers to REG or its descendents; convert to poisoned. */
poison_any_pointers_to_descendents (reg, pkind);
+
+ /* Purge dynamic extents of any base regions in REG and below
+ (e.g. VLAs and alloca stack regions). */
+ for (auto iter : m_dynamic_extents)
+ {
+ const region *iter_reg = iter.first;
+ if (iter_reg->descendent_of_p (reg))
+ unset_dynamic_extents (iter_reg);
+ }
}
/* Implementation of BindingVisitor.
@@ -3051,6 +3641,10 @@ region_model::can_merge_with_p (const region_model &other_model,
&m))
return false;
+ if (!m_dynamic_extents.can_merge_with_p (other_model.m_dynamic_extents,
+ &out_model->m_dynamic_extents))
+ return false;
+
/* Merge constraints. */
constraint_manager::merge (*m_constraints,
*other_model.m_constraints,
@@ -3132,7 +3726,8 @@ const region *
region_model::create_region_for_heap_alloc (const svalue *size_in_bytes)
{
const region *reg = m_mgr->create_region_for_heap_alloc ();
- record_dynamic_extents (reg, size_in_bytes);
+ if (compat_types_p (size_in_bytes->get_type (), size_type_node))
+ set_dynamic_extents (reg, size_in_bytes);
return reg;
}
@@ -3143,17 +3738,50 @@ const region *
region_model::create_region_for_alloca (const svalue *size_in_bytes)
{
const region *reg = m_mgr->create_region_for_alloca (m_current_frame);
- record_dynamic_extents (reg, size_in_bytes);
+ if (compat_types_p (size_in_bytes->get_type (), size_type_node))
+ set_dynamic_extents (reg, size_in_bytes);
return reg;
}
-/* Placeholder hook for recording that the size of REG is SIZE_IN_BYTES.
- Currently does nothing. */
+/* Record that the size of REG is SIZE_IN_BYTES. */
void
-region_model::
-record_dynamic_extents (const region *reg ATTRIBUTE_UNUSED,
- const svalue *size_in_bytes ATTRIBUTE_UNUSED)
+region_model::set_dynamic_extents (const region *reg,
+ const svalue *size_in_bytes)
+{
+ assert_compat_types (size_in_bytes->get_type (), size_type_node);
+ m_dynamic_extents.put (reg, size_in_bytes);
+}
+
+/* Get the recorded size of REG in bytes, or NULL if no dynamic size was
+   recorded.  */
+
+const svalue *
+region_model::get_dynamic_extents (const region *reg) const
+{
+ if (const svalue * const *slot = m_dynamic_extents.get (reg))
+ return *slot;
+ return NULL;
+}
+
+/* Unset any recorded dynamic size of REG. */
+
+void
+region_model::unset_dynamic_extents (const region *reg)
+{
+ m_dynamic_extents.remove (reg);
+}
+
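Taken together, set/get/unset form a small per-model capacity map keyed by
base region.  A minimal usage sketch against the API added above ("model",
"reg" and "size_sval" are assumed to be in scope):

    model.set_dynamic_extents (reg, size_sval);
    const svalue *cap = model.get_dynamic_extents (reg);
    gcc_assert (cap == size_sval);
    model.unset_dynamic_extents (reg);
    gcc_assert (model.get_dynamic_extents (reg) == NULL);  /* not recorded */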
+/* class noop_region_model_context : public region_model_context. */
+
+void
+noop_region_model_context::bifurcate (custom_edge_info *info)
+{
+ delete info;
+}
+
+void
+noop_region_model_context::terminate_path ()
{
}
@@ -3211,10 +3839,10 @@ debug (const region_model &rmodel)
rmodel.dump (false);
}
-/* struct rejected_constraint. */
+/* class rejected_op_constraint : public rejected_constraint. */
void
-rejected_constraint::dump_to_pp (pretty_printer *pp) const
+rejected_op_constraint::dump_to_pp (pretty_printer *pp) const
{
region_model m (m_model);
const svalue *lhs_sval = m.get_rvalue (m_lhs, NULL);
@@ -3224,6 +3852,18 @@ rejected_constraint::dump_to_pp (pretty_printer *pp) const
rhs_sval->dump_to_pp (pp, true);
}
+/* class rejected_ranges_constraint : public rejected_constraint. */
+
+void
+rejected_ranges_constraint::dump_to_pp (pretty_printer *pp) const
+{
+ region_model m (m_model);
+ const svalue *sval = m.get_rvalue (m_expr, NULL);
+ sval->dump_to_pp (pp, true);
+ pp_string (pp, " in ");
+ m_ranges->dump_to_pp (pp, true);
+}
+
/* class engine. */
/* Dump the managed objects by class to LOGGER, and the per-class totals. */
@@ -4019,7 +4659,7 @@ test_stack_frames ()
/* Verify that p (which was pointing at the local "x" in the popped
frame) has been poisoned. */
- const svalue *new_p_sval = model.get_rvalue (p, &ctxt);
+ const svalue *new_p_sval = model.get_rvalue (p, NULL);
ASSERT_EQ (new_p_sval->get_kind (), SK_POISONED);
ASSERT_EQ (new_p_sval->dyn_cast_poisoned_svalue ()->get_poison_kind (),
POISON_KIND_POPPED_STACK);
@@ -4028,7 +4668,7 @@ test_stack_frames ()
renumbering. */
const svalue *new_q_sval = model.get_rvalue (q, &ctxt);
ASSERT_EQ (new_q_sval->get_kind (), SK_REGION);
- ASSERT_EQ (new_q_sval->dyn_cast_region_svalue ()->get_pointee (),
+ ASSERT_EQ (new_q_sval->maybe_get_region (),
model.get_lvalue (p, &ctxt));
/* Verify that top of stack has been updated. */
@@ -4214,9 +4854,7 @@ test_canonicalization_4 ()
region_model_manager mgr;
region_model model (&mgr);
- unsigned i;
- tree cst;
- FOR_EACH_VEC_ELT (csts, i, cst)
+ for (tree cst : csts)
model.get_rvalue (cst, NULL);
model.canonicalize ();
@@ -4456,7 +5094,7 @@ test_state_merging ()
{
test_region_model_context ctxt;
region_model model0 (&mgr);
- tree size = build_int_cst (integer_type_node, 1024);
+ tree size = build_int_cst (size_type_node, 1024);
const svalue *size_sval = mgr.get_or_create_constant_svalue (size);
const region *new_reg = model0.create_region_for_heap_alloc (size_sval);
const svalue *ptr_sval = mgr.get_ptr_svalue (ptr_type_node, new_reg);
@@ -4559,8 +5197,7 @@ test_state_merging ()
model0.set_value (q_in_first_frame, sval_ptr, NULL);
/* Verify that it's pointing at the newer frame. */
- const region *reg_pointee
- = sval_ptr->dyn_cast_region_svalue ()->get_pointee ();
+ const region *reg_pointee = sval_ptr->maybe_get_region ();
ASSERT_EQ (reg_pointee->get_parent_region (), reg_2nd_frame);
model0.canonicalize ();
@@ -4846,7 +5483,7 @@ test_malloc_constraints ()
tree null_ptr = build_int_cst (ptr_type_node, 0);
const svalue *size_in_bytes
- = mgr.get_or_create_unknown_svalue (integer_type_node);
+ = mgr.get_or_create_unknown_svalue (size_type_node);
const region *reg = model.create_region_for_heap_alloc (size_in_bytes);
const svalue *sval = mgr.get_ptr_svalue (ptr_type_node, reg);
model.set_value (model.get_lvalue (p, NULL), sval, NULL);
@@ -5071,7 +5708,7 @@ test_malloc ()
const region *reg = model.create_region_for_heap_alloc (size_sval);
const svalue *ptr = mgr.get_ptr_svalue (int_star, reg);
model.set_value (model.get_lvalue (p, &ctxt), ptr, &ctxt);
- // TODO: verify dynamic extents
+ ASSERT_EQ (model.get_capacity (reg), size_sval);
}
/* Verify that alloca works. */
@@ -5106,12 +5743,43 @@ test_alloca ()
ASSERT_EQ (reg->get_parent_region (), frame_reg);
const svalue *ptr = mgr.get_ptr_svalue (int_star, reg);
model.set_value (model.get_lvalue (p, &ctxt), ptr, &ctxt);
- // TODO: verify dynamic extents
+ ASSERT_EQ (model.get_capacity (reg), size_sval);
/* Verify that the pointers to the alloca region are replaced by
poisoned values when the frame is popped. */
model.pop_frame (NULL, NULL, &ctxt);
- ASSERT_EQ (model.get_rvalue (p, &ctxt)->get_kind (), SK_POISONED);
+ ASSERT_EQ (model.get_rvalue (p, NULL)->get_kind (), SK_POISONED);
+}
+
+/* Verify that svalue::involves_p works. */
+
+static void
+test_involves_p ()
+{
+ region_model_manager mgr;
+ tree int_star = build_pointer_type (integer_type_node);
+ tree p = build_global_decl ("p", int_star);
+ tree q = build_global_decl ("q", int_star);
+
+ test_region_model_context ctxt;
+ region_model model (&mgr);
+ const svalue *p_init = model.get_rvalue (p, &ctxt);
+ const svalue *q_init = model.get_rvalue (q, &ctxt);
+
+ ASSERT_TRUE (p_init->involves_p (p_init));
+ ASSERT_FALSE (p_init->involves_p (q_init));
+
+ const region *star_p_reg = mgr.get_symbolic_region (p_init);
+ const region *star_q_reg = mgr.get_symbolic_region (q_init);
+
+ const svalue *init_star_p = mgr.get_or_create_initial_value (star_p_reg);
+ const svalue *init_star_q = mgr.get_or_create_initial_value (star_q_reg);
+
+ ASSERT_TRUE (init_star_p->involves_p (p_init));
+ ASSERT_FALSE (p_init->involves_p (init_star_p));
+ ASSERT_FALSE (init_star_p->involves_p (q_init));
+ ASSERT_TRUE (init_star_q->involves_p (q_init));
+ ASSERT_FALSE (init_star_q->involves_p (p_init));
}
/* Run all of the selftests within this file. */
@@ -5150,6 +5818,7 @@ analyzer_region_model_cc_tests ()
test_POINTER_PLUS_EXPR_then_MEM_REF ();
test_malloc ();
test_alloca ();
+ test_involves_p ();
}
} // namespace selftest
diff --git a/gcc/analyzer/region-model.h b/gcc/analyzer/region-model.h
index 54977f9..5fabf78 100644
--- a/gcc/analyzer/region-model.h
+++ b/gcc/analyzer/region-model.h
@@ -128,6 +128,55 @@ one_way_id_map<T>::update (T *id) const
*id = get_dst_for_src (*id);
}
+/* A mapping from region to svalue for use when tracking state. */
+
+class region_to_value_map
+{
+public:
+ typedef hash_map<const region *, const svalue *> hash_map_t;
+ typedef hash_map_t::iterator iterator;
+
+ region_to_value_map () : m_hash_map () {}
+ region_to_value_map (const region_to_value_map &other)
+ : m_hash_map (other.m_hash_map) {}
+ region_to_value_map &operator= (const region_to_value_map &other);
+
+ bool operator== (const region_to_value_map &other) const;
+ bool operator!= (const region_to_value_map &other) const
+ {
+ return !(*this == other);
+ }
+
+ iterator begin () const { return m_hash_map.begin (); }
+ iterator end () const { return m_hash_map.end (); }
+
+ const svalue * const *get (const region *reg) const
+ {
+ return const_cast <hash_map_t &> (m_hash_map).get (reg);
+ }
+ void put (const region *reg, const svalue *sval)
+ {
+ m_hash_map.put (reg, sval);
+ }
+ void remove (const region *reg)
+ {
+ m_hash_map.remove (reg);
+ }
+
+ bool is_empty () const { return m_hash_map.is_empty (); }
+
+ void dump_to_pp (pretty_printer *pp, bool simple, bool multiline) const;
+ void dump (bool simple) const;
+
+ bool can_merge_with_p (const region_to_value_map &other,
+ region_to_value_map *out) const;
+
+ void purge_state_involving (const svalue *sval);
+
+private:
+ hash_map_t m_hash_map;
+};
+
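A brief usage sketch ("reg" and "sval" assumed in scope); note that get
returns a pointer to the slot, so absence is distinguishable from a stored
NULL value:

    region_to_value_map map;
    map.put (reg, sval);
    if (const svalue * const *slot = map.get (reg))
      gcc_assert (*slot == sval);  /* present: slot points at the value */
    map.remove (reg);
    gcc_assert (map.is_empty ());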
/* Various operations delete information from a region_model.
This struct tracks how many of each kind of entity were purged (e.g.
@@ -140,6 +189,7 @@ struct purge_stats
m_num_regions (0),
m_num_equiv_classes (0),
m_num_constraints (0),
+ m_num_bounded_ranges_constraints (0),
m_num_client_items (0)
{}
@@ -147,6 +197,7 @@ struct purge_stats
int m_num_regions;
int m_num_equiv_classes;
int m_num_constraints;
+ int m_num_bounded_ranges_constraints;
int m_num_client_items;
};
@@ -165,11 +216,14 @@ public:
virtual void visit_unaryop_svalue (const unaryop_svalue *) {}
virtual void visit_binop_svalue (const binop_svalue *) {}
virtual void visit_sub_svalue (const sub_svalue *) {}
+ virtual void visit_repeated_svalue (const repeated_svalue *) {}
+ virtual void visit_bits_within_svalue (const bits_within_svalue *) {}
virtual void visit_unmergeable_svalue (const unmergeable_svalue *) {}
virtual void visit_placeholder_svalue (const placeholder_svalue *) {}
virtual void visit_widening_svalue (const widening_svalue *) {}
virtual void visit_compound_svalue (const compound_svalue *) {}
virtual void visit_conjured_svalue (const conjured_svalue *) {}
+ virtual void visit_asm_output_svalue (const asm_output_svalue *) {}
virtual void visit_region (const region *) {}
};
@@ -191,6 +245,7 @@ public:
/* svalue consolidation. */
const svalue *get_or_create_constant_svalue (tree cst_expr);
+ const svalue *get_or_create_int_cst (tree type, poly_int64);
const svalue *get_or_create_unknown_svalue (tree type);
const svalue *get_or_create_setjmp_svalue (const setjmp_record &r,
tree type);
@@ -207,6 +262,12 @@ public:
const svalue *get_or_create_sub_svalue (tree type,
const svalue *parent_svalue,
const region *subregion);
+ const svalue *get_or_create_repeated_svalue (tree type,
+ const svalue *outer_size,
+ const svalue *inner_svalue);
+ const svalue *get_or_create_bits_within (tree type,
+ const bit_range &bits,
+ const svalue *inner_svalue);
const svalue *get_or_create_unmergeable (const svalue *arg);
const svalue *get_or_create_widening_svalue (tree type,
const program_point &point,
@@ -216,6 +277,11 @@ public:
const binding_map &map);
const svalue *get_or_create_conjured_svalue (tree type, const gimple *stmt,
const region *id_reg);
+ const svalue *
+ get_or_create_asm_output_svalue (tree type,
+ const gasm *asm_stmt,
+ unsigned output_idx,
+ const vec<const svalue *> &inputs);
const svalue *maybe_get_char_from_string_cst (tree string_cst,
tree byte_offset_cst);
@@ -238,6 +304,9 @@ public:
const region *get_offset_region (const region *parent,
tree type,
const svalue *byte_offset);
+ const region *get_sized_region (const region *parent,
+ tree type,
+ const svalue *byte_size_sval);
const region *get_cast_region (const region *original_region,
tree type);
const frame_region *get_frame_region (const frame_region *calling_frame,
@@ -253,6 +322,7 @@ public:
unsigned alloc_region_id () { return m_next_region_id++; }
store_manager *get_store_manager () { return &m_store_mgr; }
+ bounded_ranges_manager *get_range_manager () const { return m_range_mgr; }
/* Dynamically-allocated region instances.
The number of these within the analysis can grow arbitrarily.
@@ -262,6 +332,9 @@ public:
void log_stats (logger *logger, bool show_objs) const;
+ void enable_complexity_check (void) { m_check_complexity = true; }
+ void disable_complexity_check (void) { m_check_complexity = false; }
+
private:
bool too_complex_p (const complexity &c) const;
bool reject_if_too_complex (svalue *sval);
@@ -273,6 +346,17 @@ private:
const svalue *maybe_fold_sub_svalue (tree type,
const svalue *parent_svalue,
const region *subregion);
+ const svalue *maybe_fold_repeated_svalue (tree type,
+ const svalue *outer_size,
+ const svalue *inner_svalue);
+ const svalue *maybe_fold_bits_within_svalue (tree type,
+ const bit_range &bits,
+ const svalue *inner_svalue);
+ const svalue *maybe_undo_optimize_bit_field_compare (tree type,
+ const compound_svalue *compound_sval,
+ tree cst, const svalue *arg1);
+ const svalue *maybe_fold_asm_output_svalue (tree type,
+ const vec<const svalue *> &inputs);
unsigned m_next_region_id;
root_region m_root_region;
@@ -311,6 +395,14 @@ private:
typedef hash_map<sub_svalue::key_t, sub_svalue *> sub_values_map_t;
sub_values_map_t m_sub_values_map;
+ typedef hash_map<repeated_svalue::key_t,
+ repeated_svalue *> repeated_values_map_t;
+ repeated_values_map_t m_repeated_values_map;
+
+ typedef hash_map<bits_within_svalue::key_t,
+ bits_within_svalue *> bits_within_values_map_t;
+ bits_within_values_map_t m_bits_within_values_map;
+
typedef hash_map<const svalue *,
unmergeable_svalue *> unmergeable_values_map_t;
unmergeable_values_map_t m_unmergeable_values_map;
@@ -329,6 +421,12 @@ private:
conjured_svalue *> conjured_values_map_t;
conjured_values_map_t m_conjured_values_map;
+ typedef hash_map<asm_output_svalue::key_t,
+ asm_output_svalue *> asm_output_values_map_t;
+ asm_output_values_map_t m_asm_output_values_map;
+
+ bool m_check_complexity;
+
/* Maximum complexity of svalues that weren't rejected. */
complexity m_max_complexity;
@@ -351,6 +449,7 @@ private:
consolidation_map<field_region> m_field_regions;
consolidation_map<element_region> m_element_regions;
consolidation_map<offset_region> m_offset_regions;
+ consolidation_map<sized_region> m_sized_regions;
consolidation_map<cast_region> m_cast_regions;
consolidation_map<frame_region> m_frame_regions;
consolidation_map<symbolic_region> m_symbolic_regions;
@@ -360,6 +459,8 @@ private:
store_manager m_store_mgr;
+ bounded_ranges_manager *m_range_mgr;
+
/* "Dynamically-allocated" region instances.
The number of these within the analysis can grow arbitrarily.
They are still owned by the manager. */
@@ -378,6 +479,7 @@ public:
region_model_context *ctxt);
region_model_context *get_ctxt () const { return m_ctxt; }
+ uncertainty_t *get_uncertainty () const;
tree get_lhs_type () const { return m_lhs_type; }
const region *get_lhs_region () const { return m_lhs_region; }
@@ -385,13 +487,20 @@ public:
unsigned num_args () const;
+ const gcall *get_call_stmt () const { return m_call; }
+
tree get_arg_tree (unsigned idx) const;
tree get_arg_type (unsigned idx) const;
const svalue *get_arg_svalue (unsigned idx) const;
+ const char *get_arg_string_literal (unsigned idx) const;
+
+ tree get_fndecl_for_call () const;
void dump_to_pp (pretty_printer *pp, bool simple) const;
void dump (bool simple) const;
+ const svalue *get_or_create_conjured_svalue (const region *) const;
+
private:
const gcall *m_call;
region_model *m_model;
@@ -404,20 +513,19 @@ private:
a tree of regions, along with their associated values.
The representation is graph-like because values can be pointers to
regions.
- It also stores a constraint_manager, capturing relationships between
- the values. */
+ It also stores:
+ - a constraint_manager, capturing relationships between the values, and
+ - dynamic extents, mapping dynamically-allocated regions to svalues (their
+ capacities). */
class region_model
{
public:
+ typedef region_to_value_map dynamic_extents_t;
+
region_model (region_model_manager *mgr);
region_model (const region_model &other);
~region_model ();
-
-#if 0//__cplusplus >= 201103
- region_model (region_model &&other);
-#endif
-
region_model &operator= (const region_model &other);
bool operator== (const region_model &other) const;
@@ -441,40 +549,54 @@ class region_model
void canonicalize ();
bool canonicalized_p () const;
+ void
+ on_stmt_pre (const gimple *stmt,
+ bool *out_terminate_path,
+ bool *out_unknown_side_effects,
+ region_model_context *ctxt);
+
void on_assignment (const gassign *stmt, region_model_context *ctxt);
const svalue *get_gassign_result (const gassign *assign,
region_model_context *ctxt);
+ void on_asm_stmt (const gasm *asm_stmt, region_model_context *ctxt);
bool on_call_pre (const gcall *stmt, region_model_context *ctxt,
bool *out_terminate_path);
void on_call_post (const gcall *stmt,
bool unknown_side_effects,
region_model_context *ctxt);
+ void purge_state_involving (const svalue *sval, region_model_context *ctxt);
+
/* Specific handling for on_call_pre. */
- bool impl_call_alloca (const call_details &cd);
+ void impl_call_alloca (const call_details &cd);
void impl_call_analyzer_describe (const gcall *call,
region_model_context *ctxt);
+ void impl_call_analyzer_dump_capacity (const gcall *call,
+ region_model_context *ctxt);
void impl_call_analyzer_eval (const gcall *call,
region_model_context *ctxt);
- bool impl_call_builtin_expect (const call_details &cd);
- bool impl_call_calloc (const call_details &cd);
+ void impl_call_builtin_expect (const call_details &cd);
+ void impl_call_calloc (const call_details &cd);
bool impl_call_error (const call_details &cd, unsigned min_args,
bool *out_terminate_path);
+ void impl_call_fgets (const call_details &cd);
+ void impl_call_fread (const call_details &cd);
void impl_call_free (const call_details &cd);
- bool impl_call_malloc (const call_details &cd);
+ void impl_call_malloc (const call_details &cd);
void impl_call_memcpy (const call_details &cd);
- bool impl_call_memset (const call_details &cd);
+ void impl_call_memset (const call_details &cd);
void impl_call_realloc (const call_details &cd);
void impl_call_strcpy (const call_details &cd);
- bool impl_call_strlen (const call_details &cd);
- bool impl_call_operator_new (const call_details &cd);
- bool impl_call_operator_delete (const call_details &cd);
+ void impl_call_strlen (const call_details &cd);
+ void impl_call_operator_new (const call_details &cd);
+ void impl_call_operator_delete (const call_details &cd);
void impl_deallocation_call (const call_details &cd);
void handle_unrecognized_call (const gcall *call,
region_model_context *ctxt);
void get_reachable_svalues (svalue_set *out,
- const svalue *extra_sval);
+ const svalue *extra_sval,
+ const uncertainty_t *uncertainty);
void on_return (const greturn *stmt, region_model_context *ctxt);
void on_setjmp (const gcall *stmt, const exploded_node *enode,
@@ -487,6 +609,7 @@ class region_model
region_model_context *ctxt);
void handle_phi (const gphi *phi, tree lhs, tree rhs,
+ const region_model &old_state,
region_model_context *ctxt);
bool maybe_update_for_edge (const superedge &edge,
@@ -494,6 +617,13 @@ class region_model
region_model_context *ctxt,
rejected_constraint **out);
+ void update_for_gcall (const gcall *call_stmt,
+ region_model_context *ctxt,
+ function *callee = NULL);
+
+ void update_for_return_gcall (const gcall *call_stmt,
+ region_model_context *ctxt);
+
const region *push_frame (function *fun, const vec<const svalue *> *arg_sids,
region_model_context *ctxt);
const frame_region *get_current_frame () const { return m_current_frame; }
@@ -504,21 +634,27 @@ class region_model
int get_stack_depth () const;
const frame_region *get_frame_at_index (int index) const;
- const region *get_lvalue (path_var pv, region_model_context *ctxt);
- const region *get_lvalue (tree expr, region_model_context *ctxt);
- const svalue *get_rvalue (path_var pv, region_model_context *ctxt);
- const svalue *get_rvalue (tree expr, region_model_context *ctxt);
+ const region *get_lvalue (path_var pv, region_model_context *ctxt) const;
+ const region *get_lvalue (tree expr, region_model_context *ctxt) const;
+ const svalue *get_rvalue (path_var pv, region_model_context *ctxt) const;
+ const svalue *get_rvalue (tree expr, region_model_context *ctxt) const;
const region *deref_rvalue (const svalue *ptr_sval, tree ptr_tree,
- region_model_context *ctxt);
+ region_model_context *ctxt) const;
+
+ const svalue *get_rvalue_for_bits (tree type,
+ const region *reg,
+ const bit_range &bits,
+ region_model_context *ctxt) const;
void set_value (const region *lhs_reg, const svalue *rhs_sval,
region_model_context *ctxt);
void set_value (tree lhs, tree rhs, region_model_context *ctxt);
void clobber_region (const region *reg);
void purge_region (const region *reg);
+ void fill_region (const region *reg, const svalue *sval);
void zero_fill_region (const region *reg);
- void mark_region_as_unknown (const region *reg);
+ void mark_region_as_unknown (const region *reg, uncertainty_t *uncertainty);
void copy_region (const region *dst_reg, const region *src_reg,
region_model_context *ctxt);
@@ -560,7 +696,21 @@ class region_model
store *get_store () { return &m_store; }
const store *get_store () const { return &m_store; }
+ const dynamic_extents_t &
+ get_dynamic_extents () const
+ {
+ return m_dynamic_extents;
+ }
+ const svalue *get_dynamic_extents (const region *reg) const;
+ void set_dynamic_extents (const region *reg,
+ const svalue *size_in_bytes);
+ void unset_dynamic_extents (const region *reg);
+
region_model_manager *get_manager () const { return m_mgr; }
+ bounded_ranges_manager *get_range_manager () const
+ {
+ return m_mgr->get_range_manager ();
+ }
void unbind_region_and_descendents (const region *reg,
enum poison_kind pkind);
@@ -577,15 +727,23 @@ class region_model
static void append_ssa_names_cb (const region *base_reg,
struct append_ssa_names_cb_data *data);
- const svalue *get_store_value (const region *reg) const;
+ const svalue *get_store_value (const region *reg,
+ region_model_context *ctxt) const;
bool region_exists_p (const region *reg) const;
void loop_replay_fixup (const region_model *dst_state);
+ const svalue *get_capacity (const region *reg) const;
+
+ /* Implemented in sm-malloc.cc */
+ void on_realloc_with_move (const call_details &cd,
+ const svalue *old_ptr_sval,
+ const svalue *new_ptr_sval);
+
private:
- const region *get_lvalue_1 (path_var pv, region_model_context *ctxt);
- const svalue *get_rvalue_1 (path_var pv, region_model_context *ctxt);
+ const region *get_lvalue_1 (path_var pv, region_model_context *ctxt) const;
+ const svalue *get_rvalue_1 (path_var pv, region_model_context *ctxt) const;
path_var
get_representative_path_var_1 (const svalue *sval,
@@ -594,18 +752,15 @@ class region_model
get_representative_path_var_1 (const region *reg,
svalue_set *visited) const;
- void add_any_constraints_from_ssa_def_stmt (tree lhs,
- enum tree_code op,
- tree rhs,
- region_model_context *ctxt);
- void add_any_constraints_from_gassign (enum tree_code op,
- tree rhs,
- const gassign *assign,
- region_model_context *ctxt);
- void add_any_constraints_from_gcall (enum tree_code op,
- tree rhs,
- const gcall *call,
- region_model_context *ctxt);
+ bool add_constraint (const svalue *lhs,
+ enum tree_code op,
+ const svalue *rhs,
+ region_model_context *ctxt);
+ bool add_constraints_from_binop (const svalue *outer_lhs,
+ enum tree_code outer_op,
+ const svalue *outer_rhs,
+ bool *out,
+ region_model_context *ctxt);
void update_for_call_superedge (const call_superedge &call_edge,
region_model_context *ctxt);
@@ -630,14 +785,22 @@ class region_model
void on_top_level_param (tree param, region_model_context *ctxt);
- void record_dynamic_extents (const region *reg,
- const svalue *size_in_bytes);
-
bool called_from_main_p () const;
const svalue *get_initial_value_for_global (const region *reg) const;
+ const svalue *check_for_poison (const svalue *sval,
+ tree expr,
+ region_model_context *ctxt) const;
+
void check_for_writable_region (const region* dest_reg,
region_model_context *ctxt) const;
+ void check_region_access (const region *reg,
+ enum access_direction dir,
+ region_model_context *ctxt) const;
+ void check_region_for_write (const region *dest_reg,
+ region_model_context *ctxt) const;
+ void check_region_for_read (const region *src_reg,
+ region_model_context *ctxt) const;
/* Storing this here to avoid passing it around everywhere. */
region_model_manager *const m_mgr;
@@ -647,6 +810,12 @@ class region_model
constraint_manager *m_constraints; // TODO: embed, rather than dynalloc?
const frame_region *m_current_frame;
+
+ /* Map from base region to size in bytes, for tracking the sizes of
+ dynamically-allocated regions.
+ This is part of the region_model rather than the region to allow for
+ memory regions to be resized (e.g. by realloc). */
+ dynamic_extents_t m_dynamic_extents;
};
/* Some region_model activity could lead to warnings (e.g. attempts to use an
@@ -660,7 +829,9 @@ class region_model
class region_model_context
{
public:
- virtual void warn (pending_diagnostic *d) = 0;
+ /* Hook for clients to store pending diagnostics.
+ Return true if the diagnostic was stored, or false if it was deleted. */
+ virtual bool warn (pending_diagnostic *d) = 0;
/* Hook for clients to be notified when an SVAL that was reachable
in a previous state is no longer live, so that clients can emit warnings
@@ -681,7 +852,9 @@ class region_model_context
and use them to trigger sm-state transitions (e.g. transitions due
to ptrs becoming known to be NULL or non-NULL, rather than just
"unchecked") */
- virtual void on_condition (tree lhs, enum tree_code op, tree rhs) = 0;
+ virtual void on_condition (const svalue *lhs,
+ enum tree_code op,
+ const svalue *rhs) = 0;
/* Hooks for clients to be notified when an unknown change happens
to SVAL (in response to a call to an unknown function). */
@@ -698,6 +871,26 @@ class region_model_context
/* Hook for clients to be notified when a function_decl escapes. */
virtual void on_escaped_function (tree fndecl) = 0;
+
+ virtual uncertainty_t *get_uncertainty () = 0;
+
+ /* Hook for clients to purge state involving SVAL. */
+ virtual void purge_state_involving (const svalue *sval) = 0;
+
+ /* Hook for clients to split state with a non-standard path.
+ Take ownership of INFO. */
+ virtual void bifurcate (custom_edge_info *info) = 0;
+
+ /* Hook for clients to terminate the standard path. */
+ virtual void terminate_path () = 0;
+
+ virtual const extrinsic_state *get_ext_state () const = 0;
+
+ /* Hook for clients to access the "malloc" state machine in
+ any underlying program_state. */
+ virtual bool get_malloc_map (sm_state_map **out_smap,
+ const state_machine **out_sm,
+ unsigned *out_sm_idx) = 0;
};
/* A "do nothing" subclass of region_model_context. */
@@ -705,14 +898,14 @@ class region_model_context
class noop_region_model_context : public region_model_context
{
public:
- void warn (pending_diagnostic *) OVERRIDE {}
+ bool warn (pending_diagnostic *) OVERRIDE { return false; }
void on_svalue_leak (const svalue *) OVERRIDE {}
void on_liveness_change (const svalue_set &,
const region_model *) OVERRIDE {}
logger *get_logger () OVERRIDE { return NULL; }
- void on_condition (tree lhs ATTRIBUTE_UNUSED,
+ void on_condition (const svalue *lhs ATTRIBUTE_UNUSED,
enum tree_code op ATTRIBUTE_UNUSED,
- tree rhs ATTRIBUTE_UNUSED) OVERRIDE
+ const svalue *rhs ATTRIBUTE_UNUSED) OVERRIDE
{
}
void on_unknown_change (const svalue *sval ATTRIBUTE_UNUSED,
@@ -726,6 +919,22 @@ public:
void on_unexpected_tree_code (tree, const dump_location_t &) OVERRIDE {}
void on_escaped_function (tree) OVERRIDE {}
+
+ uncertainty_t *get_uncertainty () OVERRIDE { return NULL; }
+
+ void purge_state_involving (const svalue *sval ATTRIBUTE_UNUSED) OVERRIDE {}
+
+ void bifurcate (custom_edge_info *info) OVERRIDE;
+ void terminate_path () OVERRIDE;
+
+ const extrinsic_state *get_ext_state () const OVERRIDE { return NULL; }
+
+ bool get_malloc_map (sm_state_map **,
+ const state_machine **,
+ unsigned *) OVERRIDE
+ {
+ return false;
+ }
};
/* A subclass of region_model_context for determining if operations fail
@@ -781,21 +990,54 @@ struct model_merger
/* A record that can (optionally) be written out when
region_model::add_constraint fails. */
-struct rejected_constraint
+class rejected_constraint
{
- rejected_constraint (const region_model &model,
- tree lhs, enum tree_code op, tree rhs)
- : m_model (model), m_lhs (lhs), m_op (op), m_rhs (rhs)
- {}
+public:
+ virtual ~rejected_constraint () {}
+ virtual void dump_to_pp (pretty_printer *pp) const = 0;
- void dump_to_pp (pretty_printer *pp) const;
+ const region_model &get_model () const { return m_model; }
+
+protected:
+ rejected_constraint (const region_model &model)
+ : m_model (model)
+ {}
region_model m_model;
+};
+
+class rejected_op_constraint : public rejected_constraint
+{
+public:
+ rejected_op_constraint (const region_model &model,
+ tree lhs, enum tree_code op, tree rhs)
+ : rejected_constraint (model),
+ m_lhs (lhs), m_op (op), m_rhs (rhs)
+ {}
+
+ void dump_to_pp (pretty_printer *pp) const FINAL OVERRIDE;
+
tree m_lhs;
enum tree_code m_op;
tree m_rhs;
};
+class rejected_ranges_constraint : public rejected_constraint
+{
+public:
+ rejected_ranges_constraint (const region_model &model,
+ tree expr, const bounded_ranges *ranges)
+ : rejected_constraint (model),
+ m_expr (expr), m_ranges (ranges)
+ {}
+
+ void dump_to_pp (pretty_printer *pp) const FINAL OVERRIDE;
+
+private:
+ tree m_expr;
+ const bounded_ranges *m_ranges;
+};
+
/* A bundle of state. */
class engine
@@ -828,9 +1070,10 @@ using namespace ::selftest;
class test_region_model_context : public noop_region_model_context
{
public:
- void warn (pending_diagnostic *d) FINAL OVERRIDE
+ bool warn (pending_diagnostic *d) FINAL OVERRIDE
{
m_diagnostics.safe_push (d);
+ return true;
}
unsigned get_num_diagnostics () const { return m_diagnostics.length (); }
diff --git a/gcc/analyzer/region.cc b/gcc/analyzer/region.cc
index 6db1fc9..fa187fd 100644
--- a/gcc/analyzer/region.cc
+++ b/gcc/analyzer/region.cc
@@ -98,6 +98,7 @@ region::get_base_region () const
case RK_FIELD:
case RK_ELEMENT:
case RK_OFFSET:
+ case RK_SIZED:
iter = iter->get_parent_region ();
continue;
case RK_CAST:
@@ -121,6 +122,7 @@ region::base_region_p () const
case RK_FIELD:
case RK_ELEMENT:
case RK_OFFSET:
+ case RK_SIZED:
case RK_CAST:
return false;
@@ -166,6 +168,109 @@ region::maybe_get_frame_region () const
return NULL;
}
+/* Get the memory space of this region. */
+
+enum memory_space
+region::get_memory_space () const
+{
+ const region *iter = this;
+ while (iter)
+ {
+ switch (iter->get_kind ())
+ {
+ default:
+ break;
+ case RK_GLOBALS:
+ return MEMSPACE_GLOBALS;
+ case RK_CODE:
+ case RK_FUNCTION:
+ case RK_LABEL:
+ return MEMSPACE_CODE;
+ case RK_FRAME:
+ case RK_STACK:
+ case RK_ALLOCA:
+ return MEMSPACE_STACK;
+ case RK_HEAP:
+ case RK_HEAP_ALLOCATED:
+ return MEMSPACE_HEAP;
+ case RK_STRING:
+ return MEMSPACE_READONLY_DATA;
+ }
+ if (iter->get_kind () == RK_CAST)
+ iter = iter->dyn_cast_cast_region ()->get_original_region ();
+ else
+ iter = iter->get_parent_region ();
+ }
+ return MEMSPACE_UNKNOWN;
+}
+
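The walk climbs parent regions (stepping through casts via their original
region) until a region kind determines the space.  A hedged sketch of C
declarations and the spaces their base regions report:

    #include <stdlib.h>

    int g;                          /* MEMSPACE_GLOBALS */
    void f (void)
    {
      int local;                    /* frame region: MEMSPACE_STACK */
      int *h = (int *) malloc (4);  /* heap_allocated_region: MEMSPACE_HEAP */
      const char *s = "str";        /* string_region: MEMSPACE_READONLY_DATA */
    }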
+/* Subroutine for use by region_model_manager::get_or_create_initial_value.
+ Return true if this region has an initial_svalue.
+ Return false if attempting to use INIT_VAL(this_region) should give
+ the "UNINITIALIZED" poison value. */
+
+bool
+region::can_have_initial_svalue_p () const
+{
+ const region *base_reg = get_base_region ();
+
+ /* Check for memory spaces that are uninitialized by default. */
+ enum memory_space mem_space = base_reg->get_memory_space ();
+ switch (mem_space)
+ {
+ default:
+ gcc_unreachable ();
+ case MEMSPACE_UNKNOWN:
+ case MEMSPACE_CODE:
+ case MEMSPACE_GLOBALS:
+ case MEMSPACE_READONLY_DATA:
+ /* Such regions have initial_svalues. */
+ return true;
+
+ case MEMSPACE_HEAP:
+ /* Heap allocations are uninitialized by default. */
+ return false;
+
+ case MEMSPACE_STACK:
+ if (tree decl = base_reg->maybe_get_decl ())
+ {
+ /* See the assertion in frame_region::get_region_for_local for the
+ tree codes we need to handle here. */
+ switch (TREE_CODE (decl))
+ {
+ default:
+ gcc_unreachable ();
+
+ case PARM_DECL:
+ /* Parameters have initial values. */
+ return true;
+
+ case VAR_DECL:
+ case RESULT_DECL:
+ /* Function locals don't have initial values. */
+ return false;
+
+ case SSA_NAME:
+ {
+ tree ssa_name = decl;
+ /* SSA names that are the default defn of a PARM_DECL
+ have initial_svalues; other SSA names don't. */
+ if (SSA_NAME_IS_DEFAULT_DEF (ssa_name)
+ && SSA_NAME_VAR (ssa_name)
+ && TREE_CODE (SSA_NAME_VAR (ssa_name)) == PARM_DECL)
+ return true;
+ else
+ return false;
+ }
+ }
+ }
+
+ /* If we have an on-stack region that isn't associated with a decl
+ or SSA name, then we have VLA/alloca, which is uninitialized. */
+ return false;
+ }
+}
+
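In source terms, the predicate distinguishes cases like the following (a
hedged sketch; the uninitialized read is what INIT_VAL would otherwise paper
over):

    int g;             /* global: has an initial_svalue */
    int f (int parm)   /* parm's default-def SSA name: has an initial_svalue */
    {
      int local;       /* stack local: no initial_svalue; reading it yields
                          the "UNINITIALIZED" poison value */
      return parm + local;
    }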
/* If this region is a decl_region, return the decl.
Otherwise return NULL. */
@@ -188,7 +293,8 @@ region::get_offset () const
return *m_cached_offset;
}
-/* If the size of this region (in bytes) is known statically, write it to *OUT
+/* Base class implementation of region::get_byte_size vfunc.
+ If the size of this region (in bytes) is known statically, write it to *OUT
and return true.
Otherwise return false. */
@@ -208,6 +314,50 @@ region::get_byte_size (byte_size_t *out) const
return true;
}
+/* Base implementation of region::get_byte_size_sval vfunc. */
+
+const svalue *
+region::get_byte_size_sval (region_model_manager *mgr) const
+{
+ tree type = get_type ();
+
+ /* Bail out e.g. for heap-allocated regions. */
+ if (!type)
+ return mgr->get_or_create_unknown_svalue (size_type_node);
+
+ HOST_WIDE_INT bytes = int_size_in_bytes (type);
+ if (bytes == -1)
+ return mgr->get_or_create_unknown_svalue (size_type_node);
+
+ tree byte_size = size_in_bytes (type);
+ if (TREE_TYPE (byte_size) != size_type_node)
+ byte_size = fold_build1 (NOP_EXPR, size_type_node, byte_size);
+ return mgr->get_or_create_constant_svalue (byte_size);
+}
+
+/* Attempt to get the size of TYPE in bits.
+ If successful, return true and write the size to *OUT.
+ Otherwise return false. */
+
+bool
+int_size_in_bits (const_tree type, bit_size_t *out)
+{
+ if (INTEGRAL_TYPE_P (type))
+ {
+ *out = TYPE_PRECISION (type);
+ return true;
+ }
+
+ tree sz = TYPE_SIZE (type);
+ if (sz && tree_fits_uhwi_p (sz))
+ {
+ *out = TREE_INT_CST_LOW (sz);
+ return true;
+ }
+ else
+ return false;
+}
+
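Note that for integral types this reports TYPE_PRECISION rather than the
storage size, so e.g. boolean_type_node reports 1 bit even though it occupies
a full byte.  A sketch:

    bit_size_t bits;
    if (int_size_in_bits (boolean_type_node, &bits))
      gcc_assert (bits == 1);  /* precision, not 8 * sizeof (bool) */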
/* If the size of this region (in bits) is known statically, write it to *OUT
and return true.
Otherwise return false. */
@@ -215,16 +365,18 @@ region::get_byte_size (byte_size_t *out) const
bool
region::get_bit_size (bit_size_t *out) const
{
- byte_size_t byte_size;
- if (!get_byte_size (&byte_size))
+ tree type = get_type ();
+
+ /* Bail out e.g. for heap-allocated regions. */
+ if (!type)
return false;
- *out = byte_size * BITS_PER_UNIT;
- return true;
+
+ return int_size_in_bits (type, out);
}
/* Get the field within RECORD_TYPE at BIT_OFFSET. */
-static tree
+tree
get_field_at_bit_offset (tree record_type, bit_offset_t bit_offset)
{
gcc_assert (TREE_CODE (record_type) == RECORD_TYPE);
@@ -350,18 +502,10 @@ region::calc_offset () const
= (const field_region *)iter_region;
iter_region = iter_region->get_parent_region ();
- /* Compare with e.g. gimple-fold.c's
- fold_nonarray_ctor_reference. */
- tree field = field_reg->get_field ();
- tree byte_offset = DECL_FIELD_OFFSET (field);
- if (TREE_CODE (byte_offset) != INTEGER_CST)
+ bit_offset_t rel_bit_offset;
+ if (!field_reg->get_relative_concrete_offset (&rel_bit_offset))
return region_offset::make_symbolic (iter_region);
- tree field_offset = DECL_FIELD_BIT_OFFSET (field);
- /* Compute bit offset of the field. */
- offset_int bitoffset
- = (wi::to_offset (field_offset)
- + (wi::to_offset (byte_offset) << LOG2_BITS_PER_UNIT));
- accum_bit_offset += bitoffset;
+ accum_bit_offset += rel_bit_offset;
}
continue;
@@ -371,28 +515,10 @@ region::calc_offset () const
= (const element_region *)iter_region;
iter_region = iter_region->get_parent_region ();
- if (tree idx_cst
- = element_reg->get_index ()->maybe_get_constant ())
- {
- gcc_assert (TREE_CODE (idx_cst) == INTEGER_CST);
-
- tree elem_type = element_reg->get_type ();
- offset_int element_idx = wi::to_offset (idx_cst);
-
- /* First, use int_size_in_bytes, to reject the case where we
- have an incomplete type, or a non-constant value. */
- HOST_WIDE_INT hwi_byte_size = int_size_in_bytes (elem_type);
- if (hwi_byte_size > 0)
- {
- offset_int element_bit_size
- = hwi_byte_size << LOG2_BITS_PER_UNIT;
- offset_int element_bit_offset
- = element_idx * element_bit_size;
- accum_bit_offset += element_bit_offset;
- continue;
- }
- }
- return region_offset::make_symbolic (iter_region);
+ bit_offset_t rel_bit_offset;
+ if (!element_reg->get_relative_concrete_offset (&rel_bit_offset))
+ return region_offset::make_symbolic (iter_region);
+ accum_bit_offset += rel_bit_offset;
}
continue;
@@ -402,22 +528,17 @@ region::calc_offset () const
= (const offset_region *)iter_region;
iter_region = iter_region->get_parent_region ();
- if (tree byte_offset_cst
- = offset_reg->get_byte_offset ()->maybe_get_constant ())
- {
- gcc_assert (TREE_CODE (byte_offset_cst) == INTEGER_CST);
- /* Use a signed value for the byte offset, to handle
- negative offsets. */
- HOST_WIDE_INT byte_offset
- = wi::to_offset (byte_offset_cst).to_shwi ();
- HOST_WIDE_INT bit_offset = byte_offset * BITS_PER_UNIT;
- accum_bit_offset += bit_offset;
- }
- else
+ bit_offset_t rel_bit_offset;
+ if (!offset_reg->get_relative_concrete_offset (&rel_bit_offset))
return region_offset::make_symbolic (iter_region);
+ accum_bit_offset += rel_bit_offset;
}
continue;
+ case RK_SIZED:
+ iter_region = iter_region->get_parent_region ();
+ continue;
+
case RK_CAST:
{
const cast_region *cast_reg
@@ -433,6 +554,14 @@ region::calc_offset () const
return region_offset::make_concrete (iter_region, accum_bit_offset);
}
+/* Base implementation of region::get_relative_concrete_offset vfunc. */
+
+bool
+region::get_relative_concrete_offset (bit_offset_t *) const
+{
+ return false;
+}
+
/* Copy from SRC_REG to DST_REG, using CTXT for any issues that occur. */
void
@@ -444,7 +573,7 @@ region_model::copy_region (const region *dst_reg, const region *src_reg,
if (dst_reg == src_reg)
return;
- const svalue *sval = get_store_value (src_reg);
+ const svalue *sval = get_store_value (src_reg, ctxt);
set_value (dst_reg, sval, ctxt);
}
@@ -558,6 +687,20 @@ region::non_null_p () const
}
}
+/* Return true iff this region is defined in terms of SVAL. */
+
+bool
+region::involves_p (const svalue *sval) const
+{
+ if (const symbolic_region *symbolic_reg = dyn_cast_symbolic_region ())
+ {
+ if (symbolic_reg->get_pointer ()->involves_p (sval))
+ return true;
+ }
+
+ return false;
+}
+
/* Comparator for trees to impose a deterministic ordering on
T1 and T2. */
@@ -959,7 +1102,7 @@ decl_region::get_svalue_for_initializer (region_model_manager *mgr) const
which can fail if we have a region with unknown size
(e.g. "extern const char arr[];"). */
const binding_key *binding
- = binding_key::make (mgr->get_store_manager (), this, BK_direct);
+ = binding_key::make (mgr->get_store_manager (), this);
if (binding->symbolic_p ())
return NULL;
@@ -1005,6 +1148,26 @@ field_region::dump_to_pp (pretty_printer *pp, bool simple) const
}
}
+/* Implementation of region::get_relative_concrete_offset vfunc
+ for field_region. */
+
+bool
+field_region::get_relative_concrete_offset (bit_offset_t *out) const
+{
+ /* Compare with e.g. gimple-fold.c's
+ fold_nonarray_ctor_reference. */
+ tree byte_offset = DECL_FIELD_OFFSET (m_field);
+ if (TREE_CODE (byte_offset) != INTEGER_CST)
+ return false;
+ tree field_offset = DECL_FIELD_BIT_OFFSET (m_field);
+ /* Compute bit offset of the field. */
+ offset_int bitoffset
+ = (wi::to_offset (field_offset)
+ + (wi::to_offset (byte_offset) << LOG2_BITS_PER_UNIT));
+ *out = bitoffset;
+ return true;
+}
+
/* class element_region : public region. */
/* Implementation of region::accept vfunc for element_region. */
@@ -1042,6 +1205,35 @@ element_region::dump_to_pp (pretty_printer *pp, bool simple) const
}
}
+/* Implementation of region::get_relative_concrete_offset vfunc
+ for element_region. */
+
+bool
+element_region::get_relative_concrete_offset (bit_offset_t *out) const
+{
+ if (tree idx_cst = m_index->maybe_get_constant ())
+ {
+ gcc_assert (TREE_CODE (idx_cst) == INTEGER_CST);
+
+ tree elem_type = get_type ();
+ offset_int element_idx = wi::to_offset (idx_cst);
+
+ /* First, use int_size_in_bytes, to reject the case where we
+ have an incomplete type, or a non-constant value. */
+ HOST_WIDE_INT hwi_byte_size = int_size_in_bytes (elem_type);
+ if (hwi_byte_size > 0)
+ {
+ offset_int element_bit_size
+ = hwi_byte_size << LOG2_BITS_PER_UNIT;
+ offset_int element_bit_offset
+ = element_idx * element_bit_size;
+ *out = element_bit_offset;
+ return true;
+ }
+ }
+ return false;
+}
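+
+/* For example, for a hypothetical "int arr[10]" with a 32-bit "int",
+   the element "arr[3]" is at relative bit offset 3 * 32 = 96, whereas
+   "arr[i]" for a symbolic "i" has no constant index, so the function
+   returns false.  */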
+
/* class offset_region : public region. */
/* Implementation of region::accept vfunc for offset_region. */
@@ -1078,6 +1270,86 @@ offset_region::dump_to_pp (pretty_printer *pp, bool simple) const
}
}
+/* Implementation of region::get_relative_concrete_offset vfunc
+ for offset_region. */
+
+bool
+offset_region::get_relative_concrete_offset (bit_offset_t *out) const
+{
+ if (tree byte_offset_cst = m_byte_offset->maybe_get_constant ())
+ {
+ gcc_assert (TREE_CODE (byte_offset_cst) == INTEGER_CST);
+ /* Use a signed value for the byte offset, to handle
+ negative offsets. */
+ HOST_WIDE_INT byte_offset
+ = wi::to_offset (byte_offset_cst).to_shwi ();
+ HOST_WIDE_INT bit_offset = byte_offset * BITS_PER_UNIT;
+ *out = bit_offset;
+ return true;
+ }
+ return false;
+}
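+
+/* For example, a region at a constant byte offset of -2 within its
+   parent has a relative offset of -16 bits (assuming 8-bit bytes);
+   using a signed HOST_WIDE_INT is what allows such negative offsets
+   to be represented.  */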
+
+/* class sized_region : public region. */
+
+/* Implementation of region::accept vfunc for sized_region. */
+
+void
+sized_region::accept (visitor *v) const
+{
+ region::accept (v);
+ m_byte_size_sval->accept (v);
+}
+
+/* Implementation of region::dump_to_pp vfunc for sized_region. */
+
+void
+sized_region::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+ if (simple)
+ {
+ pp_string (pp, "SIZED_REG(");
+ get_parent_region ()->dump_to_pp (pp, simple);
+ pp_string (pp, ", ");
+ m_byte_size_sval->dump_to_pp (pp, simple);
+ pp_string (pp, ")");
+ }
+ else
+ {
+ pp_string (pp, "sized_region(");
+ get_parent_region ()->dump_to_pp (pp, simple);
+ pp_string (pp, ", ");
+ m_byte_size_sval->dump_to_pp (pp, simple);
+      pp_string (pp, ")");
+ }
+}
+
+/* Implementation of region::get_byte_size vfunc for sized_region. */
+
+bool
+sized_region::get_byte_size (byte_size_t *out) const
+{
+ if (tree cst = m_byte_size_sval->maybe_get_constant ())
+ {
+ gcc_assert (TREE_CODE (cst) == INTEGER_CST);
+ *out = tree_to_uhwi (cst);
+ return true;
+ }
+ return false;
+}
+
+/* Implementation of region::get_bit_size vfunc for sized_region. */
+
+bool
+sized_region::get_bit_size (bit_size_t *out) const
+{
+ byte_size_t byte_size;
+ if (!get_byte_size (&byte_size))
+ return false;
+ *out = byte_size * BITS_PER_UNIT;
+ return true;
+}
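+
+/* For example, a sized_region whose size svalue is the constant 16
+   has a byte size of 16 and a bit size of 128 (assuming 8-bit bytes);
+   if the size svalue is symbolic, both functions return false.  */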
+
/* class cast_region : public region. */
/* Implementation of region::accept vfunc for cast_region. */
diff --git a/gcc/analyzer/region.h b/gcc/analyzer/region.h
index ea24b38..a17e73c 100644
--- a/gcc/analyzer/region.h
+++ b/gcc/analyzer/region.h
@@ -25,6 +25,18 @@ along with GCC; see the file COPYING3. If not see
namespace ana {
+/* An enum for identifying different spaces within memory. */
+
+enum memory_space
+{
+ MEMSPACE_UNKNOWN,
+ MEMSPACE_CODE,
+ MEMSPACE_GLOBALS,
+ MEMSPACE_STACK,
+ MEMSPACE_HEAP,
+ MEMSPACE_READONLY_DATA
+};
+
/* An enum for discriminating between the different concrete subclasses
of region. */
@@ -43,6 +55,7 @@ enum region_kind
RK_FIELD,
RK_ELEMENT,
RK_OFFSET,
+ RK_SIZED,
RK_CAST,
RK_HEAP_ALLOCATED,
RK_ALLOCA,
@@ -70,6 +83,7 @@ enum region_kind
field_region (RK_FIELD)
element_region (RK_ELEMENT)
offset_region (RK_OFFSET)
+ sized_region (RK_SIZED)
cast_region (RK_CAST)
heap_allocated_region (RK_HEAP_ALLOCATED)
alloca_region (RK_ALLOCA)
@@ -107,6 +121,8 @@ public:
dyn_cast_element_region () const { return NULL; }
virtual const offset_region *
dyn_cast_offset_region () const { return NULL; }
+ virtual const sized_region *
+ dyn_cast_sized_region () const { return NULL; }
virtual const cast_region *
dyn_cast_cast_region () const { return NULL; }
virtual const string_region *
@@ -119,6 +135,8 @@ public:
bool base_region_p () const;
bool descendent_of_p (const region *elder) const;
const frame_region *maybe_get_frame_region () const;
+ enum memory_space get_memory_space () const;
+ bool can_have_initial_svalue_p () const;
tree maybe_get_decl () const;
@@ -128,11 +146,6 @@ public:
pretty_printer *pp) const;
label_text get_desc (bool simple=true) const;
- void dump_to_pp (const region_model &model,
- pretty_printer *pp,
- const char *prefix,
- bool is_last_child) const;
-
virtual void dump_to_pp (pretty_printer *pp, bool simple) const = 0;
void dump (bool simple) const;
@@ -142,9 +155,28 @@ public:
static int cmp_ptr_ptr (const void *, const void *);
+ bool involves_p (const svalue *sval) const;
+
region_offset get_offset () const;
- bool get_byte_size (byte_size_t *out) const;
- bool get_bit_size (bit_size_t *out) const;
+
+ /* Attempt to get the size of this region as a concrete number of bytes.
+ If successful, return true and write the size to *OUT.
+ Otherwise return false. */
+ virtual bool get_byte_size (byte_size_t *out) const;
+
+ /* Attempt to get the size of this region as a concrete number of bits.
+ If successful, return true and write the size to *OUT.
+ Otherwise return false. */
+ virtual bool get_bit_size (bit_size_t *out) const;
+
+ /* Get a symbolic value describing the size of this region in bytes
+ (which could be "unknown"). */
+ virtual const svalue *get_byte_size_sval (region_model_manager *mgr) const;
+
+ /* Attempt to get the offset in bits of this region relative to its parent.
+ If successful, return true and write to *OUT.
+ Otherwise return false. */
+ virtual bool get_relative_concrete_offset (bit_offset_t *out) const;
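+
+  /* For example, a caller wanting a region's size can try the
+     concrete accessor first, falling back to the symbolic form:
+
+       byte_size_t num_bytes;
+       if (reg->get_byte_size (&num_bytes))
+         ... use the compile-time-known size ...
+       else
+         ... use reg->get_byte_size_sval (mgr) ...  */
+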
void
get_subregions_for_binding (region_model_manager *mgr,
@@ -675,6 +707,8 @@ public:
tree get_field () const { return m_field; }
+ bool get_relative_concrete_offset (bit_offset_t *out) const FINAL OVERRIDE;
+
private:
tree m_field;
};
@@ -756,6 +790,9 @@ public:
const svalue *get_index () const { return m_index; }
+ virtual bool
+ get_relative_concrete_offset (bit_offset_t *out) const FINAL OVERRIDE;
+
private:
const svalue *m_index;
};
@@ -838,6 +875,8 @@ public:
const svalue *get_byte_offset () const { return m_byte_offset; }
+ bool get_relative_concrete_offset (bit_offset_t *out) const FINAL OVERRIDE;
+
private:
const svalue *m_byte_offset;
};
@@ -860,6 +899,99 @@ template <> struct default_hash_traits<offset_region::key_t>
namespace ana {
+/* A region of size BYTE_SIZE_SVAL bytes within its parent region
+   (or possibly larger, which would lead to an overflow).  */
+
+class sized_region : public region
+{
+public:
+ /* A support class for uniquifying instances of sized_region. */
+ struct key_t
+ {
+ key_t (const region *parent, tree element_type,
+ const svalue *byte_size_sval)
+ : m_parent (parent), m_element_type (element_type),
+ m_byte_size_sval (byte_size_sval)
+ {
+ gcc_assert (byte_size_sval);
+ }
+
+ hashval_t hash () const
+ {
+ inchash::hash hstate;
+ hstate.add_ptr (m_parent);
+ hstate.add_ptr (m_element_type);
+ hstate.add_ptr (m_byte_size_sval);
+ return hstate.end ();
+ }
+
+ bool operator== (const key_t &other) const
+ {
+ return (m_parent == other.m_parent
+ && m_element_type == other.m_element_type
+ && m_byte_size_sval == other.m_byte_size_sval);
+ }
+
+ void mark_deleted () { m_byte_size_sval = reinterpret_cast<const svalue *> (1); }
+ void mark_empty () { m_byte_size_sval = NULL; }
+ bool is_deleted () const
+ {
+ return m_byte_size_sval == reinterpret_cast<const svalue *> (1);
+ }
+ bool is_empty () const { return m_byte_size_sval == NULL; }
+
+ const region *m_parent;
+ tree m_element_type;
+ const svalue *m_byte_size_sval;
+ };
+
+ sized_region (unsigned id, const region *parent, tree type,
+ const svalue *byte_size_sval)
+ : region (complexity::from_pair (parent, byte_size_sval),
+ id, parent, type),
+ m_byte_size_sval (byte_size_sval)
+ {}
+
+ enum region_kind get_kind () const FINAL OVERRIDE { return RK_SIZED; }
+ const sized_region *
+ dyn_cast_sized_region () const FINAL OVERRIDE { return this; }
+
+ void accept (visitor *v) const FINAL OVERRIDE;
+
+ void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+
+ bool get_byte_size (byte_size_t *out) const FINAL OVERRIDE;
+ bool get_bit_size (bit_size_t *out) const FINAL OVERRIDE;
+
+ const svalue *
+ get_byte_size_sval (region_model_manager *) const FINAL OVERRIDE
+ {
+ return m_byte_size_sval;
+ }
+
+private:
+ const svalue *m_byte_size_sval;
+};
+
+} // namespace ana
+
+template <>
+template <>
+inline bool
+is_a_helper <const sized_region *>::test (const region *reg)
+{
+ return reg->get_kind () == RK_SIZED;
+}
+
+template <> struct default_hash_traits<sized_region::key_t>
+: public member_function_hash_traits<sized_region::key_t>
+{
+ static const bool empty_zero_p = true;
+};
+
+namespace ana {
+
/* A region that views another region using a different type. */
class cast_region : public region
diff --git a/gcc/analyzer/sm-file.cc b/gcc/analyzer/sm-file.cc
index 7a81c8f..0c8cdf0 100644
--- a/gcc/analyzer/sm-file.cc
+++ b/gcc/analyzer/sm-file.cc
@@ -77,9 +77,9 @@ public:
void on_condition (sm_context *sm_ctxt,
const supernode *node,
const gimple *stmt,
- tree lhs,
+ const svalue *lhs,
enum tree_code op,
- tree rhs) const FINAL OVERRIDE;
+ const svalue *rhs) const FINAL OVERRIDE;
bool can_purge_p (state_t s) const FINAL OVERRIDE;
pending_diagnostic *on_leak (tree var) const FINAL OVERRIDE;
@@ -125,11 +125,21 @@ public:
return label_text::borrow ("opened here");
if (change.m_old_state == m_sm.m_unchecked
&& change.m_new_state == m_sm.m_nonnull)
- return change.formatted_print ("assuming %qE is non-NULL",
- change.m_expr);
+ {
+ if (change.m_expr)
+ return change.formatted_print ("assuming %qE is non-NULL",
+ change.m_expr);
+ else
+ return change.formatted_print ("assuming FILE * is non-NULL");
+ }
if (change.m_new_state == m_sm.m_null)
- return change.formatted_print ("assuming %qE is NULL",
- change.m_expr);
+ {
+ if (change.m_expr)
+ return change.formatted_print ("assuming %qE is NULL",
+ change.m_expr);
+ else
+ return change.formatted_print ("assuming FILE * is NULL");
+ }
return label_text ();
}
@@ -193,9 +203,13 @@ public:
/* CWE-775: "Missing Release of File Descriptor or Handle after
Effective Lifetime". */
m.add_cwe (775);
- return warning_meta (rich_loc, m, OPT_Wanalyzer_file_leak,
- "leak of FILE %qE",
- m_arg);
+ if (m_arg)
+ return warning_meta (rich_loc, m, OPT_Wanalyzer_file_leak,
+ "leak of FILE %qE",
+ m_arg);
+ else
+ return warning_meta (rich_loc, m, OPT_Wanalyzer_file_leak,
+ "leak of FILE");
}
label_text describe_state_change (const evdesc::state_change &change)
@@ -212,10 +226,21 @@ public:
label_text describe_final_event (const evdesc::final_event &ev) FINAL OVERRIDE
{
if (m_fopen_event.known_p ())
- return ev.formatted_print ("%qE leaks here; was opened at %@",
- ev.m_expr, &m_fopen_event);
+ {
+ if (ev.m_expr)
+ return ev.formatted_print ("%qE leaks here; was opened at %@",
+ ev.m_expr, &m_fopen_event);
+ else
+ return ev.formatted_print ("leaks here; was opened at %@",
+ &m_fopen_event);
+ }
else
- return ev.formatted_print ("%qE leaks here", ev.m_expr);
+ {
+ if (ev.m_expr)
+ return ev.formatted_print ("%qE leaks here", ev.m_expr);
+ else
+ return ev.formatted_print ("leaks here");
+ }
}
private:
@@ -312,9 +337,8 @@ is_file_using_fn_p (tree fndecl)
/* Also support variants of these names prefixed with "_IO_". */
const char *name = IDENTIFIER_POINTER (DECL_NAME (fndecl));
- if (strncmp (name, "_IO_", 4) == 0)
- if (fs.contains_name_p (name + 4))
- return true;
+ if (startswith (name, "_IO_") && fs.contains_name_p (name + 4))
+ return true;
return false;
}
@@ -344,7 +368,6 @@ fileptr_state_machine::on_stmt (sm_context *sm_ctxt,
if (is_named_call_p (callee_fndecl, "fclose", call, 1))
{
tree arg = gimple_call_arg (call, 0);
- tree diag_arg = sm_ctxt->get_diagnostic_tree (arg);
sm_ctxt->on_transition (node, stmt, arg, m_start, m_closed);
@@ -356,6 +379,7 @@ fileptr_state_machine::on_stmt (sm_context *sm_ctxt,
if (sm_ctxt->get_state (stmt, arg) == m_closed)
{
+ tree diag_arg = sm_ctxt->get_diagnostic_tree (arg);
sm_ctxt->warn (node, stmt, arg,
new double_fclose (*this, diag_arg));
sm_ctxt->set_next_state (stmt, arg, m_stop);
@@ -382,19 +406,18 @@ void
fileptr_state_machine::on_condition (sm_context *sm_ctxt,
const supernode *node,
const gimple *stmt,
- tree lhs,
+ const svalue *lhs,
enum tree_code op,
- tree rhs) const
+ const svalue *rhs) const
{
- if (!zerop (rhs))
+ if (!rhs->all_zeroes_p ())
return;
// TODO: has to be a FILE *, specifically
- if (TREE_CODE (TREE_TYPE (lhs)) != POINTER_TYPE)
+ if (!any_pointer_p (lhs))
return;
-
// TODO: has to be a FILE *, specifically
- if (TREE_CODE (TREE_TYPE (rhs)) != POINTER_TYPE)
+ if (!any_pointer_p (rhs))
return;
if (op == NE_EXPR)
diff --git a/gcc/analyzer/sm-malloc.cc b/gcc/analyzer/sm-malloc.cc
index ef250c8..bf5e3c3 100644
--- a/gcc/analyzer/sm-malloc.cc
+++ b/gcc/analyzer/sm-malloc.cc
@@ -44,6 +44,8 @@ along with GCC; see the file COPYING3. If not see
#include "analyzer/region-model.h"
#include "stringpool.h"
#include "attribs.h"
+#include "analyzer/function-set.h"
+#include "analyzer/program-state.h"
#if ENABLE_ANALYZER
@@ -374,9 +376,9 @@ public:
void on_condition (sm_context *sm_ctxt,
const supernode *node,
const gimple *stmt,
- tree lhs,
+ const svalue *lhs,
enum tree_code op,
- tree rhs) const FINAL OVERRIDE;
+ const svalue *rhs) const FINAL OVERRIDE;
bool can_purge_p (state_t s) const FINAL OVERRIDE;
pending_diagnostic *on_leak (tree var) const FINAL OVERRIDE;
@@ -384,6 +386,14 @@ public:
bool reset_when_passed_to_unknown_fn_p (state_t s,
bool is_mutable) const FINAL OVERRIDE;
+ static bool unaffected_by_call_p (tree fndecl);
+
+ void on_realloc_with_move (region_model *model,
+ sm_state_map *smap,
+ const svalue *old_ptr_sval,
+ const svalue *new_ptr_sval,
+ const extrinsic_state &ext_state) const;
+
standard_deallocator_set m_free;
standard_deallocator_set m_scalar_delete;
standard_deallocator_set m_vector_delete;
@@ -1195,6 +1205,25 @@ public:
funcname, ev.m_expr);
}
+ /* Implementation of pending_diagnostic::supercedes_p for
+ use_after_free.
+
+     We want use-after-free to supercede use-of-uninitialized-value,
+ so that if we have these at the same stmt, we don't emit
+ a use-of-uninitialized, just the use-after-free.
+ (this is because we fully purge information about freed
+ buffers when we free them to avoid state explosions, so
+ that if they are accessed after the free, it looks like
+ they are uninitialized). */
+
+ bool supercedes_p (const pending_diagnostic &other) const FINAL OVERRIDE
+ {
+ if (other.use_of_uninit_p ())
+ return true;
+
+ return false;
+ }
+
private:
diagnostic_event_id_t m_free_event;
const deallocator *m_deallocator;
@@ -1303,7 +1332,7 @@ public:
{
/* Attempt to reconstruct what kind of pointer it is.
(It seems neater for this to be a part of the state, though). */
- if (TREE_CODE (change.m_expr) == SSA_NAME)
+ if (change.m_expr && TREE_CODE (change.m_expr) == SSA_NAME)
{
gimple *def_stmt = SSA_NAME_DEF_STMT (change.m_expr);
if (gcall *call = dyn_cast <gcall *> (def_stmt))
@@ -1489,7 +1518,8 @@ malloc_state_machine::get_or_create_deallocator (tree deallocator_fndecl)
/* Reuse "free". */
deallocator *d;
if (is_named_call_p (deallocator_fndecl, "free")
- || is_std_named_call_p (deallocator_fndecl, "free"))
+ || is_std_named_call_p (deallocator_fndecl, "free")
+ || is_named_call_p (deallocator_fndecl, "__builtin_free"))
d = &m_free.m_deallocator;
else
{
@@ -1503,6 +1533,38 @@ malloc_state_machine::get_or_create_deallocator (tree deallocator_fndecl)
return d;
}
+/* Return true if FNDECL is a known allocator: either one we know by
+   name and number of arguments, or a built-in allocator whose
+   result is freed with __builtin_free.  */
+
+static bool
+known_allocator_p (const_tree fndecl, const gcall *call)
+{
+ /* Either it is a function we know by name and number of arguments... */
+ if (is_named_call_p (fndecl, "malloc", call, 1)
+ || is_named_call_p (fndecl, "calloc", call, 2)
+ || is_std_named_call_p (fndecl, "malloc", call, 1)
+ || is_std_named_call_p (fndecl, "calloc", call, 2)
+ || is_named_call_p (fndecl, "strdup", call, 1)
+ || is_named_call_p (fndecl, "strndup", call, 2))
+ return true;
+
+ /* ... or it is a builtin allocator that allocates objects freed with
+ __builtin_free. */
+ if (fndecl_built_in_p (fndecl, BUILT_IN_NORMAL))
+ switch (DECL_FUNCTION_CODE (fndecl))
+ {
+ case BUILT_IN_MALLOC:
+ case BUILT_IN_CALLOC:
+ case BUILT_IN_STRDUP:
+ case BUILT_IN_STRNDUP:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
/* Implementation of state_machine::on_stmt vfunc for malloc_state_machine. */
bool
@@ -1513,14 +1575,7 @@ malloc_state_machine::on_stmt (sm_context *sm_ctxt,
if (const gcall *call = dyn_cast <const gcall *> (stmt))
if (tree callee_fndecl = sm_ctxt->get_fndecl_for_call (call))
{
- if (is_named_call_p (callee_fndecl, "malloc", call, 1)
- || is_named_call_p (callee_fndecl, "calloc", call, 2)
- || is_std_named_call_p (callee_fndecl, "malloc", call, 1)
- || is_std_named_call_p (callee_fndecl, "calloc", call, 2)
- || is_named_call_p (callee_fndecl, "__builtin_malloc", call, 1)
- || is_named_call_p (callee_fndecl, "__builtin_calloc", call, 2)
- || is_named_call_p (callee_fndecl, "strdup", call, 1)
- || is_named_call_p (callee_fndecl, "strndup", call, 2))
+ if (known_allocator_p (callee_fndecl, call))
{
on_allocator_call (sm_ctxt, call, &m_free);
return true;
@@ -1569,6 +1624,9 @@ malloc_state_machine::on_stmt (sm_context *sm_ctxt,
return true;
}
+ if (unaffected_by_call_p (callee_fndecl))
+ return true;
+
/* Cast away const-ness for cache-like operations. */
malloc_state_machine *mutable_this
= const_cast <malloc_state_machine *> (this);
@@ -1600,11 +1658,11 @@ malloc_state_machine::on_stmt (sm_context *sm_ctxt,
if (bitmap_empty_p (nonnull_args)
|| bitmap_bit_p (nonnull_args, i))
{
- tree diag_arg = sm_ctxt->get_diagnostic_tree (arg);
state_t state = sm_ctxt->get_state (stmt, arg);
/* Can't use a switch as the states are non-const. */
if (unchecked_p (state))
{
+ tree diag_arg = sm_ctxt->get_diagnostic_tree (arg);
sm_ctxt->warn (node, stmt, arg,
new possible_null_arg (*this, diag_arg,
callee_fndecl,
@@ -1616,6 +1674,7 @@ malloc_state_machine::on_stmt (sm_context *sm_ctxt,
}
else if (state == m_null)
{
+ tree diag_arg = sm_ctxt->get_diagnostic_tree (arg);
sm_ctxt->warn (node, stmt, arg,
new null_arg (*this, diag_arg,
callee_fndecl, i));
@@ -1674,11 +1733,11 @@ malloc_state_machine::on_stmt (sm_context *sm_ctxt,
if (TREE_CODE (op) == MEM_REF)
{
tree arg = TREE_OPERAND (op, 0);
- tree diag_arg = sm_ctxt->get_diagnostic_tree (arg);
state_t state = sm_ctxt->get_state (stmt, arg);
if (unchecked_p (state))
{
+ tree diag_arg = sm_ctxt->get_diagnostic_tree (arg);
sm_ctxt->warn (node, stmt, arg,
new possible_null_deref (*this, diag_arg));
const allocation_state *astate = as_a_allocation_state (state);
@@ -1686,12 +1745,14 @@ malloc_state_machine::on_stmt (sm_context *sm_ctxt,
}
else if (state == m_null)
{
+ tree diag_arg = sm_ctxt->get_diagnostic_tree (arg);
sm_ctxt->warn (node, stmt, arg,
new null_deref (*this, diag_arg));
sm_ctxt->set_next_state (stmt, arg, m_stop);
}
else if (freed_p (state))
{
+ tree diag_arg = sm_ctxt->get_diagnostic_tree (arg);
const allocation_state *astate = as_a_allocation_state (state);
sm_ctxt->warn (node, stmt, arg,
new use_after_free (*this, diag_arg,
@@ -1738,7 +1799,6 @@ malloc_state_machine::on_deallocator_call (sm_context *sm_ctxt,
if (argno >= gimple_call_num_args (call))
return;
tree arg = gimple_call_arg (call, argno);
- tree diag_arg = sm_ctxt->get_diagnostic_tree (arg);
state_t state = sm_ctxt->get_state (call, arg);
@@ -1752,6 +1812,7 @@ malloc_state_machine::on_deallocator_call (sm_context *sm_ctxt,
if (!astate->m_deallocators->contains_p (d))
{
/* Wrong allocator. */
+ tree diag_arg = sm_ctxt->get_diagnostic_tree (arg);
pending_diagnostic *pd
= new mismatching_deallocation (*this, diag_arg,
astate->m_deallocators,
@@ -1766,6 +1827,7 @@ malloc_state_machine::on_deallocator_call (sm_context *sm_ctxt,
else if (state == d->m_freed)
{
/* freed -> stop, with warning. */
+ tree diag_arg = sm_ctxt->get_diagnostic_tree (arg);
sm_ctxt->warn (node, call, arg,
new double_free (*this, diag_arg, d->m_name));
sm_ctxt->set_next_state (call, arg, m_stop);
@@ -1773,6 +1835,7 @@ malloc_state_machine::on_deallocator_call (sm_context *sm_ctxt,
else if (state == m_non_heap)
{
/* non-heap -> stop, with warning. */
+ tree diag_arg = sm_ctxt->get_diagnostic_tree (arg);
sm_ctxt->warn (node, call, arg,
new free_of_non_heap (*this, diag_arg,
d->m_name));
@@ -1780,54 +1843,65 @@ malloc_state_machine::on_deallocator_call (sm_context *sm_ctxt,
}
}
-/* Implementation of realloc(3):
-
- void *realloc(void *ptr, size_t size);
-
- realloc(3) is awkward.
+/* Handle a call to "realloc".
+ Check for free of non-heap or mismatching allocators,
+ transitioning to the "stop" state for such cases.
- We currently don't have a way to express multiple possible outcomes
- from a function call, "bifurcating" the state such as:
- - success: non-NULL is returned
- - failure: NULL is returned, existing buffer is not freed.
- or even an N-way state split e.g.:
- - buffer grew successfully in-place
- - buffer was successfully moved to a larger allocation
- - buffer was successfully contracted
- - realloc failed, returning NULL, without freeing existing buffer.
- (PR analyzer/99260 tracks this)
-
- Given that we can currently only express one outcome, eliminate
- false positives by dropping state from the buffer. */
+ Otherwise, region_model::impl_call_realloc will later
+ get called (which will handle other sm-state transitions
+ when the state is bifurcated). */
void
malloc_state_machine::on_realloc_call (sm_context *sm_ctxt,
- const supernode *node ATTRIBUTE_UNUSED,
+ const supernode *node,
const gcall *call) const
{
- tree ptr = gimple_call_arg (call, 0);
- tree diag_ptr = sm_ctxt->get_diagnostic_tree (ptr);
+ const unsigned argno = 0;
+ const deallocator *d = &m_realloc;
- state_t state = sm_ctxt->get_state (call, ptr);
+ tree arg = gimple_call_arg (call, argno);
+
+ state_t state = sm_ctxt->get_state (call, arg);
- /* Detect mismatches. */
if (unchecked_p (state) || nonnull_p (state))
{
const allocation_state *astate = as_a_allocation_state (state);
gcc_assert (astate->m_deallocators);
- if (astate->m_deallocators != &m_free)
+ if (!astate->m_deallocators->contains_p (&m_free.m_deallocator))
{
/* Wrong allocator. */
+ tree diag_arg = sm_ctxt->get_diagnostic_tree (arg);
pending_diagnostic *pd
- = new mismatching_deallocation (*this, diag_ptr,
+ = new mismatching_deallocation (*this, diag_arg,
astate->m_deallocators,
- &m_realloc);
- sm_ctxt->warn (node, call, ptr, pd);
+ d);
+ sm_ctxt->warn (node, call, arg, pd);
+ sm_ctxt->set_next_state (call, arg, m_stop);
+ if (path_context *path_ctxt = sm_ctxt->get_path_context ())
+ path_ctxt->terminate_path ();
}
}
-
- /* Transition ptr to "stop" state. */
- sm_ctxt->set_next_state (call, ptr, m_stop);
+ else if (state == m_free.m_deallocator.m_freed)
+ {
+ /* freed -> stop, with warning. */
+ tree diag_arg = sm_ctxt->get_diagnostic_tree (arg);
+ sm_ctxt->warn (node, call, arg,
+ new double_free (*this, diag_arg, "free"));
+ sm_ctxt->set_next_state (call, arg, m_stop);
+ if (path_context *path_ctxt = sm_ctxt->get_path_context ())
+ path_ctxt->terminate_path ();
+ }
+ else if (state == m_non_heap)
+ {
+ /* non-heap -> stop, with warning. */
+ tree diag_arg = sm_ctxt->get_diagnostic_tree (arg);
+ sm_ctxt->warn (node, call, arg,
+ new free_of_non_heap (*this, diag_arg,
+ d->m_name));
+ sm_ctxt->set_next_state (call, arg, m_stop);
+ if (path_context *path_ctxt = sm_ctxt->get_path_context ())
+ path_ctxt->terminate_path ();
+ }
}
/* Implementation of state_machine::on_phi vfunc for malloc_state_machine. */
@@ -1852,11 +1926,11 @@ void
malloc_state_machine::on_condition (sm_context *sm_ctxt,
const supernode *node ATTRIBUTE_UNUSED,
const gimple *stmt,
- tree lhs,
+ const svalue *lhs,
enum tree_code op,
- tree rhs) const
+ const svalue *rhs) const
{
- if (!zerop (rhs))
+ if (!rhs->all_zeroes_p ())
return;
if (!any_pointer_p (lhs))
@@ -1920,6 +1994,28 @@ malloc_state_machine::reset_when_passed_to_unknown_fn_p (state_t s,
return is_mutable;
}
+/* Return true if calls to FNDECL are known to not affect this sm-state. */
+
+bool
+malloc_state_machine::unaffected_by_call_p (tree fndecl)
+{
+ /* A set of functions that are known to not affect allocation
+ status, even if we haven't fully modelled the rest of their
+ behavior yet. */
+ static const char * const funcnames[] = {
+ /* This array must be kept sorted. */
+ "strsep",
+ };
+ const size_t count
+    = sizeof (funcnames) / sizeof (funcnames[0]);
+ function_set fs (funcnames, count);
+
+ if (fs.contains_decl_p (fndecl))
+ return true;
+
+ return false;
+}
+
/* Shared logic for handling GIMPLE_ASSIGNs and GIMPLE_PHIs that
assign zero to LHS. */
@@ -1937,6 +2033,30 @@ malloc_state_machine::on_zero_assignment (sm_context *sm_ctxt,
sm_ctxt->set_next_state (stmt, lhs, m_null);
}
+/* Special-case hook for handling realloc, for the "success with move to
+ a new buffer" case, marking OLD_PTR_SVAL as freed and NEW_PTR_SVAL as
+ non-null.
+
+ This is similar to on_deallocator_call and on_allocator_call,
+   but the checks happen in on_realloc_call, and the transitions here
+   are made by splitting the states.  */
+
+void
+malloc_state_machine::
+on_realloc_with_move (region_model *model,
+ sm_state_map *smap,
+ const svalue *old_ptr_sval,
+ const svalue *new_ptr_sval,
+ const extrinsic_state &ext_state) const
+{
+ smap->set_state (model, old_ptr_sval,
+ m_free.m_deallocator.m_freed,
+ NULL, ext_state);
+
+ smap->set_state (model, new_ptr_sval,
+ m_free.m_nonnull,
+ NULL, ext_state);
+}
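+
+/* From the caller's perspective, the state-splitting means that after
+   e.g.
+
+     void *q = realloc (p, n);
+
+   the analyzer explores several outcomes, such as failure (q is NULL
+   and p is still allocated) and success-with-move (q is non-NULL and
+   p has been freed), with this hook implementing the sm-state side of
+   the moved case.  */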
+
} // anonymous namespace
/* Internal interface to this file. */
@@ -1947,6 +2067,40 @@ make_malloc_state_machine (logger *logger)
return new malloc_state_machine (logger);
}
+/* Special-case hook for handling realloc, for use by
+ region_model::impl_call_realloc::success_with_move::update_model. */
+
+void
+region_model::on_realloc_with_move (const call_details &cd,
+ const svalue *old_ptr_sval,
+ const svalue *new_ptr_sval)
+{
+ region_model_context *ctxt = cd.get_ctxt ();
+ if (!ctxt)
+ return;
+ const extrinsic_state *ext_state = ctxt->get_ext_state ();
+ if (!ext_state)
+ return;
+
+ sm_state_map *smap;
+ const state_machine *sm;
+ unsigned sm_idx;
+ if (!ctxt->get_malloc_map (&smap, &sm, &sm_idx))
+ return;
+
+ gcc_assert (smap);
+ gcc_assert (sm);
+
+ const malloc_state_machine &malloc_sm
+ = (const malloc_state_machine &)*sm;
+
+ malloc_sm.on_realloc_with_move (this,
+ smap,
+ old_ptr_sval,
+ new_ptr_sval,
+ *ext_state);
+}
+
} // namespace ana
#endif /* #if ENABLE_ANALYZER */
diff --git a/gcc/analyzer/sm-pattern-test.cc b/gcc/analyzer/sm-pattern-test.cc
index 43b8475..4e28549 100644
--- a/gcc/analyzer/sm-pattern-test.cc
+++ b/gcc/analyzer/sm-pattern-test.cc
@@ -37,6 +37,12 @@ along with GCC; see the file COPYING3. If not see
#include "analyzer/analyzer-logging.h"
#include "analyzer/sm.h"
#include "analyzer/pending-diagnostic.h"
+#include "tristate.h"
+#include "selftest.h"
+#include "analyzer/call-string.h"
+#include "analyzer/program-point.h"
+#include "analyzer/store.h"
+#include "analyzer/region-model.h"
#if ENABLE_ANALYZER
@@ -61,9 +67,9 @@ public:
void on_condition (sm_context *sm_ctxt,
const supernode *node,
const gimple *stmt,
- tree lhs,
+ const svalue *lhs,
enum tree_code op,
- tree rhs) const FINAL OVERRIDE;
+ const svalue *rhs) const FINAL OVERRIDE;
bool can_purge_p (state_t s) const FINAL OVERRIDE;
};
@@ -118,18 +124,22 @@ void
pattern_test_state_machine::on_condition (sm_context *sm_ctxt,
const supernode *node,
const gimple *stmt,
- tree lhs,
+ const svalue *lhs,
enum tree_code op,
- tree rhs) const
+ const svalue *rhs) const
{
if (stmt == NULL)
return;
- if (!CONSTANT_CLASS_P (rhs))
+ tree rhs_cst = rhs->maybe_get_constant ();
+ if (!rhs_cst)
return;
- pending_diagnostic *diag = new pattern_match (lhs, op, rhs);
- sm_ctxt->warn (node, stmt, lhs, diag);
+ if (tree lhs_expr = sm_ctxt->get_diagnostic_tree (lhs))
+ {
+ pending_diagnostic *diag = new pattern_match (lhs_expr, op, rhs_cst);
+ sm_ctxt->warn (node, stmt, lhs_expr, diag);
+ }
}
bool
diff --git a/gcc/analyzer/sm-sensitive.cc b/gcc/analyzer/sm-sensitive.cc
index 95172f0..4add55e 100644
--- a/gcc/analyzer/sm-sensitive.cc
+++ b/gcc/analyzer/sm-sensitive.cc
@@ -58,13 +58,6 @@ public:
const supernode *node,
const gimple *stmt) const FINAL OVERRIDE;
- void on_condition (sm_context *sm_ctxt,
- const supernode *node,
- const gimple *stmt,
- tree lhs,
- enum tree_code op,
- tree rhs) const FINAL OVERRIDE;
-
bool can_purge_p (state_t s) const FINAL OVERRIDE;
/* State for "sensitive" data, such as a password. */
@@ -174,10 +167,12 @@ sensitive_state_machine::warn_for_any_exposure (sm_context *sm_ctxt,
const gimple *stmt,
tree arg) const
{
- tree diag_arg = sm_ctxt->get_diagnostic_tree (arg);
if (sm_ctxt->get_state (stmt, arg) == m_sensitive)
- sm_ctxt->warn (node, stmt, arg,
- new exposure_through_output_file (*this, diag_arg));
+ {
+ tree diag_arg = sm_ctxt->get_diagnostic_tree (arg);
+ sm_ctxt->warn (node, stmt, arg,
+ new exposure_through_output_file (*this, diag_arg));
+ }
}
/* Implementation of state_machine::on_stmt vfunc for
@@ -220,17 +215,6 @@ sensitive_state_machine::on_stmt (sm_context *sm_ctxt,
return false;
}
-void
-sensitive_state_machine::on_condition (sm_context *sm_ctxt ATTRIBUTE_UNUSED,
- const supernode *node ATTRIBUTE_UNUSED,
- const gimple *stmt ATTRIBUTE_UNUSED,
- tree lhs ATTRIBUTE_UNUSED,
- enum tree_code op ATTRIBUTE_UNUSED,
- tree rhs ATTRIBUTE_UNUSED) const
-{
- /* Empty. */
-}
-
bool
sensitive_state_machine::can_purge_p (state_t s ATTRIBUTE_UNUSED) const
{
diff --git a/gcc/analyzer/sm-signal.cc b/gcc/analyzer/sm-signal.cc
index d7e7e7c..e8cbe2d 100644
--- a/gcc/analyzer/sm-signal.cc
+++ b/gcc/analyzer/sm-signal.cc
@@ -81,13 +81,6 @@ public:
const supernode *node,
const gimple *stmt) const FINAL OVERRIDE;
- void on_condition (sm_context *sm_ctxt,
- const supernode *node,
- const gimple *stmt,
- tree lhs,
- enum tree_code op,
- tree rhs) const FINAL OVERRIDE;
-
bool can_purge_p (state_t s) const FINAL OVERRIDE;
/* These states are "global", rather than per-expression. */
@@ -213,10 +206,10 @@ update_model_for_signal_handler (region_model *model,
/* Custom exploded_edge info: entry into a signal-handler. */
-class signal_delivery_edge_info_t : public exploded_edge::custom_info_t
+class signal_delivery_edge_info_t : public custom_edge_info
{
public:
- void print (pretty_printer *pp) FINAL OVERRIDE
+ void print (pretty_printer *pp) const FINAL OVERRIDE
{
pp_string (pp, "signal delivered");
}
@@ -227,20 +220,24 @@ public:
return custom_obj;
}
- void update_model (region_model *model,
- const exploded_edge &eedge) FINAL OVERRIDE
+ bool update_model (region_model *model,
+ const exploded_edge *eedge,
+ region_model_context *) const FINAL OVERRIDE
{
- update_model_for_signal_handler (model, eedge.m_dest->get_function ());
+ gcc_assert (eedge);
+ update_model_for_signal_handler (model, eedge->m_dest->get_function ());
+ return true;
}
void add_events_to_path (checker_path *emission_path,
const exploded_edge &eedge ATTRIBUTE_UNUSED)
- FINAL OVERRIDE
+ const FINAL OVERRIDE
{
emission_path->add_event
- (new custom_event (UNKNOWN_LOCATION, NULL_TREE, 0,
- "later on,"
- " when the signal is delivered to the process"));
+ (new precanned_custom_event
+ (UNKNOWN_LOCATION, NULL_TREE, 0,
+ "later on,"
+ " when the signal is delivered to the process"));
}
};
@@ -362,20 +359,6 @@ signal_state_machine::on_stmt (sm_context *sm_ctxt,
return false;
}
-/* Implementation of state_machine::on_condition vfunc for
- signal_state_machine. */
-
-void
-signal_state_machine::on_condition (sm_context *sm_ctxt ATTRIBUTE_UNUSED,
- const supernode *node ATTRIBUTE_UNUSED,
- const gimple *stmt ATTRIBUTE_UNUSED,
- tree lhs ATTRIBUTE_UNUSED,
- enum tree_code op ATTRIBUTE_UNUSED,
- tree rhs ATTRIBUTE_UNUSED) const
-{
- // Empty
-}
-
bool
signal_state_machine::can_purge_p (state_t s ATTRIBUTE_UNUSED) const
{
diff --git a/gcc/analyzer/sm-taint.cc b/gcc/analyzer/sm-taint.cc
index 2b2792e..721d3ea 100644
--- a/gcc/analyzer/sm-taint.cc
+++ b/gcc/analyzer/sm-taint.cc
@@ -61,9 +61,9 @@ public:
void on_condition (sm_context *sm_ctxt,
const supernode *node,
const gimple *stmt,
- tree lhs,
+ const svalue *lhs,
enum tree_code op,
- tree rhs) const FINAL OVERRIDE;
+ const svalue *rhs) const FINAL OVERRIDE;
bool can_purge_p (state_t s) const FINAL OVERRIDE;
@@ -227,7 +227,6 @@ taint_state_machine::on_stmt (sm_context *sm_ctxt,
if (op == ARRAY_REF)
{
tree arg = TREE_OPERAND (rhs1, 1);
- tree diag_arg = sm_ctxt->get_diagnostic_tree (arg);
/* Unsigned types have an implicit lower bound. */
bool is_unsigned = false;
@@ -239,6 +238,7 @@ taint_state_machine::on_stmt (sm_context *sm_ctxt,
if (state == m_tainted)
{
/* Complain about missing bounds. */
+ tree diag_arg = sm_ctxt->get_diagnostic_tree (arg);
pending_diagnostic *d
= new tainted_array_index (*this, diag_arg,
is_unsigned
@@ -249,6 +249,7 @@ taint_state_machine::on_stmt (sm_context *sm_ctxt,
else if (state == m_has_lb)
{
/* Complain about missing upper bound. */
+ tree diag_arg = sm_ctxt->get_diagnostic_tree (arg);
sm_ctxt->warn (node, stmt, arg,
new tainted_array_index (*this, diag_arg,
BOUNDS_LOWER));
@@ -259,6 +260,7 @@ taint_state_machine::on_stmt (sm_context *sm_ctxt,
/* Complain about missing lower bound. */
if (!is_unsigned)
{
+ tree diag_arg = sm_ctxt->get_diagnostic_tree (arg);
sm_ctxt->warn (node, stmt, arg,
new tainted_array_index (*this, diag_arg,
BOUNDS_UPPER));
@@ -279,9 +281,9 @@ void
taint_state_machine::on_condition (sm_context *sm_ctxt,
const supernode *node,
const gimple *stmt,
- tree lhs,
+ const svalue *lhs,
enum tree_code op,
- tree rhs ATTRIBUTE_UNUSED) const
+ const svalue *rhs ATTRIBUTE_UNUSED) const
{
if (stmt == NULL)
return;
diff --git a/gcc/analyzer/sm.cc b/gcc/analyzer/sm.cc
index 2d227dd..db07bf3 100644
--- a/gcc/analyzer/sm.cc
+++ b/gcc/analyzer/sm.cc
@@ -35,6 +35,11 @@ along with GCC; see the file COPYING3. If not see
#include "analyzer/analyzer.h"
#include "analyzer/analyzer-logging.h"
#include "analyzer/sm.h"
+#include "tristate.h"
+#include "analyzer/call-string.h"
+#include "analyzer/program-point.h"
+#include "analyzer/store.h"
+#include "analyzer/svalue.h"
#if ENABLE_ANALYZER
@@ -48,6 +53,15 @@ any_pointer_p (tree var)
return POINTER_TYPE_P (TREE_TYPE (var));
}
+/* Return true if SVAL has pointer or reference type. */
+
+bool
+any_pointer_p (const svalue *sval)
+{
+ if (!sval->get_type ())
+ return false;
+ return POINTER_TYPE_P (sval->get_type ());
+}
/* class state_machine::state. */
diff --git a/gcc/analyzer/sm.h b/gcc/analyzer/sm.h
index 8d4d030..02faffb 100644
--- a/gcc/analyzer/sm.h
+++ b/gcc/analyzer/sm.h
@@ -29,7 +29,8 @@ class state_machine;
class sm_context;
class pending_diagnostic;
-extern bool any_pointer_p (tree var);
+extern bool any_pointer_p (tree expr);
+extern bool any_pointer_p (const svalue *sval);
/* An abstract base class for a state machine describing an API.
Manages a set of state objects, and has various virtual functions
@@ -89,10 +90,14 @@ public:
{
}
- virtual void on_condition (sm_context *sm_ctxt,
- const supernode *node,
- const gimple *stmt,
- tree lhs, enum tree_code op, tree rhs) const = 0;
+ virtual void on_condition (sm_context *sm_ctxt ATTRIBUTE_UNUSED,
+ const supernode *node ATTRIBUTE_UNUSED,
+ const gimple *stmt ATTRIBUTE_UNUSED,
+ const svalue *lhs ATTRIBUTE_UNUSED,
+ enum tree_code op ATTRIBUTE_UNUSED,
+ const svalue *rhs ATTRIBUTE_UNUSED) const
+ {
+ }
/* Return true if it safe to discard the given state (to help
when simplifying state objects).
@@ -182,6 +187,8 @@ public:
/* Get the old state of VAR at STMT. */
virtual state_machine::state_t get_state (const gimple *stmt,
tree var) = 0;
+ virtual state_machine::state_t get_state (const gimple *stmt,
+ const svalue *) = 0;
/* Set the next state of VAR to be TO, recording the "origin" of the
state as ORIGIN.
Use STMT for location information. */
@@ -189,6 +196,10 @@ public:
tree var,
state_machine::state_t to,
tree origin = NULL_TREE) = 0;
+ virtual void set_next_state (const gimple *stmt,
+ const svalue *var,
+ state_machine::state_t to,
+ tree origin = NULL_TREE) = 0;
/* Called by state_machine in response to pattern matches:
if VAR is in state FROM, transition it to state TO, potentially
@@ -206,6 +217,18 @@ public:
set_next_state (stmt, var, to, origin);
}
+ void on_transition (const supernode *node ATTRIBUTE_UNUSED,
+ const gimple *stmt,
+ const svalue *var,
+ state_machine::state_t from,
+ state_machine::state_t to,
+ tree origin = NULL_TREE)
+ {
+ state_machine::state_t current = get_state (stmt, var);
+ if (current == from)
+ set_next_state (stmt, var, to, origin);
+ }
+
/* Called by state_machine in response to pattern matches:
issue a diagnostic D using NODE and STMT for location information. */
virtual void warn (const supernode *node, const gimple *stmt,
@@ -220,6 +243,7 @@ public:
{
return expr;
}
+ virtual tree get_diagnostic_tree (const svalue *) = 0;
virtual state_machine::state_t get_global_state () const = 0;
virtual void set_global_state (state_machine::state_t) = 0;
@@ -233,6 +257,11 @@ public:
Otherwise return NULL_TREE. */
virtual tree is_zero_assignment (const gimple *stmt) = 0;
+ virtual path_context *get_path_context () const
+ {
+ return NULL;
+ }
+
protected:
sm_context (int sm_idx, const state_machine &sm)
: m_sm_idx (sm_idx), m_sm (sm) {}
diff --git a/gcc/analyzer/state-purge.cc b/gcc/analyzer/state-purge.cc
index 70a09ed..8800570 100644
--- a/gcc/analyzer/state-purge.cc
+++ b/gcc/analyzer/state-purge.cc
@@ -288,6 +288,26 @@ state_purge_per_ssa_name::add_to_worklist (const function_point &point,
}
}
+/* Return true iff NAME is used by any of the phi nodes in SNODE
+ when processing the in-edge with PHI_ARG_IDX. */
+
+static bool
+name_used_by_phis_p (tree name, const supernode *snode,
+ size_t phi_arg_idx)
+{
+ gcc_assert (TREE_CODE (name) == SSA_NAME);
+
+ for (gphi_iterator gpi
+ = const_cast<supernode *> (snode)->start_phis ();
+ !gsi_end_p (gpi); gsi_next (&gpi))
+ {
+ gphi *phi = gpi.phi ();
+ if (gimple_phi_arg_def (phi, phi_arg_idx) == name)
+ return true;
+ }
+ return false;
+}
+
/* Process POINT, popped from WORKLIST.
Iterate over predecessors of POINT, adding to WORKLIST. */
@@ -325,12 +345,29 @@ state_purge_per_ssa_name::process_point (const function_point &point,
= const_cast<supernode *> (snode)->start_phis ();
!gsi_end_p (gpi); gsi_next (&gpi))
{
+ gcc_assert (point.get_from_edge ());
+ const cfg_superedge *cfg_sedge
+ = point.get_from_edge ()->dyn_cast_cfg_superedge ();
+ gcc_assert (cfg_sedge);
+
gphi *phi = gpi.phi ();
+ /* Are we at the def-stmt for m_name? */
if (phi == def_stmt)
{
- if (logger)
- logger->log ("def stmt within phis; terminating");
- return;
+ if (name_used_by_phis_p (m_name, snode,
+ cfg_sedge->get_phi_arg_idx ()))
+ {
+ if (logger)
+ logger->log ("name in def stmt used within phis;"
+ " continuing");
+ }
+ else
+ {
+ if (logger)
+ logger->log ("name in def stmt not used within phis;"
+ " terminating");
+ return;
+ }
}
}
@@ -346,18 +383,31 @@ state_purge_per_ssa_name::process_point (const function_point &point,
{
/* Add any intraprocedually edge for a call. */
if (snode->m_returning_call)
- {
- cgraph_edge *cedge
+ {
+ gcall *returning_call = snode->m_returning_call;
+ cgraph_edge *cedge
= supergraph_call_edge (snode->m_fun,
- snode->m_returning_call);
- gcc_assert (cedge);
- superedge *sedge
- = map.get_sg ().get_intraprocedural_edge_for_call (cedge);
- gcc_assert (sedge);
- add_to_worklist
- (function_point::after_supernode (sedge->m_src),
- worklist, logger);
- }
+ returning_call);
+	  if (cedge)
+ {
+ superedge *sedge
+ = map.get_sg ().get_intraprocedural_edge_for_call (cedge);
+ gcc_assert (sedge);
+ add_to_worklist
+ (function_point::after_supernode (sedge->m_src),
+ worklist, logger);
+ }
+ else
+ {
+ supernode *callernode
+ = map.get_sg ().get_supernode_for_stmt (returning_call);
+
+ gcc_assert (callernode);
+ add_to_worklist
+ (function_point::after_supernode (callernode),
+ worklist, logger);
+ }
+ }
}
}
break;
@@ -446,23 +496,20 @@ state_purge_annotator::add_node_annotations (graphviz_out *gv,
"lightblue");
pp_write_text_to_stream (pp);
- // FIXME: passing in a NULL in-edge means we get no hits
- function_point before_supernode
- (function_point::before_supernode (&n, NULL));
+ /* Different in-edges mean different names need purging.
+ Determine which points to dump. */
+ auto_vec<function_point> points;
+ if (n.entry_p ())
+ points.safe_push (function_point::before_supernode (&n, NULL));
+ else
+ for (auto inedge : n.m_preds)
+ points.safe_push (function_point::before_supernode (&n, inedge));
- for (state_purge_map::iterator iter = m_map->begin ();
- iter != m_map->end ();
- ++iter)
+  for (auto &point : points)
{
- tree name = (*iter).first;
- state_purge_per_ssa_name *per_name_data = (*iter).second;
- if (per_name_data->get_function () == n.m_fun)
- {
- if (per_name_data->needed_at_point_p (before_supernode))
- pp_printf (pp, "%qE needed here", name);
- else
- pp_printf (pp, "%qE not needed here", name);
- }
+ point.print (pp, format (true));
+ pp_newline (pp);
+ print_needed (gv, point, false);
pp_newline (pp);
}
@@ -471,19 +518,20 @@ state_purge_annotator::add_node_annotations (graphviz_out *gv,
return false;
}
-/* Print V to GV as a comma-separated list in braces within a <TR>,
- titling it with TITLE.
+/* Print V to GV as a comma-separated list in braces, titling it with TITLE.
+   If WITHIN_TABLE is true, print it within a <TR>.
- Subroutine of state_purge_annotator::add_stmt_annotations. */
+ Subroutine of state_purge_annotator::print_needed. */
static void
print_vec_of_names (graphviz_out *gv, const char *title,
- const auto_vec<tree> &v)
+ const auto_vec<tree> &v, bool within_table)
{
pretty_printer *pp = gv->get_pp ();
tree name;
unsigned i;
- gv->begin_trtd ();
+ if (within_table)
+ gv->begin_trtd ();
pp_printf (pp, "%s: {", title);
FOR_EACH_VEC_ELT (v, i, name)
{
@@ -492,8 +540,11 @@ print_vec_of_names (graphviz_out *gv, const char *title,
pp_printf (pp, "%qE", name);
}
pp_printf (pp, "}");
- pp_write_text_as_html_like_dot_to_stream (pp);
- gv->end_tdtr ();
+ if (within_table)
+ {
+ pp_write_text_as_html_like_dot_to_stream (pp);
+ gv->end_tdtr ();
+ }
pp_newline (pp);
}
@@ -525,6 +576,17 @@ state_purge_annotator::add_stmt_annotations (graphviz_out *gv,
function_point before_stmt
(function_point::before_stmt (supernode, stmt_idx));
+ print_needed (gv, before_stmt, true);
+}
+
+/* Get the SSA names needed and not needed at POINT, and print them to GV.
+ If WITHIN_TABLE is true, print them within <TR> elements. */
+
+void
+state_purge_annotator::print_needed (graphviz_out *gv,
+ const function_point &point,
+ bool within_table) const
+{
auto_vec<tree> needed;
auto_vec<tree> not_needed;
for (state_purge_map::iterator iter = m_map->begin ();
@@ -533,17 +595,17 @@ state_purge_annotator::add_stmt_annotations (graphviz_out *gv,
{
tree name = (*iter).first;
state_purge_per_ssa_name *per_name_data = (*iter).second;
- if (per_name_data->get_function () == supernode->m_fun)
+ if (per_name_data->get_function () == point.get_function ())
{
- if (per_name_data->needed_at_point_p (before_stmt))
+ if (per_name_data->needed_at_point_p (point))
needed.safe_push (name);
else
not_needed.safe_push (name);
}
}
- print_vec_of_names (gv, "needed here", needed);
- print_vec_of_names (gv, "not needed here", not_needed);
+ print_vec_of_names (gv, "needed here", needed, within_table);
+ print_vec_of_names (gv, "not needed here", not_needed, within_table);
}
#endif /* #if ENABLE_ANALYZER */
diff --git a/gcc/analyzer/state-purge.h b/gcc/analyzer/state-purge.h
index 879013d..409490e 100644
--- a/gcc/analyzer/state-purge.h
+++ b/gcc/analyzer/state-purge.h
@@ -159,6 +159,10 @@ public:
const FINAL OVERRIDE;
private:
+ void print_needed (graphviz_out *gv,
+ const function_point &point,
+ bool within_table) const;
+
const state_purge_map *m_map;
};
diff --git a/gcc/analyzer/store.cc b/gcc/analyzer/store.cc
index 53b6e21..3760858 100644
--- a/gcc/analyzer/store.cc
+++ b/gcc/analyzer/store.cc
@@ -63,52 +63,80 @@ along with GCC; see the file COPYING3. If not see
namespace ana {
-/* Get a human-readable string for KIND for dumps. */
+/* Dump SVALS to PP, sorting them to ensure determinism. */
-const char *binding_kind_to_string (enum binding_kind kind)
+static void
+dump_svalue_set (const hash_set <const svalue *> &svals,
+ pretty_printer *pp, bool simple)
{
- switch (kind)
+ auto_vec <const svalue *> v;
+ for (hash_set<const svalue *>::iterator iter = svals.begin ();
+ iter != svals.end (); ++iter)
{
- default:
- case BK_empty:
- case BK_deleted:
- /* We shouldn't be attempting to print the hash kinds. */
- gcc_unreachable ();
- case BK_direct:
- return "direct";
- case BK_default:
- return "default";
+ v.safe_push (*iter);
+ }
+ v.qsort (svalue::cmp_ptr_ptr);
+
+ pp_character (pp, '{');
+ const svalue *sval;
+ unsigned i;
+ FOR_EACH_VEC_ELT (v, i, sval)
+ {
+ if (i > 0)
+ pp_string (pp, ", ");
+ sval->dump_to_pp (pp, simple);
}
+ pp_character (pp, '}');
+}
+
+/* class uncertainty_t. */
+
+/* Dump this object to PP. */
+
+void
+uncertainty_t::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+ pp_string (pp, "{m_maybe_bound_svals: ");
+ dump_svalue_set (m_maybe_bound_svals, pp, simple);
+
+ pp_string (pp, ", m_mutable_at_unknown_call_svals: ");
+ dump_svalue_set (m_mutable_at_unknown_call_svals, pp, simple);
+ pp_string (pp, "}");
+}
+
+/* Dump this object to stderr. */
+
+DEBUG_FUNCTION void
+uncertainty_t::dump (bool simple) const
+{
+ pretty_printer pp;
+ pp_format_decoder (&pp) = default_tree_printer;
+ pp_show_color (&pp) = pp_show_color (global_dc->printer);
+ pp.buffer->stream = stderr;
+ dump_to_pp (&pp, simple);
+ pp_newline (&pp);
+ pp_flush (&pp);
}
/* class binding_key. */
const binding_key *
-binding_key::make (store_manager *mgr, const region *r,
- enum binding_kind kind)
+binding_key::make (store_manager *mgr, const region *r)
{
region_offset offset = r->get_offset ();
if (offset.symbolic_p ())
- return mgr->get_symbolic_binding (r, kind);
+ return mgr->get_symbolic_binding (r);
else
{
bit_size_t bit_size;
if (r->get_bit_size (&bit_size))
return mgr->get_concrete_binding (offset.get_bit_offset (),
- bit_size, kind);
+ bit_size);
else
- return mgr->get_symbolic_binding (r, kind);
+ return mgr->get_symbolic_binding (r);
}
}
-/* Base class implementation of binding_key::dump_to_pp vfunc. */
-
-void
-binding_key::dump_to_pp (pretty_printer *pp, bool /*simple*/) const
-{
- pp_printf (pp, "kind: %s", binding_kind_to_string (m_kind));
-}
-
/* Dump this binding_key to stderr. */
DEBUG_FUNCTION void
@@ -149,11 +177,6 @@ binding_key::cmp_ptrs (const void *p1, const void *p2)
int
binding_key::cmp (const binding_key *k1, const binding_key *k2)
{
- enum binding_kind kind1 = k1->get_kind ();
- enum binding_kind kind2 = k2->get_kind ();
- if (kind1 != kind2)
- return (int)kind1 - (int)kind2;
-
int concrete1 = k1->concrete_p ();
int concrete2 = k2->concrete_p ();
if (int concrete_cmp = concrete1 - concrete2)
@@ -181,20 +204,248 @@ binding_key::cmp (const binding_key *k1, const binding_key *k2)
}
}
+/* struct bit_range. */
+
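+/* Dump this object to PP.  */
+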
+void
+bit_range::dump_to_pp (pretty_printer *pp) const
+{
+ byte_range bytes (0, 0);
+ if (as_byte_range (&bytes))
+ bytes.dump_to_pp (pp);
+ else
+ {
+ pp_string (pp, "start: ");
+ pp_wide_int (pp, m_start_bit_offset, SIGNED);
+ pp_string (pp, ", size: ");
+ pp_wide_int (pp, m_size_in_bits, SIGNED);
+ pp_string (pp, ", next: ");
+ pp_wide_int (pp, get_next_bit_offset (), SIGNED);
+ }
+}
+
+/* Dump this object to stderr. */
+
+DEBUG_FUNCTION void
+bit_range::dump () const
+{
+ pretty_printer pp;
+ pp.buffer->stream = stderr;
+ dump_to_pp (&pp);
+ pp_newline (&pp);
+ pp_flush (&pp);
+}
+
+/* If OTHER is a subset of this, return true and write
+ to *OUT the relative range of OTHER within this.
+ Otherwise return false. */
+
+bool
+bit_range::contains_p (const bit_range &other, bit_range *out) const
+{
+ if (contains_p (other.get_start_bit_offset ())
+ && contains_p (other.get_last_bit_offset ()))
+ {
+ out->m_start_bit_offset = other.m_start_bit_offset - m_start_bit_offset;
+ out->m_size_in_bits = other.m_size_in_bits;
+ return true;
+ }
+ else
+ return false;
+}
+
+/* If OTHER intersects this, return true and write
+ the relative range of OTHER within THIS to *OUT_THIS,
+ and the relative range of THIS within OTHER to *OUT_OTHER.
+ Otherwise return false. */
+
+bool
+bit_range::intersects_p (const bit_range &other,
+ bit_range *out_this,
+ bit_range *out_other) const
+{
+ if (get_start_bit_offset () < other.get_next_bit_offset ()
+ && other.get_start_bit_offset () < get_next_bit_offset ())
+ {
+ bit_offset_t overlap_start
+ = MAX (get_start_bit_offset (),
+ other.get_start_bit_offset ());
+ bit_offset_t overlap_next
+ = MIN (get_next_bit_offset (),
+ other.get_next_bit_offset ());
+ gcc_assert (overlap_next > overlap_start);
+ bit_range abs_overlap_bits (overlap_start, overlap_next - overlap_start);
+ *out_this = abs_overlap_bits - get_start_bit_offset ();
+ *out_other = abs_overlap_bits - other.get_start_bit_offset ();
+ return true;
+ }
+ else
+ return false;
+}
+
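+/* For example, bits 8-23 and bits 16-31 intersect: the overlap is
+   bits 16-23, i.e. bits 8-15 relative to the first range and
+   bits 0-7 relative to the second.  */
+
+/* qsort comparator for bit ranges: sort first by start offset,
+   then by size.  */
+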
+int
+bit_range::cmp (const bit_range &br1, const bit_range &br2)
+{
+ if (int start_cmp = wi::cmps (br1.m_start_bit_offset,
+ br2.m_start_bit_offset))
+ return start_cmp;
+
+ return wi::cmpu (br1.m_size_in_bits, br2.m_size_in_bits);
+}
+
+/* Return a copy of this range, with its start shifted down by OFFSET.  */
+
+bit_range
+bit_range::operator- (bit_offset_t offset) const
+{
+ return bit_range (m_start_bit_offset - offset, m_size_in_bits);
+}
+
+/* If MASK is a contiguous range of set bits, write them
+ to *OUT and return true.
+ Otherwise return false. */
+
+bool
+bit_range::from_mask (unsigned HOST_WIDE_INT mask, bit_range *out)
+{
+ unsigned iter_bit_idx = 0;
+ unsigned HOST_WIDE_INT iter_bit_mask = 1;
+
+ /* Find the first contiguous run of set bits in MASK. */
+
+ /* Find first set bit in MASK. */
+ while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
+ {
+ if (mask & iter_bit_mask)
+ break;
+ iter_bit_idx++;
+ iter_bit_mask <<= 1;
+ }
+ if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
+ /* MASK is zero. */
+ return false;
+
+ unsigned first_set_iter_bit_idx = iter_bit_idx;
+ unsigned num_set_bits = 1;
+ iter_bit_idx++;
+ iter_bit_mask <<= 1;
+
+ /* Find next unset bit in MASK. */
+ while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
+ {
+ if (!(mask & iter_bit_mask))
+ break;
+ num_set_bits++;
+ iter_bit_idx++;
+ iter_bit_mask <<= 1;
+ }
+ if (iter_bit_idx == HOST_BITS_PER_WIDE_INT)
+ {
+ *out = bit_range (first_set_iter_bit_idx, num_set_bits);
+ return true;
+ }
+
+ /* We now have the first contiguous run of set bits in MASK.
+ Fail if any other bits are set. */
+ while (iter_bit_idx < HOST_BITS_PER_WIDE_INT)
+ {
+ if (mask & iter_bit_mask)
+ return false;
+ iter_bit_idx++;
+ iter_bit_mask <<= 1;
+ }
+
+ *out = bit_range (first_set_iter_bit_idx, num_set_bits);
+ return true;
+}
+
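+/* For example, the mask 0x3c (binary 111100) yields the bit_range
+   starting at bit 2 with size 4, whereas the mask 0x5 fails, since
+   its set bits are not contiguous.  */
+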
+/* Attempt to convert this bit_range to a byte_range.
+ Return true if it is possible, writing the result to *OUT.
+ Otherwise return false. */
+
+bool
+bit_range::as_byte_range (byte_range *out) const
+{
+ if (m_start_bit_offset % BITS_PER_UNIT == 0
+ && m_size_in_bits % BITS_PER_UNIT == 0)
+ {
+ out->m_start_byte_offset = m_start_bit_offset / BITS_PER_UNIT;
+ out->m_size_in_bytes = m_size_in_bits / BITS_PER_UNIT;
+ return true;
+ }
+ return false;
+}
+
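+/* For example, bits 32-63 convert to bytes 4-7 (assuming 8-bit
+   bytes), whereas a range starting at bit 3, or one whose size is
+   not a whole number of bytes, cannot be converted.  */
+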
+/* Dump this object to PP. */
+
+void
+byte_range::dump_to_pp (pretty_printer *pp) const
+{
+ if (m_size_in_bytes == 1)
+ {
+ pp_string (pp, "byte ");
+ pp_wide_int (pp, m_start_byte_offset, SIGNED);
+ }
+ else
+ {
+ pp_string (pp, "bytes ");
+ pp_wide_int (pp, m_start_byte_offset, SIGNED);
+ pp_string (pp, "-");
+ pp_wide_int (pp, get_last_byte_offset (), SIGNED);
+ }
+}
+
+/* Dump this object to stderr. */
+
+DEBUG_FUNCTION void
+byte_range::dump () const
+{
+ pretty_printer pp;
+ pp.buffer->stream = stderr;
+ dump_to_pp (&pp);
+ pp_newline (&pp);
+ pp_flush (&pp);
+}
+
+/* If OTHER is a subset of this, return true and write
+ to *OUT the relative range of OTHER within this.
+ Otherwise return false. */
+
+bool
+byte_range::contains_p (const byte_range &other, byte_range *out) const
+{
+ if (contains_p (other.get_start_byte_offset ())
+ && contains_p (other.get_last_byte_offset ()))
+ {
+ out->m_start_byte_offset = other.m_start_byte_offset - m_start_byte_offset;
+ out->m_size_in_bytes = other.m_size_in_bytes;
+ return true;
+ }
+ else
+ return false;
+}
+
+/* qsort comparator for byte ranges. */
+
+int
+byte_range::cmp (const byte_range &br1, const byte_range &br2)
+{
+ /* Order first by offset. */
+ if (int start_cmp = wi::cmps (br1.m_start_byte_offset,
+ br2.m_start_byte_offset))
+ return start_cmp;
+
+ /* ...then by size. */
+ return wi::cmpu (br1.m_size_in_bytes, br2.m_size_in_bytes);
+}
+
/* class concrete_binding : public binding_key. */
/* Implementation of binding_key::dump_to_pp vfunc for concrete_binding. */
void
-concrete_binding::dump_to_pp (pretty_printer *pp, bool simple) const
+concrete_binding::dump_to_pp (pretty_printer *pp, bool) const
{
- binding_key::dump_to_pp (pp, simple);
- pp_string (pp, ", start: ");
- pp_wide_int (pp, m_start_bit_offset, SIGNED);
- pp_string (pp, ", size: ");
- pp_wide_int (pp, m_size_in_bits, SIGNED);
- pp_string (pp, ", next: ");
- pp_wide_int (pp, get_next_bit_offset (), SIGNED);
+ m_bit_range.dump_to_pp (pp);
}
/* Return true if this binding overlaps with OTHER. */
@@ -202,7 +453,7 @@ concrete_binding::dump_to_pp (pretty_printer *pp, bool simple) const
bool
concrete_binding::overlaps_p (const concrete_binding &other) const
{
- if (m_start_bit_offset < other.get_next_bit_offset ()
+ if (get_start_bit_offset () < other.get_next_bit_offset ()
&& get_next_bit_offset () > other.get_start_bit_offset ())
return true;
return false;
@@ -216,13 +467,7 @@ concrete_binding::cmp_ptr_ptr (const void *p1, const void *p2)
const concrete_binding *b1 = *(const concrete_binding * const *)p1;
const concrete_binding *b2 = *(const concrete_binding * const *)p2;
- if (int kind_cmp = b1->get_kind () - b2->get_kind ())
- return kind_cmp;
-
- if (int start_cmp = wi::cmps (b1->m_start_bit_offset, b2->m_start_bit_offset))
- return start_cmp;
-
- return wi::cmpu (b1->m_size_in_bits, b2->m_size_in_bits);
+ return bit_range::cmp (b1->m_bit_range, b2->m_bit_range);
}
/* class symbolic_binding : public binding_key. */
@@ -230,8 +475,8 @@ concrete_binding::cmp_ptr_ptr (const void *p1, const void *p2)
void
symbolic_binding::dump_to_pp (pretty_printer *pp, bool simple) const
{
- binding_key::dump_to_pp (pp, simple);
- pp_string (pp, ", region: ");
+ pp_string (pp, "region: ");
m_region->dump_to_pp (pp, simple);
}
@@ -243,9 +488,6 @@ symbolic_binding::cmp_ptr_ptr (const void *p1, const void *p2)
const symbolic_binding *b1 = *(const symbolic_binding * const *)p1;
const symbolic_binding *b2 = *(const symbolic_binding * const *)p2;
- if (int kind_cmp = b1->get_kind () - b2->get_kind ())
- return kind_cmp;
-
return region::cmp_ids (b1->get_region (), b2->get_region ());
}
@@ -594,8 +836,7 @@ binding_map::apply_ctor_val_to_range (const region *parent_reg,
return false;
bit_offset_t start_bit_offset = min_offset.get_bit_offset ();
store_manager *smgr = mgr->get_store_manager ();
- const binding_key *max_element_key
- = binding_key::make (smgr, max_element, BK_direct);
+ const binding_key *max_element_key = binding_key::make (smgr, max_element);
if (max_element_key->symbolic_p ())
return false;
const concrete_binding *max_element_ckey
@@ -603,8 +844,7 @@ binding_map::apply_ctor_val_to_range (const region *parent_reg,
bit_size_t range_size_in_bits
= max_element_ckey->get_next_bit_offset () - start_bit_offset;
const concrete_binding *range_key
- = smgr->get_concrete_binding (start_bit_offset, range_size_in_bits,
- BK_direct);
+ = smgr->get_concrete_binding (start_bit_offset, range_size_in_bits);
if (range_key->symbolic_p ())
return false;
@@ -636,8 +876,7 @@ binding_map::apply_ctor_pair_to_child_region (const region *parent_reg,
{
const svalue *sval = get_svalue_for_ctor_val (val, mgr);
const binding_key *k
- = binding_key::make (mgr->get_store_manager (), child_reg,
- BK_direct);
+ = binding_key::make (mgr->get_store_manager (), child_reg);
/* Handle the case where we have an unknown size for child_reg
(e.g. due to it being a trailing field with incomplete array
	 type).  */
@@ -661,7 +900,7 @@ binding_map::apply_ctor_pair_to_child_region (const region *parent_reg,
- parent_base_offset.get_bit_offset ());
/* Create a concrete key for the child within the parent. */
k = mgr->get_store_manager ()->get_concrete_binding
- (child_parent_offset, sval_bit_size, BK_direct);
+ (child_parent_offset, sval_bit_size);
}
gcc_assert (k->concrete_p ());
put (k, sval);
@@ -669,6 +908,166 @@ binding_map::apply_ctor_pair_to_child_region (const region *parent_reg,
}
}
+/* Populate OUT with all bindings within this map that overlap KEY. */
+
+void
+binding_map::get_overlapping_bindings (const binding_key *key,
+ auto_vec<const binding_key *> *out)
+{
+ for (auto iter : *this)
+ {
+ const binding_key *iter_key = iter.first;
+ if (const concrete_binding *ckey
+ = key->dyn_cast_concrete_binding ())
+ {
+ if (const concrete_binding *iter_ckey
+ = iter_key->dyn_cast_concrete_binding ())
+ {
+ if (ckey->overlaps_p (*iter_ckey))
+ out->safe_push (iter_key);
+ }
+ else
+ {
+ /* Assume overlap. */
+ out->safe_push (iter_key);
+ }
+ }
+ else
+ {
+ /* Assume overlap. */
+ out->safe_push (iter_key);
+ }
+ }
+}
+
+/* Remove, truncate, and/or split any bindings within this map that
+ overlap DROP_KEY.
+
+ For example, if we have:
+
+ +------------------------------------+
+ | old binding |
+ +------------------------------------+
+
+ which is to be overwritten with:
+
+ .......+----------------------+.......
+ .......| new binding |.......
+ .......+----------------------+.......
+
+ this function "cuts a hole" out of the old binding:
+
+ +------+......................+------+
+ |prefix| hole for new binding |suffix|
+ +------+......................+------+
+
+ into which the new binding can be added without
+ overlapping the prefix or suffix.
+
+ The prefix and suffix (if added) will be bound to the pertinent
+ parts of the value of the old binding.
+
+ For example, given:
+ struct s5
+ {
+ char arr[8];
+ };
+ void test_5 (struct s5 *p)
+ {
+ struct s5 f = *p;
+ f.arr[3] = 42;
+ }
+ then after the "f = *p;" we have:
+ cluster for: f: INIT_VAL((*INIT_VAL(p_33(D))))
+ and at the "f.arr[3] = 42;" we remove the bindings overlapping
+ "f.arr[3]", replacing them with a prefix (bytes 0-2) and suffix (bytes 4-7)
+ giving:
+ cluster for: f
+ key: {bytes 0-2}
+ value: {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
+ key: {bytes 4-7}
+ value: {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
+ punching a hole into which the new value can be written at byte 3:
+ cluster for: f
+ key: {bytes 0-2}
+ value: {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
+ key: {byte 3}
+ value: 'char' {(char)42}
+ key: {bytes 4-7}
+ value: {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
+
+ If UNCERTAINTY is non-NULL, use it to record any svalues that
+ were removed, as being maybe-bound. */
+
+void
+binding_map::remove_overlapping_bindings (store_manager *mgr,
+ const binding_key *drop_key,
+ uncertainty_t *uncertainty)
+{
+ auto_vec<const binding_key *> bindings;
+ get_overlapping_bindings (drop_key, &bindings);
+
+ unsigned i;
+ const binding_key *iter_binding;
+ FOR_EACH_VEC_ELT (bindings, i, iter_binding)
+ {
+ const svalue *old_sval = get (iter_binding);
+ if (uncertainty)
+ uncertainty->on_maybe_bound_sval (old_sval);
+
+ /* Begin by removing the old binding. */
+ m_map.remove (iter_binding);
+
+ /* Now potentially add the prefix and suffix. */
+ if (const concrete_binding *drop_ckey
+ = drop_key->dyn_cast_concrete_binding ())
+ if (const concrete_binding *iter_ckey
+ = iter_binding->dyn_cast_concrete_binding ())
+ {
+ gcc_assert (drop_ckey->overlaps_p (*iter_ckey));
+
+ const bit_range &drop_bits = drop_ckey->get_bit_range ();
+ const bit_range &iter_bits = iter_ckey->get_bit_range ();
+
+ if (iter_bits.get_start_bit_offset ()
+ < drop_bits.get_start_bit_offset ())
+ {
+ /* We have a truncated prefix. */
+ bit_range prefix_bits (iter_bits.get_start_bit_offset (),
+ (drop_bits.get_start_bit_offset ()
+ - iter_bits.get_start_bit_offset ()));
+ const concrete_binding *prefix_key
+ = mgr->get_concrete_binding (prefix_bits);
+ bit_range rel_prefix (0, prefix_bits.m_size_in_bits);
+ const svalue *prefix_sval
+ = old_sval->extract_bit_range (NULL_TREE,
+ rel_prefix,
+ mgr->get_svalue_manager ());
+ m_map.put (prefix_key, prefix_sval);
+ }
+
+ if (iter_bits.get_next_bit_offset ()
+ > drop_bits.get_next_bit_offset ())
+ {
+ /* We have a truncated suffix. */
+ bit_range suffix_bits (drop_bits.get_next_bit_offset (),
+ (iter_bits.get_next_bit_offset ()
+ - drop_bits.get_next_bit_offset ()));
+ const concrete_binding *suffix_key
+ = mgr->get_concrete_binding (suffix_bits);
+ bit_range rel_suffix (drop_bits.get_next_bit_offset ()
+ - iter_bits.get_start_bit_offset (),
+ suffix_bits.m_size_in_bits);
+ const svalue *suffix_sval
+ = old_sval->extract_bit_range (NULL_TREE,
+ rel_suffix,
+ mgr->get_svalue_manager ());
+ m_map.put (suffix_key, suffix_sval);
+ }
+ }
+ }
+}
+
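To make the prefix/suffix arithmetic concrete for the test_5 example above: dropping {byte 3} (bits 24-31) out of an old binding covering bits 0-63 computes

    prefix_bits = bit_range (0, 24 - 0)     /* bytes 0-2 */
    rel_prefix  = bit_range (0, 24)         /* prefix, relative to the old value */
    suffix_bits = bit_range (32, 64 - 32)   /* bytes 4-7 */
    rel_suffix  = bit_range (32 - 0, 32)    /* suffix, relative to the old value */

which yields exactly the {bytes 0-2} and {bytes 4-7} keys shown in the comment.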
/* class binding_cluster. */
/* binding_cluster's copy ctor. */
@@ -781,6 +1180,27 @@ binding_cluster::dump (bool simple) const
pp_flush (&pp);
}
+/* Assert that this object is valid. */
+
+void
+binding_cluster::validate () const
+{
+ int num_symbolic = 0;
+ int num_concrete = 0;
+ for (auto iter : m_map)
+ {
+ if (iter.first->symbolic_p ())
+ num_symbolic++;
+ else
+ num_concrete++;
+ }
+ /* We shouldn't have more than one symbolic key per cluster
+ (or one would have clobbered the other). */
+ gcc_assert (num_symbolic < 2);
+ /* We can't have both concrete and symbolic keys. */
+ gcc_assert (num_concrete == 0 || num_symbolic == 0);
+}
+
/* Return a new json::object of the form
{"escaped": true/false,
"touched": true/false,
@@ -803,8 +1223,7 @@ binding_cluster::to_json () const
void
binding_cluster::bind (store_manager *mgr,
- const region *reg, const svalue *sval,
- binding_kind kind)
+ const region *reg, const svalue *sval)
{
if (const compound_svalue *compound_sval
= sval->dyn_cast_compound_svalue ())
@@ -813,7 +1232,7 @@ binding_cluster::bind (store_manager *mgr,
return;
}
- const binding_key *binding = binding_key::make (mgr, reg, kind);
+ const binding_key *binding = binding_key::make (mgr, reg);
bind_key (binding, sval);
}
@@ -862,8 +1281,7 @@ binding_cluster::bind_compound_sval (store_manager *mgr,
+ reg_offset.get_bit_offset ());
const concrete_binding *effective_concrete_key
= mgr->get_concrete_binding (effective_start,
- concrete_key->get_size_in_bits (),
- iter_key->get_kind ());
+ concrete_key->get_size_in_bits ());
bind_key (effective_concrete_key, iter_sval);
}
else
@@ -876,7 +1294,7 @@ binding_cluster::bind_compound_sval (store_manager *mgr,
void
binding_cluster::clobber_region (store_manager *mgr, const region *reg)
{
- remove_overlapping_bindings (mgr, reg);
+ remove_overlapping_bindings (mgr, reg, NULL);
}
/* Remove any bindings for REG within this cluster. */
@@ -886,46 +1304,89 @@ binding_cluster::purge_region (store_manager *mgr, const region *reg)
{
gcc_assert (reg->get_kind () == RK_DECL);
const binding_key *binding
- = binding_key::make (mgr, const_cast<region *> (reg),
- BK_direct);
+ = binding_key::make (mgr, const_cast<region *> (reg));
m_map.remove (binding);
}
-/* Mark REG within this cluster as being filled with zeroes.
- Remove all bindings, add a default binding to zero, and clear the
- TOUCHED flag. */
+/* Clobber REG and fill it with repeated copies of SVAL. */
void
-binding_cluster::zero_fill_region (store_manager *mgr, const region *reg)
+binding_cluster::fill_region (store_manager *mgr,
+ const region *reg,
+ const svalue *sval)
{
clobber_region (mgr, reg);
- /* Add a default binding to zero. */
region_model_manager *sval_mgr = mgr->get_svalue_manager ();
- tree cst_zero = build_int_cst (integer_type_node, 0);
- const svalue *cst_sval = sval_mgr->get_or_create_constant_svalue (cst_zero);
- const svalue *bound_sval = cst_sval;
- if (reg->get_type ())
- bound_sval = sval_mgr->get_or_create_unaryop (reg->get_type (), NOP_EXPR,
- cst_sval);
- bind (mgr, reg, bound_sval, BK_default);
+ const svalue *byte_size_sval = reg->get_byte_size_sval (sval_mgr);
+ const svalue *fill_sval
+ = sval_mgr->get_or_create_repeated_svalue (reg->get_type (),
+ byte_size_sval, sval);
+ bind (mgr, reg, fill_sval);
+}
+
+/* Clobber REG within this cluster and fill it with zeroes. */
- m_touched = false;
+void
+binding_cluster::zero_fill_region (store_manager *mgr, const region *reg)
+{
+ region_model_manager *sval_mgr = mgr->get_svalue_manager ();
+ const svalue *zero_sval = sval_mgr->get_or_create_int_cst (char_type_node, 0);
+ fill_region (mgr, reg, zero_sval);
}
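Generalizing zero-fill to an arbitrary repeated byte presumably supports modeling memset-like operations with nonzero fill values (the exact caller is an assumption here); illustrative user code:

    #include <string.h>
    void f (void)
    {
      char buf[16];
      memset (buf, 0xff, sizeof buf);  /* buf's cluster would hold a repeated
                                          svalue of (char)0xff spanning the
                                          16-byte region */
    }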
-/* Mark REG within this cluster as being unknown. */
+/* Mark REG within this cluster as being unknown.
+ If UNCERTAINTY is non-NULL, use it to record any svalues that
+ had bindings to them removed, as being maybe-bound. */
void
binding_cluster::mark_region_as_unknown (store_manager *mgr,
- const region *reg)
+ const region *reg,
+ uncertainty_t *uncertainty)
{
- remove_overlapping_bindings (mgr, reg);
+ remove_overlapping_bindings (mgr, reg, uncertainty);
/* Add a default binding to "unknown". */
region_model_manager *sval_mgr = mgr->get_svalue_manager ();
const svalue *sval
= sval_mgr->get_or_create_unknown_svalue (reg->get_type ());
- bind (mgr, reg, sval, BK_default);
+ bind (mgr, reg, sval);
+}
+
+/* Purge state involving SVAL. */
+
+void
+binding_cluster::purge_state_involving (const svalue *sval,
+ region_model_manager *sval_mgr)
+{
+ auto_vec<const binding_key *> to_remove;
+ auto_vec<std::pair<const binding_key *, tree> > to_make_unknown;
+ for (auto iter : m_map)
+ {
+ const binding_key *iter_key = iter.first;
+ if (const symbolic_binding *symbolic_key
+ = iter_key->dyn_cast_symbolic_binding ())
+ {
+ const region *reg = symbolic_key->get_region ();
+ if (reg->involves_p (sval))
+ to_remove.safe_push (iter_key);
+ }
+ const svalue *iter_sval = iter.second;
+ if (iter_sval->involves_p (sval))
+ to_make_unknown.safe_push (std::make_pair (iter_key,
+ iter_sval->get_type ()));
+ }
+ for (auto iter : to_remove)
+ {
+ m_map.remove (iter);
+ m_touched = true;
+ }
+ for (auto iter : to_make_unknown)
+ {
+ const svalue *new_sval
+ = sval_mgr->get_or_create_unknown_svalue (iter.second);
+ m_map.put (iter.first, new_sval);
+ }
}
/* Get any SVAL bound to REG within this cluster,
@@ -933,10 +1394,9 @@ binding_cluster::mark_region_as_unknown (store_manager *mgr,
const svalue *
binding_cluster::get_binding (store_manager *mgr,
- const region *reg,
- binding_kind kind) const
+ const region *reg) const
{
- const binding_key *reg_binding = binding_key::make (mgr, reg, kind);
+ const binding_key *reg_binding = binding_key::make (mgr, reg);
const svalue *sval = m_map.get (reg_binding);
if (sval)
{
@@ -954,7 +1414,7 @@ binding_cluster::get_binding (store_manager *mgr,
while (const region *parent_reg = reg->get_parent_region ())
{
const binding_key *parent_reg_binding
- = binding_key::make (mgr, parent_reg, kind);
+ = binding_key::make (mgr, parent_reg);
if (parent_reg_binding == reg_binding
&& sval->get_type ()
&& reg->get_type ()
@@ -975,7 +1435,7 @@ binding_cluster::get_binding (store_manager *mgr,
FOR_EACH_VEC_ELT_REVERSE (regions, i, iter_reg)
{
region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
- sval = rmm_mgr->get_or_create_sub_svalue (reg->get_type (),
+ sval = rmm_mgr->get_or_create_sub_svalue (iter_reg->get_type (),
sval, iter_reg);
}
}
@@ -983,21 +1443,20 @@ binding_cluster::get_binding (store_manager *mgr,
return sval;
}
-/* Get any SVAL bound to REG within this cluster via kind KIND,
+/* Get any SVAL bound to REG within this cluster,
either directly for REG, or recursively checking for bindings within
parent regions and extracting subvalues if need be. */
const svalue *
binding_cluster::get_binding_recursive (store_manager *mgr,
- const region *reg,
- enum binding_kind kind) const
+ const region *reg) const
{
- if (const svalue *sval = get_binding (mgr, reg, kind))
+ if (const svalue *sval = get_binding (mgr, reg))
return sval;
if (reg != m_base_region)
if (const region *parent_reg = reg->get_parent_region ())
if (const svalue *parent_sval
- = get_binding_recursive (mgr, parent_reg, kind))
+ = get_binding_recursive (mgr, parent_reg))
{
/* Extract child svalue from parent svalue. */
region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
@@ -1013,18 +1472,11 @@ const svalue *
binding_cluster::get_any_binding (store_manager *mgr,
const region *reg) const
{
- /* Look for a "direct" binding. */
+ /* Look for a direct binding. */
if (const svalue *direct_sval
- = get_binding_recursive (mgr, reg, BK_direct))
+ = get_binding_recursive (mgr, reg))
return direct_sval;
- /* Look for a "default" binding, but not if there's been a symbolic
- write. */
- if (!m_touched)
- if (const svalue *default_sval
- = get_binding_recursive (mgr, reg, BK_default))
- return default_sval;
-
/* If this cluster has been touched by a symbolic write, then the content
of any subregion not currently specifically bound is "UNKNOWN". */
if (m_touched)
@@ -1033,6 +1485,16 @@ binding_cluster::get_any_binding (store_manager *mgr,
return rmm_mgr->get_or_create_unknown_svalue (reg->get_type ());
}
+ /* Alternatively, if this is a symbolic read and the cluster has any bindings,
+ then we don't know if we're reading those values or not, so the result
+ is also "UNKNOWN". */
+ if (reg->get_offset ().symbolic_p ()
+ && m_map.elements () > 0)
+ {
+ region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
+ return rmm_mgr->get_or_create_unknown_svalue (reg->get_type ());
+ }
+
if (const svalue *compound_sval = maybe_get_compound_binding (mgr, reg))
return compound_sval;
@@ -1055,8 +1517,6 @@ const svalue *
binding_cluster::maybe_get_compound_binding (store_manager *mgr,
const region *reg) const
{
- binding_map map;
-
region_offset cluster_offset = m_base_region->get_offset ();
if (cluster_offset.symbolic_p ())
return NULL;
@@ -1064,6 +1524,36 @@ binding_cluster::maybe_get_compound_binding (store_manager *mgr,
if (reg_offset.symbolic_p ())
return NULL;
+ region_model_manager *sval_mgr = mgr->get_svalue_manager ();
+
+ /* We will build the result map in two parts:
+ (a) result_map, holding the concrete keys from this cluster,
+
+ (b) default_map, holding the initial values for the region
+ (e.g. uninitialized, initializer values, or zero), unless this
+ cluster has been touched.
+
+ We will populate (a), and as we do, clobber (b), trimming and
+ splitting its bindings as necessary.
+ Finally, we will merge (b) into (a), giving a concrete map
+ that merges both the initial values and the bound values from
+ the binding_cluster.
+ Doing it this way reduces N for the O(N^2) intersection-finding;
+ perhaps we should have a spatially-organized data structure for
+ concrete keys, though. */
+
+ binding_map result_map;
+ binding_map default_map;
+
+ /* Set up default values in default_map. */
+ const svalue *default_sval;
+ if (m_touched)
+ default_sval = sval_mgr->get_or_create_unknown_svalue (reg->get_type ());
+ else
+ default_sval = sval_mgr->get_or_create_initial_value (reg);
+ const binding_key *default_key = binding_key::make (mgr, reg);
+ default_map.put (default_key, default_sval);
+
for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
{
const binding_key *key = (*iter).first;
@@ -1072,101 +1562,108 @@ binding_cluster::maybe_get_compound_binding (store_manager *mgr,
if (const concrete_binding *concrete_key
= key->dyn_cast_concrete_binding ())
{
- /* Skip bindings that are outside the bit range of REG. */
- if (concrete_key->get_start_bit_offset ()
- < reg_offset.get_bit_offset ())
- continue;
- bit_size_t reg_bit_size;
- if (reg->get_bit_size (&reg_bit_size))
- if (concrete_key->get_start_bit_offset ()
- >= reg_offset.get_bit_offset () + reg_bit_size)
- continue;
+ const bit_range &bound_range = concrete_key->get_bit_range ();
- /* Get offset of KEY relative to REG, rather than to
- the cluster. */
- bit_offset_t relative_start
- = (concrete_key->get_start_bit_offset ()
- - reg_offset.get_bit_offset ());
- const concrete_binding *offset_concrete_key
- = mgr->get_concrete_binding (relative_start,
- concrete_key->get_size_in_bits (),
- key->get_kind ());
- map.put (offset_concrete_key, sval);
- }
- else
- return NULL;
- }
-
- if (map.elements () == 0)
- return NULL;
+ bit_size_t reg_bit_size;
+ if (!reg->get_bit_size (&reg_bit_size))
+ return NULL;
- region_model_manager *sval_mgr = mgr->get_svalue_manager ();
- return sval_mgr->get_or_create_compound_svalue (reg->get_type (), map);
-}
+ bit_range reg_range (reg_offset.get_bit_offset (),
+ reg_bit_size);
+ /* Skip bindings that are outside the bit range of REG. */
+ if (!bound_range.intersects_p (reg_range))
+ continue;
-/* Populate OUT with all bindings within this cluster that overlap REG. */
+ /* We shouldn't have an exact match; that should have been
+ handled already. */
+ gcc_assert (!(reg_range == bound_range));
-void
-binding_cluster::get_overlapping_bindings (store_manager *mgr,
- const region *reg,
- auto_vec<const binding_key *> *out)
-{
- const binding_key *binding
- = binding_key::make (mgr, reg, BK_direct);
- for (map_t::iterator iter = m_map.begin ();
- iter != m_map.end (); ++iter)
- {
- const binding_key *iter_key = (*iter).first;
- if (const concrete_binding *ckey
- = binding->dyn_cast_concrete_binding ())
- {
- if (const concrete_binding *iter_ckey
- = iter_key->dyn_cast_concrete_binding ())
+ bit_range subrange (0, 0);
+ if (reg_range.contains_p (bound_range, &subrange))
{
- if (ckey->overlaps_p (*iter_ckey))
- out->safe_push (iter_key);
+ /* We have a bound range fully within REG.
+ Add it to the map, offsetting accordingly. */
+
+ /* Get offset of KEY relative to REG, rather than to
+ the cluster. */
+ const concrete_binding *offset_concrete_key
+ = mgr->get_concrete_binding (subrange);
+ result_map.put (offset_concrete_key, sval);
+
+ /* Clobber default_map, removing/trimming/splitting where
+ it overlaps with offset_concrete_key. */
+ default_map.remove_overlapping_bindings (mgr,
+ offset_concrete_key,
+ NULL);
+ }
+ else if (bound_range.contains_p (reg_range, &subrange))
+ {
+ /* REG is fully within the bound range, but
+ is not equal to it; we're extracting a subvalue. */
+ return sval->extract_bit_range (reg->get_type (),
+ subrange,
+ mgr->get_svalue_manager ());
}
else
{
- /* Assume overlap. */
- out->safe_push (iter_key);
+ /* REG and the bound range partially overlap. */
+ bit_range reg_subrange (0, 0);
+ bit_range bound_subrange (0, 0);
+ reg_range.intersects_p (bound_range,
+ &reg_subrange, &bound_subrange);
+
+ /* Get the bits from the bound value for the bits at the
+ intersection (relative to the bound value). */
+ const svalue *overlap_sval
+ = sval->extract_bit_range (NULL_TREE,
+ bound_subrange,
+ mgr->get_svalue_manager ());
+
+ /* Get key for overlap, relative to the REG. */
+ const concrete_binding *overlap_concrete_key
+ = mgr->get_concrete_binding (reg_subrange);
+ result_map.put (overlap_concrete_key, overlap_sval);
+
+ /* Clobber default_map, removing/trimming/splitting where
+ it overlaps with overlap_concrete_key. */
+ default_map.remove_overlapping_bindings (mgr,
+ overlap_concrete_key,
+ NULL);
}
}
else
- {
- /* Assume overlap. */
- out->safe_push (iter_key);
- }
+ /* Can't handle symbolic bindings. */
+ return NULL;
+ }
+
+ if (result_map.elements () == 0)
+ return NULL;
+
+ /* Merge any bindings from default_map into result_map. */
+ for (auto iter : default_map)
+ {
+ const binding_key *key = iter.first;
+ const svalue *sval = iter.second;
+ result_map.put (key, sval);
}
+
+ return sval_mgr->get_or_create_compound_svalue (reg->get_type (), result_map);
}
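An illustrative (not from the patch) example of the kind of read the result_map/default_map split handles:

    struct pt { int x; int y; };
    void f (struct pt *out, struct pt p)
    {
      p.x = 42;  /* concrete binding for the bits of p.x */
      *out = p;  /* compound read of p: result_map supplies {x: 42}, while
                    default_map supplies the initial value of p.y, trimmed
                    so that it no longer overlaps the binding for x */
    }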
-/* Remove any bindings within this cluster that overlap REG,
- but retain default bindings that overlap but aren't fully covered
- by REG. */
+/* Remove, truncate, and/or split any bindings within this cluster that
+ overlap REG.
+ If UNCERTAINTY is non-NULL, use it to record any svalues that
+ were removed, as being maybe-bound. */
void
binding_cluster::remove_overlapping_bindings (store_manager *mgr,
- const region *reg)
+ const region *reg,
+ uncertainty_t *uncertainty)
{
- auto_vec<const binding_key *> bindings;
- get_overlapping_bindings (mgr, reg, &bindings);
+ const binding_key *reg_binding = binding_key::make (mgr, reg);
- unsigned i;
- const binding_key *iter_binding;
- FOR_EACH_VEC_ELT (bindings, i, iter_binding)
- {
- /* Don't remove default bindings, unless the default binding
- is fully covered by REG. */
- if (iter_binding->get_kind () == BK_default)
- {
- const binding_key *reg_binding
- = binding_key::make (mgr, reg, BK_default);
- if (reg_binding != iter_binding)
- continue;
- }
- m_map.remove (iter_binding);
- }
+ m_map.remove_overlapping_bindings (mgr, reg_binding, uncertainty);
}
/* Attempt to merge CLUSTER_A and CLUSTER_B into OUT_CLUSTER, using
@@ -1227,6 +1724,8 @@ binding_cluster::can_merge_p (const binding_cluster *cluster_a,
const binding_key *key_b = (*iter_b).first;
keys.add (key_b);
}
+ int num_symbolic_keys = 0;
+ int num_concrete_keys = 0;
for (hash_set<const binding_key *>::iterator iter = keys.begin ();
iter != keys.end (); ++iter)
{
@@ -1234,6 +1733,11 @@ binding_cluster::can_merge_p (const binding_cluster *cluster_a,
const svalue *sval_a = cluster_a->get_any_value (key);
const svalue *sval_b = cluster_b->get_any_value (key);
+ if (key->symbolic_p ())
+ num_symbolic_keys++;
+ else
+ num_concrete_keys++;
+
if (sval_a == sval_b)
{
gcc_assert (sval_a);
@@ -1262,29 +1766,15 @@ binding_cluster::can_merge_p (const binding_cluster *cluster_a,
out_cluster->m_map.put (key, unknown_sval);
}
- /* Handle the case where we get a default binding from one and a direct
- binding from the other. */
- auto_vec<const concrete_binding *> duplicate_keys;
- for (map_t::iterator iter = out_cluster->m_map.begin ();
- iter != out_cluster->m_map.end (); ++iter)
- {
- const concrete_binding *ckey
- = (*iter).first->dyn_cast_concrete_binding ();
- if (!ckey)
- continue;
- if (ckey->get_kind () != BK_direct)
- continue;
- const concrete_binding *def_ckey
- = mgr->get_concrete_binding (ckey->get_start_bit_offset (),
- ckey->get_size_in_bits (),
- BK_default);
- if (out_cluster->m_map.get (def_ckey))
- duplicate_keys.safe_push (def_ckey);
+ /* We can have at most one symbolic key per cluster,
+ and if we do, we can't have any concrete keys.
+ If this happens, mark the cluster as touched, with no keys. */
+ if (num_symbolic_keys >= 2
+ || (num_concrete_keys > 0 && num_symbolic_keys > 0))
+ {
+ out_cluster->m_touched = true;
+ out_cluster->m_map.empty ();
}
- unsigned i;
- const concrete_binding *key;
- FOR_EACH_VEC_ELT (duplicate_keys, i, key)
- out_cluster->m_map.remove (key);
/* We don't handle other kinds of overlaps yet. */
@@ -1357,12 +1847,29 @@ binding_cluster::on_unknown_fncall (const gcall *call,
const svalue *sval
= mgr->get_svalue_manager ()->get_or_create_conjured_svalue
(m_base_region->get_type (), call, m_base_region);
- bind (mgr, m_base_region, sval, BK_direct);
+ bind (mgr, m_base_region, sval);
m_touched = true;
}
}
+/* Mark this cluster as having been clobbered by STMT. */
+
+void
+binding_cluster::on_asm (const gasm *stmt,
+ store_manager *mgr)
+{
+ m_map.empty ();
+
+ /* Bind it to a new "conjured" value using STMT. */
+ const svalue *sval
+ = mgr->get_svalue_manager ()->get_or_create_conjured_svalue
+ (m_base_region->get_type (), stmt, m_base_region);
+ bind (mgr, m_base_region, sval);
+
+ m_touched = true;
+}
+
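Presumably this is reached (via the new region-model-asm.cc) for asm statements that can write memory; an illustrative trigger:

    int g;
    void f (void)
    {
      g = 42;
      asm volatile ("" ::: "memory");  /* g's cluster is emptied and rebound
                                          to a conjured svalue for this stmt,
                                          and marked as touched */
    }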
/* Return true if this binding_cluster has no information,
i.e. if there are no bindings, and it hasn't been marked as having
escaped, or touched symbolically. */
@@ -1464,7 +1971,7 @@ binding_cluster::maybe_get_simple_value (store_manager *mgr) const
if (m_map.elements () != 1)
return NULL;
- const binding_key *key = binding_key::make (mgr, m_base_region, BK_direct);
+ const binding_key *key = binding_key::make (mgr, m_base_region);
return get_any_value (key);
}
@@ -1474,10 +1981,9 @@ binding_cluster::maybe_get_simple_value (store_manager *mgr) const
const concrete_binding *
store_manager::get_concrete_binding (bit_offset_t start_bit_offset,
- bit_offset_t size_in_bits,
- enum binding_kind kind)
+ bit_offset_t size_in_bits)
{
- concrete_binding b (start_bit_offset, size_in_bits, kind);
+ concrete_binding b (start_bit_offset, size_in_bits);
if (concrete_binding *existing = m_concrete_binding_key_mgr.get (b))
return existing;
@@ -1487,10 +1993,9 @@ store_manager::get_concrete_binding (bit_offset_t start_bit_offset,
}
const symbolic_binding *
-store_manager::get_symbolic_binding (const region *reg,
- enum binding_kind kind)
+store_manager::get_symbolic_binding (const region *reg)
{
- symbolic_binding b (reg, kind);
+ symbolic_binding b (reg);
if (symbolic_binding *existing = m_symbolic_binding_key_mgr.get (b))
return existing;
@@ -1751,6 +2256,15 @@ store::dump (bool simple) const
pp_flush (&pp);
}
+/* Assert that this object is valid. */
+
+void
+store::validate () const
+{
+ for (auto iter : m_cluster_map)
+ iter.second->validate ();
+}
+
/* Return a new json::object of the form
{PARENT_REGION_DESC: {BASE_REGION_DESC: object for binding_map,
... for each cluster within parent region},
@@ -1826,7 +2340,8 @@ store::get_any_binding (store_manager *mgr, const region *reg) const
void
store::set_value (store_manager *mgr, const region *lhs_reg,
- const svalue *rhs_sval, enum binding_kind kind)
+ const svalue *rhs_sval,
+ uncertainty_t *uncertainty)
{
remove_overlapping_bindings (mgr, lhs_reg);
@@ -1852,7 +2367,7 @@ store::set_value (store_manager *mgr, const region *lhs_reg,
else
{
lhs_cluster = get_or_create_cluster (lhs_base_reg);
- lhs_cluster->bind (mgr, lhs_reg, rhs_sval, kind);
+ lhs_cluster->bind (mgr, lhs_reg, rhs_sval);
}
/* Bindings to a cluster can affect other clusters if a symbolic
@@ -1880,7 +2395,8 @@ store::set_value (store_manager *mgr, const region *lhs_reg,
gcc_unreachable ();
case tristate::TS_UNKNOWN:
- iter_cluster->mark_region_as_unknown (mgr, iter_base_reg);
+ iter_cluster->mark_region_as_unknown (mgr, iter_base_reg,
+ uncertainty);
break;
case tristate::TS_TRUE:
@@ -2006,28 +2522,62 @@ store::purge_region (store_manager *mgr, const region *reg)
}
}
-/* Zero-fill REG. */
+/* Fill REG with SVAL. */
void
-store::zero_fill_region (store_manager *mgr, const region *reg)
+store::fill_region (store_manager *mgr, const region *reg, const svalue *sval)
{
const region *base_reg = reg->get_base_region ();
if (base_reg->symbolic_for_unknown_ptr_p ())
return;
binding_cluster *cluster = get_or_create_cluster (base_reg);
- cluster->zero_fill_region (mgr, reg);
+ cluster->fill_region (mgr, reg, sval);
+}
+
+/* Zero-fill REG. */
+
+void
+store::zero_fill_region (store_manager *mgr, const region *reg)
+{
+ region_model_manager *sval_mgr = mgr->get_svalue_manager ();
+ const svalue *zero_sval = sval_mgr->get_or_create_int_cst (char_type_node, 0);
+ fill_region (mgr, reg, zero_sval);
}
/* Mark REG as having unknown content. */
void
-store::mark_region_as_unknown (store_manager *mgr, const region *reg)
+store::mark_region_as_unknown (store_manager *mgr, const region *reg,
+ uncertainty_t *uncertainty)
{
const region *base_reg = reg->get_base_region ();
if (base_reg->symbolic_for_unknown_ptr_p ())
return;
binding_cluster *cluster = get_or_create_cluster (base_reg);
- cluster->mark_region_as_unknown (mgr, reg);
+ cluster->mark_region_as_unknown (mgr, reg, uncertainty);
+}
+
+/* Purge state involving SVAL. */
+
+void
+store::purge_state_involving (const svalue *sval,
+ region_model_manager *sval_mgr)
+{
+ auto_vec <const region *> base_regs_to_purge;
+ for (auto iter : m_cluster_map)
+ {
+ const region *base_reg = iter.first;
+ if (base_reg->involves_p (sval))
+ base_regs_to_purge.safe_push (base_reg);
+ else
+ {
+ binding_cluster *cluster = iter.second;
+ cluster->purge_state_involving (sval, sval_mgr);
+ }
+ }
+
+ for (auto iter : base_regs_to_purge)
+ purge_cluster (iter);
}
/* Get the cluster for BASE_REG, or NULL (const version). */
@@ -2238,7 +2788,7 @@ store::remove_overlapping_bindings (store_manager *mgr, const region *reg)
delete cluster;
return;
}
- cluster->remove_overlapping_bindings (mgr, reg);
+ cluster->remove_overlapping_bindings (mgr, reg, NULL);
}
}
@@ -2366,6 +2916,146 @@ store::loop_replay_fixup (const store *other_store,
namespace selftest {
+/* Verify that bit_range::intersects_p works as expected. */
+
+static void
+test_bit_range_intersects_p ()
+{
+ bit_range b0 (0, 1);
+ bit_range b1 (1, 1);
+ bit_range b2 (2, 1);
+ bit_range b3 (3, 1);
+ bit_range b4 (4, 1);
+ bit_range b5 (5, 1);
+ bit_range b6 (6, 1);
+ bit_range b7 (7, 1);
+ bit_range b1_to_6 (1, 6);
+ bit_range b0_to_7 (0, 8);
+ bit_range b3_to_5 (3, 3);
+ bit_range b6_to_7 (6, 2);
+
+ /* Self-intersection is true. */
+ ASSERT_TRUE (b0.intersects_p (b0));
+ ASSERT_TRUE (b7.intersects_p (b7));
+ ASSERT_TRUE (b1_to_6.intersects_p (b1_to_6));
+ ASSERT_TRUE (b0_to_7.intersects_p (b0_to_7));
+
+ ASSERT_FALSE (b0.intersects_p (b1));
+ ASSERT_FALSE (b1.intersects_p (b0));
+ ASSERT_FALSE (b0.intersects_p (b7));
+ ASSERT_FALSE (b7.intersects_p (b0));
+
+ ASSERT_TRUE (b0_to_7.intersects_p (b0));
+ ASSERT_TRUE (b0_to_7.intersects_p (b7));
+ ASSERT_TRUE (b0.intersects_p (b0_to_7));
+ ASSERT_TRUE (b7.intersects_p (b0_to_7));
+
+ ASSERT_FALSE (b0.intersects_p (b1_to_6));
+ ASSERT_FALSE (b1_to_6.intersects_p (b0));
+ ASSERT_TRUE (b1.intersects_p (b1_to_6));
+ ASSERT_TRUE (b1_to_6.intersects_p (b1));
+ ASSERT_TRUE (b1_to_6.intersects_p (b6));
+ ASSERT_FALSE (b1_to_6.intersects_p (b7));
+
+ ASSERT_TRUE (b1_to_6.intersects_p (b0_to_7));
+ ASSERT_TRUE (b0_to_7.intersects_p (b1_to_6));
+
+ ASSERT_FALSE (b3_to_5.intersects_p (b6_to_7));
+ ASSERT_FALSE (b6_to_7.intersects_p (b3_to_5));
+
+ bit_range r1 (0,0);
+ bit_range r2 (0,0);
+ ASSERT_TRUE (b1_to_6.intersects_p (b0_to_7, &r1, &r2));
+ ASSERT_EQ (r1.get_start_bit_offset (), 0);
+ ASSERT_EQ (r1.m_size_in_bits, 6);
+ ASSERT_EQ (r2.get_start_bit_offset (), 1);
+ ASSERT_EQ (r2.m_size_in_bits, 6);
+
+ ASSERT_TRUE (b0_to_7.intersects_p (b1_to_6, &r1, &r2));
+ ASSERT_EQ (r1.get_start_bit_offset (), 1);
+ ASSERT_EQ (r1.m_size_in_bits, 6);
+ ASSERT_EQ (r2.get_start_bit_offset (), 0);
+ ASSERT_EQ (r2.m_size_in_bits, 6);
+}
+
+/* Implementation detail of ASSERT_BIT_RANGE_FROM_MASK_EQ. */
+
+static void
+assert_bit_range_from_mask_eq (const location &loc,
+ unsigned HOST_WIDE_INT mask,
+ const bit_range &expected)
+{
+ bit_range actual (0, 0);
+ bool ok = bit_range::from_mask (mask, &actual);
+ ASSERT_TRUE_AT (loc, ok);
+ ASSERT_EQ_AT (loc, actual, expected);
+}
+
+/* Assert that bit_range::from_mask (MASK) returns true, and writes
+ out EXPECTED_BIT_RANGE. */
+
+#define ASSERT_BIT_RANGE_FROM_MASK_EQ(MASK, EXPECTED_BIT_RANGE) \
+ SELFTEST_BEGIN_STMT \
+ assert_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK, \
+ EXPECTED_BIT_RANGE); \
+ SELFTEST_END_STMT
+
+/* Implementation detail of ASSERT_NO_BIT_RANGE_FROM_MASK. */
+
+static void
+assert_no_bit_range_from_mask_eq (const location &loc,
+ unsigned HOST_WIDE_INT mask)
+{
+ bit_range actual (0, 0);
+ bool ok = bit_range::from_mask (mask, &actual);
+ ASSERT_FALSE_AT (loc, ok);
+}
+
+/* Assert that bit_range::from_mask (MASK) returns false. */
+
+#define ASSERT_NO_BIT_RANGE_FROM_MASK(MASK) \
+ SELFTEST_BEGIN_STMT \
+ assert_no_bit_range_from_mask_eq (SELFTEST_LOCATION, MASK); \
+ SELFTEST_END_STMT
+
+/* Verify that bit_range::from_mask works as expected. */
+
+static void
+test_bit_range_from_mask ()
+{
+ /* Should fail on zero. */
+ ASSERT_NO_BIT_RANGE_FROM_MASK (0);
+
+ /* Verify 1-bit masks. */
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (1, bit_range (0, 1));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (2, bit_range (1, 1));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (4, bit_range (2, 1));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (8, bit_range (3, 1));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (16, bit_range (4, 1));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (32, bit_range (5, 1));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (64, bit_range (6, 1));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (128, bit_range (7, 1));
+
+ /* Verify N-bit masks starting at bit 0. */
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (3, bit_range (0, 2));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (7, bit_range (0, 3));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (15, bit_range (0, 4));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (31, bit_range (0, 5));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (63, bit_range (0, 6));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (127, bit_range (0, 7));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (255, bit_range (0, 8));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (0xffff, bit_range (0, 16));
+
+ /* Various other tests. */
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (0x30, bit_range (4, 2));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (0x700, bit_range (8, 3));
+ ASSERT_BIT_RANGE_FROM_MASK_EQ (0x600, bit_range (9, 2));
+
+ /* Multiple ranges of set bits should fail. */
+ ASSERT_NO_BIT_RANGE_FROM_MASK (0x101);
+ ASSERT_NO_BIT_RANGE_FROM_MASK (0xf0f0f0f0);
+}
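The tests imply that from_mask succeeds only when the set bits form a single contiguous run. One way to express that check (a sketch, not necessarily how from_mask is implemented):

    static bool
    contiguous_mask_p (unsigned HOST_WIDE_INT mask)
    {
      if (mask == 0)
        return false;
      unsigned HOST_WIDE_INT shifted = mask >> ctz_hwi (mask);
      /* After shifting out the trailing zeros, a contiguous run is
         all-ones, i.e. one less than a power of two.  */
      return (shifted & (shifted + 1)) == 0;
    }

Under this check 0x30 passes (bits 4-5), while 0x101 and 0xf0f0f0f0 fail.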
+
/* Implementation detail of ASSERT_OVERLAP. */
static void
@@ -2410,26 +3100,18 @@ test_binding_key_overlap ()
store_manager mgr (NULL);
/* Various 8-bit bindings. */
- const concrete_binding *cb_0_7
- = mgr.get_concrete_binding (0, 8, BK_direct);
- const concrete_binding *cb_8_15
- = mgr.get_concrete_binding (8, 8, BK_direct);
- const concrete_binding *cb_16_23
- = mgr.get_concrete_binding (16, 8, BK_direct);
- const concrete_binding *cb_24_31
- = mgr.get_concrete_binding (24, 8, BK_direct);
+ const concrete_binding *cb_0_7 = mgr.get_concrete_binding (0, 8);
+ const concrete_binding *cb_8_15 = mgr.get_concrete_binding (8, 8);
+ const concrete_binding *cb_16_23 = mgr.get_concrete_binding (16, 8);
+ const concrete_binding *cb_24_31 = mgr.get_concrete_binding (24, 8);
/* 16-bit bindings. */
- const concrete_binding *cb_0_15
- = mgr.get_concrete_binding (0, 16, BK_direct);
- const concrete_binding *cb_8_23
- = mgr.get_concrete_binding (8, 16, BK_direct);
- const concrete_binding *cb_16_31
- = mgr.get_concrete_binding (16, 16, BK_direct);
+ const concrete_binding *cb_0_15 = mgr.get_concrete_binding (0, 16);
+ const concrete_binding *cb_8_23 = mgr.get_concrete_binding (8, 16);
+ const concrete_binding *cb_16_31 = mgr.get_concrete_binding (16, 16);
/* 32-bit binding. */
- const concrete_binding *cb_0_31
- = mgr.get_concrete_binding (0, 32, BK_direct);
+ const concrete_binding *cb_0_31 = mgr.get_concrete_binding (0, 32);
/* Everything should self-overlap. */
ASSERT_OVERLAP (cb_0_7, cb_0_7);
@@ -2464,6 +3146,8 @@ test_binding_key_overlap ()
void
analyzer_store_cc_tests ()
{
+ test_bit_range_intersects_p ();
+ test_bit_range_from_mask ();
test_binding_key_overlap ();
}
diff --git a/gcc/analyzer/store.h b/gcc/analyzer/store.h
index 2bcef6c..da82bd1 100644
--- a/gcc/analyzer/store.h
+++ b/gcc/analyzer/store.h
@@ -119,30 +119,86 @@ along with GCC; see the file COPYING3. If not see
namespace ana {
-class concrete_binding;
+/* A class for keeping track of aspects of a program_state that we don't
+ know about, to avoid false positives about leaks.
+
+ Consider:
+
+ p->field = malloc (1024);
+ q->field = NULL;
+
+ where we don't know whether or not p and q point to the same memory,
+ and:
+
+ p->field = malloc (1024);
+ unknown_fn (p);
+
+ In both cases, the svalue for the address of the allocated buffer
+ goes from being bound to p->field to not having anything explicitly bound
+ to it.
+
+ Given that we conservatively discard bindings due to possible aliasing or
+ calls to unknown functions, the store loses references to svalues,
+ but these svalues could still be live. We don't want to warn about
+ them leaking; they're effectively in a "maybe live" state.
-/* An enum for discriminating between "direct" vs "default" levels of
- mapping. */
+ This "maybe live" information is somewhat transient.
-enum binding_kind
+ We don't want to store this "maybe live" information in the program_state,
+ region_model, or store, since we don't want to bloat these objects (and
+ potentially bloat the exploded_graph with more nodes).
+ However, we can't store it in the region_model_context, as these context
+ objects sometimes don't last long enough to be around when comparing the
+ old vs the new state.
+
+ This class is a way to track a set of such svalues, so that we can
+ temporarily capture that they are in a "maybe live" state whilst
+ comparing old and new states. */
+
+class uncertainty_t
{
- /* Special-case value for hash support.
- This is the initial entry, so that hash traits can have
- empty_zero_p = true. */
- BK_empty = 0,
+public:
+ typedef hash_set<const svalue *>::iterator iterator;
- /* Special-case value for hash support. */
- BK_deleted,
+ void on_maybe_bound_sval (const svalue *sval)
+ {
+ m_maybe_bound_svals.add (sval);
+ }
+ void on_mutable_sval_at_unknown_call (const svalue *sval)
+ {
+ m_mutable_at_unknown_call_svals.add (sval);
+ }
- /* The normal kind of mapping. */
- BK_direct,
+ bool unknown_sm_state_p (const svalue *sval)
+ {
+ return (m_maybe_bound_svals.contains (sval)
+ || m_mutable_at_unknown_call_svals.contains (sval));
+ }
+
+ void dump_to_pp (pretty_printer *pp, bool simple) const;
+ void dump (bool simple) const;
- /* A lower-priority kind of mapping, for use when inheriting
- default values from a parent region. */
- BK_default
+ iterator begin_maybe_bound_svals () const
+ {
+ return m_maybe_bound_svals.begin ();
+ }
+ iterator end_maybe_bound_svals () const
+ {
+ return m_maybe_bound_svals.end ();
+ }
+
+private:
+
+ /* svalues that might or might not still be bound. */
+ hash_set<const svalue *> m_maybe_bound_svals;
+
+ /* svalues that have mutable sm-state at unknown calls. */
+ hash_set<const svalue *> m_mutable_at_unknown_call_svals;
};
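A minimal sketch of the intended flow, given the set_value signature later in this patch: writers record maybe-bound svalues into the uncertainty object, and leak detection consults it before warning:

    uncertainty_t uncertainty;
    store.set_value (mgr, lhs_reg, rhs_sval, &uncertainty);
    /* ... later, when about to report SVAL as leaked ... */
    if (uncertainty.unknown_sm_state_p (sval))
      return;  /* don't warn: SVAL may still be reachable */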
-extern const char *binding_kind_to_string (enum binding_kind kind);
+class byte_range;
+class concrete_binding;
+class symbolic_binding;
/* Abstract base class for describing ranges of bits within a binding_map
that can have svalues bound to them. */
@@ -154,10 +210,9 @@ public:
virtual bool concrete_p () const = 0;
bool symbolic_p () const { return !concrete_p (); }
- static const binding_key *make (store_manager *mgr, const region *r,
- enum binding_kind kind);
+ static const binding_key *make (store_manager *mgr, const region *r);
- virtual void dump_to_pp (pretty_printer *pp, bool simple) const;
+ virtual void dump_to_pp (pretty_printer *pp, bool simple) const = 0;
void dump (bool simple) const;
label_text get_desc (bool simple=true) const;
@@ -166,28 +221,118 @@ public:
virtual const concrete_binding *dyn_cast_concrete_binding () const
{ return NULL; }
+ virtual const symbolic_binding *dyn_cast_symbolic_binding () const
+ { return NULL; }
+};
- enum binding_kind get_kind () const { return m_kind; }
+/* A concrete range of bits. */
- void mark_deleted () { m_kind = BK_deleted; }
- void mark_empty () { m_kind = BK_empty; }
- bool is_deleted () const { return m_kind == BK_deleted; }
- bool is_empty () const { return m_kind == BK_empty; }
+struct bit_range
+{
+ bit_range (bit_offset_t start_bit_offset, bit_size_t size_in_bits)
+ : m_start_bit_offset (start_bit_offset),
+ m_size_in_bits (size_in_bits)
+ {}
-protected:
- binding_key (enum binding_kind kind) : m_kind (kind) {}
+ void dump_to_pp (pretty_printer *pp) const;
+ void dump () const;
- hashval_t impl_hash () const
+ bit_offset_t get_start_bit_offset () const
{
- return m_kind;
+ return m_start_bit_offset;
}
- bool impl_eq (const binding_key &other) const
+ bit_offset_t get_next_bit_offset () const
{
- return m_kind == other.m_kind;
+ return m_start_bit_offset + m_size_in_bits;
+ }
+ bit_offset_t get_last_bit_offset () const
+ {
+ return get_next_bit_offset () - 1;
}
-private:
- enum binding_kind m_kind;
+ bool contains_p (bit_offset_t offset) const
+ {
+ return (offset >= get_start_bit_offset ()
+ && offset < get_next_bit_offset ());
+ }
+
+ bool contains_p (const bit_range &other, bit_range *out) const;
+
+ bool operator== (const bit_range &other) const
+ {
+ return (m_start_bit_offset == other.m_start_bit_offset
+ && m_size_in_bits == other.m_size_in_bits);
+ }
+
+ bool intersects_p (const bit_range &other) const
+ {
+ return (get_start_bit_offset () < other.get_next_bit_offset ()
+ && other.get_start_bit_offset () < get_next_bit_offset ());
+ }
+ bool intersects_p (const bit_range &other,
+ bit_range *out_this,
+ bit_range *out_other) const;
+
+ static int cmp (const bit_range &br1, const bit_range &br2);
+
+ bit_range operator- (bit_offset_t offset) const;
+
+ static bool from_mask (unsigned HOST_WIDE_INT mask, bit_range *out);
+
+ bool as_byte_range (byte_range *out) const;
+
+ bit_offset_t m_start_bit_offset;
+ bit_size_t m_size_in_bits;
+};
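Note the half-open [start, next) semantics: adjacent ranges do not intersect. For example:

    bit_range a (0, 8);  /* bits 0-7 */
    bit_range b (8, 8);  /* bits 8-15 */
    bit_range c (4, 8);  /* bits 4-11 */
    a.intersects_p (b);  /* false: a's next offset equals b's start */
    a.intersects_p (c);  /* true: bits 4-7 are shared */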
+
+/* A concrete range of bytes. */
+
+struct byte_range
+{
+ byte_range (byte_offset_t start_byte_offset, byte_size_t size_in_bytes)
+ : m_start_byte_offset (start_byte_offset),
+ m_size_in_bytes (size_in_bytes)
+ {}
+
+ void dump_to_pp (pretty_printer *pp) const;
+ void dump () const;
+
+ bool contains_p (byte_offset_t offset) const
+ {
+ return (offset >= get_start_byte_offset ()
+ && offset < get_next_byte_offset ());
+ }
+ bool contains_p (const byte_range &other, byte_range *out) const;
+
+ bool operator== (const byte_range &other) const
+ {
+ return (m_start_byte_offset == other.m_start_byte_offset
+ && m_size_in_bytes == other.m_size_in_bytes);
+ }
+
+ byte_offset_t get_start_byte_offset () const
+ {
+ return m_start_byte_offset;
+ }
+ byte_offset_t get_next_byte_offset () const
+ {
+ return m_start_byte_offset + m_size_in_bytes;
+ }
+ byte_offset_t get_last_byte_offset () const
+ {
+ return m_start_byte_offset + m_size_in_bytes - 1;
+ }
+
+ bit_range as_bit_range () const
+ {
+ return bit_range (m_start_byte_offset * BITS_PER_UNIT,
+ m_size_in_bytes * BITS_PER_UNIT);
+ }
+
+ static int cmp (const byte_range &br1, const byte_range &br2);
+
+ byte_offset_t m_start_byte_offset;
+ byte_size_t m_size_in_bytes;
};
/* Concrete subclass of binding_key, for describing a concrete range of
@@ -199,27 +344,21 @@ public:
/* This class is its own key for the purposes of consolidation. */
typedef concrete_binding key_t;
- concrete_binding (bit_offset_t start_bit_offset, bit_size_t size_in_bits,
- enum binding_kind kind)
- : binding_key (kind),
- m_start_bit_offset (start_bit_offset),
- m_size_in_bits (size_in_bits)
+ concrete_binding (bit_offset_t start_bit_offset, bit_size_t size_in_bits)
+ : m_bit_range (start_bit_offset, size_in_bits)
{}
bool concrete_p () const FINAL OVERRIDE { return true; }
hashval_t hash () const
{
inchash::hash hstate;
- hstate.add_wide_int (m_start_bit_offset);
- hstate.add_wide_int (m_size_in_bits);
- return hstate.end () ^ binding_key::impl_hash ();
+ hstate.add_wide_int (m_bit_range.m_start_bit_offset);
+ hstate.add_wide_int (m_bit_range.m_size_in_bits);
+ return hstate.end ();
}
bool operator== (const concrete_binding &other) const
{
- if (!binding_key::impl_eq (other))
- return false;
- return (m_start_bit_offset == other.m_start_bit_offset
- && m_size_in_bits == other.m_size_in_bits);
+ return m_bit_range == other.m_bit_range;
}
void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
@@ -227,21 +366,33 @@ public:
const concrete_binding *dyn_cast_concrete_binding () const FINAL OVERRIDE
{ return this; }
- bit_offset_t get_start_bit_offset () const { return m_start_bit_offset; }
- bit_size_t get_size_in_bits () const { return m_size_in_bits; }
+ const bit_range &get_bit_range () const { return m_bit_range; }
+
+ bit_offset_t get_start_bit_offset () const
+ {
+ return m_bit_range.m_start_bit_offset;
+ }
+ bit_size_t get_size_in_bits () const
+ {
+ return m_bit_range.m_size_in_bits;
+ }
/* Return the next bit offset after the end of this binding. */
bit_offset_t get_next_bit_offset () const
{
- return m_start_bit_offset + m_size_in_bits;
+ return m_bit_range.get_next_bit_offset ();
}
bool overlaps_p (const concrete_binding &other) const;
static int cmp_ptr_ptr (const void *, const void *);
+ void mark_deleted () { m_bit_range.m_start_bit_offset = -1; }
+ void mark_empty () { m_bit_range.m_start_bit_offset = -2; }
+ bool is_deleted () const { return m_bit_range.m_start_bit_offset == -1; }
+ bool is_empty () const { return m_bit_range.m_start_bit_offset == -2; }
+
private:
- bit_offset_t m_start_bit_offset;
- bit_size_t m_size_in_bits;
+ bit_range m_bit_range;
};
} // namespace ana
@@ -249,7 +400,7 @@ private:
template <> struct default_hash_traits<ana::concrete_binding>
: public member_function_hash_traits<ana::concrete_binding>
{
- static const bool empty_zero_p = true;
+ static const bool empty_zero_p = false;
};
namespace ana {
@@ -263,29 +414,33 @@ public:
/* This class is its own key for the purposes of consolidation. */
typedef symbolic_binding key_t;
- symbolic_binding (const region *region, enum binding_kind kind)
- : binding_key (kind),
- m_region (region)
- {}
+ symbolic_binding (const region *region) : m_region (region) {}
bool concrete_p () const FINAL OVERRIDE { return false; }
hashval_t hash () const
{
- return (binding_key::impl_hash () ^ (intptr_t)m_region);
+ return (intptr_t)m_region;
}
bool operator== (const symbolic_binding &other) const
{
- if (!binding_key::impl_eq (other))
- return false;
- return (m_region == other.m_region);
+ return m_region == other.m_region;
}
void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+ const symbolic_binding *dyn_cast_symbolic_binding () const FINAL OVERRIDE
+ { return this; }
+
const region *get_region () const { return m_region; }
static int cmp_ptr_ptr (const void *, const void *);
+ void mark_deleted () { m_region = reinterpret_cast<const region *> (1); }
+ void mark_empty () { m_region = NULL; }
+ bool is_deleted () const
+ { return m_region == reinterpret_cast<const region *> (1); }
+ bool is_empty () const { return m_region == NULL; }
+
private:
const region *m_region;
};
@@ -352,7 +507,13 @@ public:
static int cmp (const binding_map &map1, const binding_map &map2);
+ void remove_overlapping_bindings (store_manager *mgr,
+ const binding_key *drop_key,
+ uncertainty_t *uncertainty);
+
private:
+ void get_overlapping_bindings (const binding_key *key,
+ auto_vec<const binding_key *> *out);
bool apply_ctor_val_to_range (const region *parent_reg,
region_model_manager *mgr,
tree min_index, tree max_index,
@@ -401,27 +562,31 @@ public:
void dump_to_pp (pretty_printer *pp, bool simple, bool multiline) const;
void dump (bool simple) const;
+ void validate () const;
+
json::object *to_json () const;
- void bind (store_manager *mgr, const region *, const svalue *,
- binding_kind kind);
+ void bind (store_manager *mgr, const region *, const svalue *);
void clobber_region (store_manager *mgr, const region *reg);
void purge_region (store_manager *mgr, const region *reg);
+ void fill_region (store_manager *mgr, const region *reg, const svalue *sval);
void zero_fill_region (store_manager *mgr, const region *reg);
- void mark_region_as_unknown (store_manager *mgr, const region *reg);
+ void mark_region_as_unknown (store_manager *mgr, const region *reg,
+ uncertainty_t *uncertainty);
+ void purge_state_involving (const svalue *sval,
+ region_model_manager *sval_mgr);
- const svalue *get_binding (store_manager *mgr, const region *reg,
- binding_kind kind) const;
+ const svalue *get_binding (store_manager *mgr, const region *reg) const;
const svalue *get_binding_recursive (store_manager *mgr,
- const region *reg,
- enum binding_kind kind) const;
+ const region *reg) const;
const svalue *get_any_binding (store_manager *mgr,
const region *reg) const;
const svalue *maybe_get_compound_binding (store_manager *mgr,
const region *reg) const;
- void remove_overlapping_bindings (store_manager *mgr, const region *reg);
+ void remove_overlapping_bindings (store_manager *mgr, const region *reg,
+ uncertainty_t *uncertainty);
template <typename T>
void for_each_value (void (*cb) (const svalue *sval, T user_data),
@@ -443,6 +608,7 @@ public:
void mark_as_escaped ();
void on_unknown_fncall (const gcall *call, store_manager *mgr);
+ void on_asm (const gasm *stmt, store_manager *mgr);
bool escaped_p () const { return m_escaped; }
bool touched_p () const { return m_touched; }
@@ -476,8 +642,6 @@ public:
private:
const svalue *get_any_value (const binding_key *key) const;
- void get_overlapping_bindings (store_manager *mgr, const region *reg,
- auto_vec<const binding_key *> *out);
void bind_compound_sval (store_manager *mgr,
const region *reg,
const compound_svalue *compound_sval);
@@ -530,20 +694,25 @@ public:
void dump (bool simple) const;
void summarize_to_pp (pretty_printer *pp, bool simple) const;
+ void validate () const;
+
json::object *to_json () const;
- const svalue *get_direct_binding (store_manager *mgr, const region *reg);
- const svalue *get_default_binding (store_manager *mgr, const region *reg);
const svalue *get_any_binding (store_manager *mgr, const region *reg) const;
bool called_unknown_fn_p () const { return m_called_unknown_fn; }
void set_value (store_manager *mgr, const region *lhs_reg,
- const svalue *rhs_sval, enum binding_kind kind);
+ const svalue *rhs_sval,
+ uncertainty_t *uncertainty);
void clobber_region (store_manager *mgr, const region *reg);
void purge_region (store_manager *mgr, const region *reg);
+ void fill_region (store_manager *mgr, const region *reg, const svalue *sval);
void zero_fill_region (store_manager *mgr, const region *reg);
- void mark_region_as_unknown (store_manager *mgr, const region *reg);
+ void mark_region_as_unknown (store_manager *mgr, const region *reg,
+ uncertainty_t *uncertainty);
+ void purge_state_involving (const svalue *sval,
+ region_model_manager *sval_mgr);
const binding_cluster *get_cluster (const region *base_reg) const;
binding_cluster *get_cluster (const region *base_reg);
@@ -619,11 +788,15 @@ public:
/* binding consolidation. */
const concrete_binding *
get_concrete_binding (bit_offset_t start_bit_offset,
- bit_offset_t size_in_bits,
- enum binding_kind kind);
+ bit_offset_t size_in_bits);
+ const concrete_binding *
+ get_concrete_binding (const bit_range &bits)
+ {
+ return get_concrete_binding (bits.get_start_bit_offset (),
+ bits.m_size_in_bits);
+ }
const symbolic_binding *
- get_symbolic_binding (const region *region,
- enum binding_kind kind);
+ get_symbolic_binding (const region *region);
region_model_manager *get_svalue_manager () const
{
diff --git a/gcc/analyzer/supergraph.cc b/gcc/analyzer/supergraph.cc
index 4b93456..85acf44 100644
--- a/gcc/analyzer/supergraph.cc
+++ b/gcc/analyzer/supergraph.cc
@@ -50,6 +50,7 @@ along with GCC; see the file COPYING3. If not see
#include "cgraph.h"
#include "cfg.h"
#include "digraph.h"
+#include "tree-cfg.h"
#include "analyzer/supergraph.h"
#include "analyzer/analyzer-logging.h"
@@ -87,6 +88,50 @@ supergraph_call_edge (function *fun, gimple *stmt)
return edge;
}
+/* class saved_uids.
+
+ In order to ensure consistent results without relying on the ordering
+ of pointer values we assign a uid to each gimple stmt, globally unique
+ across all functions.
+
+ Normally, the stmt uids are a scratch space that each pass can freely
+ assign its own values to. However, in the case of LTO, the uids are
+ used to associate call stmts with callgraph edges between the WPA phase
+ (where the analyzer runs in LTO mode) and the LTRANS phase; if the
+ analyzer changes them in the WPA phase, it leads to errors when
+ streaming the code back in at LTRANS.
+ lto_prepare_function_for_streaming has code to renumber the stmt UIDs
+ when the code is streamed back out, but for some reason this isn't
+ called for clones.
+
+ Hence, as a workaround, this class has responsibility for tracking
+ the original uids and restoring them once the pass is complete
+ (in the supergraph dtor). */
+
+/* Give STMT a globally unique uid, storing its original uid so it can
+ later be restored. */
+
+void
+saved_uids::make_uid_unique (gimple *stmt)
+{
+ unsigned next_uid = m_old_stmt_uids.length ();
+ unsigned old_stmt_uid = stmt->uid;
+ stmt->uid = next_uid;
+ m_old_stmt_uids.safe_push
+ (std::pair<gimple *, unsigned> (stmt, old_stmt_uid));
+}
+
+/* Restore the saved uids of all stmts. */
+
+void
+saved_uids::restore_uids () const
+{
+ unsigned i;
+ std::pair<gimple *, unsigned> *pair;
+ FOR_EACH_VEC_ELT (m_old_stmt_uids, i, pair)
+ pair->first->uid = pair->second;
+}
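The intended lifecycle, as wired up in the ctor and dtor below:

    {
      supergraph sg (logger);  /* ctor: make_uid_unique on every stmt */
      /* ... the analysis relies on globally-unique stmt->uid values ... */
    }  /* dtor: restore_uids puts the original uids back, keeping LTO happy */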
+
/* supergraph's ctor. Walk the callgraph, building supernodes for each
CFG basic block, splitting the basic blocks at callsites. Join
together the supernodes with interprocedural and intraprocedural
@@ -101,8 +146,6 @@ supergraph::supergraph (logger *logger)
/* First pass: make supernodes (and assign UIDs to the gimple stmts). */
{
- unsigned next_uid = 0;
-
/* Sort the cgraph_nodes? */
cgraph_node *node;
FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
@@ -127,7 +170,7 @@ supergraph::supergraph (logger *logger)
{
gimple *stmt = gsi_stmt (gpi);
m_stmt_to_node_t.put (stmt, node_for_stmts);
- stmt->uid = next_uid++;
+ m_stmt_uids.make_uid_unique (stmt);
}
/* Append statements from BB to the current supernode, splitting
@@ -139,13 +182,35 @@ supergraph::supergraph (logger *logger)
gimple *stmt = gsi_stmt (gsi);
node_for_stmts->m_stmts.safe_push (stmt);
m_stmt_to_node_t.put (stmt, node_for_stmts);
- stmt->uid = next_uid++;
+ m_stmt_uids.make_uid_unique (stmt);
if (cgraph_edge *edge = supergraph_call_edge (fun, stmt))
- {
- m_cgraph_edge_to_caller_prev_node.put(edge, node_for_stmts);
- node_for_stmts = add_node (fun, bb, as_a <gcall *> (stmt), NULL);
- m_cgraph_edge_to_caller_next_node.put (edge, node_for_stmts);
- }
+ {
+ m_cgraph_edge_to_caller_prev_node.put(edge, node_for_stmts);
+ node_for_stmts = add_node (fun, bb, as_a <gcall *> (stmt),
+ NULL);
+ m_cgraph_edge_to_caller_next_node.put (edge, node_for_stmts);
+ }
+ else
+ {
+ /* Maybe the call is via a function pointer. */
+ if (gcall *call = dyn_cast<gcall *> (stmt))
+ {
+ cgraph_edge *edge
+ = cgraph_node::get (fun->decl)->get_edge (stmt);
+ if (!edge || !edge->callee)
+ {
+ supernode *old_node_for_stmts = node_for_stmts;
+ node_for_stmts = add_node (fun, bb, call, NULL);
+
+ superedge *sedge
+ = new callgraph_superedge (old_node_for_stmts,
+ node_for_stmts,
+ SUPEREDGE_INTRAPROCEDURAL_CALL,
+ NULL);
+ add_edge (sedge);
+ }
+ }
+ }
}
m_bb_to_final_node.put (bb, node_for_stmts);
@@ -182,7 +247,7 @@ supergraph::supergraph (logger *logger)
supernode *dest_supernode
= *m_bb_to_initial_node.get (dest_cfg_block);
cfg_superedge *cfg_sedge
- = add_cfg_edge (src_supernode, dest_supernode, cfg_edge, idx);
+ = add_cfg_edge (src_supernode, dest_supernode, cfg_edge);
m_cfg_edge_to_cfg_superedge.put (cfg_edge, cfg_sedge);
}
}
@@ -257,6 +322,13 @@ supergraph::supergraph (logger *logger)
}
}
+/* supergraph's dtor. Reset stmt uids. */
+
+supergraph::~supergraph ()
+{
+ m_stmt_uids.restore_uids ();
+}
+
/* Dump this graph in .dot format to PP, using DUMP_ARGS.
Cluster the supernodes by function, then by BB from original CFG. */
@@ -434,17 +506,16 @@ supergraph::add_node (function *fun, basic_block bb, gcall *returning_call,
adding it to this supergraph.
If the edge is for a switch statement, create a switch_cfg_superedge
- subclass using IDX (the index of E within the out-edges from SRC's
- underlying basic block). */
+ subclass. */
cfg_superedge *
-supergraph::add_cfg_edge (supernode *src, supernode *dest, ::edge e, int idx)
+supergraph::add_cfg_edge (supernode *src, supernode *dest, ::edge e)
{
/* Special-case switch edges. */
gimple *stmt = src->get_last_stmt ();
cfg_superedge *new_edge;
if (stmt && stmt->code == GIMPLE_SWITCH)
- new_edge = new switch_cfg_superedge (src, dest, e, idx);
+ new_edge = new switch_cfg_superedge (src, dest, e);
else
new_edge = new cfg_superedge (src, dest, e);
add_edge (new_edge);
@@ -983,15 +1054,41 @@ cfg_superedge::dump_label_to_pp (pretty_printer *pp,
/* Otherwise, no label. */
}
+/* Get the index number for this edge for use in phi stmts
+ in its destination. */
+
+size_t
+cfg_superedge::get_phi_arg_idx () const
+{
+ return m_cfg_edge->dest_idx;
+}
+
/* Get the phi argument for PHI for this CFG edge. */
tree
cfg_superedge::get_phi_arg (const gphi *phi) const
{
- size_t index = m_cfg_edge->dest_idx;
+ size_t index = get_phi_arg_idx ();
return gimple_phi_arg_def (phi, index);
}
+switch_cfg_superedge::switch_cfg_superedge (supernode *src,
+ supernode *dst,
+ ::edge e)
+: cfg_superedge (src, dst, e)
+{
+ /* Populate m_case_labels with all cases which go to DST. */
+ const gswitch *gswitch = get_switch_stmt ();
+ for (unsigned i = 0; i < gimple_switch_num_labels (gswitch); i++)
+ {
+ tree case_ = gimple_switch_label (gswitch, i);
+ basic_block bb = label_to_block (src->get_function (),
+ CASE_LABEL (case_));
+ if (bb == dst->m_bb)
+ m_case_labels.safe_push (case_);
+ }
+}
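For example (illustrative), a switch in which several cases share a destination block now yields a single superedge carrying all of those labels:

    switch (i)
      {
      case 1:
      case 3:
        f ();  /* one superedge to this block, with m_case_labels
                  holding both "case 1" and "case 3" */
        break;
      default:
        g ();
      }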
+
/* Implementation of superedge::dump_label_to_pp for CFG superedges for
"switch" statements.
@@ -1001,31 +1098,63 @@ void
switch_cfg_superedge::dump_label_to_pp (pretty_printer *pp,
bool user_facing ATTRIBUTE_UNUSED) const
{
- tree case_label = get_case_label ();
- gcc_assert (TREE_CODE (case_label) == CASE_LABEL_EXPR);
- tree lower_bound = CASE_LOW (case_label);
- tree upper_bound = CASE_HIGH (case_label);
- if (lower_bound)
+ if (user_facing)
{
- pp_printf (pp, "case ");
- dump_generic_node (pp, lower_bound, 0, (dump_flags_t)0, false);
- if (upper_bound)
+ for (unsigned i = 0; i < m_case_labels.length (); ++i)
{
- pp_printf (pp, " ... ");
- dump_generic_node (pp, upper_bound, 0, (dump_flags_t)0, false);
+ if (i > 0)
+ pp_string (pp, ", ");
+ tree case_label = m_case_labels[i];
+ gcc_assert (TREE_CODE (case_label) == CASE_LABEL_EXPR);
+ tree lower_bound = CASE_LOW (case_label);
+ tree upper_bound = CASE_HIGH (case_label);
+ if (lower_bound)
+ {
+ pp_printf (pp, "case ");
+ dump_generic_node (pp, lower_bound, 0, (dump_flags_t)0, false);
+ if (upper_bound)
+ {
+ pp_printf (pp, " ... ");
+ dump_generic_node (pp, upper_bound, 0, (dump_flags_t)0,
+ false);
+ }
+ pp_printf (pp, ":");
+ }
+ else
+ pp_printf (pp, "default:");
}
- pp_printf (pp, ":");
}
else
- pp_printf (pp, "default:");
-}
-
-/* Get the case label for this "switch" superedge. */
-
-tree
-switch_cfg_superedge::get_case_label () const
-{
- return gimple_switch_label (get_switch_stmt (), m_idx);
+ {
+ pp_character (pp, '{');
+ for (unsigned i = 0; i < m_case_labels.length (); ++i)
+ {
+ if (i > 0)
+ pp_string (pp, ", ");
+ tree case_label = m_case_labels[i];
+ gcc_assert (TREE_CODE (case_label) == CASE_LABEL_EXPR);
+ tree lower_bound = CASE_LOW (case_label);
+ tree upper_bound = CASE_HIGH (case_label);
+ if (lower_bound)
+ {
+ if (upper_bound)
+ {
+ pp_character (pp, '[');
+ dump_generic_node (pp, lower_bound, 0, (dump_flags_t)0,
+ false);
+ pp_string (pp, ", ");
+ dump_generic_node (pp, upper_bound, 0, (dump_flags_t)0,
+ false);
+ pp_character (pp, ']');
+ }
+ else
+ dump_generic_node (pp, lower_bound, 0, (dump_flags_t)0, false);
+ }
+ else
+ pp_printf (pp, "default");
+ }
+ pp_character (pp, '}');
+ }
}
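/* Sample renderings of the two styles above (a sketch): an edge
   covering "case 1:" and "case 2:" prints as "case 1:, case 2:" in
   the user-facing form and as "{1, 2}" in the .dot form; a ranged
   "case 1 ... 5:" prints as "case 1 ... 5:" and "{[1, 5]}"; a
   default edge prints as "default:" and "{default}".  */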
/* Implementation of superedge::dump_label_to_pp for interprocedural
@@ -1081,6 +1210,17 @@ callgraph_superedge::get_callee_decl () const
return get_callee_function ()->decl;
}
+/* Get the gcall * of this interprocedural call/return edge. */
+
+gcall *
+callgraph_superedge::get_call_stmt () const
+{
+ if (m_cedge)
+ return m_cedge->call_stmt;
+
+ return m_src->get_final_call ();
+}
+
/* Get the calling fndecl at this interprocedural call/return edge. */
tree
diff --git a/gcc/analyzer/supergraph.h b/gcc/analyzer/supergraph.h
index 5d1268e..09a12be 100644
--- a/gcc/analyzer/supergraph.h
+++ b/gcc/analyzer/supergraph.h
@@ -79,6 +79,18 @@ struct supergraph_traits
typedef supercluster cluster_t;
};
+/* A class to manage the setting and restoring of statement uids. */
+
+class saved_uids
+{
+public:
+ void make_uid_unique (gimple *stmt);
+ void restore_uids () const;
+
+private:
+ auto_vec<std::pair<gimple *, unsigned> > m_old_stmt_uids;
+};
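/* A minimal usage sketch for saved_uids (its member definitions live
   alongside the supergraph code in supergraph.cc):

     saved_uids uids;
     uids.make_uid_unique (stmt);  // save stmt's uid, assign a fresh one
     ...
     uids.restore_uids ();         // later, put the saved uids back
*/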
+
/* A "supergraph" is a directed graph formed by joining together all CFGs,
linking them via interprocedural call and return edges.
@@ -90,6 +102,7 @@ class supergraph : public digraph<supergraph_traits>
{
public:
supergraph (logger *logger);
+ ~supergraph ();
supernode *get_node_for_function_entry (function *fun) const
{
@@ -168,7 +181,7 @@ public:
private:
supernode *add_node (function *fun, basic_block bb, gcall *returning_call,
gimple_seq phi_nodes);
- cfg_superedge *add_cfg_edge (supernode *src, supernode *dest, ::edge e, int idx);
+ cfg_superedge *add_cfg_edge (supernode *src, supernode *dest, ::edge e);
call_superedge *add_call_superedge (supernode *src, supernode *dest,
cgraph_edge *cedge);
return_superedge *add_return_superedge (supernode *src, supernode *dest,
@@ -205,6 +218,8 @@ private:
typedef hash_map<const function *, unsigned> function_to_num_snodes_t;
function_to_num_snodes_t m_function_to_num_snodes;
+
+ saved_uids m_stmt_uids;
};
/* A node within a supergraph. */
@@ -253,6 +268,11 @@ class supernode : public dnode<supergraph_traits>
return i;
}
+ gcall *get_returning_call () const
+ {
+ return m_returning_call;
+ }
+
gimple *get_last_stmt () const
{
if (m_stmts.length () == 0)
@@ -385,7 +405,7 @@ class callgraph_superedge : public superedge
function *get_caller_function () const;
tree get_callee_decl () const;
tree get_caller_decl () const;
- gcall *get_call_stmt () const { return m_cedge->call_stmt; }
+ gcall *get_call_stmt () const;
tree get_arg_for_parm (tree parm, callsite_expr *out) const;
tree get_parm_for_arg (tree arg, callsite_expr *out) const;
tree map_expr_from_caller_to_callee (tree caller_expr,
@@ -499,6 +519,7 @@ class cfg_superedge : public superedge
int false_value_p () const { return get_flags () & EDGE_FALSE_VALUE; }
int back_edge_p () const { return get_flags () & EDGE_DFS_BACK; }
+ size_t get_phi_arg_idx () const;
tree get_phi_arg (const gphi *phi) const;
private:
@@ -518,15 +539,12 @@ is_a_helper <const cfg_superedge *>::test (const superedge *sedge)
namespace ana {
/* A subclass for edges from switch statements, retaining enough
- information to identify the pertinent case, and for adding labels
+ information to identify the pertinent cases, and for adding labels
when rendering via graphviz. */
class switch_cfg_superedge : public cfg_superedge {
public:
- switch_cfg_superedge (supernode *src, supernode *dst, ::edge e, int idx)
- : cfg_superedge (src, dst, e),
- m_idx (idx)
- {}
+ switch_cfg_superedge (supernode *src, supernode *dst, ::edge e);
const switch_cfg_superedge *dyn_cast_switch_cfg_superedge () const
FINAL OVERRIDE
@@ -542,10 +560,10 @@ class switch_cfg_superedge : public cfg_superedge {
return as_a <gswitch *> (m_src->get_last_stmt ());
}
- tree get_case_label () const;
+ const vec<tree> &get_case_labels () const { return m_case_labels; }
- private:
- const int m_idx;
+private:
+ auto_vec<tree> m_case_labels;
};
} // namespace ana
diff --git a/gcc/analyzer/svalue.cc b/gcc/analyzer/svalue.cc
index d6305a3..5f2fe4c 100644
--- a/gcc/analyzer/svalue.cc
+++ b/gcc/analyzer/svalue.cc
@@ -105,12 +105,25 @@ svalue::to_json () const
tree
svalue::maybe_get_constant () const
{
- if (const constant_svalue *cst_sval = dyn_cast_constant_svalue ())
+ const svalue *sval = unwrap_any_unmergeable ();
+ if (const constant_svalue *cst_sval = sval->dyn_cast_constant_svalue ())
return cst_sval->get_constant ();
else
return NULL_TREE;
}
+/* If this svalue is a region_svalue, return the region it points to.
+ Otherwise return NULL. */
+
+const region *
+svalue::maybe_get_region () const
+{
+ if (const region_svalue *region_sval = dyn_cast_region_svalue ())
+ return region_sval->get_pointee ();
+ else
+ return NULL;
+}
+
/* If this svalue is a cast (i.e. a unaryop NOP_EXPR or VIEW_CONVERT_EXPR),
return the underlying svalue.
Otherwise return NULL. */
@@ -158,6 +171,13 @@ svalue::can_merge_p (const svalue *other,
|| (other->get_kind () == SK_UNMERGEABLE))
return NULL;
+ /* Reject attempts to merge poisoned svalues with other svalues
+ (either non-poisoned, or other kinds of poison), so that e.g.
+ we identify paths in which a variable is conditionally uninitialized. */
+ if (get_kind () == SK_POISONED
+ || other->get_kind () == SK_POISONED)
+ return NULL;
+
/* Reject attempts to merge NULL pointers with not-NULL-pointers. */
if (POINTER_TYPE_P (get_type ()))
{
@@ -415,6 +435,27 @@ svalue::cmp_ptr (const svalue *sval1, const svalue *sval2)
sub_sval2->get_subregion ());
}
break;
+ case SK_REPEATED:
+ {
+ const repeated_svalue *repeated_sval1 = (const repeated_svalue *)sval1;
+ const repeated_svalue *repeated_sval2 = (const repeated_svalue *)sval2;
+ return svalue::cmp_ptr (repeated_sval1->get_inner_svalue (),
+ repeated_sval2->get_inner_svalue ());
+ }
+ break;
+ case SK_BITS_WITHIN:
+ {
+ const bits_within_svalue *bits_within_sval1
+ = (const bits_within_svalue *)sval1;
+ const bits_within_svalue *bits_within_sval2
+ = (const bits_within_svalue *)sval2;
+ if (int cmp = bit_range::cmp (bits_within_sval1->get_bits (),
+ bits_within_sval2->get_bits ()))
+ return cmp;
+ return svalue::cmp_ptr (bits_within_sval1->get_inner_svalue (),
+ bits_within_sval2->get_inner_svalue ());
+ }
+ break;
case SK_UNMERGEABLE:
{
const unmergeable_svalue *unmergeable_sval1
@@ -468,6 +509,29 @@ svalue::cmp_ptr (const svalue *sval1, const svalue *sval2)
conjured_sval2->get_id_region ());
}
break;
+ case SK_ASM_OUTPUT:
+ {
+ const asm_output_svalue *asm_output_sval1
+ = (const asm_output_svalue *)sval1;
+ const asm_output_svalue *asm_output_sval2
+ = (const asm_output_svalue *)sval2;
+ if (int asm_string_cmp = strcmp (asm_output_sval1->get_asm_string (),
+ asm_output_sval2->get_asm_string ()))
+ return asm_string_cmp;
+ if (int output_idx_cmp = ((int)asm_output_sval1->get_output_idx ()
+ - (int)asm_output_sval2->get_output_idx ()))
+ return output_idx_cmp;
+ if (int cmp = ((int)asm_output_sval1->get_num_inputs ()
+ - (int)asm_output_sval2->get_num_inputs ()))
+ return cmp;
+ for (unsigned i = 0; i < asm_output_sval1->get_num_inputs (); i++)
+ if (int input_cmp
+ = svalue::cmp_ptr (asm_output_sval1->get_input (i),
+ asm_output_sval2->get_input (i)))
+ return input_cmp;
+ return 0;
+ }
+ break;
}
}
@@ -481,6 +545,77 @@ svalue::cmp_ptr_ptr (const void *p1, const void *p2)
return cmp_ptr (sval1, sval2);
}
+/* Subclass of visitor for use in implementing svalue::involves_p. */
+
+class involvement_visitor : public visitor
+{
+public:
+ involvement_visitor (const svalue *needle)
+ : m_needle (needle), m_found (false) {}
+
+ void visit_initial_svalue (const initial_svalue *candidate)
+ {
+ if (candidate == m_needle)
+ m_found = true;
+ }
+
+ void visit_conjured_svalue (const conjured_svalue *candidate)
+ {
+ if (candidate == m_needle)
+ m_found = true;
+ }
+
+ bool found_p () const { return m_found; }
+
+private:
+ const svalue *m_needle;
+ bool m_found;
+};
+
+/* Return true iff this svalue is defined in terms of OTHER. */
+
+bool
+svalue::involves_p (const svalue *other) const
+{
+ /* Currently only implemented for these kinds. */
+ gcc_assert (other->get_kind () == SK_INITIAL
+ || other->get_kind () == SK_CONJURED);
+
+ involvement_visitor v (other);
+ accept (&v);
+ return v.found_p ();
+}
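/* A usage sketch (names hypothetical): if SV_X is the initial_svalue
   for a decl and SV_SUM is a binop_svalue for "SV_X + 1", then
   sv_sum->involves_p (sv_x) is true, since accept () recurses into
   the binop's operands; the assertion above restricts the needle
   (OTHER) to initial and conjured svalues.  */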
+
+/* Extract SUBRANGE from this value, as a new value of type TYPE. */
+
+const svalue *
+svalue::extract_bit_range (tree type,
+ const bit_range &subrange,
+ region_model_manager *mgr) const
+{
+ return mgr->get_or_create_bits_within (type, subrange, this);
+}
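/* A worked sketch, assuming V is an svalue of 32-bit int type and MGR
   is the region_model_manager in use; extracting V's second byte:

     bit_range second_byte (8, 8);  // start at bit 8, 8 bits wide
     const svalue *b
       = v->extract_bit_range (unsigned_char_type_node, second_byte, mgr);

   The manager may fold the result (e.g. to zero if V is all-zeroes);
   otherwise B is a bits_within_svalue.  */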
+
+/* Base implementation of svalue::maybe_fold_bits_within vfunc. */
+
+const svalue *
+svalue::maybe_fold_bits_within (tree,
+ const bit_range &,
+ region_model_manager *) const
+{
+ /* By default, don't fold. */
+ return NULL;
+}
+
+/* Base implementation of svalue::all_zeroes_p.
+ Return true if this value is known to be all zeroes. */
+
+bool
+svalue::all_zeroes_p () const
+{
+ return false;
+}
+
/* class region_svalue : public svalue. */
/* Implementation of svalue::dump_to_pp vfunc for region_svalue. */
@@ -646,6 +781,34 @@ constant_svalue::eval_condition (const constant_svalue *lhs,
return tristate::TS_UNKNOWN;
}
+/* Implementation of svalue::maybe_fold_bits_within vfunc
+ for constant_svalue. */
+
+const svalue *
+constant_svalue::maybe_fold_bits_within (tree type,
+ const bit_range &,
+ region_model_manager *mgr) const
+{
+ /* Bits within an all-zero value are also all zero. */
+ if (zerop (m_cst_expr))
+ {
+ if (type)
+ return mgr->get_or_create_cast (type, this);
+ else
+ return this;
+ }
+ /* Otherwise, don't fold. */
+ return NULL;
+}
+
+/* Implementation of svalue::all_zeroes_p for constant_svalue. */
+
+bool
+constant_svalue::all_zeroes_p () const
+{
+ return zerop (m_cst_expr);
+}
+
/* class unknown_svalue : public svalue. */
/* Implementation of svalue::dump_to_pp vfunc for unknown_svalue. */
@@ -677,6 +840,18 @@ unknown_svalue::accept (visitor *v) const
v->visit_unknown_svalue (this);
}
+/* Implementation of svalue::maybe_fold_bits_within vfunc
+ for unknown_svalue. */
+
+const svalue *
+unknown_svalue::maybe_fold_bits_within (tree type,
+ const bit_range &,
+ region_model_manager *mgr) const
+{
+ /* Bits within an unknown_svalue are themselves unknown. */
+ return mgr->get_or_create_unknown_svalue (type);
+}
+
/* Get a string for KIND for use in debug dumps. */
const char *
@@ -686,6 +861,8 @@ poison_kind_to_str (enum poison_kind kind)
{
default:
gcc_unreachable ();
+ case POISON_KIND_UNINIT:
+ return "uninit";
case POISON_KIND_FREED:
return "freed";
case POISON_KIND_POPPED_STACK:
@@ -701,9 +878,17 @@ void
poisoned_svalue::dump_to_pp (pretty_printer *pp, bool simple) const
{
if (simple)
- pp_printf (pp, "POISONED(%s)", poison_kind_to_str (m_kind));
+ {
+ pp_string (pp, "POISONED(");
+ print_quoted_type (pp, get_type ());
+ pp_printf (pp, ", %s)", poison_kind_to_str (m_kind));
+ }
else
- pp_printf (pp, "poisoned_svalue(%s)", poison_kind_to_str (m_kind));
+ {
+ pp_string (pp, "poisoned_svalue(");
+ print_quoted_type (pp, get_type ());
+ pp_printf (pp, ", %s)", poison_kind_to_str (m_kind));
+ }
}
/* Implementation of svalue::accept vfunc for poisoned_svalue. */
@@ -714,6 +899,18 @@ poisoned_svalue::accept (visitor *v) const
v->visit_poisoned_svalue (this);
}
+/* Implementation of svalue::maybe_fold_bits_within vfunc
+ for poisoned_svalue. */
+
+const svalue *
+poisoned_svalue::maybe_fold_bits_within (tree type,
+ const bit_range &,
+ region_model_manager *mgr) const
+{
+ /* Bits within a poisoned value are also poisoned. */
+ return mgr->get_or_create_poisoned_svalue (m_kind, type);
+}
+
/* class setjmp_svalue's implementation is in engine.cc, so that it can use
the declaration of exploded_node. */
@@ -763,7 +960,7 @@ initial_svalue::implicitly_live_p (const svalue_set *,
a popped stack frame. */
if (model->region_exists_p (m_reg))
{
- const svalue *reg_sval = model->get_store_value (m_reg);
+ const svalue *reg_sval = model->get_store_value (m_reg, NULL);
if (reg_sval == this)
return true;
}
@@ -850,8 +1047,51 @@ unaryop_svalue::implicitly_live_p (const svalue_set *live_svalues,
return get_arg ()->live_p (live_svalues, model);
}
+/* Implementation of svalue::maybe_fold_bits_within vfunc
+ for unaryop_svalue. */
+
+const svalue *
+unaryop_svalue::maybe_fold_bits_within (tree type,
+ const bit_range &,
+ region_model_manager *mgr) const
+{
+ switch (m_op)
+ {
+ default:
+ break;
+ case NOP_EXPR:
+ /* A cast of zero is zero. */
+ if (tree cst = m_arg->maybe_get_constant ())
+ if (zerop (cst))
+ {
+ if (type)
+ return mgr->get_or_create_cast (type, this);
+ else
+ return this;
+ }
+ break;
+ }
+ /* Otherwise, don't fold. */
+ return NULL;
+}
+
/* class binop_svalue : public svalue. */
+/* Return whether OP should be printed as an infix operator. */
+
+static bool
+infix_p (enum tree_code op)
+{
+ switch (op)
+ {
+ default:
+ return true;
+ case MAX_EXPR:
+ case MIN_EXPR:
+ return false;
+ }
+}
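/* E.g. a PLUS_EXPR binop dumps as "(A+B)", whereas MIN_EXPR dumps in
   prefix form, roughly "min(A, B)" (the exact token comes from
   op_symbol_code).  */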
+
/* Implementation of svalue::dump_to_pp vfunc for binop_svalue. */
void
@@ -859,11 +1099,25 @@ binop_svalue::dump_to_pp (pretty_printer *pp, bool simple) const
{
if (simple)
{
- pp_character (pp, '(');
- m_arg0->dump_to_pp (pp, simple);
- pp_string (pp, op_symbol_code (m_op));
- m_arg1->dump_to_pp (pp, simple);
- pp_character (pp, ')');
+ if (infix_p (m_op))
+ {
+ /* Print "(A OP B)". */
+ pp_character (pp, '(');
+ m_arg0->dump_to_pp (pp, simple);
+ pp_string (pp, op_symbol_code (m_op));
+ m_arg1->dump_to_pp (pp, simple);
+ pp_character (pp, ')');
+ }
+ else
+ {
+ /* Print "OP(A, B)". */
+ pp_string (pp, op_symbol_code (m_op));
+ pp_character (pp, '(');
+ m_arg0->dump_to_pp (pp, simple);
+ pp_string (pp, ", ");
+ m_arg1->dump_to_pp (pp, simple);
+ pp_character (pp, ')');
+ }
}
else
{
@@ -908,6 +1162,7 @@ sub_svalue::sub_svalue (tree type, const svalue *parent_svalue,
type),
m_parent_svalue (parent_svalue), m_subregion (subregion)
{
+ gcc_assert (parent_svalue->can_have_associated_state_p ());
}
/* Implementation of svalue::dump_to_pp vfunc for sub_svalue. */
@@ -953,6 +1208,216 @@ sub_svalue::implicitly_live_p (const svalue_set *live_svalues,
return get_parent ()->live_p (live_svalues, model);
}
+/* class repeated_svalue : public svalue. */
+
+/* repeated_svalue's ctor. */
+
+repeated_svalue::repeated_svalue (tree type,
+ const svalue *outer_size,
+ const svalue *inner_svalue)
+: svalue (complexity::from_pair (outer_size, inner_svalue), type),
+ m_outer_size (outer_size),
+ m_inner_svalue (inner_svalue)
+{
+ gcc_assert (outer_size->can_have_associated_state_p ());
+ gcc_assert (inner_svalue->can_have_associated_state_p ());
+}
+
+/* Implementation of svalue::dump_to_pp vfunc for repeated_svalue. */
+
+void
+repeated_svalue::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+ if (simple)
+ {
+ pp_string (pp, "REPEATED(");
+ if (get_type ())
+ {
+ print_quoted_type (pp, get_type ());
+ pp_string (pp, ", ");
+ }
+ pp_string (pp, "outer_size: ");
+ m_outer_size->dump_to_pp (pp, simple);
+ pp_string (pp, ", inner_val: ");
+ m_inner_svalue->dump_to_pp (pp, simple);
+ pp_character (pp, ')');
+ }
+ else
+ {
+ pp_string (pp, "repeated_svalue (");
+ if (get_type ())
+ {
+ print_quoted_type (pp, get_type ());
+ pp_string (pp, ", ");
+ }
+ pp_string (pp, "outer_size: ");
+ m_outer_size->dump_to_pp (pp, simple);
+ pp_string (pp, ", inner_val: ");
+ m_inner_svalue->dump_to_pp (pp, simple);
+ pp_character (pp, ')');
+ }
+}
+
+/* Implementation of svalue::accept vfunc for repeated_svalue. */
+
+void
+repeated_svalue::accept (visitor *v) const
+{
+ v->visit_repeated_svalue (this);
+ m_inner_svalue->accept (v);
+}
+
+/* Implementation of svalue::all_zeroes_p for repeated_svalue. */
+
+bool
+repeated_svalue::all_zeroes_p () const
+{
+ return m_inner_svalue->all_zeroes_p ();
+}
+
+/* Implementation of svalue::maybe_fold_bits_within vfunc
+ for repeated_svalue. */
+
+const svalue *
+repeated_svalue::maybe_fold_bits_within (tree type,
+ const bit_range &bits,
+ region_model_manager *mgr) const
+{
+ const svalue *innermost_sval = m_inner_svalue;
+ /* Fold:
+ BITS_WITHIN (range, REPEATED_SVALUE (ZERO))
+ to:
+ REPEATED_SVALUE (ZERO). */
+ if (all_zeroes_p ())
+ {
+ byte_range bytes (0, 0);
+ if (bits.as_byte_range (&bytes))
+ {
+ const svalue *byte_size
+ = mgr->get_or_create_int_cst (size_type_node,
+ bytes.m_size_in_bytes.to_uhwi ());
+ return mgr->get_or_create_repeated_svalue (type, byte_size,
+ innermost_sval);
+ }
+ }
+
+ /* Fold:
+ BITS_WITHIN (range, REPEATED_SVALUE (INNERMOST_SVALUE))
+ to:
+ BITS_WITHIN (range - offset, INNERMOST_SVALUE)
+ if range is fully within one instance of INNERMOST_SVALUE. */
+ if (tree innermost_type = innermost_sval->get_type ())
+ {
+ bit_size_t element_bit_size;
+ if (int_size_in_bits (innermost_type, &element_bit_size)
+ && element_bit_size > 0)
+ {
+ HOST_WIDE_INT start_idx
+ = (bits.get_start_bit_offset ()
+ / element_bit_size).to_shwi ();
+ HOST_WIDE_INT last_idx
+ = (bits.get_last_bit_offset ()
+ / element_bit_size).to_shwi ();
+ if (start_idx == last_idx)
+ {
+ bit_offset_t start_of_element
+ = start_idx * element_bit_size;
+ bit_range range_within_element
+ (bits.m_start_bit_offset - start_of_element,
+ bits.m_size_in_bits);
+ return mgr->get_or_create_bits_within (type,
+ range_within_element,
+ innermost_sval);
+ }
+ }
+ }
+
+ return NULL;
+}
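/* A worked sketch of the second fold above: with a repeated 32-bit
   element, a request for bits 40-47 has start_idx == last_idx == 1
   (both offsets lie in the element occupying bits [32, 64)), so the
   result is BITS_WITHIN (bits 8-15, INNERMOST_SVALUE).  */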
+
+/* class bits_within_svalue : public svalue. */
+
+/* bits_within_svalue's ctor. */
+
+bits_within_svalue::bits_within_svalue (tree type,
+ const bit_range &bits,
+ const svalue *inner_svalue)
+: svalue (complexity (inner_svalue), type),
+ m_bits (bits),
+ m_inner_svalue (inner_svalue)
+{
+ gcc_assert (inner_svalue->can_have_associated_state_p ());
+}
+
+/* Implementation of svalue::dump_to_pp vfunc for bits_within_svalue. */
+
+void
+bits_within_svalue::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+ if (simple)
+ {
+ pp_string (pp, "BITS_WITHIN(");
+ if (get_type ())
+ {
+ print_quoted_type (pp, get_type ());
+ pp_string (pp, ", ");
+ }
+ m_bits.dump_to_pp (pp);
+ pp_string (pp, ", inner_val: ");
+ m_inner_svalue->dump_to_pp (pp, simple);
+ pp_character (pp, ')');
+ }
+ else
+ {
+ pp_string (pp, "bits_within_svalue (");
+ if (get_type ())
+ {
+ print_quoted_type (pp, get_type ());
+ pp_string (pp, ", ");
+ }
+ m_bits.dump_to_pp (pp);
+ pp_string (pp, ", inner_val: ");
+ m_inner_svalue->dump_to_pp (pp, simple);
+ pp_character (pp, ')');
+ }
+}
+
+/* Implementation of svalue::maybe_fold_bits_within vfunc
+ for bits_within_svalue. */
+
+const svalue *
+bits_within_svalue::maybe_fold_bits_within (tree type,
+ const bit_range &bits,
+ region_model_manager *mgr) const
+{
+ /* Fold:
+ BITS_WITHIN (range1, BITS_WITHIN (range2, VAL))
+ to:
+ BITS_WITHIN (range1 within range2, VAL). */
+ bit_range offset_bits (m_bits.get_start_bit_offset ()
+ + bits.m_start_bit_offset,
+ bits.m_size_in_bits);
+ return mgr->get_or_create_bits_within (type, offset_bits, m_inner_svalue);
+}
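/* E.g. asking for bits [8, 16) of BITS_WITHIN ([16, 48), VAL) yields
   BITS_WITHIN ([24, 32), VAL): the inner range's start is added to
   the requested start, and the requested size is kept.  */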
+
+/* Implementation of svalue::accept vfunc for bits_within_svalue. */
+
+void
+bits_within_svalue::accept (visitor *v) const
+{
+ v->visit_bits_within_svalue (this);
+ m_inner_svalue->accept (v);
+}
+
+/* Implementation of svalue::implicitly_live_p vfunc for bits_within_svalue. */
+
+bool
+bits_within_svalue::implicitly_live_p (const svalue_set *live_svalues,
+ const region_model *model) const
+{
+ return m_inner_svalue->live_p (live_svalues, model);
+}
+
/* class widening_svalue : public svalue. */
/* Implementation of svalue::dump_to_pp vfunc for widening_svalue. */
@@ -1194,17 +1659,26 @@ compound_svalue::dump_to_pp (pretty_printer *pp, bool simple) const
if (simple)
{
pp_string (pp, "COMPOUND(");
+ if (get_type ())
+ {
+ print_quoted_type (pp, get_type ());
+ pp_string (pp, ", ");
+ }
+ pp_character (pp, '{');
m_map.dump_to_pp (pp, simple, false);
- pp_character (pp, ')');
+ pp_string (pp, "})");
}
else
{
pp_string (pp, "compound_svalue (");
- pp_string (pp, ", ");
+ if (get_type ())
+ {
+ print_quoted_type (pp, get_type ());
+ pp_string (pp, ", ");
+ }
pp_character (pp, '{');
m_map.dump_to_pp (pp, simple, false);
- pp_string (pp, "}, ");
- pp_character (pp, ')');
+ pp_string (pp, "})");
}
}
@@ -1240,6 +1714,75 @@ compound_svalue::calc_complexity (const binding_map &map)
return complexity (num_child_nodes + 1, max_child_depth + 1);
}
+/* Implementation of svalue::maybe_fold_bits_within vfunc
+ for compound_svalue. */
+
+const svalue *
+compound_svalue::maybe_fold_bits_within (tree type,
+ const bit_range &bits,
+ region_model_manager *mgr) const
+{
+ binding_map result_map;
+ for (auto iter : m_map)
+ {
+ const binding_key *key = iter.first;
+ if (const concrete_binding *conc_key
+ = key->dyn_cast_concrete_binding ())
+ {
+ /* Ignore concrete bindings outside BITS. */
+ if (!conc_key->get_bit_range ().intersects_p (bits))
+ continue;
+
+ const svalue *sval = iter.second;
+ /* Get the position of conc_key relative to BITS. */
+ bit_range result_location (conc_key->get_start_bit_offset ()
+ - bits.get_start_bit_offset (),
+ conc_key->get_size_in_bits ());
+ /* If conc_key starts before BITS, trim off leading bits
+ from the svalue and adjust binding location. */
+ if (result_location.m_start_bit_offset < 0)
+ {
+ bit_size_t leading_bits_to_drop
+ = -result_location.m_start_bit_offset;
+ result_location = bit_range
+ (0, result_location.m_size_in_bits - leading_bits_to_drop);
+ bit_range bits_within_sval (leading_bits_to_drop,
+ result_location.m_size_in_bits);
+ /* Trim off leading bits from SVAL. */
+ sval = mgr->get_or_create_bits_within (NULL_TREE,
+ bits_within_sval,
+ sval);
+ }
+ /* If conc_key finishes after BITS, trim off trailing bits
+ from the svalue and adjust binding location. */
+ if (conc_key->get_next_bit_offset ()
+ > bits.get_next_bit_offset ())
+ {
+ bit_size_t trailing_bits_to_drop
+ = (conc_key->get_next_bit_offset ()
+ - bits.get_next_bit_offset ());
+ result_location = bit_range
+ (result_location.m_start_bit_offset,
+ result_location.m_size_in_bits - trailing_bits_to_drop);
+ bit_range bits_within_sval (0,
+ result_location.m_size_in_bits);
+ /* Trim off trailing bits from SVAL. */
+ sval = mgr->get_or_create_bits_within (NULL_TREE,
+ bits_within_sval,
+ sval);
+ }
+ const concrete_binding *offset_conc_key
+ = mgr->get_store_manager ()->get_concrete_binding
+ (result_location);
+ result_map.put (offset_conc_key, sval);
+ }
+ else
+ /* If there are any symbolic keys, we can't fold this to a
+ concrete bit-range result. */
+ return NULL;
+ }
+ return mgr->get_or_create_compound_svalue (type, result_map);
+}
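/* A worked sketch of the trimming above: for a compound value binding
   bits [0, 32) to SVAL_A and bits [32, 64) to SVAL_B, extracting
   BITS == [16, 48) keeps both bindings: SVAL_A starts before BITS, so
   its leading 16 bits are dropped and the rest rebound at [0, 16) of
   the result; SVAL_B finishes after BITS, so its trailing 16 bits are
   dropped and the rest rebound at [16, 32).  */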
+
/* class conjured_svalue : public svalue. */
/* Implementation of svalue::dump_to_pp vfunc for conjured_svalue. */
@@ -1275,6 +1818,72 @@ conjured_svalue::accept (visitor *v) const
m_id_reg->accept (v);
}
+/* class asm_output_svalue : public svalue. */
+
+/* Implementation of svalue::dump_to_pp vfunc for asm_output_svalue. */
+
+void
+asm_output_svalue::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+ if (simple)
+ {
+ pp_printf (pp, "ASM_OUTPUT(%qs, %%%i, {",
+ get_asm_string (),
+ get_output_idx ());
+ for (unsigned i = 0; i < m_num_inputs; i++)
+ {
+ if (i > 0)
+ pp_string (pp, ", ");
+ dump_input (pp, i, m_input_arr[i], simple);
+ }
+ pp_string (pp, "})");
+ }
+ else
+ {
+ pp_printf (pp, "asm_output_svalue (%qs, %%%i, {",
+ get_asm_string (),
+ get_output_idx ());
+ for (unsigned i = 0; i < m_num_inputs; i++)
+ {
+ if (i > 0)
+ pp_string (pp, ", ");
+ dump_input (pp, i, m_input_arr[i], simple);
+ }
+ pp_string (pp, "})");
+ }
+}
+
+/* Subroutine of asm_output_svalue::dump_to_pp. */
+
+void
+asm_output_svalue::dump_input (pretty_printer *pp,
+ unsigned input_idx,
+ const svalue *sval,
+ bool simple) const
+{
+ pp_printf (pp, "%%%i: ", input_idx_to_asm_idx (input_idx));
+ sval->dump_to_pp (pp, simple);
+}
+
+/* Convert INPUT_IDX from an index into the array of inputs
+ into the index of all operands for the asm stmt. */
+
+unsigned
+asm_output_svalue::input_idx_to_asm_idx (unsigned input_idx) const
+{
+ return input_idx + m_num_outputs;
+}
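/* E.g. for an asm with one output ("=r" (out)) and two inputs
   ("r" (a), "r" (b)), input 0 is operand %1 and input 1 is %2.  */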
+
+/* Implementation of svalue::accept vfunc for asm_output_svalue. */
+
+void
+asm_output_svalue::accept (visitor *v) const
+{
+ v->visit_asm_output_svalue (this);
+ for (unsigned i = 0; i < m_num_inputs; i++)
+ m_input_arr[i]->accept (v);
+}
+
} // namespace ana
#endif /* #if ENABLE_ANALYZER */
diff --git a/gcc/analyzer/svalue.h b/gcc/analyzer/svalue.h
index 672a89c..63f7d15 100644
--- a/gcc/analyzer/svalue.h
+++ b/gcc/analyzer/svalue.h
@@ -41,11 +41,14 @@ enum svalue_kind
SK_UNARYOP,
SK_BINOP,
SK_SUB,
+ SK_REPEATED,
+ SK_BITS_WITHIN,
SK_UNMERGEABLE,
SK_PLACEHOLDER,
SK_WIDENING,
SK_COMPOUND,
- SK_CONJURED
+ SK_CONJURED,
+ SK_ASM_OUTPUT
};
/* svalue and its subclasses.
@@ -63,13 +66,18 @@ enum svalue_kind
unaryop_svalue (SK_UNARYOP): unary operation on another svalue
binop_svalue (SK_BINOP): binary operation on two svalues
sub_svalue (SK_SUB): the result of accessing a subregion
+ repeated_svalue (SK_REPEATED): repeating an svalue to fill a larger region
+ bits_within_svalue (SK_BITS_WITHIN): a range of bits/bytes within a larger
+ svalue
unmergeable_svalue (SK_UNMERGEABLE): a value that is so interesting
from a control-flow perspective that it can inhibit state-merging
placeholder_svalue (SK_PLACEHOLDER): for use in selftests.
widening_svalue (SK_WIDENING): a merger of two svalues (possibly
in an iteration).
compound_svalue (SK_COMPOUND): a mapping of bit-ranges to svalues
- conjured_svalue (SK_CONJURED): a value arising from a stmt. */
+ conjured_svalue (SK_CONJURED): a value arising from a stmt
+ asm_output_svalue (SK_ASM_OUTPUT): an output from a deterministic
+ asm stmt. */
/* An abstract base class representing a value held by a region of memory. */
@@ -107,6 +115,10 @@ public:
dyn_cast_binop_svalue () const { return NULL; }
virtual const sub_svalue *
dyn_cast_sub_svalue () const { return NULL; }
+ virtual const repeated_svalue *
+ dyn_cast_repeated_svalue () const { return NULL; }
+ virtual const bits_within_svalue *
+ dyn_cast_bits_within_svalue () const { return NULL; }
virtual const unmergeable_svalue *
dyn_cast_unmergeable_svalue () const { return NULL; }
virtual const widening_svalue *
@@ -115,8 +127,11 @@ public:
dyn_cast_compound_svalue () const { return NULL; }
virtual const conjured_svalue *
dyn_cast_conjured_svalue () const { return NULL; }
+ virtual const asm_output_svalue *
+ dyn_cast_asm_output_svalue () const { return NULL; }
tree maybe_get_constant () const;
+ const region *maybe_get_region () const;
const svalue *maybe_undo_cast () const;
const svalue *unwrap_any_unmergeable () const;
@@ -136,6 +151,25 @@ public:
static int cmp_ptr (const svalue *, const svalue *);
static int cmp_ptr_ptr (const void *, const void *);
+ bool involves_p (const svalue *other) const;
+
+ const svalue *
+ extract_bit_range (tree type,
+ const bit_range &subrange,
+ region_model_manager *mgr) const;
+
+ virtual const svalue *
+ maybe_fold_bits_within (tree type,
+ const bit_range &subrange,
+ region_model_manager *mgr) const;
+
+ virtual bool all_zeroes_p () const;
+
+ /* Can this svalue be involved in constraints and sm-state?
+ Most can, but UNKNOWN and POISONED svalues are singletons
+ per-type and thus it's meaningless for them to "have state". */
+ virtual bool can_have_associated_state_p () const { return true; }
+
protected:
svalue (complexity c, tree type)
: m_complexity (c), m_type (type)
@@ -173,9 +207,9 @@ public:
}
void mark_deleted () { m_type = reinterpret_cast<tree> (1); }
- void mark_empty () { m_type = NULL_TREE; }
+ void mark_empty () { m_type = reinterpret_cast<tree> (2); }
bool is_deleted () const { return m_type == reinterpret_cast<tree> (1); }
- bool is_empty () const { return m_type == NULL_TREE; }
+ bool is_empty () const { return m_type == reinterpret_cast<tree> (2); }
tree m_type;
const region *m_reg;
@@ -220,7 +254,7 @@ is_a_helper <const region_svalue *>::test (const svalue *sval)
template <> struct default_hash_traits<region_svalue::key_t>
: public member_function_hash_traits<region_svalue::key_t>
{
- static const bool empty_zero_p = true;
+ static const bool empty_zero_p = false;
};
namespace ana {
@@ -251,6 +285,13 @@ public:
enum tree_code op,
const constant_svalue *rhs);
+ const svalue *
+ maybe_fold_bits_within (tree type,
+ const bit_range &subrange,
+ region_model_manager *mgr) const FINAL OVERRIDE;
+
+ bool all_zeroes_p () const FINAL OVERRIDE;
+
private:
tree m_cst_expr;
};
@@ -283,12 +324,23 @@ public:
void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
void accept (visitor *v) const FINAL OVERRIDE;
+
+ const svalue *
+ maybe_fold_bits_within (tree type,
+ const bit_range &subrange,
+ region_model_manager *mgr) const FINAL OVERRIDE;
+
+ /* Unknown values are singletons per-type, so can't have state. */
+ bool can_have_associated_state_p () const FINAL OVERRIDE { return false; }
};
/* An enum describing a particular kind of "poisoned" value. */
enum poison_kind
{
+ /* For use to describe uninitialized memory. */
+ POISON_KIND_UNINIT,
+
/* For use to describe freed memory. */
POISON_KIND_FREED,
@@ -325,9 +377,9 @@ public:
}
void mark_deleted () { m_type = reinterpret_cast<tree> (1); }
- void mark_empty () { m_type = NULL_TREE; }
+ void mark_empty () { m_type = reinterpret_cast<tree> (2); }
bool is_deleted () const { return m_type == reinterpret_cast<tree> (1); }
- bool is_empty () const { return m_type == NULL_TREE; }
+ bool is_empty () const { return m_type == reinterpret_cast<tree> (2); }
enum poison_kind m_kind;
tree m_type;
@@ -343,8 +395,16 @@ public:
void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
void accept (visitor *v) const FINAL OVERRIDE;
+ const svalue *
+ maybe_fold_bits_within (tree type,
+ const bit_range &subrange,
+ region_model_manager *mgr) const FINAL OVERRIDE;
+
enum poison_kind get_poison_kind () const { return m_kind; }
+ /* Poisoned svalues are singletons per-type, so can't have state. */
+ bool can_have_associated_state_p () const FINAL OVERRIDE { return false; }
+
private:
enum poison_kind m_kind;
};
@@ -362,7 +422,7 @@ is_a_helper <const poisoned_svalue *>::test (const svalue *sval)
template <> struct default_hash_traits<poisoned_svalue::key_t>
: public member_function_hash_traits<poisoned_svalue::key_t>
{
- static const bool empty_zero_p = true;
+ static const bool empty_zero_p = false;
};
namespace ana {
@@ -424,9 +484,9 @@ public:
}
void mark_deleted () { m_type = reinterpret_cast<tree> (1); }
- void mark_empty () { m_type = NULL_TREE; }
+ void mark_empty () { m_type = reinterpret_cast<tree> (2); }
bool is_deleted () const { return m_type == reinterpret_cast<tree> (1); }
- bool is_empty () const { return m_type == NULL_TREE; }
+ bool is_empty () const { return m_type == reinterpret_cast<tree> (2); }
setjmp_record m_record;
tree m_type;
@@ -465,7 +525,7 @@ is_a_helper <const setjmp_svalue *>::test (const svalue *sval)
template <> struct default_hash_traits<setjmp_svalue::key_t>
: public member_function_hash_traits<setjmp_svalue::key_t>
{
- static const bool empty_zero_p = true;
+ static const bool empty_zero_p = false;
};
namespace ana {
@@ -546,9 +606,9 @@ public:
}
void mark_deleted () { m_type = reinterpret_cast<tree> (1); }
- void mark_empty () { m_type = NULL_TREE; }
+ void mark_empty () { m_type = reinterpret_cast<tree> (2); }
bool is_deleted () const { return m_type == reinterpret_cast<tree> (1); }
- bool is_empty () const { return m_type == NULL_TREE; }
+ bool is_empty () const { return m_type == reinterpret_cast<tree> (2); }
tree m_type;
enum tree_code m_op;
@@ -558,6 +618,7 @@ public:
unaryop_svalue (tree type, enum tree_code op, const svalue *arg)
: svalue (complexity (arg), type), m_op (op), m_arg (arg)
{
+ gcc_assert (arg->can_have_associated_state_p ());
}
enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_UNARYOP; }
@@ -572,6 +633,11 @@ public:
enum tree_code get_op () const { return m_op; }
const svalue *get_arg () const { return m_arg; }
+ const svalue *
+ maybe_fold_bits_within (tree type,
+ const bit_range &subrange,
+ region_model_manager *mgr) const FINAL OVERRIDE;
+
private:
enum tree_code m_op;
const svalue *m_arg;
@@ -590,7 +656,7 @@ is_a_helper <const unaryop_svalue *>::test (const svalue *sval)
template <> struct default_hash_traits<unaryop_svalue::key_t>
: public member_function_hash_traits<unaryop_svalue::key_t>
{
- static const bool empty_zero_p = true;
+ static const bool empty_zero_p = false;
};
namespace ana {
@@ -628,9 +694,9 @@ public:
}
void mark_deleted () { m_type = reinterpret_cast<tree> (1); }
- void mark_empty () { m_type = NULL_TREE; }
+ void mark_empty () { m_type = reinterpret_cast<tree> (2); }
bool is_deleted () const { return m_type == reinterpret_cast<tree> (1); }
- bool is_empty () const { return m_type == NULL_TREE; }
+ bool is_empty () const { return m_type == reinterpret_cast<tree> (2); }
tree m_type;
enum tree_code m_op;
@@ -645,6 +711,8 @@ public:
type),
m_op (op), m_arg0 (arg0), m_arg1 (arg1)
{
+ gcc_assert (arg0->can_have_associated_state_p ());
+ gcc_assert (arg1->can_have_associated_state_p ());
}
enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_BINOP; }
@@ -681,7 +749,7 @@ is_a_helper <const binop_svalue *>::test (const svalue *sval)
template <> struct default_hash_traits<binop_svalue::key_t>
: public member_function_hash_traits<binop_svalue::key_t>
{
- static const bool empty_zero_p = true;
+ static const bool empty_zero_p = false;
};
namespace ana {
@@ -717,9 +785,9 @@ public:
}
void mark_deleted () { m_type = reinterpret_cast<tree> (1); }
- void mark_empty () { m_type = NULL_TREE; }
+ void mark_empty () { m_type = reinterpret_cast<tree> (2); }
bool is_deleted () const { return m_type == reinterpret_cast<tree> (1); }
- bool is_empty () const { return m_type == NULL_TREE; }
+ bool is_empty () const { return m_type == reinterpret_cast<tree> (2); }
tree m_type;
const svalue *m_parent_svalue;
@@ -760,7 +828,182 @@ is_a_helper <const sub_svalue *>::test (const svalue *sval)
template <> struct default_hash_traits<sub_svalue::key_t>
: public member_function_hash_traits<sub_svalue::key_t>
{
- static const bool empty_zero_p = true;
+ static const bool empty_zero_p = false;
+};
+
+namespace ana {
+
+/* Concrete subclass of svalue representing repeating an inner svalue
+ (possibly not a whole number of times) to fill a larger region of
+ type TYPE of size OUTER_SIZE bytes. */
+
+class repeated_svalue : public svalue
+{
+public:
+ /* A support class for uniquifying instances of repeated_svalue. */
+ struct key_t
+ {
+ key_t (tree type,
+ const svalue *outer_size,
+ const svalue *inner_svalue)
+ : m_type (type), m_outer_size (outer_size), m_inner_svalue (inner_svalue)
+ {}
+
+ hashval_t hash () const
+ {
+ inchash::hash hstate;
+ hstate.add_ptr (m_type);
+ hstate.add_ptr (m_outer_size);
+ hstate.add_ptr (m_inner_svalue);
+ return hstate.end ();
+ }
+
+ bool operator== (const key_t &other) const
+ {
+ return (m_type == other.m_type
+ && m_outer_size == other.m_outer_size
+ && m_inner_svalue == other.m_inner_svalue);
+ }
+
+ void mark_deleted () { m_type = reinterpret_cast<tree> (1); }
+ void mark_empty () { m_type = reinterpret_cast<tree> (2); }
+ bool is_deleted () const { return m_type == reinterpret_cast<tree> (1); }
+ bool is_empty () const { return m_type == reinterpret_cast<tree> (2); }
+
+ tree m_type;
+ const svalue *m_outer_size;
+ const svalue *m_inner_svalue;
+ };
+ repeated_svalue (tree type,
+ const svalue *outer_size,
+ const svalue *inner_svalue);
+
+ enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_REPEATED; }
+ const repeated_svalue *dyn_cast_repeated_svalue () const FINAL OVERRIDE
+ {
+ return this;
+ }
+
+ void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+ void accept (visitor *v) const FINAL OVERRIDE;
+
+ const svalue *get_outer_size () const { return m_outer_size; }
+ const svalue *get_inner_svalue () const { return m_inner_svalue; }
+
+ bool all_zeroes_p () const FINAL OVERRIDE;
+
+ const svalue *
+ maybe_fold_bits_within (tree type,
+ const bit_range &subrange,
+ region_model_manager *mgr) const FINAL OVERRIDE;
+
+ private:
+ const svalue *m_outer_size;
+ const svalue *m_inner_svalue;
+};
+
+} // namespace ana
+
+template <>
+template <>
+inline bool
+is_a_helper <const repeated_svalue *>::test (const svalue *sval)
+{
+ return sval->get_kind () == SK_REPEATED;
+}
+
+template <> struct default_hash_traits<repeated_svalue::key_t>
+: public member_function_hash_traits<repeated_svalue::key_t>
+{
+ static const bool empty_zero_p = false;
+};
+
+namespace ana {
+
+/* A range of bits/bytes within another svalue
+ e.g. bytes 5-39 of INITIAL_SVALUE(R).
+ These can be generated for prefixes and suffixes when part of a binding
+ is clobbered, so that we don't lose too much information. */
+
+class bits_within_svalue : public svalue
+{
+public:
+ /* A support class for uniquifying instances of bits_within_svalue. */
+ struct key_t
+ {
+ key_t (tree type,
+ const bit_range &bits,
+ const svalue *inner_svalue)
+ : m_type (type), m_bits (bits), m_inner_svalue (inner_svalue)
+ {}
+
+ hashval_t hash () const
+ {
+ inchash::hash hstate;
+ hstate.add_ptr (m_type);
+ hstate.add_ptr (m_inner_svalue);
+ return hstate.end ();
+ }
+
+ bool operator== (const key_t &other) const
+ {
+ return (m_type == other.m_type
+ && m_bits == other.m_bits
+ && m_inner_svalue == other.m_inner_svalue);
+ }
+
+ void mark_deleted () { m_type = reinterpret_cast<tree> (1); }
+ void mark_empty () { m_type = reinterpret_cast<tree> (2); }
+ bool is_deleted () const { return m_type == reinterpret_cast<tree> (1); }
+ bool is_empty () const { return m_type == reinterpret_cast<tree> (2); }
+
+ tree m_type;
+ bit_range m_bits;
+ const svalue *m_inner_svalue;
+ };
+ bits_within_svalue (tree type,
+ const bit_range &bits,
+ const svalue *inner_svalue);
+
+ enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_BITS_WITHIN; }
+ const bits_within_svalue *
+ dyn_cast_bits_within_svalue () const FINAL OVERRIDE
+ {
+ return this;
+ }
+
+ void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+ void accept (visitor *v) const FINAL OVERRIDE;
+ bool implicitly_live_p (const svalue_set *,
+ const region_model *) const FINAL OVERRIDE;
+
+ const bit_range &get_bits () const { return m_bits; }
+ const svalue *get_inner_svalue () const { return m_inner_svalue; }
+
+ const svalue *
+ maybe_fold_bits_within (tree type,
+ const bit_range &subrange,
+ region_model_manager *mgr) const FINAL OVERRIDE;
+
+ private:
+ const bit_range m_bits;
+ const svalue *m_inner_svalue;
+};
+
+} // namespace ana
+
+template <>
+template <>
+inline bool
+is_a_helper <const bits_within_svalue *>::test (const svalue *sval)
+{
+ return sval->get_kind () == SK_BITS_WITHIN;
+}
+
+template <> struct default_hash_traits<bits_within_svalue::key_t>
+: public member_function_hash_traits<bits_within_svalue::key_t>
+{
+ static const bool empty_zero_p = false;
};
namespace ana {
@@ -840,7 +1083,7 @@ public:
template <>
template <>
inline bool
-is_a_helper <placeholder_svalue *>::test (svalue *sval)
+is_a_helper <const placeholder_svalue *>::test (const svalue *sval)
{
return sval->get_kind () == SK_PLACEHOLDER;
}
@@ -886,9 +1129,9 @@ public:
}
void mark_deleted () { m_type = reinterpret_cast<tree> (1); }
- void mark_empty () { m_type = NULL_TREE; }
+ void mark_empty () { m_type = reinterpret_cast<tree> (2); }
bool is_deleted () const { return m_type == reinterpret_cast<tree> (1); }
- bool is_empty () const { return m_type == NULL_TREE; }
+ bool is_empty () const { return m_type == reinterpret_cast<tree> (2); }
tree m_type;
function_point m_point;
@@ -911,6 +1154,8 @@ public:
m_point (point.get_function_point ()),
m_base_sval (base_sval), m_iter_sval (iter_sval)
{
+ gcc_assert (base_sval->can_have_associated_state_p ());
+ gcc_assert (iter_sval->can_have_associated_state_p ());
}
enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_WIDENING; }
@@ -942,7 +1187,7 @@ public:
template <>
template <>
inline bool
-is_a_helper <widening_svalue *>::test (svalue *sval)
+is_a_helper <const widening_svalue *>::test (const svalue *sval)
{
return sval->get_kind () == SK_WIDENING;
}
@@ -950,7 +1195,7 @@ is_a_helper <widening_svalue *>::test (svalue *sval)
template <> struct default_hash_traits<widening_svalue::key_t>
: public member_function_hash_traits<widening_svalue::key_t>
{
- static const bool empty_zero_p = true;
+ static const bool empty_zero_p = false;
};
namespace ana {
@@ -998,9 +1243,9 @@ public:
}
void mark_deleted () { m_type = reinterpret_cast<tree> (1); }
- void mark_empty () { m_type = NULL_TREE; }
+ void mark_empty () { m_type = reinterpret_cast<tree> (2); }
bool is_deleted () const { return m_type == reinterpret_cast<tree> (1); }
- bool is_empty () const { return m_type == NULL_TREE; }
+ bool is_empty () const { return m_type == reinterpret_cast<tree> (2); }
tree m_type;
const binding_map *m_map_ptr;
@@ -1027,6 +1272,11 @@ public:
return key_t (get_type (), &m_map);
}
+ const svalue *
+ maybe_fold_bits_within (tree type,
+ const bit_range &subrange,
+ region_model_manager *mgr) const FINAL OVERRIDE;
+
private:
static complexity calc_complexity (const binding_map &map);
@@ -1038,7 +1288,7 @@ public:
template <>
template <>
inline bool
-is_a_helper <compound_svalue *>::test (svalue *sval)
+is_a_helper <const compound_svalue *>::test (const svalue *sval)
{
return sval->get_kind () == SK_COMPOUND;
}
@@ -1046,7 +1296,7 @@ is_a_helper <compound_svalue *>::test (svalue *sval)
template <> struct default_hash_traits<compound_svalue::key_t>
: public member_function_hash_traits<compound_svalue::key_t>
{
- static const bool empty_zero_p = true;
+ static const bool empty_zero_p = false;
};
namespace ana {
@@ -1071,8 +1321,6 @@ namespace ana {
class conjured_svalue : public svalue
{
public:
- typedef binding_map::iterator_t iterator_t;
-
/* A support class for uniquifying instances of conjured_svalue. */
struct key_t
{
@@ -1140,7 +1388,7 @@ public:
template <>
template <>
inline bool
-is_a_helper <conjured_svalue *>::test (svalue *sval)
+is_a_helper <const conjured_svalue *>::test (const svalue *sval)
{
return sval->get_kind () == SK_CONJURED;
}
@@ -1151,4 +1399,140 @@ template <> struct default_hash_traits<conjured_svalue::key_t>
static const bool empty_zero_p = true;
};
+namespace ana {
+
+/* An output from a deterministic asm stmt, where we want to identify a
+ particular unknown value, rather than resorting to the unknown_svalue
+ singleton.
+
+ Comparisons of variables that share the same asm_output_svalue are known
+ to be equal, even if we don't know what the value is. */
+
+class asm_output_svalue : public svalue
+{
+public:
+ /* Imposing an upper limit and using a (small) array allows key_t
+ to avoid memory management. */
+ static const unsigned MAX_INPUTS = 2;
+
+ /* A support class for uniquifying instances of asm_output_svalue. */
+ struct key_t
+ {
+ key_t (tree type,
+ const char *asm_string,
+ unsigned output_idx,
+ const vec<const svalue *> &inputs)
+ : m_type (type), m_asm_string (asm_string), m_output_idx (output_idx),
+ m_num_inputs (inputs.length ())
+ {
+ gcc_assert (inputs.length () <= MAX_INPUTS);
+ for (unsigned i = 0; i < m_num_inputs; i++)
+ m_input_arr[i] = inputs[i];
+ }
+
+ hashval_t hash () const
+ {
+ inchash::hash hstate;
+ hstate.add_ptr (m_type);
+ /* We don't bother hashing m_asm_string. */
+ hstate.add_int (m_output_idx);
+ for (unsigned i = 0; i < m_num_inputs; i++)
+ hstate.add_ptr (m_input_arr[i]);
+ return hstate.end ();
+ }
+
+ bool operator== (const key_t &other) const
+ {
+ if (!(m_type == other.m_type
+ && 0 == (strcmp (m_asm_string, other.m_asm_string))
+ && m_output_idx == other.m_output_idx
+ && m_num_inputs == other.m_num_inputs))
+ return false;
+ for (unsigned i = 0; i < m_num_inputs; i++)
+ if (m_input_arr[i] != other.m_input_arr[i])
+ return false;
+ return true;
+ }
+
+ /* Use m_asm_string to mark empty/deleted, as m_type can be NULL for
+ legitimate instances. */
+ void mark_deleted () { m_asm_string = reinterpret_cast<const char *> (1); }
+ void mark_empty () { m_asm_string = NULL; }
+ bool is_deleted () const
+ {
+ return m_asm_string == reinterpret_cast<const char *> (1);
+ }
+ bool is_empty () const { return m_asm_string == NULL; }
+
+ tree m_type;
+ const char *m_asm_string;
+ unsigned m_output_idx;
+ unsigned m_num_inputs;
+ const svalue *m_input_arr[MAX_INPUTS];
+ };
+
+ asm_output_svalue (tree type,
+ const char *asm_string,
+ unsigned output_idx,
+ unsigned num_outputs,
+ const vec<const svalue *> &inputs)
+ : svalue (complexity::from_vec_svalue (inputs), type),
+ m_asm_string (asm_string),
+ m_output_idx (output_idx),
+ m_num_outputs (num_outputs),
+ m_num_inputs (inputs.length ())
+ {
+ gcc_assert (inputs.length () <= MAX_INPUTS);
+ for (unsigned i = 0; i < m_num_inputs; i++)
+ m_input_arr[i] = inputs[i];
+ }
+
+ enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_ASM_OUTPUT; }
+ const asm_output_svalue *
+ dyn_cast_asm_output_svalue () const FINAL OVERRIDE
+ {
+ return this;
+ }
+
+ void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+ void accept (visitor *v) const FINAL OVERRIDE;
+
+ const char *get_asm_string () const { return m_asm_string; }
+ unsigned get_output_idx () const { return m_output_idx; }
+ unsigned get_num_inputs () const { return m_num_inputs; }
+ const svalue *get_input (unsigned idx) const { return m_input_arr[idx]; }
+
+ private:
+ void dump_input (pretty_printer *pp,
+ unsigned input_idx,
+ const svalue *sval,
+ bool simple) const;
+ unsigned input_idx_to_asm_idx (unsigned input_idx) const;
+
+ const char *m_asm_string;
+ unsigned m_output_idx;
+
+ /* We capture this so that we can offset the input indices
+ to match the %0, %1, %2 in the asm_string when dumping. */
+ unsigned m_num_outputs;
+
+ unsigned m_num_inputs;
+ const svalue *m_input_arr[MAX_INPUTS];
+};
+
+} // namespace ana
+
+template <>
+template <>
+inline bool
+is_a_helper <const asm_output_svalue *>::test (const svalue *sval)
+{
+ return sval->get_kind () == SK_ASM_OUTPUT;
+}
+
+template <> struct default_hash_traits<asm_output_svalue::key_t>
+: public member_function_hash_traits<asm_output_svalue::key_t>
+{
+ static const bool empty_zero_p = true;
+};
#endif /* GCC_ANALYZER_SVALUE_H */