Diffstat (limited to 'gcc/analyzer')
-rw-r--r--  gcc/analyzer/analyzer.h                  |    5
-rw-r--r--  gcc/analyzer/engine.cc                   |    5
-rw-r--r--  gcc/analyzer/program-state.cc            |    1
-rw-r--r--  gcc/analyzer/region-model-impl-calls.cc  |   39
-rw-r--r--  gcc/analyzer/region-model-manager.cc     |  313
-rw-r--r--  gcc/analyzer/region-model.cc             |   72
-rw-r--r--  gcc/analyzer/region-model.h              |   27
-rw-r--r--  gcc/analyzer/region.cc                   |  230
-rw-r--r--  gcc/analyzer/region.h                    |  125
-rw-r--r--  gcc/analyzer/store.cc                    |  653
-rw-r--r--  gcc/analyzer/store.h                     |  157
-rw-r--r--  gcc/analyzer/svalue.cc                   |  381
-rw-r--r--  gcc/analyzer/svalue.h                    |  262
13 files changed, 1808 insertions(+), 462 deletions(-)
diff --git a/gcc/analyzer/analyzer.h b/gcc/analyzer/analyzer.h
index f06b68c..02830e4 100644
--- a/gcc/analyzer/analyzer.h
+++ b/gcc/analyzer/analyzer.h
@@ -46,6 +46,8 @@ class svalue;
class unaryop_svalue;
class binop_svalue;
class sub_svalue;
+ class repeated_svalue;
+ class bits_within_svalue;
class unmergeable_svalue;
class placeholder_svalue;
class widening_svalue;
@@ -60,6 +62,7 @@ class region;
class symbolic_region;
class element_region;
class offset_region;
+ class sized_region;
class cast_region;
class field_region;
class string_region;
@@ -147,6 +150,8 @@ typedef offset_int byte_size_t;
extern bool int_size_in_bits (const_tree type, bit_size_t *out);
+extern tree get_field_at_bit_offset (tree record_type, bit_offset_t bit_offset);
+
/* The location of a region expressed as an offset relative to a
base region. */
diff --git a/gcc/analyzer/engine.cc b/gcc/analyzer/engine.cc
index f322fdb..4456d9b 100644
--- a/gcc/analyzer/engine.cc
+++ b/gcc/analyzer/engine.cc
@@ -2275,6 +2275,7 @@ exploded_graph::get_or_create_node (const program_point &point,
if (pruned_state.can_merge_with_p (existing_state, point,
&merged_state))
{
+ merged_state.validate (m_ext_state);
if (logger)
logger->log ("merging new state with that of EN: %i",
existing_enode->m_index);
@@ -2794,6 +2795,7 @@ maybe_process_run_of_before_supernode_enodes (exploded_node *enode)
items.quick_push (it);
const program_state &state = iter_enode->get_state ();
program_state *next_state = &it->m_processed_state;
+ next_state->validate (m_ext_state);
const program_point &iter_point = iter_enode->get_point ();
if (const superedge *iter_sedge = iter_point.get_from_edge ())
{
@@ -2807,6 +2809,7 @@ maybe_process_run_of_before_supernode_enodes (exploded_node *enode)
next_state->m_region_model->update_for_phis
(snode, last_cfg_superedge, &ctxt);
}
+ next_state->validate (m_ext_state);
}
/* Attempt to partition the items into a set of merged states.
@@ -2823,10 +2826,12 @@ maybe_process_run_of_before_supernode_enodes (exploded_node *enode)
unsigned iter_merger_idx;
FOR_EACH_VEC_ELT (merged_states, iter_merger_idx, merged_state)
{
+ merged_state->validate (m_ext_state);
program_state merge (m_ext_state);
if (it_state.can_merge_with_p (*merged_state, next_point, &merge))
{
*merged_state = merge;
+ merged_state->validate (m_ext_state);
it->m_merger_idx = iter_merger_idx;
if (logger)
logger->log ("reusing merger state %i for item %i (EN: %i)",
diff --git a/gcc/analyzer/program-state.cc b/gcc/analyzer/program-state.cc
index 67dd785..6d60c04 100644
--- a/gcc/analyzer/program-state.cc
+++ b/gcc/analyzer/program-state.cc
@@ -1142,6 +1142,7 @@ program_state::validate (const extrinsic_state &ext_state) const
#endif
gcc_assert (m_checker_states.length () == ext_state.get_num_checkers ());
+ m_region_model->validate ();
}
static void
diff --git a/gcc/analyzer/region-model-impl-calls.cc b/gcc/analyzer/region-model-impl-calls.cc
index 099520a..466d397 100644
--- a/gcc/analyzer/region-model-impl-calls.cc
+++ b/gcc/analyzer/region-model-impl-calls.cc
@@ -389,36 +389,15 @@ region_model::impl_call_memset (const call_details &cd)
const region *dest_reg = deref_rvalue (dest_sval, cd.get_arg_tree (0),
cd.get_ctxt ());
- if (tree num_bytes = num_bytes_sval->maybe_get_constant ())
- {
- /* "memset" of zero size is a no-op. */
- if (zerop (num_bytes))
- return true;
-
- /* Set with known amount. */
- byte_size_t reg_size;
- if (dest_reg->get_byte_size (&reg_size))
- {
- /* Check for an exact size match. */
- if (reg_size == wi::to_offset (num_bytes))
- {
- if (tree cst = fill_value_sval->maybe_get_constant ())
- {
- if (zerop (cst))
- {
- zero_fill_region (dest_reg);
- return true;
- }
- }
- }
- }
- }
-
- check_for_writable_region (dest_reg, cd.get_ctxt ());
-
- /* Otherwise, mark region's contents as unknown. */
- mark_region_as_unknown (dest_reg, cd.get_uncertainty ());
- return false;
+ const svalue *fill_value_u8
+ = m_mgr->get_or_create_cast (unsigned_char_type_node, fill_value_sval);
+
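+  /* Model "memset (DST, VAL, N)" by filling the first N bytes of
+     *DST with repeated copies of (unsigned char)VAL, e.g. so that
+     "memset (&buf, 0, sizeof (buf))" zero-fills all of BUF without
+     enumerating per-byte bindings.  */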
+ const region *sized_dest_reg = m_mgr->get_sized_region (dest_reg,
+ NULL_TREE,
+ num_bytes_sval);
+ check_for_writable_region (sized_dest_reg, cd.get_ctxt ());
+ fill_region (sized_dest_reg, fill_value_u8);
+ return true;
}
/* Handle the on_call_pre part of "operator new". */
diff --git a/gcc/analyzer/region-model-manager.cc b/gcc/analyzer/region-model-manager.cc
index 1ee6663..55acb90 100644
--- a/gcc/analyzer/region-model-manager.cc
+++ b/gcc/analyzer/region-model-manager.cc
@@ -477,7 +477,7 @@ maybe_undo_optimize_bit_field_compare (tree type,
bound_bits = bit_range (BITS_PER_UNIT - bits.get_next_bit_offset (),
bits.m_size_in_bits);
const concrete_binding *conc
- = get_store_manager ()->get_concrete_binding (bound_bits, BK_direct);
+ = get_store_manager ()->get_concrete_binding (bound_bits);
const svalue *sval = map.get (conc);
if (!sval)
return NULL;
@@ -686,13 +686,13 @@ region_model_manager::maybe_fold_sub_svalue (tree type,
return get_or_create_cast (type, char_sval);
}
- /* SUB(INIT(r)).FIELD -> INIT(r.FIELD)
- i.e.
- Subvalue(InitialValue(R1), FieldRegion(R2, F))
- -> InitialValue(FieldRegion(R1, F)). */
if (const initial_svalue *init_sval
- = parent_svalue->dyn_cast_initial_svalue ())
+ = parent_svalue->dyn_cast_initial_svalue ())
{
+ /* SUB(INIT(r)).FIELD -> INIT(r.FIELD)
+ i.e.
+ Subvalue(InitialValue(R1), FieldRegion(R2, F))
+ -> InitialValue(FieldRegion(R1, F)). */
if (const field_region *field_reg = subregion->dyn_cast_field_region ())
{
const region *field_reg_new
@@ -700,8 +700,24 @@ region_model_manager::maybe_fold_sub_svalue (tree type,
field_reg->get_field ());
return get_or_create_initial_value (field_reg_new);
}
+  /* SUB(INIT(r))[ELEMENT] -> INIT(r[ELEMENT])
+ i.e.
+ Subvalue(InitialValue(R1), ElementRegion(R2, IDX))
+ -> InitialValue(ElementRegion(R1, IDX)). */
+      if (const element_region *element_reg
+	    = subregion->dyn_cast_element_region ())
+ {
+ const region *element_reg_new
+ = get_element_region (init_sval->get_region (),
+ element_reg->get_type (),
+ element_reg->get_index ());
+ return get_or_create_initial_value (element_reg_new);
+ }
}
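+  /* SUB(REPEATED(OUTER_SIZE, INNER_SVAL), SUBREGION)
+       -> CAST(TYPE, INNER_SVAL),
+     e.g. any char within a buffer filled via
+     "memset (&buf, 'A', sizeof (buf))" is (char)'A'.  */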
+ if (const repeated_svalue *repeated_sval
+ = parent_svalue->dyn_cast_repeated_svalue ())
+ return get_or_create_cast (type, repeated_sval->get_inner_svalue ());
+
return NULL;
}
@@ -727,6 +743,255 @@ region_model_manager::get_or_create_sub_svalue (tree type,
return sub_sval;
}
+/* Subroutine of region_model_manager::get_or_create_repeated_svalue.
+ Return a folded svalue, or NULL. */
+
+const svalue *
+region_model_manager::maybe_fold_repeated_svalue (tree type,
+ const svalue *outer_size,
+ const svalue *inner_svalue)
+{
+ /* If INNER_SVALUE is the same size as OUTER_SIZE,
+ turn into simply a cast. */
+ if (tree cst_outer_num_bytes = outer_size->maybe_get_constant ())
+ {
+ HOST_WIDE_INT num_bytes_inner_svalue
+ = int_size_in_bytes (inner_svalue->get_type ());
+ if (num_bytes_inner_svalue != -1)
+ if (num_bytes_inner_svalue
+ == (HOST_WIDE_INT)tree_to_uhwi (cst_outer_num_bytes))
+ {
+ if (type)
+ return get_or_create_cast (type, inner_svalue);
+ else
+ return inner_svalue;
+ }
+ }
+
+ /* Handle zero-fill of a specific type. */
+ if (tree cst = inner_svalue->maybe_get_constant ())
+ if (zerop (cst) && type)
+ return get_or_create_cast (type, inner_svalue);
+
+ return NULL;
+}
+
+/* Return the svalue * of type TYPE in which INNER_SVALUE is repeated
+ enough times to be of size OUTER_SIZE, creating it if necessary.
+ e.g. for filling buffers with a constant value. */
+
+const svalue *
+region_model_manager::get_or_create_repeated_svalue (tree type,
+ const svalue *outer_size,
+ const svalue *inner_svalue)
+{
+ if (const svalue *folded
+ = maybe_fold_repeated_svalue (type, outer_size, inner_svalue))
+ return folded;
+
+ repeated_svalue::key_t key (type, outer_size, inner_svalue);
+ if (repeated_svalue **slot = m_repeated_values_map.get (key))
+ return *slot;
+ repeated_svalue *repeated_sval
+ = new repeated_svalue (type, outer_size, inner_svalue);
+ RETURN_UNKNOWN_IF_TOO_COMPLEX (repeated_sval);
+ m_repeated_values_map.put (key, repeated_sval);
+ return repeated_sval;
+}
+
+/* Attempt to get the bit_range for FIELD within a RECORD_TYPE.
+ Return true and write the result to OUT if successful.
+ Return false otherwise. */
+
+static bool
+get_bit_range_for_field (tree field, bit_range *out)
+{
+ bit_size_t bit_size;
+ if (!int_size_in_bits (TREE_TYPE (field), &bit_size))
+ return false;
+ int field_bit_offset = int_bit_position (field);
+ *out = bit_range (field_bit_offset, bit_size);
+ return true;
+}
+
+/* Attempt to get the byte_range for FIELD within a RECORD_TYPE.
+ Return true and write the result to OUT if successful.
+ Return false otherwise. */
+
+static bool
+get_byte_range_for_field (tree field, byte_range *out)
+{
+ bit_range field_bits (0, 0);
+ if (!get_bit_range_for_field (field, &field_bits))
+ return false;
+ return field_bits.as_byte_range (out);
+}
+
+/* Attempt to determine if there is a specific field within RECORD_TYPE
+ at BYTES. If so, return it, and write the location of BYTES relative
+ to the field to *OUT_RANGE_WITHIN_FIELD.
+ Otherwise, return NULL_TREE.
+ For example, given:
+     struct foo { uint32 a; uint32 b; };
+   and
+     bytes = {bytes 6-7} (of foo)
+   we have bytes 2-3 of field b.  */
+
+static tree
+get_field_at_byte_range (tree record_type, const byte_range &bytes,
+ byte_range *out_range_within_field)
+{
+ bit_offset_t bit_offset = bytes.m_start_byte_offset * BITS_PER_UNIT;
+
+ tree field = get_field_at_bit_offset (record_type, bit_offset);
+ if (!field)
+ return NULL_TREE;
+
+  byte_range field_bytes (0, 0);
+ if (!get_byte_range_for_field (field, &field_bytes))
+ return NULL_TREE;
+
+ /* Is BYTES fully within field_bytes? */
+  byte_range bytes_within_field (0, 0);
+ if (!field_bytes.contains_p (bytes, &bytes_within_field))
+ return NULL_TREE;
+
+ *out_range_within_field = bytes_within_field;
+ return field;
+}
+
+/* Subroutine of region_model_manager::get_or_create_bits_within.
+ Return a folded svalue, or NULL. */
+
+const svalue *
+region_model_manager::maybe_fold_bits_within_svalue (tree type,
+ const bit_range &bits,
+ const svalue *inner_svalue)
+{
+ tree inner_type = inner_svalue->get_type ();
+ /* Fold:
+       BITS_WITHIN ((0, sizeof (VAL)), VAL)
+ to:
+ CAST(TYPE, VAL). */
+ if (bits.m_start_bit_offset == 0 && inner_type)
+ {
+ bit_size_t inner_type_size;
+ if (int_size_in_bits (inner_type, &inner_type_size))
+ if (inner_type_size == bits.m_size_in_bits)
+ {
+ if (type)
+ return get_or_create_cast (type, inner_svalue);
+ else
+ return inner_svalue;
+ }
+ }
+
+ /* Kind-specific folding. */
+ if (const svalue *sval
+ = inner_svalue->maybe_fold_bits_within (type, bits, this))
+ return sval;
+
+  byte_range bytes (0, 0);
+ if (bits.as_byte_range (&bytes) && inner_type)
+ switch (TREE_CODE (inner_type))
+ {
+ default:
+ break;
+ case ARRAY_TYPE:
+ {
+ /* Fold:
+ BITS_WITHIN (range, KIND(REG))
+ to:
+ BITS_WITHIN (range - offsetof(ELEMENT), KIND(REG.ELEMENT))
+	   if RANGE is a byte-range fully within one ELEMENT.  */
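+	/* e.g. for a 4-byte element type, a read of bytes 4-7 of
+	   INIT_VAL (ARR) becomes bits 0-31 of INIT_VAL (ARR[1]).  */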
+ tree element_type = TREE_TYPE (inner_type);
+ HOST_WIDE_INT element_byte_size
+ = int_size_in_bytes (element_type);
+ if (element_byte_size > 0)
+ {
+ HOST_WIDE_INT start_idx
+ = (bytes.get_start_byte_offset ().to_shwi ()
+ / element_byte_size);
+ HOST_WIDE_INT last_idx
+ = (bytes.get_last_byte_offset ().to_shwi ()
+ / element_byte_size);
+ if (start_idx == last_idx)
+ {
+ if (const initial_svalue *initial_sval
+ = inner_svalue->dyn_cast_initial_svalue ())
+ {
+ bit_offset_t start_of_element
+ = start_idx * element_byte_size * BITS_PER_UNIT;
+ bit_range bits_within_element
+ (bits.m_start_bit_offset - start_of_element,
+ bits.m_size_in_bits);
+ const svalue *idx_sval
+ = get_or_create_int_cst (integer_type_node, start_idx);
+ const region *element_reg =
+ get_element_region (initial_sval->get_region (),
+ element_type, idx_sval);
+ const svalue *element_reg_sval
+ = get_or_create_initial_value (element_reg);
+ return get_or_create_bits_within (type,
+ bits_within_element,
+ element_reg_sval);
+ }
+ }
+ }
+ }
+ break;
+ case RECORD_TYPE:
+ {
+	/* Fold:
+	     BITS_WITHIN (range, KIND(REG))
+	   to:
+	     BITS_WITHIN (range - offsetof(FIELD), KIND(REG.FIELD))
+	   if RANGE is fully within FIELD.  */
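+	/* e.g. with the "struct foo" example from
+	   get_field_at_byte_range above, bytes 6-7 of INIT_VAL (F)
+	   become bytes 2-3 of INIT_VAL (F.b).  */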
+ byte_range bytes_within_field (0, 0);
+ if (tree field = get_field_at_byte_range (inner_type, bytes,
+ &bytes_within_field))
+ {
+ if (const initial_svalue *initial_sval
+ = inner_svalue->dyn_cast_initial_svalue ())
+ {
+ const region *field_reg =
+ get_field_region (initial_sval->get_region (), field);
+ const svalue *initial_reg_sval
+ = get_or_create_initial_value (field_reg);
+ return get_or_create_bits_within
+ (type,
+ bytes_within_field.as_bit_range (),
+ initial_reg_sval);
+ }
+ }
+ }
+ break;
+ }
+ return NULL;
+}
+
+/* Return the svalue * of type TYPE for extracting BITS from INNER_SVALUE,
+ creating it if necessary. */
+
+const svalue *
+region_model_manager::get_or_create_bits_within (tree type,
+ const bit_range &bits,
+ const svalue *inner_svalue)
+{
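+  /* e.g. extracting bytes 4-7 of an 8-byte value INIT_VAL (X) yields
+     BITS_WITHIN (bits 32-63, INIT_VAL (X)) (assuming 8-bit units),
+     unless the extraction can be folded away.  */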
+ if (const svalue *folded
+ = maybe_fold_bits_within_svalue (type, bits, inner_svalue))
+ return folded;
+
+ bits_within_svalue::key_t key (type, bits, inner_svalue);
+ if (bits_within_svalue **slot = m_bits_within_values_map.get (key))
+ return *slot;
+ bits_within_svalue *bits_within_sval
+ = new bits_within_svalue (type, bits, inner_svalue);
+ RETURN_UNKNOWN_IF_TOO_COMPLEX (bits_within_sval);
+ m_bits_within_values_map.put (key, bits_within_sval);
+ return bits_within_sval;
+}
+
/* Return the svalue * that decorates ARG as being unmergeable,
creating it if necessary. */
@@ -966,6 +1231,38 @@ region_model_manager::get_offset_region (const region *parent,
return offset_reg;
}
+/* Return the region that describes accessing the subregion of type
+ TYPE of size BYTE_SIZE_SVAL within PARENT, creating it if necessary. */
+
+const region *
+region_model_manager::get_sized_region (const region *parent,
+ tree type,
+ const svalue *byte_size_sval)
+{
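+  /* e.g. impl_call_memset uses a sized_region to model a write to
+     just the first NUM_BYTES bytes of *DST.  */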
+ if (byte_size_sval->get_type () != size_type_node)
+ byte_size_sval = get_or_create_cast (size_type_node, byte_size_sval);
+
+ /* If PARENT is already that size, return it. */
+ const svalue *parent_byte_size_sval = parent->get_byte_size_sval (this);
+ if (tree parent_size_cst = parent_byte_size_sval->maybe_get_constant ())
+ if (tree size_cst = byte_size_sval->maybe_get_constant ())
+ {
+ tree comparison
+ = fold_binary (EQ_EXPR, boolean_type_node, parent_size_cst, size_cst);
+ if (comparison == boolean_true_node)
+ return parent;
+ }
+
+ sized_region::key_t key (parent, type, byte_size_sval);
+ if (sized_region *reg = m_sized_regions.get (key))
+ return reg;
+
+ sized_region *sized_reg
+ = new sized_region (alloc_region_id (), parent, type, byte_size_sval);
+ m_sized_regions.put (key, sized_reg);
+ return sized_reg;
+}
+
/* Return the region that describes accessing PARENT_REGION as if
it were of type TYPE, creating it if necessary. */
@@ -1180,6 +1477,9 @@ region_model_manager::log_stats (logger *logger, bool show_objs) const
log_uniq_map (logger, show_objs, "unaryop_svalue", m_unaryop_values_map);
log_uniq_map (logger, show_objs, "binop_svalue", m_binop_values_map);
log_uniq_map (logger, show_objs, "sub_svalue", m_sub_values_map);
+ log_uniq_map (logger, show_objs, "repeated_svalue", m_repeated_values_map);
+ log_uniq_map (logger, show_objs, "bits_within_svalue",
+ m_bits_within_values_map);
log_uniq_map (logger, show_objs, "unmergeable_svalue",
m_unmergeable_values_map);
log_uniq_map (logger, show_objs, "widening_svalue", m_widening_values_map);
@@ -1198,6 +1498,7 @@ region_model_manager::log_stats (logger *logger, bool show_objs) const
log_uniq_map (logger, show_objs, "field_region", m_field_regions);
log_uniq_map (logger, show_objs, "element_region", m_element_regions);
log_uniq_map (logger, show_objs, "offset_region", m_offset_regions);
+ log_uniq_map (logger, show_objs, "sized_region", m_sized_regions);
log_uniq_map (logger, show_objs, "cast_region", m_cast_regions);
log_uniq_map (logger, show_objs, "frame_region", m_frame_regions);
log_uniq_map (logger, show_objs, "symbolic_region", m_symbolic_regions);
diff --git a/gcc/analyzer/region-model.cc b/gcc/analyzer/region-model.cc
index ee11e82..4fb6bc9 100644
--- a/gcc/analyzer/region-model.cc
+++ b/gcc/analyzer/region-model.cc
@@ -393,6 +393,14 @@ region_model::debug () const
dump (true);
}
+/* Assert that this object is valid. */
+
+void
+region_model::validate () const
+{
+ m_store.validate ();
+}
+
/* Canonicalize the store and constraints, to maximize the chance of
equality between region_model instances. */
@@ -847,11 +855,9 @@ region_model::on_assignment (const gassign *assign, region_model_context *ctxt)
case STRING_CST:
{
/* e.g. "struct s2 x = {{'A', 'B', 'C', 'D'}};". */
- /* Add a default binding, rather than a direct one, so that array
- access will "inherit" the individual chars. */
const svalue *rhs_sval = get_rvalue (rhs1, ctxt);
m_store.set_value (m_mgr->get_store_manager(), lhs_reg, rhs_sval,
- BK_default, ctxt ? ctxt->get_uncertainty () : NULL);
+ ctxt ? ctxt->get_uncertainty () : NULL);
}
break;
}
@@ -1662,8 +1668,7 @@ region_model::get_initial_value_for_global (const region *reg) const
{
/* Get the value for REG within base_reg_init. */
binding_cluster c (base_reg);
- c.bind (m_mgr->get_store_manager (), base_reg, base_reg_init,
- BK_direct);
+ c.bind (m_mgr->get_store_manager (), base_reg, base_reg_init);
const svalue *sval
= c.get_any_binding (m_mgr->get_store_manager (), reg);
if (sval)
@@ -1854,45 +1859,7 @@ region_model::get_rvalue_for_bits (tree type,
const bit_range &bits) const
{
const svalue *sval = get_store_value (reg);
- if (const compound_svalue *compound_sval = sval->dyn_cast_compound_svalue ())
- {
- const binding_map &map = compound_sval->get_map ();
- binding_map result_map;
- for (auto iter : map)
- {
- const binding_key *key = iter.first;
- if (const concrete_binding *conc_key
- = key->dyn_cast_concrete_binding ())
- {
- /* Ignore concrete bindings outside BITS. */
- if (!conc_key->get_bit_range ().intersects_p (bits))
- continue;
- if ((conc_key->get_start_bit_offset ()
- < bits.get_start_bit_offset ())
- || (conc_key->get_next_bit_offset ()
- > bits.get_next_bit_offset ()))
- {
- /* If we have any concrete keys that aren't fully within BITS,
- then bail out. */
- return m_mgr->get_or_create_unknown_svalue (type);
- }
- const concrete_binding *offset_conc_key
- = m_mgr->get_store_manager ()->get_concrete_binding
- (conc_key->get_start_bit_offset ()
- - bits.get_start_bit_offset (),
- conc_key->get_size_in_bits (),
- conc_key->get_kind ());
- const svalue *sval = iter.second;
- result_map.put (offset_conc_key, sval);
- }
- else
- /* If we have any symbolic keys we can't get it as bits. */
- return m_mgr->get_or_create_unknown_svalue (type);
- }
- return m_mgr->get_or_create_compound_svalue (type, result_map);
- }
-
- return m_mgr->get_or_create_unknown_svalue (type);
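+  /* Delegate to the svalue machinery: get_or_create_bits_within can
+     extract bit ranges from arbitrary svalues (with folding), rather
+     than special-casing compound svalues here.  */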
+ return m_mgr->get_or_create_bits_within (type, bits, sval);
}
/* A subclass of pending_diagnostic for complaining about writes to
@@ -2035,6 +2002,10 @@ region_model::get_capacity (const region *reg) const
}
}
break;
+ case RK_SIZED:
+ /* Look through sized regions to get at the capacity
+ of the underlying regions. */
+ return get_capacity (reg->get_parent_region ());
}
if (const svalue *recorded = get_dynamic_extents (reg))
@@ -2056,7 +2027,7 @@ region_model::set_value (const region *lhs_reg, const svalue *rhs_sval,
check_for_writable_region (lhs_reg, ctxt);
m_store.set_value (m_mgr->get_store_manager(), lhs_reg, rhs_sval,
- BK_direct, ctxt ? ctxt->get_uncertainty () : NULL);
+ ctxt ? ctxt->get_uncertainty () : NULL);
}
/* Set the value of the region given by LHS to the value given by RHS. */
@@ -2087,6 +2058,14 @@ region_model::purge_region (const region *reg)
m_store.purge_region (m_mgr->get_store_manager(), reg);
}
+/* Fill REG with SVAL. */
+
+void
+region_model::fill_region (const region *reg, const svalue *sval)
+{
+ m_store.fill_region (m_mgr->get_store_manager(), reg, sval);
+}
+
/* Zero-fill REG. */
void
@@ -2711,6 +2690,9 @@ region_model::get_representative_path_var_1 (const region *reg,
parent_pv.m_stack_depth);
}
+ case RK_SIZED:
+ return path_var (NULL_TREE, 0);
+
case RK_CAST:
{
path_var parent_pv
diff --git a/gcc/analyzer/region-model.h b/gcc/analyzer/region-model.h
index a4b584d..b42852b 100644
--- a/gcc/analyzer/region-model.h
+++ b/gcc/analyzer/region-model.h
@@ -212,6 +212,8 @@ public:
virtual void visit_unaryop_svalue (const unaryop_svalue *) {}
virtual void visit_binop_svalue (const binop_svalue *) {}
virtual void visit_sub_svalue (const sub_svalue *) {}
+ virtual void visit_repeated_svalue (const repeated_svalue *) {}
+ virtual void visit_bits_within_svalue (const bits_within_svalue *) {}
virtual void visit_unmergeable_svalue (const unmergeable_svalue *) {}
virtual void visit_placeholder_svalue (const placeholder_svalue *) {}
virtual void visit_widening_svalue (const widening_svalue *) {}
@@ -255,6 +257,12 @@ public:
const svalue *get_or_create_sub_svalue (tree type,
const svalue *parent_svalue,
const region *subregion);
+ const svalue *get_or_create_repeated_svalue (tree type,
+ const svalue *outer_size,
+ const svalue *inner_svalue);
+ const svalue *get_or_create_bits_within (tree type,
+ const bit_range &bits,
+ const svalue *inner_svalue);
const svalue *get_or_create_unmergeable (const svalue *arg);
const svalue *get_or_create_widening_svalue (tree type,
const program_point &point,
@@ -286,6 +294,9 @@ public:
const region *get_offset_region (const region *parent,
tree type,
const svalue *byte_offset);
+ const region *get_sized_region (const region *parent,
+ tree type,
+ const svalue *byte_size_sval);
const region *get_cast_region (const region *original_region,
tree type);
const frame_region *get_frame_region (const frame_region *calling_frame,
@@ -321,6 +332,12 @@ private:
const svalue *maybe_fold_sub_svalue (tree type,
const svalue *parent_svalue,
const region *subregion);
+ const svalue *maybe_fold_repeated_svalue (tree type,
+ const svalue *outer_size,
+ const svalue *inner_svalue);
+ const svalue *maybe_fold_bits_within_svalue (tree type,
+ const bit_range &bits,
+ const svalue *inner_svalue);
const svalue *maybe_undo_optimize_bit_field_compare (tree type,
const compound_svalue *compound_sval,
tree cst, const svalue *arg1);
@@ -362,6 +379,14 @@ private:
typedef hash_map<sub_svalue::key_t, sub_svalue *> sub_values_map_t;
sub_values_map_t m_sub_values_map;
+ typedef hash_map<repeated_svalue::key_t,
+ repeated_svalue *> repeated_values_map_t;
+ repeated_values_map_t m_repeated_values_map;
+
+ typedef hash_map<bits_within_svalue::key_t,
+ bits_within_svalue *> bits_within_values_map_t;
+ bits_within_values_map_t m_bits_within_values_map;
+
typedef hash_map<const svalue *,
unmergeable_svalue *> unmergeable_values_map_t;
unmergeable_values_map_t m_unmergeable_values_map;
@@ -402,6 +427,7 @@ private:
consolidation_map<field_region> m_field_regions;
consolidation_map<element_region> m_element_regions;
consolidation_map<offset_region> m_offset_regions;
+ consolidation_map<sized_region> m_sized_regions;
consolidation_map<cast_region> m_cast_regions;
consolidation_map<frame_region> m_frame_regions;
consolidation_map<symbolic_region> m_symbolic_regions;
@@ -575,6 +601,7 @@ class region_model
void set_value (tree lhs, tree rhs, region_model_context *ctxt);
void clobber_region (const region *reg);
void purge_region (const region *reg);
+ void fill_region (const region *reg, const svalue *sval);
void zero_fill_region (const region *reg);
void mark_region_as_unknown (const region *reg, uncertainty_t *uncertainty);
diff --git a/gcc/analyzer/region.cc b/gcc/analyzer/region.cc
index 5f246df..4633717 100644
--- a/gcc/analyzer/region.cc
+++ b/gcc/analyzer/region.cc
@@ -98,6 +98,7 @@ region::get_base_region () const
case RK_FIELD:
case RK_ELEMENT:
case RK_OFFSET:
+ case RK_SIZED:
iter = iter->get_parent_region ();
continue;
case RK_CAST:
@@ -121,6 +122,7 @@ region::base_region_p () const
case RK_FIELD:
case RK_ELEMENT:
case RK_OFFSET:
+ case RK_SIZED:
case RK_CAST:
return false;
@@ -188,7 +190,8 @@ region::get_offset () const
return *m_cached_offset;
}
-/* If the size of this region (in bytes) is known statically, write it to *OUT
+/* Base class implementation of region::get_byte_size vfunc.
+ If the size of this region (in bytes) is known statically, write it to *OUT
and return true.
Otherwise return false. */
@@ -208,8 +211,29 @@ region::get_byte_size (byte_size_t *out) const
return true;
}
-/* If the size of TYPE (in bits) is constant, write it to *OUT
- and return true.
+/* Base implementation of region::get_byte_size_sval vfunc. */
+
+const svalue *
+region::get_byte_size_sval (region_model_manager *mgr) const
+{
+ tree type = get_type ();
+
+ /* Bail out e.g. for heap-allocated regions. */
+ if (!type)
+ return mgr->get_or_create_unknown_svalue (size_type_node);
+
+ HOST_WIDE_INT bytes = int_size_in_bytes (type);
+ if (bytes == -1)
+ return mgr->get_or_create_unknown_svalue (size_type_node);
+
+ tree byte_size = size_in_bytes (type);
+ if (TREE_TYPE (byte_size) != size_type_node)
+ byte_size = fold_build1 (NOP_EXPR, size_type_node, byte_size);
+ return mgr->get_or_create_constant_svalue (byte_size);
+}
+
+/* Attempt to get the size of TYPE in bits.
+ If successful, return true and write the size to *OUT.
Otherwise return false. */
bool
@@ -249,7 +273,7 @@ region::get_bit_size (bit_size_t *out) const
/* Get the field within RECORD_TYPE at BIT_OFFSET. */
-static tree
+tree
get_field_at_bit_offset (tree record_type, bit_offset_t bit_offset)
{
gcc_assert (TREE_CODE (record_type) == RECORD_TYPE);
@@ -375,18 +399,10 @@ region::calc_offset () const
= (const field_region *)iter_region;
iter_region = iter_region->get_parent_region ();
- /* Compare with e.g. gimple-fold.c's
- fold_nonarray_ctor_reference. */
- tree field = field_reg->get_field ();
- tree byte_offset = DECL_FIELD_OFFSET (field);
- if (TREE_CODE (byte_offset) != INTEGER_CST)
+ bit_offset_t rel_bit_offset;
+ if (!field_reg->get_relative_concrete_offset (&rel_bit_offset))
return region_offset::make_symbolic (iter_region);
- tree field_offset = DECL_FIELD_BIT_OFFSET (field);
- /* Compute bit offset of the field. */
- offset_int bitoffset
- = (wi::to_offset (field_offset)
- + (wi::to_offset (byte_offset) << LOG2_BITS_PER_UNIT));
- accum_bit_offset += bitoffset;
+ accum_bit_offset += rel_bit_offset;
}
continue;
@@ -396,28 +412,10 @@ region::calc_offset () const
= (const element_region *)iter_region;
iter_region = iter_region->get_parent_region ();
- if (tree idx_cst
- = element_reg->get_index ()->maybe_get_constant ())
- {
- gcc_assert (TREE_CODE (idx_cst) == INTEGER_CST);
-
- tree elem_type = element_reg->get_type ();
- offset_int element_idx = wi::to_offset (idx_cst);
-
- /* First, use int_size_in_bytes, to reject the case where we
- have an incomplete type, or a non-constant value. */
- HOST_WIDE_INT hwi_byte_size = int_size_in_bytes (elem_type);
- if (hwi_byte_size > 0)
- {
- offset_int element_bit_size
- = hwi_byte_size << LOG2_BITS_PER_UNIT;
- offset_int element_bit_offset
- = element_idx * element_bit_size;
- accum_bit_offset += element_bit_offset;
- continue;
- }
- }
- return region_offset::make_symbolic (iter_region);
+ bit_offset_t rel_bit_offset;
+ if (!element_reg->get_relative_concrete_offset (&rel_bit_offset))
+ return region_offset::make_symbolic (iter_region);
+ accum_bit_offset += rel_bit_offset;
}
continue;
@@ -427,22 +425,17 @@ region::calc_offset () const
= (const offset_region *)iter_region;
iter_region = iter_region->get_parent_region ();
- if (tree byte_offset_cst
- = offset_reg->get_byte_offset ()->maybe_get_constant ())
- {
- gcc_assert (TREE_CODE (byte_offset_cst) == INTEGER_CST);
- /* Use a signed value for the byte offset, to handle
- negative offsets. */
- HOST_WIDE_INT byte_offset
- = wi::to_offset (byte_offset_cst).to_shwi ();
- HOST_WIDE_INT bit_offset = byte_offset * BITS_PER_UNIT;
- accum_bit_offset += bit_offset;
- }
- else
+ bit_offset_t rel_bit_offset;
+ if (!offset_reg->get_relative_concrete_offset (&rel_bit_offset))
return region_offset::make_symbolic (iter_region);
+ accum_bit_offset += rel_bit_offset;
}
continue;
+ case RK_SIZED:
+ iter_region = iter_region->get_parent_region ();
+ continue;
+
case RK_CAST:
{
const cast_region *cast_reg
@@ -458,6 +451,14 @@ region::calc_offset () const
return region_offset::make_concrete (iter_region, accum_bit_offset);
}
+/* Base implementation of region::get_relative_concrete_offset vfunc. */
+
+bool
+region::get_relative_concrete_offset (bit_offset_t *) const
+{
+ return false;
+}
+
/* Copy from SRC_REG to DST_REG, using CTXT for any issues that occur. */
void
@@ -984,7 +985,7 @@ decl_region::get_svalue_for_initializer (region_model_manager *mgr) const
which can fail if we have a region with unknown size
(e.g. "extern const char arr[];"). */
const binding_key *binding
- = binding_key::make (mgr->get_store_manager (), this, BK_direct);
+ = binding_key::make (mgr->get_store_manager (), this);
if (binding->symbolic_p ())
return NULL;
@@ -1030,6 +1031,26 @@ field_region::dump_to_pp (pretty_printer *pp, bool simple) const
}
}
+/* Implementation of region::get_relative_concrete_offset vfunc
+ for field_region. */
+
+bool
+field_region::get_relative_concrete_offset (bit_offset_t *out) const
+{
+ /* Compare with e.g. gimple-fold.c's
+ fold_nonarray_ctor_reference. */
+ tree byte_offset = DECL_FIELD_OFFSET (m_field);
+ if (TREE_CODE (byte_offset) != INTEGER_CST)
+ return false;
+ tree field_offset = DECL_FIELD_BIT_OFFSET (m_field);
+ /* Compute bit offset of the field. */
+ offset_int bitoffset
+ = (wi::to_offset (field_offset)
+ + (wi::to_offset (byte_offset) << LOG2_BITS_PER_UNIT));
+ *out = bitoffset;
+ return true;
+}
+
/* class element_region : public region. */
/* Implementation of region::accept vfunc for element_region. */
@@ -1067,6 +1088,35 @@ element_region::dump_to_pp (pretty_printer *pp, bool simple) const
}
}
+/* Implementation of region::get_relative_concrete_offset vfunc
+ for element_region. */
+
+bool
+element_region::get_relative_concrete_offset (bit_offset_t *out) const
+{
+ if (tree idx_cst = m_index->maybe_get_constant ())
+ {
+ gcc_assert (TREE_CODE (idx_cst) == INTEGER_CST);
+
+ tree elem_type = get_type ();
+ offset_int element_idx = wi::to_offset (idx_cst);
+
+ /* First, use int_size_in_bytes, to reject the case where we
+ have an incomplete type, or a non-constant value. */
+ HOST_WIDE_INT hwi_byte_size = int_size_in_bytes (elem_type);
+ if (hwi_byte_size > 0)
+ {
+ offset_int element_bit_size
+ = hwi_byte_size << LOG2_BITS_PER_UNIT;
+ offset_int element_bit_offset
+ = element_idx * element_bit_size;
+ *out = element_bit_offset;
+ return true;
+ }
+ }
+ return false;
+}
+
/* class offset_region : public region. */
/* Implementation of region::accept vfunc for offset_region. */
@@ -1103,6 +1153,86 @@ offset_region::dump_to_pp (pretty_printer *pp, bool simple) const
}
}
+/* Implementation of region::get_relative_concrete_offset vfunc
+ for offset_region. */
+
+bool
+offset_region::get_relative_concrete_offset (bit_offset_t *out) const
+{
+ if (tree byte_offset_cst = m_byte_offset->maybe_get_constant ())
+ {
+ gcc_assert (TREE_CODE (byte_offset_cst) == INTEGER_CST);
+ /* Use a signed value for the byte offset, to handle
+ negative offsets. */
+ HOST_WIDE_INT byte_offset
+ = wi::to_offset (byte_offset_cst).to_shwi ();
+ HOST_WIDE_INT bit_offset = byte_offset * BITS_PER_UNIT;
+ *out = bit_offset;
+ return true;
+ }
+ return false;
+}
+
+/* class sized_region : public region. */
+
+/* Implementation of region::accept vfunc for sized_region. */
+
+void
+sized_region::accept (visitor *v) const
+{
+ region::accept (v);
+ m_byte_size_sval->accept (v);
+}
+
+/* Implementation of region::dump_to_pp vfunc for sized_region. */
+
+void
+sized_region::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+ if (simple)
+ {
+ pp_string (pp, "SIZED_REG(");
+ get_parent_region ()->dump_to_pp (pp, simple);
+ pp_string (pp, ", ");
+ m_byte_size_sval->dump_to_pp (pp, simple);
+ pp_string (pp, ")");
+ }
+ else
+ {
+ pp_string (pp, "sized_region(");
+ get_parent_region ()->dump_to_pp (pp, simple);
+ pp_string (pp, ", ");
+ m_byte_size_sval->dump_to_pp (pp, simple);
+      pp_string (pp, ")");
+ }
+}
+
+/* Implementation of region::get_byte_size vfunc for sized_region. */
+
+bool
+sized_region::get_byte_size (byte_size_t *out) const
+{
+ if (tree cst = m_byte_size_sval->maybe_get_constant ())
+ {
+ gcc_assert (TREE_CODE (cst) == INTEGER_CST);
+ *out = tree_to_uhwi (cst);
+ return true;
+ }
+ return false;
+}
+
+/* Implementation of region::get_bit_size vfunc for sized_region. */
+
+bool
+sized_region::get_bit_size (bit_size_t *out) const
+{
+ byte_size_t byte_size;
+ if (!get_byte_size (&byte_size))
+ return false;
+ *out = byte_size * BITS_PER_UNIT;
+ return true;
+}
+
/* class cast_region : public region. */
/* Implementation of region::accept vfunc for cast_region. */
diff --git a/gcc/analyzer/region.h b/gcc/analyzer/region.h
index 175a82a..353d5c4 100644
--- a/gcc/analyzer/region.h
+++ b/gcc/analyzer/region.h
@@ -43,6 +43,7 @@ enum region_kind
RK_FIELD,
RK_ELEMENT,
RK_OFFSET,
+ RK_SIZED,
RK_CAST,
RK_HEAP_ALLOCATED,
RK_ALLOCA,
@@ -70,6 +71,7 @@ enum region_kind
field_region (RK_FIELD)
element_region (RK_ELEMENT)
offset_region (RK_OFFSET)
+ sized_region (RK_SIZED)
cast_region (RK_CAST)
heap_allocated_region (RK_HEAP_ALLOCATED)
alloca_region (RK_ALLOCA)
@@ -107,6 +109,8 @@ public:
dyn_cast_element_region () const { return NULL; }
virtual const offset_region *
dyn_cast_offset_region () const { return NULL; }
+ virtual const sized_region *
+ dyn_cast_sized_region () const { return NULL; }
virtual const cast_region *
dyn_cast_cast_region () const { return NULL; }
virtual const string_region *
@@ -138,8 +142,25 @@ public:
static int cmp_ptr_ptr (const void *, const void *);
region_offset get_offset () const;
- bool get_byte_size (byte_size_t *out) const;
- bool get_bit_size (bit_size_t *out) const;
+
+ /* Attempt to get the size of this region as a concrete number of bytes.
+ If successful, return true and write the size to *OUT.
+ Otherwise return false. */
+ virtual bool get_byte_size (byte_size_t *out) const;
+
+ /* Attempt to get the size of this region as a concrete number of bits.
+ If successful, return true and write the size to *OUT.
+ Otherwise return false. */
+ virtual bool get_bit_size (bit_size_t *out) const;
+
+ /* Get a symbolic value describing the size of this region in bytes
+ (which could be "unknown"). */
+ virtual const svalue *get_byte_size_sval (region_model_manager *mgr) const;
+
+ /* Attempt to get the offset in bits of this region relative to its parent.
+ If successful, return true and write to *OUT.
+ Otherwise return false. */
+ virtual bool get_relative_concrete_offset (bit_offset_t *out) const;
void
get_subregions_for_binding (region_model_manager *mgr,
@@ -670,6 +691,8 @@ public:
tree get_field () const { return m_field; }
+ bool get_relative_concrete_offset (bit_offset_t *out) const FINAL OVERRIDE;
+
private:
tree m_field;
};
@@ -751,6 +774,9 @@ public:
const svalue *get_index () const { return m_index; }
+ virtual bool
+ get_relative_concrete_offset (bit_offset_t *out) const FINAL OVERRIDE;
+
private:
const svalue *m_index;
};
@@ -833,6 +859,8 @@ public:
const svalue *get_byte_offset () const { return m_byte_offset; }
+ bool get_relative_concrete_offset (bit_offset_t *out) const FINAL OVERRIDE;
+
private:
const svalue *m_byte_offset;
};
@@ -855,6 +883,99 @@ template <> struct default_hash_traits<offset_region::key_t>
namespace ana {
+/* A region of size BYTE_SIZE_SVAL bytes within its parent region (or
+   possibly larger, which would lead to an overflow).  */
+
+class sized_region : public region
+{
+public:
+ /* A support class for uniquifying instances of sized_region. */
+ struct key_t
+ {
+ key_t (const region *parent, tree element_type,
+ const svalue *byte_size_sval)
+ : m_parent (parent), m_element_type (element_type),
+ m_byte_size_sval (byte_size_sval)
+ {
+ gcc_assert (byte_size_sval);
+ }
+
+ hashval_t hash () const
+ {
+ inchash::hash hstate;
+ hstate.add_ptr (m_parent);
+ hstate.add_ptr (m_element_type);
+ hstate.add_ptr (m_byte_size_sval);
+ return hstate.end ();
+ }
+
+ bool operator== (const key_t &other) const
+ {
+ return (m_parent == other.m_parent
+ && m_element_type == other.m_element_type
+ && m_byte_size_sval == other.m_byte_size_sval);
+ }
+
+ void mark_deleted () { m_byte_size_sval = reinterpret_cast<const svalue *> (1); }
+ void mark_empty () { m_byte_size_sval = NULL; }
+ bool is_deleted () const
+ {
+ return m_byte_size_sval == reinterpret_cast<const svalue *> (1);
+ }
+ bool is_empty () const { return m_byte_size_sval == NULL; }
+
+ const region *m_parent;
+ tree m_element_type;
+ const svalue *m_byte_size_sval;
+ };
+
+ sized_region (unsigned id, const region *parent, tree type,
+ const svalue *byte_size_sval)
+ : region (complexity::from_pair (parent, byte_size_sval),
+ id, parent, type),
+ m_byte_size_sval (byte_size_sval)
+ {}
+
+ enum region_kind get_kind () const FINAL OVERRIDE { return RK_SIZED; }
+ const sized_region *
+ dyn_cast_sized_region () const FINAL OVERRIDE { return this; }
+
+ void accept (visitor *v) const FINAL OVERRIDE;
+
+ void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+
+ bool get_byte_size (byte_size_t *out) const FINAL OVERRIDE;
+ bool get_bit_size (bit_size_t *out) const FINAL OVERRIDE;
+
+ const svalue *
+ get_byte_size_sval (region_model_manager *) const FINAL OVERRIDE
+ {
+ return m_byte_size_sval;
+ }
+
+private:
+ const svalue *m_byte_size_sval;
+};
+
+} // namespace ana
+
+template <>
+template <>
+inline bool
+is_a_helper <const sized_region *>::test (const region *reg)
+{
+ return reg->get_kind () == RK_SIZED;
+}
+
+template <> struct default_hash_traits<sized_region::key_t>
+: public member_function_hash_traits<sized_region::key_t>
+{
+ static const bool empty_zero_p = true;
+};
+
+namespace ana {
+
/* A region that views another region using a different type. */
class cast_region : public region
diff --git a/gcc/analyzer/store.cc b/gcc/analyzer/store.cc
index d5f8798..a65c741 100644
--- a/gcc/analyzer/store.cc
+++ b/gcc/analyzer/store.cc
@@ -118,52 +118,25 @@ uncertainty_t::dump (bool simple) const
pp_flush (&pp);
}
-/* Get a human-readable string for KIND for dumps. */
-
-const char *binding_kind_to_string (enum binding_kind kind)
-{
- switch (kind)
- {
- default:
- case BK_empty:
- case BK_deleted:
- /* We shouldn't be attempting to print the hash kinds. */
- gcc_unreachable ();
- case BK_direct:
- return "direct";
- case BK_default:
- return "default";
- }
-}
-
/* class binding_key. */
const binding_key *
-binding_key::make (store_manager *mgr, const region *r,
- enum binding_kind kind)
+binding_key::make (store_manager *mgr, const region *r)
{
region_offset offset = r->get_offset ();
if (offset.symbolic_p ())
- return mgr->get_symbolic_binding (r, kind);
+ return mgr->get_symbolic_binding (r);
else
{
bit_size_t bit_size;
if (r->get_bit_size (&bit_size))
return mgr->get_concrete_binding (offset.get_bit_offset (),
- bit_size, kind);
+ bit_size);
else
- return mgr->get_symbolic_binding (r, kind);
+ return mgr->get_symbolic_binding (r);
}
}
-/* Base class implementation of binding_key::dump_to_pp vfunc. */
-
-void
-binding_key::dump_to_pp (pretty_printer *pp, bool /*simple*/) const
-{
- pp_printf (pp, "kind: %s", binding_kind_to_string (m_kind));
-}
-
/* Dump this binding_key to stderr. */
DEBUG_FUNCTION void
@@ -204,11 +177,6 @@ binding_key::cmp_ptrs (const void *p1, const void *p2)
int
binding_key::cmp (const binding_key *k1, const binding_key *k2)
{
- enum binding_kind kind1 = k1->get_kind ();
- enum binding_kind kind2 = k2->get_kind ();
- if (kind1 != kind2)
- return (int)kind1 - (int)kind2;
-
int concrete1 = k1->concrete_p ();
int concrete2 = k2->concrete_p ();
if (int concrete_cmp = concrete1 - concrete2)
@@ -236,7 +204,7 @@ binding_key::cmp (const binding_key *k1, const binding_key *k2)
}
}
-/* struct struct bit_range. */
+/* struct bit_range. */
void
bit_range::dump_to_pp (pretty_printer *pp) const
@@ -267,6 +235,24 @@ bit_range::dump () const
pp_flush (&pp);
}
+/* If OTHER is a subset of this, return true and write
+ to *OUT the relative range of OTHER within this.
+ Otherwise return false. */
+
+bool
+bit_range::contains_p (const bit_range &other, bit_range *out) const
+{
+ if (contains_p (other.get_start_bit_offset ())
+ && contains_p (other.get_last_bit_offset ()))
+ {
+ out->m_start_bit_offset = other.m_start_bit_offset - m_start_bit_offset;
+ out->m_size_in_bits = other.m_size_in_bits;
+ return true;
+ }
+ else
+ return false;
+}
+
int
bit_range::cmp (const bit_range &br1, const bit_range &br2)
{
@@ -371,15 +357,57 @@ byte_range::dump_to_pp (pretty_printer *pp) const
}
}
+/* Dump this object to stderr. */
+
+DEBUG_FUNCTION void
+byte_range::dump () const
+{
+ pretty_printer pp;
+ pp.buffer->stream = stderr;
+ dump_to_pp (&pp);
+ pp_newline (&pp);
+ pp_flush (&pp);
+}
+
+/* If OTHER is a subset of this, return true and write
+ to *OUT the relative range of OTHER within this.
+ Otherwise return false. */
+
+bool
+byte_range::contains_p (const byte_range &other, byte_range *out) const
+{
+ if (contains_p (other.get_start_byte_offset ())
+ && contains_p (other.get_last_byte_offset ()))
+ {
+ out->m_start_byte_offset = other.m_start_byte_offset - m_start_byte_offset;
+ out->m_size_in_bytes = other.m_size_in_bytes;
+ return true;
+ }
+ else
+ return false;
+}
+
+/* qsort comparator for byte ranges. */
+
+int
+byte_range::cmp (const byte_range &br1, const byte_range &br2)
+{
+ /* Order first by offset. */
+ if (int start_cmp = wi::cmps (br1.m_start_byte_offset,
+ br2.m_start_byte_offset))
+ return start_cmp;
+
+ /* ...then by size. */
+ return wi::cmpu (br1.m_size_in_bytes, br2.m_size_in_bytes);
+}
+
/* class concrete_binding : public binding_key. */
/* Implementation of binding_key::dump_to_pp vfunc for concrete_binding. */
void
-concrete_binding::dump_to_pp (pretty_printer *pp, bool simple) const
+concrete_binding::dump_to_pp (pretty_printer *pp, bool) const
{
- binding_key::dump_to_pp (pp, simple);
- pp_string (pp, ", ");
m_bit_range.dump_to_pp (pp);
}
@@ -402,9 +430,6 @@ concrete_binding::cmp_ptr_ptr (const void *p1, const void *p2)
const concrete_binding *b1 = *(const concrete_binding * const *)p1;
const concrete_binding *b2 = *(const concrete_binding * const *)p2;
- if (int kind_cmp = b1->get_kind () - b2->get_kind ())
- return kind_cmp;
-
return bit_range::cmp (b1->m_bit_range, b2->m_bit_range);
}
@@ -413,8 +438,8 @@ concrete_binding::cmp_ptr_ptr (const void *p1, const void *p2)
void
symbolic_binding::dump_to_pp (pretty_printer *pp, bool simple) const
{
- binding_key::dump_to_pp (pp, simple);
- pp_string (pp, ", region: ");
+ pp_string (pp, "region: ");
m_region->dump_to_pp (pp, simple);
}
@@ -426,9 +451,6 @@ symbolic_binding::cmp_ptr_ptr (const void *p1, const void *p2)
const symbolic_binding *b1 = *(const symbolic_binding * const *)p1;
const symbolic_binding *b2 = *(const symbolic_binding * const *)p2;
- if (int kind_cmp = b1->get_kind () - b2->get_kind ())
- return kind_cmp;
-
return region::cmp_ids (b1->get_region (), b2->get_region ());
}
@@ -777,8 +799,7 @@ binding_map::apply_ctor_val_to_range (const region *parent_reg,
return false;
bit_offset_t start_bit_offset = min_offset.get_bit_offset ();
store_manager *smgr = mgr->get_store_manager ();
- const binding_key *max_element_key
- = binding_key::make (smgr, max_element, BK_direct);
+ const binding_key *max_element_key = binding_key::make (smgr, max_element);
if (max_element_key->symbolic_p ())
return false;
const concrete_binding *max_element_ckey
@@ -786,8 +807,7 @@ binding_map::apply_ctor_val_to_range (const region *parent_reg,
bit_size_t range_size_in_bits
= max_element_ckey->get_next_bit_offset () - start_bit_offset;
const concrete_binding *range_key
- = smgr->get_concrete_binding (start_bit_offset, range_size_in_bits,
- BK_direct);
+ = smgr->get_concrete_binding (start_bit_offset, range_size_in_bits);
if (range_key->symbolic_p ())
return false;
@@ -819,8 +839,7 @@ binding_map::apply_ctor_pair_to_child_region (const region *parent_reg,
{
const svalue *sval = get_svalue_for_ctor_val (val, mgr);
const binding_key *k
- = binding_key::make (mgr->get_store_manager (), child_reg,
- BK_direct);
+ = binding_key::make (mgr->get_store_manager (), child_reg);
/* Handle the case where we have an unknown size for child_reg
(e.g. due to it being a trailing field with incomplete array
     type).  */
@@ -844,7 +863,7 @@ binding_map::apply_ctor_pair_to_child_region (const region *parent_reg,
- parent_base_offset.get_bit_offset ());
/* Create a concrete key for the child within the parent. */
k = mgr->get_store_manager ()->get_concrete_binding
- (child_parent_offset, sval_bit_size, BK_direct);
+ (child_parent_offset, sval_bit_size);
}
gcc_assert (k->concrete_p ());
put (k, sval);
@@ -852,6 +871,166 @@ binding_map::apply_ctor_pair_to_child_region (const region *parent_reg,
}
}
+/* Populate OUT with all bindings within this map that overlap KEY. */
+
+void
+binding_map::get_overlapping_bindings (const binding_key *key,
+ auto_vec<const binding_key *> *out)
+{
+ for (auto iter : *this)
+ {
+ const binding_key *iter_key = iter.first;
+ if (const concrete_binding *ckey
+ = key->dyn_cast_concrete_binding ())
+ {
+ if (const concrete_binding *iter_ckey
+ = iter_key->dyn_cast_concrete_binding ())
+ {
+ if (ckey->overlaps_p (*iter_ckey))
+ out->safe_push (iter_key);
+ }
+ else
+ {
+ /* Assume overlap. */
+ out->safe_push (iter_key);
+ }
+ }
+ else
+ {
+ /* Assume overlap. */
+ out->safe_push (iter_key);
+ }
+ }
+}
+
+/* Remove, truncate, and/or split any bindings within this map that
+ overlap DROP_KEY.
+
+ For example, if we have:
+
+ +------------------------------------+
+ | old binding |
+ +------------------------------------+
+
+ which is to be overwritten with:
+
+ .......+----------------------+.......
+ .......| new binding |.......
+ .......+----------------------+.......
+
+ this function "cuts a hole" out of the old binding:
+
+ +------+......................+------+
+ |prefix| hole for new binding |suffix|
+ +------+......................+------+
+
+ into which the new binding can be added without
+ overlapping the prefix or suffix.
+
+ The prefix and suffix (if added) will be bound to the pertinent
+ parts of the value of the old binding.
+
+ For example, given:
+ struct s5
+ {
+ char arr[8];
+ };
+ void test_5 (struct s5 *p)
+ {
+ struct s5 f = *p;
+ f.arr[3] = 42;
+ }
+ then after the "f = *p;" we have:
+ cluster for: f: INIT_VAL((*INIT_VAL(p_33(D))))
+ and at the "f.arr[3] = 42;" we remove the bindings overlapping
+ "f.arr[3]", replacing it with a prefix (bytes 0-2) and suffix (bytes 4-7)
+ giving:
+ cluster for: f
+ key: {bytes 0-2}
+ value: {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
+ key: {bytes 4-7}
+ value: {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
+ punching a hole into which the new value can be written at byte 3:
+ cluster for: f
+ key: {bytes 0-2}
+ value: {BITS_WITHIN(bytes 0-2, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
+ key: {byte 3}
+ value: 'char' {(char)42}
+ key: {bytes 4-7}
+ value: {BITS_WITHIN(bytes 4-7, inner_val: INIT_VAL((*INIT_VAL(p_33(D))).arr))}
+
+ If UNCERTAINTY is non-NULL, use it to record any svalues that
+ were removed, as being maybe-bound. */
+
+void
+binding_map::remove_overlapping_bindings (store_manager *mgr,
+ const binding_key *drop_key,
+ uncertainty_t *uncertainty)
+{
+ auto_vec<const binding_key *> bindings;
+ get_overlapping_bindings (drop_key, &bindings);
+
+ unsigned i;
+ const binding_key *iter_binding;
+ FOR_EACH_VEC_ELT (bindings, i, iter_binding)
+ {
+ const svalue *old_sval = get (iter_binding);
+ if (uncertainty)
+ uncertainty->on_maybe_bound_sval (old_sval);
+
+ /* Begin by removing the old binding. */
+ m_map.remove (iter_binding);
+
+ /* Now potentially add the prefix and suffix. */
+ if (const concrete_binding *drop_ckey
+ = drop_key->dyn_cast_concrete_binding ())
+ if (const concrete_binding *iter_ckey
+ = iter_binding->dyn_cast_concrete_binding ())
+ {
+ gcc_assert (drop_ckey->overlaps_p (*iter_ckey));
+
+ const bit_range &drop_bits = drop_ckey->get_bit_range ();
+ const bit_range &iter_bits = iter_ckey->get_bit_range ();
+
+ if (iter_bits.get_start_bit_offset ()
+ < drop_bits.get_start_bit_offset ())
+ {
+ /* We have a truncated prefix. */
+ bit_range prefix_bits (iter_bits.get_start_bit_offset (),
+ (drop_bits.get_start_bit_offset ()
+ - iter_bits.get_start_bit_offset ()));
+ const concrete_binding *prefix_key
+ = mgr->get_concrete_binding (prefix_bits);
+ bit_range rel_prefix (0, prefix_bits.m_size_in_bits);
+ const svalue *prefix_sval
+ = old_sval->extract_bit_range (NULL_TREE,
+ rel_prefix,
+ mgr->get_svalue_manager ());
+ m_map.put (prefix_key, prefix_sval);
+ }
+
+ if (iter_bits.get_next_bit_offset ()
+ > drop_bits.get_next_bit_offset ())
+ {
+ /* We have a truncated suffix. */
+ bit_range suffix_bits (drop_bits.get_next_bit_offset (),
+ (iter_bits.get_next_bit_offset ()
+ - drop_bits.get_next_bit_offset ()));
+ const concrete_binding *suffix_key
+ = mgr->get_concrete_binding (suffix_bits);
+ bit_range rel_suffix (drop_bits.get_next_bit_offset ()
+ - iter_bits.get_start_bit_offset (),
+ suffix_bits.m_size_in_bits);
+ const svalue *suffix_sval
+ = old_sval->extract_bit_range (NULL_TREE,
+ rel_suffix,
+ mgr->get_svalue_manager ());
+ m_map.put (suffix_key, suffix_sval);
+ }
+ }
+ }
+}
+
/* class binding_cluster. */
/* binding_cluster's copy ctor. */
@@ -964,6 +1143,27 @@ binding_cluster::dump (bool simple) const
pp_flush (&pp);
}
+/* Assert that this object is valid. */
+
+void
+binding_cluster::validate () const
+{
+ int num_symbolic = 0;
+ int num_concrete = 0;
+ for (auto iter : m_map)
+ {
+ if (iter.first->symbolic_p ())
+ num_symbolic++;
+ else
+ num_concrete++;
+ }
+ /* We shouldn't have more than one symbolic key per cluster
+ (or one would have clobbered the other). */
+ gcc_assert (num_symbolic < 2);
+ /* We can't have both concrete and symbolic keys. */
+ gcc_assert (num_concrete == 0 || num_symbolic == 0);
+}
+
/* Return a new json::object of the form
{"escaped": true/false,
"touched": true/false,
@@ -986,8 +1186,7 @@ binding_cluster::to_json () const
void
binding_cluster::bind (store_manager *mgr,
- const region *reg, const svalue *sval,
- binding_kind kind)
+ const region *reg, const svalue *sval)
{
if (const compound_svalue *compound_sval
= sval->dyn_cast_compound_svalue ())
@@ -996,7 +1195,7 @@ binding_cluster::bind (store_manager *mgr,
return;
}
- const binding_key *binding = binding_key::make (mgr, reg, kind);
+ const binding_key *binding = binding_key::make (mgr, reg);
bind_key (binding, sval);
}
@@ -1045,8 +1244,7 @@ binding_cluster::bind_compound_sval (store_manager *mgr,
+ reg_offset.get_bit_offset ());
const concrete_binding *effective_concrete_key
= mgr->get_concrete_binding (effective_start,
- concrete_key->get_size_in_bits (),
- iter_key->get_kind ());
+ concrete_key->get_size_in_bits ());
bind_key (effective_concrete_key, iter_sval);
}
else
@@ -1069,31 +1267,35 @@ binding_cluster::purge_region (store_manager *mgr, const region *reg)
{
gcc_assert (reg->get_kind () == RK_DECL);
const binding_key *binding
- = binding_key::make (mgr, const_cast<region *> (reg),
- BK_direct);
+ = binding_key::make (mgr, const_cast<region *> (reg));
m_map.remove (binding);
}
-/* Mark REG within this cluster as being filled with zeroes.
- Remove all bindings, add a default binding to zero, and clear the
- TOUCHED flag. */
+/* Clobber REG and fill it with repeated copies of SVAL. */
void
-binding_cluster::zero_fill_region (store_manager *mgr, const region *reg)
+binding_cluster::fill_region (store_manager *mgr,
+ const region *reg,
+ const svalue *sval)
{
clobber_region (mgr, reg);
- /* Add a default binding to zero. */
region_model_manager *sval_mgr = mgr->get_svalue_manager ();
- const svalue *cst_sval
- = sval_mgr->get_or_create_int_cst (integer_type_node, 0);
- const svalue *bound_sval = cst_sval;
- if (reg->get_type ())
- bound_sval = sval_mgr->get_or_create_unaryop (reg->get_type (), NOP_EXPR,
- cst_sval);
- bind (mgr, reg, bound_sval, BK_default);
+ const svalue *byte_size_sval = reg->get_byte_size_sval (sval_mgr);
+ const svalue *fill_sval
+ = sval_mgr->get_or_create_repeated_svalue (reg->get_type (),
+ byte_size_sval, sval);
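+  /* e.g. filling a 16-byte region with (char)0 binds the region to a
+     single repeated svalue covering all 16 bytes, rather than to 16
+     per-byte bindings.  */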
+ bind (mgr, reg, fill_sval);
+}
+
+/* Clobber REG within this cluster and fill it with zeroes. */
- m_touched = false;
+void
+binding_cluster::zero_fill_region (store_manager *mgr, const region *reg)
+{
+ region_model_manager *sval_mgr = mgr->get_svalue_manager ();
+ const svalue *zero_sval = sval_mgr->get_or_create_int_cst (char_type_node, 0);
+ fill_region (mgr, reg, zero_sval);
}
/* Mark REG within this cluster as being unknown.
@@ -1111,7 +1313,7 @@ binding_cluster::mark_region_as_unknown (store_manager *mgr,
region_model_manager *sval_mgr = mgr->get_svalue_manager ();
const svalue *sval
= sval_mgr->get_or_create_unknown_svalue (reg->get_type ());
- bind (mgr, reg, sval, BK_default);
+ bind (mgr, reg, sval);
}
/* Get any SVAL bound to REG within this cluster via kind KIND,
@@ -1119,10 +1321,9 @@ binding_cluster::mark_region_as_unknown (store_manager *mgr,
const svalue *
binding_cluster::get_binding (store_manager *mgr,
- const region *reg,
- binding_kind kind) const
+ const region *reg) const
{
- const binding_key *reg_binding = binding_key::make (mgr, reg, kind);
+ const binding_key *reg_binding = binding_key::make (mgr, reg);
const svalue *sval = m_map.get (reg_binding);
if (sval)
{
@@ -1140,7 +1341,7 @@ binding_cluster::get_binding (store_manager *mgr,
while (const region *parent_reg = reg->get_parent_region ())
{
const binding_key *parent_reg_binding
- = binding_key::make (mgr, parent_reg, kind);
+ = binding_key::make (mgr, parent_reg);
if (parent_reg_binding == reg_binding
&& sval->get_type ()
&& reg->get_type ()
@@ -1161,7 +1362,7 @@ binding_cluster::get_binding (store_manager *mgr,
FOR_EACH_VEC_ELT_REVERSE (regions, i, iter_reg)
{
region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
- sval = rmm_mgr->get_or_create_sub_svalue (reg->get_type (),
+ sval = rmm_mgr->get_or_create_sub_svalue (iter_reg->get_type (),
sval, iter_reg);
}
}
@@ -1169,21 +1370,20 @@ binding_cluster::get_binding (store_manager *mgr,
return sval;
}
-/* Get any SVAL bound to REG within this cluster via kind KIND,
+/* Get any SVAL bound to REG within this cluster,
either directly for REG, or recursively checking for bindings within
parent regions and extracting subvalues if need be. */
const svalue *
binding_cluster::get_binding_recursive (store_manager *mgr,
- const region *reg,
- enum binding_kind kind) const
+ const region *reg) const
{
- if (const svalue *sval = get_binding (mgr, reg, kind))
+ if (const svalue *sval = get_binding (mgr, reg))
return sval;
if (reg != m_base_region)
if (const region *parent_reg = reg->get_parent_region ())
if (const svalue *parent_sval
- = get_binding_recursive (mgr, parent_reg, kind))
+ = get_binding_recursive (mgr, parent_reg))
{
/* Extract child svalue from parent svalue. */
region_model_manager *rmm_mgr = mgr->get_svalue_manager ();
@@ -1199,18 +1399,11 @@ const svalue *
binding_cluster::get_any_binding (store_manager *mgr,
const region *reg) const
{
- /* Look for a "direct" binding. */
+ /* Look for a direct binding. */
if (const svalue *direct_sval
- = get_binding_recursive (mgr, reg, BK_direct))
+ = get_binding_recursive (mgr, reg))
return direct_sval;
- /* Look for a "default" binding, but not if there's been a symbolic
- write. */
- if (!m_touched)
- if (const svalue *default_sval
- = get_binding_recursive (mgr, reg, BK_default))
- return default_sval;
-
/* If this cluster has been touched by a symbolic write, then the content
of any subregion not currently specifically bound is "UNKNOWN". */
if (m_touched)
@@ -1251,8 +1444,6 @@ const svalue *
binding_cluster::maybe_get_compound_binding (store_manager *mgr,
const region *reg) const
{
- binding_map map;
-
region_offset cluster_offset = m_base_region->get_offset ();
if (cluster_offset.symbolic_p ())
return NULL;
@@ -1260,6 +1451,36 @@ binding_cluster::maybe_get_compound_binding (store_manager *mgr,
if (reg_offset.symbolic_p ())
return NULL;
+ region_model_manager *sval_mgr = mgr->get_svalue_manager ();
+
+ /* We will build the result map in two parts:
+ (a) result_map, holding the concrete keys from this cluster,
+
+ (b) default_map, holding the initial values for the region
+ (e.g. uninitialized, initializer values, or zero), unless this
+ cluster has been touched.
+
+ We will populate (a), and as we do, clobber (b), trimming and
+ splitting its bindings as necessary.
+ Finally, we will merge (b) into (a), giving a concrete map
+ that merges both the initial values and the bound values from
+ the binding_cluster.
+ Doing it this way reduces N for the O(N^2) intersection-finding;
+ perhaps we should have a spatially-organized data structure for
+ concrete keys, though. */
+
+ binding_map result_map;
+ binding_map default_map;
+
+ /* Set up default values in default_map. */
+ const svalue *default_sval;
+ if (m_touched)
+ default_sval = sval_mgr->get_or_create_unknown_svalue (reg->get_type ());
+ else
+ default_sval = sval_mgr->get_or_create_initial_value (reg);
+ const binding_key *default_key = binding_key::make (mgr, reg);
+ default_map.put (default_key, default_sval);
+
for (map_t::iterator iter = m_map.begin (); iter != m_map.end (); ++iter)
{
const binding_key *key = (*iter).first;
@@ -1268,78 +1489,77 @@ binding_cluster::maybe_get_compound_binding (store_manager *mgr,
if (const concrete_binding *concrete_key
= key->dyn_cast_concrete_binding ())
{
- /* Skip bindings that are outside the bit range of REG. */
- if (concrete_key->get_start_bit_offset ()
- < reg_offset.get_bit_offset ())
- continue;
- bit_size_t reg_bit_size;
- if (reg->get_bit_size (&reg_bit_size))
- if (concrete_key->get_start_bit_offset ()
- >= reg_offset.get_bit_offset () + reg_bit_size)
- continue;
-
- /* Get offset of KEY relative to REG, rather than to
- the cluster. */
- bit_offset_t relative_start
- = (concrete_key->get_start_bit_offset ()
- - reg_offset.get_bit_offset ());
- const concrete_binding *offset_concrete_key
- = mgr->get_concrete_binding (relative_start,
- concrete_key->get_size_in_bits (),
- key->get_kind ());
- map.put (offset_concrete_key, sval);
- }
- else
- return NULL;
- }
+ const bit_range &bound_range = concrete_key->get_bit_range ();
- if (map.elements () == 0)
- return NULL;
+ bit_size_t reg_bit_size;
+ if (!reg->get_bit_size (&reg_bit_size))
+ return NULL;
- region_model_manager *sval_mgr = mgr->get_svalue_manager ();
- return sval_mgr->get_or_create_compound_svalue (reg->get_type (), map);
-}
+ bit_range reg_range (reg_offset.get_bit_offset (),
+ reg_bit_size);
+ /* Skip bindings that are outside the bit range of REG. */
+ if (!bound_range.intersects_p (reg_range))
+ continue;
-/* Populate OUT with all bindings within this cluster that overlap REG. */
+ /* We shouldn't have an exact match; that should have been
+ handled already. */
+ gcc_assert (!(reg_range == bound_range));
-void
-binding_cluster::get_overlapping_bindings (store_manager *mgr,
- const region *reg,
- auto_vec<const binding_key *> *out)
-{
- const binding_key *binding
- = binding_key::make (mgr, reg, BK_direct);
- for (map_t::iterator iter = m_map.begin ();
- iter != m_map.end (); ++iter)
- {
- const binding_key *iter_key = (*iter).first;
- if (const concrete_binding *ckey
- = binding->dyn_cast_concrete_binding ())
- {
- if (const concrete_binding *iter_ckey
- = iter_key->dyn_cast_concrete_binding ())
+ bit_range subrange (0, 0);
+ if (reg_range.contains_p (bound_range, &subrange))
{
- if (ckey->overlaps_p (*iter_ckey))
- out->safe_push (iter_key);
+ /* We have a bound range fully within REG.
+ Add it to map, offsetting accordingly. */
+
+ /* Get offset of KEY relative to REG, rather than to
+ the cluster. */
+ const concrete_binding *offset_concrete_key
+ = mgr->get_concrete_binding (subrange);
+ result_map.put (offset_concrete_key, sval);
+
+ /* Clobber default_map, removing/trimming/splitting where
+ it overlaps with offset_concrete_key. */
+ default_map.remove_overlapping_bindings (mgr,
+ offset_concrete_key,
+ NULL);
+ }
+ else if (bound_range.contains_p (reg_range, &subrange))
+ {
+ /* REG is fully within the bound range, but
+ is not equal to it; we're extracting a subvalue. */
+ return sval->extract_bit_range (reg->get_type (),
+ subrange,
+ mgr->get_svalue_manager ());
}
else
{
- /* Assume overlap. */
- out->safe_push (iter_key);
+ /* REG and the bound range partially overlap.
+ We don't handle this case yet. */
+ return NULL;
}
}
else
- {
- /* Assume overlap. */
- out->safe_push (iter_key);
- }
+ /* Can't handle symbolic bindings. */
+ return NULL;
+ }
+
+ if (result_map.elements () == 0)
+ return NULL;
+
+ /* Merge any bindings from default_map into result_map. */
+ for (auto iter : default_map)
+ {
+ const binding_key *key = iter.first;
+ const svalue *sval = iter.second;
+ result_map.put (key, sval);
}
+
+ return sval_mgr->get_or_create_compound_svalue (reg->get_type (), result_map);
}
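
A note on the three overlap cases above (bound range fully within REG; REG fully within the bound range; partial overlap, which is punted on): the arithmetic is easiest to check with concrete numbers. Below is a minimal standalone sketch of the contains_p/intersects_p logic, using a toy struct with plain longs in place of the analyzer's offset_int-based bit_range; the names are hypothetical and this is not part of the patch.

/* Standalone model of bit_range::contains_p / intersects_p, using
   'long' in place of the analyzer's offset_int-based types.  */
#include <cassert>

struct toy_bit_range
{
  long start;
  long size;
  long next () const { return start + size; }
  bool intersects_p (const toy_bit_range &other) const
  {
    return start < other.next () && other.start < next ();
  }
  /* If OTHER is fully within this range, write OTHER's position
     relative to this range's start into *OUT and return true.  */
  bool contains_p (const toy_bit_range &other, toy_bit_range *out) const
  {
    if (other.start >= start && other.next () <= next ())
      {
	out->start = other.start - start;
	out->size = other.size;
	return true;
      }
    return false;
  }
};

int
main ()
{
  toy_bit_range reg = {32, 32};      /* REG: bits 32..63 of the cluster.  */
  toy_bit_range bound = {40, 8};     /* a binding at bits 40..47.  */
  toy_bit_range sub = {0, 0};
  assert (reg.contains_p (bound, &sub));
  assert (sub.start == 8 && sub.size == 8);  /* bits 8..15 of REG.  */

  toy_bit_range wide = {0, 128};     /* binding covering the whole cluster.  */
  assert (wide.contains_p (reg, &sub));
  assert (sub.start == 32);          /* REG is bits 32..63 of the value.  */

  toy_bit_range partial = {56, 16};  /* bits 56..71: straddles REG's end.  */
  assert (partial.intersects_p (reg));
  assert (!reg.contains_p (partial, &sub)
	  && !partial.contains_p (reg, &sub)); /* the unhandled case.  */
  return 0;
}
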
-/* Remove any bindings within this cluster that overlap REG,
- but retain default bindings that overlap but aren't fully covered
- by REG.
+/* Remove, truncate, and/or split any bindings within this cluster that
+ overlap REG.
If UNCERTAINTY is non-NULL, use it to record any svalues that
were removed, as being maybe-bound. */
@@ -1348,26 +1568,9 @@ binding_cluster::remove_overlapping_bindings (store_manager *mgr,
const region *reg,
uncertainty_t *uncertainty)
{
- auto_vec<const binding_key *> bindings;
- get_overlapping_bindings (mgr, reg, &bindings);
+ const binding_key *reg_binding = binding_key::make (mgr, reg);
- unsigned i;
- const binding_key *iter_binding;
- FOR_EACH_VEC_ELT (bindings, i, iter_binding)
- {
- /* Don't remove default bindings, unless the default binding
- is fully covered by REG. */
- if (iter_binding->get_kind () == BK_default)
- {
- const binding_key *reg_binding
- = binding_key::make (mgr, reg, BK_default);
- if (reg_binding != iter_binding)
- continue;
- }
- if (uncertainty)
- uncertainty->on_maybe_bound_sval (m_map.get (iter_binding));
- m_map.remove (iter_binding);
- }
+ m_map.remove_overlapping_bindings (mgr, reg_binding, uncertainty);
}
/* Attempt to merge CLUSTER_A and CLUSTER_B into OUT_CLUSTER, using
@@ -1428,6 +1631,8 @@ binding_cluster::can_merge_p (const binding_cluster *cluster_a,
const binding_key *key_b = (*iter_b).first;
keys.add (key_b);
}
+ int num_symbolic_keys = 0;
+ int num_concrete_keys = 0;
for (hash_set<const binding_key *>::iterator iter = keys.begin ();
iter != keys.end (); ++iter)
{
@@ -1435,6 +1640,11 @@ binding_cluster::can_merge_p (const binding_cluster *cluster_a,
const svalue *sval_a = cluster_a->get_any_value (key);
const svalue *sval_b = cluster_b->get_any_value (key);
+ if (key->symbolic_p ())
+ num_symbolic_keys++;
+ else
+ num_concrete_keys++;
+
if (sval_a == sval_b)
{
gcc_assert (sval_a);
@@ -1463,29 +1673,15 @@ binding_cluster::can_merge_p (const binding_cluster *cluster_a,
out_cluster->m_map.put (key, unknown_sval);
}
- /* Handle the case where we get a default binding from one and a direct
- binding from the other. */
- auto_vec<const concrete_binding *> duplicate_keys;
- for (map_t::iterator iter = out_cluster->m_map.begin ();
- iter != out_cluster->m_map.end (); ++iter)
- {
- const concrete_binding *ckey
- = (*iter).first->dyn_cast_concrete_binding ();
- if (!ckey)
- continue;
- if (ckey->get_kind () != BK_direct)
- continue;
- const concrete_binding *def_ckey
- = mgr->get_concrete_binding (ckey->get_start_bit_offset (),
- ckey->get_size_in_bits (),
- BK_default);
- if (out_cluster->m_map.get (def_ckey))
- duplicate_keys.safe_push (def_ckey);
+ /* We can have at most one symbolic key per cluster,
+ and if we do, we can't have any concrete keys.
+ If this happens, mark the cluster as touched, with no keys. */
+ if (num_symbolic_keys >= 2
+ || (num_concrete_keys > 0 && num_symbolic_keys > 0))
+ {
+ out_cluster->m_touched = true;
+ out_cluster->m_map.empty ();
}
- unsigned i;
- const concrete_binding *key;
- FOR_EACH_VEC_ELT (duplicate_keys, i, key)
- out_cluster->m_map.remove (key);
/* We don't handle other kinds of overlaps yet. */
@@ -1558,7 +1754,7 @@ binding_cluster::on_unknown_fncall (const gcall *call,
const svalue *sval
= mgr->get_svalue_manager ()->get_or_create_conjured_svalue
(m_base_region->get_type (), call, m_base_region);
- bind (mgr, m_base_region, sval, BK_direct);
+ bind (mgr, m_base_region, sval);
m_touched = true;
}
@@ -1665,7 +1861,7 @@ binding_cluster::maybe_get_simple_value (store_manager *mgr) const
if (m_map.elements () != 1)
return NULL;
- const binding_key *key = binding_key::make (mgr, m_base_region, BK_direct);
+ const binding_key *key = binding_key::make (mgr, m_base_region);
return get_any_value (key);
}
@@ -1675,10 +1871,9 @@ binding_cluster::maybe_get_simple_value (store_manager *mgr) const
const concrete_binding *
store_manager::get_concrete_binding (bit_offset_t start_bit_offset,
- bit_offset_t size_in_bits,
- enum binding_kind kind)
+ bit_offset_t size_in_bits)
{
- concrete_binding b (start_bit_offset, size_in_bits, kind);
+ concrete_binding b (start_bit_offset, size_in_bits);
if (concrete_binding *existing = m_concrete_binding_key_mgr.get (b))
return existing;
@@ -1688,10 +1883,9 @@ store_manager::get_concrete_binding (bit_offset_t start_bit_offset,
}
const symbolic_binding *
-store_manager::get_symbolic_binding (const region *reg,
- enum binding_kind kind)
+store_manager::get_symbolic_binding (const region *reg)
{
- symbolic_binding b (reg, kind);
+ symbolic_binding b (reg);
if (symbolic_binding *existing = m_symbolic_binding_key_mgr.get (b))
return existing;
@@ -1952,6 +2146,15 @@ store::dump (bool simple) const
pp_flush (&pp);
}
+/* Assert that this object is valid. */
+
+void
+store::validate () const
+{
+ for (auto iter : m_cluster_map)
+ iter.second->validate ();
+}
+
/* Return a new json::object of the form
{PARENT_REGION_DESC: {BASE_REGION_DESC: object for binding_map,
... for each cluster within parent region},
@@ -2027,7 +2230,7 @@ store::get_any_binding (store_manager *mgr, const region *reg) const
void
store::set_value (store_manager *mgr, const region *lhs_reg,
- const svalue *rhs_sval, enum binding_kind kind,
+ const svalue *rhs_sval,
uncertainty_t *uncertainty)
{
remove_overlapping_bindings (mgr, lhs_reg);
@@ -2054,7 +2257,7 @@ store::set_value (store_manager *mgr, const region *lhs_reg,
else
{
lhs_cluster = get_or_create_cluster (lhs_base_reg);
- lhs_cluster->bind (mgr, lhs_reg, rhs_sval, kind);
+ lhs_cluster->bind (mgr, lhs_reg, rhs_sval);
}
/* Bindings to a cluster can affect other clusters if a symbolic
@@ -2209,16 +2412,26 @@ store::purge_region (store_manager *mgr, const region *reg)
}
}
-/* Zero-fill REG. */
+/* Fill REG with SVAL. */
void
-store::zero_fill_region (store_manager *mgr, const region *reg)
+store::fill_region (store_manager *mgr, const region *reg, const svalue *sval)
{
const region *base_reg = reg->get_base_region ();
if (base_reg->symbolic_for_unknown_ptr_p ())
return;
binding_cluster *cluster = get_or_create_cluster (base_reg);
- cluster->zero_fill_region (mgr, reg);
+ cluster->fill_region (mgr, reg, sval);
+}
+
+/* Zero-fill REG. */
+
+void
+store::zero_fill_region (store_manager *mgr, const region *reg)
+{
+ region_model_manager *sval_mgr = mgr->get_svalue_manager ();
+ const svalue *zero_sval = sval_mgr->get_or_create_int_cst (char_type_node, 0);
+ fill_region (mgr, reg, zero_sval);
}
/* Mark REG as having unknown content. */
@@ -2740,26 +2953,18 @@ test_binding_key_overlap ()
store_manager mgr (NULL);
/* Various 8-bit bindings. */
- const concrete_binding *cb_0_7
- = mgr.get_concrete_binding (0, 8, BK_direct);
- const concrete_binding *cb_8_15
- = mgr.get_concrete_binding (8, 8, BK_direct);
- const concrete_binding *cb_16_23
- = mgr.get_concrete_binding (16, 8, BK_direct);
- const concrete_binding *cb_24_31
- = mgr.get_concrete_binding (24, 8, BK_direct);
+ const concrete_binding *cb_0_7 = mgr.get_concrete_binding (0, 8);
+ const concrete_binding *cb_8_15 = mgr.get_concrete_binding (8, 8);
+ const concrete_binding *cb_16_23 = mgr.get_concrete_binding (16, 8);
+ const concrete_binding *cb_24_31 = mgr.get_concrete_binding (24, 8);
/* 16-bit bindings. */
- const concrete_binding *cb_0_15
- = mgr.get_concrete_binding (0, 16, BK_direct);
- const concrete_binding *cb_8_23
- = mgr.get_concrete_binding (8, 16, BK_direct);
- const concrete_binding *cb_16_31
- = mgr.get_concrete_binding (16, 16, BK_direct);
+ const concrete_binding *cb_0_15 = mgr.get_concrete_binding (0, 16);
+ const concrete_binding *cb_8_23 = mgr.get_concrete_binding (8, 16);
+ const concrete_binding *cb_16_31 = mgr.get_concrete_binding (16, 16);
/* 32-bit binding. */
- const concrete_binding *cb_0_31
- = mgr.get_concrete_binding (0, 32, BK_direct);
+ const concrete_binding *cb_0_31 = mgr.get_concrete_binding (0, 32);
/* Everything should self-overlap. */
ASSERT_OVERLAP (cb_0_7, cb_0_7);
diff --git a/gcc/analyzer/store.h b/gcc/analyzer/store.h
index e0c60e1..2ac2923 100644
--- a/gcc/analyzer/store.h
+++ b/gcc/analyzer/store.h
@@ -199,29 +199,6 @@ private:
class byte_range;
class concrete_binding;
-/* An enum for discriminating between "direct" vs "default" levels of
- mapping. */
-
-enum binding_kind
-{
- /* Special-case value for hash support.
- This is the initial entry, so that hash traits can have
- empty_zero_p = true. */
- BK_empty = 0,
-
- /* Special-case value for hash support. */
- BK_deleted,
-
- /* The normal kind of mapping. */
- BK_direct,
-
- /* A lower-priority kind of mapping, for use when inheriting
- default values from a parent region. */
- BK_default
-};
-
-extern const char *binding_kind_to_string (enum binding_kind kind);
-
/* Abstract base class for describing ranges of bits within a binding_map
that can have svalues bound to them. */
@@ -232,10 +209,9 @@ public:
virtual bool concrete_p () const = 0;
bool symbolic_p () const { return !concrete_p (); }
- static const binding_key *make (store_manager *mgr, const region *r,
- enum binding_kind kind);
+ static const binding_key *make (store_manager *mgr, const region *r);
- virtual void dump_to_pp (pretty_printer *pp, bool simple) const;
+ virtual void dump_to_pp (pretty_printer *pp, bool simple) const = 0;
void dump (bool simple) const;
label_text get_desc (bool simple=true) const;
@@ -244,28 +220,6 @@ public:
virtual const concrete_binding *dyn_cast_concrete_binding () const
{ return NULL; }
-
- enum binding_kind get_kind () const { return m_kind; }
-
- void mark_deleted () { m_kind = BK_deleted; }
- void mark_empty () { m_kind = BK_empty; }
- bool is_deleted () const { return m_kind == BK_deleted; }
- bool is_empty () const { return m_kind == BK_empty; }
-
-protected:
- binding_key (enum binding_kind kind) : m_kind (kind) {}
-
- hashval_t impl_hash () const
- {
- return m_kind;
- }
- bool impl_eq (const binding_key &other) const
- {
- return m_kind == other.m_kind;
- }
-
-private:
- enum binding_kind m_kind;
};
/* A concrete range of bits. */
@@ -288,6 +242,10 @@ struct bit_range
{
return m_start_bit_offset + m_size_in_bits;
}
+ bit_offset_t get_last_bit_offset () const
+ {
+ return get_next_bit_offset () - 1;
+ }
bool contains_p (bit_offset_t offset) const
{
@@ -295,6 +253,8 @@ struct bit_range
&& offset < get_next_bit_offset ());
}
+ bool contains_p (const bit_range &other, bit_range *out) const;
+
bool operator== (const bit_range &other) const
{
return (m_start_bit_offset == other.m_start_bit_offset
@@ -327,12 +287,42 @@ struct byte_range
{}
void dump_to_pp (pretty_printer *pp) const;
+ void dump () const;
+
+ bool contains_p (byte_offset_t offset) const
+ {
+ return (offset >= get_start_byte_offset ()
+ && offset < get_next_byte_offset ());
+ }
+ bool contains_p (const byte_range &other, byte_range *out) const;
+
+ bool operator== (const byte_range &other) const
+ {
+ return (m_start_byte_offset == other.m_start_byte_offset
+ && m_size_in_bytes == other.m_size_in_bytes);
+ }
+ byte_offset_t get_start_byte_offset () const
+ {
+ return m_start_byte_offset;
+ }
+ byte_offset_t get_next_byte_offset () const
+ {
+ return m_start_byte_offset + m_size_in_bytes;
+ }
byte_offset_t get_last_byte_offset () const
{
return m_start_byte_offset + m_size_in_bytes - 1;
}
+ bit_range as_bit_range () const
+ {
+ return bit_range (m_start_byte_offset * BITS_PER_UNIT,
+ m_size_in_bytes * BITS_PER_UNIT);
+ }
+
+ static int cmp (const byte_range &br1, const byte_range &br2);
+
byte_offset_t m_start_byte_offset;
byte_size_t m_size_in_bytes;
};
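
The new byte_range helpers mirror the bit_range ones, with as_bit_range a straight scale by BITS_PER_UNIT. A quick standalone check of that arithmetic (assuming 8-bit bytes, which is what BITS_PER_UNIT is on current GCC hosts; toy code, not part of the patch):

#include <cassert>

int
main ()
{
  const long BITS_PER_UNIT = 8;            /* assumption: 8-bit bytes.  */
  long start_byte = 5, size_in_bytes = 35; /* bytes 5-39.  */
  long start_bit = start_byte * BITS_PER_UNIT;
  long size_in_bits = size_in_bytes * BITS_PER_UNIT;
  assert (start_bit == 40 && size_in_bits == 280);
  /* "Last" offsets are inclusive, hence the "- 1".  */
  assert (start_byte + size_in_bytes - 1 == 39);
  assert (start_bit + size_in_bits - 1 == 319);
  return 0;
}
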
@@ -346,10 +336,8 @@ public:
/* This class is its own key for the purposes of consolidation. */
typedef concrete_binding key_t;
- concrete_binding (bit_offset_t start_bit_offset, bit_size_t size_in_bits,
- enum binding_kind kind)
- : binding_key (kind),
- m_bit_range (start_bit_offset, size_in_bits)
+ concrete_binding (bit_offset_t start_bit_offset, bit_size_t size_in_bits)
+ : m_bit_range (start_bit_offset, size_in_bits)
{}
bool concrete_p () const FINAL OVERRIDE { return true; }
@@ -358,12 +346,10 @@ public:
inchash::hash hstate;
hstate.add_wide_int (m_bit_range.m_start_bit_offset);
hstate.add_wide_int (m_bit_range.m_size_in_bits);
- return hstate.end () ^ binding_key::impl_hash ();
+ return hstate.end ();
}
bool operator== (const concrete_binding &other) const
{
- if (!binding_key::impl_eq (other))
- return false;
return m_bit_range == other.m_bit_range;
}
@@ -392,6 +378,11 @@ public:
static int cmp_ptr_ptr (const void *, const void *);
+ void mark_deleted () { m_bit_range.m_start_bit_offset = -1; }
+ void mark_empty () { m_bit_range.m_start_bit_offset = -2; }
+ bool is_deleted () const { return m_bit_range.m_start_bit_offset == -1; }
+ bool is_empty () const { return m_bit_range.m_start_bit_offset == -2; }
+
private:
bit_range m_bit_range;
};
@@ -401,7 +392,7 @@ private:
template <> struct default_hash_traits<ana::concrete_binding>
: public member_function_hash_traits<ana::concrete_binding>
{
- static const bool empty_zero_p = true;
+ static const bool empty_zero_p = false;
};
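
Why empty_zero_p flips to false here: previously the BK_empty kind gave the hash traits an all-zero "empty" representation, but with the kind field gone a zero-initialized concrete_binding means a real key (bit 0, size 0), so the empty/deleted sentinels move into the start-bit offset itself (-2 and -1, per mark_empty/mark_deleted above). A toy sketch of that sentinel pattern, under those assumptions (not part of the patch):

#include <cassert>

struct toy_key
{
  long m_start;   /* stands in for m_bit_range.m_start_bit_offset.  */
  long m_size;

  void mark_deleted () { m_start = -1; }
  void mark_empty () { m_start = -2; }
  bool is_deleted () const { return m_start == -1; }
  bool is_empty () const { return m_start == -2; }
};

int
main ()
{
  /* A zero-initialized key now means "bit 0, size 0", not "empty slot",
     so a table using these sentinels must not treat all-zero as empty
     (i.e. empty_zero_p must be false).  */
  toy_key k = {0, 0};
  assert (!k.is_empty () && !k.is_deleted ());
  k.mark_empty ();
  assert (k.is_empty ());
  k.mark_deleted ();
  assert (k.is_deleted ());
  return 0;
}
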
namespace ana {
@@ -415,21 +406,16 @@ public:
/* This class is its own key for the purposes of consolidation. */
typedef symbolic_binding key_t;
- symbolic_binding (const region *region, enum binding_kind kind)
- : binding_key (kind),
- m_region (region)
- {}
+ symbolic_binding (const region *region) : m_region (region) {}
bool concrete_p () const FINAL OVERRIDE { return false; }
hashval_t hash () const
{
- return (binding_key::impl_hash () ^ (intptr_t)m_region);
+ return (intptr_t)m_region;
}
bool operator== (const symbolic_binding &other) const
{
- if (!binding_key::impl_eq (other))
- return false;
- return (m_region == other.m_region);
+ return m_region == other.m_region;
}
void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
@@ -438,6 +424,12 @@ public:
static int cmp_ptr_ptr (const void *, const void *);
+ void mark_deleted () { m_region = reinterpret_cast<const region *> (1); }
+ void mark_empty () { m_region = NULL; }
+ bool is_deleted () const
+ { return m_region == reinterpret_cast<const region *> (1); }
+ bool is_empty () const { return m_region == NULL; }
+
private:
const region *m_region;
};
@@ -504,7 +496,13 @@ public:
static int cmp (const binding_map &map1, const binding_map &map2);
+ void remove_overlapping_bindings (store_manager *mgr,
+ const binding_key *drop_key,
+ uncertainty_t *uncertainty);
+
private:
+ void get_overlapping_bindings (const binding_key *key,
+ auto_vec<const binding_key *> *out);
bool apply_ctor_val_to_range (const region *parent_reg,
region_model_manager *mgr,
tree min_index, tree max_index,
@@ -553,22 +551,22 @@ public:
void dump_to_pp (pretty_printer *pp, bool simple, bool multiline) const;
void dump (bool simple) const;
+ void validate () const;
+
json::object *to_json () const;
- void bind (store_manager *mgr, const region *, const svalue *,
- binding_kind kind);
+ void bind (store_manager *mgr, const region *, const svalue *);
void clobber_region (store_manager *mgr, const region *reg);
void purge_region (store_manager *mgr, const region *reg);
+ void fill_region (store_manager *mgr, const region *reg, const svalue *sval);
void zero_fill_region (store_manager *mgr, const region *reg);
void mark_region_as_unknown (store_manager *mgr, const region *reg,
uncertainty_t *uncertainty);
- const svalue *get_binding (store_manager *mgr, const region *reg,
- binding_kind kind) const;
+ const svalue *get_binding (store_manager *mgr, const region *reg) const;
const svalue *get_binding_recursive (store_manager *mgr,
- const region *reg,
- enum binding_kind kind) const;
+ const region *reg) const;
const svalue *get_any_binding (store_manager *mgr,
const region *reg) const;
const svalue *maybe_get_compound_binding (store_manager *mgr,
@@ -630,8 +628,6 @@ public:
private:
const svalue *get_any_value (const binding_key *key) const;
- void get_overlapping_bindings (store_manager *mgr, const region *reg,
- auto_vec<const binding_key *> *out);
void bind_compound_sval (store_manager *mgr,
const region *reg,
const compound_svalue *compound_sval);
@@ -684,6 +680,8 @@ public:
void dump (bool simple) const;
void summarize_to_pp (pretty_printer *pp, bool simple) const;
+ void validate () const;
+
json::object *to_json () const;
const svalue *get_any_binding (store_manager *mgr, const region *reg) const;
@@ -691,10 +689,11 @@ public:
bool called_unknown_fn_p () const { return m_called_unknown_fn; }
void set_value (store_manager *mgr, const region *lhs_reg,
- const svalue *rhs_sval, enum binding_kind kind,
+ const svalue *rhs_sval,
uncertainty_t *uncertainty);
void clobber_region (store_manager *mgr, const region *reg);
void purge_region (store_manager *mgr, const region *reg);
+ void fill_region (store_manager *mgr, const region *reg, const svalue *sval);
void zero_fill_region (store_manager *mgr, const region *reg);
void mark_region_as_unknown (store_manager *mgr, const region *reg,
uncertainty_t *uncertainty);
@@ -773,19 +772,15 @@ public:
/* binding consolidation. */
const concrete_binding *
get_concrete_binding (bit_offset_t start_bit_offset,
- bit_offset_t size_in_bits,
- enum binding_kind kind);
+ bit_offset_t size_in_bits);
const concrete_binding *
- get_concrete_binding (const bit_range &bits,
- enum binding_kind kind)
+ get_concrete_binding (const bit_range &bits)
{
return get_concrete_binding (bits.get_start_bit_offset (),
- bits.m_size_in_bits,
- kind);
+ bits.m_size_in_bits);
}
const symbolic_binding *
- get_symbolic_binding (const region *region,
- enum binding_kind kind);
+ get_symbolic_binding (const region *region);
region_model_manager *get_svalue_manager () const
{
diff --git a/gcc/analyzer/svalue.cc b/gcc/analyzer/svalue.cc
index a16563d..6c8afef 100644
--- a/gcc/analyzer/svalue.cc
+++ b/gcc/analyzer/svalue.cc
@@ -415,6 +415,27 @@ svalue::cmp_ptr (const svalue *sval1, const svalue *sval2)
sub_sval2->get_subregion ());
}
break;
+ case SK_REPEATED:
+ {
+ const repeated_svalue *repeated_sval1 = (const repeated_svalue *)sval1;
+ const repeated_svalue *repeated_sval2 = (const repeated_svalue *)sval2;
+ return svalue::cmp_ptr (repeated_sval1->get_inner_svalue (),
+ repeated_sval2->get_inner_svalue ());
+ }
+ break;
+ case SK_BITS_WITHIN:
+ {
+ const bits_within_svalue *bits_within_sval1
+ = (const bits_within_svalue *)sval1;
+ const bits_within_svalue *bits_within_sval2
+ = (const bits_within_svalue *)sval2;
+ if (int cmp = bit_range::cmp (bits_within_sval1->get_bits (),
+ bits_within_sval2->get_bits ()))
+ return cmp;
+ return svalue::cmp_ptr (bits_within_sval1->get_inner_svalue (),
+ bits_within_sval2->get_inner_svalue ());
+ }
+ break;
case SK_UNMERGEABLE:
{
const unmergeable_svalue *unmergeable_sval1
@@ -515,6 +536,27 @@ svalue::involves_p (const svalue *other) const
return v.found_p ();
}
+/* Extract SUBRANGE from this value, as a value of type TYPE. */
+
+const svalue *
+svalue::extract_bit_range (tree type,
+ const bit_range &subrange,
+ region_model_manager *mgr) const
+{
+ return mgr->get_or_create_bits_within (type, subrange, this);
+}
+
+/* Base implementation of svalue::maybe_fold_bits_within vfunc. */
+
+const svalue *
+svalue::maybe_fold_bits_within (tree,
+ const bit_range &,
+ region_model_manager *) const
+{
+ /* By default, don't fold. */
+ return NULL;
+}
+
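extract_bit_range funnels all requests through the manager; presumably (the region-model-manager.cc side of this patch is not shown in this section) get_or_create_bits_within first offers the inner svalue a chance to simplify via the maybe_fold_bits_within vfunc, and only creates a new bits_within_svalue when that returns NULL. A toy sketch of that fold-then-create dispatch, with hypothetical names:

/* Toy sketch of the fold-then-create dispatch (hypothetical names,
   not the analyzer's API).  */
#include <cstdio>

struct toy_sval
{
  virtual ~toy_sval () {}
  /* Subclasses return a folded value, or nullptr to decline.  */
  virtual const toy_sval *maybe_fold_bits_within () const { return nullptr; }
};

struct toy_zero_sval : toy_sval
{
  /* Bits within an all-zero value are all zero: fold to self.  */
  const toy_sval *maybe_fold_bits_within () const override { return this; }
};

struct toy_mgr
{
  const toy_sval *get_or_create_bits_within (const toy_sval *inner)
  {
    /* Offer the inner value a chance to simplify first...  */
    if (const toy_sval *folded = inner->maybe_fold_bits_within ())
      return folded;
    /* ...otherwise make a fresh wrapper node (leaked here for brevity).  */
    return new toy_sval ();
  }
};

int
main ()
{
  toy_mgr mgr;
  toy_zero_sval zero;
  printf ("folded to self: %s\n",
	  mgr.get_or_create_bits_within (&zero) == &zero ? "yes" : "no");
  return 0;
}
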
/* class region_svalue : public svalue. */
/* Implementation of svalue::dump_to_pp vfunc for region_svalue. */
@@ -680,6 +722,26 @@ constant_svalue::eval_condition (const constant_svalue *lhs,
return tristate::TS_UNKNOWN;
}
+/* Implementation of svalue::maybe_fold_bits_within vfunc
+ for constant_svalue. */
+
+const svalue *
+constant_svalue::maybe_fold_bits_within (tree type,
+ const bit_range &,
+ region_model_manager *mgr) const
+{
+ /* Bits within an all-zero value are also all zero. */
+ if (zerop (m_cst_expr))
+ {
+ if (type)
+ return mgr->get_or_create_cast (type, this);
+ else
+ return this;
+ }
+ /* Otherwise, don't fold. */
+ return NULL;
+}
+
/* class unknown_svalue : public svalue. */
/* Implementation of svalue::dump_to_pp vfunc for unknown_svalue. */
@@ -711,6 +773,18 @@ unknown_svalue::accept (visitor *v) const
v->visit_unknown_svalue (this);
}
+/* Implementation of svalue::maybe_fold_bits_within vfunc
+ for unknown_svalue. */
+
+const svalue *
+unknown_svalue::maybe_fold_bits_within (tree type,
+ const bit_range &,
+ region_model_manager *mgr) const
+{
+ /* Bits within an unknown_svalue are themselves unknown. */
+ return mgr->get_or_create_unknown_svalue (type);
+}
+
/* Get a string for KIND for use in debug dumps. */
const char *
@@ -892,6 +966,34 @@ unaryop_svalue::implicitly_live_p (const svalue_set *live_svalues,
return get_arg ()->live_p (live_svalues, model);
}
+/* Implementation of svalue::maybe_fold_bits_within vfunc
+ for unaryop_svalue. */
+
+const svalue *
+unaryop_svalue::maybe_fold_bits_within (tree type,
+ const bit_range &,
+ region_model_manager *mgr) const
+{
+ switch (m_op)
+ {
+ default:
+ break;
+ case NOP_EXPR:
+ /* A cast of zero is zero. */
+ if (tree cst = m_arg->maybe_get_constant ())
+ if (zerop (cst))
+ {
+ if (type)
+ return mgr->get_or_create_cast (type, this);
+ else
+ return this;
+ }
+ break;
+ }
+ /* Otherwise, don't fold. */
+ return NULL;
+}
+
/* class binop_svalue : public svalue. */
/* Implementation of svalue::dump_to_pp vfunc for binop_svalue. */
@@ -995,6 +1097,216 @@ sub_svalue::implicitly_live_p (const svalue_set *live_svalues,
return get_parent ()->live_p (live_svalues, model);
}
+/* class repeated_svalue : public svalue. */
+
+/* repeated_svalue's ctor. */
+
+repeated_svalue::repeated_svalue (tree type,
+ const svalue *outer_size,
+ const svalue *inner_svalue)
+: svalue (complexity::from_pair (outer_size, inner_svalue), type),
+ m_outer_size (outer_size),
+ m_inner_svalue (inner_svalue)
+{
+}
+
+/* Implementation of svalue::dump_to_pp vfunc for repeated_svalue. */
+
+void
+repeated_svalue::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+ if (simple)
+ {
+ pp_string (pp, "REPEATED(");
+ if (get_type ())
+ {
+ print_quoted_type (pp, get_type ());
+ pp_string (pp, ", ");
+ }
+ pp_string (pp, "outer_size: ");
+ m_outer_size->dump_to_pp (pp, simple);
+ pp_string (pp, ", inner_val: ");
+ m_inner_svalue->dump_to_pp (pp, simple);
+ pp_character (pp, ')');
+ }
+ else
+ {
+ pp_string (pp, "repeated_svalue (");
+ if (get_type ())
+ {
+ print_quoted_type (pp, get_type ());
+ pp_string (pp, ", ");
+ }
+ pp_string (pp, "outer_size: ");
+ m_outer_size->dump_to_pp (pp, simple);
+ pp_string (pp, ", inner_val: ");
+ m_inner_svalue->dump_to_pp (pp, simple);
+ pp_character (pp, ')');
+ }
+}
+
+/* Implementation of svalue::accept vfunc for repeated_svalue. */
+
+void
+repeated_svalue::accept (visitor *v) const
+{
+ v->visit_repeated_svalue (this);
+ m_inner_svalue->accept (v);
+}
+
+/* Return true if this value is known to be all zeroes. */
+
+bool
+repeated_svalue::all_zeroes_p () const
+{
+ if (tree cst = m_inner_svalue->maybe_get_constant ())
+ if (zerop (cst))
+ return true;
+ return false;
+}
+
+/* Implementation of svalue::maybe_fold_bits_within vfunc
+ for repeated_svalue. */
+
+const svalue *
+repeated_svalue::maybe_fold_bits_within (tree type,
+ const bit_range &bits,
+ region_model_manager *mgr) const
+{
+ const svalue *innermost_sval = m_inner_svalue;
+ /* Fold
+ BITS_WITHIN (range, REPEATED_SVALUE (ZERO))
+ to:
+ REPEATED_SVALUE (ZERO). */
+ if (all_zeroes_p ())
+ {
+ byte_range bytes (0, 0);
+ if (bits.as_byte_range (&bytes))
+ {
+ const svalue *byte_size
+ = mgr->get_or_create_int_cst (size_type_node,
+ bytes.m_size_in_bytes.to_uhwi ());
+ return mgr->get_or_create_repeated_svalue (type, byte_size,
+ innermost_sval);
+ }
+ }
+
+ /* Fold:
+ BITS_WITHIN (range, REPEATED_SVALUE (INNERMOST_SVALUE))
+ to:
+ BITS_WITHIN (range - offset, INNERMOST_SVALUE)
+ if range is fully within one instance of INNERMOST_SVALUE. */
+ if (tree innermost_type = innermost_sval->get_type ())
+ {
+ bit_size_t element_bit_size;
+ if (int_size_in_bits (innermost_type, &element_bit_size)
+ && element_bit_size > 0)
+ {
+ HOST_WIDE_INT start_idx
+ = (bits.get_start_bit_offset ()
+ / element_bit_size).to_shwi ();
+ HOST_WIDE_INT last_idx
+ = (bits.get_last_bit_offset ()
+ / element_bit_size).to_shwi ();
+ if (start_idx == last_idx)
+ {
+ bit_offset_t start_of_element
+ = start_idx * element_bit_size;
+ bit_range range_within_element
+ (bits.m_start_bit_offset - start_of_element,
+ bits.m_size_in_bits);
+ return mgr->get_or_create_bits_within (type,
+ range_within_element,
+ innermost_sval);
+ }
+ }
+ }
+
+ return NULL;
+}
+
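The index arithmetic in the second fold above rewards a worked example: a read that lands entirely within one element of the repeated value becomes a BITS_WITHIN of the inner value, while a read straddling two elements does not fold. A standalone sketch with plain integers (32-bit element assumed; toy code, not part of the patch):

#include <cassert>

int
main ()
{
  const long element_bit_size = 32;   /* e.g. a repeated 32-bit int.  */

  /* Extract bits 40..55 of the repeated value.  */
  long start_bit = 40, size_in_bits = 16;
  long last_bit = start_bit + size_in_bits - 1;

  long start_idx = start_bit / element_bit_size;   /* element 1.  */
  long last_idx = last_bit / element_bit_size;     /* element 1.  */
  assert (start_idx == last_idx);  /* fully within one element: fold.  */

  long start_of_element = start_idx * element_bit_size;  /* bit 32.  */
  long offset_within_element = start_bit - start_of_element;
  assert (offset_within_element == 8);
  /* => BITS_WITHIN (bits 8..23, INNERMOST_SVALUE).  */

  /* Bits 24..39 would straddle elements 0 and 1: no fold.  */
  assert (24 / element_bit_size != (24 + 16 - 1) / element_bit_size);
  return 0;
}
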
+/* class bits_within_svalue : public svalue. */
+
+/* bits_within_svalue's ctor. */
+
+bits_within_svalue::bits_within_svalue (tree type,
+ const bit_range &bits,
+ const svalue *inner_svalue)
+: svalue (complexity (inner_svalue), type),
+ m_bits (bits),
+ m_inner_svalue (inner_svalue)
+{
+}
+
+/* Implementation of svalue::dump_to_pp vfunc for bits_within_svalue. */
+
+void
+bits_within_svalue::dump_to_pp (pretty_printer *pp, bool simple) const
+{
+ if (simple)
+ {
+ pp_string (pp, "BITS_WITHIN(");
+ if (get_type ())
+ {
+ print_quoted_type (pp, get_type ());
+ pp_string (pp, ", ");
+ }
+ m_bits.dump_to_pp (pp);
+ pp_string (pp, ", inner_val: ");
+ m_inner_svalue->dump_to_pp (pp, simple);
+ pp_character (pp, ')');
+ }
+ else
+ {
+ pp_string (pp, "bits_within_svalue (");
+ if (get_type ())
+ {
+ print_quoted_type (pp, get_type ());
+ pp_string (pp, ", ");
+ }
+ m_bits.dump_to_pp (pp);
+ pp_string (pp, ", inner_val: ");
+ m_inner_svalue->dump_to_pp (pp, simple);
+ pp_character (pp, ')');
+ }
+}
+
+/* Implementation of svalue::maybe_fold_bits_within vfunc
+ for bits_within_svalue. */
+
+const svalue *
+bits_within_svalue::maybe_fold_bits_within (tree type,
+ const bit_range &bits,
+ region_model_manager *mgr) const
+{
+ /* Fold:
+ BITS_WITHIN (range1, BITS_WITHIN (range2, VAL))
+ to:
+ BITS_WITHIN (range1 within range2, VAL). */
+ bit_range offset_bits (m_bits.get_start_bit_offset ()
+ + bits.m_start_bit_offset,
+ bits.m_size_in_bits);
+ return mgr->get_or_create_bits_within (type, offset_bits, m_inner_svalue);
+}
+
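The nested-range fold above is pure offset addition: the outer request is relative to the inner window, so the composed range is the outer range shifted by the inner window's start. With concrete numbers (toy sketch, not part of the patch):

#include <cassert>

int
main ()
{
  /* Inner: BITS_WITHIN (bits 8..39 of VAL).  */
  long inner_start = 8;
  /* Outer request: bits 16..23 of that inner value.  */
  long outer_start = 16, outer_size = 8;

  /* Folded: BITS_WITHIN (bits 24..31 of VAL).  */
  long folded_start = inner_start + outer_start;
  assert (folded_start == 24 && outer_size == 8);
  return 0;
}
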
+/* Implementation of svalue::accept vfunc for bits_within_svalue. */
+
+void
+bits_within_svalue::accept (visitor *v) const
+{
+ v->visit_bits_within_svalue (this);
+ m_inner_svalue->accept (v);
+}
+
+/* Implementation of svalue::implicitly_live_p vfunc for bits_within_svalue. */
+
+bool
+bits_within_svalue::implicitly_live_p (const svalue_set *live_svalues,
+ const region_model *model) const
+{
+ return m_inner_svalue->live_p (live_svalues, model);
+}
+
/* class widening_svalue : public svalue. */
/* Implementation of svalue::dump_to_pp vfunc for widening_svalue. */
@@ -1291,6 +1603,75 @@ compound_svalue::calc_complexity (const binding_map &map)
return complexity (num_child_nodes + 1, max_child_depth + 1);
}
+/* Implementation of svalue::maybe_fold_bits_within vfunc
+ for compound_svalue. */
+
+const svalue *
+compound_svalue::maybe_fold_bits_within (tree type,
+ const bit_range &bits,
+ region_model_manager *mgr) const
+{
+ binding_map result_map;
+ for (auto iter : m_map)
+ {
+ const binding_key *key = iter.first;
+ if (const concrete_binding *conc_key
+ = key->dyn_cast_concrete_binding ())
+ {
+ /* Ignore concrete bindings outside BITS. */
+ if (!conc_key->get_bit_range ().intersects_p (bits))
+ continue;
+
+ const svalue *sval = iter.second;
+ /* Get the position of conc_key relative to BITS. */
+ bit_range result_location (conc_key->get_start_bit_offset ()
+ - bits.get_start_bit_offset (),
+ conc_key->get_size_in_bits ());
+ /* If conc_key starts before BITS, trim off leading bits
+ from the svalue and adjust binding location. */
+ if (result_location.m_start_bit_offset < 0)
+ {
+ bit_size_t leading_bits_to_drop
+ = -result_location.m_start_bit_offset;
+ result_location = bit_range
+ (0, result_location.m_size_in_bits - leading_bits_to_drop);
+ bit_range bits_within_sval (leading_bits_to_drop,
+ result_location.m_size_in_bits);
+ /* Trim off leading bits from iter_sval. */
+ sval = mgr->get_or_create_bits_within (NULL_TREE,
+ bits_within_sval,
+ sval);
+ }
+ /* If conc_key finishes after BITS, trim off trailing bits
+ from the svalue and adjust binding location. */
+ if (conc_key->get_next_bit_offset ()
+ > bits.get_next_bit_offset ())
+ {
+ bit_size_t trailing_bits_to_drop
+ = (conc_key->get_next_bit_offset ()
+ - bits.get_next_bit_offset ());
+ result_location = bit_range
+ (result_location.m_start_bit_offset,
+ result_location.m_size_in_bits - trailing_bits_to_drop);
+ bit_range bits_within_sval (0,
+ result_location.m_size_in_bits);
+ /* Trim off trailing bits from iter_sval. */
+ sval = mgr->get_or_create_bits_within (NULL_TREE,
+ bits_within_sval,
+ sval);
+ }
+ const concrete_binding *offset_conc_key
+ = mgr->get_store_manager ()->get_concrete_binding
+ (result_location);
+ result_map.put (offset_conc_key, sval);
+ }
+ else
+ /* If we have any symbolic keys we can't get it as bits. */
+ return NULL;
+ }
+ return mgr->get_or_create_compound_svalue (type, result_map);
+}
+
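The leading/trailing trimming in compound_svalue::maybe_fold_bits_within, with concrete numbers: a 32-bit binding at bits 0-31 of the compound value, from which bits 8-23 are requested, gets 8 bits trimmed from each end and is rebound at bits 0-15 of the result. A standalone sketch of that arithmetic (plain longs, not part of the patch):

#include <cassert>

int
main ()
{
  /* Binding: bits 0..31 of the compound value; request: bits 8..23.  */
  long key_start = 0, key_size = 32;
  long bits_start = 8, bits_size = 16;

  /* Position of the binding relative to the requested range.  */
  long loc_start = key_start - bits_start;   /* -8: starts before BITS.  */
  long loc_size = key_size;

  if (loc_start < 0)
    {
      long leading = -loc_start;             /* drop 8 leading bits.  */
      loc_size -= leading;
      loc_start = 0;
      /* => take BITS_WITHIN (bits 8.., ...) of the bound value.  */
      assert (leading == 8 && loc_size == 24);
    }
  long key_next = key_start + key_size;      /* 32.  */
  long bits_next = bits_start + bits_size;   /* 24.  */
  if (key_next > bits_next)
    {
      long trailing = key_next - bits_next;  /* drop 8 trailing bits.  */
      loc_size -= trailing;
      assert (trailing == 8 && loc_size == 16);
    }
  /* Result: the trimmed value bound at bits 0..15 of the new compound.  */
  assert (loc_start == 0 && loc_size == 16);
  return 0;
}
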
/* class conjured_svalue : public svalue. */
/* Implementation of svalue::dump_to_pp vfunc for conjured_svalue. */
diff --git a/gcc/analyzer/svalue.h b/gcc/analyzer/svalue.h
index d9e34aa..3965a5f 100644
--- a/gcc/analyzer/svalue.h
+++ b/gcc/analyzer/svalue.h
@@ -41,6 +41,8 @@ enum svalue_kind
SK_UNARYOP,
SK_BINOP,
SK_SUB,
+ SK_REPEATED,
+ SK_BITS_WITHIN,
SK_UNMERGEABLE,
SK_PLACEHOLDER,
SK_WIDENING,
@@ -63,6 +65,9 @@ enum svalue_kind
unaryop_svalue (SK_UNARYOP): unary operation on another svalue
binop_svalue (SK_BINOP): binary operation on two svalues
sub_svalue (SK_SUB): the result of accessing a subregion
+ repeated_svalue (SK_REPEATED): repeating an svalue to fill a larger region
+ bits_within_svalue (SK_BITS_WITHIN): a range of bits/bytes within a larger
+ svalue
unmergeable_svalue (SK_UNMERGEABLE): a value that is so interesting
from a control-flow perspective that it can inhibit state-merging
placeholder_svalue (SK_PLACEHOLDER): for use in selftests.
@@ -107,6 +112,10 @@ public:
dyn_cast_binop_svalue () const { return NULL; }
virtual const sub_svalue *
dyn_cast_sub_svalue () const { return NULL; }
+ virtual const repeated_svalue *
+ dyn_cast_repeated_svalue () const { return NULL; }
+ virtual const bits_within_svalue *
+ dyn_cast_bits_within_svalue () const { return NULL; }
virtual const unmergeable_svalue *
dyn_cast_unmergeable_svalue () const { return NULL; }
virtual const widening_svalue *
@@ -138,6 +147,16 @@ public:
bool involves_p (const svalue *other) const;
+ const svalue *
+ extract_bit_range (tree type,
+ const bit_range &subrange,
+ region_model_manager *mgr) const;
+
+ virtual const svalue *
+ maybe_fold_bits_within (tree type,
+ const bit_range &subrange,
+ region_model_manager *mgr) const;
+
protected:
svalue (complexity c, tree type)
: m_complexity (c), m_type (type)
@@ -175,9 +194,9 @@ public:
}
void mark_deleted () { m_type = reinterpret_cast<tree> (1); }
- void mark_empty () { m_type = NULL_TREE; }
+ void mark_empty () { m_type = reinterpret_cast<tree> (2); }
bool is_deleted () const { return m_type == reinterpret_cast<tree> (1); }
- bool is_empty () const { return m_type == NULL_TREE; }
+ bool is_empty () const { return m_type == reinterpret_cast<tree> (2); }
tree m_type;
const region *m_reg;
@@ -222,7 +241,7 @@ is_a_helper <const region_svalue *>::test (const svalue *sval)
template <> struct default_hash_traits<region_svalue::key_t>
: public member_function_hash_traits<region_svalue::key_t>
{
- static const bool empty_zero_p = true;
+ static const bool empty_zero_p = false;
};
namespace ana {
@@ -253,6 +272,11 @@ public:
enum tree_code op,
const constant_svalue *rhs);
+ const svalue *
+ maybe_fold_bits_within (tree type,
+ const bit_range &subrange,
+ region_model_manager *mgr) const FINAL OVERRIDE;
+
private:
tree m_cst_expr;
};
@@ -285,6 +309,11 @@ public:
void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
void accept (visitor *v) const FINAL OVERRIDE;
+
+ const svalue *
+ maybe_fold_bits_within (tree type,
+ const bit_range &subrange,
+ region_model_manager *mgr) const FINAL OVERRIDE;
};
/* An enum describing a particular kind of "poisoned" value. */
@@ -327,9 +356,9 @@ public:
}
void mark_deleted () { m_type = reinterpret_cast<tree> (1); }
- void mark_empty () { m_type = NULL_TREE; }
+ void mark_empty () { m_type = reinterpret_cast<tree> (2); }
bool is_deleted () const { return m_type == reinterpret_cast<tree> (1); }
- bool is_empty () const { return m_type == NULL_TREE; }
+ bool is_empty () const { return m_type == reinterpret_cast<tree> (2); }
enum poison_kind m_kind;
tree m_type;
@@ -364,7 +393,7 @@ is_a_helper <const poisoned_svalue *>::test (const svalue *sval)
template <> struct default_hash_traits<poisoned_svalue::key_t>
: public member_function_hash_traits<poisoned_svalue::key_t>
{
- static const bool empty_zero_p = true;
+ static const bool empty_zero_p = false;
};
namespace ana {
@@ -426,9 +455,9 @@ public:
}
void mark_deleted () { m_type = reinterpret_cast<tree> (1); }
- void mark_empty () { m_type = NULL_TREE; }
+ void mark_empty () { m_type = reinterpret_cast<tree> (2); }
bool is_deleted () const { return m_type == reinterpret_cast<tree> (1); }
- bool is_empty () const { return m_type == NULL_TREE; }
+ bool is_empty () const { return m_type == reinterpret_cast<tree> (2); }
setjmp_record m_record;
tree m_type;
@@ -467,7 +496,7 @@ is_a_helper <const setjmp_svalue *>::test (const svalue *sval)
template <> struct default_hash_traits<setjmp_svalue::key_t>
: public member_function_hash_traits<setjmp_svalue::key_t>
{
- static const bool empty_zero_p = true;
+ static const bool empty_zero_p = false;
};
namespace ana {
@@ -548,9 +577,9 @@ public:
}
void mark_deleted () { m_type = reinterpret_cast<tree> (1); }
- void mark_empty () { m_type = NULL_TREE; }
+ void mark_empty () { m_type = reinterpret_cast<tree> (2); }
bool is_deleted () const { return m_type == reinterpret_cast<tree> (1); }
- bool is_empty () const { return m_type == NULL_TREE; }
+ bool is_empty () const { return m_type == reinterpret_cast<tree> (2); }
tree m_type;
enum tree_code m_op;
@@ -574,6 +603,11 @@ public:
enum tree_code get_op () const { return m_op; }
const svalue *get_arg () const { return m_arg; }
+ const svalue *
+ maybe_fold_bits_within (tree type,
+ const bit_range &subrange,
+ region_model_manager *mgr) const FINAL OVERRIDE;
+
private:
enum tree_code m_op;
const svalue *m_arg;
@@ -592,7 +626,7 @@ is_a_helper <const unaryop_svalue *>::test (const svalue *sval)
template <> struct default_hash_traits<unaryop_svalue::key_t>
: public member_function_hash_traits<unaryop_svalue::key_t>
{
- static const bool empty_zero_p = true;
+ static const bool empty_zero_p = false;
};
namespace ana {
@@ -630,9 +664,9 @@ public:
}
void mark_deleted () { m_type = reinterpret_cast<tree> (1); }
- void mark_empty () { m_type = NULL_TREE; }
+ void mark_empty () { m_type = reinterpret_cast<tree> (2); }
bool is_deleted () const { return m_type == reinterpret_cast<tree> (1); }
- bool is_empty () const { return m_type == NULL_TREE; }
+ bool is_empty () const { return m_type == reinterpret_cast<tree> (2); }
tree m_type;
enum tree_code m_op;
@@ -683,7 +717,7 @@ is_a_helper <const binop_svalue *>::test (const svalue *sval)
template <> struct default_hash_traits<binop_svalue::key_t>
: public member_function_hash_traits<binop_svalue::key_t>
{
- static const bool empty_zero_p = true;
+ static const bool empty_zero_p = false;
};
namespace ana {
@@ -719,9 +753,9 @@ public:
}
void mark_deleted () { m_type = reinterpret_cast<tree> (1); }
- void mark_empty () { m_type = NULL_TREE; }
+ void mark_empty () { m_type = reinterpret_cast<tree> (2); }
bool is_deleted () const { return m_type == reinterpret_cast<tree> (1); }
- bool is_empty () const { return m_type == NULL_TREE; }
+ bool is_empty () const { return m_type == reinterpret_cast<tree> (2); }
tree m_type;
const svalue *m_parent_svalue;
@@ -762,7 +796,182 @@ is_a_helper <const sub_svalue *>::test (const svalue *sval)
template <> struct default_hash_traits<sub_svalue::key_t>
: public member_function_hash_traits<sub_svalue::key_t>
{
- static const bool empty_zero_p = true;
+ static const bool empty_zero_p = false;
+};
+
+namespace ana {
+
+/* Concrete subclass of svalue representing repeating an inner svalue
+ (possibly not a whole number of times) to fill a larger region of
+ type TYPE of size OUTER_SIZE bytes. */
+
+class repeated_svalue : public svalue
+{
+public:
+ /* A support class for uniquifying instances of repeated_svalue. */
+ struct key_t
+ {
+ key_t (tree type,
+ const svalue *outer_size,
+ const svalue *inner_svalue)
+ : m_type (type), m_outer_size (outer_size), m_inner_svalue (inner_svalue)
+ {}
+
+ hashval_t hash () const
+ {
+ inchash::hash hstate;
+ hstate.add_ptr (m_type);
+ hstate.add_ptr (m_outer_size);
+ hstate.add_ptr (m_inner_svalue);
+ return hstate.end ();
+ }
+
+ bool operator== (const key_t &other) const
+ {
+ return (m_type == other.m_type
+ && m_outer_size == other.m_outer_size
+ && m_inner_svalue == other.m_inner_svalue);
+ }
+
+ void mark_deleted () { m_type = reinterpret_cast<tree> (1); }
+ void mark_empty () { m_type = reinterpret_cast<tree> (2); }
+ bool is_deleted () const { return m_type == reinterpret_cast<tree> (1); }
+ bool is_empty () const { return m_type == reinterpret_cast<tree> (2); }
+
+ tree m_type;
+ const svalue *m_outer_size;
+ const svalue *m_inner_svalue;
+ };
+ repeated_svalue (tree type,
+ const svalue *outer_size,
+ const svalue *inner_svalue);
+
+ enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_REPEATED; }
+ const repeated_svalue *dyn_cast_repeated_svalue () const FINAL OVERRIDE
+ {
+ return this;
+ }
+
+ void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+ void accept (visitor *v) const FINAL OVERRIDE;
+
+ const svalue *get_outer_size () const { return m_outer_size; }
+ const svalue *get_inner_svalue () const { return m_inner_svalue; }
+
+ bool all_zeroes_p () const;
+
+ const svalue *
+ maybe_fold_bits_within (tree type,
+ const bit_range &subrange,
+ region_model_manager *mgr) const FINAL OVERRIDE;
+
+ private:
+ const svalue *m_outer_size;
+ const svalue *m_inner_svalue;
+};
+
+} // namespace ana
+
+template <>
+template <>
+inline bool
+is_a_helper <const repeated_svalue *>::test (const svalue *sval)
+{
+ return sval->get_kind () == SK_REPEATED;
+}
+
+template <> struct default_hash_traits<repeated_svalue::key_t>
+: public member_function_hash_traits<repeated_svalue::key_t>
+{
+ static const bool empty_zero_p = false;
+};
+
+namespace ana {
+
+/* A range of bits/bytes within another svalue,
+ e.g. bytes 5-39 of INITIAL_SVALUE(R).
+ These can be generated for prefixes and suffixes when part of a binding
+ is clobbered, so that we don't lose too much information. */
+
+class bits_within_svalue : public svalue
+{
+public:
+ /* A support class for uniquifying instances of bits_within_svalue. */
+ struct key_t
+ {
+ key_t (tree type,
+ const bit_range &bits,
+ const svalue *inner_svalue)
+ : m_type (type), m_bits (bits), m_inner_svalue (inner_svalue)
+ {}
+
+ hashval_t hash () const
+ {
+ inchash::hash hstate;
+ hstate.add_ptr (m_type);
+      hstate.add_wide_int (m_bits.m_start_bit_offset);
+      hstate.add_wide_int (m_bits.m_size_in_bits);
+      hstate.add_ptr (m_inner_svalue);
+ return hstate.end ();
+ }
+
+ bool operator== (const key_t &other) const
+ {
+ return (m_type == other.m_type
+ && m_bits == other.m_bits
+ && m_inner_svalue == other.m_inner_svalue);
+ }
+
+ void mark_deleted () { m_type = reinterpret_cast<tree> (1); }
+ void mark_empty () { m_type = reinterpret_cast<tree> (2); }
+ bool is_deleted () const { return m_type == reinterpret_cast<tree> (1); }
+ bool is_empty () const { return m_type == reinterpret_cast<tree> (2); }
+
+ tree m_type;
+ bit_range m_bits;
+ const svalue *m_inner_svalue;
+ };
+ bits_within_svalue (tree type,
+ const bit_range &bits,
+ const svalue *inner_svalue);
+
+ enum svalue_kind get_kind () const FINAL OVERRIDE { return SK_BITS_WITHIN; }
+ const bits_within_svalue *
+ dyn_cast_bits_within_svalue () const FINAL OVERRIDE
+ {
+ return this;
+ }
+
+ void dump_to_pp (pretty_printer *pp, bool simple) const FINAL OVERRIDE;
+ void accept (visitor *v) const FINAL OVERRIDE;
+ bool implicitly_live_p (const svalue_set *,
+ const region_model *) const FINAL OVERRIDE;
+
+ const bit_range &get_bits () const { return m_bits; }
+ const svalue *get_inner_svalue () const { return m_inner_svalue; }
+
+ const svalue *
+ maybe_fold_bits_within (tree type,
+ const bit_range &subrange,
+ region_model_manager *mgr) const FINAL OVERRIDE;
+
+ private:
+ const bit_range m_bits;
+ const svalue *m_inner_svalue;
+};
+
+} // namespace ana
+
+template <>
+template <>
+inline bool
+is_a_helper <const bits_within_svalue *>::test (const svalue *sval)
+{
+ return sval->get_kind () == SK_BITS_WITHIN;
+}
+
+template <> struct default_hash_traits<bits_within_svalue::key_t>
+: public member_function_hash_traits<bits_within_svalue::key_t>
+{
+ static const bool empty_zero_p = false;
};
namespace ana {
@@ -888,9 +1097,9 @@ public:
}
void mark_deleted () { m_type = reinterpret_cast<tree> (1); }
- void mark_empty () { m_type = NULL_TREE; }
+ void mark_empty () { m_type = reinterpret_cast<tree> (2); }
bool is_deleted () const { return m_type == reinterpret_cast<tree> (1); }
- bool is_empty () const { return m_type == NULL_TREE; }
+ bool is_empty () const { return m_type == reinterpret_cast<tree> (2); }
tree m_type;
function_point m_point;
@@ -952,7 +1161,7 @@ is_a_helper <widening_svalue *>::test (svalue *sval)
template <> struct default_hash_traits<widening_svalue::key_t>
: public member_function_hash_traits<widening_svalue::key_t>
{
- static const bool empty_zero_p = true;
+ static const bool empty_zero_p = false;
};
namespace ana {
@@ -1000,9 +1209,9 @@ public:
}
void mark_deleted () { m_type = reinterpret_cast<tree> (1); }
- void mark_empty () { m_type = NULL_TREE; }
+ void mark_empty () { m_type = reinterpret_cast<tree> (2); }
bool is_deleted () const { return m_type == reinterpret_cast<tree> (1); }
- bool is_empty () const { return m_type == NULL_TREE; }
+ bool is_empty () const { return m_type == reinterpret_cast<tree> (2); }
tree m_type;
const binding_map *m_map_ptr;
@@ -1029,6 +1238,11 @@ public:
return key_t (get_type (), &m_map);
}
+ const svalue *
+ maybe_fold_bits_within (tree type,
+ const bit_range &subrange,
+ region_model_manager *mgr) const FINAL OVERRIDE;
+
private:
static complexity calc_complexity (const binding_map &map);
@@ -1048,7 +1262,7 @@ is_a_helper <compound_svalue *>::test (svalue *sval)
template <> struct default_hash_traits<compound_svalue::key_t>
: public member_function_hash_traits<compound_svalue::key_t>
{
- static const bool empty_zero_p = true;
+ static const bool empty_zero_p = false;
};
namespace ana {