aboutsummaryrefslogtreecommitdiff
path: root/gcc/analyzer/region-model.cc
diff options
context:
space:
mode:
authorDavid Malcolm <dmalcolm@redhat.com>2022-09-09 17:13:04 -0400
committerDavid Malcolm <dmalcolm@redhat.com>2022-09-09 17:13:04 -0400
commitc81b60b8c6ff3d4db2e395a628e114df812cfc48 (patch)
treee1e3715a12fdaf50af93746d0df9ec4c744f8321 /gcc/analyzer/region-model.cc
parent07e30160beaa207f56f170900fac0d799c6af410 (diff)
downloadgcc-c81b60b8c6ff3d4db2e395a628e114df812cfc48.zip
gcc-c81b60b8c6ff3d4db2e395a628e114df812cfc48.tar.gz
gcc-c81b60b8c6ff3d4db2e395a628e114df812cfc48.tar.bz2
analyzer: implement trust boundaries via a plugin for Linux kernel
This is a less ambitious version of: [PATCH 0/6] RFC: adding support to GCC for detecting trust boundaries https://gcc.gnu.org/pipermail/gcc-patches/2021-November/584372.html Earlier versions of this patch attempted: (a) various ways of identifying "untrusted" memory regions (b) providing a way to support the Linux kernel's "__user" annotation, either via type attributes, or via custom address spaces (c) enough attributes to identify "copy_from_user" and "copy_to_user", (d) wiring all of the above together to detect infoleaks and taint This patch adds a new -Wanalyzer-exposure-through-uninit-copy, emitted by -fanalyzer if it detects copying of uninitialized data through a pointer to an untrusted region, but requires a plugin to tell it when a copy crosses a trust boundary. This patch adds a proof-of-concept gcc plugin for the analyzer for use with the Linux kernel that special-cases calls to "copy_from_user" and calls to "copy_to_user": calls to copy_to_user are checked for -Wanalyzer-exposure-through-uninit-copy, and data copied via copy_from_user is marked as tainted when -fanalyzer-checker=taint is active. This is very much just a proof-of-concept. A big limitation is that the copy_{from,to}_user special-casing only happens if these functions have no body in the TU being analyzed, which isn't the case for a normal kernel build. I'd much prefer to provide a more general mechanism for handling such behavior without resorting to plugins (e.g. via attributes or custom address spaces), but in the interest of not "letting perfect be the enemy of the good" this patch at least allows parts of this "trust boundaries" code to be merged for experimentation with the idea. The -Wanalyzer-exposure-through-uninit-copy diagnostic uses notes to express what fields and padding within a struct have not been initialized. 
For example: infoleak-CVE-2011-1078-2.c: In function 'test_1': infoleak-CVE-2011-1078-2.c:32:9: warning: potential exposure of sensitive information by copying uninitialized data from stack across trust boundary [CWE-200] [-Wanalyzer-exposure-through-uninit-copy] 32 | copy_to_user(optval, &cinfo, sizeof(cinfo)); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 'test_1': events 1-3 | | 25 | struct sco_conninfo cinfo; | | ^~~~~ | | | | | (1) region created on stack here | | (2) capacity: 6 bytes |...... | 32 | copy_to_user(optval, &cinfo, sizeof(cinfo)); | | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | | | | | (3) uninitialized data copied from stack here | infoleak-CVE-2011-1078-2.c:32:9: note: 1 byte is uninitialized 32 | copy_to_user(optval, &cinfo, sizeof(cinfo)); | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ infoleak-CVE-2011-1078-2.c:18:15: note: padding after field 'dev_class' is uninitialized (1 byte) 18 | __u8 dev_class[3]; | ^~~~~~~~~ infoleak-CVE-2011-1078-2.c:25:29: note: suggest forcing zero-initialization by providing a '{0}' initializer 25 | struct sco_conninfo cinfo; | ^~~~~ | = {0} For taint-detection, the patch includes a series of reproducers for detecting CVE-2011-0521. Unfortunately the analyzer doesn't yet detect the issue until the code has been significantly simplified from its original form: currently only in -5.c and -6.c in the series of test (see notes in the individual cases), such as: taint-CVE-2011-0521-6.c:33:48: warning: use of attacker-controlled value '*info.num' in array lookup without bounds checking [CWE-129] [-Wanalyzer-tainted-array-index] 33 | av7110->ci_slot[info->num].num = info->num; | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~~~ 'test_1': events 1-3 | | 19 | if (copy_from_user(&sbuf, (void __user *)arg, sizeof(sbuf)) != 0) | | ^ | | | | | (1) following 'false' branch... |...... | 23 | struct dvb_device *dvbdev = file->private_data; | | ~~~~~~ | | | | | (2) ...to here |...... 
| 33 | av7110->ci_slot[info->num].num = info->num; | | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | | | | | (3) use of attacker-controlled value '*info.num' in array lookup without bounds checking | The patch also includes various infoleak and taint cases from my antipatterns.ko kernel module: https://github.com/davidmalcolm/antipatterns.ko gcc/analyzer/ChangeLog: * analyzer.opt (Wanalyzer-exposure-through-uninit-copy): New. * checker-path.cc (region_creation_event::region_creation_event): Add "capacity" and "kind" params. (region_creation_event::get_desc): Generalize to different kinds of event. (checker_path::add_region_creation_event): Convert to... (checker_path::add_region_creation_events): ...this. * checker-path.h (enum rce_kind): New. (region_creation_event::region_creation_event): Add "capacity" and "kind" params. (region_creation_event::m_capacity): New field. (region_creation_event::m_rce_kind): New field. (checker_path::add_region_creation_event): Convert to... (checker_path::add_region_creation_events): ...this. * diagnostic-manager.cc (diagnostic_manager::build_emission_path): Update for multiple region creation events. (diagnostic_manager::add_event_on_final_node): Likewise. (diagnostic_manager::add_events_for_eedge): Likewise. * region-model-impl-calls.cc (call_details::get_logger): New. * region-model.cc: Define INCLUDE_MEMORY before including "system.h". Include "gcc-rich-location.h". (class record_layout): New. (class exposure_through_uninit_copy): New. (contains_uninit_p): New. (region_model::maybe_complain_about_infoleak): New. * region-model.h (call_details::get_logger): New decl. (region_model::maybe_complain_about_infoleak): New decl. (region_model::mark_as_tainted): New decl. * sm-taint.cc (region_model::mark_as_tainted): New. gcc/ChangeLog: * doc/invoke.texi (Static Analyzer Options): Add -Wanalyzer-exposure-through-uninit-copy. gcc/testsuite/ChangeLog: * gcc.dg/plugin/analyzer_kernel_plugin.c: New test. 
* gcc.dg/plugin/copy_from_user-1.c: New test. * gcc.dg/plugin/infoleak-1.c: New test. * gcc.dg/plugin/infoleak-2.c: New test. * gcc.dg/plugin/infoleak-3.c: New test. * gcc.dg/plugin/infoleak-CVE-2011-1078-1.c: New test. * gcc.dg/plugin/infoleak-CVE-2011-1078-2.c: New test. * gcc.dg/plugin/infoleak-CVE-2014-1446-1.c: New test. * gcc.dg/plugin/infoleak-CVE-2017-18549-1.c: New test. * gcc.dg/plugin/infoleak-CVE-2017-18550-1.c: New test. * gcc.dg/plugin/infoleak-antipatterns-1.c: New test. * gcc.dg/plugin/infoleak-fixit-1.c: New test. * gcc.dg/plugin/infoleak-net-ethtool-ioctl.c: New test. * gcc.dg/plugin/infoleak-vfio_iommu_type1.c: New test. * gcc.dg/plugin/plugin.exp (plugin_test_list): Add analyzer_kernel_plugin.c and the new test cases. * gcc.dg/plugin/taint-CVE-2011-0521-1-fixed.c: New test. * gcc.dg/plugin/taint-CVE-2011-0521-1.c: New test. * gcc.dg/plugin/taint-CVE-2011-0521-2-fixed.c: New test. * gcc.dg/plugin/taint-CVE-2011-0521-2.c: New test. * gcc.dg/plugin/taint-CVE-2011-0521-3-fixed.c: New test. * gcc.dg/plugin/taint-CVE-2011-0521-3.c: New test. * gcc.dg/plugin/taint-CVE-2011-0521-4.c: New test. * gcc.dg/plugin/taint-CVE-2011-0521-5-fixed.c: New test. * gcc.dg/plugin/taint-CVE-2011-0521-5.c: New test. * gcc.dg/plugin/taint-CVE-2011-0521-6.c: New test. * gcc.dg/plugin/taint-CVE-2011-0521.h: New test. * gcc.dg/plugin/taint-antipatterns-1.c: New test. * gcc.dg/plugin/test-uaccess.h: New header for tests. Signed-off-by: David Malcolm <dmalcolm@redhat.com>
Diffstat (limited to 'gcc/analyzer/region-model.cc')
-rw-r--r--gcc/analyzer/region-model.cc568
1 file changed, 568 insertions, 0 deletions
diff --git a/gcc/analyzer/region-model.cc b/gcc/analyzer/region-model.cc
index bc9db69..6eeb684 100644
--- a/gcc/analyzer/region-model.cc
+++ b/gcc/analyzer/region-model.cc
@@ -19,6 +19,7 @@ along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#include "config.h"
+#define INCLUDE_MEMORY
#include "system.h"
#include "coretypes.h"
#include "tree.h"
@@ -74,6 +75,7 @@ along with GCC; see the file COPYING3. If not see
#include "ssa-iterators.h"
#include "calls.h"
#include "is-a.h"
+#include "gcc-rich-location.h"
#if ENABLE_ANALYZER
@@ -5784,6 +5786,572 @@ region_model::unset_dynamic_extents (const region *reg)
m_dynamic_extents.remove (reg);
}
+/* Information about the layout of a RECORD_TYPE, capturing it as a vector
+ of items, where each item is either a field or padding. */
+
+class record_layout
+{
+public:
+ /* An item within a record; either a field, or padding after a field.
+ Both kinds carry the bit_range they occupy; a padding item records
+ the field it follows in m_field. */
+ struct item
+ {
+ public:
+ item (const bit_range &br,
+ tree field,
+ bool is_padding)
+ : m_bit_range (br),
+ m_field (field),
+ m_is_padding (is_padding)
+ {
+ }
+
+ /* Accessors delegating to the underlying bit_range. */
+ bit_offset_t get_start_bit_offset () const
+ {
+ return m_bit_range.get_start_bit_offset ();
+ }
+ bit_offset_t get_next_bit_offset () const
+ {
+ return m_bit_range.get_next_bit_offset ();
+ }
+
+ /* Return true if bit OFFSET falls within this item. */
+ bool contains_p (bit_offset_t offset) const
+ {
+ return m_bit_range.contains_p (offset);
+ }
+
+ /* Debug dump of this item to PP. */
+ void dump_to_pp (pretty_printer *pp) const
+ {
+ if (m_is_padding)
+ pp_printf (pp, "padding after %qD", m_field);
+ else
+ pp_printf (pp, "%qD", m_field);
+ pp_string (pp, ", ");
+ m_bit_range.dump_to_pp (pp);
+ }
+
+ bit_range m_bit_range;
+ tree m_field;
+ bool m_is_padding;
+ };
+
+ /* ctor: walk the FIELD_DECLs of RECORD_TYPE in declaration order,
+ pushing one item per field and synthesizing padding items for any
+ gaps between fields (and after the final field). Fields whose size
+ is not known at compile time are recorded with size 0. */
+ record_layout (tree record_type)
+ : m_record_type (record_type)
+ {
+ gcc_assert (TREE_CODE (record_type) == RECORD_TYPE);
+
+ for (tree iter = TYPE_FIELDS (record_type); iter != NULL_TREE;
+ iter = DECL_CHAIN (iter))
+ {
+ if (TREE_CODE (iter) == FIELD_DECL)
+ {
+ int iter_field_offset = int_bit_position (iter);
+ bit_size_t size_in_bits;
+ if (!int_size_in_bits (TREE_TYPE (iter), &size_in_bits))
+ size_in_bits = 0;
+
+ maybe_pad_to (iter_field_offset);
+
+ /* Add field. */
+ m_items.safe_push (item (bit_range (iter_field_offset,
+ size_in_bits),
+ iter, false));
+ }
+ }
+
+ /* Add any trailing padding. */
+ bit_size_t size_in_bits;
+ if (int_size_in_bits (record_type, &size_in_bits))
+ maybe_pad_to (size_in_bits);
+ }
+
+ /* Debug dump of every item, one per line, to PP. */
+ void dump_to_pp (pretty_printer *pp) const
+ {
+ unsigned i;
+ item *it;
+ FOR_EACH_VEC_ELT (m_items, i, it)
+ {
+ it->dump_to_pp (pp);
+ pp_newline (pp);
+ }
+ }
+
+ /* Debug dump to stderr, for use from the debugger. */
+ DEBUG_FUNCTION void dump () const
+ {
+ pretty_printer pp;
+ pp_format_decoder (&pp) = default_tree_printer;
+ pp.buffer->stream = stderr;
+ dump_to_pp (&pp);
+ pp_flush (&pp);
+ }
+
+ /* Return the item (field or padding) containing bit OFFSET,
+ or NULL if no item covers that offset (linear search). */
+ const record_layout::item *get_item_at (bit_offset_t offset) const
+ {
+ unsigned i;
+ item *it;
+ FOR_EACH_VEC_ELT (m_items, i, it)
+ if (it->contains_p (offset))
+ return it;
+ return NULL;
+ }
+
+private:
+ /* Subroutine of ctor. Add padding item to NEXT_OFFSET if necessary. */
+
+ void maybe_pad_to (bit_offset_t next_offset)
+ {
+ if (m_items.length () > 0)
+ {
+ const item &last_item = m_items[m_items.length () - 1];
+ bit_offset_t offset_after_last_item
+ = last_item.get_next_bit_offset ();
+ if (next_offset > offset_after_last_item)
+ {
+ bit_size_t padding_size
+ = next_offset - offset_after_last_item;
+ m_items.safe_push (item (bit_range (offset_after_last_item,
+ padding_size),
+ last_item.m_field, true));
+ }
+ }
+ }
+
+ tree m_record_type;
+ /* Items in increasing bit-offset order; fields interleaved with
+ synthesized padding items. */
+ auto_vec<item> m_items;
+};
+
+/* A subclass of pending_diagnostic for complaining about uninitialized data
+ being copied across a trust boundary to an untrusted output
+ (e.g. copy_to_user infoleaks in the Linux kernel). */
+
+class exposure_through_uninit_copy
+ : public pending_diagnostic_subclass<exposure_through_uninit_copy>
+{
+public:
+ /* ctor. SRC_REGION may be NULL; the assert below shows COPIED_SVAL
+ must be either a poisoned svalue (whole value uninit) or a compound
+ svalue (some bindings may be uninit). */
+ exposure_through_uninit_copy (const region *src_region,
+ const region *dest_region,
+ const svalue *copied_sval,
+ region_model_manager *mgr)
+ : m_src_region (src_region),
+ m_dest_region (dest_region),
+ m_copied_sval (copied_sval),
+ m_mgr (mgr)
+ {
+ gcc_assert (m_copied_sval->get_kind () == SK_POISONED
+ || m_copied_sval->get_kind () == SK_COMPOUND);
+ }
+
+ /* Identifying string for this diagnostic class. */
+ const char *get_kind () const final override
+ {
+ return "exposure_through_uninit_copy";
+ }
+
+ /* Deduplication: two instances are "the same" iff they concern the
+ same source/dest regions and the same copied value. */
+ bool operator== (const exposure_through_uninit_copy &other) const
+ {
+ return (m_src_region == other.m_src_region
+ && m_dest_region == other.m_dest_region
+ && m_copied_sval == other.m_copied_sval);
+ }
+
+ int get_controlling_option () const final override
+ {
+ return OPT_Wanalyzer_exposure_through_uninit_copy;
+ }
+
+ /* Emit the warning (CWE-200), with wording specialized by the memory
+ space of the source region; if the warning was emitted, add notes
+ quantifying and locating the uninitialized bits, plus (for stack
+ sources) a fix-it hint suggesting zero-initialization. */
+ bool emit (rich_location *rich_loc) final override
+ {
+ diagnostic_metadata m;
+ /* CWE-200: Exposure of Sensitive Information to an Unauthorized Actor. */
+ m.add_cwe (200);
+ enum memory_space mem_space = get_src_memory_space ();
+ bool warned;
+ switch (mem_space)
+ {
+ default:
+ warned = warning_meta
+ (rich_loc, m, get_controlling_option (),
+ "potential exposure of sensitive information"
+ " by copying uninitialized data across trust boundary");
+ break;
+ case MEMSPACE_STACK:
+ warned = warning_meta
+ (rich_loc, m, get_controlling_option (),
+ "potential exposure of sensitive information"
+ " by copying uninitialized data from stack across trust boundary");
+ break;
+ case MEMSPACE_HEAP:
+ warned = warning_meta
+ (rich_loc, m, get_controlling_option (),
+ "potential exposure of sensitive information"
+ " by copying uninitialized data from heap across trust boundary");
+ break;
+ }
+ if (warned)
+ {
+ location_t loc = rich_loc->get_loc ();
+ inform_number_of_uninit_bits (loc);
+ complain_about_uninit_ranges (loc);
+
+ if (mem_space == MEMSPACE_STACK)
+ maybe_emit_fixit_hint ();
+ }
+ return warned;
+ }
+
+ /* Text for the final event in the diagnostic path, again specialized
+ by the source region's memory space. */
+ label_text describe_final_event (const evdesc::final_event &) final override
+ {
+ enum memory_space mem_space = get_src_memory_space ();
+ switch (mem_space)
+ {
+ default:
+ return label_text::borrow ("uninitialized data copied here");
+
+ case MEMSPACE_STACK:
+ return label_text::borrow ("uninitialized data copied from stack here");
+
+ case MEMSPACE_HEAP:
+ return label_text::borrow ("uninitialized data copied from heap here");
+ }
+ }
+
+ /* Mark the source region as interesting so that its creation appears
+ as an event in the emitted path. */
+ void mark_interesting_stuff (interesting_t *interest) final override
+ {
+ if (m_src_region)
+ interest->add_region_creation (m_src_region);
+ }
+
+private:
+ /* Memory space of the source region, or MEMSPACE_UNKNOWN if no
+ source region was supplied. */
+ enum memory_space get_src_memory_space () const
+ {
+ return m_src_region ? m_src_region->get_memory_space () : MEMSPACE_UNKNOWN;
+ }
+
+ /* Return the total number of uninitialized bits in the copied value,
+ or 0 if this can't be determined (no type, or non-constant size). */
+ bit_size_t calc_num_uninit_bits () const
+ {
+ switch (m_copied_sval->get_kind ())
+ {
+ default:
+ gcc_unreachable ();
+ break;
+ case SK_POISONED:
+ {
+ /* Fully-poisoned value: every bit of its type is uninit. */
+ const poisoned_svalue *poisoned_sval
+ = as_a <const poisoned_svalue *> (m_copied_sval);
+ gcc_assert (poisoned_sval->get_poison_kind () == POISON_KIND_UNINIT);
+
+ /* Give up if don't have type information. */
+ if (m_copied_sval->get_type () == NULL_TREE)
+ return 0;
+
+ bit_size_t size_in_bits;
+ if (int_size_in_bits (m_copied_sval->get_type (), &size_in_bits))
+ return size_in_bits;
+
+ /* Give up if we can't get the size of the type. */
+ return 0;
+ }
+ break;
+ case SK_COMPOUND:
+ {
+ /* Compound value: sum the sizes of the concretely-bound keys
+ whose bound value is uninit. */
+ const compound_svalue *compound_sval
+ = as_a <const compound_svalue *> (m_copied_sval);
+ bit_size_t result = 0;
+ /* Find keys for uninit svals. */
+ for (auto iter : *compound_sval)
+ {
+ const svalue *sval = iter.second;
+ if (const poisoned_svalue *psval
+ = sval->dyn_cast_poisoned_svalue ())
+ if (psval->get_poison_kind () == POISON_KIND_UNINIT)
+ {
+ const binding_key *key = iter.first;
+ const concrete_binding *ckey
+ = key->dyn_cast_concrete_binding ();
+ gcc_assert (ckey);
+ result += ckey->get_size_in_bits ();
+ }
+ }
+ return result;
+ }
+ }
+ }
+
+ /* Emit a note at LOC stating how many bits/bytes are uninitialized,
+ expressed in bytes when the count is a whole number of bytes.
+ No note is emitted when the count is zero/unknown. */
+ void inform_number_of_uninit_bits (location_t loc) const
+ {
+ bit_size_t num_uninit_bits = calc_num_uninit_bits ();
+ if (num_uninit_bits <= 0)
+ return;
+ if (num_uninit_bits % BITS_PER_UNIT == 0)
+ {
+ /* Express in bytes. */
+ byte_size_t num_uninit_bytes = num_uninit_bits / BITS_PER_UNIT;
+ if (num_uninit_bytes == 1)
+ inform (loc, "1 byte is uninitialized");
+ else
+ inform (loc,
+ "%wu bytes are uninitialized", num_uninit_bytes.to_uhwi ());
+ }
+ else
+ {
+ /* Express in bits. */
+ if (num_uninit_bits == 1)
+ inform (loc, "1 bit is uninitialized");
+ else
+ inform (loc,
+ "%wu bits are uninitialized", num_uninit_bits.to_uhwi ());
+ }
+ }
+
+ /* For a compound copied value, emit one note per concretely-bound
+ uninit range, in increasing offset order, using a record_layout
+ (when the copied type is a RECORD_TYPE) to name the fields and
+ padding involved. No-op for non-compound values. */
+ void complain_about_uninit_ranges (location_t loc) const
+ {
+ if (const compound_svalue *compound_sval
+ = m_copied_sval->dyn_cast_compound_svalue ())
+ {
+ /* Find keys for uninit svals. */
+ auto_vec<const concrete_binding *> uninit_keys;
+ for (auto iter : *compound_sval)
+ {
+ const svalue *sval = iter.second;
+ if (const poisoned_svalue *psval
+ = sval->dyn_cast_poisoned_svalue ())
+ if (psval->get_poison_kind () == POISON_KIND_UNINIT)
+ {
+ const binding_key *key = iter.first;
+ const concrete_binding *ckey
+ = key->dyn_cast_concrete_binding ();
+ gcc_assert (ckey);
+ uninit_keys.safe_push (ckey);
+ }
+ }
+ /* Complain about them in sorted order. */
+ uninit_keys.qsort (concrete_binding::cmp_ptr_ptr);
+
+ std::unique_ptr<record_layout> layout;
+
+ tree type = m_copied_sval->get_type ();
+ if (type && TREE_CODE (type) == RECORD_TYPE)
+ {
+ // (std::make_unique is C++14)
+ layout = std::unique_ptr<record_layout> (new record_layout (type));
+
+ /* Debug dump of the layout; disabled by default. */
+ if (0)
+ layout->dump ();
+ }
+
+ unsigned i;
+ const concrete_binding *ckey;
+ FOR_EACH_VEC_ELT (uninit_keys, i, ckey)
+ {
+ bit_offset_t start_bit = ckey->get_start_bit_offset ();
+ bit_offset_t next_bit = ckey->get_next_bit_offset ();
+ complain_about_uninit_range (loc, start_bit, next_bit,
+ layout.get ());
+ }
+ }
+ }
+
+ /* Describe the uninit half-open bit range [START_BIT, NEXT_BIT).
+ With a LAYOUT, walk the fields/padding items overlapping the range,
+ emitting a note per item; without one (or for any residue the layout
+ doesn't cover), fall back to raw byte/bit offsets.
+ NOTE(review): this uses a literal 8 rather than BITS_PER_UNIT,
+ unlike inform_number_of_uninit_bits above — confirm intentional. */
+ void complain_about_uninit_range (location_t loc,
+ bit_offset_t start_bit,
+ bit_offset_t next_bit,
+ const record_layout *layout) const
+ {
+ if (layout)
+ {
+ while (start_bit < next_bit)
+ {
+ if (const record_layout::item *item
+ = layout->get_item_at (start_bit))
+ {
+ gcc_assert (start_bit >= item->get_start_bit_offset ());
+ gcc_assert (start_bit < item->get_next_bit_offset ());
+ if (item->get_start_bit_offset () == start_bit
+ && item->get_next_bit_offset () <= next_bit)
+ complain_about_fully_uninit_item (*item);
+ else
+ complain_about_partially_uninit_item (*item);
+ start_bit = item->get_next_bit_offset ();
+ continue;
+ }
+ else
+ break;
+ }
+ }
+
+ if (start_bit >= next_bit)
+ return;
+
+ if (start_bit % 8 == 0 && next_bit % 8 == 0)
+ {
+ /* Express in bytes. */
+ byte_offset_t start_byte = start_bit / 8;
+ byte_offset_t last_byte = (next_bit / 8) - 1;
+ if (last_byte == start_byte)
+ inform (loc,
+ "byte %wu is uninitialized",
+ start_byte.to_uhwi ());
+ else
+ inform (loc,
+ "bytes %wu - %wu are uninitialized",
+ start_byte.to_uhwi (),
+ last_byte.to_uhwi ());
+ }
+ else
+ {
+ /* Express in bits. */
+ bit_offset_t last_bit = next_bit - 1;
+ if (last_bit == start_bit)
+ inform (loc,
+ "bit %wu is uninitialized",
+ start_bit.to_uhwi ());
+ else
+ inform (loc,
+ "bits %wu - %wu are uninitialized",
+ start_bit.to_uhwi (),
+ last_bit.to_uhwi ());
+ }
+ }
+
+ /* Note (at the field's decl) that ITEM — a field, or the padding
+ after one — is entirely uninitialized, sized in bytes or bits. */
+ static void
+ complain_about_fully_uninit_item (const record_layout::item &item)
+ {
+ tree field = item.m_field;
+ bit_size_t num_bits = item.m_bit_range.m_size_in_bits;
+ if (item.m_is_padding)
+ {
+ if (num_bits % 8 == 0)
+ {
+ /* Express in bytes. */
+ byte_size_t num_bytes = num_bits / BITS_PER_UNIT;
+ if (num_bytes == 1)
+ inform (DECL_SOURCE_LOCATION (field),
+ "padding after field %qD is uninitialized (1 byte)",
+ field);
+ else
+ inform (DECL_SOURCE_LOCATION (field),
+ "padding after field %qD is uninitialized (%wu bytes)",
+ field, num_bytes.to_uhwi ());
+ }
+ else
+ {
+ /* Express in bits. */
+ if (num_bits == 1)
+ inform (DECL_SOURCE_LOCATION (field),
+ "padding after field %qD is uninitialized (1 bit)",
+ field);
+ else
+ inform (DECL_SOURCE_LOCATION (field),
+ "padding after field %qD is uninitialized (%wu bits)",
+ field, num_bits.to_uhwi ());
+ }
+ }
+ else
+ {
+ if (num_bits % 8 == 0)
+ {
+ /* Express in bytes. */
+ byte_size_t num_bytes = num_bits / BITS_PER_UNIT;
+ if (num_bytes == 1)
+ inform (DECL_SOURCE_LOCATION (field),
+ "field %qD is uninitialized (1 byte)", field);
+ else
+ inform (DECL_SOURCE_LOCATION (field),
+ "field %qD is uninitialized (%wu bytes)",
+ field, num_bytes.to_uhwi ());
+ }
+ else
+ {
+ /* Express in bits. */
+ if (num_bits == 1)
+ inform (DECL_SOURCE_LOCATION (field),
+ "field %qD is uninitialized (1 bit)", field);
+ else
+ inform (DECL_SOURCE_LOCATION (field),
+ "field %qD is uninitialized (%wu bits)",
+ field, num_bits.to_uhwi ());
+ }
+ }
+ }
+
+ /* Note (at the field's decl) that ITEM is only partly uninitialized. */
+ static void
+ complain_about_partially_uninit_item (const record_layout::item &item)
+ {
+ tree field = item.m_field;
+ if (item.m_is_padding)
+ inform (DECL_SOURCE_LOCATION (field),
+ "padding after field %qD is partially uninitialized",
+ field);
+ else
+ inform (DECL_SOURCE_LOCATION (field),
+ "field %qD is partially uninitialized",
+ field);
+ /* TODO: ideally we'd describe what parts are uninitialized. */
+ }
+
+ /* If the source region is backed by a decl, suggest a '= {0}'
+ initializer via a fix-it hint at the decl's location. */
+ void maybe_emit_fixit_hint () const
+ {
+ if (tree decl = m_src_region->maybe_get_decl ())
+ {
+ gcc_rich_location hint_richloc (DECL_SOURCE_LOCATION (decl));
+ hint_richloc.add_fixit_insert_after (" = {0}");
+ inform (&hint_richloc,
+ "suggest forcing zero-initialization by"
+ " providing a %<{0}%> initializer");
+ }
+ }
+
+private:
+ const region *m_src_region; /* Where the data came from; may be NULL. */
+ const region *m_dest_region; /* Untrusted destination of the copy. */
+ const svalue *m_copied_sval; /* The (partly) uninitialized value. */
+ region_model_manager *m_mgr;
+};
+
+/* Return true if any part of SVAL is uninitialized. */
+
+static bool
+contains_uninit_p (const svalue *sval)
+{
+ /* Visitor that records whether any visited poisoned svalue has
+ "uninitialized" as its poison kind; svalue::accept walks the
+ value, so nested values within compounds are covered too. */
+ struct uninit_finder : public visitor
+ {
+ public:
+ uninit_finder () : m_found_uninit (false) {}
+ void visit_poisoned_svalue (const poisoned_svalue *sval)
+ {
+ if (sval->get_poison_kind () == POISON_KIND_UNINIT)
+ m_found_uninit = true;
+ }
+ bool m_found_uninit; /* Sticky flag: set once, never cleared. */
+ };
+
+ uninit_finder v;
+ sval->accept (&v);
+
+ return v.m_found_uninit;
+}
+
+/* Function for use by plugins when simulating writing data through a
+ pointer to an "untrusted" region DST_REG (and thus crossing a security
+ boundary), such as copying data to user space in an OS kernel.
+
+ Check that COPIED_SVAL is fully initialized. If not, complain about
+ an infoleak to CTXT. CTXT may be NULL, in which case no diagnostic
+ is created (matching the nullable-context convention used by the
+ other region_model methods in this file).
+
+ SRC_REG can be NULL; if non-NULL it is used as a hint in the diagnostic
+ as to where COPIED_SVAL came from. */
+
+void
+region_model::maybe_complain_about_infoleak (const region *dst_reg,
+ const svalue *copied_sval,
+ const region *src_reg,
+ region_model_context *ctxt)
+{
+ /* Guard against a NULL context (e.g. when a model is being used for
+ feasibility checking rather than exploration): previously CTXT was
+ dereferenced unconditionally. */
+ if (!ctxt)
+ return;
+
+ /* Check for exposure; ownership of the new diagnostic passes to CTXT. */
+ if (contains_uninit_p (copied_sval))
+ ctxt->warn (new exposure_through_uninit_copy (src_reg,
+ dst_reg,
+ copied_sval,
+ m_mgr));
+}
+
/* class noop_region_model_context : public region_model_context. */
void