path: root/libgcc/hardcfr.c
/* Control flow redundancy hardening
   Copyright (C) 2022-2024 Free Software Foundation, Inc.
   Contributed by Alexandre Oliva <oliva@adacore.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

Under Section 7 of GPL version 3, you are granted additional
permissions described in the GCC Runtime Library Exception, version
3.1, as published by the Free Software Foundation.

You should have received a copy of the GNU General Public License and
a copy of the GCC Runtime Library Exception along with this program;
see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
<http://www.gnu.org/licenses/>.  */

/* Avoid infinite recursion.  */
#pragma GCC optimize ("-fno-harden-control-flow-redundancy")

#include <stddef.h>
#include <stdbool.h>

/* This should be kept in sync with gcc/gimple-harden-control-flow.cc.  */
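/* The test below picks the narrowest of QImode (1 char), HImode (2 chars)
   and SImode (4 chars) that is sure to be at least 28 bits wide; the
   specific threshold is presumably dictated by the GCC pass.  */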
#if __CHAR_BIT__ >= 28
# define VWORDmode __QI__
#elif __CHAR_BIT__ >= 14
# define VWORDmode __HI__
#else
# define VWORDmode __SI__
#endif

typedef unsigned int __attribute__ ((__mode__ (VWORDmode))) vword;

/* This function is optionally called at the end of a function to verify that
   the VISITED array represents a sensible execution path in the CFG.  It is
   always expected to pass; the purpose is to detect attempts to subvert
   execution by taking unexpected paths, or other execution errors.  The
   function, instrumented by pass_harden_control_flow_redundancy at a time in
   which it had BLOCKS basic blocks (not counting ENTRY and EXIT, so block 2
   maps to index 0, the first bit of the first VWORD), sets a bit in the bit
   array VISITED as it enters the corresponding basic block.  CFG holds a
   representation of the control flow graph at the time of the instrumentation:
   an array of VWORDs holding, for each block, a sequence of predecessors, and
   a sequence of successors.  Each pred and succ sequence is represented as a
   sequence of pairs (mask, index), terminated by an index-less all-zero mask.
   If the bit corresponding to the block is set, then at least one of the pred
   masks, and at least one of the succ masks, must have a bit set in
   VISITED[index].  An ENTRY block predecessor and an EXIT block successor are
   represented in a (mask, index) pair that tests the block's own bit.  */
extern void __hardcfr_check (size_t blocks,
			     vword const *visited,
			     vword const *cfg);
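
/* For illustration only (this example is not from the GCC sources): with
   32-bit vwords, a body of three blocks, indexed 0..2 here, with edges
   ENTRY->0, 0->1, 0->2, 1->EXIT and 2->EXIT could be encoded as the vword
   array

     { 0x1, 0, 0,            block 0 preds: ENTRY (own bit); terminator
       0x2, 0, 0x4, 0, 0,    block 0 succs: blocks 1 and 2; terminator
       0x1, 0, 0,            block 1 preds: block 0; terminator
       0x2, 0, 0,            block 1 succs: EXIT (own bit); terminator
       0x1, 0, 0,            block 2 preds: block 0; terminator
       0x4, 0, 0 }           block 2 succs: EXIT (own bit); terminator

   and an execution that entered blocks 0 and 2 by VISITED[0] == 0x5, which
   __hardcfr_check (3, VISITED, CFG) accepts.  Bits of blocks that share a
   vword might equally well be merged into a single mask; the checks below
   work either way.  */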

/* Compute the MASK for the bit representing BLOCK, and the index *WORDIDX of
   the vword holding that bit, in a visited blocks bit array.  */
static inline void
block2mask (size_t const block, vword *const mask, size_t *const wordidx)
{
  size_t wbits = __CHAR_BIT__ * sizeof (vword);
  *wordidx = block / wbits;
  *mask = (vword)1 << (block % wbits);
}
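
/* For example (purely illustrative): with 8-bit chars and 32-bit vwords,
   block 37 maps to *WORDIDX 1 and *MASK (vword)1 << 5, i.e. 0x20.  */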

/* Check whether the bit corresponding to BLOCK is set in VISITED.  */
static inline bool
visited_p (size_t const block, vword const *const visited)
{
  vword mask;
  size_t wordidx;
  block2mask (block, &mask, &wordidx);
  vword w = visited[wordidx];
  return (w & mask) != 0;
}

/* Check whether any bits are set in VISITED beyond those corresponding to the
   BLOCKS valid blocks.  */
static inline bool
excess_bits_set_p (size_t const blocks, vword const *const visited)
{
  vword mask;
  size_t wordidx;
  block2mask (blocks - 1, &mask, &wordidx);
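  /* MASK has only the bit of block BLOCKS - 1 set; -MASK sets that bit and
     every bit above it, so -MASK - MASK is left with exactly the bits above
     it, i.e. those that no valid block may have set.  */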
  mask = -mask - mask;
  vword w = visited[wordidx];
  return (w & mask) != 0;
}

/* Read and consume a mask from **CFG_IT (consuming means advancing the
   iterator past the word just read).  If the mask is zero, return FALSE.
   Otherwise, also read and consume an index, set *MASK and/or *WORDIDX,
   whichever are non-NULL, to the values just read, and return TRUE.  */
static inline bool
next_pair (vword const **const cfg_it,
	   vword *const mask,
	   size_t *const wordidx)
{
  vword m = **cfg_it;
  ++*cfg_it;
  if (!m)
    return false;

  if (mask)
    *mask = m;

  size_t word = **cfg_it;
  ++*cfg_it;

  if (wordidx)
    *wordidx = word;

  return true;
}

/* Return TRUE iff any of the bits in MASK is set in VISITED[WORDIDX].  */
static inline bool
test_mask (vword const *const visited,
	   vword const mask, size_t const wordidx)
{
  return (visited[wordidx] & mask) != 0;
}

/* Scan a sequence of pairs (mask, index) at **CFG_IT until its terminator is
   reached and consumed.  */
static inline void
consume_seq (vword const **const cfg_it)
{
  while (next_pair (cfg_it, NULL, NULL))
    /* Do nothing.  */;
}

/* Check that at least one of the MASK bits in a sequence of pairs (mask,
   index) at **CFG_IT is set in the corresponding VISITED[INDEX] word.
   Return FALSE if we reach the terminator without finding any such bit.
   Otherwise, consume the entire sequence and return TRUE, so that *CFG_IT
   points just past the terminator, which may be the beginning of the next
   sequence.  */
static inline bool
check_seq (vword const *const visited, vword const **const cfg_it)
{
  vword mask;
  size_t wordidx;

  /* If the block was visited, check that at least one of the
     preds/succs was also visited.  */
  do
    /* If we get to the end of the sequence without finding any
       match, something is amiss.  */
    if (!next_pair (cfg_it, &mask, &wordidx))
      return false;
  /* Keep searching until we find a match, at which point the
     condition is satisfied.  */
  while (!test_mask (visited, mask, wordidx));

  /* We found a match; consume the remaining entries in the sequence, so as
     to position the iterator at the beginning of the next one.  */
  consume_seq (cfg_it);

  return true;
}

/* Print out the CFG with BLOCKS blocks, presumed to be associated with CALLER.
   This is expected to be optimized out entirely, unless the verbose part of
   __hardcfr_check_fail is enabled.  */
static inline void
__hardcfr_debug_cfg (size_t const blocks,
		     void const *const caller,
		     vword const *const cfg)
{
  __builtin_printf ("CFG at %p, for %p", cfg, caller);
  vword const *cfg_it = cfg;
  for (size_t i = 0; i < blocks; i++)
    {
      vword mask; size_t wordidx;
      block2mask (i, &mask, &wordidx);
      __builtin_printf ("\nblock %lu (%lu/0x%lx)\npreds: ",
			(unsigned long)i,
			(unsigned long)wordidx, (unsigned long)mask);
      while (next_pair (&cfg_it, &mask, &wordidx))
	__builtin_printf (" (%lu/0x%lx)",
			  (unsigned long)wordidx, (unsigned long)mask);
      __builtin_printf ("\nsuccs: ");
      while (next_pair (&cfg_it, &mask, &wordidx))
	__builtin_printf (" (%lu/0x%lx)",
			  (unsigned long)wordidx, (unsigned long)mask);
    }
  __builtin_printf ("\n");
}
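
/* When the verbose failure path below is enabled and keeps this function out
   of line, it could presumably be invoked from a debugger, e.g. from gdb as
   "call __hardcfr_debug_cfg (blocks, 0, cfg)" with values taken from a
   __hardcfr_check frame (an illustrative, untested suggestion).  */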

#ifndef ATTRIBUTE_UNUSED
# define ATTRIBUTE_UNUSED __attribute__ ((__unused__))
#endif

/* This is called when an out-of-line hardcfr check fails.  All the arguments
   are ignored, and it just traps, unless HARDCFR_VERBOSE_FAIL is enabled.  If
   it is, it prints the PART of the CFG, expected to have BLOCKS blocks, that
   failed at CALLER's BLOCK, and the VISITED bitmap.  When the verbose mode is
   enabled, it also forces __hardcfr_debug_cfg (above) to be compiled into an
   out-of-line function that can be called from a debugger.  */
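
/* HARDCFR_VERBOSE_FAIL is not defined anywhere in this file; presumably it
   is meant to be supplied when building this file, e.g. via
   -DHARDCFR_VERBOSE_FAIL=1 (an assumption, not something documented here).  */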

#ifdef __BPF__
__attribute__((__always_inline__))
#endif
static inline void
__hardcfr_check_fail (size_t const blocks ATTRIBUTE_UNUSED,
		      vword const *const visited ATTRIBUTE_UNUSED,
		      vword const *const cfg ATTRIBUTE_UNUSED,
		      size_t const block ATTRIBUTE_UNUSED,
		      int const part ATTRIBUTE_UNUSED,
		      void const *const caller ATTRIBUTE_UNUSED)
{
#if HARDCFR_VERBOSE_FAIL
  static const char *parts[] = { "preds", "succs", "no excess" };

  vword mask; size_t wordidx;
  block2mask (block, &mask, &wordidx);
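  /* PART 2 is the excess-bits check; widen MASK to cover every bit above
     BLOCK's own bit, mirroring excess_bits_set_p above.  */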
  if (part == 2)
    mask = -mask - mask;
  __builtin_printf ("hardcfr fail at %p block %lu (%lu/0x%lx), expected %s:",
		    caller, (unsigned long)block,
		    (unsigned long)wordidx, (unsigned long)mask,
		    parts[part]);

  if (part != 2)
    {
      /* Skip data for previous blocks.  */
      vword const *cfg_it = cfg;
      for (size_t i = block; i--; )
	{
	  consume_seq (&cfg_it);
	  consume_seq (&cfg_it);
	}
      for (size_t i = part; i--; )
	consume_seq (&cfg_it);

      while (next_pair (&cfg_it, &mask, &wordidx))
	__builtin_printf (" (%lu/0x%lx)",
			  (unsigned long)wordidx, (unsigned long)mask);
    }

  __builtin_printf ("\nvisited:");
  block2mask (blocks - 1, &mask, &wordidx);
  for (size_t i = 0; i <= wordidx; i++)
    __builtin_printf (" (%lu/0x%lx)",
		      (unsigned long)i, (unsigned long)visited[i]);
  __builtin_printf ("\n");

  /* Reference __hardcfr_debug_cfg so that it is emitted out of line and can
     be called from a debugger.  */
  if (!caller || caller == __hardcfr_debug_cfg)
    return;
#endif
  __builtin_trap ();
}

/* Check that, for each of the BLOCKS basic blocks, if its bit is set in
   VISITED, at least one of its predecessors in CFG is also set, and that at
   least one of its successors in CFG is also set.  */
void
__hardcfr_check (size_t const blocks,
		 vword const *const visited,
		 vword const *const cfg)
{
  vword const *cfg_it = cfg;
  for (size_t i = 0; i < blocks; i++)
    {
      bool v = visited_p (i, visited);

      /* For each block, there are two sequences of pairs (mask, index), each
	 sequence terminated by a single all-zero mask (no index).  The first
	 sequence is for predecessor blocks, the second is for successors.  At
	 least one of each must be set.  */
      if (!v)
	{
	  /* Consume predecessors.  */
	  consume_seq (&cfg_it);
	  /* Consume successors.  */
	  consume_seq (&cfg_it);
	}
      else
	{
	  /* Check predecessors.  */
	  if (!check_seq (visited, &cfg_it))
	    __hardcfr_check_fail (blocks, visited, cfg, i, 0,
				  __builtin_return_address (0));
	  /* Check successors.  */
	  if (!check_seq (visited, &cfg_it))
	    __hardcfr_check_fail (blocks, visited, cfg, i, 1,
				  __builtin_return_address (0));
	}
    }
  if (excess_bits_set_p (blocks, visited))
    __hardcfr_check_fail (blocks, visited, cfg, blocks - 1, 2,
			  __builtin_return_address (0));
}
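
/* A minimal sketch of how the interface above fits together, purely for
   illustration: it is not what GCC emits, and HARDCFR_EXAMPLE is a
   hypothetical guard so that none of this is built into libgcc.  The function
   models a body with three blocks (0: test, 1: then, 2: else), records the
   blocks it enters in a local bit array, and verifies the recorded path
   against a hand-written CFG before returning.  */
#ifdef HARDCFR_EXAMPLE
static int __attribute__ ((__unused__))
__hardcfr_example (int x)
{
  /* Edges: ENTRY->0, 0->1, 0->2, 1->EXIT, 2->EXIT.  The ENTRY pred and the
     EXIT succs test the block's own bit, as documented for __hardcfr_check.  */
  static const vword example_cfg[] = {
    /* block 0 */ 0x1, 0, 0,		/* preds: ENTRY (own bit).  */
		  0x2, 0, 0x4, 0, 0,	/* succs: blocks 1 and 2.  */
    /* block 1 */ 0x1, 0, 0,		/* preds: block 0.  */
		  0x2, 0, 0,		/* succs: EXIT (own bit).  */
    /* block 2 */ 0x1, 0, 0,		/* preds: block 0.  */
		  0x4, 0, 0,		/* succs: EXIT (own bit).  */
  };
  vword visited[1] = { 0 };	/* A single vword covers these 3 blocks.  */
  vword mask; size_t wordidx;
  int r;

  block2mask (0, &mask, &wordidx);	/* Entering block 0.  */
  visited[wordidx] |= mask;
  if (x > 0)
    {
      block2mask (1, &mask, &wordidx);	/* Entering block 1.  */
      visited[wordidx] |= mask;
      r = x;
    }
  else
    {
      block2mask (2, &mask, &wordidx);	/* Entering block 2.  */
      visited[wordidx] |= mask;
      r = -x;
    }
  /* Expected to pass on either path; traps only if VISITED were inconsistent
     with the CFG above.  */
  __hardcfr_check (3, visited, example_cfg);
  return r;
}
#endif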