path: root/gcc/c-cppbuiltin.c
/* Define built-in macros for the C family front ends.
   Copyright (C) 2002, 2003, 2004, 2005 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "version.h"
#include "flags.h"
#include "real.h"
#include "c-common.h"
#include "c-pragma.h"
#include "output.h"
#include "except.h"		/* For USING_SJLJ_EXCEPTIONS.  */
#include "toplev.h"
#include "tm_p.h"		/* Target prototypes.  */
#include "target.h"

#ifndef TARGET_OS_CPP_BUILTINS
# define TARGET_OS_CPP_BUILTINS()
#endif

#ifndef TARGET_OBJFMT_CPP_BUILTINS
# define TARGET_OBJFMT_CPP_BUILTINS()
#endif

#ifndef REGISTER_PREFIX
#define REGISTER_PREFIX ""
#endif

/* Non-static so that targets can use it; ATTRIBUTE_UNUSED because some
   targets don't.  */
void builtin_define_std (const char *) ATTRIBUTE_UNUSED;
static void builtin_define_with_value_n (const char *, const char *,
					 size_t);
static void builtin_define_with_int_value (const char *, HOST_WIDE_INT);
static void builtin_define_with_hex_fp_value (const char *, tree,
					      int, const char *,
					      const char *,
					      const char *);
static void builtin_define_stdint_macros (void);
static void builtin_define_type_max (const char *, tree, int);
static void builtin_define_type_precision (const char *, tree);
static void builtin_define_float_constants (const char *, 
					    const char *,
					    const char *,
					    tree);
static void define__GNUC__ (void);

/* Define NAME with the precision of TYPE as its value.  */
static void
builtin_define_type_precision (const char *name, tree type)
{
  builtin_define_with_int_value (name, TYPE_PRECISION (type));
}

/* Define the float.h constants for TYPE using NAME_PREFIX, FP_SUFFIX,
   and FP_CAST. */
static void
builtin_define_float_constants (const char *name_prefix, 
		                const char *fp_suffix, 
				const char *fp_cast, 
				tree type)
{
  /* Used to convert radix-based values to base 10 values in several cases.

     In the max_exp -> max_10_exp conversion for 128-bit IEEE, we need at
     least 6 significant digits for correct results.  Using the fraction
     formed by (log(2)*1e6)/(log(10)*1e6) overflows a 32-bit integer as an
     intermediate; perhaps someone can find a better approximation.  In the
     meantime, I suspect using doubles won't harm the bootstrap here.  */

  const double log10_2 = .30102999566398119521;
  double log10_b;
  const struct real_format *fmt;

  char name[64], buf[128];
  int dig, min_10_exp, max_10_exp;
  int decimal_dig;

  fmt = REAL_MODE_FORMAT (TYPE_MODE (type));
  gcc_assert (fmt->b != 10);

  /* The radix of the exponent representation.  */
  if (type == float_type_node)
    builtin_define_with_int_value ("__FLT_RADIX__", fmt->b);
  log10_b = log10_2 * fmt->log2_b;

  /* The number of radix digits, p, in the floating-point significand.  */
  sprintf (name, "__%s_MANT_DIG__", name_prefix);
  builtin_define_with_int_value (name, fmt->p);

  /* The number of decimal digits, q, such that any floating-point number
     with q decimal digits can be rounded into a floating-point number with
     p radix b digits and back again without change to the q decimal digits,

	p log10 b			if b is a power of 10
	floor((p - 1) log10 b)		otherwise
  */
  dig = (fmt->p - 1) * log10_b;
  sprintf (name, "__%s_DIG__", name_prefix);
  builtin_define_with_int_value (name, dig);

  /* The minimum negative int x such that b**(x-1) is a normalized float.  */
  sprintf (name, "__%s_MIN_EXP__", name_prefix);
  sprintf (buf, "(%d)", fmt->emin);
  builtin_define_with_value (name, buf, 0);

  /* The minimum negative int x such that 10**x is a normalized float,

	  ceil (log10 (b ** (emin - 1)))
	= ceil (log10 (b) * (emin - 1))

     Recall that emin is negative, so the integer truncation calculates
     the ceiling, not the floor, in this case.  */
  min_10_exp = (fmt->emin - 1) * log10_b;
  sprintf (name, "__%s_MIN_10_EXP__", name_prefix);
  sprintf (buf, "(%d)", min_10_exp);
  builtin_define_with_value (name, buf, 0);

  /* The maximum int x such that b**(x-1) is a representable float.  */
  sprintf (name, "__%s_MAX_EXP__", name_prefix);
  builtin_define_with_int_value (name, fmt->emax);

  /* The maximum int x such that 10**x is in the range of representable
     finite floating-point numbers,

	  floor (log10((1 - b**-p) * b**emax))
	= floor (log10(1 - b**-p) + log10(b**emax))
	= floor (log10(1 - b**-p) + log10(b)*emax)

     The safest thing to do here is to just compute this number.  But since
     we don't link cc1 with libm, we cannot.  We could implement log10 here
     via a series expansion, but that seems like too much effort because:

     Note that the first term, for all extant p, is a number exceedingly close
     to zero, but slightly negative.  Note that the second term is an integer
     scaling an irrational number, and that because of the floor we are only
     interested in its integral portion.

     In order for the first term to have any effect on the integral portion
     of the second term, the second term has to be exceedingly close to an
     integer itself (e.g. 123.000000000001 or something).  Getting a result
     that close to an integer requires that the irrational multiplicand have
     a long series of zeros in its expansion, which doesn't occur in the
     first 20 digits or so of log10(b).

     Hand-waving aside, crunching all of the sets of constants above by hand
     does not yield a case for which the first term is significant, which
     in the end is all that matters.  */
  max_10_exp = fmt->emax * log10_b;
  sprintf (name, "__%s_MAX_10_EXP__", name_prefix);
  builtin_define_with_int_value (name, max_10_exp);

  /* The number of decimal digits, n, such that any floating-point number
     can be rounded to n decimal digits and back again without change to
     the value.

	p * log10(b)			if b is a power of 10
	ceil(1 + p * log10(b))		otherwise

     The only macro we care about is this number for the widest supported
     floating type, but we want this value for rendering constants below.  */
  {
    double d_decimal_dig = 1 + fmt->p * log10_b;
    decimal_dig = d_decimal_dig;
    if (decimal_dig < d_decimal_dig)
      decimal_dig++;
  }
  if (type == long_double_type_node)
    builtin_define_with_int_value ("__DECIMAL_DIG__", decimal_dig);

  /* Since, for the supported formats, B is always a power of 2, we
     construct the following numbers directly as hexadecimal
     constants.  */

  /* The maximum representable finite floating-point number,
     (1 - b**-p) * b**emax  */
  {
    int i, n;
    char *p;

    strcpy (buf, "0x0.");
    n = fmt->p * fmt->log2_b;
    for (i = 0, p = buf + 4; i + 3 < n; i += 4)
      *p++ = 'f';
    if (i < n)
      *p++ = "08ce"[n - i];
    sprintf (p, "p%d", fmt->emax * fmt->log2_b);
    if (fmt->pnan < fmt->p)
      {
	/* This is an IBM extended double format made up of two IEEE
	   doubles.  The value of the long double is the sum of the
	   values of the two parts.  The most significant part is
	   required to be the value of the long double rounded to the
	   nearest double.  Rounding means we need a slightly smaller
	   value for LDBL_MAX.  */
	buf[4 + fmt->pnan / 4] = "7bde"[fmt->pnan % 4];
      }
  }
  sprintf (name, "__%s_MAX__", name_prefix);
  builtin_define_with_hex_fp_value (name, type, decimal_dig, buf, fp_suffix, fp_cast);

  /* The minimum normalized positive floating-point number,
     b**(emin-1).  */
  sprintf (name, "__%s_MIN__", name_prefix);
  sprintf (buf, "0x1p%d", (fmt->emin - 1) * fmt->log2_b);
  builtin_define_with_hex_fp_value (name, type, decimal_dig, buf, fp_suffix, fp_cast);

  /* The difference between 1 and the least value greater than 1 that is
     representable in the given floating point type, b**(1-p).  */
  sprintf (name, "__%s_EPSILON__", name_prefix);
  if (fmt->pnan < fmt->p)
    /* This is an IBM extended double format, so 1.0 + any double is
       representable precisely.  */
    sprintf (buf, "0x1p%d", (fmt->emin - fmt->p) * fmt->log2_b);
  else
    sprintf (buf, "0x1p%d", (1 - fmt->p) * fmt->log2_b);
  builtin_define_with_hex_fp_value (name, type, decimal_dig, buf, fp_suffix, fp_cast);

  /* For C++ std::numeric_limits<T>::denorm_min.  The minimum denormalized
     positive floating-point number, b**(emin-p).  Zero for formats that
     don't support denormals.  */
  sprintf (name, "__%s_DENORM_MIN__", name_prefix);
  if (fmt->has_denorm)
    {
      sprintf (buf, "0x1p%d", (fmt->emin - fmt->p) * fmt->log2_b);
      builtin_define_with_hex_fp_value (name, type, decimal_dig,
					buf, fp_suffix, fp_cast);
    }
  else
    {
      sprintf (buf, "0.0%s", fp_suffix);
      builtin_define_with_value (name, buf, 0);
    }

  sprintf (name, "__%s_HAS_DENORM__", name_prefix);
  builtin_define_with_value (name, fmt->has_denorm ? "1" : "0", 0);

  /* For C++ std::numeric_limits<T>::has_infinity.  */
  sprintf (name, "__%s_HAS_INFINITY__", name_prefix);
  builtin_define_with_int_value (name,
				 MODE_HAS_INFINITIES (TYPE_MODE (type)));
  /* For C++ std::numeric_limits<T>::has_quiet_NaN.  We do not have a
     predicate to distinguish a target that has both quiet and
     signalling NaNs from a target that has only quiet NaNs or only
     signalling NaNs, so we assume that a target that has any kind of
     NaN has quiet NaNs.  */
  sprintf (name, "__%s_HAS_QUIET_NAN__", name_prefix);
  builtin_define_with_int_value (name, MODE_HAS_NANS (TYPE_MODE (type)));
}
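
/* Illustrative sketch (not part of this file): the conversions above,
   applied by hand to IEEE single precision (b = 2, log2_b = 1, p = 24,
   emin = -125, emax = 128).  The results match the usual <float.h>
   values: FLT_DIG = 6, FLT_MIN_10_EXP = -37, FLT_MAX_10_EXP = 38, and
   9 decimal digits for a lossless round trip.  */

#include <stdio.h>

int
main (void)
{
  const double log10_2 = .30102999566398119521;
  const int p = 24, emin = -125, emax = 128;
  const double log10_b = log10_2;	/* log2_b == 1 for binary.  */

  int dig = (int) ((p - 1) * log10_b);			/* 6 */
  int min_10_exp = (int) ((emin - 1) * log10_b);	/* -37 */
  int max_10_exp = (int) (emax * log10_b);		/* 38 */

  /* Round up unless the product is exactly an integer.  */
  double d_decimal_dig = 1 + p * log10_b;
  int decimal_dig = (int) d_decimal_dig;
  if (decimal_dig < d_decimal_dig)
    decimal_dig++;					/* 9 */

  printf ("DIG=%d MIN_10_EXP=%d MAX_10_EXP=%d DECIMAL_DIG=%d\n",
	  dig, min_10_exp, max_10_exp, decimal_dig);
  return 0;
}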

/* Define __DECx__ constants for TYPE using NAME_PREFIX and SUFFIX. */
static void
builtin_define_decimal_float_constants (const char *name_prefix, 
					const char *suffix, 
					tree type)
{
  const struct real_format *fmt;
  char name[64], buf[128], *p;
  int digits;

  fmt = REAL_MODE_FORMAT (TYPE_MODE (type));

  /* The number of radix digits, p, in the significand.  */
  sprintf (name, "__%s_MANT_DIG__", name_prefix);
  builtin_define_with_int_value (name, fmt->p);

  /* The minimum negative int x such that b**(x-1) is a normalized float.  */
  sprintf (name, "__%s_MIN_EXP__", name_prefix);
  sprintf (buf, "(%d)", fmt->emin);
  builtin_define_with_value (name, buf, 0);

  /* The maximum int x such that b**(x-1) is a representable float.  */
  sprintf (name, "__%s_MAX_EXP__", name_prefix);
  builtin_define_with_int_value (name, fmt->emax);

  /* Compute the minimum representable value.  */
  sprintf (name, "__%s_MIN__", name_prefix);
  sprintf (buf, "1E%d%s", fmt->emin, suffix);
  builtin_define_with_value (name, buf, 0); 

  /* Compute the maximum representable value.  */
  sprintf (name, "__%s_MAX__", name_prefix);
  p = buf;
  for (digits = fmt->p; digits; digits--)
    {
      *p++ = '9';
      if (digits == fmt->p)
	*p++ = '.';
    }
  *p = 0;
  /* fmt->p plus 1, to account for the decimal point.  */
  sprintf (&buf[fmt->p + 1], "E%d%s", fmt->emax, suffix); 
  builtin_define_with_value (name, buf, 0);

  /* Compute epsilon (the difference between 1 and least value greater
     than 1 representable).  */
  sprintf (name, "__%s_EPSILON__", name_prefix);
  sprintf (buf, "1E-%d%s", fmt->p - 1, suffix);
  builtin_define_with_value (name, buf, 0);

  /* Minimum denormalized positive decimal value.  */
  sprintf (name, "__%s_DEN__", name_prefix);
  p = buf;
  for (digits = fmt->p; digits > 1; digits--)
    {
      *p++ = '0';
      if (digits == fmt->p)
	*p++ = '.';
    }
  *p = 0;
  sprintf (&buf[fmt->p], "1E%d%s", fmt->emin, suffix); 
  builtin_define_with_value (name, buf, 0);
}
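
/* Illustrative sketch (not part of this file): the string construction
   above, applied to the 32-bit decimal format (p = 7, emax = 96,
   suffix "DF").  The result is the __DEC32_MAX__ spelling
   "9.999999E96DF".  */

#include <stdio.h>

int
main (void)
{
  const int p = 7, emax = 96;
  char buf[128], *q = buf;
  int digits;

  /* Emit p nines, with a decimal point after the first one.  */
  for (digits = p; digits; digits--)
    {
      *q++ = '9';
      if (digits == p)
	*q++ = '.';
    }
  *q = 0;
  /* p plus 1, to account for the decimal point.  */
  sprintf (&buf[p + 1], "E%d%s", emax, "DF");
  printf ("%s\n", buf);		/* 9.999999E96DF */
  return 0;
}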

/* Define __GNUC__, __GNUC_MINOR__ and __GNUC_PATCHLEVEL__.  */
static void
define__GNUC__ (void)
{
  /* The format of the version string, enforced below, is
     ([^0-9]*-)?[0-9]+[.][0-9]+([.][0-9]+)?([- ].*)?  */
  const char *q, *v = version_string;

  while (*v && !ISDIGIT (*v))
    v++;
  gcc_assert (*v && (v <= version_string || v[-1] == '-'));

  q = v;
  while (ISDIGIT (*v))
    v++;
  builtin_define_with_value_n ("__GNUC__", q, v - q);
  if (c_dialect_cxx ())
    builtin_define_with_value_n ("__GNUG__", q, v - q);

  gcc_assert (*v == '.' && ISDIGIT (v[1]));
  
  q = ++v;
  while (ISDIGIT (*v))
    v++;
  builtin_define_with_value_n ("__GNUC_MINOR__", q, v - q);

  if (*v == '.')
    {
      gcc_assert (ISDIGIT (v[1]));
      q = ++v;
      while (ISDIGIT (*v))
	v++;
      builtin_define_with_value_n ("__GNUC_PATCHLEVEL__", q, v - q);
    }
  else
    builtin_define_with_value_n ("__GNUC_PATCHLEVEL__", "0", 1);

  gcc_assert (!*v || *v == ' ' || *v == '-');
}
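
/* Illustrative sketch (not part of this file): the parse above, applied
   to a version string such as "4.1.2 20060101 (prerelease)", yields
   __GNUC__=4, __GNUC_MINOR__=1 and __GNUC_PATCHLEVEL__=2.  */

#include <ctype.h>
#include <stdio.h>

int
main (void)
{
  const char *q, *v = "4.1.2 20060101 (prerelease)";

  /* Skip any "[^0-9]*-" prefix, then scan the major number.  */
  while (*v && !isdigit ((unsigned char) *v))
    v++;
  q = v;
  while (isdigit ((unsigned char) *v))
    v++;
  printf ("__GNUC__=%.*s\n", (int) (v - q), q);

  q = ++v;			/* Skip the '.'.  */
  while (isdigit ((unsigned char) *v))
    v++;
  printf ("__GNUC_MINOR__=%.*s\n", (int) (v - q), q);

  if (*v == '.')
    {
      q = ++v;
      while (isdigit ((unsigned char) *v))
	v++;
      printf ("__GNUC_PATCHLEVEL__=%.*s\n", (int) (v - q), q);
    }
  else
    printf ("__GNUC_PATCHLEVEL__=0\n");
  return 0;
}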

/* Define macros used by <stdint.h>.  Currently only defines limits
   for intmax_t, used by the testsuite.  */
static void
builtin_define_stdint_macros (void)
{
  int intmax_long;
  if (intmax_type_node == long_long_integer_type_node)
    intmax_long = 2;
  else if (intmax_type_node == long_integer_type_node)
    intmax_long = 1;
  else if (intmax_type_node == integer_type_node)
    intmax_long = 0;
  else
    gcc_unreachable ();
  builtin_define_type_max ("__INTMAX_MAX__", intmax_type_node, intmax_long);
}

/* Hook that registers front end and target-specific built-ins.  */
void
c_cpp_builtins (cpp_reader *pfile)
{
  /* -undef turns off target-specific built-ins.  */
  if (flag_undef)
    return;

  define__GNUC__ ();

  /* For stddef.h.  They require macros defined in c-common.c.  */
  c_stddef_cpp_builtins ();

  if (c_dialect_cxx ())
    {
      if (flag_weak && SUPPORTS_ONE_ONLY) 
	cpp_define (pfile, "__GXX_WEAK__=1");
      else
	cpp_define (pfile, "__GXX_WEAK__=0");
      if (warn_deprecated)
	cpp_define (pfile, "__DEPRECATED");
    }
  /* Note that we define this for C as well, so that we know if
     __attribute__((cleanup)) will interface with EH.  */
  if (flag_exceptions)
    cpp_define (pfile, "__EXCEPTIONS");

  /* Represents the C++ ABI version, always defined so it can be used while
     preprocessing C and assembler.  */
  if (flag_abi_version == 0)
    /* Use a very large value so that:

         #if __GXX_ABI_VERSION >= <value for version X>

       will work whether the user explicitly says "-fabi-version=x" or
       "-fabi-version=0".  Do not use INT_MAX because that will be
       different from system to system.  */
    builtin_define_with_int_value ("__GXX_ABI_VERSION", 999999);
  else if (flag_abi_version == 1)
    /* Due to a historical accident, this version had the value
       "102".  */
    builtin_define_with_int_value ("__GXX_ABI_VERSION", 102);
  else
    /* Newer versions have values 1002, 1003, ....  */
    builtin_define_with_int_value ("__GXX_ABI_VERSION", 
				   1000 + flag_abi_version);

  /* libgcc needs to know this.  */
  if (USING_SJLJ_EXCEPTIONS)
    cpp_define (pfile, "__USING_SJLJ_EXCEPTIONS__");

  /* limits.h needs to know these.  */
  builtin_define_type_max ("__SCHAR_MAX__", signed_char_type_node, 0);
  builtin_define_type_max ("__SHRT_MAX__", short_integer_type_node, 0);
  builtin_define_type_max ("__INT_MAX__", integer_type_node, 0);
  builtin_define_type_max ("__LONG_MAX__", long_integer_type_node, 1);
  builtin_define_type_max ("__LONG_LONG_MAX__", long_long_integer_type_node, 2);
  builtin_define_type_max ("__WCHAR_MAX__", wchar_type_node, 0);

  builtin_define_type_precision ("__CHAR_BIT__", char_type_node);

  /* stdint.h (eventually) and the testsuite need to know these.  */
  builtin_define_stdint_macros ();

  /* float.h needs to know these.  */

  builtin_define_with_int_value ("__FLT_EVAL_METHOD__",
				 TARGET_FLT_EVAL_METHOD);

  /* And decfloat.h needs this.  */
  builtin_define_with_int_value ("__DEC_EVAL_METHOD__",
                                 TARGET_DEC_EVAL_METHOD);

  builtin_define_float_constants ("FLT", "F", "%s", float_type_node);
  /* Cast the double precision constants when single precision constants are
     specified. The correct result is computed by the compiler when using 
     macros that include a cast. This has the side-effect of making the value 
     unusable in const expressions. */
  if (flag_single_precision_constant)
    builtin_define_float_constants ("DBL", "L", "((double)%s)", double_type_node);
  else
    builtin_define_float_constants ("DBL", "", "%s", double_type_node);
  builtin_define_float_constants ("LDBL", "L", "%s", long_double_type_node);

  /* For decfloat.h.  */
  builtin_define_decimal_float_constants ("DEC32", "DF", dfloat32_type_node);
  builtin_define_decimal_float_constants ("DEC64", "DD", dfloat64_type_node);
  builtin_define_decimal_float_constants ("DEC128", "DL", dfloat128_type_node);

  /* For use in assembly language.  */
  builtin_define_with_value ("__REGISTER_PREFIX__", REGISTER_PREFIX, 0);
  builtin_define_with_value ("__USER_LABEL_PREFIX__", user_label_prefix, 0);

  /* Misc.  */
  builtin_define_with_value ("__VERSION__", version_string, 1);

  /* Definitions for LP64 model.  */
  if (TYPE_PRECISION (long_integer_type_node) == 64
      && POINTER_SIZE == 64
      && TYPE_PRECISION (integer_type_node) == 32)
    {
      cpp_define (pfile, "_LP64");
      cpp_define (pfile, "__LP64__");
    }

  /* Other target-independent built-ins determined by command-line
     options.  */
  if (optimize_size)
    cpp_define (pfile, "__OPTIMIZE_SIZE__");
  if (optimize)
    cpp_define (pfile, "__OPTIMIZE__");

  if (fast_math_flags_set_p ())
    cpp_define (pfile, "__FAST_MATH__");
  if (flag_really_no_inline)
    cpp_define (pfile, "__NO_INLINE__");
  if (flag_signaling_nans)
    cpp_define (pfile, "__SUPPORT_SNAN__");
  if (flag_finite_math_only)
    cpp_define (pfile, "__FINITE_MATH_ONLY__=1");
  else
    cpp_define (pfile, "__FINITE_MATH_ONLY__=0");
  if (flag_pic)
    {
      builtin_define_with_int_value ("__pic__", flag_pic);
      builtin_define_with_int_value ("__PIC__", flag_pic);
    }

  if (flag_iso)
    cpp_define (pfile, "__STRICT_ANSI__");

  if (!flag_signed_char)
    cpp_define (pfile, "__CHAR_UNSIGNED__");

  if (c_dialect_cxx () && TYPE_UNSIGNED (wchar_type_node))
    cpp_define (pfile, "__WCHAR_UNSIGNED__");

  /* Make the choice of ObjC runtime visible to source code.  */
  if (c_dialect_objc () && flag_next_runtime)
    cpp_define (pfile, "__NEXT_RUNTIME__");

  /* Show the availability of some target pragmas.  */
  if (flag_mudflap || targetm.handle_pragma_redefine_extname)
    cpp_define (pfile, "__PRAGMA_REDEFINE_EXTNAME");

  if (targetm.handle_pragma_extern_prefix)
    cpp_define (pfile, "__PRAGMA_EXTERN_PREFIX");

  /* Make the choice of the stack protector runtime visible to source code.
     The macro names and values here were chosen for compatibility with an
     earlier implementation, i.e. ProPolice.  */
  if (flag_stack_protect == 2)
    cpp_define (pfile, "__SSP_ALL__=2");
  else if (flag_stack_protect == 1)
    cpp_define (pfile, "__SSP__=1");

  if (flag_openmp)
    cpp_define (pfile, "_OPENMP=200505");

  /* A straightforward target hook doesn't work, because of problems
     linking that hook's body when part of non-C front ends.  */
# define preprocessing_asm_p() (cpp_get_options (pfile)->lang == CLK_ASM)
# define preprocessing_trad_p() (cpp_get_options (pfile)->traditional)
# define builtin_define(TXT) cpp_define (pfile, TXT)
# define builtin_assert(TXT) cpp_assert (pfile, TXT)
  TARGET_CPU_CPP_BUILTINS ();
  TARGET_OS_CPP_BUILTINS ();
  TARGET_OBJFMT_CPP_BUILTINS ();

  /* Support the __declspec keyword by turning its uses into attributes.
     Note that the current way we do this may result in a collision
     with predefined attributes later on.  This can be solved by using
     one attribute, say __declspec__, and passing args to it.  The
     problem with that approach is that args are not accumulated: each
     new appearance would clobber any existing args.  */
  if (TARGET_DECLSPEC)
    builtin_define ("__declspec(x)=__attribute__((x))");
}
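
/* Illustrative sketch (not part of this file): how user code typically
   consumes a few of the macros predefined above.  */

#include <stdio.h>

int
main (void)
{
#if defined (__GXX_ABI_VERSION) && __GXX_ABI_VERSION >= 1002
  puts ("C++ ABI version 2 or later (or -fabi-version=0)");
#endif
#ifdef __LP64__
  puts ("LP64 model: long and pointers are 64 bits wide");
#endif
#ifdef __OPTIMIZE__
  puts ("compiled with optimization");
#endif
  printf ("__VERSION__ = %s\n", __VERSION__);
  printf ("__CHAR_BIT__ = %d, __INT_MAX__ = %d\n",
	  __CHAR_BIT__, __INT_MAX__);
  return 0;
}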

/* Pass an object-like macro.  If it doesn't lie in the user's
   namespace, define it unconditionally.  Otherwise define a version
   with two leading underscores, and another version with two leading
   and trailing underscores, and define the original only if an ISO
   standard was not nominated.

   e.g. passing "unix" defines "__unix", "__unix__" and possibly
   "unix".  Passing "_mips" defines "__mips", "__mips__" and possibly
   "_mips".  */
void
builtin_define_std (const char *macro)
{
  size_t len = strlen (macro);
  char *buff = (char *) alloca (len + 5);
  char *p = buff + 2;
  char *q = p + len;

  /* prepend __ (or maybe just _) if in user's namespace.  */
  memcpy (p, macro, len + 1);
  if (!( *p == '_' && (p[1] == '_' || ISUPPER (p[1]))))
    {
      if (*p != '_')
	*--p = '_';
      if (p[1] != '_')
	*--p = '_';
    }
  cpp_define (parse_in, p);

  /* If it was in user's namespace...  */
  if (p != buff + 2)
    {
      /* Define the macro with leading and following __.  */
      if (q[-1] != '_')
	*q++ = '_';
      if (q[-2] != '_')
	*q++ = '_';
      *q = '\0';
      cpp_define (parse_in, p);

      /* Finally, define the original macro if permitted.  */
      if (!flag_iso)
	cpp_define (parse_in, macro);
    }
}
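
/* Illustrative sketch (not part of this file): the same name
   transformations without the cpp_define plumbing.  Passing "unix"
   prints "__unix", "__unix__" and (outside strict ISO mode) "unix";
   passing "_mips" prints "__mips", "__mips__" and "_mips".  */

#include <ctype.h>
#include <stdio.h>
#include <string.h>

static void
show_std_defines (const char *macro, int iso)
{
  size_t len = strlen (macro);
  char buff[64 + 5];
  char *p = buff + 2;
  char *q = p + len;

  /* Prepend __ (or maybe just _) if in user's namespace.  */
  memcpy (p, macro, len + 1);
  if (!(*p == '_' && (p[1] == '_' || isupper ((unsigned char) p[1]))))
    {
      if (*p != '_')
	*--p = '_';
      if (p[1] != '_')
	*--p = '_';
    }
  printf ("%s\n", p);

  /* If it was in user's namespace, also emit the __name__ form and,
     if no ISO standard was nominated, the original spelling.  */
  if (p != buff + 2)
    {
      if (q[-1] != '_')
	*q++ = '_';
      if (q[-2] != '_')
	*q++ = '_';
      *q = '\0';
      printf ("%s\n", p);
      if (!iso)
	printf ("%s\n", macro);
    }
}

int
main (void)
{
  show_std_defines ("unix", 0);
  show_std_defines ("_mips", 0);
  return 0;
}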

/* Pass an object-like macro and a value to define it to.  The third
   parameter says whether or not to turn the value into a string
   constant.  */
void
builtin_define_with_value (const char *macro, const char *expansion, int is_str)
{
  char *buf;
  size_t mlen = strlen (macro);
  size_t elen = strlen (expansion);
  size_t extra = 2;  /* space for an = and a NUL */

  if (is_str)
    extra += 2;  /* space for two quote marks */

  buf = (char *) alloca (mlen + elen + extra);
  if (is_str)
    sprintf (buf, "%s=\"%s\"", macro, expansion);
  else
    sprintf (buf, "%s=%s", macro, expansion);

  cpp_define (parse_in, buf);
}

/* Pass an object-like macro and a value to define it to.  The third
   parameter is the length of the expansion.  */
static void
builtin_define_with_value_n (const char *macro, const char *expansion, size_t elen)
{
  char *buf;
  size_t mlen = strlen (macro);

  /* Space for an = and a NUL.  */
  buf = (char *) alloca (mlen + elen + 2);
  memcpy (buf, macro, mlen);
  buf[mlen] = '=';
  memcpy (buf + mlen + 1, expansion, elen);
  buf[mlen + elen + 1] = '\0';

  cpp_define (parse_in, buf);
}

/* Pass an object-like macro and an integer value to define it to.  */
static void
builtin_define_with_int_value (const char *macro, HOST_WIDE_INT value)
{
  char *buf;
  size_t mlen = strlen (macro);
  size_t vlen = 18;
  size_t extra = 2; /* space for = and NUL.  */

  buf = (char *) alloca (mlen + vlen + extra);
  memcpy (buf, macro, mlen);
  buf[mlen] = '=';
  sprintf (buf + mlen + 1, HOST_WIDE_INT_PRINT_DEC, value);

  cpp_define (parse_in, buf);
}

/* Pass an object-like macro and a hexadecimal floating-point value to
   define it to.  */
static void
builtin_define_with_hex_fp_value (const char *macro,
				  tree type ATTRIBUTE_UNUSED, int digits,
				  const char *hex_str, 
				  const char *fp_suffix,
				  const char *fp_cast)
{
  REAL_VALUE_TYPE real;
  char dec_str[64], buf1[256], buf2[256];

  /* Hex values are really cool and convenient, except that they're
     not supported in strict ISO C90 mode.  First, the "p-" sequence
     is not valid as part of a preprocessor number.  Second, we get a
     pedwarn from the preprocessor, which has no context, so we can't
     suppress the warning with __extension__.

     So instead what we do is construct the number in hex (because
     it's easy to get the exact correct value), parse it as a real,
     then print it back out as decimal.  */

  real_from_string (&real, hex_str);
  real_to_decimal (dec_str, &real, sizeof (dec_str), digits, 0);

  /* Assemble the macro in the following fashion
     macro = fp_cast [dec_str fp_suffix] */
  sprintf (buf1, "%s%s", dec_str, fp_suffix);
  sprintf (buf2, fp_cast, buf1);
  sprintf (buf1, "%s=%s", macro, buf2);
  
  cpp_define (parse_in, buf1);
}
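
/* Illustrative sketch (not part of this file): the same hex-to-decimal
   round trip using the C library instead of GCC's real.c, for the
   single-precision minimum 0x1p-126 with 9 significant digits (the
   decimal_dig value computed above for IEEE single precision).
   Requires C99 hexadecimal float parsing in strtod.  */

#include <stdio.h>
#include <stdlib.h>

int
main (void)
{
  double value = strtod ("0x1p-126", NULL);
  char dec_str[64], buf[128];

  /* Render the exact binary value back out in decimal.  */
  snprintf (dec_str, sizeof dec_str, "%.*g", 9, value);
  snprintf (buf, sizeof buf, "%s=%s%s", "__FLT_MIN__", dec_str, "F");
  printf ("%s\n", buf);		/* __FLT_MIN__=1.17549435e-38F */
  return 0;
}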

/* Define MAX for TYPE based on the precision of the type.  IS_LONG is
   1 for type "long" and 2 for "long long".  We have to handle
   unsigned types, since wchar_t might be unsigned.  */

static void
builtin_define_type_max (const char *macro, tree type, int is_long)
{
  static const char *const values[]
    = { "127", "255",
	"32767", "65535",
	"2147483647", "4294967295",
	"9223372036854775807", "18446744073709551615",
	"170141183460469231731687303715884105727",
	"340282366920938463463374607431768211455" };
  static const char *const suffixes[] = { "", "U", "L", "UL", "LL", "ULL" };

  const char *value, *suffix;
  char *buf;
  size_t idx;

  /* Pre-rendering the values means we don't have to futz with printing a
     multi-word decimal value.  There is also only a very limited number of
     precisions that we support, so handling this generally would really be
     a waste of time.  */
  switch (TYPE_PRECISION (type))
    {
    case 8:	idx = 0; break;
    case 16:	idx = 2; break;
    case 32:	idx = 4; break;
    case 64:	idx = 6; break;
    case 128:	idx = 8; break;
    default:    gcc_unreachable ();
    }

  value = values[idx + TYPE_UNSIGNED (type)];
  suffix = suffixes[is_long * 2 + TYPE_UNSIGNED (type)];

  buf = (char *) alloca (strlen (macro) + 1 + strlen (value)
                         + strlen (suffix) + 1);
  sprintf (buf, "%s=%s%s", macro, value, suffix);

  cpp_define (parse_in, buf);
}

path: root/gdb/linux-nat.c
/* GNU/Linux native-dependent code common to multiple platforms.

   Copyright (C) 2001-2025 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "inferior.h"
#include "infrun.h"
#include "target.h"
#include "nat/linux-nat.h"
#include "nat/linux-waitpid.h"
#include "gdbsupport/gdb_wait.h"
#include <unistd.h>
#include <sys/syscall.h>
#include "nat/gdb_ptrace.h"
#include "linux-nat.h"
#include "nat/linux-ptrace.h"
#include "nat/linux-procfs.h"
#include "nat/linux-personality.h"
#include "linux-fork.h"
#include "gdbthread.h"
#include "cli/cli-cmds.h"
#include "regcache.h"
#include "regset.h"
#include "inf-child.h"
#include "inf-ptrace.h"
#include "auxv.h"
#include <sys/procfs.h>
#include "elf-bfd.h"
#include "gregset.h"
#include "gdbcore.h"
#include <sys/stat.h>
#include <fcntl.h>
#include "inf-loop.h"
#include "gdbsupport/event-loop.h"
#include "event-top.h"
#include <pwd.h>
#include <sys/types.h>
#include <dirent.h>
#include <sys/vfs.h>
#include "nat/linux-osdata.h"
#include "linux-tdep.h"
#include "gdbsupport/agent.h"
#include "tracepoint.h"
#include "target-descriptions.h"
#include "gdbsupport/filestuff.h"
#include "nat/linux-namespaces.h"
#include "gdbsupport/block-signals.h"
#include "gdbsupport/fileio.h"
#include "gdbsupport/scope-exit.h"
#include "gdbsupport/gdb-sigmask.h"
#include "gdbsupport/common-debug.h"

/* This comment documents high-level logic of this file.

Waiting for events in sync mode
===============================

When waiting for an event in a specific thread, we just use waitpid,
passing the specific pid, and not passing WNOHANG.

When waiting for an event in all threads, waitpid alone is not sufficient:

- If the thread group leader exits while other threads in the thread
  group still exist, waitpid(TGID, ...) hangs.  That waitpid won't
  return an exit status until the other threads in the group are
  reaped.

- When a non-leader thread execs, that thread just vanishes without
  reporting an exit (so we'd hang if we waited for it explicitly in
  that case).  The exec event is instead reported to the TGID pid.

The solution is to always use -1 and WNOHANG, together with
sigsuspend (a standalone sketch of this loop appears just after this
comment).

First, we use non-blocking waitpid to check for events.  If nothing is
found, we use sigsuspend to wait for SIGCHLD.  When SIGCHLD arrives,
it means something happened to a child process.  As soon as we know
there's an event, we get back to calling nonblocking waitpid.

Note that SIGCHLD should be blocked between waitpid and sigsuspend
calls, so that we don't miss a signal.  If SIGCHLD arrives in between,
when it's blocked, the signal becomes pending and sigsuspend
immediately notices it and returns.

Waiting for events in async mode (TARGET_WNOHANG)
=================================================

In async mode, GDB should always be ready to handle both user input
and target events, so neither blocking waitpid nor sigsuspend are
viable options.  Instead, we should asynchronously notify the GDB main
event loop whenever there's an unprocessed event from the target.  We
detect asynchronous target events by handling SIGCHLD signals.  To
notify the event loop about target events, an event pipe is used
--- the pipe is registered as waitable event source in the event loop,
the event loop select/poll's on the read end of this pipe (as well on
other event sources, e.g., stdin), and the SIGCHLD handler marks the
event pipe to raise an event.  This is more portable than relying on
pselect/ppoll, since on kernels that lack those syscalls, libc
emulates them with select/poll+sigprocmask, and that is racy
(a.k.a. plain broken).

Obviously, if we fail to notify the event loop when there's a target
event, it's bad.  OTOH, if we notify the event loop when there's no
event from the target, linux_nat_wait will detect that there's no real
event to report, and return an event of type TARGET_WAITKIND_IGNORE.
This is mostly harmless, but it will waste time and is better avoided.

The main design point is that every time GDB is outside linux-nat.c,
we have a SIGCHLD handler installed that is called when something
happens to the target and notifies the GDB event loop.  Whenever GDB
core decides to handle the event, and calls into linux-nat.c, we
process things as in sync mode, except that the we never block in
sigsuspend.

While processing an event, we may end up momentarily blocked in
waitpid calls.  Those waitpid calls, while blocking, are guaranteed to
return quickly.  E.g., in all-stop mode, before reporting to the core
that an LWP hit a breakpoint, all LWPs are stopped by sending them
SIGSTOP, and synchronously waiting for the SIGSTOP to be reported.
Note that this is different from blocking indefinitely waiting for the
next event --- here, we're already handling an event.

Use of signals
==============

We stop threads by sending a SIGSTOP.  The use of SIGSTOP instead of another
signal is not entirely significant; we just need a signal to be delivered,
so that we can intercept it.  SIGSTOP's advantage is that it cannot be
blocked.  A disadvantage is that it is not a real-time signal, so it can only
be queued once; we do not keep track of other sources of SIGSTOP.

Two other signals that can't be blocked are SIGCONT and SIGKILL.  But we can't
use them, because they have special behavior when the signal is generated -
not when it is delivered.  SIGCONT resumes the entire thread group and SIGKILL
kills the entire thread group.

A delivered SIGSTOP would stop the entire thread group, not just the thread we
tkill'd.  But we never let the SIGSTOP be delivered; we always intercept and
cancel it (by PTRACE_CONT without passing SIGSTOP).

We could use a real-time signal instead.  This would solve those problems; we
could use PTRACE_GETSIGINFO to locate the specific stop signals sent by GDB.
But we would still have to have some support for SIGSTOP, since PTRACE_ATTACH
generates it, and there are races with trying to find a signal that is not
blocked.

Exec events
===========

The case of a thread group (process) with 3 or more threads, where a
thread other than the leader execs, is worth detailing:

On an exec, the Linux kernel destroys all threads except the execing
one in the thread group, and resets the execing thread's tid to the
tgid.  No exit notification is sent for the execing thread -- from the
ptracer's perspective, it appears as though the execing thread just
vanishes.  Until we reap all other threads except the leader and the
execing thread, the leader will be zombie, and the execing thread will
be in `D (disc sleep)' state.  As soon as all other threads are
reaped, the execing thread changes its tid to the tgid, and the
previous (zombie) leader vanishes, giving place to the "new"
leader.

Accessing inferior memory
=========================

To access inferior memory, we strongly prefer /proc/PID/mem.  We fall
back to ptrace if and only if /proc/PID/mem is not writable, as a
concession for obsolescent kernels (such as found in RHEL6).  For
modern kernels, the fallback shouldn't trigger.  GDBserver has already
dropped the ptrace fallback, and at some point, we'll consider
removing it from native GDB too.

/proc/PID/mem has a few advantages over alternatives like
PTRACE_PEEKTEXT/PTRACE_POKETEXT or process_vm_readv/process_vm_writev:

- Because we can use a single read/write call, /proc/PID/mem can be
  much more efficient than banging away at
  PTRACE_PEEKTEXT/PTRACE_POKETEXT, one word at a time.

- /proc/PID/mem allows writing to read-only pages, which we need to
  e.g., plant breakpoint instructions.  process_vm_writev does not
  allow this.

- /proc/PID/mem allows memory access even if all threads are running.
  OTOH, PTRACE_PEEKTEXT/PTRACE_POKETEXT require passing down the tid
  of a stopped task.  This lets us e.g., install breakpoints while the
  inferior is running, clear a displaced stepping scratch pad when the
  thread that was displaced stepping exits, print inferior globals,
  etc., all without having to worry about temporarily pausing some
  thread.

- /proc/PID/mem does not suffer from a race that could cause us to
  access memory of the wrong address space when the inferior execs.

  process_vm_readv/process_vm_writev have this problem.

  E.g., say GDB decides to write to memory just while the inferior
  execs.  In this scenario, GDB could write memory to the post-exec
  address space thinking it was writing to the pre-exec address space,
  with high probability of corrupting the inferior.  Or if GDB decides
  instead to read memory just while the inferior execs, it could read
  bogus contents out of the wrong address space.

  ptrace used to have this problem too, but no longer has since Linux
  commit dbb5afad100a ("ptrace: make ptrace() fail if the tracee
  changed its pid unexpectedly"), in Linux 5.13.  (And if ptrace were
  ever changed to allow accessing memory via zombie or running threads,
  it had better not forget to consider this scenario.)

  We avoid this race with /proc/PID/mem, by opening the file as soon
  as we start debugging the inferior, when it is known the inferior is
  stopped, and holding on to the open file descriptor, to be used
  whenever we need to access inferior memory.  If the inferior execs
  or exits, reading/writing from/to the file returns 0 (EOF),
  indicating the address space is gone, and so we return
  TARGET_XFER_EOF to the core.  We close the old file and open a new
  one when we finally see the PTRACE_EVENT_EXEC event.  */
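
/* Illustrative sketch (not part of this file): the sync-mode wait loop
   described above -- non-blocking waitpid over all children, falling
   back to sigsuspend with SIGCHLD blocked so that a stop reported
   between the two calls is never missed.  Error handling and the
   actual event processing are elided.  */

#include <signal.h>
#include <sys/wait.h>

static void
example_sigchld_handler (int signo)
{
  /* Empty; only needed so that SIGCHLD interrupts sigsuspend.  */
}

static int
example_wait_for_any_child (int *status)
{
  sigset_t chld_mask, suspend_mask, prev_mask;
  int pid;

  signal (SIGCHLD, example_sigchld_handler);

  /* Block SIGCHLD while polling; if it arrives here it stays pending,
     and the sigsuspend below returns immediately.  */
  sigemptyset (&chld_mask);
  sigaddset (&chld_mask, SIGCHLD);
  sigprocmask (SIG_BLOCK, &chld_mask, &prev_mask);

  while ((pid = waitpid (-1, status, WNOHANG)) == 0)
    {
      /* No event yet: atomically unblock SIGCHLD and sleep until a
	 child changes state.  */
      suspend_mask = prev_mask;
      sigdelset (&suspend_mask, SIGCHLD);
      sigsuspend (&suspend_mask);
    }

  sigprocmask (SIG_SETMASK, &prev_mask, NULL);
  return pid;
}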

#ifndef O_LARGEFILE
#define O_LARGEFILE 0
#endif
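
/* Illustrative sketch (not part of this file): reading inferior memory
   through /proc/PID/mem, as described in the comment above.  GDB itself
   opens the file once, while the inferior is known to be stopped, and
   caches the descriptor; this simplified version opens it per call.  A
   pread return of 0 (EOF) would mean the address space is gone.  */

#include <sys/types.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static ssize_t
example_proc_mem_read (pid_t pid, unsigned long addr, void *buf, size_t len)
{
  char filename[64];
  int fd;
  ssize_t n;

  snprintf (filename, sizeof filename, "/proc/%ld/mem", (long) pid);
  fd = open (filename, O_RDONLY | O_LARGEFILE);
  if (fd < 0)
    return -1;

  /* pread seeks and reads in one call, so a single descriptor could be
     shared by concurrent accesses.  */
  n = pread (fd, buf, len, (off_t) addr);
  close (fd);
  return n;
}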

struct linux_nat_target *linux_target;

/* See nat/linux-nat.h.  */
enum tribool have_ptrace_getregset = TRIBOOL_UNKNOWN;

/* When true, print debug messages relating to the linux native target.  */

static bool debug_linux_nat;

/* Implement 'show debug linux-nat'.  */

static void
show_debug_linux_nat (struct ui_file *file, int from_tty,
		      struct cmd_list_element *c, const char *value)
{
  gdb_printf (file, _("Debugging of GNU/Linux native targets is %s.\n"),
	      value);
}

/* Print a linux-nat debug statement.  */

#define linux_nat_debug_printf(fmt, ...) \
  debug_prefixed_printf_cond (debug_linux_nat, "linux-nat", fmt, ##__VA_ARGS__)

/* Print "linux-nat" enter/exit debug statements.  */

#define LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT \
  scoped_debug_enter_exit (debug_linux_nat, "linux-nat")

struct simple_pid_list
{
  int pid;
  int status;
  struct simple_pid_list *next;
};
static struct simple_pid_list *stopped_pids;

/* Whether target_thread_events is in effect.  */
static bool report_thread_events;

static int kill_lwp (int lwpid, int signo);

static int stop_callback (struct lwp_info *lp);

static void block_child_signals (sigset_t *prev_mask);
static void restore_child_signals_mask (sigset_t *prev_mask);

struct lwp_info;
static struct lwp_info *add_lwp (ptid_t ptid);
static void purge_lwp_list (int pid);
static void delete_lwp (ptid_t ptid);
static struct lwp_info *find_lwp_pid (ptid_t ptid);

static int lwp_status_pending_p (struct lwp_info *lp);

static bool is_lwp_marked_dead (lwp_info *lp);

static void save_stop_reason (struct lwp_info *lp);

static bool proc_mem_file_is_writable ();
static void close_proc_mem_file (pid_t pid);
static void open_proc_mem_file (ptid_t ptid);

/* Return TRUE if LWP is the leader thread of the process.  */

static bool
is_leader (lwp_info *lp)
{
  return lp->ptid.pid () == lp->ptid.lwp ();
}

/* Convert an LWP's pending status to a std::string.  */

static std::string
pending_status_str (lwp_info *lp)
{
  gdb_assert (lwp_status_pending_p (lp));

  if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
    return lp->waitstatus.to_string ();
  else
    return status_to_str (lp->status);
}

/* Return true if we should report exit events for LP.  */

static bool
report_exit_events_for (lwp_info *lp)
{
  thread_info *thr = linux_target->find_thread (lp->ptid);
  gdb_assert (thr != nullptr);

  return (report_thread_events
	  || (thr->thread_options () & GDB_THREAD_OPTION_EXIT) != 0);
}


/* LWP accessors.  */

/* See nat/linux-nat.h.  */

ptid_t
ptid_of_lwp (struct lwp_info *lwp)
{
  return lwp->ptid;
}

/* See nat/linux-nat.h.  */

void
lwp_set_arch_private_info (struct lwp_info *lwp,
			   struct arch_lwp_info *info)
{
  lwp->arch_private = info;
}

/* See nat/linux-nat.h.  */

struct arch_lwp_info *
lwp_arch_private_info (struct lwp_info *lwp)
{
  return lwp->arch_private;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stopped (struct lwp_info *lwp)
{
  return lwp->stopped;
}

/* See nat/linux-nat.h.  */

enum target_stop_reason
lwp_stop_reason (struct lwp_info *lwp)
{
  return lwp->stop_reason;
}

/* See nat/linux-nat.h.  */

int
lwp_is_stepping (struct lwp_info *lwp)
{
  return lwp->step;
}


/* Trivial list manipulation functions to keep track of a list of
   new stopped processes.  */
static void
add_to_pid_list (struct simple_pid_list **listp, int pid, int status)
{
  struct simple_pid_list *new_pid = XNEW (struct simple_pid_list);

  new_pid->pid = pid;
  new_pid->status = status;
  new_pid->next = *listp;
  *listp = new_pid;
}

static int
pull_pid_from_list (struct simple_pid_list **listp, int pid, int *statusp)
{
  struct simple_pid_list **p;

  for (p = listp; *p != NULL; p = &(*p)->next)
    if ((*p)->pid == pid)
      {
	struct simple_pid_list *next = (*p)->next;

	*statusp = (*p)->status;
	xfree (*p);
	*p = next;
	return 1;
      }
  return 0;
}
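
/* Illustrative sketch (not part of this file): typical use of the list
   above.  A stop reported for a not-yet-known LWP is stashed, then
   claimed once the LWP is registered.  4711 is a made-up LWP id, and
   0x137f is the raw wait status for "stopped by SIGSTOP"
   (SIGSTOP << 8 | 0x7f).  */

static void ATTRIBUTE_UNUSED
example_simple_pid_list_use (void)
{
  int status;

  add_to_pid_list (&stopped_pids, 4711, 0x137f);

  if (pull_pid_from_list (&stopped_pids, 4711, &status))
    {
      /* STATUS now holds the stashed wait status; the list node has
	 been unlinked and freed.  */
    }
}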

/* Return the ptrace options that we want to try to enable.  */

static int
linux_nat_ptrace_options (int attached)
{
  int options = 0;

  if (!attached)
    options |= PTRACE_O_EXITKILL;

  options |= (PTRACE_O_TRACESYSGOOD
	      | PTRACE_O_TRACEVFORKDONE
	      | PTRACE_O_TRACEVFORK
	      | PTRACE_O_TRACEFORK
	      | PTRACE_O_TRACEEXEC);

  return options;
}
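
/* Illustrative sketch (not part of this file): the options computed
   above are ultimately installed on a ptrace-stopped tracee with
   PTRACE_SETOPTIONS, roughly as below (the real work, including
   probing which options the kernel supports, lives in
   linux_enable_event_reporting in nat/linux-ptrace.c).  */

#include <sys/types.h>
#include <sys/ptrace.h>

static long
example_set_ptrace_options (pid_t pid, int options)
{
  /* The tracee must be in a ptrace-stop for this to succeed.  */
  return ptrace (PTRACE_SETOPTIONS, pid, (void *) 0,
		 (void *) (long) options);
}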

/* Initialize ptrace and procfs warnings and check for supported
   ptrace features given PID.

   ATTACHED should be nonzero iff we attached to the inferior.  */

static void
linux_init_ptrace_procfs (pid_t pid, int attached)
{
  int options = linux_nat_ptrace_options (attached);

  linux_enable_event_reporting (pid, options);
  linux_ptrace_init_warnings ();
  linux_proc_init_warnings ();
  proc_mem_file_is_writable ();

  /* Let the arch-specific native code do any needed initialization.
     Some architectures need to call ptrace to check for hardware
     watchpoints support, etc.  Call it now, when we know the tracee
     is ptrace-stopped.  */
  linux_target->low_init_process (pid);
}

linux_nat_target::~linux_nat_target ()
{}

void
linux_nat_target::post_attach (int pid)
{
  linux_init_ptrace_procfs (pid, 1);
}

/* Implement the virtual inf_ptrace_target::post_startup_inferior method.  */

void
linux_nat_target::post_startup_inferior (ptid_t ptid)
{
  linux_init_ptrace_procfs (ptid.pid (), 0);
}

/* Return the number of known LWPs in the tgid given by PID.  */

static int
num_lwps (int pid)
{
  int count = 0;

  for (const lwp_info &lp : all_lwps ())
    if (lp.ptid.pid () == pid)
      count++;

  return count;
}

/* Deleter for lwp_info unique_ptr specialisation.  */

struct lwp_deleter
{
  void operator() (struct lwp_info *lwp) const
  {
    delete_lwp (lwp->ptid);
  }
};

/* A unique_ptr specialisation for lwp_info.  */

typedef std::unique_ptr<struct lwp_info, lwp_deleter> lwp_info_up;

/* Target hook for follow_fork.  */

void
linux_nat_target::follow_fork (inferior *child_inf, ptid_t child_ptid,
			       target_waitkind fork_kind, bool follow_child,
			       bool detach_fork)
{
  inf_ptrace_target::follow_fork (child_inf, child_ptid, fork_kind,
				  follow_child, detach_fork);

  if (!follow_child)
    {
      bool has_vforked = fork_kind == TARGET_WAITKIND_VFORKED;
      ptid_t parent_ptid = inferior_ptid;
      int parent_pid = parent_ptid.lwp ();
      int child_pid = child_ptid.lwp ();

      /* We're already attached to the parent, by default.  */
      lwp_info *child_lp = add_lwp (child_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;

      /* Detach new forked process?  */
      if (detach_fork)
	{
	  int child_stop_signal = 0;
	  bool detach_child = true;

	  /* Move CHILD_LP into a unique_ptr and clear the source pointer
	     to prevent us doing anything stupid with it.  */
	  lwp_info_up child_lp_ptr (child_lp);
	  child_lp = nullptr;

	  linux_target->low_prepare_to_resume (child_lp_ptr.get ());

	  /* When debugging an inferior in an architecture that supports
	     hardware single stepping on a kernel without commit
	     6580807da14c423f0d0a708108e6df6ebc8bc83d, the vfork child
	     process starts with the TIF_SINGLESTEP/X86_EFLAGS_TF bits
	     set if the parent process had them set.
	     To work around this, single step the child process
	     once before detaching to clear the flags.  */

	  /* Note that we consult the parent's architecture instead of
	     the child's because there's no inferior for the child at
	     this point.  */
	  if (!gdbarch_get_next_pcs_p (target_thread_architecture
				       (parent_ptid)))
	    {
	      int status;

	      linux_disable_event_reporting (child_pid);
	      if (ptrace (PTRACE_SINGLESTEP, child_pid, 0, 0) < 0)
		perror_with_name (_("Couldn't do single step"));
	      if (my_waitpid (child_pid, &status, 0) < 0)
		perror_with_name (_("Couldn't wait vfork process"));
	      else
		{
		  detach_child = WIFSTOPPED (status);
		  child_stop_signal = WSTOPSIG (status);
		}
	    }

	  if (detach_child)
	    {
	      int signo = child_stop_signal;

	      if (signo != 0
		  && !signal_pass_state (gdb_signal_from_host (signo)))
		signo = 0;
	      ptrace (PTRACE_DETACH, child_pid, 0, signo);

	      close_proc_mem_file (child_pid);
	    }
	}

      if (has_vforked)
	{
	  lwp_info *parent_lp = find_lwp_pid (parent_ptid);
	  linux_nat_debug_printf ("waiting for VFORK_DONE on %d", parent_pid);
	  parent_lp->stopped = 1;

	  /* We'll handle the VFORK_DONE event like any other
	     event, in target_wait.  */
	}
    }
  else
    {
      struct lwp_info *child_lp;

      child_lp = add_lwp (child_ptid);
      child_lp->stopped = 1;
      child_lp->last_resume_kind = resume_stop;
    }
}


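/* The fork/vfork/exec catchpoint insert/remove methods below all
   report success without doing anything: linux_nat_ptrace_options
   above always enables the corresponding ptrace events, so no extra
   kernel setup is needed to catch these.  */
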
int
linux_nat_target::insert_fork_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::remove_fork_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::insert_vfork_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::remove_vfork_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::insert_exec_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::remove_exec_catchpoint (int pid)
{
  return 0;
}

int
linux_nat_target::set_syscall_catchpoint (int pid, bool needed, int any_count,
					  gdb::array_view<const int> syscall_counts)
{
  /* On GNU/Linux, we ignore the arguments: syscall catchpoints are
     only ever enabled here, never disabled.

     Also, we do not use the `syscall_counts' information because we
     do not filter system calls here; we let GDB core do that logic
     for us.  */
  return 0;
}

/* List of known LWPs, keyed by LWP PID.  This speeds up the common
   case of mapping a PID returned from the kernel to our corresponding
   lwp_info data structure.  */
static htab_t lwp_lwpid_htab;

/* Calculate a hash from a lwp_info's LWP PID.  */

static hashval_t
lwp_info_hash (const void *ap)
{
  const struct lwp_info *lp = (struct lwp_info *) ap;
  pid_t pid = lp->ptid.lwp ();

  return iterative_hash_object (pid, 0);
}

/* Equality function for the lwp_info hash table.  Compares the LWP's
   PID.  */

static int
lwp_lwpid_htab_eq (const void *a, const void *b)
{
  const struct lwp_info *entry = (const struct lwp_info *) a;
  const struct lwp_info *element = (const struct lwp_info *) b;

  return entry->ptid.lwp () == element->ptid.lwp ();
}

/* Create the lwp_lwpid_htab hash table.  */

static void
lwp_lwpid_htab_create (void)
{
  lwp_lwpid_htab = htab_create (100, lwp_info_hash, lwp_lwpid_htab_eq, NULL);
}

/* Add LP to the hash table.  */

static void
lwp_lwpid_htab_add_lwp (struct lwp_info *lp)
{
  void **slot;

  slot = htab_find_slot (lwp_lwpid_htab, lp, INSERT);
  gdb_assert (slot != NULL && *slot == NULL);
  *slot = lp;
}

/* Head of doubly-linked list of known LWPs.  Sorted by reverse
   creation order.  This order is assumed in some cases.  E.g.,
   reaping status after killing all lwps of a process: the leader LWP
   must be reaped last.  */

static intrusive_list<lwp_info> lwp_list;

/* See linux-nat.h.  */

lwp_info_range
all_lwps ()
{
  lwp_info_iterator begin (lwp_list.begin ());

  return lwp_info_range (std::move (begin));
}

/* See linux-nat.h.  */

lwp_info_safe_range
all_lwps_safe ()
{
  return lwp_info_safe_range (all_lwps ());
}

/* Add LP to sorted-by-reverse-creation-order doubly-linked list.  */

static void
lwp_list_add (struct lwp_info *lp)
{
  lwp_list.push_front (*lp);
}

/* Remove LP from sorted-by-reverse-creation-order doubly-linked
   list.  */

static void
lwp_list_remove (struct lwp_info *lp)
{
  /* Remove from the sorted-by-reverse-creation-order list.  */
  lwp_list.erase (lwp_list.iterator_to (*lp));
}



/* Signal mask for use with sigsuspend in linux_nat_wait, initialized in
   _initialize_linux_nat.  */
static sigset_t suspend_mask;

/* Signals to block to make sigsuspend work.  */
static sigset_t blocked_mask;

/* SIGCHLD action.  */
static struct sigaction sigchld_action;

/* Block child signals (SIGCHLD and linux threads signals), and store
   the previous mask in PREV_MASK.  */

static void
block_child_signals (sigset_t *prev_mask)
{
  /* Make sure SIGCHLD is blocked.  */
  if (!sigismember (&blocked_mask, SIGCHLD))
    sigaddset (&blocked_mask, SIGCHLD);

  gdb_sigmask (SIG_BLOCK, &blocked_mask, prev_mask);
}

/* Restore child signals mask, previously returned by
   block_child_signals.  */

static void
restore_child_signals_mask (sigset_t *prev_mask)
{
  gdb_sigmask (SIG_SETMASK, prev_mask, NULL);
}

/* Mask of signals to pass directly to the inferior.  */
static sigset_t pass_mask;

/* Update signals to pass to the inferior.  */
void
linux_nat_target::pass_signals
  (gdb::array_view<const unsigned char> pass_signals)
{
  int signo;

  sigemptyset (&pass_mask);

  for (signo = 1; signo < NSIG; signo++)
    {
      int target_signo = gdb_signal_from_host (signo);
      if (target_signo < pass_signals.size () && pass_signals[target_signo])
	sigaddset (&pass_mask, signo);
    }
}



/* Prototypes for local functions.  */
static int stop_wait_callback (struct lwp_info *lp);
static int resume_stopped_resumed_lwps (struct lwp_info *lp, const ptid_t wait_ptid);
static int check_ptrace_stopped_lwp_gone (struct lwp_info *lp);



/* Destroy and free LP.  */

lwp_info::~lwp_info ()
{
  /* Let the arch specific bits release arch_lwp_info.  */
  linux_target->low_delete_thread (this->arch_private);
}

/* Traversal function for purge_lwp_list.  */

static int
lwp_lwpid_htab_remove_pid (void **slot, void *info)
{
  struct lwp_info *lp = (struct lwp_info *) *slot;
  int pid = *(int *) info;

  if (lp->ptid.pid () == pid)
    {
      htab_clear_slot (lwp_lwpid_htab, slot);
      lwp_list_remove (lp);
      delete lp;
    }

  return 1;
}

/* Remove all LWPs belonging to PID from the lwp list.  */

static void
purge_lwp_list (int pid)
{
  htab_traverse_noresize (lwp_lwpid_htab, lwp_lwpid_htab_remove_pid, &pid);
}

/* Add the LWP specified by PTID to the list.  PTID is the first LWP
   in the process.  Return a pointer to the structure describing the
   new LWP.

   This differs from add_lwp in that we don't let the arch specific
   bits know about this new thread.  Current clients of this callback
   take the opportunity to install watchpoints in the new thread, and
   we shouldn't do that for the first thread.  If we're spawning a
   child ("run"), the thread executes the shell wrapper first, and we
   shouldn't touch it until it execs the program we want to debug.
   For "attach", it'd be okay to call the callback, but it's not
   necessary, because watchpoints can't yet have been inserted into
   the inferior.  */

static struct lwp_info *
add_initial_lwp (ptid_t ptid)
{
  gdb_assert (ptid.lwp_p ());

  lwp_info *lp = new lwp_info (ptid);

  /* Add to sorted-by-reverse-creation-order list.  */
  lwp_list_add (lp);

  /* Add to keyed-by-pid htab.  */
  lwp_lwpid_htab_add_lwp (lp);

  return lp;
}

/* Add the LWP specified by PTID to the list.  Return a pointer to the
   structure describing the new LWP.  The LWP should already be
   stopped.  */

static struct lwp_info *
add_lwp (ptid_t ptid)
{
  struct lwp_info *lp;

  lp = add_initial_lwp (ptid);

  /* Let the arch specific bits know about this new thread.  Current
     clients of this callback take the opportunity to install
     watchpoints in the new thread.  We don't do this for the first
     thread though.  See add_initial_lwp.  */
  linux_target->low_new_thread (lp);

  return lp;
}

/* Remove the LWP specified by PTID from the list.  */

static void
delete_lwp (ptid_t ptid)
{
  lwp_info dummy (ptid);

  void **slot = htab_find_slot (lwp_lwpid_htab, &dummy, NO_INSERT);
  if (slot == NULL)
    return;

  lwp_info *lp = *(struct lwp_info **) slot;
  gdb_assert (lp != NULL);

  htab_clear_slot (lwp_lwpid_htab, slot);

  /* Remove from the sorted-by-reverse-creation-order list.  */
  lwp_list_remove (lp);

  /* Release.  */
  delete lp;
}

/* Return a pointer to the structure describing the LWP corresponding
   to PTID.  If no corresponding LWP could be found, return NULL.  */

static struct lwp_info *
find_lwp_pid (ptid_t ptid)
{
  int lwp;

  if (ptid.lwp_p ())
    lwp = ptid.lwp ();
  else
    lwp = ptid.pid ();

  lwp_info dummy (ptid_t (0, lwp));
  return (struct lwp_info *) htab_find (lwp_lwpid_htab, &dummy);
}

/* See nat/linux-nat.h.  */

struct lwp_info *
iterate_over_lwps (ptid_t filter,
		   gdb::function_view<iterate_over_lwps_ftype> callback)
{
  for (lwp_info &lp : all_lwps_safe ())
    {
      if (lp.ptid.matches (filter))
	{
	  if (callback (&lp) != 0)
	    return &lp;
	}
    }

  return NULL;
}

/* Update our internal state when changing from one checkpoint to
   another indicated by NEW_PTID.  We can only switch single-threaded
   applications, so we only create one new LWP, and the previous list
   is discarded.  */

void
linux_nat_switch_fork (ptid_t new_ptid)
{
  struct lwp_info *lp;

  purge_lwp_list (inferior_ptid.pid ());

  lp = add_lwp (new_ptid);
  lp->stopped = 1;

  /* This changes the thread's ptid while preserving the gdb thread
     num.  Also changes the inferior pid, while preserving the
     inferior num.  */
  thread_change_ptid (linux_target, inferior_ptid, new_ptid);

  /* We've just told GDB core that the thread changed target id, but,
     in fact, it really is a different thread, with different register
     contents.  */
  registers_changed ();
}

/* Handle the exit of a single thread LP.  If DEL_THREAD is true,
   delete the thread_info associated to LP, if it exists.  */

static void
exit_lwp (struct lwp_info *lp, bool del_thread = true)
{
  struct thread_info *th = linux_target->find_thread (lp->ptid);

  if (th != nullptr && del_thread)
    delete_thread (th);

  delete_lwp (lp->ptid);
}

/* Wait for the LWP specified by PTID, which we have just attached to.
   Returns a wait status for that LWP, to cache.  */

static int
linux_nat_post_attach_wait (ptid_t ptid, int *signalled)
{
  pid_t new_pid, pid = ptid.lwp ();
  int status;

  if (linux_proc_pid_is_stopped (pid))
    {
      linux_nat_debug_printf ("Attaching to a stopped process");

      /* The process is definitely stopped.  It is in a job control
	 stop, unless the kernel predates the TASK_STOPPED /
	 TASK_TRACED distinction, in which case it might be in a
	 ptrace stop.  Make sure it is in a ptrace stop; from there we
	 can kill it, signal it, et cetera.

	 First make sure there is a pending SIGSTOP.  Since we are
	 already attached, the process can not transition from stopped
	 to running without a PTRACE_CONT; so we know this signal will
	 go into the queue.  The SIGSTOP generated by PTRACE_ATTACH is
	 probably already in the queue (unless this kernel is old
	 enough to use TASK_STOPPED for ptrace stops); but since SIGSTOP
	 is not an RT signal, it can only be queued once.  */
      kill_lwp (pid, SIGSTOP);

      /* Finally, resume the stopped process.  This will deliver the SIGSTOP
	 (or a higher priority signal, just like normal PTRACE_ATTACH).  */
      ptrace (PTRACE_CONT, pid, 0, 0);
    }

  /* Make sure the initial process is stopped.  The user-level threads
     layer might want to poke around in the inferior, and that won't
     work if things haven't stabilized yet.  */
  new_pid = my_waitpid (pid, &status, __WALL);
  gdb_assert (pid == new_pid);

  if (!WIFSTOPPED (status))
    {
      /* The pid we tried to attach has apparently just exited.  */
      linux_nat_debug_printf ("Failed to stop %d: %s", pid,
			      status_to_str (status).c_str ());
      return status;
    }

  if (WSTOPSIG (status) != SIGSTOP)
    {
      *signalled = 1;
      linux_nat_debug_printf ("Received %s after attaching",
			      status_to_str (status).c_str ());
    }

  return status;
}

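/* Implement the "create_inferior" target_ops method.  */
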
void
linux_nat_target::create_inferior (const char *exec_file,
				   const std::string &allargs,
				   char **env, int from_tty)
{
  maybe_disable_address_space_randomization restore_personality
    (disable_randomization);

  /* The fork_child mechanism is synchronous and calls target_wait, so
     we have to mask the async mode.  */

  /* Make sure we report all signals during startup.  */
  pass_signals ({});

  inf_ptrace_target::create_inferior (exec_file, allargs, env, from_tty);

  open_proc_mem_file (inferior_ptid);
}

/* Callback for linux_proc_attach_tgid_threads.  Attach to PTID if not
   already attached.  Returns true if a new LWP is found, false
   otherwise.  */

static int
attach_proc_task_lwp_callback (ptid_t ptid)
{
  struct lwp_info *lp;

  /* Ignore LWPs we're already attached to.  */
  lp = find_lwp_pid (ptid);
  if (lp == NULL)
    {
      int lwpid = ptid.lwp ();

      if (ptrace (PTRACE_ATTACH, lwpid, 0, 0) < 0)
	{
	  int err = errno;

	  /* Be quiet if we simply raced with the thread exiting.
	     EPERM is returned if the thread's task still exists, and
	     is marked as exited or zombie, as well as other
	     conditions, so in that case, confirm the status in
	     /proc/PID/status.  */
	  if (err == ESRCH
	      || (err == EPERM && linux_proc_pid_is_gone (lwpid)))
	    {
	      linux_nat_debug_printf
		("Cannot attach to lwp %d: thread is gone (%d: %s)",
		 lwpid, err, safe_strerror (err));
	    }
	  else
	    {
	      std::string reason
		= linux_ptrace_attach_fail_reason_string (ptid, err);

	      error (_("Cannot attach to lwp %d: %s"),
		     lwpid, reason.c_str ());
	    }
	}
      else
	{
	  linux_nat_debug_printf ("PTRACE_ATTACH %s, 0, 0 (OK)",
				  ptid.to_string ().c_str ());

	  lp = add_lwp (ptid);

	  /* The next time we wait for this LWP we'll see a SIGSTOP as
	     PTRACE_ATTACH brings it to a halt.  */
	  lp->signalled = 1;

	  /* We need to wait for a stop before being able to make the
	     next ptrace call on this LWP.  */
	  lp->must_set_ptrace_flags = 1;

	  /* So that wait collects the SIGSTOP.  */
	  lp->resumed = 1;
	}

      return 1;
    }
  return 0;
}

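/* Implement the "attach" target_ops method.  */
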
void
linux_nat_target::attach (const char *args, int from_tty)
{
  struct lwp_info *lp;
  int status;
  ptid_t ptid;

  /* Make sure we report all signals during attach.  */
  pass_signals ({});

  try
    {
      inf_ptrace_target::attach (args, from_tty);
    }
  catch (const gdb_exception_error &ex)
    {
      pid_t pid = parse_pid_to_attach (args);
      std::string reason = linux_ptrace_attach_fail_reason (pid);

      if (!reason.empty ())
	throw_error (ex.error, "warning: %s\n%s", reason.c_str (),
		     ex.what ());
      else
	throw_error (ex.error, "%s", ex.what ());
    }

  /* The ptrace base target adds the main thread with (pid,0,0)
     format.  Decorate it with lwp info.  */
  ptid = ptid_t (inferior_ptid.pid (),
		 inferior_ptid.pid ());
  thread_change_ptid (linux_target, inferior_ptid, ptid);

  /* Add the initial process as the first LWP to the list.  */
  lp = add_initial_lwp (ptid);

  status = linux_nat_post_attach_wait (lp->ptid, &lp->signalled);
  if (!WIFSTOPPED (status))
    {
      if (WIFEXITED (status))
	{
	  int exit_code = WEXITSTATUS (status);

	  target_terminal::ours ();
	  target_mourn_inferior (inferior_ptid);
	  if (exit_code == 0)
	    error (_("Unable to attach: program exited normally."));
	  else
	    error (_("Unable to attach: program exited with code %d."),
		   exit_code);
	}
      else if (WIFSIGNALED (status))
	{
	  enum gdb_signal signo;

	  target_terminal::ours ();
	  target_mourn_inferior (inferior_ptid);

	  signo = gdb_signal_from_host (WTERMSIG (status));
	  error (_("Unable to attach: program terminated with signal "
		   "%s, %s."),
		 gdb_signal_to_name (signo),
		 gdb_signal_to_string (signo));
	}

      internal_error (_("unexpected status %d for PID %ld"),
		      status, (long) ptid.lwp ());
    }

  lp->stopped = 1;

  open_proc_mem_file (lp->ptid);

  /* Save the wait status to report later.  */
  lp->resumed = 1;
  linux_nat_debug_printf ("waitpid %ld, saving status %s",
			  (long) lp->ptid.pid (),
			  status_to_str (status).c_str ());

  lp->status = status;

  /* We must attach to every LWP.  If /proc is mounted, use that to
     find them now.  The inferior may be using raw clone instead of
     using pthreads.  But even if it is using pthreads, thread_db
     walks structures in the inferior's address space to find the list
     of threads/LWPs, and those structures may well be corrupted.
     Note that once thread_db is loaded, we'll still use it to list
     threads and associate pthread info with each LWP.  */
  try
    {
      linux_proc_attach_tgid_threads (lp->ptid.pid (),
				      attach_proc_task_lwp_callback);
    }
  catch (const gdb_exception_error &)
    {
      /* Failed to attach to some LWP.  Detach any we've already
	 attached to.  */
      iterate_over_lwps (ptid_t (ptid.pid ()),
			 [] (struct lwp_info *lwp) -> int
			 {
			   /* Ignore errors when detaching.  */
			   ptrace (PTRACE_DETACH, lwp->ptid.lwp (), 0, 0);
			   delete_lwp (lwp->ptid);
			   return 0;
			 });

      target_terminal::ours ();
      target_mourn_inferior (inferior_ptid);

      throw;
    }

  /* Add all the LWPs to gdb's thread list.  */
  iterate_over_lwps (ptid_t (ptid.pid ()),
		     [] (struct lwp_info *lwp) -> int
		     {
		       if (lwp->ptid.pid () != lwp->ptid.lwp ())
			 {
			   add_thread (linux_target, lwp->ptid);
			   set_running (linux_target, lwp->ptid, true);
			   set_executing (linux_target, lwp->ptid, true);
			 }
		       return 0;
		     });
}

/* Ptrace-detach the thread with pid PID.  */

static void
detach_one_pid (int pid, int signo)
{
  if (ptrace (PTRACE_DETACH, pid, 0, signo) < 0)
    {
      int save_errno = errno;

      /* We know the thread exists, so ESRCH must mean the lwp is
	 zombie.  This can happen if one of the already-detached
	 threads exits the whole thread group.  In that case we're
	 still attached, and must reap the lwp.  */
      if (save_errno == ESRCH)
	{
	  int ret, status;

	  ret = my_waitpid (pid, &status, __WALL);
	  if (ret == -1)
	    {
	      warning (_("Couldn't reap LWP %d while detaching: %s"),
		       pid, safe_strerror (errno));
	    }
	  else if (!WIFEXITED (status) && !WIFSIGNALED (status))
	    {
	      warning (_("Reaping LWP %d while detaching "
			 "returned unexpected status 0x%x"),
		       pid, status);
	    }
	}
      else
	error (_("Can't detach %d: %s"),
	       pid, safe_strerror (save_errno));
    }
  else
    linux_nat_debug_printf ("PTRACE_DETACH (%d, %s, 0) (OK)",
			    pid, strsignal (signo));
}

/* Get the pending signal of LP as a host signal number, for
   detaching purposes.  This is the signal the thread last stopped
   for; we need to deliver it to the thread when detaching, otherwise
   it'd be suppressed/lost.  */

static int
get_detach_signal (struct lwp_info *lp)
{
  enum gdb_signal signo = GDB_SIGNAL_0;

  /* If we paused threads momentarily, we may have stored pending
     events in lp->status or lp->waitstatus (see stop_wait_callback),
     and GDB core hasn't seen any signal for those threads.
     Otherwise, the last signal reported to the core is found in the
     thread object's stop_signal.

     There's a corner case that isn't handled here at present.  Only
     if the thread stopped with a TARGET_WAITKIND_STOPPED does
     stop_signal make sense as a real signal to pass to the inferior.
     Some catchpoint related events, like
     TARGET_WAITKIND_(V)FORK|EXEC|SYSCALL, have their stop_signal set
     to GDB_SIGNAL_SIGTRAP when the catchpoint triggers.  But,
     those traps are debug API (ptrace in our case) related and
     induced; the inferior wouldn't see them if it wasn't being
     traced.  Hence, we should never pass them to the inferior, even
     when set to pass state.  Since this corner case isn't handled by
     infrun.c when proceeding with a signal, for consistency, neither
     do we handle it here (or elsewhere in the file we check for
     signal pass state).  Normally SIGTRAP isn't set to pass state, so
     this is really a corner case.  */

  if (lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE)
    signo = GDB_SIGNAL_0; /* a pending ptrace event, not a real signal.  */
  else if (lp->status)
    signo = gdb_signal_from_host (WSTOPSIG (lp->status));
  else
    {
      thread_info *tp = linux_target->find_thread (lp->ptid);

      if (target_is_non_stop_p () && !tp->executing ())
	{
	  if (tp->has_pending_waitstatus ())
	    {
	      /* If the thread has a pending event, and it was stopped with a
		 signal, use that signal to resume it.  If it has a pending
		 event of another kind, it was not stopped with a signal, so
		 resume it without a signal.  */
	      if (tp->pending_waitstatus ().kind () == TARGET_WAITKIND_STOPPED)
		signo = tp->pending_waitstatus ().sig ();
	      else
		signo = GDB_SIGNAL_0;
	    }
	  else
	    signo = tp->stop_signal ();
	}
      else if (!target_is_non_stop_p ())
	{
	  ptid_t last_ptid;
	  process_stratum_target *last_target;

	  get_last_target_status (&last_target, &last_ptid, nullptr);

	  if (last_target == linux_target
	      && lp->ptid.lwp () == last_ptid.lwp ())
	    signo = tp->stop_signal ();
	}
    }

  if (signo == GDB_SIGNAL_0)
    {
      linux_nat_debug_printf ("lwp %s has no pending signal",
			      lp->ptid.to_string ().c_str ());
    }
  else if (!signal_pass_state (signo))
    {
      linux_nat_debug_printf
	("lwp %s had signal %s but it is in no pass state",
	 lp->ptid.to_string ().c_str (), gdb_signal_to_string (signo));
    }
  else
    {
      linux_nat_debug_printf ("lwp %s has pending signal %s",
			      lp->ptid.to_string ().c_str (),
			      gdb_signal_to_string (signo));

      return gdb_signal_to_host (signo);
    }

  return 0;
}

/* If LP has a pending fork/vfork/clone status, return it.  */

static std::optional<target_waitstatus>
get_pending_child_status (lwp_info *lp)
{
  LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;

  linux_nat_debug_printf ("lwp %s (stopped = %d)",
			  lp->ptid.to_string ().c_str (), lp->stopped);

  /* Check in lwp_info::status.  */
  if (WIFSTOPPED (lp->status) && linux_is_extended_waitstatus (lp->status))
    {
      int event = linux_ptrace_get_extended_event (lp->status);

      if (event == PTRACE_EVENT_FORK
	  || event == PTRACE_EVENT_VFORK
	  || event == PTRACE_EVENT_CLONE)
	{
	  unsigned long child_pid;
	  int ret = ptrace (PTRACE_GETEVENTMSG, lp->ptid.lwp (), 0, &child_pid);
	  if (ret == 0)
	    {
	      target_waitstatus ws;

	      if (event == PTRACE_EVENT_FORK)
		ws.set_forked (ptid_t (child_pid, child_pid));
	      else if (event == PTRACE_EVENT_VFORK)
		ws.set_vforked (ptid_t (child_pid, child_pid));
	      else if (event == PTRACE_EVENT_CLONE)
		ws.set_thread_cloned (ptid_t (lp->ptid.pid (), child_pid));
	      else
		gdb_assert_not_reached ("unhandled");

	      return ws;
	    }
	  else
	    {
	      perror_warning_with_name (_("Failed to retrieve event msg"));
	      return {};
	    }
	}
    }

  /* Check in lwp_info::waitstatus.  */
  if (is_new_child_status (lp->waitstatus.kind ()))
    return lp->waitstatus;

  thread_info *tp = linux_target->find_thread (lp->ptid);

  /* Check in thread_info::pending_waitstatus.  */
  if (tp->has_pending_waitstatus ()
      && is_new_child_status (tp->pending_waitstatus ().kind ()))
    return tp->pending_waitstatus ();

  /* Check in thread_info::pending_follow.  */
  if (is_new_child_status (tp->pending_follow.kind ()))
    return tp->pending_follow;

  return {};
}

/* Detach from LP.  If SIGNO_P is non-NULL, then it points to the
   signal number that should be passed to the LWP when detaching.
   Otherwise, pass on any pending signal the LWP may have.  */

static void
detach_one_lwp (struct lwp_info *lp, int *signo_p)
{
  int lwpid = lp->ptid.lwp ();
  int signo;

  /* If the lwp/thread we are about to detach has a pending fork/clone
     event, there is a process/thread GDB is attached to that the core
     of GDB doesn't know about.  Detach from it.  */

  std::optional<target_waitstatus> ws = get_pending_child_status (lp);
  if (ws.has_value ())
    detach_one_pid (ws->child_ptid ().lwp (), 0);

  /* If there is a pending SIGSTOP, get rid of it.  */
  if (lp->signalled)
    {
      linux_nat_debug_printf ("Sending SIGCONT to %s",
			      lp->ptid.to_string ().c_str ());

      kill_lwp (lwpid, SIGCONT);
      lp->signalled = 0;
    }

  /* If the lwp has exited or was terminated due to a signal, there's
     nothing left to do.  */
  if (is_lwp_marked_dead (lp))
    {
      linux_nat_debug_printf
	("Can't detach %s - it has exited or was terminated: %s.",
	 lp->ptid.to_string ().c_str (),
	 lp->waitstatus.to_string ().c_str ());
      delete_lwp (lp->ptid);
      return;
    }

  if (signo_p == NULL)
    {
      /* Pass on any pending signal for this LWP.  */
      signo = get_detach_signal (lp);
    }
  else
    signo = *signo_p;

  linux_nat_debug_printf ("preparing to resume lwp %s (stopped = %d)",
			  lp->ptid.to_string ().c_str (),
			  lp->stopped);

  /* Preparing to resume may try to write registers, and fail if the
     lwp is zombie.  If that happens, ignore the error.  We'll handle
     it below, when detach fails with ESRCH.  */
  try
    {
      linux_target->low_prepare_to_resume (lp);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lp))
	throw;
    }

  detach_one_pid (lwpid, signo);

  delete_lwp (lp->ptid);
}

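/* Callback for iterate_over_lwps: detach from LP, passing on any
   pending signal, unless LP is the thread group leader.  */
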
static int
detach_callback (struct lwp_info *lp)
{
  /* We don't actually detach from the thread group leader just yet.
     If the thread group exits, we must reap the zombie clone lwps
     before we're able to reap the leader.  */
  if (lp->ptid.lwp () != lp->ptid.pid ())
    detach_one_lwp (lp, NULL);
  return 0;
}

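/* Implement the "detach" target_ops method.  */
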
void
linux_nat_target::detach (inferior *inf, int from_tty)
{
  LINUX_NAT_SCOPED_DEBUG_ENTER_EXIT;

  struct lwp_info *main_lwp;
  int pid = inf->pid;

  /* Don't unregister from the event loop, as there may be other
     inferiors running. */

  /* Stop all threads before detaching.  ptrace requires that the
     thread is stopped to successfully detach.  */
  iterate_over_lwps (ptid_t (pid), stop_callback);
  /* ... and wait until all of them have reported back that
     they're no longer running.  */
  iterate_over_lwps (ptid_t (pid), stop_wait_callback);

  /* We can now safely remove breakpoints.  We don't do this earlier
     in common code because this target doesn't currently support
     writing memory while the inferior is running.  */
  remove_breakpoints_inf (current_inferior ());

  iterate_over_lwps (ptid_t (pid), detach_callback);

  /* We have detached from everything except the main thread now, so
     should only have one thread left.  However, in non-stop mode the
     main thread might have exited, in which case we'll have no threads
     left.  */
  gdb_assert (num_lwps (pid) == 1
	      || (target_is_non_stop_p () && num_lwps (pid) == 0));

  if (forks_exist_p (inf))
    {
      /* Multi-fork case.  The current inferior_ptid is being detached
	 from, but there are other viable forks to debug.  Detach from
	 the current fork, and context-switch to the first
	 available.  */
      linux_fork_detach (from_tty, find_lwp_pid (ptid_t (pid)), inf);
    }
  else
    {
      target_announce_detach (from_tty);

      /* In non-stop mode it is possible that the main thread has exited,
	 in which case we don't try to detach.  */
      main_lwp = find_lwp_pid (ptid_t (pid));
      if (main_lwp != nullptr)
	{
	  /* Pass on any pending signal for the last LWP.  */
	  int signo = get_detach_signal (main_lwp);

	  detach_one_lwp (main_lwp, &signo);
	}
      else
	gdb_assert (target_is_non_stop_p ());

      detach_success (inf);
    }

  close_proc_mem_file (pid);
}

/* Resume execution of the inferior process.  If STEP is nonzero,
   single-step it.  If SIGNO is not GDB_SIGNAL_0, give it that
   signal.  */

static void
linux_resume_one_lwp_throw (struct lwp_info *lp, int step,
			    enum gdb_signal signo)
{
  lp->step = step;

  /* stop_pc doubles as the PC the LWP had when it was last resumed.
     We presently only need that if the LWP is stepped, though (to
     handle the case of stepping a breakpoint instruction).  */
  if (step)
    {
      struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);

      lp->stop_pc = regcache_read_pc (regcache);
    }
  else
    lp->stop_pc = 0;

  linux_target->low_prepare_to_resume (lp);
  linux_target->low_resume (lp->ptid, step, signo);

  /* Successfully resumed.  Clear state that no longer makes sense,
     and mark the LWP as running.  Must not do this before resuming
     otherwise if that fails other code will be confused.  E.g., we'd
     later try to stop the LWP and hang forever waiting for a stop
     status.  Note that we must not throw after this is cleared,
     otherwise handle_zombie_lwp_error would get confused.  */
  lp->stopped = 0;
  lp->core = -1;
  lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
  registers_changed_ptid (linux_target, lp->ptid);
}

/* Called when we try to resume a stopped LWP and that errors out.  If
   the LWP is no longer in ptrace-stopped state (meaning it's a
   zombie, or about to become one), discard the error, clear any
   pending status the LWP may have, and return true (we'll collect
   the exit status soon enough).  Otherwise, return false.  */

static int
check_ptrace_stopped_lwp_gone (struct lwp_info *lp)
{
  /* If we get an error after resuming the LWP successfully, we'd
     confuse !T state for the LWP being gone.  */
  gdb_assert (lp->stopped);

  /* We can't just check whether the LWP is in 'Z (Zombie)' state,
     because even if ptrace failed with ESRCH, the tracee may be "not
     yet fully dead", but already refusing ptrace requests.  In that
     case the tracee has 'R (Running)' state for a little bit
     (observed in Linux 3.18).  See also the note on ESRCH in the
     ptrace(2) man page.  Instead, check whether the LWP has any state
     other than ptrace-stopped.  */

  /* Don't assume anything if /proc/PID/status can't be read.  */
  if (linux_proc_pid_is_trace_stopped_nowarn (lp->ptid.lwp ()) == 0)
    {
      lp->stop_reason = TARGET_STOPPED_BY_NO_REASON;
      lp->status = 0;
      lp->waitstatus.set_ignore ();
      return 1;
    }
  return 0;
}

/* Like linux_resume_one_lwp_throw, but no error is thrown if the LWP
   disappears while we try to resume it.  */

static void
linux_resume_one_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
{
  try
    {
      linux_resume_one_lwp_throw (lp, step, signo);
    }
  catch (const gdb_exception_error &ex)
    {
      if (!check_ptrace_stopped_lwp_gone (lp))
	throw;
    }
}

/* Resume LP.  */

static void
resume_lwp (struct lwp_info *lp, int step, enum gdb_signal signo)
{
  if (lp->stopped)
    {
      struct inferior *inf = find_inferior_ptid (linux_target, lp->ptid);

      if (inf->vfork_child != NULL)
	{
	  linux_nat_debug_printf ("Not resuming sibling %s (vfork parent)",
				  lp->ptid.to_string ().c_str ());
	}
      else if (!lwp_status_pending_p (lp))
	{
	  linux_nat_debug_printf ("Resuming sibling %s, %s, %s",
				  lp->ptid.to_string ().c_str (),
				  (signo != GDB_SIGNAL_0
				   ? strsignal (gdb_signal_to_host (signo))
				   : "0"),
				  step ? "step" : "resume");

	  linux_resume_one_lwp (lp, step, signo);
	}
      else
	{
	  linux_nat_debug_printf ("Not resuming sibling %s (has pending)",
				  lp->ptid.to_string ().c_str ());
	}
    }
  else
    linux_nat_debug_printf ("Not resuming sibling %s (not stopped)",
			    lp->ptid.to_string ().c_str ());
}

/* Callback for iterate_over_lwps.  If LWP is EXCEPT, do nothing.
   Resume LWP with the last stop signal, if it is in pass state.  */

static int
linux_nat_resume_callback (struct lwp_info *lp, struct lwp_info *except)
{
  enum gdb_signal signo = GDB_SIGNAL_0;

  if (lp == except)
    return 0;

  if (lp->stopped)
    {
      struct thread_info *thread;

      thread = linux_target->find_thread (lp->ptid);
      if (thread != NULL)
	{
	  signo = thread->stop_signal ();
	  thread->set_stop_signal (GDB_SIGNAL_0);
	}
    }

  resume_lwp (lp, 0, signo);
  return 0;
}

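/* Callback for iterate_over_lwps: mark LP as not resumed, and record
   resume_stop as its last resume kind.  */
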
static int
resume_clear_callback (struct lwp_info *lp)
{
  lp->resumed = 0;
  lp->last_resume_kind = resume_stop;
  return 0;
}

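/* Callback for iterate_over_lwps: mark LP as resumed, and record
   resume_continue as its last resume kind.  */
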
static int
resume_set_callback (struct lwp_info *lp)
{
  lp->resumed = 1;
  lp->last_resume_kind = resume_continue;
  return 0;
}

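/* Implement the "resume" target_ops method.  */
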
void
linux_nat_target::resume (ptid_t scope_ptid, int step, enum gdb_signal signo)
{
  struct lwp_info *lp;

  linux_nat_debug_printf ("Preparing to %s %s, %s, inferior_ptid %s",
			  step ? "step" : "resume",
			  scope_ptid.to_string ().c_str (),
			  (signo != GDB_SIGNAL_0
			   ? strsignal (gdb_signal_to_host (signo)) : "0"),
			  inferior_ptid.to_string ().c_str ());

  /* Mark the lwps we're resuming as resumed and update their
     last_resume_kind to resume_continue.  */
  iterate_over_lwps (scope_ptid, resume_set_callback);

  lp = find_lwp_pid (inferior_ptid);
  gdb_assert (lp != NULL);

  /* Remember if we're stepping.  */
  lp->last_resume_kind = step ? resume_step : resume_continue;

  /* If we have a pending wait status for this thread, there is no
     point in resuming the process.  But first make sure that
     linux_nat_wait won't preemptively handle the event - we
     should never take this short-circuit if we are going to
     leave LP running, since we have skipped resuming all the
     other threads.  This bit of code needs to be synchronized
     with linux_nat_wait.  */

  if (lp->status && WIFSTOPPED (lp->status))
    {
      if (!lp->step
	  && WSTOPSIG (lp->status)
	  && sigismember (&pass_mask, WSTOPSIG (lp->status)))
	{
	  linux_nat_debug_printf
	    ("Not short circuiting for ignored status 0x%x", lp->status);

	  /* FIXME: What should we do if we are supposed to continue
	     this thread with a signal?  */
	  gdb_assert (signo == GDB_SIGNAL_0);
	  signo = gdb_signal_from_host (WSTOPSIG (lp->status));
	  lp->status = 0;
	}
    }

  if (lwp_status_pending_p (lp))
    {
      /* FIXME: What should we do if we are supposed to continue
	 this thread with a signal?  */
      gdb_assert (signo == GDB_SIGNAL_0);

      linux_nat_debug_printf ("Short circuiting for status %s",
			      pending_status_str (lp).c_str ());

      if (target_can_async_p ())
	{
	  target_async (true);
	  /* Tell the event loop we have something to process.  */
	  async_file_mark ();
	}
      return;
    }

  /* No use iterating unless we're resuming other threads.  */
  if (scope_ptid != lp->ptid)
    iterate_over_lwps (scope_ptid, [=] (struct lwp_info *info)
      {
	return linux_nat_resume_callback (info, lp);
      });

  linux_nat_debug_printf ("%s %s, %s (resume event thread)",
			  step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			  lp->ptid.to_string ().c_str (),
			  (signo != GDB_SIGNAL_0
			   ? strsignal (gdb_signal_to_host (signo)) : "0"));

  linux_resume_one_lwp (lp, step, signo);
}

/* Send a signal to an LWP.  */

static int
kill_lwp (int lwpid, int signo)
{
  int ret;

  errno = 0;
  ret = syscall (__NR_tkill, lwpid, signo);
  if (errno == ENOSYS)
    {
      /* If tkill fails, then we are not using nptl threads, a
	 configuration we no longer support.  */
      perror_with_name (("tkill"));
    }
  return ret;
}

/* Handle a GNU/Linux syscall trap wait response.  If we see a syscall
   event, check if the core is interested in it: if not, ignore the
   event, and keep waiting; otherwise, we need to toggle the LWP's
   syscall entry/exit status, since the ptrace event itself doesn't
   indicate it, and report the trap to higher layers.  */

static int
linux_handle_syscall_trap (struct lwp_info *lp, int stopping)
{
  struct target_waitstatus *ourstatus = &lp->waitstatus;
  struct gdbarch *gdbarch = target_thread_architecture (lp->ptid);
  thread_info *thread = linux_target->find_thread (lp->ptid);
  int syscall_number = (int) gdbarch_get_syscall_number (gdbarch, thread);

  if (stopping)
    {
      /* If we're stopping threads, there's a SIGSTOP pending, which
	 makes it so that the LWP reports an immediate syscall return,
	 followed by the SIGSTOP.  Skip seeing that "return" using
	 PTRACE_CONT directly, and let stop_wait_callback collect the
	 SIGSTOP.  Later, when the thread is resumed, it reports a new
	 syscall entry event.  If we didn't do this (and returned 0),
	 we'd leave a syscall entry pending, and our caller, by using
	 PTRACE_CONT to collect the SIGSTOP, skips the syscall return
	 itself.  Later, when the user re-resumes this LWP, we'd see
	 another syscall entry event and we'd mistake it for a return.

	 If stop_wait_callback didn't force the SIGSTOP out of the LWP
	 (leaving immediately with LWP->signalled set, without issuing
	 a PTRACE_CONT), it would still be problematic to leave this
	 syscall enter pending, as later when the thread is resumed,
	 it would then see the same syscall exit mentioned above,
	 followed by the delayed SIGSTOP, while the syscall didn't
	 actually get to execute.  It seems it would be even more
	 confusing to the user.  */

      linux_nat_debug_printf
	("ignoring syscall %d for LWP %ld (stopping threads), resuming with "
	 "PTRACE_CONT for SIGSTOP", syscall_number, lp->ptid.lwp ());

      lp->syscall_state = TARGET_WAITKIND_IGNORE;
      ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
      lp->stopped = 0;
      return 1;
    }

  /* Always update the entry/return state, even if this particular
     syscall isn't interesting to the core now.  In async mode,
     the user could install a new catchpoint for this syscall
     between syscall enter/return, and we'll need to know to
     report a syscall return if that happens.  */
  lp->syscall_state = (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
		       ? TARGET_WAITKIND_SYSCALL_RETURN
		       : TARGET_WAITKIND_SYSCALL_ENTRY);

  if (catch_syscall_enabled ())
    {
      if (catching_syscall_number (syscall_number))
	{
	  /* Alright, an event to report.  */
	  if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY)
	    ourstatus->set_syscall_entry (syscall_number);
	  else if (lp->syscall_state == TARGET_WAITKIND_SYSCALL_RETURN)
	    ourstatus->set_syscall_return (syscall_number);
	  else
	    gdb_assert_not_reached ("unexpected syscall state");

	  linux_nat_debug_printf
	    ("stopping for %s of syscall %d for LWP %ld",
	     (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	      ? "entry" : "return"), syscall_number, lp->ptid.lwp ());

	  return 0;
	}

      linux_nat_debug_printf
	("ignoring %s of syscall %d for LWP %ld",
	 (lp->syscall_state == TARGET_WAITKIND_SYSCALL_ENTRY
	  ? "entry" : "return"), syscall_number, lp->ptid.lwp ());
    }
  else
    {
      /* If we had been syscall tracing, and hence used PT_SYSCALL
	 before on this LWP, it could happen that the user removes all
	 syscall catchpoints before we get to process this event.
	 There are two noteworthy issues here:

	 - When stopped at a syscall entry event, resuming with
	   PT_STEP still resumes executing the syscall and reports a
	   syscall return.

	 - Only PT_SYSCALL catches syscall enters.  If we last
	   single-stepped this thread, then this event can't be a
	   syscall enter; it has to be a syscall exit.

	 The points above mean that the next resume, be it PT_STEP or
	 PT_CONTINUE, can not trigger a syscall trace event.  */
      linux_nat_debug_printf
	("caught syscall event with no syscall catchpoints. %d for LWP %ld, "
	 "ignoring", syscall_number, lp->ptid.lwp ());
      lp->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* The core isn't interested in this event.  For efficiency, avoid
     stopping all threads only to have the core resume them all again.
     Since we're not stopping threads, if we're still syscall tracing
     and not stepping, we can't use PTRACE_CONT here, as we'd miss any
     subsequent syscall.  Simply resume using the inf-ptrace layer,
     which knows when to use PT_SYSCALL or PT_CONTINUE.  */

  linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
  return 1;
}

/* See target.h.  */

void
linux_nat_target::follow_clone (ptid_t child_ptid)
{
  lwp_info *new_lp = add_lwp (child_ptid);
  new_lp->stopped = 1;

  /* If the thread_db layer is active, let it record the user
     level thread id and status, and add the thread to GDB's
     list.  */
  if (!thread_db_notice_clone (inferior_ptid, new_lp->ptid))
    {
      /* The process is not using thread_db.  Add the LWP to
	 GDB's list.  */
      add_thread (linux_target, new_lp->ptid);
    }

  /* We just created NEW_LP so it cannot yet contain STATUS.  */
  gdb_assert (new_lp->status == 0);

  if (!pull_pid_from_list (&stopped_pids, child_ptid.lwp (), &new_lp->status))
    internal_error (_("no saved status for clone lwp"));

  if (WSTOPSIG (new_lp->status) != SIGSTOP)
    {
      /* This can happen if someone starts sending signals with a
	 lower number than SIGSTOP (e.g. SIGUSR1) to the new thread
	 before it gets a chance to run.  This is an unlikely case,
	 and harder to handle for fork / vfork than for clone, so we
	 do not try - but we handle it for clone events here.  */

      new_lp->signalled = 1;

      /* Save the wait status to report later.  */
      linux_nat_debug_printf
	("waitpid of new LWP %ld, saving status %s",
	 (long) new_lp->ptid.lwp (), status_to_str (new_lp->status).c_str ());
    }
  else
    {
      new_lp->status = 0;

      if (report_thread_events)
	new_lp->waitstatus.set_thread_created ();
    }
}

/* Handle a GNU/Linux extended wait response.  If we see a clone
   event, we need to add the new LWP to our list (and not report the
   trap to higher layers).  This function returns non-zero if the
   event should be ignored and we should wait again.  */

static int
linux_handle_extended_wait (struct lwp_info *lp, int status)
{
  int pid = lp->ptid.lwp ();
  struct target_waitstatus *ourstatus = &lp->waitstatus;
  int event = linux_ptrace_get_extended_event (status);

  /* All extended events we currently use are mid-syscall.  Only
     PTRACE_EVENT_STOP is delivered more like a signal-stop, but
     you have to be using PTRACE_SEIZE to get that.  */
  lp->syscall_state = TARGET_WAITKIND_SYSCALL_ENTRY;

  if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK
      || event == PTRACE_EVENT_CLONE)
    {
      unsigned long new_pid;
      int ret;

      ptrace (PTRACE_GETEVENTMSG, pid, 0, &new_pid);

      /* If we haven't already seen the new PID stop, wait for it now.  */
      if (! pull_pid_from_list (&stopped_pids, new_pid, &status))
	{
	  /* The new child has a pending SIGSTOP.  We can't affect it until it
	     hits the SIGSTOP, but we're already attached.  */
	  ret = my_waitpid (new_pid, &status, __WALL);
	  if (ret == -1)
	    perror_with_name (_("waiting for new child"));
	  else if (ret != new_pid)
	    internal_error (_("wait returned unexpected PID %d"), ret);
	  else if (!WIFSTOPPED (status))
	    internal_error (_("wait returned unexpected status 0x%x"), status);
	}

      if (event == PTRACE_EVENT_FORK || event == PTRACE_EVENT_VFORK)
	{
	  open_proc_mem_file (ptid_t (new_pid, new_pid));

	  /* The arch-specific native code may need to know about new
	     forks even if those end up never mapped to an
	     inferior.  */
	  linux_target->low_new_fork (lp, new_pid);
	}
      else if (event == PTRACE_EVENT_CLONE)
	{
	  linux_target->low_new_clone (lp, new_pid);
	}

      if (event == PTRACE_EVENT_FORK
	  && linux_fork_checkpointing_p (lp->ptid.pid ()))
	{
	  /* Handle checkpointing by linux-fork.c here as a special
	     case.  We don't want the follow-fork-mode or 'catch fork'
	     to interfere with this.  */

	  /* This won't actually modify the breakpoint list, but will
	     physically remove the breakpoints from the child.  */
	  detach_breakpoints (ptid_t (new_pid, new_pid));

	  /* Retain child fork in ptrace (stopped) state.  */
	  if (find_fork_pid (new_pid).first == nullptr)
	    {
	      struct inferior *inf = find_inferior_ptid (linux_target,
							 lp->ptid);
	      add_fork (new_pid, inf);
	    }

	  /* Report as spurious, so that infrun doesn't want to follow
	     this fork.  We're actually doing an infcall in
	     linux-fork.c.  */
	  ourstatus->set_spurious ();

	  /* Report the stop to the core.  */
	  return 0;
	}

      if (event == PTRACE_EVENT_FORK)
	ourstatus->set_forked (ptid_t (new_pid, new_pid));
      else if (event == PTRACE_EVENT_VFORK)
	ourstatus->set_vforked (ptid_t (new_pid, new_pid));
      else if (event == PTRACE_EVENT_CLONE)
	{
	  linux_nat_debug_printf
	    ("Got clone event from LWP %d, new child is LWP %ld", pid, new_pid);

	  /* Save the status again, we'll use it in follow_clone.  */
	  add_to_pid_list (&stopped_pids, new_pid, status);

	  ourstatus->set_thread_cloned (ptid_t (lp->ptid.pid (), new_pid));
	}

      return 0;
    }

  if (event == PTRACE_EVENT_EXEC)
    {
      linux_nat_debug_printf ("Got exec event from LWP %ld", lp->ptid.lwp ());

      /* Close the previous /proc/PID/mem file for this inferior,
	 which was using the address space which is now gone.
	 Reading/writing from this file would return 0/EOF.  */
      close_proc_mem_file (lp->ptid.pid ());

      /* Open a new file for the new address space.  */
      open_proc_mem_file (lp->ptid);

      ourstatus->set_execd
	(make_unique_xstrdup (linux_target->pid_to_exec_file (pid)));

      /* The thread that execed must have been resumed, but, when a
	 thread execs, it changes its tid to the tgid, and the old
	 tgid thread might have not been resumed.  */
      lp->resumed = 1;

      /* All other LWPs are gone now.  We'll have received a thread
	 exit notification for all threads other than the execing one.
	 That one, if it wasn't the leader, just silently changes its
	 tid to the tgid, and the previous leader vanishes.  Since
	 Linux 3.0, the former thread ID can be retrieved with
	 PTRACE_GETEVENTMSG, but since we support older kernels, don't
	 bother with it, and just walk the LWP list.  Even with
	 PTRACE_GETEVENTMSG, we'd still need to lookup the
	 corresponding LWP object, and it would be an extra ptrace
	 syscall, so this way may even be more efficient.  */
      for (lwp_info &other_lp : all_lwps_safe ())
	if (&other_lp != lp && other_lp.ptid.pid () == lp->ptid.pid ())
	  exit_lwp (&other_lp);

      return 0;
    }

  if (event == PTRACE_EVENT_VFORK_DONE)
    {
      linux_nat_debug_printf
	("Got PTRACE_EVENT_VFORK_DONE from LWP %ld",
	 lp->ptid.lwp ());
      ourstatus->set_vfork_done ();
      return 0;
    }

  internal_error (_("unknown ptrace event %d"), event);
}

/* Suspend waiting for a signal.  We're mostly interested in
   SIGCHLD/SIGINT.  */

static void
wait_for_signal ()
{
  linux_nat_debug_printf ("about to sigsuspend");
  sigsuspend (&suspend_mask);

  /* If the quit flag is set, it means that the user pressed Ctrl-C
     and we're debugging a process that is running on a separate
     terminal, so we must forward the Ctrl-C to the inferior.  (If the
     inferior is sharing GDB's terminal, then the Ctrl-C reaches the
     inferior directly.)  We must do this here because functions that
     need to block waiting for a signal loop forever until there's an
     event to report before returning back to the event loop.  */
  if (!target_terminal::is_ours ())
    {
      if (check_quit_flag ())
	target_pass_ctrlc ();
    }
}

/* Mark LWP dead, with STATUS as exit status pending to report
   later.  */

static void
mark_lwp_dead (lwp_info *lp, int status)
{
  /* Store the exit status in lp->waitstatus, because lp->status
     would be ambiguous (W_EXITCODE(0,0) == 0).  */
  lp->waitstatus = host_status_to_waitstatus (status);

  /* If we're processing LP's status, there should be no other event
     already recorded as pending.  */
  gdb_assert (lp->status == 0);

  /* Dead LWPs aren't expected to report a pending sigstop.  */
  lp->signalled = 0;

  /* Prevent trying to stop it.  */
  lp->stopped = 1;
}

/* Return true if LP is dead, with a pending exit/signalled event.  */

static bool
is_lwp_marked_dead (lwp_info *lp)
{
  switch (lp->waitstatus.kind ())
    {
    case TARGET_WAITKIND_EXITED:
    case TARGET_WAITKIND_THREAD_EXITED:
    case TARGET_WAITKIND_SIGNALLED:
      return true;
    }
  return false;
}

/* Wait for LP to stop.  Returns the wait status, or 0 if the LWP has
   exited.  */

static int
wait_lwp (struct lwp_info *lp)
{
  pid_t pid;
  int status = 0;
  int thread_dead = 0;
  sigset_t prev_mask;

  gdb_assert (!lp->stopped);
  gdb_assert (lp->status == 0);

  /* Make sure SIGCHLD is blocked for sigsuspend avoiding a race below.  */
  block_child_signals (&prev_mask);

  for (;;)
    {
      pid = my_waitpid (lp->ptid.lwp (), &status, __WALL | WNOHANG);
      if (pid == -1 && errno == ECHILD)
	{
	  /* The thread has previously exited.  We need to delete it
	     now because if this was a non-leader thread execing, we
	     won't get an exit event.  See comments on exec events at
	     the top of the file.  */
	  thread_dead = 1;
	  linux_nat_debug_printf ("%s vanished.",
				  lp->ptid.to_string ().c_str ());
	}
      if (pid != 0)
	break;

      /* Bugs 10970, 12702.
	 The thread group leader may have exited, in which case we'll
	 lock up in waitpid if there are other threads, even if they
	 are all zombies too.  Basically, we're not supposed to use
	 waitpid this way.  tkill(pid,0) cannot be used here as it
	 gets ESRCH for both zombie and running processes.

	 As a workaround, check if we're waiting for the thread group
	 leader, and if it's a zombie, avoid calling waitpid at all.

	 This is racy: what if the leader becomes a zombie right
	 after we check?  Therefore always use WNOHANG with waitpid,
	 together with sigsuspend - that is equivalent to blocking in
	 waitpid, but linux_proc_pid_is_zombie is safe this way.  */

      if (lp->ptid.pid () == lp->ptid.lwp ()
	  && linux_proc_pid_is_zombie (lp->ptid.lwp ()))
	{
	  thread_dead = 1;
	  linux_nat_debug_printf ("Thread group leader %s vanished.",
				  lp->ptid.to_string ().c_str ());
	  break;
	}

      /* Wait for the next SIGCHLD and try again.  This may let
	 SIGCHLD handlers get invoked even though our caller had them
	 intentionally blocked with block_child_signals.  Only the
	 loop in linux_nat_wait_1 is sensitive to this, and there, if
	 we get called, my_waitpid gets called again before reaching
	 sigsuspend, so we can safely let the handlers run here.  */
      wait_for_signal ();
    }

  restore_child_signals_mask (&prev_mask);

  if (!thread_dead)
    {
      gdb_assert (pid == lp->ptid.lwp ());

      linux_nat_debug_printf ("waitpid %s received %s",
			      lp->ptid.to_string ().c_str (),
			      status_to_str (status).c_str ());

      /* Check if the thread has exited.  */
      if (WIFEXITED (status) || WIFSIGNALED (status))
	{
	  if (report_exit_events_for (lp) || is_leader (lp))
	    {
	      linux_nat_debug_printf ("LWP %d exited.", lp->ptid.pid ());

	      /* If this is the leader exiting, it means the whole
		 process is gone.  Store the status to report to the
		 core.  */
	      mark_lwp_dead (lp, status);
	      return 0;
	    }

	  thread_dead = 1;
	  linux_nat_debug_printf ("%s exited.",
				  lp->ptid.to_string ().c_str ());
	}
    }

  if (thread_dead)
    {
      exit_lwp (lp);
      return 0;
    }

  gdb_assert (WIFSTOPPED (status));
  lp->stopped = 1;

  if (lp->must_set_ptrace_flags)
    {
      inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
      int options = linux_nat_ptrace_options (inf->attach_flag);

      linux_enable_event_reporting (lp->ptid.lwp (), options);
      lp->must_set_ptrace_flags = 0;
    }

  /* Handle GNU/Linux's syscall SIGTRAPs.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
    {
      /* No longer need the sysgood bit.  The ptrace event ends up
	 recorded in lp->waitstatus if we care for it.  We can carry
	 on handling the event like a regular SIGTRAP from here
	 on.  */
      status = W_STOPCODE (SIGTRAP);
      if (linux_handle_syscall_trap (lp, 1))
	return wait_lwp (lp);
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in linux_handle_extended_wait.  */
      lp->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Handle GNU/Linux's extended waitstatus for trace events.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
      && linux_is_extended_waitstatus (status))
    {
      linux_nat_debug_printf ("Handling extended status 0x%06x", status);
      linux_handle_extended_wait (lp, status);
      return 0;
    }

  return status;
}

/* Send a SIGSTOP to LP.  */

static int
stop_callback (struct lwp_info *lp)
{
  if (!lp->stopped && !lp->signalled)
    {
      int ret;

      linux_nat_debug_printf ("kill %s **<SIGSTOP>**",
			      lp->ptid.to_string ().c_str ());

      errno = 0;
      ret = kill_lwp (lp->ptid.lwp (), SIGSTOP);
      linux_nat_debug_printf ("lwp kill %d %s", ret,
			      errno ? safe_strerror (errno) : "ERRNO-OK");

      lp->signalled = 1;
      gdb_assert (lp->status == 0);
    }

  return 0;
}

/* Request a stop on LWP.  */

void
linux_stop_lwp (struct lwp_info *lwp)
{
  stop_callback (lwp);
}

/* See linux-nat.h.  */

void
linux_stop_and_wait_all_lwps (void)
{
  /* Stop all LWPs ...  */
  iterate_over_lwps (minus_one_ptid, stop_callback);

  /* ... and wait until all of them have reported back that
     they're no longer running.  */
  iterate_over_lwps (minus_one_ptid, stop_wait_callback);
}

/* See linux-nat.h.  */

void
linux_unstop_all_lwps (void)
{
  iterate_over_lwps (minus_one_ptid,
		     [] (struct lwp_info *info)
		     {
		       return resume_stopped_resumed_lwps (info, minus_one_ptid);
		     });
}

/* Return non-zero if LWP PID has a pending SIGINT.  */

static int
linux_nat_has_pending_sigint (int pid)
{
  sigset_t pending, blocked, ignored;

  linux_proc_pending_signals (pid, &pending, &blocked, &ignored);

  if (sigismember (&pending, SIGINT)
      && !sigismember (&ignored, SIGINT))
    return 1;

  return 0;
}

/* Set a flag in LP indicating that we should ignore its next SIGINT.  */

static int
set_ignore_sigint (struct lwp_info *lp)
{
  /* If a thread has a pending SIGINT, consume it; otherwise, set a
     flag to consume the next one.  */
  if (lp->stopped && lp->status != 0 && WIFSTOPPED (lp->status)
      && WSTOPSIG (lp->status) == SIGINT)
    lp->status = 0;
  else
    lp->ignore_sigint = 1;

  return 0;
}

/* If LP does not have a SIGINT pending, then clear the ignore_sigint flag.
   This function is called after we know the LWP has stopped; if the LWP
   stopped before the expected SIGINT was delivered, then it will never have
   arrived.  Also, if the signal was delivered to a shared queue and consumed
   by a different thread, it will never be delivered to this LWP.  */

static void
maybe_clear_ignore_sigint (struct lwp_info *lp)
{
  if (!lp->ignore_sigint)
    return;

  if (!linux_nat_has_pending_sigint (lp->ptid.lwp ()))
    {
      linux_nat_debug_printf ("Clearing bogus flag for %s",
			      lp->ptid.to_string ().c_str ());
      lp->ignore_sigint = 0;
    }
}

/* Fetch the possibly-triggered data watchpoint info and store it in
   LP.

   On some archs, like x86, that use debug registers to set
   watchpoints, the way to know which watched address trapped is to
   check the register that selects which address to watch.  The
   problem is that, between setting the watchpoint and reading back
   which data address trapped, the user may change the set of
   watchpoints, and, as a consequence, GDB changes the debug
   registers in the inferior.  To avoid reading back a stale
   stopped-data-address when that happens, we cache in LP the fact
   that a watchpoint trapped, and the corresponding data address, as
   soon as we see LP stop with a SIGTRAP.  If GDB changes the debug
   registers meanwhile, we still have the cached data to rely on.  */

static int
check_stopped_by_watchpoint (struct lwp_info *lp)
{
  scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
  inferior_ptid = lp->ptid;

  if (linux_target->low_stopped_by_watchpoint ())
    {
      lp->stop_reason = TARGET_STOPPED_BY_WATCHPOINT;
      lp->stopped_data_address_p
	= linux_target->low_stopped_data_address (&lp->stopped_data_address);
    }

  return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
}

/* Returns true if the LWP had stopped for a watchpoint.  */

bool
linux_nat_target::stopped_by_watchpoint ()
{
  struct lwp_info *lp = find_lwp_pid (inferior_ptid);

  gdb_assert (lp != NULL);

  return lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT;
}

bool
linux_nat_target::stopped_data_address (CORE_ADDR *addr_p)
{
  struct lwp_info *lp = find_lwp_pid (inferior_ptid);

  gdb_assert (lp != NULL);

  *addr_p = lp->stopped_data_address;

  return lp->stopped_data_address_p;
}

/* Commonly, any breakpoint / watchpoint generates only SIGTRAP.  */

bool
linux_nat_target::low_status_is_event (int status)
{
  return WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP;
}
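
/* For example, with SIGTRAP == 5, a breakpoint stop arrives as the raw
   wait status 0x57f, i.e. (5 << 8) | 0x7f, for which WIFSTOPPED is
   true and WSTOPSIG yields SIGTRAP.  */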

/* Wait until LP is stopped.  */

static int
stop_wait_callback (struct lwp_info *lp)
{
  inferior *inf = find_inferior_ptid (linux_target, lp->ptid);

  /* If this is a vfork parent, bail out; it is not going to report
     any SIGSTOP until the vfork is done.  */
  if (inf->vfork_child != NULL)
    return 0;

  if (!lp->stopped)
    {
      int status;

      status = wait_lwp (lp);
      if (status == 0)
	return 0;

      if (lp->ignore_sigint && WIFSTOPPED (status)
	  && WSTOPSIG (status) == SIGINT)
	{
	  lp->ignore_sigint = 0;

	  errno = 0;
	  ptrace (PTRACE_CONT, lp->ptid.lwp (), 0, 0);
	  lp->stopped = 0;
	  linux_nat_debug_printf
	    ("PTRACE_CONT %s, 0, 0 (%s) (discarding SIGINT)",
	     lp->ptid.to_string ().c_str (),
	     errno ? safe_strerror (errno) : "OK");

	  return stop_wait_callback (lp);
	}

      maybe_clear_ignore_sigint (lp);

      if (WSTOPSIG (status) != SIGSTOP)
	{
	  /* The thread was stopped with a signal other than SIGSTOP.  */

	  linux_nat_debug_printf ("Pending event %s in %s",
				  status_to_str ((int) status).c_str (),
				  lp->ptid.to_string ().c_str ());

	  /* Save the pending event.  */
	  lp->status = status;
	  gdb_assert (lp->signalled);
	  save_stop_reason (lp);
	}
      else
	{
	  /* We caught the SIGSTOP that we intended to catch.  */

	  linux_nat_debug_printf ("Expected SIGSTOP caught for %s.",
				  lp->ptid.to_string ().c_str ());

	  lp->signalled = 0;

	  /* If we are waiting for this stop so we can report the thread
	     stopped, then we need to record this status.  Otherwise, we
	     can now discard this stop event.  */
	  if (lp->last_resume_kind == resume_stop)
	    {
	      lp->status = status;
	      save_stop_reason (lp);
	    }
	}
    }

  return 0;
}

/* Get the inferior associated with LWP.  Must be called with an LWP
   that has an associated inferior.  Never returns nullptr.  */

static inferior *
lwp_inferior (const lwp_info *lwp)
{
  inferior *inf = find_inferior_ptid (linux_target, lwp->ptid);
  gdb_assert (inf != nullptr);
  return inf;
}

/* Return non-zero if LP has a wait status pending.  Discard the
   pending event and resume the LWP if the event that originally
   caused the stop became uninteresting.  */

static int
status_callback (struct lwp_info *lp)
{
  /* Only report a pending wait status if the core considers this LWP
     resumed.  */
  if (!lp->resumed)
    return 0;

  if (!lwp_status_pending_p (lp))
    return 0;

  if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT
      || lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      struct regcache *regcache = get_thread_regcache (linux_target, lp->ptid);
      CORE_ADDR pc;
      int discard = 0;

      pc = regcache_read_pc (regcache);

      if (pc != lp->stop_pc)
	{
	  linux_nat_debug_printf ("PC of %s changed.  was=%s, now=%s",
				  lp->ptid.to_string ().c_str (),
				  paddress (current_inferior ()->arch (),
					    lp->stop_pc),
				  paddress (current_inferior ()->arch (), pc));
	  discard = 1;
	}

      if (discard)
	{
	  linux_nat_debug_printf ("pending event of %s cancelled.",
				  lp->ptid.to_string ().c_str ());

	  lp->status = 0;
	  linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
	  return 0;
	}
    }

  return 1;
}

/* Count the LWPs that have had events.  */

static int
count_events_callback (struct lwp_info *lp, int *count)
{
  gdb_assert (count != NULL);

  /* Select only resumed LWPs that have an event pending.  */
  if (lp->resumed && lwp_status_pending_p (lp))
    (*count)++;

  return 0;
}

/* Select the LWP (if any) that is currently being single-stepped.  */

static int
select_singlestep_lwp_callback (struct lwp_info *lp)
{
  if (lp->last_resume_kind == resume_step
      && lp->status != 0)
    return 1;
  else
    return 0;
}

/* Returns true if LP has a status pending.  */

static int
lwp_status_pending_p (struct lwp_info *lp)
{
  /* We check for lp->waitstatus in addition to lp->status, because we
     can have pending process exits recorded in lp->status and
     W_EXITCODE(0,0) happens to be 0.  */
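     (W_EXITCODE (ret, sig) expands to (ret << 8) | sig, so the raw
     wait status for a clean exit (0) is literally 0 and would be
     indistinguishable from "no event" in lp->status alone.)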
  return lp->status != 0 || lp->waitstatus.kind () != TARGET_WAITKIND_IGNORE;
}

/* Select the Nth LWP that has had an event.  */

static int
select_event_lwp_callback (struct lwp_info *lp, int *selector)
{
  gdb_assert (selector != NULL);

  /* Select only resumed LWPs that have an event pending.  */
  if (lp->resumed && lwp_status_pending_p (lp))
    if ((*selector)-- == 0)
      return 1;

  return 0;
}

/* Called when the LWP stopped for a signal/trap.  If it stopped for a
   trap, check what caused it (breakpoint, watchpoint, trace, etc.),
   and save the result in the LWP's stop_reason field.  If it stopped
   for a breakpoint, decrement the PC if necessary on the lwp's
   architecture.  */

static void
save_stop_reason (struct lwp_info *lp)
{
  struct regcache *regcache;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  CORE_ADDR sw_bp_pc;
  siginfo_t siginfo;

  gdb_assert (lp->stop_reason == TARGET_STOPPED_BY_NO_REASON);
  gdb_assert (lp->status != 0);

  if (!linux_target->low_status_is_event (lp->status))
    return;

  inferior *inf = lwp_inferior (lp);
  if (inf->starting_up)
    return;

  regcache = get_thread_regcache (linux_target, lp->ptid);
  gdbarch = regcache->arch ();

  pc = regcache_read_pc (regcache);
  sw_bp_pc = pc - gdbarch_decr_pc_after_break (gdbarch);
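
  /* On x86, for example, gdbarch_decr_pc_after_break is 1: the int3
     breakpoint instruction traps with the PC already past its single
     opcode byte, so SW_BP_PC backs up to the breakpoint address.  On
     architectures where the trap reports the breakpoint address
     itself, the adjustment is 0 and SW_BP_PC == PC.  */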

  if (linux_nat_get_siginfo (lp->ptid, &siginfo))
    {
      if (siginfo.si_signo == SIGTRAP)
	{
	  if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code)
	      && GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* The si_code is ambiguous on this arch -- check debug
		 registers.  */
	      if (!check_stopped_by_watchpoint (lp))
		lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_BRKPT (siginfo.si_code))
	    {
	      /* If we determine the LWP stopped for a SW breakpoint,
		 trust it.  Particularly don't check watchpoint
		 registers, because, at least on s390, we'd find
		 stopped-by-watchpoint as long as there's a watchpoint
		 set.  */
	      lp->stop_reason = TARGET_STOPPED_BY_SW_BREAKPOINT;
	    }
	  else if (GDB_ARCH_IS_TRAP_HWBKPT (siginfo.si_code))
	    {
	      /* This can indicate either a hardware breakpoint or
		 hardware watchpoint.  Check debug registers.  */
	      if (!check_stopped_by_watchpoint (lp))
		lp->stop_reason = TARGET_STOPPED_BY_HW_BREAKPOINT;
	    }
	  else if (siginfo.si_code == TRAP_TRACE)
	    {
	      linux_nat_debug_printf ("%s stopped by trace",
				      lp->ptid.to_string ().c_str ());

	      /* We may have single stepped an instruction that
		 triggered a watchpoint.  In that case, on some
		 architectures (such as x86), instead of TRAP_HWBKPT,
		 si_code indicates TRAP_TRACE, and we need to check
		 the debug registers separately.  */
	      check_stopped_by_watchpoint (lp);
	    }
	}
    }

  if (lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT)
    {
      linux_nat_debug_printf ("%s stopped by software breakpoint",
			      lp->ptid.to_string ().c_str ());

      /* Back up the PC if necessary.  */
      if (pc != sw_bp_pc)
	regcache_write_pc (regcache, sw_bp_pc);

      /* Update this so we record the correct stop PC below.  */
      pc = sw_bp_pc;
    }
  else if (lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT)
    {
      linux_nat_debug_printf ("%s stopped by hardware breakpoint",
			      lp->ptid.to_string ().c_str ());
    }
  else if (lp->stop_reason == TARGET_STOPPED_BY_WATCHPOINT)
    {
      linux_nat_debug_printf ("%s stopped by hardware watchpoint",
			      lp->ptid.to_string ().c_str ());
    }

  lp->stop_pc = pc;
}


/* Returns true if the LWP had stopped for a software breakpoint.  */

bool
linux_nat_target::stopped_by_sw_breakpoint ()
{
  struct lwp_info *lp = find_lwp_pid (inferior_ptid);

  gdb_assert (lp != NULL);

  return lp->stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
}

/* Implement the supports_stopped_by_sw_breakpoint method.  */

bool
linux_nat_target::supports_stopped_by_sw_breakpoint ()
{
  return true;
}

/* Returns true if the LWP had stopped for a hardware
   breakpoint/watchpoint.  */

bool
linux_nat_target::stopped_by_hw_breakpoint ()
{
  struct lwp_info *lp = find_lwp_pid (inferior_ptid);

  gdb_assert (lp != NULL);

  return lp->stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
}

/* Implement the supports_stopped_by_hw_breakpoint method.  */

bool
linux_nat_target::supports_stopped_by_hw_breakpoint ()
{
  return true;
}

/* Select one LWP out of those that have events pending.  */

static void
select_event_lwp (ptid_t filter, struct lwp_info **orig_lp, int *status)
{
  int num_events = 0;
  int random_selector;
  struct lwp_info *event_lp = NULL;

  /* Record the wait status for the original LWP.  */
  (*orig_lp)->status = *status;

  /* In all-stop, give preference to the LWP that is being
     single-stepped.  There will be at most one, and it will be the
     LWP that the core is most interested in.  If we didn't do this,
     then we'd have to handle pending step SIGTRAPs somehow in case
     the core later continues the previously-stepped thread, as
     otherwise we'd report the pending SIGTRAP then, and the core, not
     having stepped the thread, wouldn't understand what the trap was
     for, and therefore would report it to the user as a random
     signal.  */
  if (!target_is_non_stop_p ())
    {
      event_lp = iterate_over_lwps (filter, select_singlestep_lwp_callback);
      if (event_lp != NULL)
	{
	  linux_nat_debug_printf ("Select single-step %s",
				  event_lp->ptid.to_string ().c_str ());
	}
    }

  if (event_lp == NULL)
    {
      /* Pick one at random, out of those which have had events.  */

      /* First see how many events we have.  */
      iterate_over_lwps (filter,
			 [&] (struct lwp_info *info)
			 {
			   return count_events_callback (info, &num_events);
			 });
      gdb_assert (num_events > 0);

      /* Now randomly pick a LWP out of those that have had
	 events.  */
      random_selector = (int)
	((num_events * (double) rand ()) / (RAND_MAX + 1.0));
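      /* The expression above maps rand () from [0, RAND_MAX] uniformly
	 onto the integers 0 .. num_events - 1; e.g., with three pending
	 events each one is selected with probability 1/3.  */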

      if (num_events > 1)
	linux_nat_debug_printf ("Found %d events, selecting #%d",
				num_events, random_selector);

      event_lp
	= (iterate_over_lwps
	   (filter,
	    [&] (struct lwp_info *info)
	    {
	      return select_event_lwp_callback (info,
						&random_selector);
	    }));
    }

  if (event_lp != NULL)
    {
      /* Switch the event LWP.  */
      *orig_lp = event_lp;
      *status = event_lp->status;
    }

  /* Flush the wait status for the event LWP.  */
  (*orig_lp)->status = 0;
}

/* Return non-zero if LP has been resumed.  */

static int
resumed_callback (struct lwp_info *lp)
{
  return lp->resumed;
}

/* Check if we should go on and pass this event to common code.

   If so, save the status to the lwp_info structure associated with
   LWPID.  */

static void
linux_nat_filter_event (int lwpid, int status)
{
  struct lwp_info *lp;
  int event = linux_ptrace_get_extended_event (status);

  lp = find_lwp_pid (ptid_t (lwpid));

  /* Check for events reported by anything not in our LWP list.  */
  if (lp == nullptr)
    {
      if (WIFSTOPPED (status))
	{
	  if (WSTOPSIG (status) == SIGTRAP && event == PTRACE_EVENT_EXEC)
	    {
	      /* A non-leader thread exec'ed after we've seen the
		 leader zombie, and removed it from our lists (in
		 check_zombie_leaders).  The non-leader thread changes
		 its tid to the tgid.  */
	      linux_nat_debug_printf
		("Re-adding thread group leader LWP %d after exec.",
		 lwpid);

	      lp = add_lwp (ptid_t (lwpid, lwpid));
	      lp->stopped = 1;
	      lp->resumed = 1;
	      add_thread (linux_target, lp->ptid);
	    }
	  else
	    {
	      /* A process we are controlling has forked and the new
		 child's stop was reported to us by the kernel.  Save
		 its PID and go back to waiting for the fork event to
		 be reported - the stopped process might be returned
		 from waitpid before or after the fork event is.  */
	      linux_nat_debug_printf
		("Saving LWP %d status %s in stopped_pids list",
		 lwpid, status_to_str (status).c_str ());
	      add_to_pid_list (&stopped_pids, lwpid, status);
	    }
	}
      else
	{
	  /* Don't report an event for the exit of an LWP not in our
	     list, i.e. not part of any inferior we're debugging.
	     This can happen if we detach from a program we originally
	     forked and then it exits.  However, note that we may have
	     earlier deleted a leader of an inferior we're debugging,
	     in check_zombie_leaders.  Re-add it back here if so.  */
	  for (inferior *inf : all_inferiors (linux_target))
	    {
	      if (inf->pid == lwpid)
		{
		  linux_nat_debug_printf
		    ("Re-adding thread group leader LWP %d after exit.",
		     lwpid);

		  lp = add_lwp (ptid_t (lwpid, lwpid));
		  lp->resumed = 1;
		  add_thread (linux_target, lp->ptid);
		  break;
		}
	    }
	}

      if (lp == nullptr)
	return;
    }

  /* This LWP is stopped now.  (And if dead, this prevents it from
     ever being continued.)  */
  lp->stopped = 1;

  if (WIFSTOPPED (status) && lp->must_set_ptrace_flags)
    {
      inferior *inf = find_inferior_pid (linux_target, lp->ptid.pid ());
      int options = linux_nat_ptrace_options (inf->attach_flag);

      linux_enable_event_reporting (lp->ptid.lwp (), options);
      lp->must_set_ptrace_flags = 0;
    }

  /* Handle GNU/Linux's syscall SIGTRAPs.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SYSCALL_SIGTRAP)
    {
      /* No longer need the sysgood bit.  The ptrace event ends up
	 recorded in lp->waitstatus if we care for it.  We can carry
	 on handling the event like a regular SIGTRAP from here
	 on.  */
      status = W_STOPCODE (SIGTRAP);
      if (linux_handle_syscall_trap (lp, 0))
	return;
    }
  else
    {
      /* Almost all other ptrace-stops are known to be outside of system
	 calls, with further exceptions in linux_handle_extended_wait.  */
      lp->syscall_state = TARGET_WAITKIND_IGNORE;
    }

  /* Handle GNU/Linux's extended waitstatus for trace events.  */
  if (WIFSTOPPED (status) && WSTOPSIG (status) == SIGTRAP
      && linux_is_extended_waitstatus (status))
    {
      linux_nat_debug_printf ("Handling extended status 0x%06x", status);

      if (linux_handle_extended_wait (lp, status))
	return;
    }

  /* Check if the thread has exited.  */
  if (WIFEXITED (status) || WIFSIGNALED (status))
    {
      if (!report_exit_events_for (lp) && !is_leader (lp))
	{
	  linux_nat_debug_printf ("%s exited.",
				  lp->ptid.to_string ().c_str ());

	  /* If this was not the leader exiting, then the exit signal
	     was not the end of the debugged application and should be
	     ignored.  */
	  exit_lwp (lp);
	  return;
	}

      /* Note that even if the leader was ptrace-stopped, it can still
	 exit, if e.g., some other thread brings down the whole
	 process (calls `exit').  So don't assert that the lwp is
	 resumed.  */
      linux_nat_debug_printf ("LWP %ld exited (resumed=%d)",
			      lp->ptid.lwp (), lp->resumed);

      mark_lwp_dead (lp, status);
      return;
    }

  /* Make sure we don't report a SIGSTOP that we sent ourselves in
     an attempt to stop an LWP.  */
  if (lp->signalled
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGSTOP)
    {
      lp->signalled = 0;

      if (lp->last_resume_kind == resume_stop)
	{
	  linux_nat_debug_printf ("resume_stop SIGSTOP caught for %s.",
				  lp->ptid.to_string ().c_str ());
	}
      else
	{
	  /* This is a delayed SIGSTOP.  Filter out the event.  */

	  linux_nat_debug_printf
	    ("%s %s, 0, 0 (discard delayed SIGSTOP)",
	     lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
	     lp->ptid.to_string ().c_str ());

	  linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
	  gdb_assert (lp->resumed);
	  return;
	}
    }

  /* Make sure we don't report a SIGINT that we have already displayed
     for another thread.  */
  if (lp->ignore_sigint
      && WIFSTOPPED (status) && WSTOPSIG (status) == SIGINT)
    {
      linux_nat_debug_printf ("Delayed SIGINT caught for %s.",
			      lp->ptid.to_string ().c_str ());

      /* This is a delayed SIGINT.  */
      lp->ignore_sigint = 0;

      linux_resume_one_lwp (lp, lp->step, GDB_SIGNAL_0);
      linux_nat_debug_printf ("%s %s, 0, 0 (discard SIGINT)",
			      lp->step ? "PTRACE_SINGLESTEP" : "PTRACE_CONT",
			      lp->ptid.to_string ().c_str ());
      gdb_assert (lp->resumed);

      /* Discard the event.  */
      return;
    }

  /* Don't report signals that GDB isn't interested in, such as
     signals that are neither printed nor stopped upon.  Stopping all
     threads can be a bit time-consuming, so if we want decent
     performance with heavily multi-threaded programs, especially when
     they're using a high frequency timer, we'd better avoid it if we
     can.  */
  if (WIFSTOPPED (status))
    {