aboutsummaryrefslogtreecommitdiff
path: root/gcc
diff options
context:
space:
mode:
authorJakub Jelinek <jakub@redhat.com>2021-10-07 15:16:13 +0200
committerJakub Jelinek <jakub@redhat.com>2021-10-07 15:16:13 +0200
commit348b426be3fc99453b42e79a18331c7bf24ee0dc (patch)
tree9abd79c677bf3d4266cb73d457de67b99d382ffd /gcc
parent1ebf2c14c60f5f37c00bea66aba46c25d4bcf473 (diff)
downloadgcc-348b426be3fc99453b42e79a18331c7bf24ee0dc.zip
gcc-348b426be3fc99453b42e79a18331c7bf24ee0dc.tar.gz
gcc-348b426be3fc99453b42e79a18331c7bf24ee0dc.tar.bz2
c++: Add testcase for C++23 P2316R2 - consistent character literal encoding [PR102615]
I believe we need no changes to the compiler for P2316R2, seems we treat character literals the same between preprocessor and C++ expressions, here is a testcase that should verify it. Note, seems the internal charset for GCC can be either UTF-8 or UTF-EBCDIC, but I bet it is very hard (at least for me) to actually test the latter. I'd guess one needs all system headers to be in EBCDIC and the gcc sources too. But looking around the source, I'm a little bit worried about the UTF-EBCDIC case. One is: #if '\n' == 0x0A && ' ' == 0x20 && '0' == 0x30 \ && 'A' == 0x41 && 'a' == 0x61 && '!' == 0x21 # define HOST_CHARSET HOST_CHARSET_ASCII #else # if '\n' == 0x15 && ' ' == 0x40 && '0' == 0xF0 \ && 'A' == 0xC1 && 'a' == 0x81 && '!' == 0x5A # define HOST_CHARSET HOST_CHARSET_EBCDIC # else # define HOST_CHARSET HOST_CHARSET_UNKNOWN # endif #endif in include/safe-ctype.h, does that mean we only support EBCDIC if -funsigned-char and otherwise fail to build gcc? Because with -fsigned-char, '0' is -0x10 rather than 0xF0, 'A' is -0x3F rather than 0xC1 and 'a' is -0x7F rather than 0x81. And another thing, if HOST_CHARSET == HOST_CHARSET_EBCDIC, how does the libcpp/lex.c static const cppchar_t utf8_signifier = 0xC0; ... if (*buffer->cur >= utf8_signifier) { if (_cpp_valid_utf8 (pfile, &buffer->cur, buffer->rlimit, 1 + !first, state, &s)) return true; } work? Because in UTF-EBCDIC, >= 0xC0 isn't the right test for start of multi-byte character, it is more complicated and seems _cpp_valid_utf8 assumes UTF-8 as the host charset. 2021-10-07 Jakub Jelinek <jakub@redhat.com> PR c++/102615 * g++.dg/cpp23/charlit-encoding1.C: New testcase for C++23 P2316R2.
Diffstat (limited to 'gcc')
-rw-r--r--gcc/testsuite/g++.dg/cpp23/charlit-encoding1.C33
1 file changed, 33 insertions, 0 deletions
diff --git a/gcc/testsuite/g++.dg/cpp23/charlit-encoding1.C b/gcc/testsuite/g++.dg/cpp23/charlit-encoding1.C
new file mode 100644
index 0000000..736f022
--- /dev/null
+++ b/gcc/testsuite/g++.dg/cpp23/charlit-encoding1.C
@@ -0,0 +1,33 @@
+// PR c++/102615 - P2316R2 - Consistent character literal encoding
+// { dg-do run }
+
+extern "C" void abort ();
+
+int
+main ()
+{
+#if ' ' == 0x20	// ASCII space — preprocessor and runtime must agree (P2316R2)
+  if (' ' != 0x20)
+    abort ();
+#elif ' ' == 0x40	// EBCDIC space (see include/safe-ctype.h host-charset probe)
+  if (' ' != 0x40)
+    abort ();
+#else			// unknown charset: literal must still match itself in both phases
+  if (' ' == 0x20 || ' ' == 0x40)
+    abort ();
+#endif
+#if 'a' == 0x61	// ASCII 'a'
+  if ('a' != 0x61)
+    abort ();
+#elif 'a' == 0x81	// EBCDIC 'a' with unsigned char
+  if ('a' != 0x81)
+    abort ();
+#elif 'a' == -0x7F	// EBCDIC 'a' (0x81) with signed char
+  if ('a' != -0x7F)
+    abort ();
+#else			// none matched: verify runtime agrees it is none of them
+  if ('a' == 0x61 || 'a' == 0x81 || 'a' == -0x7F)
+    abort ();
+#endif
+  return 0;
+}