aboutsummaryrefslogtreecommitdiff
path: root/libgcc/config
diff options
context:
space:
mode:
authorIan Lance Taylor <ian@gcc.gnu.org>2019-12-13 22:10:44 +0000
committerIan Lance Taylor <ian@gcc.gnu.org>2019-12-13 22:10:44 +0000
commit51426017f8fe0f18295ca467feba3fbb5aad3fa8 (patch)
tree2f686f2d4657aa570473986e7d0924794093c67b /libgcc/config
parent0cec14923830569b8727d461bcf64adaf965de83 (diff)
parentc926fd82bbd336b317266d43b9fa67a83397b06b (diff)
downloadgcc-51426017f8fe0f18295ca467feba3fbb5aad3fa8.zip
gcc-51426017f8fe0f18295ca467feba3fbb5aad3fa8.tar.gz
gcc-51426017f8fe0f18295ca467feba3fbb5aad3fa8.tar.bz2
Merge from trunk revision 279830.
From-SVN: r279387
Diffstat (limited to 'libgcc/config')
-rw-r--r--libgcc/config/arc/t-arc2
-rw-r--r--libgcc/config/arm/unwind-arm-vxworks.c65
-rw-r--r--libgcc/config/avr/t-avr8
-rw-r--r--libgcc/config/avr/t-avrlibc31
-rw-r--r--libgcc/config/avr/t-copy-libgcc13
-rw-r--r--libgcc/config/gthr-vxworks-cond.c83
-rw-r--r--libgcc/config/gthr-vxworks-thread.c349
-rw-r--r--libgcc/config/gthr-vxworks-tls.c (renamed from libgcc/config/vxlib-tls.c)110
-rw-r--r--libgcc/config/gthr-vxworks.c87
-rw-r--r--libgcc/config/gthr-vxworks.h272
-rw-r--r--libgcc/config/m68k/linux-unwind.h2
-rw-r--r--libgcc/config/msp430/t-msp4306
-rw-r--r--libgcc/config/pa/linux-atomic.c210
-rw-r--r--libgcc/config/riscv/t-softfp3217
-rw-r--r--libgcc/config/t-gthr-noweak2
-rw-r--r--libgcc/config/t-gthr-vxworks5
-rw-r--r--libgcc/config/t-gthr-vxworksae7
-rw-r--r--libgcc/config/t-vxcrtstuff12
-rw-r--r--libgcc/config/t-vxworks3
-rw-r--r--libgcc/config/t-vxworks74
-rw-r--r--libgcc/config/t-vxworksae17
-rw-r--r--libgcc/config/vxcrtstuff.c132
-rw-r--r--libgcc/config/vxlib.c95
23 files changed, 1198 insertions, 334 deletions
diff --git a/libgcc/config/arc/t-arc b/libgcc/config/arc/t-arc
index 7b0956c..9e1b130 100644
--- a/libgcc/config/arc/t-arc
+++ b/libgcc/config/arc/t-arc
@@ -46,7 +46,6 @@ LIB2ADD = fp-bit.c dp-bit.c
dp-bit.c: $(srcdir)/fp-bit.c
echo '#ifndef __big_endian__' > dp-bit.c
- echo '#define FLOAT_BIT_ORDER_MISMATCH' >> dp-bit.c
echo '#endif' >> dp-bit.c
echo '#include "fp-bit.h"' >> dp-bit.c
echo '#include "config/arc/dp-hack.h"' >> dp-bit.c
@@ -55,7 +54,6 @@ dp-bit.c: $(srcdir)/fp-bit.c
fp-bit.c: $(srcdir)/fp-bit.c
echo '#define FLOAT' > fp-bit.c
echo '#ifndef __big_endian__' >> fp-bit.c
- echo '#define FLOAT_BIT_ORDER_MISMATCH' >> fp-bit.c
echo '#endif' >> fp-bit.c
echo '#include "config/arc/fp-hack.h"' >> fp-bit.c
cat $(srcdir)/fp-bit.c >> fp-bit.c
diff --git a/libgcc/config/arm/unwind-arm-vxworks.c b/libgcc/config/arm/unwind-arm-vxworks.c
index 03d753d..6fccf10 100644
--- a/libgcc/config/arm/unwind-arm-vxworks.c
+++ b/libgcc/config/arm/unwind-arm-vxworks.c
@@ -1,4 +1,4 @@
-/* Support for ARM EABI unwinding in VxWorks Downloadable Kernel Modules.
+/* Support for ARM EABI unwinding on VxWorks Downloadable Kernel Modules.
Copyright (C) 2017-2019 Free Software Foundation, Inc.
This file is free software; you can redistribute it and/or modify it
@@ -20,14 +20,59 @@
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
-#if defined(__vxworks) && !defined (__RTP__)
-/* Vxworks for ARM uses __gnu_Unwind_Find_exidx to retrieve the exception
- table for downloadable kernel modules. As those modules are only partially
- linked, the linker won't generate __exidx_start|end, but the two symbols
- are still used in alternate paths from unwind-arm-common.inc.
+/* The common unwinding code refers to __gnu_Unwind_Find_exidx and
+ __cxa_type_match symbols, which are not in VxWorks kernels on ARM,
+ now llvm based.
+
+ While the common code works just fine for RTPs thanks to weak references
+ and proper positioning of __exidx_start/end from linker scripts, we need
+ symbol definitions for kernel modules. */
+
+#ifndef __RTP__
+
+#include <private/moduleLibP.h>
+
+/* __gnu_Unwind_Find_exidx. See if we can use _func_moduleExidxGet to
+ refine whatever we have in __exidx_start and __exidx_end. */
+
+typedef struct
+{
+ UINT32 fnoffset;
+ UINT32 content;
+} __EIT_entry;
+
+extern __EIT_entry __exidx_start;
+extern __EIT_entry __exidx_end;
+
+__EIT_entry *
+__gnu_Unwind_Find_exidx (void *pc, int *nrec)
+{
+ __EIT_entry *pstart = 0;
+ __EIT_entry *pend = 0;
+
+ if (_func_moduleExidxGet != NULL)
+ _func_moduleExidxGet (pc,
+ (void *) &__exidx_start, (void *) &__exidx_end,
+ (void **) &pstart, (void **) &pend);
+
+ if (!pstart)
+ {
+ pstart = &__exidx_start;
+ pend = &__exidx_end;
+ }
+
+ *nrec = pend - pstart;
+
+ return pstart;
+}
+
+/* __cxa_type_match. A dummy version to be overridden by the libstdc++ one
+ when we link with it. */
+
+void * __attribute__((weak))
+__cxa_type_match ()
+{
+ return (void *) 0;
+}
- As we don't rely on them, but still need the symbols, we define dummy
- values here. */
-void *__exidx_start __attribute__((__visibility__ ("hidden")));
-void *__exidx_end __attribute__((__visibility__ ("hidden")));
#endif
diff --git a/libgcc/config/avr/t-avr b/libgcc/config/avr/t-avr
index c420c5d..e4f867b 100644
--- a/libgcc/config/avr/t-avr
+++ b/libgcc/config/avr/t-avr
@@ -112,10 +112,14 @@ LIB2FUNCS_EXCLUDE = \
_clrsbdi2 \
-# We do not have the DF type.
+ifeq ($(long_double_type_size),32)
+# We do not have the DF type.
+HOST_LIBGCC2_CFLAGS += -DDF=SF
+endif
+
# Most of the C functions in libgcc2 use almost all registers,
# so use -mcall-prologues for smaller code size.
-HOST_LIBGCC2_CFLAGS += -DDF=SF -Dinhibit_libc -mcall-prologues -Os
+HOST_LIBGCC2_CFLAGS += -Dinhibit_libc -mcall-prologues -Os
# Extra 16-bit integer functions.
intfuncs16 = _absvXX2 _addvXX3 _subvXX3 _mulvXX3 _negvXX2 _clrsbXX2
diff --git a/libgcc/config/avr/t-avrlibc b/libgcc/config/avr/t-avrlibc
index d2c8b87..34eca4f 100644
--- a/libgcc/config/avr/t-avrlibc
+++ b/libgcc/config/avr/t-avrlibc
@@ -64,3 +64,34 @@ LIB2FUNCS_EXCLUDE += \
_fixunssfsi _fixsfdi \
_fixunssfdi \
_floatdisf _floatundisf
+
+ifneq (,$(findstring avr,$(MULTISUBDIR)))
+
+# We are not in the avr2 (default) subdir, hence copying will work.
+# In default dir, copying won't work because the default multilib is
+# built after all the others.
+
+ifneq (,$(findstring double,$(MULTISUBDIR)))
+
+# We are in double{32|64}/libgcc or long-double{32|64}/libgcc:
+# Just copy from the [long ]double=float multilib; we would remove any DFmode
+# bits from this multilib variant, anyway, because the current assumption
+# is that avr-libc hosts *all* the IEEE-double stuff.
+
+LIB2FUNCS_EXCLUDE := %
+LIB1ASMFUNCS :=
+libgcc-objects :=
+libgcov-objects :=
+objects :=
+
+t-copy-libgcc.dep: $(srcdir)/config/avr/t-copy-libgcc
+ -rm -f libgcc.a
+ -rm -f libgcov.a
+ cp $< $@
+
+libgcc.a libgcov.a libgcc_tm.h: t-copy-libgcc.dep
+
+Makefile: t-copy-libgcc.dep
+
+endif
+endif
diff --git a/libgcc/config/avr/t-copy-libgcc b/libgcc/config/avr/t-copy-libgcc
new file mode 100644
index 0000000..d34cbde
--- /dev/null
+++ b/libgcc/config/avr/t-copy-libgcc
@@ -0,0 +1,13 @@
+# Only used with --with-avrlibc & (-mlong-double=64 | -mdouble=64)
+#
+# Inserted at the end of Makefile by magic[tm].
+# We need this *after* Makefile's rules so we can override them.
+
+libgcc.a: ../../libgcc/libgcc.a
+ cp $< $@
+
+libgcov.a: ../../libgcc/libgcov.a
+ @:
+ifeq ($(enable_gcov),yes)
+ cp $< $@
+endif
diff --git a/libgcc/config/gthr-vxworks-cond.c b/libgcc/config/gthr-vxworks-cond.c
new file mode 100644
index 0000000..0747a3d
--- /dev/null
+++ b/libgcc/config/gthr-vxworks-cond.c
@@ -0,0 +1,83 @@
+/* Copyright (C) 2002-2019 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* Threads compatibility routines for libgcc2 for VxWorks.
+
+ This file implements the GTHREAD_HAS_COND part of the interface
+ exposed by gthr-vxworks.h. */
+
+#include "gthr.h"
+#include <taskLib.h>
+
+/* --------------------------- Condition Variables ------------------------ */
+
+void
+__gthread_cond_init (__gthread_cond_t *cond)
+{
+ if (!cond)
+ return;
+ *cond = semBCreate (SEM_Q_FIFO, SEM_EMPTY);
+}
+
+int
+__gthread_cond_destroy (__gthread_cond_t *cond)
+{
+ if (!cond)
+ return ERROR;
+ return __CHECK_RESULT (semDelete (*cond));
+}
+
+int
+__gthread_cond_broadcast (__gthread_cond_t *cond)
+{
+ if (!cond)
+ return ERROR;
+
+ return __CHECK_RESULT (semFlush (*cond));
+}
+
+int
+__gthread_cond_wait (__gthread_cond_t *cond,
+ __gthread_mutex_t *mutex)
+{
+ if (!cond)
+ return ERROR;
+
+ if (!mutex)
+ return ERROR;
+
+ __RETURN_ERRNO_IF_NOT_OK (semGive (*mutex));
+
+ __RETURN_ERRNO_IF_NOT_OK (semTake (*cond, WAIT_FOREVER));
+
+ __RETURN_ERRNO_IF_NOT_OK (semTake (*mutex, WAIT_FOREVER));
+
+ return OK;
+}
+
+int
+__gthread_cond_wait_recursive (__gthread_cond_t *cond,
+ __gthread_recursive_mutex_t *mutex)
+{
+ return __gthread_cond_wait (cond, mutex);
+}
diff --git a/libgcc/config/gthr-vxworks-thread.c b/libgcc/config/gthr-vxworks-thread.c
new file mode 100644
index 0000000..3c880ba
--- /dev/null
+++ b/libgcc/config/gthr-vxworks-thread.c
@@ -0,0 +1,349 @@
+/* Copyright (C) 2002-2019 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* Threads compatibility routines for libgcc2 for VxWorks.
+
+ This file implements the GTHREAD_CXX0X part of the interface
+ exposed by gthr-vxworks.h, using APIs exposed by regular (!AE/653)
+ VxWorks kernels. */
+
+#include "gthr.h"
+#include <taskLib.h>
+
+#define __TIMESPEC_TO_NSEC(timespec) \
+ ((long long)timespec.tv_sec * 1000000000 + (long long)timespec.tv_nsec)
+
+#define __TIMESPEC_TO_TICKS(timespec) \
+ ((long long)(sysClkRateGet() * __TIMESPEC_TO_NSEC(timespec) + 999999999) \
+ / 1000000000)
+
+#ifdef __RTP__
+ void tls_delete_hook ();
+ #define __CALL_DELETE_HOOK(tcb) tls_delete_hook()
+#else
+ /* In kernel mode, we need to pass the TCB to task_delete_hook. The TCB is
+ the pointer to the WIND_TCB structure and is the ID of the task. */
+ void tls_delete_hook (void *TCB);
+ #define __CALL_DELETE_HOOK(tcb) tls_delete_hook((WIND_TCB *) ((tcb)->task_id))
+#endif
+
+/* -------------------- Timed Condition Variables --------------------- */
+
+int
+__gthread_cond_signal (__gthread_cond_t *cond)
+{
+ if (!cond)
+ return ERROR;
+
+ return __CHECK_RESULT (semGive (*cond));
+}
+
+int
+__gthread_cond_timedwait (__gthread_cond_t *cond,
+ __gthread_mutex_t *mutex,
+ const __gthread_time_t *abs_timeout)
+{
+ if (!cond)
+ return ERROR;
+
+ if (!mutex)
+ return ERROR;
+
+ if (!abs_timeout)
+ return ERROR;
+
+ struct timespec current;
+ if (clock_gettime (CLOCK_REALTIME, &current) == ERROR)
+ /* CLOCK_REALTIME is not supported. */
+ return ERROR;
+
+ const long long abs_timeout_ticks = __TIMESPEC_TO_TICKS ((*abs_timeout));
+ const long long current_ticks = __TIMESPEC_TO_TICKS (current);
+
+ long long waiting_ticks;
+
+ if (current_ticks < abs_timeout_ticks)
+ waiting_ticks = abs_timeout_ticks - current_ticks;
+ else
+ /* The point until we would need to wait is in the past,
+ no need to wait at all. */
+ waiting_ticks = 0;
+
+ /* We check that waiting_ticks can be safely casted as an int. */
+ if (waiting_ticks > INT_MAX)
+ waiting_ticks = INT_MAX;
+
+ __RETURN_ERRNO_IF_NOT_OK (semGive (*mutex));
+
+ __RETURN_ERRNO_IF_NOT_OK (semTake (*cond, waiting_ticks));
+
+ __RETURN_ERRNO_IF_NOT_OK (semTake (*mutex, WAIT_FOREVER));
+
+ return OK;
+}
+
+/* --------------------------- Timed Mutexes ------------------------------ */
+
+int
+__gthread_mutex_timedlock (__gthread_mutex_t *m,
+ const __gthread_time_t *abs_time)
+{
+ if (!m)
+ return ERROR;
+
+ if (!abs_time)
+ return ERROR;
+
+ struct timespec current;
+ if (clock_gettime (CLOCK_REALTIME, &current) == ERROR)
+ /* CLOCK_REALTIME is not supported. */
+ return ERROR;
+
+ const long long abs_timeout_ticks = __TIMESPEC_TO_TICKS ((*abs_time));
+ const long long current_ticks = __TIMESPEC_TO_TICKS (current);
+ long long waiting_ticks;
+
+ if (current_ticks < abs_timeout_ticks)
+ waiting_ticks = abs_timeout_ticks - current_ticks;
+ else
+ /* The point until we would need to wait is in the past,
+ no need to wait at all. */
+ waiting_ticks = 0;
+
+ /* Make sure that waiting_ticks can be safely casted as an int. */
+ if (waiting_ticks > INT_MAX)
+ waiting_ticks = INT_MAX;
+
+ return __CHECK_RESULT (semTake (*m, waiting_ticks));
+}
+
+int
+__gthread_recursive_mutex_timedlock (__gthread_recursive_mutex_t *mutex,
+ const __gthread_time_t *abs_timeout)
+{
+ return __gthread_mutex_timedlock ((__gthread_mutex_t *)mutex, abs_timeout);
+}
+
+/* ------------------------------ Threads --------------------------------- */
+
+/* Task control block initialization and destruction functions. */
+
+int
+__init_gthread_tcb (__gthread_t __tcb)
+{
+ if (!__tcb)
+ return ERROR;
+
+ __gthread_mutex_init (&(__tcb->return_value_available));
+ if (__tcb->return_value_available == SEM_ID_NULL)
+ return ERROR;
+
+ __gthread_mutex_init (&(__tcb->delete_ok));
+ if (__tcb->delete_ok == SEM_ID_NULL)
+ goto return_sem_delete;
+
+ /* We lock the two mutexes used for signaling. */
+ if (__gthread_mutex_lock (&(__tcb->delete_ok)) != OK)
+ goto delete_sem_delete;
+
+ if (__gthread_mutex_lock (&(__tcb->return_value_available)) != OK)
+ goto delete_sem_delete;
+
+ __tcb->task_id = TASK_ID_NULL;
+ return OK;
+
+delete_sem_delete:
+ semDelete (__tcb->delete_ok);
+return_sem_delete:
+ semDelete (__tcb->return_value_available);
+ return ERROR;
+}
+
+/* Here, we pass a pointer to a tcb to allow calls from
+ cleanup attributes. */
+void
+__delete_gthread_tcb (__gthread_t* __tcb)
+{
+ semDelete ((*__tcb)->return_value_available);
+ semDelete ((*__tcb)->delete_ok);
+ free (*__tcb);
+}
+
+/* This __gthread_t stores the address of the TCB malloc'ed in
+ __gthread_create. It is then accessible via __gthread_self(). */
+__thread __gthread_t __local_tcb = NULL;
+
+__gthread_t
+__gthread_self (void)
+{
+ if (!__local_tcb)
+ {
+ /* We are in the initial thread, we need to initialize the TCB. */
+ __local_tcb = malloc (sizeof (*__local_tcb));
+ if (!__local_tcb)
+ return NULL;
+
+ if (__init_gthread_tcb (__local_tcb) != OK)
+ {
+ __delete_gthread_tcb (&__local_tcb);
+ return NULL;
+ }
+ /* We do not set the mutexes in the structure as a thread is not supposed
+ to join or detach himself. */
+ __local_tcb->task_id = taskIdSelf ();
+ }
+ return __local_tcb;
+}
+
+int
+__task_wrapper (__gthread_t tcb, FUNCPTR __func, _Vx_usr_arg_t __args)
+{
+ if (!tcb)
+ return ERROR;
+
+ __local_tcb = tcb;
+
+ /* We use this variable to avoid memory leaks in the case where
+ the underlying function throws an exception. */
+ __attribute__ ((cleanup (__delete_gthread_tcb))) __gthread_t __tmp = tcb;
+
+ void *return_value = (void *) __func (__args);
+ tcb->return_value = return_value;
+
+ /* Call the destructors. */
+ __CALL_DELETE_HOOK (tcb);
+
+ /* Future calls of join() will be able to retrieve the return value. */
+ __gthread_mutex_unlock (&tcb->return_value_available);
+
+ /* We wait for the thread to be joined or detached. */
+ __gthread_mutex_lock (&(tcb->delete_ok));
+ __gthread_mutex_unlock (&(tcb->delete_ok));
+
+ /* Memory deallocation is done by the cleanup attribute of the tmp variable. */
+
+ return OK;
+}
+
+/* Proper gthreads API. */
+
+int
+__gthread_create (__gthread_t * __threadid, void *(*__func) (void *),
+ void *__args)
+{
+ if (!__threadid)
+ return ERROR;
+
+ int priority;
+ __RETURN_ERRNO_IF_NOT_OK (taskPriorityGet (taskIdSelf (), &priority));
+
+ int options;
+ __RETURN_ERRNO_IF_NOT_OK (taskOptionsGet (taskIdSelf (), &options));
+
+#if defined (__SPE__)
+ options |= VX_SPE_TASK;
+#else
+ options |= VX_FP_TASK;
+#endif
+ options &= VX_USR_TASK_OPTIONS;
+
+ int stacksize = 20 * 1024;
+
+ __gthread_t tcb = malloc (sizeof (*tcb));
+ if (!tcb)
+ return ERROR;
+
+ if (__init_gthread_tcb (tcb) != OK)
+ {
+ free (tcb);
+ return ERROR;
+ }
+
+ TASK_ID task_id = taskCreate (NULL,
+ priority, options, stacksize,
+ (FUNCPTR) & __task_wrapper,
+ (_Vx_usr_arg_t) tcb,
+ (_Vx_usr_arg_t) __func,
+ (_Vx_usr_arg_t) __args,
+ 0, 0, 0, 0, 0, 0, 0);
+
+ /* If taskCreate succeeds, task_id will be a valid TASK_ID and not zero. */
+ __RETURN_ERRNO_IF_NOT_OK (!task_id);
+
+ tcb->task_id = task_id;
+ *__threadid = tcb;
+
+ return __CHECK_RESULT (taskActivate (task_id));
+}
+
+int
+__gthread_equal (__gthread_t __t1, __gthread_t __t2)
+{
+ return (__t1 == __t2) ? OK : ERROR;
+}
+
+int
+__gthread_yield (void)
+{
+ return taskDelay (0);
+}
+
+int
+__gthread_join (__gthread_t __threadid, void **__value_ptr)
+{
+ if (!__threadid)
+ return ERROR;
+
+ /* A thread cannot join itself. */
+ if (__threadid->task_id == taskIdSelf ())
+ return ERROR;
+
+ /* Waiting for the task to set the return value. */
+ __gthread_mutex_lock (&__threadid->return_value_available);
+ __gthread_mutex_unlock (&__threadid->return_value_available);
+
+ if (__value_ptr)
+ *__value_ptr = __threadid->return_value;
+
+ /* The task will be safely be deleted. */
+ __gthread_mutex_unlock (&(__threadid->delete_ok));
+
+ __RETURN_ERRNO_IF_NOT_OK (taskWait (__threadid->task_id, WAIT_FOREVER));
+
+ return OK;
+}
+
+int
+__gthread_detach (__gthread_t __threadid)
+{
+ if (!__threadid)
+ return ERROR;
+
+ if (taskIdVerify (__threadid->task_id) != OK)
+ return ERROR;
+
+ /* The task will be safely be deleted. */
+ __gthread_mutex_unlock (&(__threadid->delete_ok));
+
+ return OK;
+}
diff --git a/libgcc/config/vxlib-tls.c b/libgcc/config/gthr-vxworks-tls.c
index b8d6907..cd5f7ac 100644
--- a/libgcc/config/vxlib-tls.c
+++ b/libgcc/config/gthr-vxworks-tls.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2002-2019 Free Software Foundation, Inc.
+/* Copyright (C) 2002-2018 Free Software Foundation, Inc.
Contributed by Zack Weinberg <zack@codesourcery.com>
This file is part of GCC.
@@ -23,21 +23,17 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
/* Threads compatibility routines for libgcc2 for VxWorks.
- These are out-of-line routines called from gthr-vxworks.h.
+ These are out-of-line routines called from gthr-vxworks.h.
This file provides the TLS related support routines, calling specific
- VxWorks kernel entry points for this purpose. The base VxWorks 5.x kernels
- don't feature these entry points, and we provide gthr_supp_vxw_5x.c as an
- option to fill this gap. Asking users to rebuild a kernel is not to be
- taken lightly, still, so we have isolated these routines from the rest of
- vxlib to ensure that the kernel dependencies are only dragged when really
- necessary. */
+ VxWorks kernel entry points for this purpose. */
#include "tconfig.h"
#include "tsystem.h"
#include "gthr.h"
#if defined(__GTHREADS)
+
#include <vxWorks.h>
#ifndef __RTP__
#include <vxLib.h>
@@ -46,31 +42,31 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#ifndef __RTP__
#include <taskHookLib.h>
#else
-# include <errno.h>
+#include <errno.h>
#endif
/* Thread-local storage.
- We reserve a field in the TCB to point to a dynamically allocated
- array which is used to store TLS values. A TLS key is simply an
- offset in this array. The exact location of the TCB field is not
- known to this code nor to vxlib.c -- all access to it indirects
- through the routines __gthread_get_tls_data and
- __gthread_set_tls_data, which are provided by the VxWorks kernel.
+ A gthread TLS key is simply an offset in an array, the address of which
+ we store in a single pointer field associated with the current task.
+
+ On VxWorks 7, we have direct support for __thread variables and use
+ such a variable as the pointer "field". On other versions, we resort
+ to __gthread_get_tls_data and __gthread_set_tls_data functions provided
+ by the kernel.
There is also a global array which records which keys are valid and
which have destructors.
- A task delete hook is installed to execute key destructors. The
- routines __gthread_enter_tls_dtor_context and
- __gthread_leave_tls_dtor_context, which are also provided by the
- kernel, ensure that it is safe to call free() on memory allocated
- by the task being deleted. (This is a no-op on VxWorks 5, but
- a major undertaking on AE.)
+ A task delete hook is installed to execute key destructors. The routines
+ __gthread_enter_tls_dtor_context and __gthread_leave_tls_dtor_context,
+ which are also provided by the kernel, ensure that it is safe to call
+ free() on memory allocated by the task being deleted. This is a no-op on
+ VxWorks 5, but a major undertaking on AE.
The task delete hook is only installed when at least one thread
has TLS data. This is a necessary precaution, to allow this module
- to be unloaded - a module with a hook cannot be removed.
to be unloaded - a module with a hook cannot be removed.
Since this interface is used to allocate only a small number of
keys, the table size is small and static, which simplifies the
@@ -95,21 +91,34 @@ static int self_owner;
it is only removed when unloading this module. */
static volatile int delete_hook_installed;
-/* kernel provided routines */
+/* TLS data access internal API. A straight __thread variable on VxWorks 7,
+ a pointer returned by kernel provided routines otherwise. */
+
+#ifdef __VXWORKS7__
+
+static __thread struct tls_data *__gthread_tls_data;
+
+#define VX_GET_TLS_DATA() __gthread_tls_data
+#define VX_SET_TLS_DATA(x) __gthread_tls_data = (x)
+
+#define VX_ENTER_TLS_DTOR()
+#define VX_LEAVE_TLS_DTOR()
+
+#else
+
extern void *__gthread_get_tls_data (void);
extern void __gthread_set_tls_data (void *data);
extern void __gthread_enter_tls_dtor_context (void);
extern void __gthread_leave_tls_dtor_context (void);
-#ifndef __RTP__
+#define VX_GET_TLS_DATA() __gthread_get_tls_data()
+#define VX_SET_TLS_DATA(x) __gthread_set_tls_data(x)
-extern void *__gthread_get_tsd_data (WIND_TCB *tcb);
-extern void __gthread_set_tsd_data (WIND_TCB *tcb, void *data);
-extern void __gthread_enter_tsd_dtor_context (WIND_TCB *tcb);
-extern void __gthread_leave_tsd_dtor_context (WIND_TCB *tcb);
+#define VX_ENTER_TLS_DTOR() __gthread_enter_tls_dtor_context ()
+#define VX_LEAVE_TLS_DTOR() __gthread_leave_tls_dtor_context ()
-#endif /* __RTP__ */
+#endif /* __VXWORKS7__ */
/* This is a global structure which records all of the active keys.
@@ -138,7 +147,7 @@ struct tls_keys
key is valid. */
static struct tls_keys tls_keys =
{
- { 0, 0, 0, 0 },
+ { NULL, NULL, NULL, NULL },
{ 1, 1, 1, 1 }
};
@@ -157,28 +166,17 @@ static __gthread_once_t tls_init_guard = __GTHREAD_ONCE_INIT;
count protects us from calling a stale destructor. It does
need to read tls_keys.dtor[key] atomically. */
-static void
+void
tls_delete_hook (void *tcb ATTRIBUTE_UNUSED)
{
struct tls_data *data;
__gthread_key_t key;
-#ifdef __RTP__
- data = __gthread_get_tls_data ();
-#else
- /* In kernel mode, we can be called in the context of the thread
- doing the killing, so must use the TCB to determine the data of
- the thread being killed. */
- data = __gthread_get_tsd_data (tcb);
-#endif
-
+ data = VX_GET_TLS_DATA();
+
if (data && data->owner == &self_owner)
{
-#ifdef __RTP__
- __gthread_enter_tls_dtor_context ();
-#else
- __gthread_enter_tsd_dtor_context (tcb);
-#endif
+ VX_ENTER_TLS_DTOR();
for (key = 0; key < MAX_KEYS; key++)
{
if (data->generation[key] == tls_keys.generation[key])
@@ -190,19 +188,11 @@ tls_delete_hook (void *tcb ATTRIBUTE_UNUSED)
}
}
free (data);
-#ifdef __RTP__
- __gthread_leave_tls_dtor_context ();
-#else
- __gthread_leave_tsd_dtor_context (tcb);
-#endif
-#ifdef __RTP__
- __gthread_set_tls_data (0);
-#else
- __gthread_set_tsd_data (tcb, 0);
-#endif
+ VX_LEAVE_TLS_DTOR();
+ VX_SET_TLS_DATA(NULL);
}
-}
+}
/* Initialize global data used by the TLS system. */
static void
@@ -303,7 +293,7 @@ __gthread_getspecific (__gthread_key_t key)
if (key >= MAX_KEYS)
return 0;
- data = __gthread_get_tls_data ();
+ data = VX_GET_TLS_DATA();
if (!data)
return 0;
@@ -332,7 +322,8 @@ __gthread_setspecific (__gthread_key_t key, void *value)
if (key >= MAX_KEYS)
return EINVAL;
- data = __gthread_get_tls_data ();
+ data = VX_GET_TLS_DATA();
+
if (!data)
{
if (!delete_hook_installed)
@@ -354,7 +345,8 @@ __gthread_setspecific (__gthread_key_t key, void *value)
memset (data, 0, sizeof (struct tls_data));
data->owner = &self_owner;
- __gthread_set_tls_data (data);
+
+ VX_SET_TLS_DATA(data);
}
generation = tls_keys.generation[key];
diff --git a/libgcc/config/gthr-vxworks.c b/libgcc/config/gthr-vxworks.c
new file mode 100644
index 0000000..ddc3593
--- /dev/null
+++ b/libgcc/config/gthr-vxworks.c
@@ -0,0 +1,87 @@
+/* Copyright (C) 2002-2019 Free Software Foundation, Inc.
+ Contributed by Zack Weinberg <zack@codesourcery.com>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* Threads compatibility routines for libgcc2 for VxWorks.
+
+ This file implements the init-once service exposed by gthr-vxworks.h. */
+
+#include "tconfig.h"
+#include "tsystem.h"
+#include "gthr.h"
+
+#if defined(__GTHREADS)
+
+#include <vxWorks.h>
+
+#ifndef __RTP__
+# include <vxLib.h>
+# include <taskHookLib.h>
+#else /* __RTP__ */
+# include <errno.h>
+#endif /* __RTP__ */
+
+/* ----------------------------- Init-once ------------------------------- */
+
+static void
+__release (__gthread_once_t ** __guard)
+{
+ (*__guard)->busy = 0;
+}
+
+int
+__gthread_once (__gthread_once_t * __guard, void (*__func) (void))
+{
+ if (__guard->done)
+ return 0;
+
+ /* Busy-wait until we have exclusive access to the state. Check if
+ another thread managed to perform the init call in the interim. */
+
+ while (!__TAS(&__guard->busy))
+ {
+ if (__guard->done)
+ return 0;
+ taskDelay (1);
+ }
+
+ if (!__guard->done)
+ {
+#ifndef __USING_SJLJ_EXCEPTIONS__
+ /* Setup a cleanup to release the guard when __func() throws an
+ exception. We cannot use this with SJLJ exceptions as
+ Unwind_Register calls __gthread_once, leading to an infinite
+ recursion. */
+ __attribute__ ((cleanup (__release)))
+ __gthread_once_t *__temp = __guard;
+#endif
+
+ __func ();
+ __guard->done = 1;
+ }
+
+ __release(&__guard);
+ return 0;
+}
+
+#endif /* __GTHREADS */
diff --git a/libgcc/config/gthr-vxworks.h b/libgcc/config/gthr-vxworks.h
index c9214a5..7e3779a 100644
--- a/libgcc/config/gthr-vxworks.h
+++ b/libgcc/config/gthr-vxworks.h
@@ -1,6 +1,6 @@
/* Threads compatibility routines for libgcc2 and libobjc for VxWorks. */
/* Compile this one with gcc. */
-/* Copyright (C) 1997-2019 Free Software Foundation, Inc.
+/* Copyright (C) 1997-2018 Free Software Foundation, Inc.
Contributed by Mike Stump <mrs@wrs.com>.
This file is part of GCC.
@@ -33,139 +33,295 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
#include "gthr-posix.h"
#else
-#ifdef __cplusplus
-#define UNUSED(x)
-#else
-#define UNUSED(x) x __attribute__((__unused__))
+
+#include <vxWorks.h>
+#include <version.h>
+
+/* Conditional compilation directives are easier to read when they fit on a
+ single line, which is helped by macros with shorter names. */
+#define _VXW_MAJOR _WRS_VXWORKS_MAJOR
+#define _VXW_MINOR _WRS_VXWORKS_MINOR
+#define _VXW_PRE_69 (_VXW_MAJOR < 6 || (_VXW_MAJOR == 6 && _VXW_MINOR < 9))
+
+/* Some VxWorks headers profusely use typedefs of a pointer to a function with
+ undefined number of arguments. */
+#pragma GCC diagnostic push
+ #pragma GCC diagnostic ignored "-Wstrict-prototypes"
+ #include <semLib.h>
+#pragma GCC diagnostic pop
+
+#include <errnoLib.h>
+
+
+/* --------------------- Test & Set/Swap internal API --------------------- */
+
+/* We use a bare atomic primitive with busy loops to handle mutual exclusion.
+ Inefficient, but reliable. The actual primitive used depends on the mode
+ (RTP vs Kernel) and the version of VxWorks. We define a macro and a type
+ here, for reuse without conditionals cluttering in the code afterwards. */
+
+/* RTP, pre 6.9. */
+
+#if defined(__RTP__) && _VXW_PRE_69
+
+#define __TAS(x) vxCas ((x), 0, 1)
+typedef volatile unsigned char __vx_tas_t;
+
+#endif
+
+/* RTP, 6.9 and beyond. */
+
+#if defined(__RTP__) && !_VXW_PRE_69
+
+#define __TAS(x) vxAtomicCas ((x), 0, 1)
+typedef atomic_t __vx_tas_t;
+
+#include <vxAtomicLib.h>
+
+#endif
+
+/* Kernel */
+
+#if !defined(__RTP__)
+
+#define __TAS(x) vxTas (x)
+typedef volatile unsigned char __vx_tas_t;
+
#endif
#ifdef __cplusplus
extern "C" {
#endif
+/* ------------------------ Base __GTHREADS support ----------------------- */
+
#define __GTHREADS 1
#define __gthread_active_p() 1
/* Mutexes are easy, except that they need to be initialized at runtime. */
-#include <semLib.h>
-
-typedef SEM_ID __gthread_mutex_t;
/* All VxWorks mutexes are recursive. */
+typedef SEM_ID __gthread_mutex_t;
typedef SEM_ID __gthread_recursive_mutex_t;
-#define __GTHREAD_MUTEX_INIT_FUNCTION __gthread_mutex_init_function
-#define __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION __gthread_recursive_mutex_init_function
+#define __GTHREAD_MUTEX_INIT_FUNCTION __gthread_mutex_init
+#define __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION __gthread_recursive_mutex_init
+
+#define __CHECK_RESULT(result) (((result) == OK) ? OK : errnoGet())
+
+/* If a call to the VxWorks API fails, we must propagate the errno value. */
+#define __RETURN_ERRNO_IF_NOT_OK(exp) if ((exp) != OK) return errnoGet()
+
+/* Non re-entrant mutex implementation. Libstdc++ expects the default
+ gthread mutex to be non reentrant. */
static inline void
-__gthread_mutex_init_function (__gthread_mutex_t *mutex)
+__gthread_mutex_init (__gthread_mutex_t * __mutex)
{
- *mutex = semMCreate (SEM_Q_PRIORITY | SEM_INVERSION_SAFE | SEM_DELETE_SAFE);
+ if (!__mutex)
+ return;
+ *__mutex = semBCreate (SEM_Q_PRIORITY, SEM_FULL);
}
static inline int
-__gthread_mutex_destroy (__gthread_mutex_t *mutex)
+__gthread_mutex_destroy (__gthread_mutex_t * __mutex)
{
- semDelete(*mutex);
- return 0;
+ if (!__mutex)
+ return ERROR;
+ return __CHECK_RESULT (semDelete (*__mutex));
}
static inline int
-__gthread_mutex_lock (__gthread_mutex_t *mutex)
+__gthread_mutex_lock (__gthread_mutex_t * __mutex)
{
- return semTake (*mutex, WAIT_FOREVER);
+ if (!__mutex)
+ return ERROR;
+ return __CHECK_RESULT (semTake(*__mutex, WAIT_FOREVER));
}
static inline int
-__gthread_mutex_trylock (__gthread_mutex_t *mutex)
+__gthread_mutex_trylock (__gthread_mutex_t * __mutex)
{
- return semTake (*mutex, NO_WAIT);
+ if (!__mutex)
+ return ERROR;
+ return __CHECK_RESULT (semTake (*__mutex, NO_WAIT));
}
static inline int
-__gthread_mutex_unlock (__gthread_mutex_t *mutex)
+__gthread_mutex_unlock (__gthread_mutex_t * __mutex)
{
- return semGive (*mutex);
+ if (!__mutex)
+ return ERROR;
+ return __CHECK_RESULT (semGive (*__mutex));
}
+/* Recursive mutex implementation. The only change is that we use semMCreate()
+ instead of semBCreate(). */
+
static inline void
-__gthread_recursive_mutex_init_function (__gthread_recursive_mutex_t *mutex)
+__gthread_recursive_mutex_init (__gthread_recursive_mutex_t * __mutex)
{
- __gthread_mutex_init_function (mutex);
+ if (!__mutex)
+ return;
+ *__mutex =
+ semMCreate (SEM_Q_PRIORITY | SEM_INVERSION_SAFE | SEM_DELETE_SAFE);
}
static inline int
-__gthread_recursive_mutex_lock (__gthread_recursive_mutex_t *mutex)
+__gthread_recursive_mutex_destroy (__gthread_recursive_mutex_t * __mutex)
{
- return __gthread_mutex_lock (mutex);
+ return __gthread_mutex_destroy (__mutex);
}
static inline int
-__gthread_recursive_mutex_trylock (__gthread_recursive_mutex_t *mutex)
+__gthread_recursive_mutex_lock (__gthread_recursive_mutex_t * __mutex)
{
- return __gthread_mutex_trylock (mutex);
+ return __gthread_mutex_lock (__mutex);
}
static inline int
-__gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t *mutex)
+__gthread_recursive_mutex_trylock (__gthread_recursive_mutex_t * __mutex)
{
- return __gthread_mutex_unlock (mutex);
+ return __gthread_mutex_trylock (__mutex);
}
static inline int
-__gthread_recursive_mutex_destroy (__gthread_recursive_mutex_t *__mutex)
+__gthread_recursive_mutex_unlock (__gthread_recursive_mutex_t * __mutex)
{
- return __gthread_mutex_destroy (__mutex);
+ return __gthread_mutex_unlock (__mutex);
}
-/* pthread_once is complicated enough that it's implemented
- out-of-line. See config/vxlib.c. */
-
typedef struct
{
-#if !defined(__RTP__)
+ /* PPC's test-and-set kernel mode implementation requires a pointer aligned
+ object, of which it only sets the first byte. We use padding in addition
+     to an alignment request here to maximise the factors leading to the
+ desired actual alignment choice by the compiler. */
#if defined(__PPC__)
- __attribute ((aligned (__alignof (unsigned))))
-#endif
- volatile unsigned char busy;
+ __attribute ((aligned (__alignof__ (void *))))
#endif
+
+ __vx_tas_t busy;
volatile unsigned char done;
+
#if !defined(__RTP__) && defined(__PPC__)
- /* PPC's test-and-set implementation requires a 4 byte aligned
- object, of which it only sets the first byte. We use padding
- here, in order to maintain some amount of backwards
- compatibility. Without this padding, gthread_once objects worked
- by accident because they happen to be static objects and the ppc
- port automatically increased their alignment to 4 bytes. */
unsigned char pad1;
unsigned char pad2;
#endif
-}
-__gthread_once_t;
-
-#if defined (__RTP__)
-# define __GTHREAD_ONCE_INIT { 0 }
-#elif defined (__PPC__)
-# define __GTHREAD_ONCE_INIT { 0, 0, 0, 0 }
-#else
-# define __GTHREAD_ONCE_INIT { 0, 0 }
+#if !defined(__RTP__) && defined(__PPC64__)
+ unsigned char pad3;
+ unsigned char pad4;
+ unsigned char pad5;
+ unsigned char pad6;
#endif
+} __gthread_once_t;
+
+#define __GTHREAD_ONCE_INIT { 0 }
extern int __gthread_once (__gthread_once_t *__once, void (*__func)(void));
-/* Thread-specific data requires a great deal of effort, since VxWorks
- is not really set up for it. See config/vxlib.c for the gory
- details. All the TSD routines are sufficiently complex that they
+/* All the TSD routines are sufficiently complex that they
need to be implemented out of line. */
typedef unsigned int __gthread_key_t;
-extern int __gthread_key_create (__gthread_key_t *__keyp, void (*__dtor)(void *));
+extern int __gthread_key_create (__gthread_key_t *__keyp,
+ void (*__dtor)(void *));
extern int __gthread_key_delete (__gthread_key_t __key);
extern void *__gthread_getspecific (__gthread_key_t __key);
extern int __gthread_setspecific (__gthread_key_t __key, void *__ptr);
-#undef UNUSED
+/* ------------------ Base condition variables support ------------------- */
+
+#define __GTHREAD_HAS_COND 1
+
+typedef SEM_ID __gthread_cond_t;
+
+#define __GTHREAD_COND_INIT_FUNCTION __gthread_cond_init
+
+/* Condition variable declarations. */
+
+extern void __gthread_cond_init (__gthread_cond_t *cond);
+
+extern int __gthread_cond_destroy (__gthread_cond_t *cond);
+
+extern int __gthread_cond_broadcast (__gthread_cond_t *cond);
+
+extern int __gthread_cond_wait (__gthread_cond_t *cond,
+ __gthread_mutex_t *mutex);
+
+extern int __gthread_cond_wait_recursive (__gthread_cond_t *cond,
+ __gthread_recursive_mutex_t *mutex);
+
+/* ----------------------- C++0x thread support ------------------------- */
+
+/* We do not support C++0x threads on VxWorks 653, which we can
+ recognize by VTHREADS being defined. */
+
+#ifndef VTHREADS
+
+#define __GTHREADS_CXX0X 1
+
+#include <limits.h>
+#include <time.h>
+#include <tickLib.h>
+#include <sysLib.h>
+#include <version.h>
+
+typedef struct
+{
+ TASK_ID task_id;
+ void *return_value;
+
+ /* This mutex is used to block in join() while the return value is
+ unavailable. */
+ __gthread_mutex_t return_value_available;
+
+ /* Before freeing the structure in the task wrapper, we need to wait until
+ join() or detach() are called on that thread. */
+ __gthread_mutex_t delete_ok;
+} __gthread_tcb;
+
+typedef __gthread_tcb *__gthread_t;
+
+/* Typedefs specific to different vxworks versions. */
+#if _VXW_PRE_69
+ typedef int _Vx_usr_arg_t;
+ #define TASK_ID_NULL ((TASK_ID)NULL)
+ #define SEM_ID_NULL ((SEM_ID)NULL)
+#endif
+
+typedef struct timespec __gthread_time_t;
+
+/* Timed mutex lock declarations. */
+
+extern int __gthread_mutex_timedlock (__gthread_mutex_t *m,
+ const __gthread_time_t *abs_time);
+
+extern int __gthread_recursive_mutex_timedlock
+ (__gthread_recursive_mutex_t *mutex,
+ const __gthread_time_t *abs_timeout);
+
+/* Timed condition variable declarations. */
+
+extern int __gthread_cond_signal (__gthread_cond_t *cond);
+extern int __gthread_cond_timedwait (__gthread_cond_t *cond,
+ __gthread_mutex_t *mutex,
+ const __gthread_time_t *abs_timeout);
+
+/* gthreads declarations. */
+
+extern int __gthread_equal (__gthread_t t1, __gthread_t t2);
+extern int __gthread_yield (void);
+extern int __gthread_create (__gthread_t *__threadid,
+ void *(*__func) (void*),
+ void *__args);
+extern int __gthread_join (__gthread_t thread, void **value_ptr);
+extern int __gthread_detach (__gthread_t thread);
+
+extern __gthread_t __gthread_self (void);
+
+#endif
#ifdef __cplusplus
}
diff --git a/libgcc/config/m68k/linux-unwind.h b/libgcc/config/m68k/linux-unwind.h
index fbe0d47..57c354e 100644
--- a/libgcc/config/m68k/linux-unwind.h
+++ b/libgcc/config/m68k/linux-unwind.h
@@ -37,7 +37,7 @@ struct uw_ucontext {
stack_t uc_stack;
mcontext_t uc_mcontext;
unsigned long uc_filler[80];
- __sigset_t uc_sigmask;
+ sigset_t uc_sigmask;
};
#define MD_FALLBACK_FRAME_STATE_FOR m68k_fallback_frame_state
diff --git a/libgcc/config/msp430/t-msp430 b/libgcc/config/msp430/t-msp430
index 17d85b6..72ae93a 100644
--- a/libgcc/config/msp430/t-msp430
+++ b/libgcc/config/msp430/t-msp430
@@ -42,6 +42,12 @@ LIB2ADD = \
HOST_LIBGCC2_CFLAGS += -Os -ffunction-sections -fdata-sections -mhwmult=none
+crtbegin_no_eh.o: $(srcdir)/crtstuff.c
+ $(crt_compile) -U__LIBGCC_EH_FRAME_SECTION_NAME__ -c $< -DCRT_BEGIN
+
+crtend_no_eh.o: $(srcdir)/crtstuff.c
+ $(crt_compile) -U__LIBGCC_EH_FRAME_SECTION_NAME__ -c $< -DCRT_END
+
mpy.o: $(srcdir)/config/msp430/mpy.c
$(gcc_compile) $< -c
diff --git a/libgcc/config/pa/linux-atomic.c b/libgcc/config/pa/linux-atomic.c
index 28b41a7..4cd69f7 100644
--- a/libgcc/config/pa/linux-atomic.c
+++ b/libgcc/config/pa/linux-atomic.c
@@ -41,7 +41,7 @@ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
/* Kernel helper for compare-and-exchange a 32-bit value. */
static inline long
-__kernel_cmpxchg (int *mem, int oldval, int newval)
+__kernel_cmpxchg (volatile void *mem, int oldval, int newval)
{
register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
register int lws_old asm("r25") = oldval;
@@ -54,20 +54,18 @@ __kernel_cmpxchg (int *mem, int oldval, int newval)
: "i" (LWS_CAS), "r" (lws_mem), "r" (lws_old), "r" (lws_new)
: "r1", "r20", "r22", "r23", "r29", "r31", "memory"
);
- if (__builtin_expect (lws_errno == -EFAULT || lws_errno == -ENOSYS, 0))
- __builtin_trap ();
/* If the kernel LWS call succeeded (lws_errno == 0), lws_ret contains
the old value from memory. If this value is equal to OLDVAL, the
new value was written to memory. If not, return -EBUSY. */
if (!lws_errno && lws_ret != oldval)
- lws_errno = -EBUSY;
+ return -EBUSY;
return lws_errno;
}
static inline long
-__kernel_cmpxchg2 (void *mem, const void *oldval, const void *newval,
+__kernel_cmpxchg2 (volatile void *mem, const void *oldval, const void *newval,
int val_size)
{
register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
@@ -88,9 +86,6 @@ __kernel_cmpxchg2 (void *mem, const void *oldval, const void *newval,
if (__builtin_expect (lws_ret == 0, 1))
return 0;
- if (__builtin_expect (lws_errno == -EFAULT || lws_errno == -ENOSYS, 0))
- __builtin_trap ();
-
/* If the kernel LWS call fails with no error, return -EBUSY */
if (__builtin_expect (!lws_errno, 0))
return -EBUSY;
@@ -108,13 +103,13 @@ __kernel_cmpxchg2 (void *mem, const void *oldval, const void *newval,
#define FETCH_AND_OP_2(OP, PFX_OP, INF_OP, TYPE, WIDTH, INDEX) \
TYPE HIDDEN \
- __sync_fetch_and_##OP##_##WIDTH (TYPE *ptr, TYPE val) \
+ __sync_fetch_and_##OP##_##WIDTH (volatile void *ptr, TYPE val) \
{ \
TYPE tmp, newval; \
long failure; \
\
do { \
- tmp = __atomic_load_n (ptr, __ATOMIC_RELAXED); \
+ tmp = __atomic_load_n ((volatile TYPE *)ptr, __ATOMIC_RELAXED); \
newval = PFX_OP (tmp INF_OP val); \
failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, INDEX); \
} while (failure != 0); \
@@ -122,36 +117,36 @@ __kernel_cmpxchg2 (void *mem, const void *oldval, const void *newval,
return tmp; \
}
-FETCH_AND_OP_2 (add, , +, long long, 8, 3)
-FETCH_AND_OP_2 (sub, , -, long long, 8, 3)
-FETCH_AND_OP_2 (or, , |, long long, 8, 3)
-FETCH_AND_OP_2 (and, , &, long long, 8, 3)
-FETCH_AND_OP_2 (xor, , ^, long long, 8, 3)
-FETCH_AND_OP_2 (nand, ~, &, long long, 8, 3)
-
-FETCH_AND_OP_2 (add, , +, short, 2, 1)
-FETCH_AND_OP_2 (sub, , -, short, 2, 1)
-FETCH_AND_OP_2 (or, , |, short, 2, 1)
-FETCH_AND_OP_2 (and, , &, short, 2, 1)
-FETCH_AND_OP_2 (xor, , ^, short, 2, 1)
-FETCH_AND_OP_2 (nand, ~, &, short, 2, 1)
-
-FETCH_AND_OP_2 (add, , +, signed char, 1, 0)
-FETCH_AND_OP_2 (sub, , -, signed char, 1, 0)
-FETCH_AND_OP_2 (or, , |, signed char, 1, 0)
-FETCH_AND_OP_2 (and, , &, signed char, 1, 0)
-FETCH_AND_OP_2 (xor, , ^, signed char, 1, 0)
-FETCH_AND_OP_2 (nand, ~, &, signed char, 1, 0)
+FETCH_AND_OP_2 (add, , +, long long unsigned int, 8, 3)
+FETCH_AND_OP_2 (sub, , -, long long unsigned int, 8, 3)
+FETCH_AND_OP_2 (or, , |, long long unsigned int, 8, 3)
+FETCH_AND_OP_2 (and, , &, long long unsigned int, 8, 3)
+FETCH_AND_OP_2 (xor, , ^, long long unsigned int, 8, 3)
+FETCH_AND_OP_2 (nand, ~, &, long long unsigned int, 8, 3)
+
+FETCH_AND_OP_2 (add, , +, short unsigned int, 2, 1)
+FETCH_AND_OP_2 (sub, , -, short unsigned int, 2, 1)
+FETCH_AND_OP_2 (or, , |, short unsigned int, 2, 1)
+FETCH_AND_OP_2 (and, , &, short unsigned int, 2, 1)
+FETCH_AND_OP_2 (xor, , ^, short unsigned int, 2, 1)
+FETCH_AND_OP_2 (nand, ~, &, short unsigned int, 2, 1)
+
+FETCH_AND_OP_2 (add, , +, unsigned char, 1, 0)
+FETCH_AND_OP_2 (sub, , -, unsigned char, 1, 0)
+FETCH_AND_OP_2 (or, , |, unsigned char, 1, 0)
+FETCH_AND_OP_2 (and, , &, unsigned char, 1, 0)
+FETCH_AND_OP_2 (xor, , ^, unsigned char, 1, 0)
+FETCH_AND_OP_2 (nand, ~, &, unsigned char, 1, 0)
#define OP_AND_FETCH_2(OP, PFX_OP, INF_OP, TYPE, WIDTH, INDEX) \
TYPE HIDDEN \
- __sync_##OP##_and_fetch_##WIDTH (TYPE *ptr, TYPE val) \
+ __sync_##OP##_and_fetch_##WIDTH (volatile void *ptr, TYPE val) \
{ \
TYPE tmp, newval; \
long failure; \
\
do { \
- tmp = __atomic_load_n (ptr, __ATOMIC_RELAXED); \
+ tmp = __atomic_load_n ((volatile TYPE *)ptr, __ATOMIC_RELAXED); \
newval = PFX_OP (tmp INF_OP val); \
failure = __kernel_cmpxchg2 (ptr, &tmp, &newval, INDEX); \
} while (failure != 0); \
@@ -159,36 +154,37 @@ FETCH_AND_OP_2 (nand, ~, &, signed char, 1, 0)
return PFX_OP (tmp INF_OP val); \
}
-OP_AND_FETCH_2 (add, , +, long long, 8, 3)
-OP_AND_FETCH_2 (sub, , -, long long, 8, 3)
-OP_AND_FETCH_2 (or, , |, long long, 8, 3)
-OP_AND_FETCH_2 (and, , &, long long, 8, 3)
-OP_AND_FETCH_2 (xor, , ^, long long, 8, 3)
-OP_AND_FETCH_2 (nand, ~, &, long long, 8, 3)
-
-OP_AND_FETCH_2 (add, , +, short, 2, 1)
-OP_AND_FETCH_2 (sub, , -, short, 2, 1)
-OP_AND_FETCH_2 (or, , |, short, 2, 1)
-OP_AND_FETCH_2 (and, , &, short, 2, 1)
-OP_AND_FETCH_2 (xor, , ^, short, 2, 1)
-OP_AND_FETCH_2 (nand, ~, &, short, 2, 1)
-
-OP_AND_FETCH_2 (add, , +, signed char, 1, 0)
-OP_AND_FETCH_2 (sub, , -, signed char, 1, 0)
-OP_AND_FETCH_2 (or, , |, signed char, 1, 0)
-OP_AND_FETCH_2 (and, , &, signed char, 1, 0)
-OP_AND_FETCH_2 (xor, , ^, signed char, 1, 0)
-OP_AND_FETCH_2 (nand, ~, &, signed char, 1, 0)
+OP_AND_FETCH_2 (add, , +, long long unsigned int, 8, 3)
+OP_AND_FETCH_2 (sub, , -, long long unsigned int, 8, 3)
+OP_AND_FETCH_2 (or, , |, long long unsigned int, 8, 3)
+OP_AND_FETCH_2 (and, , &, long long unsigned int, 8, 3)
+OP_AND_FETCH_2 (xor, , ^, long long unsigned int, 8, 3)
+OP_AND_FETCH_2 (nand, ~, &, long long unsigned int, 8, 3)
+
+OP_AND_FETCH_2 (add, , +, short unsigned int, 2, 1)
+OP_AND_FETCH_2 (sub, , -, short unsigned int, 2, 1)
+OP_AND_FETCH_2 (or, , |, short unsigned int, 2, 1)
+OP_AND_FETCH_2 (and, , &, short unsigned int, 2, 1)
+OP_AND_FETCH_2 (xor, , ^, short unsigned int, 2, 1)
+OP_AND_FETCH_2 (nand, ~, &, short unsigned int, 2, 1)
+
+OP_AND_FETCH_2 (add, , +, unsigned char, 1, 0)
+OP_AND_FETCH_2 (sub, , -, unsigned char, 1, 0)
+OP_AND_FETCH_2 (or, , |, unsigned char, 1, 0)
+OP_AND_FETCH_2 (and, , &, unsigned char, 1, 0)
+OP_AND_FETCH_2 (xor, , ^, unsigned char, 1, 0)
+OP_AND_FETCH_2 (nand, ~, &, unsigned char, 1, 0)
#define FETCH_AND_OP_WORD(OP, PFX_OP, INF_OP) \
- int HIDDEN \
- __sync_fetch_and_##OP##_4 (int *ptr, int val) \
+ unsigned int HIDDEN \
+ __sync_fetch_and_##OP##_4 (volatile void *ptr, unsigned int val) \
{ \
- int tmp; \
+ unsigned int tmp; \
long failure; \
\
do { \
- tmp = __atomic_load_n (ptr, __ATOMIC_RELAXED); \
+ tmp = __atomic_load_n ((volatile unsigned int *)ptr, \
+ __ATOMIC_RELAXED); \
failure = __kernel_cmpxchg (ptr, tmp, PFX_OP (tmp INF_OP val)); \
} while (failure != 0); \
\
@@ -203,14 +199,15 @@ FETCH_AND_OP_WORD (xor, , ^)
FETCH_AND_OP_WORD (nand, ~, &)
#define OP_AND_FETCH_WORD(OP, PFX_OP, INF_OP) \
- int HIDDEN \
- __sync_##OP##_and_fetch_4 (int *ptr, int val) \
+ unsigned int HIDDEN \
+ __sync_##OP##_and_fetch_4 (volatile void *ptr, unsigned int val) \
{ \
- int tmp; \
+ unsigned int tmp; \
long failure; \
\
do { \
- tmp = __atomic_load_n (ptr, __ATOMIC_RELAXED); \
+ tmp = __atomic_load_n ((volatile unsigned int *)ptr, \
+ __ATOMIC_RELAXED); \
failure = __kernel_cmpxchg (ptr, tmp, PFX_OP (tmp INF_OP val)); \
} while (failure != 0); \
\
@@ -228,7 +225,7 @@ typedef unsigned char bool;
#define COMPARE_AND_SWAP_2(TYPE, WIDTH, INDEX) \
TYPE HIDDEN \
- __sync_val_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval, \
+ __sync_val_compare_and_swap_##WIDTH (volatile void *ptr, TYPE oldval, \
TYPE newval) \
{ \
TYPE actual_oldval; \
@@ -236,7 +233,8 @@ typedef unsigned char bool;
\
while (1) \
{ \
- actual_oldval = __atomic_load_n (ptr, __ATOMIC_RELAXED); \
+ actual_oldval = __atomic_load_n ((volatile TYPE *)ptr, \
+ __ATOMIC_RELAXED); \
\
if (__builtin_expect (oldval != actual_oldval, 0)) \
return actual_oldval; \
@@ -248,27 +246,29 @@ typedef unsigned char bool;
} \
} \
\
- bool HIDDEN \
- __sync_bool_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval, \
- TYPE newval) \
+ _Bool HIDDEN \
+ __sync_bool_compare_and_swap_##WIDTH (volatile void *ptr, \
+ TYPE oldval, TYPE newval) \
{ \
long failure = __kernel_cmpxchg2 (ptr, &oldval, &newval, INDEX); \
return (failure == 0); \
}
-COMPARE_AND_SWAP_2 (long long, 8, 3)
-COMPARE_AND_SWAP_2 (short, 2, 1)
-COMPARE_AND_SWAP_2 (char, 1, 0)
+COMPARE_AND_SWAP_2 (long long unsigned int, 8, 3)
+COMPARE_AND_SWAP_2 (short unsigned int, 2, 1)
+COMPARE_AND_SWAP_2 (unsigned char, 1, 0)
-int HIDDEN
-__sync_val_compare_and_swap_4 (int *ptr, int oldval, int newval)
+unsigned int HIDDEN
+__sync_val_compare_and_swap_4 (volatile void *ptr, unsigned int oldval,
+ unsigned int newval)
{
long fail;
- int actual_oldval;
+ unsigned int actual_oldval;
while (1)
{
- actual_oldval = __atomic_load_n (ptr, __ATOMIC_RELAXED);
+ actual_oldval = __atomic_load_n ((volatile unsigned int *)ptr,
+ __ATOMIC_RELAXED);
if (__builtin_expect (oldval != actual_oldval, 0))
return actual_oldval;
@@ -280,8 +280,9 @@ __sync_val_compare_and_swap_4 (int *ptr, int oldval, int newval)
}
}
-bool HIDDEN
-__sync_bool_compare_and_swap_4 (int *ptr, int oldval, int newval)
+_Bool HIDDEN
+__sync_bool_compare_and_swap_4 (volatile void *ptr, unsigned int oldval,
+ unsigned int newval)
{
long failure = __kernel_cmpxchg (ptr, oldval, newval);
return (failure == 0);
@@ -289,55 +290,64 @@ __sync_bool_compare_and_swap_4 (int *ptr, int oldval, int newval)
#define SYNC_LOCK_TEST_AND_SET_2(TYPE, WIDTH, INDEX) \
TYPE HIDDEN \
- __sync_lock_test_and_set_##WIDTH (TYPE *ptr, TYPE val) \
+ __sync_lock_test_and_set_##WIDTH (volatile void *ptr, TYPE val) \
{ \
TYPE oldval; \
long failure; \
\
do { \
- oldval = __atomic_load_n (ptr, __ATOMIC_RELAXED); \
+ oldval = __atomic_load_n ((volatile TYPE *)ptr, \
+ __ATOMIC_RELAXED); \
failure = __kernel_cmpxchg2 (ptr, &oldval, &val, INDEX); \
} while (failure != 0); \
\
return oldval; \
}
-SYNC_LOCK_TEST_AND_SET_2 (long long, 8, 3)
-SYNC_LOCK_TEST_AND_SET_2 (short, 2, 1)
-SYNC_LOCK_TEST_AND_SET_2 (signed char, 1, 0)
+SYNC_LOCK_TEST_AND_SET_2 (long long unsigned int, 8, 3)
+SYNC_LOCK_TEST_AND_SET_2 (short unsigned int, 2, 1)
+SYNC_LOCK_TEST_AND_SET_2 (unsigned char, 1, 0)
-int HIDDEN
-__sync_lock_test_and_set_4 (int *ptr, int val)
+unsigned int HIDDEN
+__sync_lock_test_and_set_4 (volatile void *ptr, unsigned int val)
{
long failure;
- int oldval;
+ unsigned int oldval;
do {
- oldval = __atomic_load_n (ptr, __ATOMIC_RELAXED);
+ oldval = __atomic_load_n ((volatile unsigned int *)ptr, __ATOMIC_RELAXED);
failure = __kernel_cmpxchg (ptr, oldval, val);
} while (failure != 0);
return oldval;
}
-void HIDDEN
-__sync_lock_release_8 (long long *ptr)
-{
- /* All accesses must be complete before we release the lock. */
- __sync_synchronize ();
- *(double *)ptr = 0;
-}
-
-#define SYNC_LOCK_RELEASE_1(TYPE, WIDTH) \
+#define SYNC_LOCK_RELEASE_1(TYPE, WIDTH, INDEX) \
void HIDDEN \
- __sync_lock_release_##WIDTH (TYPE *ptr) \
+ __sync_lock_release_##WIDTH (volatile void *ptr) \
{ \
- /* All accesses must be complete before we release \
- the lock. */ \
- __sync_synchronize (); \
- *ptr = 0; \
+ TYPE oldval, val = 0; \
+ long failure; \
+ \
+ do { \
+ oldval = __atomic_load_n ((volatile TYPE *)ptr, \
+ __ATOMIC_RELAXED); \
+ failure = __kernel_cmpxchg2 (ptr, &oldval, &val, INDEX); \
+ } while (failure != 0); \
}
-SYNC_LOCK_RELEASE_1 (int, 4)
-SYNC_LOCK_RELEASE_1 (short, 2)
-SYNC_LOCK_RELEASE_1 (signed char, 1)
+SYNC_LOCK_RELEASE_1 (long long unsigned int, 8, 3)
+SYNC_LOCK_RELEASE_1 (short unsigned int, 2, 1)
+SYNC_LOCK_RELEASE_1 (unsigned char, 1, 0)
+
+void HIDDEN
+__sync_lock_release_4 (volatile void *ptr)
+{
+ long failure;
+ unsigned int oldval;
+
+ do {
+ oldval = __atomic_load_n ((volatile unsigned int *)ptr, __ATOMIC_RELAXED);
+ failure = __kernel_cmpxchg (ptr, oldval, 0);
+ } while (failure != 0);
+}
diff --git a/libgcc/config/riscv/t-softfp32 b/libgcc/config/riscv/t-softfp32
index 1bd51e8..59be1df 100644
--- a/libgcc/config/riscv/t-softfp32
+++ b/libgcc/config/riscv/t-softfp32
@@ -12,7 +12,11 @@ softfp_float_modes := tf
softfp_extensions := sftf dftf
softfp_truncations := tfsf tfdf
+# Enable divide routines to make -mno-fdiv work.
+softfp_extras := divsf3 divdf3
+
else
+# !ABI_DOUBLE
softfp_float_modes := df tf
softfp_extensions := sfdf sftf dftf
@@ -20,7 +24,20 @@ softfp_truncations := dfsf tfsf tfdf
ifndef ABI_SINGLE
softfp_float_modes += sf
+else
+# ABI_SINGLE
+
+# Enable divide routines to make -mno-fdiv work.
+softfp_extras := divsf3
+
endif
endif
+
+else
+# ABI_QUAD
+
+# Enable divide routines to make -mno-fdiv work.
+softfp_extras := divsf3 divdf3 divtf3
+
endif
diff --git a/libgcc/config/t-gthr-noweak b/libgcc/config/t-gthr-noweak
new file mode 100644
index 0000000..45a21e9
--- /dev/null
+++ b/libgcc/config/t-gthr-noweak
@@ -0,0 +1,2 @@
+# Don't use weak references for single-thread detection
+HOST_LIBGCC2_CFLAGS += -DGTHREAD_USE_WEAK=0
diff --git a/libgcc/config/t-gthr-vxworks b/libgcc/config/t-gthr-vxworks
new file mode 100644
index 0000000..455d0b3
--- /dev/null
+++ b/libgcc/config/t-gthr-vxworks
@@ -0,0 +1,5 @@
+# Extra libgcc2 modules used by gthr-vxworks.h functions
+LIB2ADDEH += $(srcdir)/config/gthr-vxworks.c\
+ $(srcdir)/config/gthr-vxworks-cond.c\
+ $(srcdir)/config/gthr-vxworks-thread.c\
+ $(srcdir)/config/gthr-vxworks-tls.c \ No newline at end of file
diff --git a/libgcc/config/t-gthr-vxworksae b/libgcc/config/t-gthr-vxworksae
new file mode 100644
index 0000000..1471298
--- /dev/null
+++ b/libgcc/config/t-gthr-vxworksae
@@ -0,0 +1,7 @@
+# Extra libgcc2 modules used by gthr-vxworks.h functions. We don't
+# support the C++ threads on vx653.
+
+LIB2ADDEH += $(srcdir)/config/gthr-vxworks.c\
+ $(srcdir)/config/gthr-vxworks-cond.c\
+ $(srcdir)/config/gthr-vxworks-tls.c
+
diff --git a/libgcc/config/t-vxcrtstuff b/libgcc/config/t-vxcrtstuff
new file mode 100644
index 0000000..96b7285
--- /dev/null
+++ b/libgcc/config/t-vxcrtstuff
@@ -0,0 +1,12 @@
+# crtbegin/crtend for VxWorks (DKM or RTP)
+
+vx_crtbegin-kernel.o: $(srcdir)/config/vxcrtstuff.c
+ $(crt_compile) $(CRTSTUFF_T_CFLAGS) -DCRT_BEGIN -c $<
+
+vx_crtbegin-rtp.o: $(srcdir)/config/vxcrtstuff.c
+ $(crt_compile) $(CRTSTUFF_T_CFLAGS) -DCRT_BEGIN -c $< -mrtp
+
+vx_crtend.o: $(srcdir)/config/vxcrtstuff.c
+ $(crt_compile) $(CRTSTUFF_T_CFLAGS) -DCRT_END -c $<
+
+EXTRA_PARTS += vx_crtbegin-kernel.o vx_crtbegin-rtp.o vx_crtend.o
diff --git a/libgcc/config/t-vxworks b/libgcc/config/t-vxworks
index 2db8e05..757cead 100644
--- a/libgcc/config/t-vxworks
+++ b/libgcc/config/t-vxworks
@@ -6,9 +6,6 @@ LIBGCC2_DEBUG_CFLAGS =
LIB2FUNCS_EXCLUDE += _clear_cache
LIB2ADD += $(srcdir)/config/vxcache.c
-# Extra libgcc2 modules used by gthr-vxworks.h functions
-LIB2ADDEH += $(srcdir)/config/vxlib.c $(srcdir)/config/vxlib-tls.c
-
# This ensures that the correct target headers are used; some VxWorks
# system headers have names that collide with GCC's internal (host)
# headers, e.g. regs.h. Make sure the local libgcc headers still
diff --git a/libgcc/config/t-vxworks7 b/libgcc/config/t-vxworks7
index 054ab7c..f2cc904 100644
--- a/libgcc/config/t-vxworks7
+++ b/libgcc/config/t-vxworks7
@@ -6,9 +6,6 @@ LIBGCC2_DEBUG_CFLAGS =
LIB2FUNCS_EXCLUDE += _clear_cache
LIB2ADD += $(srcdir)/config/vxcache.c
-# Extra libgcc2 modules used by gthr-vxworks.h functions
-LIB2ADDEH += $(srcdir)/config/vxlib.c $(srcdir)/config/vxlib-tls.c
-
# This ensures that the correct target headers are used; some VxWorks
# system headers have names that collide with GCC's internal (host)
# headers, e.g. regs.h. Make sure the local libgcc headers still
@@ -21,4 +18,3 @@ LIBGCC2_INCLUDES = -nostdinc -I. \
*/mrtp*) echo -I$(VSB_DIR)/usr/h/public -I$(VSB_DIR)/usr/h ;; \
*) echo -I$(VSB_DIR)/krnl/h/system -I$(VSB_DIR)/krnl/h/public ;; \
esac`
-
diff --git a/libgcc/config/t-vxworksae b/libgcc/config/t-vxworksae
new file mode 100644
index 0000000..f5003ab
--- /dev/null
+++ b/libgcc/config/t-vxworksae
@@ -0,0 +1,17 @@
+# Don't build libgcc.a with debug info
+LIBGCC2_DEBUG_CFLAGS =
+
+# We do not have access to the cache library when building a vThreads
+# application.
+
+# This ensures that the correct target headers are used; some VxWorks
+# system headers have names that collide with GCC's internal (host)
+# headers, e.g. regs.h. Make sure the local libgcc headers still
+# prevail (e.g. unwind.h), and that gcc provided header files intended
+# to be user visible eventually are visible as well.
+LIBGCC2_INCLUDES = -nostdinc -I. \
+ -I$(MULTIBUILDTOP)../../gcc/include \
+ `case "/$(MULTIDIR)" in \
+ */mvthreads*) echo -I$(WIND_BASE)/target/vThreads/h -I$(WIND_BASE)/target/val/h -I$(WIND_BASE)/target/h/wrn/coreip ;; \
+ *) echo -I$(WIND_BASE)/target/h -I$(WIND_BASE)/target/h/wrn/coreip ;; \
+ esac`
diff --git a/libgcc/config/vxcrtstuff.c b/libgcc/config/vxcrtstuff.c
new file mode 100644
index 0000000..616ad07
--- /dev/null
+++ b/libgcc/config/vxcrtstuff.c
@@ -0,0 +1,132 @@
+/* This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/* The essential point of the crtbegin/crtend files on VxWorks is to handle
+ the eh frames registration thanks to dedicated constructors and
+ destructors. What needs to be done depends on the VxWorks version and the
+ kind of module (rtp, dkm, ...) one is building. */
+
+#define IN_LIBGCC2
+
+#include "auto-host.h"
+#include "tconfig.h"
+#include "tsystem.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "libgcc_tm.h"
+#include "unwind-dw2-fde.h"
+
+/* If we are entitled/requested to use init/fini arrays, we'll rely on that.
+ Otherwise, we may rely on ctors/dtors sections for RTPs or expect munch to
+ be involved for kernel modules. */
+
+#if !defined(USE_INIT_FINI_ARRAY) && defined(__RTP__)
+#define USE_CDTORS_SECTIONS
+#endif
+
+/* ------------------------------ crtbegin ------------------------------- */
+
+#ifdef CRT_BEGIN
+
+/* Stick a label at the beginning of the frame unwind info so we can register
+ and deregister it with the exception handling library code. */
+static const char __EH_FRAME_BEGIN__[]
+__attribute__((section(__LIBGCC_EH_FRAME_SECTION_NAME__), aligned(4)))
+ = { };
+
+/* Determine what names to use for the constructor/destructor functions. */
+
+#if defined(USE_CDTORS_SECTIONS) || defined(USE_INITFINI_ARRAY)
+
+#define EH_CTOR_NAME _crtbe_register_frame
+#define EH_DTOR_NAME _ctrbe_deregister_frame
+
+#else
+
+/* No specific sections for constructors or destructors: we thus use a
+ symbol naming convention so that the constructors are then recognized
+ by munch or whatever tool is used for the final link phase. */
+#define EH_CTOR_NAME _GLOBAL__I_00101_0__crtbe_register_frame
+#define EH_DTOR_NAME _GLOBAL__D_00101_1__crtbe_deregister_frame
+
+#endif
+
+#ifdef USE_INITFINI_ARRAY
+/* .init_array and .fini_array is supported starting VxWorks 7.2 in all
+ cases. The compiler is then configured to always support priorities in
+ constructors, so we can rely on the constructor and destructor attributes
+ to generate the proper sections. */
+#define EH_CTOR_ATTRIBUTE __attribute__((constructor (101)))
+#define EH_DTOR_ATTRIBUTE __attribute__((destructor (101)))
+
+#else /* USE_INITFINI_ARRAY */
+
+/* Note: Even in case of .ctors/.dtors sections, we can't use the attribute
+ (constructor (15)) here as gcc may have been configured with constructors
+ priority disabled. We will instead craft an explicit section name for this
+ purpose. */
+#define EH_CTOR_ATTRIBUTE
+#define EH_DTOR_ATTRIBUTE
+
+#endif /* USE_INITFINI_ARRAY */
+
+void EH_CTOR_NAME (void);
+void EH_DTOR_NAME (void);
+
+EH_CTOR_ATTRIBUTE void EH_CTOR_NAME (void)
+{
+ static struct object object;
+ __register_frame_info (__EH_FRAME_BEGIN__, &object);
+}
+
+EH_DTOR_ATTRIBUTE void EH_DTOR_NAME (void)
+{
+ __deregister_frame_info (__EH_FRAME_BEGIN__);
+}
+
+#ifdef USE_CDTORS_SECTIONS
+/* As explained above, we need to manually build the sections here as the
+ compiler may not have support for constructors priority enabled. */
+static void (* volatile eh_registration_ctors[])()
+ __attribute__((section (".ctors.101")))
+= { &EH_CTOR_NAME };
+static void (* volatile eh_registration_dtors[])()
+ __attribute__((section (".dtors.65434")))
+= { &EH_DTOR_NAME };
+#endif
+
+/* ------------------------------ crtend --------------------------------- */
+
+#elif defined (CRT_END) /* ! CRT_BEGIN */
+
+/* Terminate the frame unwind info section with a 4byte 0 as a sentinel;
+ this would be the 'length' field in a real FDE. */
+
+static const char __FRAME_END__[]
+ __attribute__ ((used, section(__LIBGCC_EH_FRAME_SECTION_NAME__),
+ aligned(4)))
+ = { 0, 0, 0, 0 };
+
+#else /* ! CRT_BEGIN & ! CRT_END */
+
+#error "One of CRT_BEGIN or CRT_END must be defined."
+
+#endif
diff --git a/libgcc/config/vxlib.c b/libgcc/config/vxlib.c
deleted file mode 100644
index 78b6776..0000000
--- a/libgcc/config/vxlib.c
+++ /dev/null
@@ -1,95 +0,0 @@
-/* Copyright (C) 2002-2019 Free Software Foundation, Inc.
- Contributed by Zack Weinberg <zack@codesourcery.com>
-
-This file is part of GCC.
-
-GCC is free software; you can redistribute it and/or modify it under
-the terms of the GNU General Public License as published by the Free
-Software Foundation; either version 3, or (at your option) any later
-version.
-
-GCC is distributed in the hope that it will be useful, but WITHOUT ANY
-WARRANTY; without even the implied warranty of MERCHANTABILITY or
-FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-for more details.
-
-Under Section 7 of GPL version 3, you are granted additional
-permissions described in the GCC Runtime Library Exception, version
-3.1, as published by the Free Software Foundation.
-
-You should have received a copy of the GNU General Public License and
-a copy of the GCC Runtime Library Exception along with this program;
-see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
-<http://www.gnu.org/licenses/>. */
-
-/* Threads compatibility routines for libgcc2 for VxWorks.
- These are out-of-line routines called from gthr-vxworks.h. */
-
-#include "tconfig.h"
-#include "tsystem.h"
-#include "gthr.h"
-
-#if defined(__GTHREADS)
-#include <vxWorks.h>
-#ifndef __RTP__
-#include <vxLib.h>
-#endif
-#include <taskLib.h>
-#ifndef __RTP__
-#include <taskHookLib.h>
-#else
-# include <errno.h>
-#endif
-
-/* Init-once operation.
-
- This would be a clone of the implementation from gthr-solaris.h,
- except that we have a bootstrap problem - the whole point of this
- exercise is to prevent double initialization, but if two threads
- are racing with each other, once->mutex is liable to be initialized
- by both. Then each thread will lock its own mutex, and proceed to
- call the initialization routine.
-
- So instead we use a bare atomic primitive (vxTas()) to handle
- mutual exclusion. Threads losing the race then busy-wait, calling
- taskDelay() to yield the processor, until the initialization is
- completed. Inefficient, but reliable. */
-
-int
-__gthread_once (__gthread_once_t *guard, void (*func)(void))
-{
- if (guard->done)
- return 0;
-
-#ifdef __RTP__
- __gthread_lock_library ();
-#else
- while (!vxTas ((void *)&guard->busy))
- {
-#ifdef __PPC__
- /* This can happen on powerpc, which is using all 32 bits
- of the gthread_once_t structure. */
- if (guard->done)
- return 0;
-#endif
- taskDelay (1);
- }
-#endif
-
- /* Only one thread at a time gets here. Check ->done again, then
- go ahead and call func() if no one has done it yet. */
- if (!guard->done)
- {
- func ();
- guard->done = 1;
- }
-
-#ifdef __RTP__
- __gthread_unlock_library ();
-#else
- guard->busy = 0;
-#endif
- return 0;
-}
-
-#endif /* __GTHREADS */