author     Jerome Lambourg <lambourg@adacore.com>  2016-04-18 09:24:08 +0000
committer  Arnaud Charlet <charlet@gcc.gnu.org>    2016-04-18 11:24:08 +0200
commit     84538074d9b5ae903c2d3561653b071995bdc934 (patch)
tree       bc015aa7a5ff8ea2b213941770336c5c8f466f0b  /gcc/ada/init.c
parent     230ad3691829ee68b2e25ac1347dbe88240d7598 (diff)
sigtramp-vxworks-target.inc: sigtramp-vxworks: force the stack alignment for x86_64.
2016-04-18  Jerome Lambourg  <lambourg@adacore.com>

	* sigtramp-vxworks-target.inc: sigtramp-vxworks: force the stack
	alignment for x86_64.
	* init.c: Better fix for guard page reset on x86_64-vx7.  Do not
	try to retrieve the page that actually raised the signal, as the
	probing mechanism used on x86_64 does not allow such retrieval.
	We thus just test if the guard page is active, and re-activate it
	if not.

From-SVN: r235102
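For reference, here is the guard-page logic as it reads once this patch is applied, assembled from the hunks below into one piece. This is a sketch only: the taskLib/vmLib calls, INT_OVERFLOW_SIZE, and the VXWORKS_FORCE_GUARD_PAGE conditional are exactly those visible in the diff, and any code in the function outside the shown hunks is omitted.

static int
__gnat_reset_guard_page (int sig)
{
#ifdef VXWORKS_FORCE_GUARD_PAGE
  /* The guard page sits just past the end of the task's stack.  */
  TASK_ID tid = taskIdSelf ();
  WIND_TCB *pTcb = taskTcb (tid);
  VIRT_ADDR guardPage = (VIRT_ADDR) pTcb->pStackEnd - INT_OVERFLOW_SIZE;
  UINT stateMask = VM_STATE_MASK_VALID;
  UINT guardState = VM_STATE_VALID_NOT;

#if (_WRS_VXWORKS_MAJOR >= 7)
  /* VxWorks 7 also needs the "no block" MMU attribute restored.  */
  stateMask |= MMU_ATTR_SPL_MSK;
  guardState |= MMU_ATTR_NO_BLOCK;
#endif

  UINT nState;
  vmStateGet (NULL, guardPage, &nState);
  if ((nState & VM_STATE_MASK_VALID) != VM_STATE_VALID_NOT)
    {
      /* The kernel disarmed the page when it was violated: make it
         invalid again so the next overflow also faults.  */
      vmStateSet (NULL, guardPage, INT_OVERFLOW_SIZE, stateMask, guardState);
      return TRUE;
    }
#endif /* VXWORKS_FORCE_GUARD_PAGE */
  return FALSE;
}

Note the design change the commit message describes: instead of computing the faulting page from saved registers (which the x86_64 stack-probing scheme makes unreliable), the handler now simply queries the guard page's state and re-arms it if the kernel left it valid.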
Diffstat (limited to 'gcc/ada/init.c')
-rw-r--r--  gcc/ada/init.c  55
1 file changed, 20 insertions(+), 35 deletions(-)
diff --git a/gcc/ada/init.c b/gcc/ada/init.c
index 38907a1..ae9b58e 100644
--- a/gcc/ada/init.c
+++ b/gcc/ada/init.c
@@ -1759,7 +1759,7 @@ getpid (void)
This function returns TRUE in case the guard page was hit by the
signal. */
static int
-__gnat_reset_guard_page (int sig, void *sc)
+__gnat_reset_guard_page (int sig)
{
/* On ARM VxWorks 6.x and x86_64 VxWorks 7, the guard page is left un-armed
by the kernel after being violated, so subsequent violations aren't
@@ -1776,42 +1776,24 @@ __gnat_reset_guard_page (int sig, void *sc)
TASK_ID tid = taskIdSelf ();
WIND_TCB *pTcb = taskTcb (tid);
- REG_SET *pregs = ((struct sigcontext *) sc)->sc_pregs;
VIRT_ADDR guardPage = (VIRT_ADDR) pTcb->pStackEnd - INT_OVERFLOW_SIZE;
UINT stateMask = VM_STATE_MASK_VALID;
- UINT state = VM_STATE_VALID_NOT;
- size_t probe_distance = 0;
- VIRT_ADDR sigPage;
+ UINT guardState = VM_STATE_VALID_NOT;
-#if defined (ARMEL)
- /* violating address in rip: r12 */
- sigPage = pregs->r[12] & ~(INT_OVERFLOW_SIZE - 1);
-#elif defined (__x86_64__)
- /* violating address in rsp. */
- probe_distance = 16 * 1024; /* in gcc/config/i386/vxworks7.h */
- sigPage = pregs->rsp & ~(INT_OVERFLOW_SIZE - 1);
- stateMask |= MMU_ATTR_SPL_MSK;
- state |= MMU_ATTR_NO_BLOCK;
-#else
-#error "Not Implemented for this CPU"
+#if (_WRS_VXWORKS_MAJOR >= 7)
+ stateMask |= MMU_ATTR_SPL_MSK;
+ guardState |= MMU_ATTR_NO_BLOCK;
#endif
- if (guardPage == (sigPage - probe_distance))
+ UINT nState;
+ vmStateGet (NULL, guardPage, &nState);
+ if ((nState & VM_STATE_MASK_VALID) != VM_STATE_VALID_NOT)
{
- UINT nState;
- vmStateGet (NULL, guardPage, &nState);
- if ((nState & VM_STATE_MASK_VALID) != VM_STATE_VALID_NOT) {
- /* If the guard page has a valid state, we need to reset to
- invalid state here */
- vmStateSet (NULL, guardPage, INT_OVERFLOW_SIZE, stateMask, state);
- }
-
+ /* If the guard page has a valid state, we need to reset to
+ invalid state here */
+ vmStateSet (NULL, guardPage, INT_OVERFLOW_SIZE, stateMask, guardState);
return TRUE;
}
- else
- {
- return FALSE;
- }
#endif /* VXWORKS_FORCE_GUARD_PAGE */
return FALSE;
}
@@ -1919,7 +1901,7 @@ __gnat_map_signal (int sig, siginfo_t *si ATTRIBUTE_UNUSED, void *sc)
msg = "unhandled signal";
}
- if (__gnat_reset_guard_page (sig, sc))
+ if (__gnat_reset_guard_page (sig))
{
/* Set the exception message: we know for sure that we have a
stack overflow here */
@@ -1997,14 +1979,17 @@ __gnat_error_handler (int sig, siginfo_t *si, void *sc)
when they contain SPE instructions, we need to set it back before doing
anything else.
This mechanism is only needed in kernel mode. */
-#if !(defined (__RTP__) || defined (CERT)) && ((CPU == PPCE500V2) || (CPU == PPC85XX))
+#if !(defined (__RTP__) || defined (VTHREADS)) && ((CPU == PPCE500V2) || (CPU == PPC85XX))
register unsigned msr;
/* Read the MSR value */
asm volatile ("mfmsr %0" : "=r" (msr));
- /* Force the SPE bit */
- msr |= 0x02000000;
- /* Store to MSR */
- asm volatile ("mtmsr %0" : : "r" (msr));
+ /* Force the SPE bit if not set. */
+ if ((msr & 0x02000000) == 0)
+ {
+ msr |= 0x02000000;
+ /* Store to MSR */
+ asm volatile ("mtmsr %0" : : "r" (msr));
+ }
#endif
/* VxWorks will always mask out the signal during the signal handler and