Diffstat (limited to 'libgcc/config/pa')
-rw-r--r--  libgcc/config/pa/fptr.c          131
-rw-r--r--  libgcc/config/pa/lib2funcs.S      74
-rw-r--r--  libgcc/config/pa/linux-atomic.c  305
-rw-r--r--  libgcc/config/pa/quadlib.c       245
-rw-r--r--  libgcc/config/pa/t-hpux            3
-rw-r--r--  libgcc/config/pa/t-hpux10          1
-rw-r--r--  libgcc/config/pa/t-linux           6
-rw-r--r--  libgcc/config/pa/t-linux64         4
-rw-r--r--  libgcc/config/pa/t-pa64            3
9 files changed, 771 insertions(+), 1 deletion(-)
diff --git a/libgcc/config/pa/fptr.c b/libgcc/config/pa/fptr.c
new file mode 100644
index 0000000..320d182
--- /dev/null
+++ b/libgcc/config/pa/fptr.c
@@ -0,0 +1,131 @@
+/* Subroutine for function pointer canonicalization on PA-RISC with ELF32.
+   Copyright 2002, 2003, 2004, 2007, 2009 Free Software Foundation, Inc.
+   Contributed by John David Anglin (dave.anglin@nrc.ca).
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+<http://www.gnu.org/licenses/>.  */
+
+
+/* WARNING: The code in this function depends on internal and undocumented
+   details of the GNU linker and dynamic loader as implemented for parisc
+   linux.  */
+
+/* This MUST match the defines in sysdeps/hppa/dl-machine.h and
+   bfd/elf32-hppa.c.  */
+#define GOT_FROM_PLT_STUB (4*4)
+
+/* List of byte offsets in _dl_runtime_resolve to search for "bl" branches.
+   The first "bl" branch instruction found MUST be a call to fixup.  See
+   the define for TRAMPOLINE_TEMPLATE in sysdeps/hppa/dl-machine.h.  If
+   the trampoline template is changed, the list must be appropriately
+   updated.  The offset of -4 allows for a magic branch at the start of
+   the template should it be necessary to change the current branch
+   position.  */
+#define NOFFSETS 2
+static int fixup_branch_offset[NOFFSETS] = { 32, -4 };
+
+#define GET_FIELD(X, FROM, TO) \
+  ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
+#define SIGN_EXTEND(VAL,BITS) \
+  ((int) ((VAL) >> ((BITS) - 1) ? (-1 << (BITS)) | (VAL) : (VAL)))
+
+struct link_map;
+typedef int (*fptr_t) (void);
+typedef int (*fixup_t) (struct link_map *, unsigned int);
+extern unsigned int _GLOBAL_OFFSET_TABLE_;
+
+/* __canonicalize_funcptr_for_compare must be hidden so that it is not
+   placed in the dynamic symbol table.  Like millicode functions, it
+   must be linked into all binaries in order to access the got table of
+   that binary.  However, we don't use the millicode calling convention
+   and the routine must be a normal function so that it can be compiled
+   as pic code.  */
+unsigned int __canonicalize_funcptr_for_compare (fptr_t)
+  __attribute__ ((visibility ("hidden")));
+
+unsigned int
+__canonicalize_funcptr_for_compare (fptr_t fptr)
+{
+  static unsigned int fixup_plabel[2];
+  static fixup_t fixup;
+  unsigned int *plabel, *got;
+
+  /* -1 and page 0 are special.  -1 is used in crtend to mark the end of
+     a list of function pointers.  Also return immediately if the plabel
+     bit is not set in the function pointer.  In this case, the function
+     pointer points directly to the function.  */
+  if ((int) fptr == -1 || (unsigned int) fptr < 4096 || !((int) fptr & 2))
+    return (unsigned int) fptr;
+
+  /* The function pointer points to a function descriptor (plabel).  If
+     the plabel hasn't been resolved, the first word of the plabel points
+     to the entry of the PLT stub just before the global offset table.
+     The second word in the plabel contains the relocation offset for the
+     function.  */
+  plabel = (unsigned int *) ((unsigned int) fptr & ~3);
+  got = (unsigned int *) (plabel[0] + GOT_FROM_PLT_STUB);
+
+  /* Return the address of the function if the plabel has been resolved.  */
+  if (got != &_GLOBAL_OFFSET_TABLE_)
+    return plabel[0];
+
+  /* Initialize our plabel for calling fixup if we haven't done so already.
+     This code needs to be thread safe but we don't have to be too careful
+     as the result is invariant.  */
+  if (!fixup)
+    {
+      int i;
+      unsigned int *iptr;
+
+      /* Find the first "bl" branch in the offset search list.  This is a
+         call to fixup or a magic branch to fixup at the beginning of the
+         trampoline template.  The fixup function does the actual runtime
+         resolution of function descriptors.  We only look for "bl" branches
+         with a 17-bit pc-relative displacement.  */
+      for (i = 0; i < NOFFSETS; i++)
+        {
+          iptr = (unsigned int *) (got[-2] + fixup_branch_offset[i]);
+          if ((*iptr & 0xfc00e000) == 0xe8000000)
+            break;
+        }
+
+      /* This should not happen...  */
+      if (i == NOFFSETS)
+        return ~0;
+
+      /* Extract the 17-bit displacement from the instruction.  */
+      iptr += SIGN_EXTEND (GET_FIELD (*iptr, 19, 28) |
+                           GET_FIELD (*iptr, 29, 29) << 10 |
+                           GET_FIELD (*iptr, 11, 15) << 11 |
+                           GET_FIELD (*iptr, 31, 31) << 16, 17);
+
+      /* Build a plabel for an indirect call to fixup.  */
+      fixup_plabel[0] = (unsigned int) iptr + 8;   /* address of fixup */
+      fixup_plabel[1] = got[-1];                   /* ltp for fixup */
+      fixup = (fixup_t) ((int) fixup_plabel | 3);
+    }
+
+  /* Call fixup to resolve the function address.  got[1] contains the
+     link_map pointer and plabel[1] the relocation offset.  */
+  fixup ((struct link_map *) got[1], plabel[1]);
+
+  return plabel[0];
+}
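Why this routine exists: on ELF32 PA-RISC, a pointer to a function may be either a code address or the address of a two-word function descriptor (plabel), so two pointers to the same function need not be bit-identical. A minimal sketch of the comparison the compiler has to paper over; the names `handler' and `is_default_handler' are hypothetical, not part of fptr.c:

/* Illustrative sketch only.  On hppa-linux, GCC lowers the `==' below to
   calls to __canonicalize_funcptr_for_compare on both operands, so that a
   plabel pointer and a direct code pointer to the same function compare
   equal.  */
extern void handler (void);

int
is_default_handler (void (*fp) (void))
{
  /* Without canonicalization, fp could carry the plabel bit (bit 1 set)
     while `handler' was resolved to a plain code address elsewhere.  */
  return fp == handler;
}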
diff --git a/libgcc/config/pa/lib2funcs.S b/libgcc/config/pa/lib2funcs.S
new file mode 100644
index 0000000..8aa398c
--- /dev/null
+++ b/libgcc/config/pa/lib2funcs.S
@@ -0,0 +1,74 @@
+;  Subroutines for calling unbound dynamic functions from within GDB for HPPA.
+;  Subroutines for out of line prologues and epilogues on the HPPA
+;  Copyright (C) 1994, 1995, 1996, 2009 Free Software Foundation, Inc.
+
+;  This file is part of GCC.
+
+;  GCC is free software; you can redistribute it and/or modify
+;  it under the terms of the GNU General Public License as published by
+;  the Free Software Foundation; either version 3, or (at your option)
+;  any later version.
+
+;  GCC is distributed in the hope that it will be useful,
+;  but WITHOUT ANY WARRANTY; without even the implied warranty of
+;  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+;  GNU General Public License for more details.
+
+;  Under Section 7 of GPL version 3, you are granted additional
+;  permissions described in the GCC Runtime Library Exception, version
+;  3.1, as published by the Free Software Foundation.
+
+;  You should have received a copy of the GNU General Public License and
+;  a copy of the GCC Runtime Library Exception along with this program;
+;  see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+;  <http://www.gnu.org/licenses/>.
+
+#if !defined(__pro__) && !defined(__rtems__)
+	.SPACE $PRIVATE$
+	.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31
+	.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82
+	.SPACE $TEXT$
+	.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44
+	.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY
+	.SUBSPA $MILLICODE$,QUAD=0,ALIGN=8,ACCESS=44,SORT=8
+#endif
+	.IMPORT $$dyncall,MILLICODE
+#if !defined(__pro__) && !defined(__rtems__)
+	.SPACE $TEXT$
+	.SUBSPA $CODE$
+#else
+	.text
+#endif
+
+; Simply call with the address of the desired import stub in %r22 and
+; arguments in the normal place (%r26-%r23 and stack slots).
+;
+	.align 4
+	.EXPORT __gcc_plt_call,ENTRY,PRIV_LEV=3,RTNVAL=GR
+__gcc_plt_call
+	.PROC
+	.CALLINFO
+	.ENTRY
+	; Our return address comes in %r31, not %r2!
+	stw %r31,-8(%r30)
+
+	; An inline version of dyncall so we don't have to worry
+	; about long calls to millicode, PIC and other complexities.
+	bb,>=,n %r22,30,L$foo
+	depi 0,31,2,%r22
+	ldw 4(%r22),%r19
+	ldw 0(%r22),%r22
+L$foo
+	ldsid (%r22),%r1
+	mtsp %r1,%sr0
+	ble 0(%sr0,%r22)
+	copy %r31,%r2
+	ldw -8(%r30),%r2
+
+	; We're going to be returning to a stack address, so we
+	; need to do an intra-space return.
+	ldsid (%rp),%r1
+	mtsp %r1,%sr0
+	be,n 0(%sr0,%rp)
+	.EXIT
+	.PROCEND
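The descriptor that __gcc_plt_call takes apart (the two ldw instructions after the bb,>= plabel-bit test) is the same two-word plabel that fptr.c walks. A rough C view of that layout, under the assumption that this mirrors the ELF32 convention used by the linker and loader; no ABI header actually declares such a type, and the struct name here is ours:

/* Hypothetical illustration of a PA-RISC ELF32 function descriptor.
   A function pointer with bit 1 set points (after masking with ~3)
   at this pair rather than at code.  */
struct pa32_plabel
{
  unsigned int pc;  /* entry point, or the PLT stub address while unresolved */
  unsigned int gp;  /* global pointer (ltp) the callee expects in %r19 */
};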
diff --git a/libgcc/config/pa/linux-atomic.c b/libgcc/config/pa/linux-atomic.c
new file mode 100644
index 0000000..2ae2426
--- /dev/null
+++ b/libgcc/config/pa/linux-atomic.c
@@ -0,0 +1,305 @@
+/* Linux-specific atomic operations for PA Linux.
+   Copyright (C) 2008, 2009, 2010 Free Software Foundation, Inc.
+   Based on code contributed by CodeSourcery for ARM EABI Linux.
+   Modifications for PA Linux by Helge Deller <deller@gmx.de>
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+<http://www.gnu.org/licenses/>.  */
+
+#define EFAULT  14
+#define EBUSY   16
+#define ENOSYS 251
+
+/* All PA-RISC implementations supported by linux have strongly
+   ordered loads and stores.  Only cache flushes and purges can be
+   delayed.  The data cache implementations are all globally
+   coherent.  Thus, there is no need to synchronize memory accesses.
+
+   GCC automatically issues an asm memory barrier when it encounters
+   a __sync_synchronize builtin.  Thus, we do not need to define this
+   builtin.
+
+   We implement byte, short and int versions of each atomic operation
+   using the kernel helper defined below.  There is no support for
+   64-bit operations yet.  */
+
+/* A privileged instruction to crash a userspace program with SIGILL.  */
+#define ABORT_INSTRUCTION asm ("iitlbp %r0,(%sr0, %r0)")
+
+/* Determine kernel LWS function call (0=32-bit, 1=64-bit userspace).  */
+#define LWS_CAS (sizeof(unsigned long) == 4 ? 0 : 1)
+
+/* Kernel helper for compare-and-exchange a 32-bit value.  */
+static inline long
+__kernel_cmpxchg (int oldval, int newval, int *mem)
+{
+  register unsigned long lws_mem asm("r26") = (unsigned long) (mem);
+  register long lws_ret asm("r28");
+  register long lws_errno asm("r21");
+  register int lws_old asm("r25") = oldval;
+  register int lws_new asm("r24") = newval;
+  asm volatile ("ble 0xb0(%%sr2, %%r0)	\n\t"
+                "ldi %5, %%r20		\n\t"
+	: "=r" (lws_ret), "=r" (lws_errno), "=r" (lws_mem),
+	  "=r" (lws_old), "=r" (lws_new)
+	: "i" (LWS_CAS), "2" (lws_mem), "3" (lws_old), "4" (lws_new)
+	: "r1", "r20", "r22", "r23", "r29", "r31", "memory"
+  );
+  if (__builtin_expect (lws_errno == -EFAULT || lws_errno == -ENOSYS, 0))
+    ABORT_INSTRUCTION;
+
+  /* If the kernel LWS call succeeded (lws_errno == 0), lws_ret contains
+     the old value from memory.  If this value is equal to OLDVAL, the
+     new value was written to memory.  If not, return -EBUSY.  */
+  if (!lws_errno && lws_ret != oldval)
+    lws_errno = -EBUSY;
+
+  return lws_errno;
+}
+
+#define HIDDEN __attribute__ ((visibility ("hidden")))
+
+/* Big endian masks  */
+#define INVERT_MASK_1 24
+#define INVERT_MASK_2 16
+
+#define MASK_1 0xffu
+#define MASK_2 0xffffu
+
+#define FETCH_AND_OP_WORD(OP, PFX_OP, INF_OP) \
+  int HIDDEN \
+  __sync_fetch_and_##OP##_4 (int *ptr, int val) \
+  { \
+    int failure, tmp; \
+ \
+    do { \
+      tmp = *ptr; \
+      failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr); \
+    } while (failure != 0); \
+ \
+    return tmp; \
+  }
+
+FETCH_AND_OP_WORD (add, , +)
+FETCH_AND_OP_WORD (sub, , -)
+FETCH_AND_OP_WORD (or, , |)
+FETCH_AND_OP_WORD (and, , &)
+FETCH_AND_OP_WORD (xor, , ^)
+FETCH_AND_OP_WORD (nand, ~, &)
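As a usage sketch (not part of the patch): with linux-atomic.c linked into libgcc, a word-sized __sync builtin compiled for hppa-linux resolves to one of the entry points generated above, each a retry loop around the kernel's light-weight syscall. `counter' and `bump_counter' are hypothetical names:

/* Sketch: this calls __sync_fetch_and_add_4, which loops on
   __kernel_cmpxchg until the compare-and-swap succeeds.  */
static int counter;

int
bump_counter (void)
{
  /* Returns the value of counter *before* the increment.  */
  return __sync_fetch_and_add (&counter, 1);
}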
+#define NAME_oldval(OP, WIDTH) __sync_fetch_and_##OP##_##WIDTH
+#define NAME_newval(OP, WIDTH) __sync_##OP##_and_fetch_##WIDTH
+
+/* Implement both __sync_<op>_and_fetch and __sync_fetch_and_<op> for
+   subword-sized quantities.  */
+
+#define SUBWORD_SYNC_OP(OP, PFX_OP, INF_OP, TYPE, WIDTH, RETURN) \
+  TYPE HIDDEN \
+  NAME##_##RETURN (OP, WIDTH) (TYPE *ptr, TYPE val) \
+  { \
+    int *wordptr = (int *) ((unsigned long) ptr & ~3); \
+    unsigned int mask, shift, oldval, newval; \
+    int failure; \
+ \
+    shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \
+    mask = MASK_##WIDTH << shift; \
+ \
+    do { \
+      oldval = *wordptr; \
+      newval = ((PFX_OP (((oldval & mask) >> shift) \
+                         INF_OP (unsigned int) val)) << shift) & mask; \
+      newval |= oldval & ~mask; \
+      failure = __kernel_cmpxchg (oldval, newval, wordptr); \
+    } while (failure != 0); \
+ \
+    return (RETURN & mask) >> shift; \
+  }
+
+SUBWORD_SYNC_OP (add, , +, unsigned short, 2, oldval)
+SUBWORD_SYNC_OP (sub, , -, unsigned short, 2, oldval)
+SUBWORD_SYNC_OP (or, , |, unsigned short, 2, oldval)
+SUBWORD_SYNC_OP (and, , &, unsigned short, 2, oldval)
+SUBWORD_SYNC_OP (xor, , ^, unsigned short, 2, oldval)
+SUBWORD_SYNC_OP (nand, ~, &, unsigned short, 2, oldval)
+
+SUBWORD_SYNC_OP (add, , +, unsigned char, 1, oldval)
+SUBWORD_SYNC_OP (sub, , -, unsigned char, 1, oldval)
+SUBWORD_SYNC_OP (or, , |, unsigned char, 1, oldval)
+SUBWORD_SYNC_OP (and, , &, unsigned char, 1, oldval)
+SUBWORD_SYNC_OP (xor, , ^, unsigned char, 1, oldval)
+SUBWORD_SYNC_OP (nand, ~, &, unsigned char, 1, oldval)
+
+#define OP_AND_FETCH_WORD(OP, PFX_OP, INF_OP) \
+  int HIDDEN \
+  __sync_##OP##_and_fetch_4 (int *ptr, int val) \
+  { \
+    int tmp, failure; \
+ \
+    do { \
+      tmp = *ptr; \
+      failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr); \
+    } while (failure != 0); \
+ \
+    return PFX_OP (tmp INF_OP val); \
+  }
+
+OP_AND_FETCH_WORD (add, , +)
+OP_AND_FETCH_WORD (sub, , -)
+OP_AND_FETCH_WORD (or, , |)
+OP_AND_FETCH_WORD (and, , &)
+OP_AND_FETCH_WORD (xor, , ^)
+OP_AND_FETCH_WORD (nand, ~, &)
+
+SUBWORD_SYNC_OP (add, , +, unsigned short, 2, newval)
+SUBWORD_SYNC_OP (sub, , -, unsigned short, 2, newval)
+SUBWORD_SYNC_OP (or, , |, unsigned short, 2, newval)
+SUBWORD_SYNC_OP (and, , &, unsigned short, 2, newval)
+SUBWORD_SYNC_OP (xor, , ^, unsigned short, 2, newval)
+SUBWORD_SYNC_OP (nand, ~, &, unsigned short, 2, newval)
+
+SUBWORD_SYNC_OP (add, , +, unsigned char, 1, newval)
+SUBWORD_SYNC_OP (sub, , -, unsigned char, 1, newval)
+SUBWORD_SYNC_OP (or, , |, unsigned char, 1, newval)
+SUBWORD_SYNC_OP (and, , &, unsigned char, 1, newval)
+SUBWORD_SYNC_OP (xor, , ^, unsigned char, 1, newval)
+SUBWORD_SYNC_OP (nand, ~, &, unsigned char, 1, newval)
+
+int HIDDEN
+__sync_val_compare_and_swap_4 (int *ptr, int oldval, int newval)
+{
+  int actual_oldval, fail;
+
+  while (1)
+    {
+      actual_oldval = *ptr;
+
+      if (__builtin_expect (oldval != actual_oldval, 0))
+        return actual_oldval;
+
+      fail = __kernel_cmpxchg (actual_oldval, newval, ptr);
+
+      if (__builtin_expect (!fail, 1))
+        return actual_oldval;
+    }
+}
+
+#define SUBWORD_VAL_CAS(TYPE, WIDTH) \
+  TYPE HIDDEN \
+  __sync_val_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval, \
+                                       TYPE newval) \
+  { \
+    int *wordptr = (int *)((unsigned long) ptr & ~3), fail; \
+    unsigned int mask, shift, actual_oldval, actual_newval; \
+ \
+    shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \
+    mask = MASK_##WIDTH << shift; \
+ \
+    while (1) \
+      { \
+        actual_oldval = *wordptr; \
+ \
+        if (__builtin_expect (((actual_oldval & mask) >> shift) \
+                              != (unsigned int) oldval, 0)) \
+          return (actual_oldval & mask) >> shift; \
+ \
+        actual_newval = (actual_oldval & ~mask) \
+                        | (((unsigned int) newval << shift) & mask); \
+ \
+        fail = __kernel_cmpxchg (actual_oldval, actual_newval, \
+                                 wordptr); \
+ \
+        if (__builtin_expect (!fail, 1)) \
+          return (actual_oldval & mask) >> shift; \
+      } \
+  }
+
+SUBWORD_VAL_CAS (unsigned short, 2)
+SUBWORD_VAL_CAS (unsigned char, 1)
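A short usage sketch for the subword paths defined above: a 2-byte compare-and-swap goes through __sync_val_compare_and_swap_2, which CASes the containing aligned word and shifts the halfword back out. `state' and `try_transition' are hypothetical names:

/* Sketch only.  */
static unsigned short state;

unsigned short
try_transition (unsigned short from, unsigned short to)
{
  /* Returns the value actually observed; the store happened iff the
     return value equals `from'.  */
  return __sync_val_compare_and_swap (&state, from, to);
}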
+typedef unsigned char bool;
+
+bool HIDDEN
+__sync_bool_compare_and_swap_4 (int *ptr, int oldval, int newval)
+{
+  int failure = __kernel_cmpxchg (oldval, newval, ptr);
+  return (failure == 0);
+}
+
+#define SUBWORD_BOOL_CAS(TYPE, WIDTH) \
+  bool HIDDEN \
+  __sync_bool_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval, \
+                                        TYPE newval) \
+  { \
+    TYPE actual_oldval \
+      = __sync_val_compare_and_swap_##WIDTH (ptr, oldval, newval); \
+    return (oldval == actual_oldval); \
+  }
+
+SUBWORD_BOOL_CAS (unsigned short, 2)
+SUBWORD_BOOL_CAS (unsigned char, 1)
+
+int HIDDEN
+__sync_lock_test_and_set_4 (int *ptr, int val)
+{
+  int failure, oldval;
+
+  do {
+    oldval = *ptr;
+    failure = __kernel_cmpxchg (oldval, val, ptr);
+  } while (failure != 0);
+
+  return oldval;
+}
+
+#define SUBWORD_TEST_AND_SET(TYPE, WIDTH) \
+  TYPE HIDDEN \
+  __sync_lock_test_and_set_##WIDTH (TYPE *ptr, TYPE val) \
+  { \
+    int failure; \
+    unsigned int oldval, newval, shift, mask; \
+    int *wordptr = (int *) ((unsigned long) ptr & ~3); \
+ \
+    shift = (((unsigned long) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \
+    mask = MASK_##WIDTH << shift; \
+ \
+    do { \
+      oldval = *wordptr; \
+      newval = (oldval & ~mask) \
+               | (((unsigned int) val << shift) & mask); \
+      failure = __kernel_cmpxchg (oldval, newval, wordptr); \
+    } while (failure != 0); \
+ \
+    return (oldval & mask) >> shift; \
+  }
+
+SUBWORD_TEST_AND_SET (unsigned short, 2)
+SUBWORD_TEST_AND_SET (unsigned char, 1)
+
+#define SYNC_LOCK_RELEASE(TYPE, WIDTH) \
+  void HIDDEN \
+  __sync_lock_release_##WIDTH (TYPE *ptr) \
+  { \
+    *ptr = 0; \
+  }
+
+SYNC_LOCK_RELEASE (int, 4)
+SYNC_LOCK_RELEASE (short, 2)
+SYNC_LOCK_RELEASE (char, 1)
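The test-and-set/release pair above is enough to build a simple lock, and the plain store in SYNC_LOCK_RELEASE is safe here precisely because of the strong memory ordering noted at the top of the file. A minimal hypothetical sketch:

/* Hypothetical spinlock built on the primitives above.  */
static int lock_word;

static void
spin_acquire (void)
{
  /* __sync_lock_test_and_set returns the previous value, so we spin
     until we are the caller that changed 0 -> 1.  */
  while (__sync_lock_test_and_set (&lock_word, 1) != 0)
    ;
}

static void
spin_release (void)
{
  __sync_lock_release (&lock_word);  /* compiles to a plain store of 0 */
}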
diff --git a/libgcc/config/pa/quadlib.c b/libgcc/config/pa/quadlib.c
new file mode 100644
index 0000000..2c11600
--- /dev/null
+++ b/libgcc/config/pa/quadlib.c
@@ -0,0 +1,245 @@
+/* Subroutines for long double support.
+   Copyright (C) 2000, 2002, 2004, 2005, 2006, 2009
+   Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+<http://www.gnu.org/licenses/>.  */
+
+/* HPUX TFmode compare requires a library call to _U_Qfcmp.  It takes
+   a magic number as its third argument which indicates what to do.
+   The return value is an integer to be compared against zero.  The
+   comparison conditions are the same as those listed in Table 8-12
+   of the PA-RISC 2.0 Architecture book for the fcmp instruction.  */
+
+/* Raise FP_INVALID on SNaN as a side effect.  */
+#define QCMP_INV 1
+
+/* Comparison relations.  */
+#define QCMP_UNORD 2
+#define QCMP_EQ 4
+#define QCMP_LT 8
+#define QCMP_GT 16
+
+int _U_Qfcmp (long double a, long double b, int);
+long _U_Qfcnvfxt_quad_to_sgl (long double);
+
+int _U_Qfeq (long double, long double);
+int _U_Qfne (long double, long double);
+int _U_Qfgt (long double, long double);
+int _U_Qfge (long double, long double);
+int _U_Qflt (long double, long double);
+int _U_Qfle (long double, long double);
+int _U_Qfltgt (long double, long double);
+int _U_Qfunle (long double, long double);
+int _U_Qfunlt (long double, long double);
+int _U_Qfunge (long double, long double);
+int _U_Qfungt (long double, long double);
+int _U_Qfuneq (long double, long double);
+int _U_Qfunord (long double, long double);
+int _U_Qford (long double, long double);
+
+int _U_Qfcomp (long double, long double);
+
+long double _U_Qfneg (long double);
+long double _U_Qfcopysign (long double, long double);
+
+#ifdef __LP64__
+int __U_Qfcnvfxt_quad_to_sgl (long double);
+#endif
+unsigned int _U_Qfcnvfxt_quad_to_usgl (long double);
+long double _U_Qfcnvxf_usgl_to_quad (unsigned int);
+unsigned long long _U_Qfcnvfxt_quad_to_udbl (long double);
+long double _U_Qfcnvxf_udbl_to_quad (unsigned long long);
+
+int
+_U_Qfeq (long double a, long double b)
+{
+  return (_U_Qfcmp (a, b, QCMP_EQ) != 0);
+}
+
+int
+_U_Qfne (long double a, long double b)
+{
+  return (_U_Qfcmp (a, b, QCMP_EQ) == 0);
+}
+
+int
+_U_Qfgt (long double a, long double b)
+{
+  return (_U_Qfcmp (a, b, QCMP_INV | QCMP_GT) != 0);
+}
+
+int
+_U_Qfge (long double a, long double b)
+{
+  return (_U_Qfcmp (a, b, QCMP_INV | QCMP_EQ | QCMP_GT) != 0);
+}
+
+int
+_U_Qflt (long double a, long double b)
+{
+  return (_U_Qfcmp (a, b, QCMP_INV | QCMP_LT) != 0);
+}
+
+int
+_U_Qfle (long double a, long double b)
+{
+  return (_U_Qfcmp (a, b, QCMP_INV | QCMP_EQ | QCMP_LT) != 0);
+}
+
+int
+_U_Qfltgt (long double a, long double b)
+{
+  return (_U_Qfcmp (a, b, QCMP_INV | QCMP_LT | QCMP_GT) != 0);
+}
+
+int
+_U_Qfunle (long double a, long double b)
+{
+  return (_U_Qfcmp (a, b, QCMP_INV | QCMP_UNORD | QCMP_EQ | QCMP_LT) != 0);
+}
+
+int
+_U_Qfunlt (long double a, long double b)
+{
+  return (_U_Qfcmp (a, b, QCMP_INV | QCMP_UNORD | QCMP_LT) != 0);
+}
+
+int
+_U_Qfunge (long double a, long double b)
+{
+  return (_U_Qfcmp (a, b, QCMP_INV | QCMP_UNORD | QCMP_EQ | QCMP_GT) != 0);
+}
+
+int
+_U_Qfungt (long double a, long double b)
+{
+  return (_U_Qfcmp (a, b, QCMP_INV | QCMP_UNORD | QCMP_GT) != 0);
+}
+
+int
+_U_Qfuneq (long double a, long double b)
+{
+  return (_U_Qfcmp (a, b, QCMP_INV | QCMP_UNORD | QCMP_EQ) != 0);
+}
+
+int
+_U_Qfunord (long double a, long double b)
+{
+  return (_U_Qfcmp (a, b, QCMP_INV | QCMP_UNORD) != 0);
+}
+
+int
+_U_Qford (long double a, long double b)
+{
+  return (_U_Qfcmp (a, b, QCMP_INV | QCMP_EQ | QCMP_LT | QCMP_GT) != 0);
+}
+
+int
+_U_Qfcomp (long double a, long double b)
+{
+  if (_U_Qfcmp (a, b, QCMP_EQ) == 0)
+    return 0;
+
+  return (_U_Qfcmp (a, b, QCMP_UNORD | QCMP_EQ | QCMP_GT) != 0 ? 1 : -1);
+}
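To make the magic-number encoding concrete: the third argument to _U_Qfcmp is simply the set of relations that should report true, which is why every ordinary comparison above includes QCMP_INV (raise FP_INVALID on signaling NaNs) and why only the _U_Qfun* variants add QCMP_UNORD. A hedged sketch reusing the declarations above; `quad_gt_both_ways' is a hypothetical name:

/* Sketch only.  */
int
quad_gt_both_ways (long double a, long double b)
{
  /* Ordinary a > b: false whenever a or b is a NaN (this is _U_Qfgt).  */
  int gt = _U_Qfcmp (a, b, QCMP_INV | QCMP_GT) != 0;

  /* IEEE !(a <= b), i.e. "unordered or greater": also true on NaN
     (this is _U_Qfungt).  */
  int ungt = _U_Qfcmp (a, b, QCMP_INV | QCMP_UNORD | QCMP_GT) != 0;

  return gt | (ungt << 1);
}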
+
+/* Negate long double A.  */
+long double
+_U_Qfneg (long double a)
+{
+  union
+   {
+     long double ld;
+     int i[4];
+   } u;
+
+  u.ld = a;
+  u.i[0] ^= 0x80000000;
+  return u.ld;
+}
+
+/* Return long double A with sign changed to sign of long double B.  */
+long double
+_U_Qfcopysign (long double a, long double b)
+{
+  union
+   {
+     long double ld;
+     int i[4];
+   } ua, ub;
+
+  ua.ld = a;
+  ub.ld = b;
+  ua.i[0] &= 0x7fffffff;
+  ua.i[0] |= (0x80000000 & ub.i[0]);
+  return ua.ld;
+}
+
+#ifdef __LP64__
+/* This routine is only necessary for the PA64 port; for reasons unknown
+   _U_Qfcnvfxt_quad_to_sgl returns the integer in the high 32 bits of the
+   return value.  Ugh.  */
+int
+__U_Qfcnvfxt_quad_to_sgl (long double a)
+{
+  return _U_Qfcnvfxt_quad_to_sgl (a) >> 32;
+}
+#endif
+
+/* HP only has signed conversion in the C library, so we need to
+   synthesize unsigned versions.  */
+unsigned int
+_U_Qfcnvfxt_quad_to_usgl (long double a)
+{
+  extern long long _U_Qfcnvfxt_quad_to_dbl (long double a);
+  return (unsigned int) _U_Qfcnvfxt_quad_to_dbl (a);
+}
+
+long double
+_U_Qfcnvxf_usgl_to_quad (unsigned int a)
+{
+  extern long double _U_Qfcnvxf_dbl_to_quad (long long);
+  return _U_Qfcnvxf_dbl_to_quad ((long long) a);
+}
+
+typedef union {
+    unsigned long long u[2];
+    long double d[1];
+} quad_type;
+
+unsigned long long
+_U_Qfcnvfxt_quad_to_udbl (long double a)
+{
+  extern quad_type _U_Qfcnvfxt_quad_to_quad (long double a);
+  quad_type u;
+  u = _U_Qfcnvfxt_quad_to_quad(a);
+  return u.u[1];
+}
+
+long double
+_U_Qfcnvxf_udbl_to_quad (unsigned long long a)
+{
+  extern long double _U_Qfcnvxf_quad_to_quad (quad_type a);
+  quad_type u;
+  u.u[0] = 0;
+  u.u[1] = a;
+  return _U_Qfcnvxf_quad_to_quad (u);
+}
diff --git a/libgcc/config/pa/t-hpux b/libgcc/config/pa/t-hpux
new file mode 100644
index 0000000..fcf93ab
--- /dev/null
+++ b/libgcc/config/pa/t-hpux
@@ -0,0 +1,3 @@
+LIB2ADD = $(srcdir)/config/pa/lib2funcs.S $(srcdir)/config/pa/quadlib.c
+
+HOST_LIBGCC2_CFLAGS += -frandom-seed=fixed-seed
diff --git a/libgcc/config/pa/t-hpux10 b/libgcc/config/pa/t-hpux10
new file mode 100644
index 0000000..5620f31
--- /dev/null
+++ b/libgcc/config/pa/t-hpux10
@@ -0,0 +1 @@
+HOST_LIBGCC2_CFLAGS += -D_T_HPUX10
diff --git a/libgcc/config/pa/t-linux b/libgcc/config/pa/t-linux
index d396bf7..2157de9 100644
--- a/libgcc/config/pa/t-linux
+++ b/libgcc/config/pa/t-linux
@@ -1,6 +1,10 @@
 #Plug millicode routines into libgcc.a  We want these on both native and
 #cross compiles.  We use the "64-bit" routines because the "32-bit" code
 #is broken for certain corner cases.
-
 LIB1ASMSRC = pa/milli64.S
 LIB1ASMFUNCS = _divI _divU _remI _remU _div_const _mulI _dyncall
+
+HOST_LIBGCC2_CFLAGS += -DELF=1 -DLINUX=1
+
+LIB2ADD = $(srcdir)/config/pa/fptr.c
+LIB2ADD_ST = $(srcdir)/config/pa/linux-atomic.c
diff --git a/libgcc/config/pa/t-linux64 b/libgcc/config/pa/t-linux64
index 6cb9806..1d0a6ad 100644
--- a/libgcc/config/pa/t-linux64
+++ b/libgcc/config/pa/t-linux64
@@ -2,3 +2,7 @@
 # cross compiles.
 # FIXME: Explain.
 LIB1ASMFUNCS := $(filter-out _dyncall, $(LIB1ASMFUNCS))
+
+LIB2ADD_ST = $(srcdir)/config/pa/linux-atomic.c
+
+HOST_LIBGCC2_CFLAGS += -Dpa64=1 -DELF=1
diff --git a/libgcc/config/pa/t-pa64 b/libgcc/config/pa/t-pa64
new file mode 100644
index 0000000..98f28ed
--- /dev/null
+++ b/libgcc/config/pa/t-pa64
@@ -0,0 +1,3 @@
+LIB2ADD = $(srcdir)/config/pa/quadlib.c
+
+HOST_LIBGCC2_CFLAGS += -Dpa64=1 -DELF=1 -mlong-calls
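Returning to the sign-bit unions in quadlib.c above: they assume a big-endian 128-bit IEEE quad in which word 0 carries the sign bit. A hypothetical self-check, meaningful only on a target where long double really is that format (e.g. hppa64-hpux); it is not part of the library:

#include <assert.h>

int
main (void)
{
  union { long double ld; unsigned int i[4]; } u;

  u.ld = -1.0L;
  assert (u.i[0] & 0x80000000u);  /* sign bit lives in word 0 */
  u.i[0] ^= 0x80000000u;          /* the _U_Qfneg operation */
  assert (u.ld == 1.0L);
  return 0;
}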